#!/usr/bin/env python
#
# This file is part of the GROMACS molecular simulation package.
#
# Copyright (c) 2020,2021, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# GROMACS is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# GROMACS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with GROMACS; if not, see
# http://www.gnu.org/licenses, or write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
#
# If you want to redistribute modifications to GROMACS, please
# consider that scientific software is very special. Version
# control is crucial - bugs must be traceable. We will be happy to
# consider code for inclusion in the official distribution, but
# derived work must not be called official GROMACS. Details are found
# in the README & COPYING files - if they are missing, get the
# official version at http://www.gromacs.org.
#
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.

36 """Building block based Dockerfile generation for CI testing images.
37
38 Generates a set of docker images used for running GROMACS CI on Gitlab.
39 The images are prepared according to a selection of build configuration targets
40 that hope to cover a broad enough scope of different possible systems,
41 allowing us to check compiler types and versions, as well as libraries used
42 for accelerators and parallel communication systems. Each combinations is
43 described as an entry in the build_configs dictionary, with the script
44 analysing the logic and adding build stages as needed.
45
Based on the example script provided by the NVidia HPCCM repository.

Reference:
    `NVidia HPC Container Maker <https://github.com/NVIDIA/hpc-container-maker>`__

Authors:
    * Paul Bauer <paul.bauer.q@gmail.com>
    * Eric Irrgang <ericirrgang@gmail.com>
    * Joe Jordan <e.jjordan12@gmail.com>
    * Mark Abraham <mark.j.abraham@gmail.com>

Usage::

    $ python3 scripted_gmx_docker_builds.py --help
    $ python3 scripted_gmx_docker_builds.py --format docker > Dockerfile && docker build .
    $ python3 scripted_gmx_docker_builds.py | docker build -

See Also:
    :file:`buildall.sh`

"""

import argparse
import collections
import typing
from distutils.version import StrictVersion

import hpccm
import hpccm.config
from hpccm.building_blocks.base import bb_base

try:
    import utility
except ImportError:
    raise RuntimeError(
        'This module assumes availability of supporting modules in the same directory. Add the directory to '
        'PYTHONPATH or invoke Python from within the module directory so module location can be resolved.')

# Basic packages for all final images.
_common_packages = ['build-essential',
                    'ca-certificates',
                    'ccache',
                    'git',
                    'gnupg',
                    'gpg-agent',
                    'libfftw3-dev',
                    'libhwloc-dev',
                    'liblapack-dev',
                    'libx11-dev',
                    'moreutils',
                    'ninja-build',
                    'rsync',
                    'valgrind',
                    'vim',
                    'wget',
                    'xsltproc']

_opencl_extra_packages = [
    'nvidia-opencl-dev',
    # The following require apt_ppas=['ppa:intel-opencl/intel-opencl']
    'intel-opencl-icd',
    'ocl-icd-libopencl1',
    'ocl-icd-opencl-dev',
    'opencl-headers',
]

_rocm_extra_packages = [
    # The following require
    #             apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
    #             apt_repositories=['deb [arch=amd64] http://repo.radeon.com/rocm/apt/4.0.1/ xenial main']
    'libelf1',
    'rocm-opencl',
    'rocm-dev',
    'clinfo',
]

# Extra packages needed to build Intel Compute Runtime
_intel_compute_runtime_extra_packages = ['pkg-config',
                                         'libxml2',
                                         'libxml2-dev',
                                         'libigc',
                                         'libigc-dev',
                                         'libigdgmm11',
                                         'libigdgmm-dev',
                                         'libze-loader',
                                         'libze-loader-dev',
                                         'ocl-icd-libopencl1',
                                         'ocl-icd-opencl-dev',
                                         'opencl-headers']

# Extra packages needed to build Python installations from source.
_python_extra_packages = ['build-essential',
                          'ca-certificates',
                          'ccache',
                          'curl',
                          'git',
                          'libbz2-dev',
                          'libffi-dev',
                          'liblzma-dev',
                          'libncurses5-dev',
                          'libncursesw5-dev',
                          'libreadline-dev',
                          'libsqlite3-dev',
                          'libssl-dev',
                          'llvm',
                          'python-openssl',
                          'vim',
                          'wget',
                          'zlib1g-dev']

# Extra packages needed for images for building documentation.
_docs_extra_packages = ['autoconf',
                        'automake',
                        'autopoint',
                        'autotools-dev',
                        'bison',
                        'flex',
                        'ghostscript',
                        'graphviz',
                        'help2man',
                        'imagemagick',
                        'libtool',
                        'mscgen',
                        'm4',
                        'openssh-client',
                        'texinfo',
                        'texlive-latex-base',
                        'texlive-latex-extra',
                        'texlive-fonts-recommended',
                        'texlive-fonts-extra',
                        'tex-gyre']

# Parse command line arguments
parser = argparse.ArgumentParser(description='GROMACS CI image creation script',
                                 parents=[utility.parser])

parser.add_argument('--format', type=str, default='docker',
                    choices=['docker', 'singularity'],
                    help='Container specification format (default: docker)')
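# The remaining options (e.g. --ubuntu, --centos, --cuda, --gcc, --llvm,
# --tsan, --oneapi, --mpi, --venvs) are inherited from utility.parser; the
# spellings above are inferred from the args attributes used in this script,
# so see utility.py for the authoritative definitions.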


def base_image_tag(args) -> str:
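    # For example, args.cuda='11.0' with args.ubuntu='18.04' produces
    # 'nvidia/cuda:11.0-devel-ubuntu18.04', while args.ubuntu='18.04' alone
    # produces 'ubuntu:18.04'.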
    # Check if we use CUDA images or plain linux images
    if args.cuda is not None:
        cuda_version_tag = 'nvidia/cuda:' + args.cuda + '-devel'
        if args.centos is not None:
            cuda_version_tag += '-centos' + args.centos
        elif args.ubuntu is not None:
            cuda_version_tag += '-ubuntu' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')

        base_image_tag = cuda_version_tag
    else:
        if args.centos is not None:
            base_image_tag = 'centos:centos' + args.centos
        elif args.ubuntu is not None:
            base_image_tag = 'ubuntu:' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')
    return base_image_tag


def get_llvm_packages(args) -> typing.Iterable[str]:
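    # When TSAN is requested, the LLVM toolchain is instead built from source
    # in a separate stage (see add_tsan_compiler_build_stage), so no
    # distribution LLVM packages are needed here.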
    # If we use the package version of LLVM, we need to install extra packages for it.
    if (args.llvm is not None) and (args.tsan is None):
        packages = [f'libomp-{args.llvm}-dev',
                    f'libomp5-{args.llvm}',
                    f'clang-format-{args.llvm}',
                    f'clang-tidy-{args.llvm}']
        if args.hipsycl is not None:
            packages += [f'llvm-{args.llvm}-dev',
                         f'libclang-{args.llvm}-dev',
                         f'lld-{args.llvm}']
        return packages
    else:
        return []

def get_opencl_packages(args) -> typing.Iterable[str]:
    if (args.doxygen is None) and (args.oneapi is None):
        return _opencl_extra_packages
    else:
        return []

def get_rocm_packages(args) -> typing.Iterable[str]:
    if args.rocm is None:
        return []
    else:
        return _rocm_extra_packages

def get_compiler(args, compiler_build_stage: hpccm.Stage = None) -> bb_base:
    # Compiler
    if args.llvm is not None:
        # Build our own version instead to get TSAN + OMP
        if args.tsan is not None:
            if compiler_build_stage is not None:
                compiler = compiler_build_stage.runtime(_from='tsan')
            else:
                raise RuntimeError('No TSAN compiler build stage!')
        # Build the default compiler if we don't need special support
        else:
            compiler = hpccm.building_blocks.llvm(extra_repository=True, version=args.llvm)

    elif args.oneapi is not None:
        if compiler_build_stage is not None:
            compiler = compiler_build_stage.runtime(_from='oneapi')
            # Prepare the toolchain (needed only for builds done within the Dockerfile, e.g.
            # OpenMPI builds, which don't currently work for other reasons)
            oneapi_toolchain = hpccm.toolchain(CC=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icx',
                                               CXX=f'/opt/intel/oneapi/compiler/{args.oneapi}/linux/bin/intel64/icpx')
            setattr(compiler, 'toolchain', oneapi_toolchain)

        else:
            raise RuntimeError('No oneAPI compiler build stage!')

    elif args.gcc is not None:
        compiler = hpccm.building_blocks.gnu(extra_repository=True,
                                             version=args.gcc,
                                             fortran=False)
    else:
        raise RuntimeError('Logic error: no compiler toolchain selected.')
    return compiler


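# The CUDA-aware MPI stack is built in layers: gdrcopy provides fast GPU
# memory copies, UCX is built with gdrcopy and CUDA support as the
# communication substrate, and OpenMPI is in turn built against UCX.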
def get_gdrcopy(args, compiler):
    if args.cuda is not None:
        if hasattr(compiler, 'toolchain'):
            # Version last updated June 7, 2021
            return hpccm.building_blocks.gdrcopy(toolchain=compiler.toolchain, version="2.2")
        else:
            raise RuntimeError('compiler is not an HPCCM compiler building block!')
    else:
        return None

def get_ucx(args, compiler, gdrcopy):
    if args.cuda is not None:
        if hasattr(compiler, 'toolchain'):
            use_gdrcopy = (gdrcopy is not None)
            # Version last updated June 7, 2021
            return hpccm.building_blocks.ucx(toolchain=compiler.toolchain, gdrcopy=use_gdrcopy, version="1.10.1", cuda=True)
        else:
            raise RuntimeError('compiler is not an HPCCM compiler building block!')
    else:
        return None

def get_mpi(args, compiler, ucx):
    # If needed, add MPI to the image
    if args.mpi is not None:
        if args.mpi == 'openmpi':
            if hasattr(compiler, 'toolchain'):
                if args.oneapi is not None:
                    raise RuntimeError('oneAPI building OpenMPI is not supported')
                use_cuda = (args.cuda is not None)
                use_ucx = (ucx is not None)
                # Version last updated June 7, 2021
                return hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, version="4.1.1", cuda=use_cuda, ucx=use_ucx, infiniband=False)
            else:
                raise RuntimeError('compiler is not an HPCCM compiler building block!')

        elif args.mpi == 'impi':
            # TODO Intel MPI from the oneAPI repo is not working reliably,
            # reasons are unclear. When solved, add packages called:
            # 'intel-oneapi-mpi', 'intel-oneapi-mpi-devel'
            # during the compiler stage.
            # TODO also consider hpccm's intel_mpi package if that doesn't need
            # a license to run.
            raise RuntimeError('Intel MPI recipe not implemented yet.')
        else:
            raise RuntimeError('Requested unknown MPI implementation.')
    else:
        return None


def get_clfft(args):
    if args.clfft is not None:
        return hpccm.building_blocks.generic_cmake(
            repository='https://github.com/clMathLibraries/clFFT.git',
            prefix='/usr/local', recursive=True, branch=args.clfft, directory='clFFT/src')
    else:
        return None

def get_hipsycl(args):
    if args.hipsycl is None:
        return None
    if args.llvm is None:
        raise RuntimeError('Cannot build hipSYCL without LLVM')

    if args.rocm is None:
        raise RuntimeError('hipSYCL requires the ROCm packages')

    cmake_opts = [f'-DLLVM_DIR=/usr/lib/llvm-{args.llvm}/cmake',
                  f'-DCLANG_EXECUTABLE_PATH=/usr/bin/clang++-{args.llvm}',
                  '-DCMAKE_PREFIX_PATH=/opt/rocm/lib/cmake',
                  '-DWITH_ROCM_BACKEND=ON']
    if args.cuda is not None:
        cmake_opts += ['-DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
                       '-DWITH_CUDA_BACKEND=ON']

    postinstall = [
            # https://github.com/illuhad/hipSYCL/issues/361#issuecomment-718943645
            'for f in /opt/rocm/amdgcn/bitcode/*.bc; do ln -s "$f" "/opt/rocm/lib/$(basename $f .bc).amdgcn.bc"; done'
            ]
    if args.cuda is not None:
        postinstall += [
            # https://github.com/illuhad/hipSYCL/issues/410#issuecomment-743301929
            f'sed s/_OPENMP/__OPENMP_NVPTX__/ -i /usr/lib/llvm-{args.llvm}/lib/clang/*/include/__clang_cuda_complex_builtins.h',
            # Not needed unless we're building with CUDA 11.x, but no harm in always doing it
            'ln -s /usr/local/cuda/compat/* /usr/local/cuda/lib64/'
            ]

    return hpccm.building_blocks.generic_cmake(
        repository='https://github.com/illuhad/hipSYCL.git',
        directory='/var/tmp/hipSYCL',
        prefix='/usr/local', recursive=True, commit=args.hipsycl,
        cmake_opts=['-DCMAKE_BUILD_TYPE=Release', *cmake_opts],
        postinstall=postinstall)

def get_intel_compute_runtime(args):
    # The only reason we need to build Compute Runtime ourselves is that the Intel packages have no DG1 support.
    # Otherwise, we could have just installed DEB packages from GitHub or Intel PPA.
    if args.intel_compute_runtime is None:
        return None

    cmake_opts = ['-DCMAKE_BUILD_TYPE=Release',
                  '-DSKIP_UNIT_TESTS=TRUE',
                  '-DSUPPORT_GEN8=0', '-DSUPPORT_GEN9=1', '-DSUPPORT_GEN11=1', '-DSUPPORT_GEN12LP=1', '-DSUPPORT_DG1=1',
                  '-DBUILD_WITH_L0=1']
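    # The SUPPORT_GEN* and SUPPORT_DG1 flags select which Intel GPU
    # generations the runtime is compiled for; DG1 support is the reason for
    # this custom build (see the comment above).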

    return hpccm.building_blocks.generic_cmake(
        repository='https://github.com/intel/compute-runtime.git',
        directory='compute-runtime',
        prefix='/usr/local', recursive=True, branch=args.intel_compute_runtime,
        cmake_opts=cmake_opts,
        postinstall=['ldconfig'])

def add_tsan_compiler_build_stage(input_args, output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Isolate the expensive TSAN preparation stage.

    This is a very expensive stage, but has few and disjoint dependencies, and
    its output is easily compartmentalized (/usr/local) so we can isolate this
    build stage to maximize build cache hits and reduce rebuild time, bookkeeping,
    and final image size.
    """
    if not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need output_stages container.')
    tsan_stage = hpccm.Stage()
    tsan_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='tsan')

    tsan_stage += hpccm.building_blocks.packages(ospackages=['git', 'ca-certificates', 'build-essential', 'cmake'])
    # CMake will get duplicated later, but this is an expensive image, and it isn't worth optimizing
    # out that duplication...
    tsan_stage += hpccm.building_blocks.python(python3=True, python2=False, devel=False)

    compiler_branch = 'release/' + str(input_args.llvm) + '.x'
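    # e.g. --llvm 11 builds from the 'release/11.x' branch of llvm-project.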
    tsan_stage += hpccm.building_blocks.generic_cmake(
        repository='https://github.com/llvm/llvm-project.git',
        directory='/var/tmp/llvm-project/llvm/',
        prefix='/usr/local', recursive=True, branch=compiler_branch,
        cmake_opts=['-D CMAKE_BUILD_TYPE=Release', '-D LLVM_ENABLE_PROJECTS="clang;openmp;clang-tools-extra;compiler-rt;lld"',
                    '-D LIBOMP_TSAN_SUPPORT=on'],
        postinstall=['ln -s /usr/local/bin/clang++ /usr/local/bin/clang++-' + str(input_args.llvm),
                     'ln -s /usr/local/bin/clang-format /usr/local/bin/clang-format-' + str(input_args.llvm),
                     'ln -s /usr/local/bin/clang-tidy /usr/local/bin/clang-tidy-' + str(input_args.llvm),
                     'ln -s /usr/local/share/clang/run-clang-tidy.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py',
                     'ln -s /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm),
                     'ln -s /usr/local/libexec/c++-analyzer /usr/local/bin/c++-analyzer-' + str(input_args.llvm)])
    output_stages['compiler_build'] = tsan_stage

def oneapi_runtime(_from='0'):
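    # Copy the oneAPI installation (/opt/intel) and the bashrc that sources
    # its setvars.sh from the 'oneapi-build' stage into the consuming stage.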
    oneapi_runtime_stage = hpccm.Stage()
    oneapi_runtime_stage += hpccm.primitives.copy(_from='oneapi-build',
                                                  files={"/opt/intel": "/opt/intel",
                                                         "/etc/bash.bashrc": "/etc/bash.bashrc"})
    return oneapi_runtime_stage

def add_oneapi_compiler_build_stage(input_args, output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Isolate the oneAPI preparation stage.

    This stage is isolated so that its installed components are minimized in the
    final image (chiefly /opt/intel) and its environment setup script can be
    sourced. This also helps with rebuild time and final image size.
    """
    if not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need output_stages container.')
    oneapi_stage = hpccm.Stage()
    oneapi_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='oneapi-build')

    version = str(input_args.oneapi)

    # Add required components for the next stage (both for hpccm and Intel's setvars.sh script)
    oneapi_stage += hpccm.building_blocks.packages(ospackages=['wget', 'gnupg2', 'ca-certificates', 'lsb-release'])
    oneapi_stage += hpccm.building_blocks.packages(
        apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB'],
        apt_repositories=['deb https://apt.repos.intel.com/oneapi all main'],
        # Add minimal packages (not the whole HPC toolkit!)
        ospackages=[f'intel-oneapi-dpcpp-cpp-{version}',
            f'intel-oneapi-openmp-{version}',
            f'intel-oneapi-mkl-{version}',
            f'intel-oneapi-mkl-devel-{version}']
    )
    # Ensure that all bash shells on the final container will have access to oneAPI
    oneapi_stage += hpccm.primitives.shell(
            commands=['echo "source /opt/intel/oneapi/setvars.sh" >> /etc/bash.bashrc',
                      'unlink /opt/intel/oneapi/compiler/latest',
                     f'ln -sf /opt/intel/oneapi/compiler/{version} /opt/intel/oneapi/compiler/latest']
            )
    setattr(oneapi_stage, 'runtime', oneapi_runtime)

    output_stages['compiler_build'] = oneapi_stage

def prepare_venv(version: StrictVersion) -> typing.Sequence[str]:
    """Get shell commands to set up the venv for the requested Python version."""
    major = version.version[0]
    minor = version.version[1]  # type: int

    pyenv = '$HOME/.pyenv/bin/pyenv'

    py_ver = f'{major}.{minor}'
    venv_path = f'$HOME/venv/py{py_ver}'
    commands = [f'$({pyenv} prefix `{pyenv} whence python{py_ver}`)/bin/python -m venv {venv_path}']
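    # 'pyenv whence python3.X' names the installed pyenv version that provides
    # that interpreter, and 'pyenv prefix' resolves it to an installation
    # directory, so the venv above is created from the pyenv-built interpreter.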

    commands.append(f'{venv_path}/bin/python -m pip install --upgrade pip setuptools')
    # Install dependencies for building and testing gmxapi Python package.
    # WARNING: Please keep this list synchronized with python_packaging/requirements-test.txt
    # TODO: Get requirements.txt from an input argument.
    commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
            'cmake>=3.16.3' \
            'flake8>=3.7.7' \
            'gcovr>=4.2' \
            'mpi4py>=3.0.3' \
            'networkx>=2.0' \
            'numpy>=1' \
            'pip>=10.1' \
            'Pygments>=2.2.0' \
            'pytest>=3.9' \
            'setuptools>=42' \
            'scikit-build>=0.10' \
            'Sphinx>=1.6.3' \
            'sphinxcontrib-plantuml>=0.14'""")

    # TODO: Remove 'importlib_resources' dependency when Python >=3.7 is required.
    if minor == 6:
        commands.append(f"""{venv_path}/bin/python -m pip install --upgrade \
                'importlib_resources'""")

    return commands


def add_python_stages(building_blocks: typing.Mapping[str, bb_base],
                      input_args,
                      output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Add the stage(s) necessary for the requested venvs.

    One intermediate build stage is created for each venv (see --venv option).

    Each stage partially populates Python installations and venvs in the home
    directory. The home directory is collected by the 'pyenv' stage for use by
    the main build stage.
    """
    if len(input_args.venvs) < 1:
        raise RuntimeError('No venvs to build...')
    if output_stages is None or not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need a container for output stages.')

    # Main Python stage that collects the environments from individual stages.
    # We collect the stages individually, rather than chaining them, because the
    # copy is a bit slow and wastes local Docker image space for each filesystem
    # layer.
    pyenv_stage = hpccm.Stage()
    pyenv_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='pyenv')
    pyenv_stage += building_blocks['compiler']
    if building_blocks['gdrcopy'] is not None:
        pyenv_stage += building_blocks['gdrcopy']
    if building_blocks['ucx'] is not None:
        pyenv_stage += building_blocks['ucx']
    pyenv_stage += building_blocks['mpi']
    pyenv_stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)

    for version in [StrictVersion(py_ver) for py_ver in sorted(input_args.venvs)]:
        stage_name = 'py' + str(version)
        stage = hpccm.Stage()
        stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as=stage_name)
        stage += building_blocks['compiler']
        if building_blocks['gdrcopy'] is not None:
            stage += building_blocks['gdrcopy']
        if building_blocks['ucx'] is not None:
            stage += building_blocks['ucx']
        stage += building_blocks['mpi']
        stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)

        # TODO: Use a non-root user for testing and Python virtual environments.
        stage += hpccm.primitives.shell(commands=[
            'curl https://pyenv.run | bash',
            """echo 'export PYENV_ROOT="$HOME/.pyenv"' >> $HOME/.bashrc""",
            """echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> $HOME/.bashrc""",
            """echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc""",
            """echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc"""])
        pyenv = '$HOME/.pyenv/bin/pyenv'
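        # 'pyenv install -s' skips the build if the requested version is
        # already installed; PYTHON_CONFIGURE_OPTS="--enable-shared" builds
        # libpython as a shared library so the interpreter can be embedded.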
        commands = [f'PYTHON_CONFIGURE_OPTS="--enable-shared" {pyenv} install -s {version}']
        stage += hpccm.primitives.shell(commands=commands)

        commands = prepare_venv(version)
        stage += hpccm.primitives.shell(commands=commands)

        # TODO: Update user home directory.
        pyenv_stage += hpccm.primitives.copy(_from=stage_name, _mkdir=True, src=['/root/'],
                                             dest='/root')

        # Add the intermediate build stage to the sequence
        output_stages[stage_name] = stage

    # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
    # # `version` should be a system installation or pyenv environment (or pyenv-virtualenv)
    # # with the dependencies for all of the Python aspects of CMake-driven builds.
    # commands = '{pyenv} global {version}'.format(
    #             pyenv=pyenv,
    #             version=...)
    # pyenv_stage += hpccm.primitives.shell(commands=commands)

    # Add the aggregating build stage to the sequence. This allows the main stage to copy
    # the files in a single stage, potentially reducing the overall output image size.
    output_stages['pyenv'] = pyenv_stage


def add_documentation_dependencies(input_args,
                                   output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Add appropriate layers according to doxygen input arguments."""
    if input_args.doxygen is None:
        return
    # Always clone the same version of linkchecker (latest release at June 1, 2021)
    output_stages['main'] += hpccm.building_blocks.pip(pip='pip3', packages=['git+https://github.com/linkchecker/linkchecker.git@v10.0.1'])
    output_stages['main'] += hpccm.primitives.shell(
        commands=['sed -i \'/\"XPS\"/d;/\"PDF\"/d;/\"PS\"/d;/\"EPS\"/d;/disable ghostscript format types/d\' /etc/ImageMagick-6/policy.xml'])
    if input_args.doxygen == '1.8.5':
        doxygen_commit = 'ed4ed873ab0e7f15116e2052119a6729d4589f7a'
        output_stages['main'] += hpccm.building_blocks.generic_autotools(
            repository='https://github.com/westes/flex.git',
            commit='f7788a9a0ecccdc953ed12043ccb59ca25714018',
            prefix='/tmp/install-of-flex',
            configure_opts=['--disable-shared'],
            preconfigure=['./autogen.sh'])
        output_stages['main'] += hpccm.building_blocks.generic_autotools(
            repository='https://github.com/doxygen/doxygen.git',
            commit=doxygen_commit,
            prefix='',
            configure_opts=[
                '--flex /tmp/install-of-flex/bin/flex',
                '--static'])
    else:
        version = input_args.doxygen
        archive_name = f'doxygen-{version}.linux.bin.tar.gz'
        archive_url = f'https://sourceforge.net/projects/doxygen/files/rel-{version}/{archive_name}'
        binary_path = f'doxygen-{version}/bin/doxygen'
        commands = [
            'mkdir doxygen && cd doxygen',
            f'wget {archive_url}',
            f'tar xf {archive_name} {binary_path}',
            f'cp {binary_path} /usr/local/bin/',
            'cd .. && rm -rf doxygen'
        ]
        output_stages['main'] += hpccm.primitives.shell(commands=commands)


def build_stages(args) -> typing.Iterable[hpccm.Stage]:
    """Define and sequence the stages for the recipe corresponding to *args*."""

    # A Dockerfile or Singularity recipe can have multiple build stages.
    # The main build stage can copy files from previous stages, though only
    # the last stage is included in the tagged output image. This means that
    # large or expensive sets of build instructions can be isolated in
    # local/temporary images, but all of the stages need to be output by this
    # script, and need to occur in the correct order, so we create a sequence
    # object early in this function.
    stages = collections.OrderedDict()

    # If we need TSAN or oneAPI support, the compiler build is more complex and
    # is done in an early, separate stage, so that our final images don't carry
    # all the cruft needed to get those toolchains installed.
    if args.llvm is not None and args.tsan is not None:
        add_tsan_compiler_build_stage(input_args=args, output_stages=stages)
    if args.oneapi is not None:
        add_oneapi_compiler_build_stage(input_args=args, output_stages=stages)

    # Building blocks are chunks of container-builder instructions that can be
    # copied to any build stage with the addition operator.
    building_blocks = collections.OrderedDict()
    building_blocks['base_packages'] = hpccm.building_blocks.packages(
        ospackages=_common_packages)

    # These are the most expensive and most reusable layers, so we put them first.
    building_blocks['compiler'] = get_compiler(args, compiler_build_stage=stages.get('compiler_build'))
    building_blocks['gdrcopy'] = get_gdrcopy(args, building_blocks['compiler'])
    building_blocks['ucx'] = get_ucx(args, building_blocks['compiler'], building_blocks['gdrcopy'])
    building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'], building_blocks['ucx'])
    for i, cmake in enumerate(args.cmake):
        building_blocks['cmake' + str(i)] = hpccm.building_blocks.cmake(
            eula=True,
            prefix=f'/usr/local/cmake-{cmake}',
            version=cmake)
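    # Each requested CMake version gets its own prefix
    # (/usr/local/cmake-<version>), so several versions can coexist in one image.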

    # Install additional packages early in the build to optimize Docker build layer cache.
    os_packages = list(get_llvm_packages(args)) + get_opencl_packages(args) + get_rocm_packages(args)
    if args.doxygen is not None:
        os_packages += _docs_extra_packages
    if args.oneapi is not None:
        os_packages += ['lsb-release']
    if args.hipsycl is not None:
        os_packages += ['libboost-fiber-dev']
    if args.intel_compute_runtime is not None:
        os_packages += _intel_compute_runtime_extra_packages
    building_blocks['extra_packages'] = []
    if args.rocm is not None:
        building_blocks['extra_packages'] += hpccm.building_blocks.packages(
            apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
            apt_repositories=[f'deb [arch=amd64] http://repo.radeon.com/rocm/apt/{args.rocm}/ xenial main']
        )
    building_blocks['extra_packages'] += hpccm.building_blocks.packages(
        ospackages=os_packages,
        apt_ppas=['ppa:intel-opencl/intel-opencl'])

    if args.cuda is not None and args.llvm is not None:
        # Hack to tell clang what version of CUDA we're using
        # based on https://github.com/llvm/llvm-project/blob/1fdec59bffc11ae37eb51a1b9869f0696bfd5312/clang/lib/Driver/ToolChains/Cuda.cpp#L43
        cuda_version_split = args.cuda.split('.')
        # LLVM requires the version in x.y.z format, while args.cuda may be either x.y or x.y.z
        cuda_version_str = '{}.{}.{}'.format(
            cuda_version_split[0],
            cuda_version_split[1],
            cuda_version_split[2] if len(cuda_version_split) > 2 else 0
        )
        building_blocks['cuda-clang-workaround'] = hpccm.primitives.shell(commands=[
            f'echo "CUDA Version {cuda_version_str}" > /usr/local/cuda/version.txt'
            ])

    building_blocks['clfft'] = get_clfft(args)

    building_blocks['hipSYCL'] = get_hipsycl(args)

    building_blocks['intel-compute-runtime'] = get_intel_compute_runtime(args)

    # Add Python environments to MPI images only, so we don't have to worry
    # about whether to install mpi4py.
    if args.mpi is not None and len(args.venvs) > 0:
        add_python_stages(building_blocks=building_blocks, input_args=args, output_stages=stages)

    # Create the stage from which the targeted image will be tagged.
    stages['main'] = hpccm.Stage()

    stages['main'] += hpccm.primitives.baseimage(image=base_image_tag(args))
    for bb in building_blocks.values():
        if bb is not None:
            stages['main'] += bb

    # We always add Python3 and Pip
    stages['main'] += hpccm.building_blocks.python(python3=True, python2=False)

    # Add documentation requirements (doxygen and sphinx + misc).
    if args.doxygen is not None:
        add_documentation_dependencies(args, stages)

    if 'pyenv' in stages and stages['pyenv'] is not None:
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/.pyenv/'],
                                                dest='/root/.pyenv')
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/venv/'],
                                                dest='/root/venv')
        # TODO: Update user home directory.
        # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
        # stages['main'] += hpccm.primitives.copy(_from='pyenv', src=['/root/.bashrc'],
        #                                         dest='/root/')

    # Make sure that `python` resolves to something.
    stages['main'] += hpccm.primitives.shell(commands=['test -x /usr/bin/python || '
                                                       'update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && '
                                                       '/usr/bin/python --version'])

    # Note that the list of stages should be sorted in dependency order.
    for build_stage in stages.values():
        if build_stage is not None:
            yield build_stage


if __name__ == '__main__':
    args = parser.parse_args()

    # Set container specification output format
    hpccm.config.set_container_format(args.format)
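    # hpccm renders each stage as Dockerfile or Singularity definition text,
    # according to the configured format, when the stage is printed below.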

    container_recipe = build_stages(args)

    # Output container specification
    for stage in container_recipe:
        print(stage)