Support clang 13 in CI
[alexxy/gromacs.git] / admin / containers / scripted_gmx_docker_builds.py
index ecdbb27b0399aa2acf6c583976af22da395f8343..0f9bc18ad2ea24c1c5c4746f00e4c52f5f979574 100755 (executable)
@@ -124,19 +124,10 @@ _rocm_extra_packages = [
 ]
 
 # Extra packages needed to build Intel Compute Runtime
-_intel_compute_runtime_extra_packages = ['libigc',
-                                         'libigc-dev',
-                                         'libigdgmm11',
-                                         'libigdgmm-dev',
-                                         'libxml2',
-                                         'libxml2-dev',
-                                         'libze-loader',
-                                         'libze-loader-dev',
-                                         'ocl-icd-libopencl1',
-                                         'ocl-icd-opencl-dev',
-                                         'opencl-headers',
-                                         'pkg-config',
-                                         ]
+_intel_compute_runtime_extra_packages = ['intel-opencl-icd',
+                                         'intel-level-zero-gpu',
+                                         'level-zero',
+                                         'libmfx1']
 
 # Extra packages needed to build Python installations from source.
 _python_extra_packages = ['build-essential',
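
(For context: hpccm renders a package list like the one above into a
single apt-get RUN layer. A minimal sketch, assuming hpccm is installed
and the docker output format; not part of the patch:

    import hpccm
    import hpccm.config
    import hpccm.building_blocks

    hpccm.config.set_container_format('docker')
    # Renders an `apt-get update && apt-get install ...` instruction.
    print(hpccm.building_blocks.packages(
        ospackages=['intel-opencl-icd', 'level-zero']))

The new _intel_compute_runtime_extra_packages names come from Intel's
graphics apt repository, which a later hunk registers before installing
them.)
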
@@ -281,10 +272,10 @@ def get_compiler(args, compiler_build_stage: hpccm.Stage = None) -> bb_base:
                 raise RuntimeError('No TSAN compiler build stage!')
         # Build the default compiler if we don't need special support
         else:
-            # Currently the focal apt repositories do not contain
-            # llvm higher than 11, so we work around that. This will
-            # need further work when we start supporting ubuntu 22.04
-            compiler = hpccm.building_blocks.llvm(version=args.llvm, upstream=True if int(args.llvm) > 11 else False)
+            # Always use the "upstream" llvm repositories because the
+            # main ubuntu repositories stop adding new llvm versions a
+            # few releases after a given ubuntu version ships.
+            compiler = hpccm.building_blocks.llvm(version=args.llvm, upstream=True)
 
     elif args.oneapi is not None:
         if compiler_build_stage is not None:
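
(A minimal sketch of what the new llvm call produces; the version and
distro strings are illustrative, not part of the patch:

    import hpccm
    import hpccm.config
    import hpccm.building_blocks

    hpccm.config.set_container_format('docker')
    hpccm.config.set_linux_distro('ubuntu20')
    # With upstream=True, hpccm takes clang/llvm from apt.llvm.org
    # rather than the main ubuntu repositories, so e.g. llvm 13 is
    # installable on focal.
    print(hpccm.building_blocks.llvm(version='13', upstream=True))

Choosing the right apt.llvm.org repository requires hpccm to know the
distro, which is why the final hunk sets it explicitly before any
building blocks are created.)
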
@@ -419,25 +410,6 @@ def get_hipsycl(args):
         postinstall=postinstall)
 
 
-def get_intel_compute_runtime(args):
-    # The only reason we need to build Compute Runtime ourselves is because Intel packages have no DG1 support
-    # Otherwise, we could have just installed DEB packages from GitHub or Intel PPA
-    if args.intel_compute_runtime is None:
-        return None
-
-    cmake_opts = ['-DCMAKE_BUILD_TYPE=Release',
-                  '-DSKIP_UNIT_TESTS=TRUE',
-                  '-DSUPPORT_GEN8=0', '-DSUPPORT_GEN9=1', '-DSUPPORT_GEN11=1', '-DSUPPORT_GEN12LP=1', '-DSUPPORT_DG1=1',
-                  '-DBUILD_WITH_L0=1']
-
-    return hpccm.building_blocks.generic_cmake(
-        repository='https://github.com/intel/compute-runtime.git',
-        directory='compute-runtime',
-        prefix='/usr/local', recursive=True, branch=args.intel_compute_runtime,
-        cmake_opts=cmake_opts,
-        postinstall=['ldconfig'])
-
-
 def add_tsan_compiler_build_stage(input_args, output_stages: typing.Mapping[str, hpccm.Stage]):
     """Isolate the expensive TSAN preparation stage.
 
@@ -562,8 +534,8 @@ def prepare_venv(version: StrictVersion) -> typing.Sequence[str]:
     return commands
 
 
-def add_python_stages(building_blocks: typing.Mapping[str, bb_base],
-                      input_args,
+def add_python_stages(input_args: argparse.Namespace, *,
+                      base: str,
                       output_stages: typing.MutableMapping[str, hpccm.Stage]):
     """Add the stage(s) necessary for the requested venvs.
 
@@ -583,29 +555,17 @@ def add_python_stages(building_blocks: typing.Mapping[str, bb_base],
     # copy is a bit slow and wastes local Docker image space for each filesystem
     # layer.
     pyenv_stage = hpccm.Stage()
-    pyenv_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args),
+    pyenv_stage += hpccm.primitives.baseimage(image=base,
                                               _distro=hpccm_distro_name(input_args),
                                               _as='pyenv')
-    pyenv_stage += building_blocks['compiler']
-    if building_blocks['gdrcopy'] is not None:
-        pyenv_stage += building_blocks['gdrcopy']
-    if building_blocks['ucx'] is not None:
-        pyenv_stage += building_blocks['ucx']
-    pyenv_stage += building_blocks['mpi']
     pyenv_stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
 
     for version in [StrictVersion(py_ver) for py_ver in sorted(input_args.venvs)]:
         stage_name = 'py' + str(version)
         stage = hpccm.Stage()
-        stage += hpccm.primitives.baseimage(image=base_image_tag(input_args),
+        stage += hpccm.primitives.baseimage(image=base,
                                             _distro=hpccm_distro_name(input_args),
                                             _as=stage_name)
-        stage += building_blocks['compiler']
-        if building_blocks['gdrcopy'] is not None:
-            stage += building_blocks['gdrcopy']
-        if building_blocks['ucx'] is not None:
-            stage += building_blocks['ucx']
-        stage += building_blocks['mpi']
         stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)
 
         # TODO: Use a non-root user for testing and Python virtual environments.
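
(Aside on the loop above: wrapping each version string in StrictVersion
gives numeric ordering, which plain string sorting does not. A quick
illustration, not part of the patch:

    from distutils.version import StrictVersion

    assert StrictVersion('3.10') > StrictVersion('3.9')  # numeric order
    assert sorted(['3.10', '3.9']) == ['3.10', '3.9']    # lexicographic order
)
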
@@ -683,6 +643,33 @@ def add_documentation_dependencies(input_args,
         output_stages['main'] += hpccm.primitives.shell(commands=commands)
 
 
+def add_base_stage(name: str,
+                   input_args: argparse.Namespace,
+                   output_stages: typing.MutableMapping[str, hpccm.Stage]):
+    """Establish dependencies that are shared by multiple parallel stages."""
+    # Building blocks are chunks of container-builder instructions that can be
+    # copied to any build stage with the addition operator.
+    building_blocks = collections.OrderedDict()
+    building_blocks['base_packages'] = hpccm.building_blocks.packages(
+        ospackages=_common_packages)
+
+    # These are the most expensive and most reusable layers, so we put them first.
+    building_blocks['compiler'] = get_compiler(input_args, compiler_build_stage=output_stages.get('compiler_build'))
+    building_blocks['gdrcopy'] = get_gdrcopy(input_args, building_blocks['compiler'])
+    building_blocks['ucx'] = get_ucx(input_args, building_blocks['compiler'], building_blocks['gdrcopy'])
+    building_blocks['mpi'] = get_mpi(input_args, building_blocks['compiler'], building_blocks['ucx'])
+
+    # Create the stage from which the targeted image will be tagged.
+    output_stages[name] = hpccm.Stage()
+
+    output_stages[name] += hpccm.primitives.baseimage(image=base_image_tag(input_args),
+                                                      _distro=hpccm_distro_name(input_args),
+                                                      _as=name)
+    for bb in building_blocks.values():
+        if bb is not None:
+            output_stages[name] += bb
+
+
 def build_stages(args) -> typing.Iterable[hpccm.Stage]:
     """Define and sequence the stages for the recipe corresponding to *args*."""
 
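
(A minimal sketch of the multi-stage pattern add_base_stage enables; the
image and stage names are illustrative, not part of the patch:

    import hpccm
    import hpccm.config
    import hpccm.primitives

    hpccm.config.set_container_format('docker')
    base = hpccm.Stage()
    base += hpccm.primitives.baseimage(image='ubuntu:20.04', _as='build_base')
    derived = hpccm.Stage()
    # Naming the stage with _as lets later stages build FROM it, so the
    # expensive compiler/gdrcopy/ucx/mpi layers are built only once.
    derived += hpccm.primitives.baseimage(image='build_base', _as='main')
    print(base)
    print(derived)

This emits `FROM ubuntu:20.04 AS build_base` followed by
`FROM build_base AS main`.)
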
@@ -703,17 +690,17 @@ def build_stages(args) -> typing.Iterable[hpccm.Stage]:
     if args.oneapi is not None:
         add_oneapi_compiler_build_stage(input_args=args, output_stages=stages)
 
+    add_base_stage(name='build_base', input_args=args, output_stages=stages)
+
+    # Add Python environments to MPI images only, so we don't have to
+    # worry about whether to install mpi4py.
+    if args.mpi is not None and len(args.venvs) > 0:
+        add_python_stages(base='build_base', input_args=args, output_stages=stages)
+
     # Building blocks are chunks of container-builder instructions that can be
     # copied to any build stage with the addition operator.
     building_blocks = collections.OrderedDict()
-    building_blocks['base_packages'] = hpccm.building_blocks.packages(
-        ospackages=_common_packages)
 
-    # These are the most expensive and most reusable layers, so we put them first.
-    building_blocks['compiler'] = get_compiler(args, compiler_build_stage=stages.get('compiler_build'))
-    building_blocks['gdrcopy'] = get_gdrcopy(args, building_blocks['compiler'])
-    building_blocks['ucx'] = get_ucx(args, building_blocks['compiler'], building_blocks['gdrcopy'])
-    building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'], building_blocks['ucx'])
     for i, cmake in enumerate(args.cmake):
         building_blocks['cmake' + str(i)] = hpccm.building_blocks.cmake(
             eula=True,
@@ -728,9 +715,13 @@ def build_stages(args) -> typing.Iterable[hpccm.Stage]:
         os_packages += ['lsb-release']
     if args.hipsycl is not None:
         os_packages += ['libboost-fiber-dev']
-    if args.intel_compute_runtime is not None:
-        os_packages += _intel_compute_runtime_extra_packages
     building_blocks['extra_packages'] = []
+    if args.intel_compute_runtime is not None:
+        building_blocks['extra_packages'] += hpccm.building_blocks.packages(
+            apt_keys=['https://repositories.intel.com/graphics/intel-graphics.key'],
+            apt_repositories=['deb [arch=amd64] https://repositories.intel.com/graphics/ubuntu focal main']
+        )
+        os_packages += _intel_compute_runtime_extra_packages
     if args.rocm is not None:
         building_blocks['extra_packages'] += hpccm.building_blocks.packages(
             apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
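
(Sketch of what a repository-plus-packages block like the Intel and ROCm
ones above renders to; assumes the docker format, not part of the patch:

    import hpccm
    import hpccm.config
    import hpccm.building_blocks

    hpccm.config.set_container_format('docker')
    # apt_keys and apt_repositories register the signing key and source
    # list before the listed packages are installed.
    print(hpccm.building_blocks.packages(
        apt_keys=['https://repositories.intel.com/graphics/intel-graphics.key'],
        apt_repositories=['deb [arch=amd64] https://repositories.intel.com/graphics/ubuntu focal main'],
        ospackages=['intel-opencl-icd', 'level-zero']))
)
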
@@ -760,17 +751,17 @@ def build_stages(args) -> typing.Iterable[hpccm.Stage]:
 
     building_blocks['hipSYCL'] = get_hipsycl(args)
 
-    building_blocks['intel-compute-runtime'] = get_intel_compute_runtime(args)
-
     # Add Python environments to MPI images, only, so we don't have to worry
     # about whether to install mpi4py.
     if args.mpi is not None and len(args.venvs) > 0:
-        add_python_stages(building_blocks=building_blocks, input_args=args, output_stages=stages)
+        add_python_stages(base='build_base', input_args=args, output_stages=stages)
 
     # Create the stage from which the targeted image will be tagged.
     stages['main'] = hpccm.Stage()
 
-    stages['main'] += hpccm.primitives.baseimage(image=base_image_tag(args))
+    stages['main'] += hpccm.primitives.baseimage(image='build_base',
+                                                 _distro=hpccm_distro_name(args),
+                                                 _as='main')
     for bb in building_blocks.values():
         if bb is not None:
             stages['main'] += bb
@@ -808,6 +799,12 @@ if __name__ == '__main__':
 
     # Set container specification output format
     hpccm.config.set_container_format(args.format)
+    # Normally in hpccm the first call to baseimage sets the context
+    # for other building blocks, e.g. which distro version to use. We
+    # set that early here so that hpccm selects the right llvm apt
+    # repository when we request llvm versions that the main ubuntu
+    # apt repositories never provided.
+    hpccm.config.set_linux_distro(hpccm_distro_name(args))
 
     container_recipe = build_stages(args)
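
(hpccm_distro_name is defined elsewhere in this file; a hypothetical
sketch of the mapping it performs, assuming ubuntu and centos are the
supported base images:

    # Hypothetical illustration only, not the actual implementation.
    def hpccm_distro_name(args) -> str:
        if args.ubuntu is not None:
            return 'ubuntu' + args.ubuntu.split('.')[0]  # '20.04' -> 'ubuntu20'
        if args.centos is not None:
            return 'centos' + args.centos                # '7' -> 'centos7'
        raise RuntimeError('Unsupported base image distribution.')

Setting the distro before any building block is constructed lets the
llvm block above choose the matching apt.llvm.org repository for the
requested clang version.)
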