#
# This file is part of the GROMACS molecular simulation package.
#
- # Copyright (c) 2015,2016, by the GROMACS development team, led by
+ # Copyright (c) 2015,2016,2017, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
import os.path
+ # These are accessible later in the script, just like other
+ # declared options, via e.g. context.opts.release.
# NOTE(review): this span is a unified-diff hunk; '-' lines are removed and
# '+' lines added. The change appends a trailing comma after 'openmp' and
# adds three string-valued options ('nranks', 'npme', 'gpu_id') that the
# do_build() function below reads to parameterize the regression-test run.
extra_options = {
    'mdrun-only': Option.simple,
    'static': Option.simple,
    'thread-mpi': Option.bool,
    'gpu': Option.bool,
    'opencl': Option.bool,
-    'openmp': Option.bool
+    'openmp': Option.bool,
+    'nranks': Option.string,
+    'npme': Option.string,
+    'gpu_id': Option.string
}
+
extra_projects = [Project.REGRESSIONTESTS]
def do_build(context):
    # Jenkins build driver: sets cmake options from context.opts, builds the
    # 'tests' and 'install' targets, then runs the regression suite via
    # gmxtest.pl with flags assembled into 'cmd'.
    # NOTE(review): this span is a unified-diff hunk with context lines
    # elided (e.g. the JobType.RELEASE branch body below is missing), so it
    # is not runnable as-is; '-' lines are removed, '+' lines added.
    if context.opts.x11:
        cmake_opts['GMX_X11'] = 'ON'
+    # At least hwloc on Jenkins produces a massive amount of reports about
+    # memory leaks, which cannot be reasonably suppressed because ASAN cannot
+    # produce a reasonable stack trace for them.
+    if context.opts.asan:
+        cmake_opts['GMX_HWLOC'] = 'OFF'
+
    regressiontests_path = context.workspace.get_project_dir(Project.REGRESSIONTESTS)
    if context.job_type == JobType.RELEASE:
    else:
        context.build_target(target='tests', keep_going=True)
        # ctest now runs under the memory checker whenever the asan option is
        # set, replacing the old post-hoc '-parse asan_symbolize.py' step
        # removed further below.
-        context.run_ctest(args=['--output-on-failure'])
+        context.run_ctest(args=['--output-on-failure'], memcheck=context.opts.asan)
        context.build_target(target='install')
        # TODO: Consider what could be tested about the installed binaries.
        # Thread-MPI is the default unless real MPI was requested or
        # thread-mpi was explicitly disabled.
        use_tmpi = not context.opts.mpi and context.opts.thread_mpi is not False
        cmd = 'perl gmxtest.pl -mpirun mpirun -xml -nosuffix all'
-        if context.opts.asan:
-            cmd+=' -parse asan_symbolize.py'
        # setting this stuff below is just a temporary solution,
        # it should all be passed in as a proper runconf from the outside,
        # not explicitly set
        cmd += ' -ntomp 2'
        # GPU ids are no longer hard-coded per build slave; they now come from
        # the gpu_id option when it is given.
-        if context.opts.gpu:
-            if context.opts.mpi or use_tmpi:
-                gpu_id = '01' # for (T)MPI use the two GT 640-s
-            else:
-                gpu_id = '0' # use GPU #0 by default
-            cmd += ' -gpu_id ' + gpu_id
+        if context.opts.gpu_id:
+            cmd += ' -gpu_id ' + context.opts.gpu_id
+
+        # Rank count defaults to '2' but can be overridden via the nranks
+        # option; npme is passed through verbatim when given.
+        if context.opts.nranks:
+            nranks = context.opts.nranks
+        else:
+            nranks = '2'
+
+        if context.opts.npme:
+            cmd += ' -npme ' + context.opts.npme
-        # TODO: Add options to influence this (should be now local to the build
-        # script).
        if context.opts.mpi:
-            cmd += ' -np 2'
+            cmd += ' -np ' + nranks
        elif use_tmpi:
-            cmd += ' -nt 2'
+            cmd += ' -nt ' + nranks
        if context.opts.double:
            cmd += ' -double'
+        # detect_leaks=0 turns off leak detection for the regression-test run
+        # (see the hwloc leak-report note near the top of this function).
+        if context.opts.asan:
+            context.env.set_env_var('ASAN_OPTIONS', 'detect_leaks=0')
        context.run_cmd(cmd, shell=True, failure_message='Regression tests failed to execute')
- gcc-4.8 gpu cuda-6.5 mpi openmp x11
- gcc-4.8 gpu cuda-8.0 openmp release
+ # Comment line(s) preceding each configuration document the main
+ # intent behind that configuration, so that we can judge correctly
+ # whether to preserve that intent when making maintenance decisions.
++#
++# The phrase "at time of release" refers to either the initial
++# GROMACS release from that branch, or the next expected release
++# from that branch. (We will tend not to retroactively support
++# newer infrastructure in CI, unless it's either easy or of
++# high impact.)
+
+ # Test older gcc
+ # Test oldest supported CUDA
-# Test oldest supported cmake
+ # Test oldest supported Ubuntu
+ # Test X11 build
+ # Test MPI with CUDA
+ # Test MPMD PME with library MPI
-gcc-4.6 gpu cuda-5.0 mpi npme=1 nranks=2 openmp x11 cmake-2.8.8
++gcc-4.8 gpu cuda-6.5 mpi npme=1 nranks=2 openmp x11
+
+ # Test newest gcc supported by newest CUDA at time of release
+ # Test thread-MPI with CUDA
-gcc-4.8 gpu cuda-7.5 openmp release
++gcc-5.4 gpu cuda-8.0 openmp release
+
-# Test with ThreadSanitizer
++# Test with ThreadSanitizer (without OpenMP, because of Redmine #1850)
+ # Test AVX2_256 SIMD
+ # Test fftpack fallback
gcc-4.9 tsan fftpack simd=avx2_256
+
+ # Test newest gcc at time of release
+ # Test on MacOS
gcc-6.1 double
-# Test with AddressSanitizer
+
+ # Test older clang
+ # Test double precision
-clang-3.4 double no-openmp fftpack asan
+ # Test without OpenMP
- clang-3.8 no-openmp asan cmake-3.4.3
- # TODO move mdrun-only config to post-submit matrix
- clang-3.7 double mpi no-openmp fftpack mdrun-only cmake-3.4.3
+clang-3.4 double no-openmp fftpack
++
++# Test newer clang
++# Test with AddressSanitizer
++clang-3.8 no-openmp asan
+
+ # Test oldest supported MSVC on Windows
+ # Test newest supported MSVC on Windows
msvc-2015 openmp release
+
+ # Test oldest supported icc on Windows
+ # Test newest supported icc on Windows
icc-16.0 msvc-2015 fftpack
- icc-16.0 no-thread-mpi openmp mkl simd=avx_256
- gcc-5.1 mpi openmp cmake-3.4.3
- gcc-4.8 openmp opencl cuda-8.0 mpi release
- gcc-5.2 openmp opencl simd=avx_128_fma amdappsdk-3.0
+
+ # Test newest cmake at time of release
+ # Test MKL
+ # Test without any MPI
+ # Test on CentOS
-icc-16.0 no-thread-mpi openmp mkl cmake-3.3.2 simd=avx_256
++icc-16.0 no-thread-mpi openmp mkl cmake-3.6.1 simd=avx_256
+
++# Test oldest supported cmake
+ # Test AVX_128_FMA SIMD
-gcc-5.2 mpi openmp simd=avx_128_fma
++gcc-5.2 mpi openmp simd=avx_128_fma cmake-3.4.3
+
+ # Test NVIDIA OpenCL
+ # Test MPI + OpenCL
+ gcc-4.8 openmp opencl cuda-7.5 mpi release
+
+ # Test AMD OpenCL
+ gcc-5.2 openmp opencl amdappsdk-3.0
+
+ # TODO
-# When merging to master branch, make a configuration use bs_nix1204
-# When merging to master branch, note TODO to support updated compilers and cmake for release-2017
++# Add testing for support for cmake 3.8 for release-2017, e.g. to bs_mic and a CUDA slave (for the new CUDA support)
++# Add testing for support for clang 4.0 for release-2017, and move e.g. ASAN build to it, e.g. to bs_nix-amd
++# Add testing for support for gcc 7 for release-2017, e.g. to bs_mac
++# Add OpenMP support to a clang build
++# Add OpenMP support to ASAN build
+ # Test hwloc support
-# Test newest supported Ubuntu
++# Test newest supported LTS Ubuntu
+ # Migrate ThreadSanitizer test off GPU build slave
++# Explore adding openmp to ThreadSanitizer configuration, perhaps can avoid Redmine #1850 if done differently
++# Update gcc and clang specifiers per https://redmine.gromacs.org/issues/2161
++# Update gpu testing specifiers per https://redmine.gromacs.org/issues/2161
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.
# NOTE(review): this span is a unified-diff hunk of a SIMD-selection
# if()/elseif() chain whose opening if() and several surrounding branches
# are elided, so it is not valid CMake on its own; '-' lines are removed,
# '+' lines added.
-# include avx test source, used if the AVX flags are set below
-include(gmxTestAVXMaskload)
include(gmxFindFlagsForSource)
# Macro that manages setting the respective C and C++ toolchain
set(GMX_SIMD_X86_${GMX_SIMD} 1)
set(SIMD_STATUS_MESSAGE "Enabling 128-bit AVX SIMD GROMACS SIMD (with fused-multiply add)")
# The gcc AVX maskload-bug probe is dropped from both AVX branches (its
# include above is removed as well).
-    gmx_test_avx_gcc_maskload_bug(GMX_SIMD_X86_AVX_GCC_MASKLOAD_BUG "${SIMD_C_FLAGS}")
-
elseif(GMX_SIMD STREQUAL "AVX_256")
    prepare_x86_toolchain(TOOLCHAIN_C_FLAGS TOOLCHAIN_CXX_FLAGS)
    set(GMX_SIMD_X86_${GMX_SIMD} 1)
    set(SIMD_STATUS_MESSAGE "Enabling 256-bit AVX SIMD instructions")
-    gmx_test_avx_gcc_maskload_bug(GMX_SIMD_X86_AVX_GCC_MASKLOAD_BUG "${SIMD_C_FLAGS}")
-
# The AVX2 branch is generalized from exactly "AVX2_256" to any value
# matching "AVX2_" so AVX2_128 is also handled, with a per-variant
# status message.
-elseif(GMX_SIMD STREQUAL "AVX2_256")
+elseif(GMX_SIMD MATCHES "AVX2_")
    prepare_x86_toolchain(TOOLCHAIN_C_FLAGS TOOLCHAIN_CXX_FLAGS)
    set(SIMD_C_FLAGS "${TOOLCHAIN_C_FLAGS}")
    set(SIMD_CXX_FLAGS "${TOOLCHAIN_CXX_FLAGS}")
    set(GMX_SIMD_X86_${GMX_SIMD} 1)
-    set(SIMD_STATUS_MESSAGE "Enabling 256-bit AVX2 SIMD instructions")
-    # No need to test for Maskload bug - it was fixed before gcc added AVX2 support
+    if(GMX_SIMD STREQUAL "AVX2_128")
+        set(SIMD_STATUS_MESSAGE "Enabling 128-bit AVX2 SIMD instructions")
+    else()
+        set(SIMD_STATUS_MESSAGE "Enabling 256-bit AVX2 SIMD instructions")
+    endif()
elseif(GMX_SIMD STREQUAL "MIC")
    int main(){__m512 y,x=_mm512_set1_ps(0.5);y=_mm512_fmadd_ps(x,x,x);return (int)_mm512_cmp_ps_mask(x,y,_CMP_LT_OS);}"
    TOOLCHAIN_C_FLAGS TOOLCHAIN_CXX_FLAGS
    SIMD_${GMX_SIMD}_C_FLAGS SIMD_${GMX_SIMD}_CXX_FLAGS
# The icc probe flag for AVX-512F changes from -xMIC-AVX512 to
# -xCORE-AVX512 (presumably retargeting Xeon rather than Xeon Phi —
# TODO confirm against the branch this hunk applies to).
-    "-xMIC-AVX512" "-mavx512f -mfma" "-mavx512f" "/arch:AVX" "-hgnu") # no AVX_512F flags known for MSVC yet
+    "-xCORE-AVX512" "-mavx512f -mfma" "-mavx512f" "/arch:AVX" "-hgnu") # no AVX_512F flags known for MSVC yet
if(NOT SIMD_${GMX_SIMD}_C_FLAGS OR NOT SIMD_${GMX_SIMD}_CXX_FLAGS)
    gmx_give_fatal_error_when_simd_support_not_found("AVX 512F" "choose a lower level of SIMD (slower)" "${SUGGEST_BINUTILS_UPDATE}")
set(SIMD_STATUS_MESSAGE "Enabling IBM QPX SIMD instructions")
else()
# The suggested BlueGene/Q toolchain file in the fatal-error hint changes
# from the static XL variant to the static bgclang variant.
-    gmx_give_fatal_error_when_simd_support_not_found("IBM QPX" "or 'cmake .. -DCMAKE_TOOLCHAIN_FILE=Platform/BlueGeneQ-static-XL-CXX' to set up the tool chain" "${SUGGEST_BINUTILS_UPDATE}")
+    gmx_give_fatal_error_when_simd_support_not_found("IBM QPX" "or 'cmake .. -DCMAKE_TOOLCHAIN_FILE=Platform/BlueGeneQ-static-bgclang-CXX' to set up the tool chain" "${SUGGEST_BINUTILS_UPDATE}")
endif()
elseif(GMX_SIMD STREQUAL "IBM_VMX")