#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2009,2010,2011,2012,2013,2014, The GROMACS development team.
-# Copyright (c) 2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
+# Copyright (c) 2009,2010,2011,2012,2013 by the GROMACS development team.
+# Copyright (c) 2014,2015,2016,2017,2018 by the GROMACS development team.
+# Copyright (c) 2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.
-cmake_minimum_required(VERSION 3.9.6)
-if(POLICY CMP0074) #3.12
- cmake_policy(SET CMP0074 NEW)
-endif()
+cmake_minimum_required(VERSION 3.13)
+cmake_policy(SET CMP0074 NEW) # From CMake 3.12
cmake_policy(SET CMP0068 NEW) # From CMake-3.9
# CMake modules/macros are in a subdirectory to keep this file cleaner
list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Platform)
if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
- # Providing a default value >=10.9 helps to find modern C++ compatibility,
+ # Providing a default value >=10.14 helps to find modern C++ compatibility,
# such as by defaulting to the Clang libc++ instead of libstdc++.
- set(CMAKE_OSX_DEPLOYMENT_TARGET 10.9 CACHE STRING
+ set(CMAKE_OSX_DEPLOYMENT_TARGET 10.14 CACHE STRING
"OS X deployment target affects default SDK version and compiler flags."
FORCE)
# By default, limit the binary architecture to a single 64-bit build.
project(Gromacs)
-set(CMAKE_CXX_STANDARD 14)
+set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
find_package(LibStdCpp)
+# Python is first referenced in gmxVersionInfo, so we perform the search early
+# to find a suitable installation for all components.
+include(gmxPythonDiscovery)
# Set up common version variables, as well as general information about
# the build tree (whether the build is from a source package or from a git
# repository). Also declares a few functions that will be used for generating
include(gmxBuildTypeTSAN)
include(gmxBuildTypeASAN)
include(gmxBuildTypeMSAN)
+include(gmxBuildTypeUBSAN)
include(gmxBuildTypeReleaseWithAssert)
if(NOT CMAKE_BUILD_TYPE)
- set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel Reference RelWithAssert Profile TSAN ASAN MSAN." FORCE)
+ set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel Reference RelWithAssert Profile TSAN ASAN MSAN UBSAN." FORCE)
# Set the possible values of build type for cmake-gui
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release"
- "MinSizeRel" "RelWithDebInfo" "Reference" "RelWithAssert" "Profile" "TSAN" "ASAN" "MSAN")
+ "MinSizeRel" "RelWithDebInfo" "Reference" "RelWithAssert" "Profile" "TSAN" "ASAN" "MSAN" "UBSAN")
endif()
if(CMAKE_CONFIGURATION_TYPES)
# Add appropriate GROMACS-specific build types for the Visual
mark_as_advanced(GMX_COOL_QUOTES)
gmx_add_cache_dependency(GMX_COOL_QUOTES BOOL "NOT GMX_FAHCORE" OFF)
-option(GMX_USE_OPENCL "Enable OpenCL acceleration" OFF)
-
option(GMX_INSTALL_LEGACY_API "Install legacy headers" OFF)
-# The earliest version of the CUDA toolkit that supports c++14 is 9.0
-set(REQUIRED_CUDA_VERSION 9.0)
-set(REQUIRED_CUDA_COMPUTE_CAPABILITY 3.0)
-
-# OpenCL required version: 1.2 or newer
-set(REQUIRED_OPENCL_MIN_VERSION_MAJOR 1)
-set(REQUIRED_OPENCL_MIN_VERSION_MINOR 2)
-set(REQUIRED_OPENCL_MIN_VERSION ${REQUIRED_OPENCL_MIN_VERSION_MAJOR}.${REQUIRED_OPENCL_MIN_VERSION_MINOR})
-
-if(NOT GMX_USE_OPENCL)
- # CUDA detection is done only if GMX_USE_OPENCL is OFF.
- include(gmxManageGPU)
- set(GMX_USE_CUDA ${GMX_GPU})
- if(GMX_GPU)
- set(GMX_GPU_ACCELERATION_FRAMEWORK "GMX_GPU_CUDA")
- else()
- set(GMX_GPU_ACCELERATION_FRAMEWORK "GMX_GPU_NONE")
- endif()
-else()
- #Now the OpenCL path (for both AMD and NVIDIA)
- if(GMX_GPU)
- include(gmxManageOpenCL)
- set(GMX_GPU_ACCELERATION_FRAMEWORK "GMX_GPU_OPENCL")
- else()
- message(FATAL_ERROR "OpenCL requested but GPU option is not enabled (try -DGMX_GPU=on) ")
- endif()
-endif()
+gmx_option_multichoice(
+ GMX_GPU
+ "Framework for GPU acceleration"
+ OFF
+ OFF CUDA OpenCL SYCL)
gmx_option_multichoice(
GMX_SIMD
mark_as_advanced(GMX_BUILD_OWN_FFTW)
mark_as_advanced(GMX_DISABLE_FFTW_MEASURE)
-gmx_option_multichoice(
- GMX_QMMM_PROGRAM
- "QM package for QM/MM"
- None
- none gaussian mopac gamess orca)
-
gmx_dependent_cache_variable(GMX_SIMD_REF_FLOAT_WIDTH "Reference SIMD single precision width" STRING "4" "GMX_SIMD STREQUAL REFERENCE")
gmx_dependent_cache_variable(GMX_SIMD_REF_DOUBLE_WIDTH "Reference SIMD double precision width" STRING "2" "GMX_SIMD STREQUAL REFERENCE")
include(gmxManageSharedLibraries)
+########################################################################
+# Specify install locations
+########################################################################
+# Use GNUInstallDirs to set paths on multiarch systems.
+include(GNUInstallDirs)
+
+set(GMX_INSTALL_DATASUBDIR "gromacs" CACHE STRING "Subdirectory for GROMACS data under CMAKE_INSTALL_DATADIR")
+mark_as_advanced(GMX_INSTALL_DATASUBDIR)
+
+# Internal convenience so we do not have to join two path segments in the code
+set(GMX_INSTALL_GMXDATADIR ${CMAKE_INSTALL_DATADIR}/${GMX_INSTALL_DATASUBDIR})
+
+# If the nesting level wrt. the installation root is changed,
+# gromacs-config.cmake.cmakein needs to be adapted.
+set(GMX_INSTALL_CMAKEDIR ${CMAKE_INSTALL_DATAROOTDIR}/cmake)
+
+# TODO: Make GMXRC adapt if this is changed
+set(GMX_INSTALL_PKGCONFIGDIR ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
+
+list(APPEND INSTALLED_HEADER_INCLUDE_DIRS ${CMAKE_INSTALL_INCLUDEDIR})
+
+# Binary and library suffix options
+include(gmxManageSuffixes)
+
+
########################################################################
# Find external packages #
########################################################################
# set(XML_LIBRARIES ${LIBXML2_LIBRARIES})
#endif()
-gmx_option_trivalue(
- GMX_HWLOC
- "Use hwloc portable hardware locality library"
- "AUTO")
+option(GMX_HWLOC "Use hwloc portable hardware locality library" OFF)
if (GMX_HWLOC)
# Find quietly the second time.
include(gmxManageLmfit)
if(GMX_GPU)
- # now that we have detected the dependencies, do the second configure pass
- gmx_gpu_setup()
- if (GMX_CLANG_CUDA)
- list(APPEND GMX_EXTRA_LIBRARIES ${GMX_CUDA_CLANG_LINK_LIBS})
- link_directories("${GMX_CUDA_CLANG_LINK_DIRS}")
+
+ string(TOUPPER "${GMX_GPU}" _gmx_gpu_uppercase)
+ if(${_gmx_gpu_uppercase} STREQUAL "CUDA")
+ include(gmxManageCuda)
+ elseif(${_gmx_gpu_uppercase} STREQUAL "OPENCL")
+ include(gmxManageOpenCL)
+ elseif(${_gmx_gpu_uppercase} STREQUAL "SYCL")
+ include(gmxManageSYCL)
+ endif()
+ if(NOT GMX_OPENMP)
+ message(WARNING "To use GPU acceleration efficiently, mdrun requires OpenMP multi-threading, which is currently not enabled.")
endif()
+
endif()
if(CYGWIN)
########################################################################
include_directories(BEFORE ${CMAKE_SOURCE_DIR}/src)
+include_directories(SYSTEM ${CMAKE_SOURCE_DIR}/src/external)
# Required for config.h, maybe should only be set in src/CMakeLists.txt
include_directories(BEFORE ${CMAKE_BINARY_DIR}/src)
include(gmxSetBuildInformation)
gmx_set_build_information()
-gmx_option_multichoice(
- GMX_USE_RDTSCP
- "Use low-latency RDTSCP instruction for CPU-based timers for mdrun execution; might need to be off when compiling for heterogeneous environments)"
- "AUTO"
- OFF ON AUTO DETECT)
+# Anything but truly ancient x86 hardware should support rdtscp, so we enable it by default.
+# The inline assembly calling it is only ever compiled on x86, so defaulting to ON is OK.
+option(GMX_USE_RDTSCP "Use low-latency RDTSCP instruction for x86 CPU-based timers for mdrun execution; might need to be off when compiling for heterogeneous environments" ON)
mark_as_advanced(GMX_USE_RDTSCP)
-macro(gmx_check_rdtscp)
- if (CPU_DETECTION_FEATURES MATCHES "rdtscp")
- set(HAVE_RDTSCP 1)
- set(RDTSCP_DETECTION_MESSAGE " - detected on the build host")
- else()
- set(RDTSCP_DETECTION_MESSAGE " - not detected on the build host")
- endif()
-endmacro()
-
-set(HAVE_RDTSCP 0)
-if (GMX_USE_RDTSCP STREQUAL "ON")
- set(HAVE_RDTSCP 1)
-elseif(GMX_USE_RDTSCP STREQUAL "DETECT")
- gmx_check_rdtscp()
-elseif(GMX_USE_RDTSCP STREQUAL "AUTO")
- # If the user specified automated SIMD selection, that the choice
- # is made based on detection on the build host. If so, then RDTSCP
- # should be chosen the same way.
- #
- # If the user specified an AVX SIMD level (e.g. when
- # cross-compiling GROMACS) then they will get our best guess, ie
- # that in practice AVX mostly correlates with rdtscp (and anyway
- # is only relevant in rather old x86 hardware).
- if (GMX_SIMD STREQUAL "AUTO")
- gmx_check_rdtscp()
- elseif (GMX_SIMD MATCHES "AVX")
- set(HAVE_RDTSCP 1)
- endif()
-endif()
-gmx_check_if_changed(HAVE_RDTSCP_CHANGED HAVE_RDTSCP)
-if (HAVE_RDTSCP_CHANGED)
- if (HAVE_RDTSCP)
- message(STATUS "Enabling RDTSCP support${RDTSCP_DETECTION_MESSAGE}")
- else()
- message(STATUS "Disabling RDTSCP support${RDTSCP_DETECTION_MESSAGE}")
- endif()
-endif()
-
include(gmxTestLargeFiles)
gmx_test_large_files(GMX_LARGEFILES)
include(gmxManageSimd)
gmx_manage_simd()
-include(gmxManageCycleCounters)
-gmx_manage_cycle_counters()
-
-# Process QM/MM Settings
-if(${GMX_QMMM_PROGRAM} STREQUAL "GAUSSIAN")
- set(GMX_QMMM_GAUSSIAN 1)
-elseif(${GMX_QMMM_PROGRAM} STREQUAL "MOPAC")
- set(GMX_QMMM_MOPAC 1)
-elseif(${GMX_QMMM_PROGRAM} STREQUAL "GAMESS")
- set(GMX_QMMM_GAMESS 1)
-elseif(${GMX_QMMM_PROGRAM} STREQUAL "ORCA")
- set(GMX_QMMM_ORCA 1)
-elseif(${GMX_QMMM_PROGRAM} STREQUAL "NONE")
- # nothing to do
-else()
- gmx_invalid_option_value(GMX_QMMM_PROGRAM)
-endif()
-
-
##################################################
# Process FFT library settings
##################################################
include(gmxManageLinearAlgebraLibraries)
include(gmxManagePluginSupport)
+gmx_manage_plugin_support()
-if (GMX_USE_PLUGINS)
+if(GMX_USE_PLUGINS)
if(NOT GMX_VMD_PLUGIN_PATH)
find_package(VMD)
endif()
endif()
+ # People might want to customize the default location for the DSSP binary
+ set(GMX_DSSP_PROGRAM_PATH "/usr/local/bin/dssp"
+ CACHE PATH
+ "The default location to use for the DSSP binary")
+ mark_as_advanced(GMX_DSSP_PROGRAM_PATH)
+
# Link real-time library for POSIX timers. The check for clock_gettime
# confirms the linkability of rt.
if(HAVE_TIME_H AND HAVE_UNISTD_H AND HAVE_CLOCK_GETTIME)
include_directories(${COREWRAP_INCLUDE_DIR})
endif()
-# Value of GMX_BUILD_HELP=AUTO tries to generate things, but will only
-# produce warnings if that fails.
-set(build_help_default AUTO)
-if (SOURCE_IS_SOURCE_DISTRIBUTION OR CMAKE_CROSSCOMPILING)
- set(build_help_default OFF)
-endif()
-gmx_option_trivalue(GMX_BUILD_HELP "Build completions automatically (requires that compiled binaries can be executed on the build host) and install man pages if built (requires building the 'man' target manually)" ${build_help_default})
+option(GMX_BUILD_HELP "Build completions (requires that compiled binaries can be executed on build host) and install man pages if built (requires building the 'man' target manually)" OFF)
mark_as_advanced(GMX_BUILD_HELP)
if (GMX_BUILD_HELP AND SOURCE_IS_SOURCE_DISTRIBUTION AND BUILD_IS_INSOURCE)
message(FATAL_ERROR
foreach(build_type ${build_types_with_explicit_flags})
message("CMAKE_C_FLAGS_${build_type}: ${GMXC_CFLAGS_${build_type}}")
endforeach()
- message("CMAKE_CXX_FLAGS: ${SIMD_CXX_FLAGS};${MPI_COMPILE_FLAGS};${EXTRA_CXX_FLAGS};${GMXC_CXXFLAGS}")
+ message("CMAKE_CXX_FLAGS: ${DISABLE_SYCL_CXX_FLAGS};${SIMD_CXX_FLAGS};${MPI_COMPILE_FLAGS};${EXTRA_CXX_FLAGS};${GMXC_CXXFLAGS}")
foreach(build_type ${build_types_with_explicit_flags})
message("CMAKE_CXX_FLAGS_${build_type}: ${GMXC_CXXFLAGS_${build_type}}")
endforeach()
message("CMAKE_EXE_LINKER_FLAGS: ${FFT_LINKER_FLAGS} ${MPI_LINKER_FLAGS}")
message("CMAKE_SHARED_LINKER_FLAGS: ${FFT_LINKER_FLAGS} ${MPI_LINKER_FLAGS}")
endif()
-
-########################################################################
-# Specify install locations
-########################################################################
-# Use GNUInstallDirs to set paths on multiarch systems.
-include(GNUInstallDirs)
-
-set(GMX_INSTALL_DATASUBDIR "gromacs" CACHE STRING "Subdirectory for GROMACS data under CMAKE_INSTALL_DATADIR")
-mark_as_advanced(GMX_INSTALL_DATASUBDIR)
-
-# Internal convenience so we do not have to join two path segments in the code
-set(GMX_INSTALL_GMXDATADIR ${CMAKE_INSTALL_DATADIR}/${GMX_INSTALL_DATASUBDIR})
-
-# If the nesting level wrt. the installation root is changed,
-# gromacs-config.cmake.cmakein needs to be adapted.
-set(GMX_INSTALL_CMAKEDIR ${CMAKE_INSTALL_DATAROOTDIR}/cmake)
-
-# TODO: Make GMXRC adapt if this is changed
-set(GMX_INSTALL_PKGCONFIGDIR ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
-set(GMX_INSTALL_OCLDIR ${GMX_INSTALL_GMXDATADIR}/opencl)
-
-list(APPEND INSTALLED_HEADER_INCLUDE_DIRS ${CMAKE_INSTALL_INCLUDEDIR})
-
-# Binary and library suffix options
-include(gmxManageSuffixes)
+# Allow `admin` directory to be easily conveyed to nested CMake commands.
+set(GMX_ADMIN_DIR ${CMAKE_SOURCE_DIR}/admin)
################################################################
# Shared library load path settings
endif()
# TODO: Determine control flow and defaults for package installation and testing use cases.
-# Ref: http://redmine.gromacs.org/issues/2896
+# Ref: https://gitlab.com/gromacs/gromacs/-/issues/2896
option(GMX_PYTHON_PACKAGE "Configure gmxapi Python package" OFF)
mark_as_advanced(GMX_PYTHON_PACKAGE)
if (NOT GMX_BUILD_MDRUN_ONLY)
- # Note: Though only documented as an output variable, PYTHON_EXECUTABLE is
- # also effective as a CMake input variable to effectively hint the location
- # of the Python interpreter. This may be helpful in environments with both
- # Python 2 and Python 3 on the default PATH.
- # Ref: https://cmake.org/cmake/help/latest/module/FindPythonInterp.html
- if(FIND_PACKAGE_MESSAGE_DETAILS_PythonInterp)
- # Keep quiet on subsequent runs of cmake
- set(PythonInterp_FIND_QUIETLY ON)
- endif()
- # Older CMake versions might not search for Python newer than 3.7.
- set(Python_ADDITIONAL_VERSIONS 3.8)
- if(GMX_PYTHON_PACKAGE)
- find_package(PythonInterp 3.5 REQUIRED)
- # Note: PythonLibs will be found later by pybind11.
- # TODO: (issue #2998) When CMake >= 3.12 is required, update detection.
- # I.e. find_package(Python3 3.5 COMPONENTS Interpreter Development REQUIRED)
- else()
- find_package(PythonInterp 3.5)
- endif()
find_package(ImageMagick QUIET COMPONENTS convert)
include(gmxTestImageMagick)
GMX_TEST_IMAGEMAGICK(IMAGE_CONVERT_POSSIBLE)
add_subdirectory(share)
add_subdirectory(scripts)
endif()
+add_subdirectory(api)
add_subdirectory(src)
if (BUILD_TESTING)
gmx_cpack_write_config()
-# Issue a warning if NVIDIA GPUs were detected, but CUDA was not found.
-# Don't bother the user after the first configure pass.
-if ((CUDA_NOTFOUND_AUTO AND GMX_DETECT_GPU_AVAILABLE) AND NOT GMX_GPU_DETECTION_DONE)
- message(WARNING "${CUDA_NOTFOUND_MESSAGE}")
-endif()
-set(GMX_GPU_DETECTION_DONE TRUE CACHE INTERNAL "Whether GPU detection has already been done")
-
#######################
## uninstall target
#######################
if [ -z "$RUN_CLANG_TIDY" ]
then
echo "Please set the path to run-clang-tidy using the git hook"
- echo "git config hooks.runclangtidypath /path/to/run-clang-tidy-8.py"
+ echo "git config hooks.runclangtidypath /path/to/run-clang-tidy-9.py"
echo "or by setting an environment variable, e.g."
- echo "RUN_CLANG_TIDY=/path/to/run-clang-tidy-8.py"
+ echo "RUN_CLANG_TIDY=/path/to/run-clang-tidy-9.py"
exit 2
fi
if ! which "$RUN_CLANG_TIDY" 1>/dev/null
then
- echo "run-clang-tidy-8.py not found: $RUN_CLANG_TIDY"
+ echo "run-clang-tidy-9.py not found: $RUN_CLANG_TIDY"
exit 2
fi
fi
if [[ $tidy_mode != "off" && -s $tmpdir/filelist_clangtidy ]] ; then
$RUN_CLANG_TIDY `cat $tmpdir/filelist_clangtidy` -header-filter=.* -j $concurrency -fix -quiet -extra-arg=--cuda-host-only -extra-arg=-nocudainc>$tmpdir/clang-tidy.out 2>&1
awk '/warning/,/clang-tidy|^$/' $tmpdir/clang-tidy.out | grep -v "warnings generated." | grep -v "Suppressed .* warnings" | grep -v "clang-analyzer" | grep -v "to display errors from all non" | sed '/^\s*$/d' > $tmpdir/clang-tidy-warnings.out
- awk '/.*\berror\b.(/' $tmpdir/clang-tidy.out > $tmpdir/clang-tidy-errors.out || true
+ grep '\berror:' $tmpdir/clang-tidy.out > $tmpdir/clang-tidy-errors.out || true
if [ -s $tmpdir/clang-tidy-errors.out ]; then
echo "Running of clang-tidy failed. Check output below for errors:"
cat $tmpdir/clang-tidy-errors.out
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
+# Copyright (c) 2014,2015,2016,2017,2018 by the GROMACS development team.
+# Copyright (c) 2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
# of configure time, because 1) some of the version variables are only
# available during build time, and 2) we don't want to do all the Sphinx setup
# during configuration to save some time when not building the content.
-# All the generated values get put into conf-vars.py (generated from
-# conf-vars.py.cmakein), which in turn is included by the Sphinx configuration
-# file conf.py.
+# All of the generated values get put into conf.py (generated from
+# conf.cmakein.py).
set(SOURCE_MD5SUM "unknown" CACHE STRING
"MD5 sum of the source tarball, normally used only for the pre-release webpage build")
set(SPHINX_SOURCE_FILES
index.rst
download.rst
- conf.py
links.dat
dev-manual/build-system.rst
dev-manual/change-management.rst
dev-manual/commitstyle.rst
+ dev-manual/containers.rst
dev-manual/documentation-generation.rst
dev-manual/contribute.rst
dev-manual/doxygen.rst
dev-manual/error-handling.rst
dev-manual/formatting.rst
+ dev-manual/gitlab.rst
dev-manual/gmxtree.rst
dev-manual/includestyle.rst
dev-manual/index.rst
+ dev-manual/infrastructure.rst
dev-manual/jenkins.rst
dev-manual/known-issues.rst
dev-manual/language-features.rst
how-to/visualize.rst
install-guide/index.rst
release-notes/index.rst
+ release-notes/2021/major/highlights.rst
+ release-notes/2021/major/features.rst
+ release-notes/2021/major/performance.rst
+ release-notes/2021/major/tools.rst
+ release-notes/2021/major/bugs-fixed.rst
+ release-notes/2021/major/removed-functionality.rst
+ release-notes/2021/major/deprecated-functionality.rst
+ release-notes/2021/major/portability.rst
+ release-notes/2021/major/miscellaneous.rst
release-notes/2020/2020.1.rst
release-notes/2020/2020.2.rst
release-notes/2020/2020.3.rst
+ release-notes/2020/2020.4.rst
release-notes/2020/major/highlights.rst
release-notes/2020/major/features.rst
release-notes/2020/major/performance.rst
set(IMAGE_CONVERT_STRING "impossible")
endif()
- set(SPHINX_CONFIG_VARS_FILE ${SPHINX_INPUT_DIR}/conf-vars.py)
+ set(SPHINX_CONFIG_FILE ${SPHINX_INPUT_DIR}/conf.py)
if (GMX_PYTHON_PACKAGE)
set(GMXAPI_PYTHON_STAGING_DIR ${CMAKE_BINARY_DIR}/python_packaging/src/gmxapi_staging)
# TODO: Resolve circular reference. We would like to get the CMake build-time directory for
# in this context?
endif ()
- gmx_configure_version_file(conf-vars.py.cmakein ${SPHINX_CONFIG_VARS_FILE}
+ gmx_configure_version_file(
+ conf.cmakein.py ${SPHINX_CONFIG_FILE}
EXTRA_VARS
- SPHINX_EXTENSION_PATH RELENG_PATH
- IMAGE_CONVERT_STRING
+ CMAKE_MINIMUM_REQUIRED_VERSION
EXPECTED_DOXYGEN_VERSION
EXPECTED_SPHINX_VERSION
- CMAKE_MINIMUM_REQUIRED_VERSION REQUIRED_CUDA_VERSION
- REQUIRED_OPENCL_MIN_VERSION
- REQUIRED_CUDA_COMPUTE_CAPABILITY REGRESSIONTEST_VERSION
- SOURCE_MD5SUM REGRESSIONTEST_MD5SUM_STRING
- GMX_TNG_MINIMUM_REQUIRED_VERSION
+ GMX_ADMIN_DIR
GMX_LMFIT_REQUIRED_VERSION
GMX_MANUAL_DOI_STRING
+ GMX_TNG_MINIMUM_REQUIRED_VERSION
GMX_SOURCE_DOI_STRING
GMXAPI_PYTHON_STAGING_DIR
+ IMAGE_CONVERT_STRING
+ REGRESSIONTEST_VERSION
+ REQUIRED_CUDA_COMPUTE_CAPABILITY
+ REQUIRED_CUDA_VERSION
+ REQUIRED_OPENCL_MIN_VERSION
+ REGRESSIONTEST_MD5SUM_STRING
+ RELENG_PATH
+ SOURCE_MD5SUM
+ SPHINX_EXTENSION_PATH
COMMENT "Configuring Sphinx configuration file")
- gmx_add_sphinx_input_file(${SPHINX_CONFIG_VARS_FILE})
+ gmx_add_sphinx_input_file(${SPHINX_CONFIG_FILE})
gmx_add_sphinx_source_files(FILES ${SPHINX_SOURCE_FILES})
if (EXISTS ${RELENG_PATH}/docs/FileList.cmake)
include(${RELENG_PATH}/docs/FileList.cmake)
# Sphinx cache with pickled ReST documents
set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees")
+ set(SPHINX_CONFIG_OVERRIDES "")
+ if (GMX_DEVELOPER_BUILD)
+ set(SPHINX_CONFIG_OVERRIDES "-Dtodo_include_todos=1")
+ endif()
add_custom_target(webpage-sphinx
DEPENDS sphinx-programs
DEPENDS sphinx-input
-q -b html
-w sphinx-html.log
-d "${SPHINX_CACHE_DIR}"
+ ${SPHINX_CONFIG_OVERRIDES}
"${SPHINX_INPUT_DIR}"
"${HTML_OUTPUT_DIR}"
WORKING_DIRECTORY
set(HTML_BUILD_WARNINGS)
# Next, turn it off if any of the preconditions are unsatisified
-if (NOT PythonInterp_FOUND)
+if (NOT Python3_Interpreter_FOUND)
set(HTML_BUILD_IS_POSSIBLE OFF)
set(HTML_BUILD_NOT_POSSIBLE_REASON "Python is required")
elseif (NOT SPHINX_FOUND)
.. Note to developers!
Please use """"""" to underline the individual entries for fixed issues in the subfolders,
otherwise the formatting on the webpage is messed up.
- Also, please use the syntax :issue:`number` to reference issues on redmine, without the
+ Also, please use the syntax :issue:`number` to reference issues on GitLab, without
a space between the colon and number!
Up to a factor 2.5 speed-up of the non-bonded free-energy kernel
the PME-mesh calculation now always takes the most CPU time.
- Proper dihedrals of Fourier type and improper dihedrals of preriodic type are SIMD accelerated
- """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+ Proper dihedrals of Fourier type and improper dihedrals of periodic type are SIMD accelerated
+ """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Avoid configuring the own-FFTW with AVX512 enabled when |Gromacs| does not use AVX512
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
functionality supported, whereas patch releases contain only fixes for
issues identified in the corresponding major releases.
-Two versions of |Gromacs| are under active maintenance, the 2020
-series and the 2019 series. In the latter, only highly conservative
+Two versions of |Gromacs| are under active maintenance, the 2021
+series and the 2020 series. In the latter, only highly conservative
fixes will be made, and only to address issues that affect scientific
correctness. Naturally, some of those releases will be made after the
-year 2019 ends, but we keep 2019 in the name so users understand how
year 2020 ends, but we keep 2020 in the name so users understand how
up to date their version is. Such fixes will also be incorporated into
-the 2020 release series, as appropriate. Around the time the 2021
-release is made, the 2019 series will no longer be maintained.
+the 2021 release series, as appropriate. Around the time the 2022
+release is made, the 2020 series will no longer be maintained.
Where issue numbers are reported in these release notes, more details
-can be found at https://redmine.gromacs.org at that issue number.
+can be found at https://gitlab.com/gromacs/gromacs/-/issues at that issue number.
+
+|Gromacs| 2021 series
+---------------------
+
+.. todolist::
+
+Major release
+^^^^^^^^^^^^^
+
+.. toctree::
+ :maxdepth: 1
+
+ 2021/major/highlights
+ 2021/major/features
+ 2021/major/performance
+ 2021/major/tools
+ 2021/major/bugs-fixed
+ 2021/major/deprecated-functionality
+ 2021/major/removed-functionality
+ 2021/major/portability
+ 2021/major/miscellaneous
+
|Gromacs| 2020 series
---------------------
.. toctree::
:maxdepth: 1
- 2020/2020.1
- 2020/2020.2
+ 2020/2020.4
2020/2020.3
+ 2020/2020.2
+ 2020/2020.1
Major release
^^^^^^^^^^^^^
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2009,2010,2011,2012,2013,2014,2018,2020, by the GROMACS development team, led by
+# Copyright (c) 2009,2010,2011,2012,2013 by the GROMACS development team.
+# Copyright (c) 2014,2018,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.
- add_subdirectory(template)
+ if(GMX_INSTALL_LEGACY_API)
+ add_subdirectory(template)
+ endif()
install(FILES README.tutor README_FreeEnergyModifications.txt
DESTINATION ${GMX_INSTALL_GMXDATADIR}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2009,2010,2011,2012,2013,2014, The GROMACS development team.
- * Copyright (c) 2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2009,2010,2011,2012,2013 by the GROMACS development team.
+ * Copyright (c) 2014,2015,2016,2017,2018 by the GROMACS development team.
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/* Enable code that requires AVX-512 instruction support, without GMX_SIMD=AVX_512 */
#cmakedefine01 SIMD_AVX_512_CXX_SUPPORTED
+/* Whether NBNXM and other SIMD kernels should be compiled */
+#cmakedefine01 GMX_USE_SIMD_KERNELS
+
/* Whether a double-precision configuration may target accuracy equivalent to single precision */
#cmakedefine01 GMX_RELAXED_DOUBLE_PRECISION
/* Use if we cannot rename checkpoints */
#cmakedefine01 GMX_NO_RENAME
-/* Use (modified) Gamess-UK for QM-MM calculations */
-#cmakedefine01 GMX_QMMM_GAMESS
-
-/* Use (modified) Gaussian0x for QM-MM calculations */
-#cmakedefine01 GMX_QMMM_GAUSSIAN
-
-/* Use (modified) Mopac 7 for QM-MM calculations */
-#cmakedefine01 GMX_QMMM_MOPAC
-
-/* Use ORCA for QM-MM calculations */
-#cmakedefine01 GMX_QMMM_ORCA
-
-/* Use cycle counters */
-#cmakedefine01 GMX_CYCLECOUNTERS
-
/* Use sub-counters */
#cmakedefine01 GMX_CYCLE_SUBCOUNTERS
/* Fallback path for VMD plug-ins */
#define GMX_VMD_PLUGIN_PATH "@GMX_VMD_PLUGIN_PATH@"
+ /* Default path for DSSP program */
+ #define GMX_DSSP_PROGRAM_PATH "@GMX_DSSP_PROGRAM_PATH@"
+
/* Define when pthreads are used */
#cmakedefine THREAD_PTHREADS
/* Enable x86 gcc inline assembly */
#cmakedefine01 GMX_X86_GCC_INLINE_ASM
-/* Define constants useful for handling GPU support */
-#define GMX_GPU_NONE 0
-#define GMX_GPU_CUDA 1
-#define GMX_GPU_OPENCL 2
-/* Which kind of GPU support is configured */
-#define GMX_GPU @GMX_GPU_ACCELERATION_FRAMEWORK@
+/* Define if any type of GPU acceleration is compiled */
+#cmakedefine01 GMX_GPU
+
+/* Define if CUDA GPU acceleration is compiled */
+#cmakedefine01 GMX_GPU_CUDA
+
+/* Define if OpenCL GPU acceleration is compiled */
+#cmakedefine01 GMX_GPU_OPENCL
+
+/* Define if SYCL GPU acceleration is compiled */
+#cmakedefine01 GMX_GPU_SYCL
/* Use a single compilation unit when compiling the CUDA (non-bonded) kernels. */
#cmakedefine01 GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
#define CMAKE_BUILD_TYPE_TSAN 8
#define CMAKE_BUILD_TYPE_ASAN 9
#define CMAKE_BUILD_TYPE_MSAN 10
+#define CMAKE_BUILD_TYPE_UBSAN 11
#cmakedefine CMAKE_BUILD_TYPE CMAKE_BUILD_TYPE_@CMAKE_BUILD_TYPE_UPPER@
/* Define relative path to OpenCL kernels */
/* Define to 1 if you have the gettimeofday() function. */
#cmakedefine01 HAVE_GETTIMEOFDAY
-/* Define to 1 if you have the rdtscp instruction. */
-#cmakedefine01 HAVE_RDTSCP
+/* Define to 1 if you have the rdtscp instruction (i.e. essentially all x86 still in use). */
+#cmakedefine01 GMX_USE_RDTSCP
/* Define to 1 if you have the fsync() function. */
#cmakedefine01 HAVE_FSYNC
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2010,2011,2012,2013,2014,2015, The GROMACS development team.
-# Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
+# Copyright (c) 2010,2011,2012,2013,2014 by the GROMACS development team.
+# Copyright (c) 2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
_gmx_add_files_to_property(GMX_LIBGROMACS_SOURCES ${ARGN})
endfunction ()
-# TODO Reconsider this, as the CUDA driver API is probably a simpler
-# approach, at least for the build system. See Redmine #2530
-function (gmx_compile_cpp_as_cuda)
- _gmx_add_files_to_property(GMX_LIBGROMACS_GPU_IMPL_SOURCES ${ARGN})
-endfunction ()
+# Permit the configuration to disable compiling the many nbnxm kernels
+# and others involved in force calculations. Currently only
+# short-ranged and bonded kernels are disabled this way, but in future
+# others may be appropriate. Thus the cmake option is not specific to
+# nbnxm module.
+option(GMX_USE_SIMD_KERNELS "Whether to compile NBNXM and other SIMD kernels" ON)
+mark_as_advanced(GMX_USE_SIMD_KERNELS)
# Add these contents first because linking their tests can take a lot
# of time, so we want lots of parallel work still available after
tmpi_get_source_list(THREAD_MPI_SOURCES ${PROJECT_SOURCE_DIR}/src/external/thread_mpi/src)
add_library(thread_mpi OBJECT ${THREAD_MPI_SOURCES})
target_compile_definitions(thread_mpi PRIVATE HAVE_CONFIG_H)
+if(CYGWIN)
+ # Needs POSIX-isms for strdup, not just std-isms
+ target_compile_definitions(thread_mpi PRIVATE _POSIX_C_SOURCE=200809L)
+endif()
gmx_target_compile_options(thread_mpi)
if (WIN32)
gmx_target_warning_suppression(thread_mpi /wd4996 HAS_NO_MSVC_UNSAFE_FUNCTION)
if(GMX_INSTALL_LEGACY_API)
install(FILES
${CMAKE_CURRENT_BINARY_DIR}/version.h
+ analysisdata.h
+ options.h
+ selection.h
+ trajectoryanalysis.h
DESTINATION include/gromacs)
endif()
list(APPEND LIBGROMACS_SOURCES ${GENERATED_VERSION_FILE})
# Mark some shared GPU implementation files to compile with CUDA if needed
-if (GMX_USE_CUDA)
+if (GMX_GPU_CUDA)
get_property(LIBGROMACS_GPU_IMPL_SOURCES GLOBAL PROPERTY GMX_LIBGROMACS_GPU_IMPL_SOURCES)
set_source_files_properties(${LIBGROMACS_GPU_IMPL_SOURCES} PROPERTIES CUDA_SOURCE_PROPERTY_FORMAT OBJ)
endif()
endforeach()
endif()
-if (GMX_USE_CUDA)
+if (GMX_GPU_CUDA)
# Work around FindCUDA that prevents using target_link_libraries()
# with keywords otherwise...
set(CUDA_LIBRARIES PRIVATE ${CUDA_LIBRARIES})
target_compile_definitions(libgromacs PRIVATE HAVE_CONFIG_H)
target_include_directories(libgromacs SYSTEM BEFORE PRIVATE ${PROJECT_SOURCE_DIR}/src/external/thread_mpi/include)
-if (GMX_USE_OPENCL)
+if (GMX_GPU_OPENCL)
option(GMX_EXTERNAL_CLFFT "True if an external clFFT is required to be used" FALSE)
mark_as_advanced(GMX_EXTERNAL_CLFFT)
gmx_manage_lmfit()
target_link_libraries(libgromacs PRIVATE lmfit)
-# Fix everything found by the latest version of clang that we use in
-# Jenkins testing. This should be updated when we update the latest
-# tested version of clang.
-if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND CMAKE_CXX_COMPILER_VERSION MATCHES "^7\.0")
+# Make sure we fix "everything" found by more recent versions of clang.
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "7")
target_compile_options(libgromacs PRIVATE $<$<COMPILE_LANGUAGE:CXX>:-Weverything ${IGNORED_CLANG_ALL_WARNINGS}>)
endif()
if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
DESTINATION ${CMAKE_INSTALL_LIBDIR}
COMPONENT libraries
INCLUDES DESTINATION include)
+ target_compile_definitions(libgromacs PUBLIC $<INSTALL_INTERFACE:GMX_DOUBLE=${GMX_DOUBLE_VALUE}>)
+ # legacy headers use c++17 features, so consumer codes need to use that standard, too
+ if(GMX_INSTALL_LEGACY_API)
+ target_compile_features(libgromacs INTERFACE cxx_std_${CMAKE_CXX_STANDARD})
+ endif()
+ add_library(Gromacs::libgromacs ALIAS libgromacs)
endif()
if (NOT GMX_BUILD_MDRUN_ONLY)
# using the CUDA runtime, but currently there's no reason to want to
# do that.
if (INSTALL_CUDART_LIB) #can be set manual by user
- if (GMX_USE_CUDA)
+ if (GMX_GPU_CUDA)
foreach(CUDA_LIB ${CUDA_LIBRARIES})
string(REGEX MATCH "cudart" IS_CUDART ${CUDA_LIB})
if(IS_CUDART) #libcuda should not be installed
endif()
endif()
-if(GMX_USE_OPENCL)
+if(GMX_GPU_OPENCL)
# Install the utility headers
file(GLOB OPENCL_INSTALLED_FILES
gpu_utils/vectype_ops.clh
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2010,2011,2012,2013,2014,2019,2020, by the GROMACS development team, led by
+# Copyright (c) 2010,2011,2012,2013,2014 by the GROMACS development team.
+# Copyright (c) 2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
file(GLOB ANALYSISDATA_SOURCES *.cpp modules/*.cpp)
set(LIBGROMACS_SOURCES ${LIBGROMACS_SOURCES} ${ANALYSISDATA_SOURCES} PARENT_SCOPE)
+ if(GMX_INSTALL_LEGACY_API)
+ install(FILES
+ abstractdata.h
+ analysisdata.h
+ arraydata.h
+ dataframe.h
+ datamodule.h
+ DESTINATION include/gromacs/analysisdata)
+ endif()
+
+
add_subdirectory(modules)
if (BUILD_TESTING)
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
+#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
{
errorReasons.emplace_back("a double-precision build");
}
- if (GMX_GPU == GMX_GPU_NONE)
+ if (!GMX_GPU)
{
errorReasons.emplace_back("a non-GPU build");
}
{
std::list<std::string> errorReasons;
- if (GMX_GPU == GMX_GPU_OPENCL)
+ if (GMX_GPU_OPENCL)
{
#ifdef __APPLE__
errorReasons.emplace_back("Apple OS X operating system");
return addMessageIfNotSupported(errorReasons, error);
}
-bool pme_gpu_supports_input(const t_inputrec& ir, const gmx_mtop_t& mtop, std::string* error)
+bool pme_gpu_supports_input(const t_inputrec& ir, std::string* error)
{
std::list<std::string> errorReasons;
if (!EEL_PME(ir.coulombtype))
{
errorReasons.emplace_back("interpolation orders other than 4");
}
- if (ir.efep != efepNO)
- {
- if (gmx_mtop_has_perturbed_charges(mtop))
- {
- errorReasons.emplace_back(
- "free energy calculations with perturbed charges (multiple grids)");
- }
- }
if (EVDW_PME(ir.vdwtype))
{
errorReasons.emplace_back("Lennard-Jones PME");
{
errorReasons.emplace_back("interpolation orders other than 4");
}
- if (pme->bFEP)
- {
- errorReasons.emplace_back("free energy calculations (multiple grids)");
- }
if (pme->doLJ)
{
errorReasons.emplace_back("Lennard-Jones PME");
{
errorReasons.emplace_back("double precision");
}
- if (GMX_GPU == GMX_GPU_NONE)
+ if (!GMX_GPU)
{
errorReasons.emplace_back("non-GPU build of GROMACS");
}
return (enumerator + denominator - 1) / denominator;
}
-gmx_pme_t* gmx_pme_init(const t_commrec* cr,
- const NumPmeDomains& numPmeDomains,
- const t_inputrec* ir,
- gmx_bool bFreeEnergy_q,
- gmx_bool bFreeEnergy_lj,
- gmx_bool bReproducible,
- real ewaldcoeff_q,
- real ewaldcoeff_lj,
- int nthread,
- PmeRunMode runMode,
- PmeGpu* pmeGpu,
- const gmx_device_info_t* gpuInfo,
- PmeGpuProgramHandle pmeGpuProgram,
- const gmx::MDLogger& mdlog)
+gmx_pme_t* gmx_pme_init(const t_commrec* cr,
+ const NumPmeDomains& numPmeDomains,
+ const t_inputrec* ir,
+ gmx_bool bFreeEnergy_q,
+ gmx_bool bFreeEnergy_lj,
+ gmx_bool bReproducible,
+ real ewaldcoeff_q,
+ real ewaldcoeff_lj,
+ int nthread,
+ PmeRunMode runMode,
+ PmeGpu* pmeGpu,
+ const DeviceContext* deviceContext,
+ const DeviceStream* deviceStream,
+ const PmeGpuProgram* pmeGpuProgram,
- const gmx::MDLogger& /*mdlog*/)
+                        const gmx::MDLogger& mdlog)
{
int use_threads, sum_use_threads, i;
ivec ndata;
}
pme->bUseThreads = (sum_use_threads > 0);
- if (ir->ePBC == epbcSCREW)
+ if (ir->pbcType == PbcType::Screw)
{
gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
}
imbal = estimate_pme_load_imbalance(pme.get());
if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
{
- fprintf(stderr,
- "\n"
- "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
- " For optimal PME load balancing\n"
- " PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x "
- "(%d)\n"
- " and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y "
- "(%d)\n"
- "\n",
- gmx::roundToInt((imbal - 1) * 100), pme->nkx, pme->nky, pme->nnodes_major,
- pme->nky, pme->nkz, pme->nnodes_minor);
+ GMX_LOG(mdlog.warning)
+ .asParagraph()
+ .appendTextFormatted(
+ "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
+ " For optimal PME load balancing\n"
+ " PME grid_x (%d) and grid_y (%d) should be divisible by "
+ "#PME_ranks_x "
+ "(%d)\n"
+ " and PME grid_y (%d) and grid_z (%d) should be divisible by "
+ "#PME_ranks_y "
+ "(%d)",
+ gmx::roundToInt((imbal - 1) * 100), pme->nkx, pme->nky,
+ pme->nnodes_major, pme->nky, pme->nkz, pme->nnodes_minor);
}
}
pme->atc.emplace_back(pme->mpi_comm_d[1], pme->nthread, pme->pme_order, secondDimIndex, doSpread);
}
- if (pme_gpu_active(pme.get()))
+ // Initial check of validity of the input for running on the GPU
+ if (pme->runMode != PmeRunMode::CPU)
{
- if (!pme->gpu)
+ std::string errorString;
+ bool canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
+ if (!canRunOnGpu)
{
- // Initial check of validity of the data
- std::string errorString;
- bool canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
- if (!canRunOnGpu)
- {
- GMX_THROW(gmx::NotImplementedError(errorString));
- }
+ GMX_THROW(gmx::NotImplementedError(errorString));
}
-
- pme_gpu_reinit(pme.get(), gpuInfo, pmeGpuProgram);
+ pme_gpu_reinit(pme.get(), deviceContext, deviceStream, pmeGpuProgram);
+ }
+ else
+ {
+ GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object when PME is on a CPU.");
}
+
pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);
// no exception was thrown during the init, so we hand over the PME structure handle
// TODO: This would be better as just copying a sub-structure that contains
// all the PME parameters and nothing else.
t_inputrec irc;
- irc.ePBC = ir->ePBC;
+ irc.pbcType = ir->pbcType;
irc.coulombtype = ir->coulombtype;
irc.vdwtype = ir->vdwtype;
irc.efep = ir->efep;
try
{
+ // This is reinit. Any logging should have been done at first init.
+ // Here we should avoid writing notes for settings the user did not
+ // set directly.
const gmx::MDLogger dummyLogger;
- // This is reinit which is currently only changing grid size/coefficients,
- // so we don't expect the actual logging.
- // TODO: when PME is an object, it should take reference to mdlog on construction and save it.
GMX_ASSERT(pmedata, "Invalid PME pointer");
NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
*pmedata = gmx_pme_init(cr, numPmeDomains, &irc, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE,
ewaldcoeff_q, ewaldcoeff_lj, pme_src->nthread, pme_src->runMode,
- pme_src->gpu, nullptr, nullptr, dummyLogger);
+ pme_src->gpu, nullptr, nullptr, nullptr, dummyLogger);
/* When running PME on the CPU not using domain decomposition,
* the atom data is allocated once only in gmx_pme_(re)init().
*/
if (!pme_src->gpu && pme_src->nnodes == 1)
{
- gmx_pme_reinit_atoms(*pmedata, pme_src->atc[0].numAtoms(), nullptr);
+ gmx_pme_reinit_atoms(*pmedata, pme_src->atc[0].numAtoms(), nullptr, nullptr);
}
// TODO this is mostly passing around current values
}
real lambda_lj,
real* dvdlambda_q,
real* dvdlambda_lj,
- int flags)
+ const gmx::StepWorkload& stepWork)
{
GMX_ASSERT(pme->runMode == PmeRunMode::CPU,
"gmx_pme_do should not be called on the GPU PME run.");
gmx_bool bFirst, bDoSplines;
int fep_state;
int fep_states_lj = pme->bFEP_lj ? 2 : 1;
- const gmx_bool bCalcEnerVir = (flags & GMX_PME_CALC_ENER_VIR) != 0;
- const gmx_bool bBackFFT = (flags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
- const gmx_bool bCalcF = (flags & GMX_PME_CALC_F) != 0;
+ // There's no support for computing energy without virial, or vice versa
+ const bool computeEnergyAndVirial = (stepWork.computeEnergy || stepWork.computeVirial);
/* We could be passing lambda!=0 while no q or LJ is actually perturbed */
if (!pme->bFEP_q)
fprintf(debug, "Rank= %6d, pme local particles=%6d\n", cr->nodeid, atc.numAtoms());
}
- if (flags & GMX_PME_SPREAD)
+ wallcycle_start(wcycle, ewcPME_SPREAD);
+
+ /* Spread the coefficients on a grid */
+ spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
+
+ if (bFirst)
{
- wallcycle_start(wcycle, ewcPME_SPREAD);
+ inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
+ }
+ inc_nrnb(nrnb, eNR_SPREADBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
- /* Spread the coefficients on a grid */
- spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
+ if (!pme->bUseThreads)
+ {
+ wrap_periodic_pmegrid(pme, grid);
- if (bFirst)
+ /* sum contributions to local grid from other nodes */
+ if (pme->nnodes > 1)
{
- inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
+ gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
}
- inc_nrnb(nrnb, eNR_SPREADBSP,
- pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
-
- if (!pme->bUseThreads)
- {
- wrap_periodic_pmegrid(pme, grid);
-
- /* sum contributions to local grid from other nodes */
- if (pme->nnodes > 1)
- {
- gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
- }
- copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
- }
+ copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
+ }
- wallcycle_stop(wcycle, ewcPME_SPREAD);
+ wallcycle_stop(wcycle, ewcPME_SPREAD);
- /* TODO If the OpenMP and single-threaded implementations
- converge, then spread_on_grid() and
- copy_pmegrid_to_fftgrid() will perhaps live in the same
- source file.
- */
- }
+ /* TODO If the OpenMP and single-threaded implementations
+ converge, then spread_on_grid() and
+ copy_pmegrid_to_fftgrid() will perhaps live in the same
+ source file.
+ */
/* Here we start a large thread parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
try
{
thread = gmx_omp_get_thread_num();
- if (flags & GMX_PME_SOLVE)
- {
- int loop_count;
-
- /* do 3d-fft */
- if (thread == 0)
- {
- wallcycle_start(wcycle, ewcPME_FFT);
- }
- gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
- if (thread == 0)
- {
- wallcycle_stop(wcycle, ewcPME_FFT);
- }
+ int loop_count;
- /* solve in k-space for our local cells */
- if (thread == 0)
- {
- wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
- }
- if (grid_index < DO_Q)
- {
- loop_count = solve_pme_yzx(
- pme, cfftgrid, scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
- bCalcEnerVir, pme->nthread, thread);
- }
- else
- {
- loop_count = solve_pme_lj_yzx(
- pme, &cfftgrid, FALSE,
- scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
- bCalcEnerVir, pme->nthread, thread);
- }
+ /* do 3d-fft */
+ if (thread == 0)
+ {
+ wallcycle_start(wcycle, ewcPME_FFT);
+ }
+ gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
+ if (thread == 0)
+ {
+ wallcycle_stop(wcycle, ewcPME_FFT);
+ }
- if (thread == 0)
- {
- wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
- inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
- }
+ /* solve in k-space for our local cells */
+ if (thread == 0)
+ {
+ wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
+ }
+ if (grid_index < DO_Q)
+ {
+ loop_count = solve_pme_yzx(
+ pme, cfftgrid, scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
+ computeEnergyAndVirial, pme->nthread, thread);
+ }
+ else
+ {
+ loop_count =
+ solve_pme_lj_yzx(pme, &cfftgrid, FALSE,
+ scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
+ computeEnergyAndVirial, pme->nthread, thread);
}
- if (bBackFFT)
+ if (thread == 0)
{
- /* do 3d-invfft */
- if (thread == 0)
- {
- wallcycle_start(wcycle, ewcPME_FFT);
- }
- gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
- if (thread == 0)
- {
- wallcycle_stop(wcycle, ewcPME_FFT);
+ wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
+ inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
+ }
+ /* do 3d-invfft */
+ if (thread == 0)
+ {
+ wallcycle_start(wcycle, ewcPME_FFT);
+ }
+ gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
+ if (thread == 0)
+ {
+ wallcycle_stop(wcycle, ewcPME_FFT);
- if (pme->nodeid == 0)
- {
- real ntot = pme->nkx * pme->nky * pme->nkz;
- npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
- inc_nrnb(nrnb, eNR_FFT, 2 * npme);
- }
- /* Note: this wallcycle region is closed below
- outside an OpenMP region, so take care if
- refactoring code here. */
- wallcycle_start(wcycle, ewcPME_GATHER);
+ if (pme->nodeid == 0)
+ {
+ real ntot = pme->nkx * pme->nky * pme->nkz;
+ npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
+ inc_nrnb(nrnb, eNR_FFT, 2 * npme);
}
- copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
+ /* Note: this wallcycle region is closed below
+ outside an OpenMP region, so take care if
+ refactoring code here. */
+ wallcycle_start(wcycle, ewcPME_GATHER);
}
+
+ copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
* With MPI we have to synchronize here before gmx_sum_qgrid_dd.
*/
- if (bBackFFT)
+ /* distribute local grid to all nodes */
+ if (pme->nnodes > 1)
{
- /* distribute local grid to all nodes */
- if (pme->nnodes > 1)
- {
- gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
- }
-
- unwrap_periodic_pmegrid(pme, grid);
+ gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
}
- if (bCalcF)
+ unwrap_periodic_pmegrid(pme, grid);
+
+ if (stepWork.computeForces)
{
/* interpolate forces for our local atoms */
wallcycle_stop(wcycle, ewcPME_GATHER);
}
- if (bCalcEnerVir)
+ if (computeEnergyAndVirial)
{
/* This should only be called on the master thread
* and after the threads have synchronized.
calc_next_lb_coeffs(coefficientBuffer, local_sigma);
grid = pmegrid->grid.grid;
- if (flags & GMX_PME_SPREAD)
- {
- wallcycle_start(wcycle, ewcPME_SPREAD);
- /* Spread the c6 on a grid */
- spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
+ wallcycle_start(wcycle, ewcPME_SPREAD);
+ /* Spread the c6 on a grid */
+ spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
- if (bFirst)
- {
- inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
- }
+ if (bFirst)
+ {
+ inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
+ }
- inc_nrnb(nrnb, eNR_SPREADBSP,
- pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
- if (pme->nthread == 1)
+ inc_nrnb(nrnb, eNR_SPREADBSP,
+ pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
+ if (pme->nthread == 1)
+ {
+ wrap_periodic_pmegrid(pme, grid);
+ /* sum contributions to local grid from other nodes */
+ if (pme->nnodes > 1)
{
- wrap_periodic_pmegrid(pme, grid);
- /* sum contributions to local grid from other nodes */
- if (pme->nnodes > 1)
- {
- gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
- }
- copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
+ gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
}
- wallcycle_stop(wcycle, ewcPME_SPREAD);
+ copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
}
+ wallcycle_stop(wcycle, ewcPME_SPREAD);
+
/*Here we start a large thread parallel region*/
#pragma omp parallel num_threads(pme->nthread) private(thread)
{
try
{
thread = gmx_omp_get_thread_num();
- if (flags & GMX_PME_SOLVE)
+ /* do 3d-fft */
+ if (thread == 0)
{
- /* do 3d-fft */
- if (thread == 0)
- {
- wallcycle_start(wcycle, ewcPME_FFT);
- }
+ wallcycle_start(wcycle, ewcPME_FFT);
+ }
- gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
- if (thread == 0)
- {
- wallcycle_stop(wcycle, ewcPME_FFT);
- }
+ gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
+ if (thread == 0)
+ {
+ wallcycle_stop(wcycle, ewcPME_FFT);
}
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
bFirst = FALSE;
}
- if (flags & GMX_PME_SOLVE)
- {
- /* solve in k-space for our local cells */
+ /* solve in k-space for our local cells */
#pragma omp parallel num_threads(pme->nthread) private(thread)
+ {
+ try
{
- try
+ int loop_count;
+ thread = gmx_omp_get_thread_num();
+ if (thread == 0)
{
- int loop_count;
- thread = gmx_omp_get_thread_num();
- if (thread == 0)
- {
- wallcycle_start(wcycle, ewcLJPME);
- }
+ wallcycle_start(wcycle, ewcLJPME);
+ }
- loop_count = solve_pme_lj_yzx(
- pme, &pme->cfftgrid[2], TRUE,
- scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
- bCalcEnerVir, pme->nthread, thread);
- if (thread == 0)
- {
- wallcycle_stop(wcycle, ewcLJPME);
- inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
- }
+ loop_count =
+ solve_pme_lj_yzx(pme, &pme->cfftgrid[2], TRUE,
+ scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
+ computeEnergyAndVirial, pme->nthread, thread);
+ if (thread == 0)
+ {
+ wallcycle_stop(wcycle, ewcLJPME);
+ inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
}
- GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
- if (bCalcEnerVir)
+ if (computeEnergyAndVirial)
{
/* This should only be called on the master thread and
* after the threads have synchronized.
get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[fep_state]);
}
- if (bBackFFT)
+ bFirst = !pme->doCoulomb;
+ calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
+ for (grid_index = 8; grid_index >= 2; --grid_index)
{
- bFirst = !pme->doCoulomb;
- calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
- for (grid_index = 8; grid_index >= 2; --grid_index)
- {
- /* Unpack structure */
- pmegrid = &pme->pmegrid[grid_index];
- fftgrid = pme->fftgrid[grid_index];
- pfft_setup = pme->pfft_setup[grid_index];
- grid = pmegrid->grid.grid;
- calc_next_lb_coeffs(coefficientBuffer, local_sigma);
+ /* Unpack structure */
+ pmegrid = &pme->pmegrid[grid_index];
+ fftgrid = pme->fftgrid[grid_index];
+ pfft_setup = pme->pfft_setup[grid_index];
+ grid = pmegrid->grid.grid;
+ calc_next_lb_coeffs(coefficientBuffer, local_sigma);
#pragma omp parallel num_threads(pme->nthread) private(thread)
+ {
+ try
{
- try
+ thread = gmx_omp_get_thread_num();
+ /* do 3d-invfft */
+ if (thread == 0)
{
- thread = gmx_omp_get_thread_num();
- /* do 3d-invfft */
- if (thread == 0)
- {
- wallcycle_start(wcycle, ewcPME_FFT);
- }
+ wallcycle_start(wcycle, ewcPME_FFT);
+ }
- gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
- if (thread == 0)
- {
- wallcycle_stop(wcycle, ewcPME_FFT);
+ gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
+ if (thread == 0)
+ {
+ wallcycle_stop(wcycle, ewcPME_FFT);
- if (pme->nodeid == 0)
- {
- real ntot = pme->nkx * pme->nky * pme->nkz;
- npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
- inc_nrnb(nrnb, eNR_FFT, 2 * npme);
- }
- wallcycle_start(wcycle, ewcPME_GATHER);
+ if (pme->nodeid == 0)
+ {
+ real ntot = pme->nkx * pme->nky * pme->nkz;
+ npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
+ inc_nrnb(nrnb, eNR_FFT, 2 * npme);
}
-
- copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
+ wallcycle_start(wcycle, ewcPME_GATHER);
}
- GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
- } /*#pragma omp parallel*/
- /* distribute local grid to all nodes */
- if (pme->nnodes > 1)
- {
- gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
+ copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
}
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
+ } /*#pragma omp parallel*/
- unwrap_periodic_pmegrid(pme, grid);
+ /* distribute local grid to all nodes */
+ if (pme->nnodes > 1)
+ {
+ gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
+ }
- if (bCalcF)
- {
- /* interpolate forces for our local atoms */
- bClearF = (bFirst && PAR(cr));
- scale = pme->bFEP ? (fep_state < 1 ? 1.0 - lambda_lj : lambda_lj) : 1.0;
- scale *= lb_scale_factor[grid_index - 2];
+ unwrap_periodic_pmegrid(pme, grid);
+
+ if (stepWork.computeForces)
+ {
+ /* interpolate forces for our local atoms */
+ bClearF = (bFirst && PAR(cr));
+ scale = pme->bFEP ? (fep_state < 1 ? 1.0 - lambda_lj : lambda_lj) : 1.0;
+ scale *= lb_scale_factor[grid_index - 2];
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
- for (thread = 0; thread < pme->nthread; thread++)
+ for (thread = 0; thread < pme->nthread; thread++)
+ {
+ try
{
- try
- {
- gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
- &pme->atc[0].spline[thread], scale);
- }
- GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
+ gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
+ &pme->atc[0].spline[thread], scale);
}
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
+ }
- inc_nrnb(nrnb, eNR_GATHERFBSP,
- pme->pme_order * pme->pme_order * pme->pme_order * pme->atc[0].numAtoms());
- }
- wallcycle_stop(wcycle, ewcPME_GATHER);
+ inc_nrnb(nrnb, eNR_GATHERFBSP,
+ pme->pme_order * pme->pme_order * pme->pme_order * pme->atc[0].numAtoms());
+ }
+ wallcycle_stop(wcycle, ewcPME_GATHER);
- bFirst = FALSE;
- } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
- } /* if (bCalcF) */
- } /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
- } /* if ((flags & GMX_PME_DO_LJ) && pme->ljpme_combination_rule == eljpmeLB) */
+ bFirst = FALSE;
+ } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
+ } /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
+ } /* if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB) */
- if (bCalcF && pme->nnodes > 1)
+ if (stepWork.computeForces && pme->nnodes > 1)
{
wallcycle_start(wcycle, ewcPME_REDISTXF);
for (d = 0; d < pme->ndecompdim; d++)
wallcycle_stop(wcycle, ewcPME_REDISTXF);
}
- if (bCalcEnerVir)
+ if (computeEnergyAndVirial)
{
if (pme->doCoulomb)
{
destroy_pme_spline_work(pme->spline_work);
- if (pme_gpu_active(pme) && pme->gpu)
+ if (pme->gpu != nullptr)
{
pme_gpu_destroy(pme->gpu);
}
delete pme;
}
-void gmx_pme_reinit_atoms(gmx_pme_t* pme, const int numAtoms, const real* charges)
+void gmx_pme_reinit_atoms(gmx_pme_t* pme, const int numAtoms, const real* chargesA, const real* chargesB)
{
- if (pme_gpu_active(pme))
+ if (pme->gpu != nullptr)
{
- pme_gpu_reinit_atoms(pme->gpu, numAtoms, charges);
+ GMX_ASSERT(!(pme->bFEP_q && chargesB == nullptr),
+ "B state charges must be specified if running Coulomb FEP on the GPU");
+ pme_gpu_reinit_atoms(pme->gpu, numAtoms, chargesA, pme->bFEP_q ? chargesB : nullptr);
}
else
{
// TODO: set the charges here as well
}
}
+
+bool gmx_pme_grid_matches(const gmx_pme_t& pme, const ivec grid_size)
+{
+ return (pme.nkx == grid_size[XX] && pme.nky == grid_size[YY] && pme.nkz == grid_size[ZZ]);
+}
file(GLOB FILEIO_SOURCES *.cpp)
-target_sources(libgromacs PRIVATE ${FILEIO_SOURCES})
-
if(GMX_USE_PLUGINS)
add_library(vmddlopen OBJECT ${CMAKE_SOURCE_DIR}/src/external/vmd_molfile/vmddlopen.cpp)
gmx_target_compile_options(vmddlopen)
endif()
list(APPEND libgromacs_object_library_dependencies vmddlopen)
set(libgromacs_object_library_dependencies ${libgromacs_object_library_dependencies} PARENT_SCOPE)
+else()
+ # Remove vmdio.cpp from sources since we do not have plugin support
+ list(FILTER FILEIO_SOURCES EXCLUDE REGEX ".*vmdio.cpp$")
endif()
+target_sources(libgromacs PRIVATE ${FILEIO_SOURCES})
+
if(GMX_INSTALL_LEGACY_API)
install(FILES
oenv.h
+ confio.h
pdbio.h
tpxio.h
trxio.h
+ filetypes.h
DESTINATION include/gromacs/fileio)
endif()
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2017,2018 by the GROMACS development team.
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
static gmx::index search_col(t_psdata* ps, real r, real g, real b)
{
- for (gmx::index i = 0; ssize(ps->rgb); ++i)
+ for (gmx::index i = 0; i < ssize(ps->rgb); ++i)
{
if ((ps->rgb[i].r == r) && (ps->rgb[i].g == g) && (ps->rgb[i].b == b))
{
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2012,2013,2014,2015,2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2017 by the GROMACS development team.
+ * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
bFirst = false;
}
mat->axis_x.push_back(t);
- mat->matrix.resize(mat->matrix.extent(0), nr);
- mat->nx = mat->matrix.extent(0);
+ mat->matrix.resize(++(mat->nx), nr);
auto columnIndex = mat->nx - 1;
for (int i = 0; i < nr; i++)
{
for (i = 0; (i < atoms->nr); i++)
{
- if (std::strcmp(*(atoms->atomname[i]), "OXT") == 0)
- {
- *atoms->atomname[i] = OOO;
- }
- else if (std::strcmp(*(atoms->atomname[i]), "O1") == 0)
- {
- *atoms->atomname[i] = OOO;
- }
- else if (std::strcmp(*(atoms->atomname[i]), "OC1") == 0)
+ if ((std::strcmp(*(atoms->atomname[i]), "OXT") == 0)
+ || (std::strcmp(*(atoms->atomname[i]), "O1") == 0)
+ || (std::strcmp(*(atoms->atomname[i]), "OC1") == 0))
{
*atoms->atomname[i] = OOO;
}
int gmx_do_dssp(int argc, char* argv[])
{
const char* desc[] = {
- "[THISMODULE] ",
- "reads a trajectory file and computes the secondary structure for",
- "each time frame ",
- "calling the dssp program. If you do not have the dssp program,",
+ "[THISMODULE] ", "reads a trajectory file and computes the secondary structure for",
+ "each time frame ", "calling the dssp program. If you do not have the dssp program,",
"get it from http://swift.cmbi.ru.nl/gv/dssp. [THISMODULE] assumes ",
"that the dssp executable is located in ",
- "[TT]/usr/local/bin/dssp[tt]. If this is not the case, then you should",
- "set an environment variable [TT]DSSP[tt] pointing to the dssp",
- "executable, e.g.: [PAR]",
+ // NOLINTNEXTLINE(bugprone-suspicious-missing-comma)
+ "[TT]" GMX_DSSP_PROGRAM_PATH "[tt]. If this is not the case, then you should",
+ "set an environment variable [TT]DSSP[tt] pointing to the dssp", "executable, e.g.: [PAR]",
"[TT]setenv DSSP /opt/dssp/bin/dssp[tt][PAR]",
"Since version 2.0.0, dssp is invoked with a syntax that differs",
"from earlier versions. If you have an older version of dssp,",
"[REF].xpm[ref] matrix file. This file can be visualized with for instance",
"[TT]xv[tt] and can be converted to postscript with [TT]xpm2ps[tt].",
"Individual chains are separated by light grey lines in the [REF].xpm[ref] and",
- "postscript files.",
- "The number of residues with each secondary structure type and the",
+ "postscript files.", "The number of residues with each secondary structure type and the",
"total secondary structure ([TT]-sss[tt]) count as a function of",
"time are also written to file ([TT]-sc[tt]).[PAR]",
"Solvent accessible surface (SAS) per residue can be calculated, both in",
const char * fnSCount, *fnArea, *fnTArea, *fnAArea;
const char* leg[] = { "Phobic", "Phylic" };
t_topology top;
- int ePBC;
+ PbcType pbcType;
t_atoms* atoms;
t_matrix mat;
int nres, nr0, naccr, nres_plus_separators;
fnAArea = opt2fn_null("-aa", NFILE, fnm);
bDoAccSurf = ((fnArea != nullptr) || (fnTArea != nullptr) || (fnAArea != nullptr));
- read_tps_conf(ftp2fn(efTPS, NFILE, fnm), &top, &ePBC, &xp, nullptr, box, FALSE);
+ read_tps_conf(ftp2fn(efTPS, NFILE, fnm), &top, &pbcType, &xp, nullptr, box, FALSE);
atoms = &(top.atoms);
check_oo(atoms);
bPhbres = bPhobics(atoms);
}
fclose(tmpf);
+ const std::string defpathenv = GMX_DSSP_PROGRAM_PATH;
if ((dptr = getenv("DSSP")) == nullptr)
{
- dptr = "/usr/local/bin/dssp";
+ dptr = defpathenv.c_str();
}
if (!gmx_fexist(dptr))
{
accr = nullptr;
naccr = 0;
- gpbc = gmx_rmpbc_init(&top.idef, ePBC, natoms);
+ gpbc = gmx_rmpbc_init(&top.idef, pbcType, natoms);
do
{
t = output_env_conv_time(oenv, t);
}
gmx_rmpbc(gpbc, natoms, box, x);
tapein = gmx_ffopen(pdbfile, "w");
- write_pdbfile_indexed(tapein, nullptr, atoms, x, ePBC, box, ' ', -1, gnx, index, nullptr, FALSE);
+ write_pdbfile_indexed(tapein, nullptr, atoms, x, pbcType, box, ' ', -1, gnx, index, nullptr, FALSE);
gmx_ffclose(tapein);
/* strip_dssp returns the number of lines found in the dssp file, i.e.
* the number of residues plus the separator lines */
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2008, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
+ * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
gmx_fatal(FARGS, "Topology (%d atoms) does not match trajectory (%d atoms)", top.atoms.nr, natoms);
}
- bBox = (ir->ePBC != epbcNONE);
+ bBox = (ir->pbcType != PbcType::No);
grid = init_grid(bBox, box, (rcut > r2cut) ? rcut : r2cut, ngrid);
nabin = static_cast<int>(acut / abin);
nrbin = static_cast<int>(rcut / rbin);
mat.ny = hb->nrhb;
mat.matrix.resize(mat.nx, mat.ny);
+ mat.axis_x.resize(mat.nx);
for (auto& value : mat.matrix.toArrayRef())
{
value = 0;
mat.label_y = bContact ? "Contact Index" : "Hydrogen Bond Index";
mat.bDiscrete = true;
mat.map.resize(2);
- for (auto& m : mat.map)
{
- m.code.c1 = hbmap[i];
- m.desc = hbdesc[i];
- m.rgb = hbrgb[i];
+ int i = 0;
+ for (auto& m : mat.map)
+ {
+ m.code.c1 = hbmap[i];
+ m.desc = hbdesc[i];
+ m.rgb = hbrgb[i];
+ i++;
+ }
}
fp = opt2FILE("-hbm", NFILE, fnm, "w");
write_xpm_m(fp, mat);
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2017,2018 by the GROMACS development team.
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
char self[256];
/* Skip until we come to pressure */
- for (i = 0; (i < F_NRE); i++)
+ for (i = 0; (i < nre); i++)
{
if (std::strcmp(names[i].name, interaction_function[F_PRES].longname) == 0)
{
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
+ * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
static void prune_mat(gmx::ArrayRef<t_matrix> mat, gmx::ArrayRef<t_matrix> mat2, int skip)
{
- GMX_RELEASE_ASSERT(mat.size() == mat2.size(),
+ GMX_RELEASE_ASSERT(mat.size() == mat2.size() || mat2.empty(),
"Matrix pruning requires matrices of the same size");
for (gmx::index i = 0; i != gmx::ssize(mat); ++i)
{
int skip,
int mapoffset)
{
- GMX_RELEASE_ASSERT(mat.size() == mat2.size(),
+ GMX_RELEASE_ASSERT(mat.size() == mat2.size() || mat2.empty(),
"Combined matrix write requires matrices of the same size");
if (!mat2.empty())
{
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2017,2018 by the GROMACS development team.
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
FILE* out;
int igrp;
real d, dd, lo, hi;
- int* ind_grp;
const char * xfn, *nfn;
- char* gn_grp;
matrix box;
gmx_bool bFreeze;
rvec dx, *x = nullptr, *v = nullptr;
{
return 0;
}
+ output_env_done(oenv);
bFreeze = opt2bSet("-of", NFILE, fnm) || opt2parg_bSet("-freeze", asize(pa), pa);
bDisre = bDisre || opt2parg_bSet("-disre_dist", npargs, pa);
const char* title = "";
bool haveTopology = false;
+ gmx_mtop_t mtop;
+ int* indexGroups = nullptr;
+ char* indexGroupNames = nullptr;
+
if (xfn != nullptr)
{
fprintf(stderr, "\nReading structure file\n");
- gmx_mtop_t mtop;
readConfAndTopology(xfn, &haveTopology, &mtop, nullptr, &x, &v, box);
title = *mtop.name;
atoms = gmx_mtop_global_atoms(&mtop);
else if ((bDisre || bConstr) && x)
{
printf("Select group to generate %s matrix from\n", bConstr ? "constraint" : "distance restraint");
- get_index(&atoms, nfn, 1, &igrp, &ind_grp, &gn_grp);
+ get_index(&atoms, nfn, 1, &igrp, &indexGroups, &indexGroupNames);
out = ftp2FILE(efITP, NFILE, fnm, "w");
if (bConstr)
{
- fprintf(out, "; constraints for %s of %s\n\n", gn_grp, title);
+ fprintf(out, "; constraints for %s of %s\n\n", indexGroupNames, title);
fprintf(out, "[ constraints ]\n");
fprintf(out, ";%4s %5s %1s %10s\n", "i", "j", "tp", "dist");
}
else
{
- fprintf(out, "; distance restraints for %s of %s\n\n", gn_grp, title);
+ fprintf(out, "; distance restraints for %s of %s\n\n", indexGroupNames, title);
fprintf(out, "[ distance_restraints ]\n");
fprintf(out, ";%4s %5s %1s %5s %10s %10s %10s %10s %10s\n", "i", "j", "?", "label",
"funct", "lo", "up1", "up2", "weight");
{
for (j = i + 1; j < igrp; j++, k++)
{
- rvec_sub(x[ind_grp[i]], x[ind_grp[j]], dx);
+ rvec_sub(x[indexGroups[i]], x[indexGroups[j]], dx);
d = norm(dx);
if (bConstr)
{
- fprintf(out, "%5d %5d %1d %10g\n", ind_grp[i] + 1, ind_grp[j] + 1, 2, d);
+ fprintf(out, "%5d %5d %1d %10g\n", indexGroups[i] + 1, indexGroups[j] + 1, 2, d);
}
else
{
}
lo = std::max(0.0_real, d - dd);
hi = d + dd;
- fprintf(out, "%5d %5d %1d %5d %10d %10g %10g %10g %10g\n", ind_grp[i] + 1,
- ind_grp[j] + 1, 1, k, 1, lo, hi, hi + disre_up2, 1.0);
+ fprintf(out, "%5d %5d %1d %5d %10d %10g %10g %10g %10g\n", indexGroups[i] + 1,
+ indexGroups[j] + 1, 1, k, 1, lo, hi, hi + disre_up2, 1.0);
}
}
}
else
{
printf("Select group to position restrain\n");
- get_index(&atoms, nfn, 1, &igrp, &ind_grp, &gn_grp);
+ get_index(&atoms, nfn, 1, &igrp, &indexGroups, &indexGroupNames);
out = ftp2FILE(efITP, NFILE, fnm, "w");
- fprintf(out, "; position restraints for %s of %s\n\n", gn_grp, title);
+ fprintf(out, "; position restraints for %s of %s\n\n", indexGroupNames, title);
fprintf(out, "[ position_restraints ]\n");
fprintf(out, ";%3s %5s %9s %10s %10s\n", "i", "funct", "fcx", "fcy", "fcz");
for (i = 0; i < igrp; i++)
{
- fprintf(out, "%4d %4d %10g %10g %10g\n", ind_grp[i] + 1, 1, fc[XX], fc[YY], fc[ZZ]);
+ fprintf(out, "%4d %4d %10g %10g %10g\n", indexGroups[i] + 1, 1, fc[XX], fc[YY], fc[ZZ]);
}
gmx_ffclose(out);
}
sfree(v);
done_atom(&atoms);
}
+ sfree(indexGroupNames);
+ sfree(indexGroups);
return 0;
}
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.
-gmx_add_unit_test(GmxPreprocessTests gmxpreprocess-test
- editconf.cpp
- genconf.cpp
- genion.cpp
- genrestr.cpp
- gpp_atomtype.cpp
- gpp_bond_atomtype.cpp
- insert_molecules.cpp
- readir.cpp
- solvate.cpp
- topdirs.cpp
- )
+gmx_add_gtest_executable(gmxpreprocess-test
+ CPP_SOURCE_FILES
+ editconf.cpp
+ genconf.cpp
+ genion.cpp
+ genrestr.cpp
+ gpp_atomtype.cpp
+ gpp_bond_atomtype.cpp
+ insert_molecules.cpp
+ readir.cpp
+ solvate.cpp
+ topdirs.cpp
+ )
+gmx_register_gtest_test(GmxPreprocessTests gmxpreprocess-test SLOW_TEST)
# Currently these can be slow to run in Jenkins, so they are in
# several test binaries.
set(exename pdb2gmx1-test)
gmx_add_gtest_executable(${exename}
- pdb2gmx.cpp
-)
+ CPP_SOURCE_FILES
+ pdb2gmx.cpp
+ )
target_compile_definitions(${exename} PRIVATE OPLSAA=1 GROMOS=0 AMBER=0 CHARMM=0)
-gmx_register_gtest_test(Pdb2gmx1Test ${exename})
+gmx_register_gtest_test(Pdb2gmx1Test ${exename} SLOW_TEST)
set(exename pdb2gmx2-test)
gmx_add_gtest_executable(${exename}
- pdb2gmx.cpp
- )
+ CPP_SOURCE_FILES
+ pdb2gmx.cpp
+ )
target_compile_definitions(${exename} PRIVATE OPLSAA=0 GROMOS=1 AMBER=0 CHARMM=0)
-gmx_register_gtest_test(Pdb2gmx2Test ${exename})
+gmx_register_gtest_test(Pdb2gmx2Test ${exename} SLOW_TEST)
set(exename pdb2gmx3-test)
gmx_add_gtest_executable(${exename}
- pdb2gmx.cpp
- )
+ CPP_SOURCE_FILES
+ pdb2gmx.cpp
+ )
target_compile_definitions(${exename} PRIVATE OPLSAA=0 GROMOS=0 AMBER=1 CHARMM=1)
-gmx_register_gtest_test(Pdb2gmx3Test ${exename})
+gmx_register_gtest_test(Pdb2gmx3Test ${exename} SLOW_TEST)
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2014,2016,2019, by the GROMACS development team, led by
+# Copyright (c) 2014,2016,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
include(${_gmx_import_file})
unset(_gmx_import_file)
-get_target_property(_libs libgromacs INTERFACE_LINK_LIBRARIES)
+get_target_property(_libs Gromacs::libgromacs INTERFACE_LINK_LIBRARIES)
if (_libs MATCHES "tng_io::tng_io")
include(CMakeFindDependencyMacro)
- find_dependency(TNG_IO)
+ find_dependency(TNG_IO REQUIRED)
+endif()
+if (_libs MATCHES "OpenMP::OpenMP_CXX")
+ include(CMakeFindDependencyMacro)
+ find_dependency(OpenMP REQUIRED)
endif()
unset(_libs)
set(GROMACS_INCLUDE_DIRS)
list(APPEND GROMACS_INCLUDE_DIRS ${_gmx_root_dir}/${_dir})
endif()
endforeach()
-set(GROMACS_LIBRARIES libgromacs)
+set(GROMACS_LIBRARIES Gromacs::libgromacs)
set(GROMACS_DEFINITIONS @INSTALLED_HEADER_DEFINITIONS@)
set(GROMACS_IS_DOUBLE @GMX_DOUBLE@)
if (DEFINED GROMACS_SUFFIX AND NOT "${GROMACS_SUFFIX}" STREQUAL "@GMX_LIBS_SUFFIX@")
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
+# Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
+# Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
file(GLOB MATH_SOURCES *.cpp)
set(LIBGROMACS_SOURCES ${LIBGROMACS_SOURCES} ${MATH_SOURCES} PARENT_SCOPE)
-# TODO: (https://redmine.gromacs.org/issues/988) Find a new convention for defining public API.
+# TODO: (https://gitlab.com/gromacs/gromacs/-/issues/988) Find a new convention for defining public API.
install(FILES
+ functions.h
  vectypes.h
  DESTINATION include/gromacs/math)
+ if(GMX_INSTALL_LEGACY_API)
+ install(FILES
+ do_fit.h
+ units.h
+ utilities.h
+ vec.h
+ DESTINATION include/gromacs/math)
+ endif()
+
if (BUILD_TESTING)
add_subdirectory(tests)
endif()
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2010,2012,2013,2014,2015,2016,2019,2020, by the GROMACS development team, led by
+# Copyright (c) 2010,2012,2013,2014,2015 by the GROMACS development team.
+# Copyright (c) 2016,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
treesupport.cpp
)
+ if(GMX_INSTALL_LEGACY_API)
+ install(FILES
+ abstractoption.h
+ basicoptions.h
+ filenameoption.h
+ filenameoptionmanager.h
+ ioptionsbehavior.h
+ ioptionscontainer.h
+ ioptionscontainerwithsections.h
+ ivaluestore.h
+ optionfiletype.h
+ optionflags.h
+ options.h
+ timeunitmanager.h
+ DESTINATION include/gromacs/options)
+ endif()
+
if (BUILD_TESTING)
add_subdirectory(tests)
endif()
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2010,2012,2013,2014,2015, The GROMACS development team.
+# Copyright (c) 2010,2012,2013,2014,2015 by the GROMACS development team.
# Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
gmx_target_warning_suppression(scanner -Wno-unused-parameter HAS_NO_UNUSED_PARAMETER)
gmx_target_warning_suppression(scanner -Wno-missing-declarations HAS_NO_MISSING_DECLARATIONS)
gmx_target_warning_suppression(scanner -Wno-null-conversion HAS_NO_NULL_CONVERSIONS)
- gmx_target_warning_suppression(scanner -wd1419 HAS_DECL_IN_SOURCE)
endif()
list(APPEND libgromacs_object_library_dependencies scanner)
set(libgromacs_object_library_dependencies ${libgromacs_object_library_dependencies} PARENT_SCOPE)
+ if(GMX_INSTALL_LEGACY_API)
+ install(FILES
+ indexutil.h
+ nbsearch.h
+ position.h
+ selectioncollection.h
+ selectionenums.h
+ selection.h
+ selectionoptionbehavior.h
+ selectionoption.h
+ selectionoptionmanager.h
+ DESTINATION include/gromacs/selection)
+ endif()
+
if (BUILD_TESTING)
add_subdirectory(tests)
endif()
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2015,2016,2017,2018,2019 by the GROMACS development team.
+ * Copyright (c) 2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "gromacs/hardware/hardwaretopology.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
-#include "gromacs/mdlib/update_constrain_cuda.h"
+#include "gromacs/mdlib/update_constrain_gpu.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
const char* g_specifyEverythingFormatString =
"When you use mdrun -gputasks, %s must be set to non-default "
"values, so that the device IDs can be interpreted correctly."
-#if GMX_GPU != GMX_GPU_NONE
+#if GMX_GPU
" If you simply want to restrict which GPUs are used, then it is "
"better to use mdrun -gpu_id. Otherwise, setting the "
-# if GMX_GPU == GMX_GPU_CUDA
+# if GMX_GPU_CUDA
"CUDA_VISIBLE_DEVICES"
-# elif GMX_GPU == GMX_GPU_OPENCL
+# elif GMX_GPU_OPENCL
// Technically there is no portable way to do this offered by the
// OpenCL standard, but the only current relevant case for GROMACS
// is AMD OpenCL, which offers this variable.
const std::vector<int>& userGpuTaskAssignment,
const gmx_hw_info_t& hardwareInfo,
const t_inputrec& inputrec,
- const gmx_mtop_t& mtop,
const int numRanksPerSimulation,
const int numPmeRanksPerSimulation)
{
// First, exclude all cases where we can't run PME on GPUs.
if ((pmeTarget == TaskTarget::Cpu) || !useGpuForNonbonded || !pme_gpu_supports_build(nullptr)
- || !pme_gpu_supports_hardware(hardwareInfo, nullptr)
- || !pme_gpu_supports_input(inputrec, mtop, nullptr))
+ || !pme_gpu_supports_hardware(hardwareInfo, nullptr) || !pme_gpu_supports_input(inputrec, nullptr))
{
// PME can't run on a GPU. If the user required that, we issue
// an error later.
const std::vector<int>& userGpuTaskAssignment,
const gmx_hw_info_t& hardwareInfo,
const t_inputrec& inputrec,
- const gmx_mtop_t& mtop,
const int numRanksPerSimulation,
const int numPmeRanksPerSimulation,
const bool gpusWereDetected)
}
return false;
}
- if (!pme_gpu_supports_input(inputrec, mtop, &message))
+ if (!pme_gpu_supports_input(inputrec, &message))
{
if (pmeTarget == TaskTarget::Gpu)
{
{
errorMessage += "Compatible GPUs must have been found.\n";
}
- if (GMX_GPU != GMX_GPU_CUDA)
+ if (!GMX_GPU_CUDA)
{
errorMessage += "Only a CUDA build is supported.\n";
}
// The graph is needed, but not supported
errorMessage += "Orientation restraints are not supported.\n";
}
- if (inputrec.efep != efepNO)
+ if (inputrec.efep != efepNO
+ && (haveFreeEnergyType(inputrec, efptBONDED) || haveFreeEnergyType(inputrec, efptMASS)))
{
- // Actually all free-energy options except for mass and constraint perturbation are supported
- errorMessage += "Free energy perturbations are not supported.\n";
+ errorMessage += "Free energy perturbation for mass and constraints are not supported.\n";
}
const auto particleTypes = gmx_mtop_particletype_count(mtop);
if (particleTypes[eptShell] > 0)
// does not support it, the actual CUDA LINCS code does support it
if (gmx_mtop_ftype_count(mtop, F_CONSTRNC) > 0)
{
- errorMessage += "Non-connecting constraints are not supported";
+ errorMessage += "Non-connecting constraints are not supported\n";
}
- if (!UpdateConstrainCuda::isNumCoupledConstraintsSupported(mtop))
+ if (!UpdateConstrainGpu::isNumCoupledConstraintsSupported(mtop))
{
errorMessage +=
- "The number of coupled constraints is higher than supported in the CUDA LINCS "
+ "The number of coupled constraints is higher than supported in the GPU LINCS "
"code.\n";
}
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2010,2013,2014,2015,2018,2019,2020, by the GROMACS development team, led by
+# Copyright (c) 2010,2013,2014,2015,2018 by the GROMACS development team.
+# Copyright (c) 2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
file(GLOB TRAJECTORYANALYSIS_SOURCES *.cpp modules/*.cpp)
set(LIBGROMACS_SOURCES ${LIBGROMACS_SOURCES} ${TRAJECTORYANALYSIS_SOURCES} PARENT_SCOPE)
+ if(GMX_INSTALL_LEGACY_API)
+ install(FILES
+ analysismodule.h
+ analysissettings.h
+ cmdlinerunner.h
+ topologyinformation.h
+ DESTINATION include/gromacs/trajectoryanalysis)
+ endif()
+
if (BUILD_TESTING)
add_subdirectory(tests)
endif()
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2010,2011,2012,2013,2014,2015,2017,2018,2019,2020, by the GROMACS development team, led by
+# Copyright (c) 2010,2011,2012,2013,2014 by the GROMACS development team.
+# Copyright (c) 2015,2017,2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
# the research papers on the package. Check out http://www.gromacs.org.
file(GLOB UTILITY_SOURCES *.cpp)
-if (GMX_GPU AND NOT GMX_USE_OPENCL)
+if (GMX_GPU_CUDA)
gmx_add_libgromacs_sources(cuda_version_information.cu)
endif()
set(LIBGROMACS_SOURCES ${LIBGROMACS_SOURCES} ${UTILITY_SOURCES} PARENT_SCOPE)
-# TODO: (https://redmine.gromacs.org/issues/988) Find a new convention for defining public API.
+# TODO: (https://gitlab.com/gromacs/gromacs/-/issues/988) Find a new convention for defining public API.
install(FILES
basedefinitions.h
current_function.h
install(FILES
arrayref.h
baseversion.h
- classhelpers.h
+ classhelpers.h
enumerationhelpers.h
- exceptions.h
+ exceptions.h
+ fileptr.h
+ flags.h
+ futil.h
+ listoflists.h
+ pleasecite.h
programcontext.h
range.h
smalloc.h
+ stringutil.h
unique_cptr.h
DESTINATION include/gromacs/utility)
endif()