#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2009,2010,2011,2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+# Copyright (c) 2009,2010,2011,2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
# RPATHs), and makes the binaries in the build tree relocatable.
if(GMX_LIB_INSTALL_DIR STREQUAL "lib")
set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
+ if(POLICY CMP0068)
+ cmake_policy(SET CMP0068 NEW) # From CMake-3.9
+ set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR TRUE)
+ endif()
endif()
# Set the RPATH as relative to the executable location to make the
# binaries relocatable.
- if(NOT CMAKE_SYSTEM_NAME STREQUAL "Darwin") #Assume OS X >=10.5
- set(CMAKE_INSTALL_RPATH "\$ORIGIN/../${GMX_LIB_INSTALL_DIR}")
- else()
+ if(CMAKE_SYSTEM_NAME STREQUAL "Darwin") #Assume OS X >=10.5
set(CMAKE_INSTALL_RPATH "@executable_path/../${GMX_LIB_INSTALL_DIR}")
+ set(CMAKE_INSTALL_NAME_DIR ${CMAKE_INSTALL_RPATH})
+ else()
+ set(CMAKE_INSTALL_RPATH "\$ORIGIN/../${GMX_LIB_INSTALL_DIR}")
endif()
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
set(CMAKE_MACOSX_RPATH 1)
gcc-5 simd=ARM_NEON_ASIMD release-with-assert
# Test the mdrun-only build
+# Test newest gcc at time of release
# TODO In combination with gmx from another build, arrange to run regressiontests
-clang-3.7 double mpi no-openmp fftpack mdrun-only
+gcc-7 mpi no-openmp fftpack mdrun-only
# Test MPMD PME with thread-MPI
-# TODO Add double to this configuration if/when we stablize the essentialdynamics tests
-gcc-7 npme=1 nranks=2 no-openmp fftpack release-with-assert
+# Test newest icc at time of release
+# Test icc without SIMD in double precision in release mode
+# TODO enable simd=avx_256 in this config, and change the above description once #2335 is resolved
+icc-18 npme=1 nranks=2 no-openmp double fftpack release simd=none
+
+# Test CUDA build on a slave with no CUDA devices
+gcc-4.9 gpu cuda-7.0 openmp release-with-assert
# Test non-default GMX_PREFER_STATIC_LIBS behavior
# TODO enable this
# Test own-fftw build (from local copy of the file)
# Test mdrun -tunepme (e.g. with relaxed tolerances, for now)
# Consider testing of other CMake option paths
-# Add CUDA build and run the tests on a node with no CUDA GPUs
# Test SIMD implementation of pair search for GPU code-path
gcc-5 gpu cuda-8.0 thread-mpi openmp cmake-3.6.1 release-with-assert simd=avx2_256
+# Test newest cmake at time of release
# Test with ThreadSanitizer (compiled without OpenMP, even though
# this gcc was configured with --disable-linux-futex, because
# Redmine #1850 is unresolved, which causes more suspected
# false positives than races detected)
# Test fftpack fallback
-gcc-7 tsan no-openmp fftpack
+gcc-7 tsan no-openmp fftpack cmake-3.10.0
# Test newest gcc at time of release
-gcc-7 mpi
+# Test gcc in double precision
+# Test 128-bit SIMD in double precision (to cover SimdInt32 support better)
+gcc-7 double mpi simd=avx_128_fma
# Test on MacOS (because gcc-6 is only available there)
# Test X11 build
-gcc-6 double x11
+gcc-6 x11
# Test oldest supported cmake
# Test older clang
-# Test double precision
+# Test clang in double precision
# Test without OpenMP
# Test thread-MPI
# Test AVX_128_FMA SIMD + Double (Important for Simd4N=Simd4 and sizeof(SimdInt32)!=4*GMX_SIMD_REAL_WIDTH)
# Test oldest supported icc
# Test icc on Windows
-icc-16 msvc-2015 fftpack
+# Test icc with SIMD in mixed precision in release mode
+icc-16 msvc-2015 fftpack simd=avx2_256 release
-# Test newest cmake at time of release
# Test newest icc at the time of release
+# Test icc without SIMD in double precision in release mode
# Test MKL
# Test without any MPI
-# Test on CentOS (because icc-18.0 is only available there)
-icc-18 no-thread-mpi openmp mkl cmake-3.10.0 simd=avx_256
+# Test on CentOS (because cmake-3.9.6 is available there)
+icc-18 no-thread-mpi double openmp mkl cmake-3.9.6 simd=none release
# Test NVIDIA OpenCL
# Test MPI + OpenCL
# Test AVX2_256 SIMD
-gcc-4.8 openmp opencl cuda-7.5 mpi release-with-assert simd=avx2_256
+# Test icc with SIMD in mixed precision in release mode
+icc-18 openmp opencl cuda-7.5 mpi release simd=avx2_256
# Test AMD OpenCL
# Test AVX_128_FMA SIMD
gcc-5 openmp simd=avx_128_fma opencl amdappsdk-3.0
# TODO
+# Add support for CUDA 9.0
# Add OpenMP support to ASAN build (but libomp.so in clang-4 reports leaks, so might need a suitable build or suppression)
# Test hwloc support
# Test newest supported LTS Ubuntu
# Update gpu testing specifiers per https://redmine.gromacs.org/issues/2161
# Resolve Redmine #1850 so that ThreadSanitizer can test our OpenMP code
# Support icc 17
+# Test AVX-512 when hardware is available
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+# Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
if(GMX_BUILD_OWN_FFTW)
- if(WIN32)
- message(FATAL_ERROR "Cannot build FFTW3 automatically (GMX_BUILD_OWN_FFTW=ON) on Windows")
+ if(MSVC)
+ message(FATAL_ERROR "Cannot build FFTW3 automatically (GMX_BUILD_OWN_FFTW=ON) in Visual Studio")
+ endif()
+ if(CMAKE_GENERATOR STREQUAL "Ninja")
+ message(FATAL_ERROR "Cannot build FFTW3 automatically (GMX_BUILD_OWN_FFTW=ON) with ninja")
endif()
add_subdirectory(src/contrib/fftw)
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+# Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
# - if CUDA is not found but GPUs were detected issue a warning
if (NOT DEFINED GMX_GPU)
set(GMX_GPU_AUTO TRUE CACHE INTERNAL "GPU acceleration will be selected automatically")
+else()
+ set(GMX_GPU_AUTO FALSE CACHE INTERNAL "GPU acceleration will be selected automatically")
endif()
option(GMX_GPU "Enable GPU acceleration" OFF)
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+# Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
# candidates, where it signifies the most recent such release from
# this branch; it will be empty before the first such release, as well
# as after the final release is out.
-set(GMX_VERSION_SUFFIX "-beta2")
+set(GMX_VERSION_SUFFIX "-rc1")
# Conventionally with libtool, any ABI change must change the major
# version number, the minor version number should change if it's just
set(REGRESSIONTEST_VERSION "${GMX_VERSION_STRING}")
set(REGRESSIONTEST_BRANCH "refs/heads/release-2018")
-set(REGRESSIONTEST_MD5SUM "ebfdb7f7b0c70a03038655ee12b18300" CACHE INTERNAL "MD5 sum of the regressiontests tarball")
+set(REGRESSIONTEST_MD5SUM "d4c535a06fd8f34c9fd21e7ee5a30808" CACHE INTERNAL "MD5 sum of the regressiontests tarball")
math(EXPR GMX_VERSION_NUMERIC
"${GMX_VERSION_MAJOR}*10000 + ${GMX_VERSION_PATCH}")
--- /dev/null
+Physical validation
+===================
+
+Physical validation tests check whether simulation results correspond
+to physical (or mathematical) expectations.
+
+Unlike the existing tests, we are not able to keep these tests in
+the "seconds, not minutes" time frame, rather aiming for "hours, not
+days". They should therefore be run periodically, but probably not
+for every build.
+
+Also, given the long run time, it will in many cases be necessary to
+separate running of the systems (e.g. to run it at a specific time, or
+on a different resource), such that the make script does give the
+option to
+
+* prepare run files and an execution script,
+* analyze already present simulations,
+* or prepare, run and analyze in one go.
+
+
+Test description
+----------------
+
+Currently, simulation results are tested against three physically /
+mathematically expected results:
+
+* *Integrator convergence*: A symplectic integrator can be shown to
+ conserve a constant of motion (such as the energy in a
+ micro-canonical simulation) up to a fluctuation that is quadratic in
+ time step chosen. Comparing two or more constant-of-motion
+ trajectories realized using different time steps (but otherwise
+ unchanged simulation parameters) allows a check of the symplecticity
+ of the integration. Note that lack of symplecticity does not
+ necessarily imply an error in the integration algorithm, it can also
+ hint at physical violations in other parts of the model, such as
+ non-continuous potential functions, imprecise handling of
+ constraints, etc.
+* *Kinetic energy distribution*: The kinetic energy trajectory of an
+  (equilibrated) system sampling a canonical or an isothermal-isobaric
+  ensemble is expected to be Maxwell-Boltzmann distributed. The
+  similarity between the physically expected and the observed
+  distribution allows validation of the sampled kinetic energy ensemble.
+* *Distribution of configurational quantities*: As the distribution of
+  configurational quantities like the potential energy or the volume
+  is in general not known analytically, testing the likelihood of a
+  trajectory sampling a given ensemble is less straightforward than
+ for the kinetic energy. However, generally, the ratio of the
+ probability distribution between samples of the same ensemble at
+ different state points (e.g. at different temperatures, different
+ pressures) is known. Comparing two simulations at different state
+ points therefore allows a validation of the sampled ensemble.
+
+The physical validation included in GROMACS tests a range of the
+most-used settings on several systems. The general philosophy is to
+leave most settings to default values with the exception of the ones
+explicitly tested in order to be sensitive to changes in the default
+values. The test set will be enlarged as we discover interesting test
+systems and corner cases. Under double precision, some additional
+tests are run, and some other tests are run using a lower tolerance.
+
+
+Integrator convergence
+^^^^^^^^^^^^^^^^^^^^^^
+
+All simulations are performed under NVE on Argon (1000 atoms) and
+water (900 molecules) systems. As these tests are very sensitive to
+numerical imprecision, they are performed with long-range corrections
+for both Lennard-Jones and electrostatic interactions, with a very low
+pair-list tolerance (``verlet-buffer-tolerance = 1e-10``), and high
+LINCS settings where applicable.
+
+**Argon**:
+
+* *Integrators*:
+ - ``integrator = md``
+ - ``integrator = md-vv``
+* *Long-range corrections LJ*:
+ - ``vdwtype = PME``
+ - ``vdwtype = cut-off``, ``vdw-modifier = force-switch``,
+ ``rvdw-switch = 0.8``
+
+**Water**:
+
+* *Integrators*:
+ - ``integrator = md``
+ - ``integrator = md-vv``
+* *Long-range corrections LJ*:
+ - ``vdwtype = PME``
+ - ``vdwtype = cut-off``, ``vdw-modifier = force-switch``,
+ ``rvdw-switch = 0.8``
+* *Long-range corrections electrostatics*:
+ - ``coulombtype = PME``, ``fourierspacing = 0.05``
+* *Constraint algorithms*:
+ - ``constraint-algorithm = lincs``, ``lincs-order = 6``,
+ ``lincs-iter = 2``
+ - ``constraint-algorithm = none``
+ - SETTLE
+
+
+Ensemble tests
+^^^^^^^^^^^^^^
+
+The generated ensembles are tested with Argon (1000 atoms) and water
+(900 molecules, with SETTLE and PME) systems, in the following
+combinations:
+
+* ``integrator = md``, ``tcoupl = v-rescale``, ``tau-t = 0.1``,
+ ``ref-t = 87.0`` (Argon) or ``ref-t = 298.15`` (Water)
+* ``integrator = md``, ``tcoupl = v-rescale``, ``tau-t = 0.1``,
+ ``ref-t = 87.0`` (Argon) or ``ref-t = 298.15`` (Water), ``pcoupl =
+ parrinello-rahman``, ``ref-p = 1.0``, ``compressibility = 4.5e-5``
+* ``integrator = md-vv``, ``tcoupl = v-rescale``, ``tau-t = 0.1``,
+ ``ref-t = 87.0`` (Argon) or ``ref-t = 298.15`` (Water)
+* ``integrator = md-vv``, ``tcoupl = nose-hoover``, ``tau-t = 1.0``,
+ ``ref-t = 87.0`` (Argon) or ``ref-t = 298.15`` (Water), ``pcoupl =
+ mttk``, ``ref-p = 1.0``, ``compressibility = 4.5e-5``
+
+All thermostats are applied to the entire system (``tc-grps =
+system``). The simulations run for 1ns at 2fs time step with Verlet
+cut-off. All other settings are left at default values.
+
+
+Building and testing using the build system
+-------------------------------------------
+
+Since these tests cannot be run at the same frequency as the current
+tests, they are kept strictly opt-in via
+``-DGMX_PHYSICAL_VALIDATION=ON``, with
+``-DGMX_PHYSICAL_VALIDATION=OFF`` being the default. Independently of
+that, all previously existing build targets are unchanged, including
+``make check``.
+
+If physical validation is turned on, a number of additional make
+targets can be used:
+
+* ``make check`` is unchanged, it builds the main binaries and the unit
+ tests, then runs the unit tests and, if available, the regression
+ tests.
+* ``make check-phys`` builds the main binaries, then runs the physical
+  validation tests. **Warning**: This requires simulating all systems
+  and might take several hours on an average machine!
+* ``make check-all`` combines ``make check`` and ``make check-phys``.
+
+As the simulations needed to perform the physical validation tests may
+take a long time, it might be advantageous to run them on an external
+resource. To enable this, two additional make targets are present:
+
+* ``make check-phys-prepare`` prepares all simulation files under
+ ``tests/physicalvalidation`` of the build directory, as well as a
+ rudimentary run script in the same directory.
+* ``make check-phys-analyze`` runs the same tests as ``make
+ check-phys``, but does not simulate the systems. Instead, this
+ target assumes that the results can be found under
+ ``tests/physicalvalidation`` of the build directory.
+
+The intended usage of these additional targets is to prepare the
+simulation files, then run them on a different resource or at a
+different time, and later analyze them. If you want to use this, be
+aware *(i)* that the run script generated is very simple and might
+need (considerable) tuning to work with your setup, and *(ii)* that
+the analysis script is sensitive to the folder structure, so make sure
+to preserve it when copying the results to / from another resource.
+
+Additionally to the mentioned make targets, a number of internal make
+targets are defined. These are not intended to be used directly, but
+are necessary to support the functionality described above, especially
+the complex dependencies. These internal targets include
+``run-ctest``, ``run-ctest-nophys``, ``run-ctest-phys`` and
+``run-ctest-phys-analyze`` running the different tests,
+``run-physval-sims`` running the simulations for physical validation,
+and ``missing-tests-notice``, ``missing-tests-notice-all``,
+``missing-phys-val-phys``, ``missing-phys-val-phys-analyze`` and
+``missing-phys-val-all`` notifying users about missing tests.
+
+
+Direct usage of the python script
+---------------------------------
+
+The ``make`` commands mentioned above are calling the python script
+``tests/physicalvalidation/gmx_physicalvalidation.py``, which can be
+used independently of the make system. Use the ``-h`` flag for the
+general usage information, and the ``--tests`` for more details on the
+available physical validations.
+
+The script requires a ``json`` file defining the tests as an input.
+Among other options, it allows defining the GROMACS binary and the
+working directory to be used, and to decide whether to only prepare
+the simulations, prepare and run the simulations, only analyze the
+simulations, or do all three steps at once.
+
+
+Adding new tests
+----------------
+
+The available tests are listed in the ``systems.json`` (tests
+standardly used for single precision builds) and ``systems_d.json``
+(tests standardly used for double precision builds) files in the same
+directory, the GROMACS files are in the folder ``systems/``.
+
+The ``json`` files list the different tests. Each test has a
+``"name"`` attribute, which needs to be unique, a ``"dir"`` attribute,
+which denotes the directory of the system (inside the ``systems/``
+directory) to be tested, and a ``"test"`` attribute which lists the
+validations to be performed on the system. Additionally, the optional
+``"grompp_args"`` and ``"mdrun_args"`` attributes allow passing
+specific arguments to ``gmx grompp`` or ``gmx mdrun``, respectively. A
+single test can contain several validations, and several independent
+tests can be performed on the same input files.
+
+To add a new test to a present system, add the test name and the
+arguments to the ``json`` file(s). To use a new system, add a
+subfolder in the ``systems/`` directory containing
+``input/system.{gro,mdp,top}`` files defining your system.
NVML support is only available if detected, and may be disabled by
turning off the ``GMX_USE_NVML`` CMake advanced option.
-By default, optimized code will be generated for CUDA architectures
-supported by the nvcc compiler (and the |Gromacs| build system).
-However, it can be beneficial to manually pick the specific CUDA architecture(s)
-to generate code for either to reduce compilation time (and binary size) or to
-target a new architecture not yet supported by the |Gromacs| build system.
-Setting the desired CUDA architecture(s) and virtual architecture(s)
-can be done using the ``GMX_CUDA_TARGET_SM`` and ``GMX_CUDA_TARGET_COMPUTE``
-variables, respectively. These take a semicolon delimited string with
-the two digit suffixes of CUDA (virtual) architectures names
-(for details see the "Options for steering GPU code generation" section of the
-nvcc man / help or Chapter 6. of the nvcc manual).
+By default, code will be generated for the most common CUDA architectures.
+However, to reduce build time and binary size we do not generate code for
+every single possible architecture, which in rare cases (say, Tegra systems)
+can result in the default build not being able to use some GPUs.
+If this happens, or if you want to remove some architectures to reduce
+binary size and build time, you can alter the target CUDA architectures.
+This can be done either with the ``GMX_CUDA_TARGET_SM`` or
+``GMX_CUDA_TARGET_COMPUTE`` CMake variables, which take a semicolon delimited
+string with the two digit suffixes of CUDA (virtual) architectures names, for
+instance "35;50;51;52;53;60". For details, see the "Options for steering GPU
+code generation" section of the nvcc man / help or Chapter 6. of the nvcc
+manual.
The GPU acceleration has been tested on AMD64/x86-64 platforms with
Linux, Mac OS X and Windows operating systems, but Linux is the
%
% This file is part of the GROMACS molecular simulation package.
%
-% Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+% Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
% Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
% and including many others, as listed in the AUTHORS file in the
% top-level source directory and at http://www.gromacs.org.
\bea \frac {\de^2\ve{r}_i}{\de t^2} & = & \frac{\ve{F}_i}{m_i} -
\ve{M} \frac{\de \ve{r}_i}{\de t} , \\ \ve{M} & = & \ve{b}^{-1} \left[
\ve{b} \frac{\de \ve{b}'}{\de t} + \frac{\de \ve{b}}{\de t} \ve{b}'
- \right] \ve{b}'^{-1}. \eea The (inverse) mass parameter matrix
+ \right] \ve{b}'^{-1}.
+ \eea
+This extra term has the appearance of a friction, but it should be
+noted that it is fictitious, and rather an effect of the
+Parrinello-Rahman equations of motion being defined with all
+particle coordinates represented relative to the box vectors, while
+{\gromacs} uses normal Cartesian coordinates for positions,
+velocities and forces. It is worth noting that the kinetic energy too
+should formally be calculated based on velocities relative to the
+box vectors. This can have an effect e.g. for external constant stress,
+but for now we only support coupling to constant external
+pressures, and for any normal simulation the velocities of box
+vectors should be extremely small compared to particle velocities.
+Gang Liu has done some work on deriving this for Cartesian
+coordinates\cite{Liu2015} that we will try to implement at
+some point in the future together with support for external stress.
+
+The (inverse) mass parameter matrix
$\ve{W}^{-1}$ determines the strength of the coupling, and how the box
can be deformed. The box restriction (\ref{eqn:box_rot}) will be
fulfilled automatically if the corresponding elements of $\ve{W}^{-1}$
%
% This file is part of the GROMACS molecular simulation package.
%
-% Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+% Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
% Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
% and including many others, as listed in the AUTHORS file in the
% top-level source directory and at http://www.gromacs.org.
\end{enumerate}
\section{Non-bonded interactions}
-Non-bonded interactions in {\gromacs} are pair-additive and centro-symmetric:
+Non-bonded interactions in {\gromacs} are pair-additive:
\beq
V(\ve{r}_1,\ldots \ve{r}_N) = \sum_{i<j}V_{ij}(\rvij);
\eeq
\beq
-\ve{F}_i = -\sum_j \frac{dV_{ij}(r_{ij})}{dr_{ij}} \frac{\rvij}{r_{ij}} = -\ve{F}_j
+\ve{F}_i = -\sum_j \frac{dV_{ij}(r_{ij})}{dr_{ij}} \frac{\rvij}{r_{ij}}
\eeq
-The non-bonded interactions contain a \normindex{repulsion} term,
+Since the potential only depends on the scalar distance, interactions
+will be centro-symmetric, i.e.\ the vectorial partial force on particle $i$ from
+the pairwise interaction $V_{ij}(r_{ij})$ has the opposite direction of the partial force on
+particle $j$. For efficiency reasons, interactions are calculated in loops over pairs,
+updating both partial forces, rather than summing one complete nonbonded force at
+a time. The non-bonded interactions contain a \normindex{repulsion} term,
a \normindex{dispersion}
term, and a Coulomb term. The repulsion and dispersion term are
combined in either the Lennard-Jones (or 6-12 interaction), or the
%
% This file is part of the GROMACS molecular simulation package.
%
-% Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
+% Copyright (c) 2013,2014,2015,2017,2018, by the GROMACS development team, led by
% Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
% and including many others, as listed in the AUTHORS file in the
% top-level source directory and at http://www.gromacs.org.
\subsection{Virial from non-bonded forces}
Here the derivation for the single sum virial in the {\em non-bonded force}
-routine is given. $i \neq j$ in all formulae below.
+routine is given. There are a couple of considerations that are special
+to {\gromacs} that we take into account:
+\begin{itemize}
+\item When calculating short-range interactions, we apply the
+{\em minimum image convention} and only consider the closest
+image of each neighbor - and in particular we never allow interactions
+between a particle and any of its periodic images. For all the
+equations below, this means $i \neq j$.
+\item In general, either the $i$ or $j$ particle might be shifted to a neighbor
+cell to get the closest interaction (shift $\delta_{ij}$). However, with minimum image
+convention there can be at most 27 different shifts for particles in the central cell,
+and for typical (very short-ranged) biomolecular interactions there are typically only a few
+different shifts involved for each particle, not to mention that each interaction can
+only be present for one shift.
+\item For the {\gromacs} nonbonded interactions
+we use this to split the neighborlist of each $i$ particle into multiple
+separate lists, where each list has a constant shift $\delta_i$ for the $i$ particle. We
+can represent this as a sum over shifts (for which we use index $s$), with the constraint that
+each particle interaction can only contribute to one of the terms in this sum, and the
+shift is no longer dependent on the $j$ particles. For any sum that does not contain
+complex dependence on $s$, this means the sum trivially reduces to just the sum
+over $i$ and/or $j$.
+\item To simplify some of the sums, we replace sums over $j<i$ with double sums over
+all particles (remember, $i \neq j$) and divide by 2.
+\end{itemize}
+
+Starting from the above definition of the virial, we then get
\newcommand{\di}{\delta_{i}}
\newcommand{\qrt}{\frac{1}{4}}
\bea
-\Xi
-&~=~&-\half~\sum_{i < j}^{N}~\rnij\otimes\Fvij \\
-&~=~&-\qrt\sum_{i=1}^N~\sum_{j=1}^N ~(\rvi+\di-\rvj)\otimes\Fvij \\
-&~=~&-\qrt\sum_{i=1}^N~\sum_{j=1}^N ~(\rvi+\di)\otimes\Fvij-\rvj\otimes\Fvij \\
-&~=~&-\qrt\left(\sum_{i=1}^N~\sum_{j=1}^N ~(\rvi+\di)\otimes\Fvij~-~\sum_{i=1}^N~\sum_{j=1}^N ~\rvj\otimes\Fvij\right) \\
-&~=~&-\qrt\left(\sum_{i=1}^N~(\rvi+\di)\otimes\sum_{j=1}^N~\Fvij~-~\sum_{j=1}^N ~\rvj\otimes\sum_{i=1}^N~\Fvij\right) \\
-&~=~&-\qrt\left(\sum_{i=1}^N~(\rvi+\di)\otimes\Fvi~+~\sum_{j=1}^N ~\rvj\otimes\Fvj\right) \\
-&~=~&-\qrt\left(2~\sum_{i=1}^N~\rvi\otimes\Fvi+\sum_{i=1}^N~\di\otimes\Fvi\right)
+\Xi
+&~=~&-\half~\sum_{i < j}^{N}~{\mathbf r}^n_{ij} \otimes {\mathbf F}_{ij} \nonumber \\
+&~=~&-\half~\sum_{i < j}^{N}~\left( {\mathbf r}_i + \delta_{ij} - {\mathbf r}_j \right) \otimes {\mathbf F}_{ij} \nonumber \\
+&~=~&-\qrt~\sum_{i=1}^{N}~\sum_{j=1}^{N}~\left( {\mathbf r}_i + \delta_{ij} - {\mathbf r}_j \right) \otimes {\mathbf F}_{ij} \nonumber \\
+&~=~&-\qrt~\sum_{i=1}^{N}~\sum_{s}~\sum_{j=1}^{N}~\left( {\mathbf r}_i + \delta_{i,s} - {\mathbf r}_j \right) \otimes {\mathbf F}_{ij,s} \nonumber \\
+&~=~&-\qrt~\sum_{i=1}^{N}~\sum_{s}~\sum_{j=1}^{N}~\left( \left( {\mathbf r}_i + \delta_{i,s} \right) \otimes {\mathbf F}_{ij,s} -{\mathbf r}_j \otimes {\mathbf F}_{ij,s} \right) \nonumber \\
+&~=~&-\qrt~\sum_{i=1}^{N}~\sum_{s}~\sum_{j=1}^N ~\left( {\mathbf r}_i + \delta_{i,s} \right) \otimes {\mathbf F}_{ij,s} + \qrt \sum_{i=1}^{N}~\sum_{s}~\sum_{j=1}^{N} {\mathbf r}_j \otimes {\mathbf F}_{ij,s} \nonumber \\
+&~=~&-\qrt~\sum_{i=1}^{N}~\sum_{s}~\sum_{j=1}^N ~\left( {\mathbf r}_i + \delta_{i,s} \right) \otimes {\mathbf F}_{ij,s} + \qrt \sum_{i=1}^{N}~\sum_{j=1}^{N} {\mathbf r}_j \otimes {\mathbf F}_{ij} \nonumber \\
+&~=~&-\qrt~\sum_{s}~\sum_{i=1}^{N}~\left( {\mathbf r}_i + \delta_{i,s} \right) \otimes ~\sum_{j=1}^N {\mathbf F}_{ij,s} + \qrt \sum_{j=1}^N {\mathbf r}_j \otimes \sum_{i=1}^{N} {\mathbf F}_{ij} \nonumber \\
+&~=~&-\qrt~\sum_{s}~\sum_{i=1}^{N}~\left( {\mathbf r}_i + \delta_{i,s} \right) \otimes ~\sum_{j=1}^N {\mathbf F}_{ij,s} - \qrt \sum_{j=1}^N {\mathbf r}_j \otimes \sum_{i=1}^{N} {\mathbf F}_{ji} \nonumber \\
+&~=~&-\qrt~\sum_{s}~\sum_{i=1}^{N}~\left( {\mathbf r}_i + \delta_{i,s} \right) \otimes {\mathbf F}_{i,s} - \qrt \sum_{j=1}^N~{\mathbf r}_j \otimes {\mathbf F}_{j} \nonumber \\
+&~=~&-\qrt~\left(\sum_{i=1}^{N}~{\mathbf r}_i \otimes {\mathbf F}_{i} + \sum_{j=1}^N~{\mathbf r}_j \otimes {\mathbf F}_{j} \right) - \qrt \sum_{s}~\sum_{i=1}^{N} \delta_{i,s} \otimes {\mathbf F}_{i,s} \nonumber \\
+&~=~&-\half \sum_{i=1}^{N}~{\mathbf r}_i \otimes {\mathbf F}_{i} -\qrt \sum_{s}~\sum_{i=1}^{N}~\delta_{i,s} \otimes {\mathbf F}_{i,s} \nonumber \\
+&~=~&-\half \sum_{i=1}^{N}~{\mathbf r}_i \otimes {\mathbf F}_{i} -\qrt \sum_{s}~\delta_{s} \otimes {\mathbf F}_{s} \nonumber \\
+&~=~&\Xi_0 + \Xi_1
\eea
-In these formulae we introduced:
+In the second-last stage, we have used the property that each shift vector itself does not depend on the coordinates of particle $i$, so it is possible to sum
+up all forces corresponding to each shift vector (in the nonbonded kernels), and then just use a sum over the different shift vectors outside the kernels.
+We have also used
\bea
\Fvi&~=~&\sum_{j=1}^N~\Fvij \\
\Fvj&~=~&\sum_{i=1}^N~\Fvji
\beq
\Fvij~=~-\Fvji
\eeq
-we must, in the implementation, double the term containing the shift $\delta_i$.
+we must, in the implementation, double the term containing the shift $\delta_i$. Similarly, in a few places we have summed the shift-dependent force
+over all shifts to come up with the total force per interaction or particle.
+
+This separates the total virial $\Xi$ into a component $\Xi_0$ that is a single sum over particles, and a second component $\Xi_1$ that describes the influence of
+the particle shifts, and that is only a sum over the different shift vectors.
\subsection{The intra-molecular shift (mol-shift)}
For the bonded forces and SHAKE it is possible to make a {\em mol-shift}
pages = {1727--1739}
}
+@Article{Liu2015,
+ author = {Gang Liu},
+ title = {Dynamical Equations For The Period Vectors In A Periodic System Under Constant External Stress},
+ journal = {Can. J. Phys.},
+ year = 2015,
+ volume = 93,
+ pages = {974--978}
+}
+
@Article{Loof92,
author = "Hans de Loof and Lennart Nilsson and Rudolf Rigler",
title = "Molecular Dynamics Simulations of Galanin in Aqueous
%
% This file is part of the GROMACS molecular simulation package.
%
-% Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+% Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
% Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
% and including many others, as listed in the AUTHORS file in the
% top-level source directory and at http://www.gromacs.org.
3 1 1000 0 0 ; Restrain to a plane (Y-Z-plane)
[ dihedral_restraints ]
-; ai aj ak al type label phi dphi kfac power
- 3 6 1 2 1 1 180 0 1 2
- 1 4 3 5 1 1 180 0 1 2
+; ai aj ak al type phi dphi fc
+ 3 6 1 2 1 180 0 10
+ 1 4 3 5 1 180 0 10
; Include TIP3P water topology
#include "amber99/tip3p.itp"
The parameters specified in the [dihedral_restraints] directive are as follows:
\begin{itemize}
\item {\tt type} has only one possible value which is 1
-\item {\tt label} is unused and has been removed from the code.
\item {\tt phi} is the value of $\phi_0$ in \eqnref{dphi} and \eqnref{dihre} of the manual.
\item {\tt dphi} is the value of $\Delta\phi$ in \eqnref{dihre} of the manual.
-\item {\tt kfac} is analogous to {\tt fac} in the implementation of distance restraints. It is the factor by which the force constant is multiplied. By doing so, different restraints can be maintained with different force constants.
-\item {\tt power} is unused and has been removed from the code.
+\item {\tt fc} is the force constant $k_{dihr}$ in \eqnref{dihre} of the manual.
\end{itemize}
{\bf {\tt \#include "tip3p.itp"} :} includes a topology file that was already
Electric fields
+^^^^^^^^^^^^^^^
+
.. mdp:: electric-field-x ; electric-field-y ; electric-field-z
Here you can specify an electric field that optionally can be
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2009,2010,2011,2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2009,2010,2011,2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/* Define to 1 if you have the <unistd.h> header file. */
#cmakedefine HAVE_UNISTD_H
+# ifdef __APPLE__
+// Mac OS 13.x has a bug where dispatch.h generates an error for OpenCL builds if
+// HAVE_UNISTD_H is merely defined, but not set to 1. Since unistd.h should always
+// be available on this platform we simply undefine and redefine it to 1 for now
+# undef HAVE_UNISTD_H
+# define HAVE_UNISTD_H 1
+#endif
/* Define to 1 if you have the <pwd.h> header file. */
#cmakedefine01 HAVE_PWD_H
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
options.addOption(
gmx::DoubleOption("b")
.store(&tbegin).storeIsSet(&bBeginTimeSet).timeValue()
- .description("First frame (%t) to read from trajectory"));
+ .description("Time of first frame to read from trajectory (default unit %t)"));
}
if (isFlagSet(PCA_CAN_END))
{
options.addOption(
gmx::DoubleOption("e")
.store(&tend).storeIsSet(&bEndTimeSet).timeValue()
- .description("Last frame (%t) to read from trajectory"));
+ .description("Time of last frame to read from trajectory (default unit %t)"));
}
if (isFlagSet(PCA_CAN_DT))
{
options.addOption(
gmx::DoubleOption("dt")
.store(&tdelta).storeIsSet(&bDtSet).timeValue()
- .description("Only use frame when t MOD dt = first time (%t)"));
+ .description("Only use frame when t MOD dt = first time (default unit %t)"));
}
gmx::TimeUnit timeUnit = gmx::TimeUnit_Default;
if (isFlagSet(PCA_TIME_UNIT))
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2008,2009,2010,2011,2012,2013,2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2008,2009,2010,2011,2012,2013,2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
cr->npmenodes = nPmeRanks;
if (fplog)
{
- fprintf(fplog, "Using %d separate PME ranks, per user request\n", cr->npmenodes);
+ fprintf(fplog, "Using %d separate PME ranks\n", cr->npmenodes);
+ // TODO: there was a ", per user request" note here, but it's not correct anymore,
+ // as with GPUs decision about nPmeRanks can be made in runner() as well.
+ // Consider a single spot for setting nPmeRanks.
}
}
}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2017, by the GROMACS development team, led by
+ * Copyright (c) 2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
stdioHelper.redirectStringToStdin(stringForStdin);
ASSERT_EQ(0, gmx_energy(cmdline.argc(), cmdline.argv()));
+ // All the .edr files used in the tests contain only
+ // single-precision values, so even from a
+ // double-precision build they should conform to
+ // tolerances suitable for single-precision values.
+ setDefaultTolerance(defaultFloatTolerance());
checkOutputFiles();
}
};
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*S0 = load4U(data-offset);
*S1 = load4U(data-offset+4);
#else
- GMX_ALIGNED(real, GMX_SIMD4_WIDTH) buf_aligned[GMX_SIMD4_WIDTH*2];
+ alignas(GMX_SIMD_ALIGNMENT) real buf_aligned[GMX_SIMD4_WIDTH*2];
/* Copy data to an aligned buffer */
for (int i = 0; i < order; i++)
{
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
pme_spline_work *work;
#ifdef PME_SIMD4_SPREAD_GATHER
- GMX_ALIGNED(real, GMX_SIMD4_WIDTH) tmp[GMX_SIMD4_WIDTH*2];
+ alignas(GMX_SIMD_ALIGNMENT) real tmp[GMX_SIMD4_WIDTH*2];
Simd4Real zero_S;
Simd4Real real_mask_S0, real_mask_S1;
int of, i;
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
int offx, offy, offz;
#if defined PME_SIMD4_SPREAD_GATHER && !defined PME_SIMD4_UNALIGNED
- GMX_ALIGNED(real, GMX_SIMD4_WIDTH) thz_aligned[GMX_SIMD4_WIDTH*2];
+ alignas(GMX_SIMD_ALIGNMENT) real thz_aligned[GMX_SIMD4_WIDTH*2];
#endif
pnx = pmegrid->s[XX];
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
id[n++] = dl[i].atm.O;
}
}
- for (i = 1; (i < nl); i++)
+ for (i = 0; (i < nl); i++)
{
/* Omega */
if (has_dihedral(edOmega, &(dl[i])))
if (maxchi > MAXCHI)
{
fprintf(stderr,
- "Will only calculate first %d Chi dihedrals in stead of %d.\n",
+ "Will only calculate first %d Chi dihedrals instead of %d.\n",
MAXCHI, maxchi);
maxchi = MAXCHI;
}
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
double ***slDensity, int *nslices, t_topology *top, int ePBC,
int axis, int nr_grps, real *slWidth, gmx_bool bCenter,
int *index_center, int ncenter,
- gmx_bool bRelative, const gmx_output_env_t *oenv)
+ gmx_bool bRelative, const gmx_output_env_t *oenv, const char **dens_opt)
{
rvec *x0; /* coordinates without pbc */
matrix box; /* box (3x3) */
real t,
z;
real boxSz, aveBox;
+ real *den_val; /* values from which the density is calculated */
gmx_rmpbc_t gpbc = nullptr;
if (axis < 0 || axis >= DIM)
gpbc = gmx_rmpbc_init(&top->idef, ePBC, top->atoms.nr);
/*********** Start processing trajectory ***********/
+
+ snew(den_val, top->atoms.nr);
+ if (dens_opt[0][0] == 'n')
+ {
+ for (i = 0; (i < top->atoms.nr); i++)
+ {
+ den_val[i] = 1;
+ }
+ }
+ else if (dens_opt[0][0] == 'c')
+ {
+ for (i = 0; (i < top->atoms.nr); i++)
+ {
+ den_val[i] = top->atoms.atom[i].q;
+ }
+ }
+ else
+ {
+ for (i = 0; (i < top->atoms.nr); i++)
+ {
+ den_val[i] = top->atoms.atom[i].m;
+ }
+ }
+
do
{
gmx_rmpbc(gpbc, natoms, box, x0);
slice -= *nslices;
}
- (*slDensity)[n][slice] += top->atoms.atom[index[n][i]].m*invvol;
+ (*slDensity)[n][slice] += den_val[index[n][i]]*invvol;
}
}
nr_frames++;
}
sfree(x0); /* free memory used by coordinate array */
+ sfree(den_val);
}
static void plot_density(double *slDensity[], const char *afile, int nslices,
int ePBC;
int *index_center; /* index for centering group */
int **index; /* indices for all groups */
- int i;
- t_filenm fnm[] = { /* files for g_density */
+ t_filenm fnm[] = { /* files for g_density */
{ efTRX, "-f", nullptr, ffREAD },
{ efNDX, nullptr, nullptr, ffOPTRD },
{ efTPR, nullptr, nullptr, ffREAD },
axis = toupper(axtitle[0]) - 'X';
top = read_top(ftp2fn(efTPR, NFILE, fnm), &ePBC); /* read topology file */
- if (dens_opt[0][0] == 'n')
- {
- for (i = 0; (i < top->atoms.nr); i++)
- {
- top->atoms.atom[i].m = 1;
- }
- }
- else if (dens_opt[0][0] == 'c')
- {
- for (i = 0; (i < top->atoms.nr); i++)
- {
- top->atoms.atom[i].m = top->atoms.atom[i].q;
- }
- }
snew(grpname, ngrps);
snew(index, ngrps);
{
calc_density(ftp2fn(efTRX, NFILE, fnm), index, ngx, &density, &nslices, top,
ePBC, axis, ngrps, &slWidth, bCenter, index_center, ncenter,
- bRelative, oenv);
+ bRelative, oenv, dens_opt);
}
plot_density(density, opt2fn("-o", NFILE, fnm),
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
snew(x[prev], natoms);
- if (bMol)
+ // if com is requested, the data structure needs to be large enough to do this
+ // to prevent overflow
+ if (bMol && !gnx_com)
{
curr->ncoords = curr->nmol;
snew(xa[0], curr->ncoords);
/* set the time */
curr->time[curr->nframes] = t - curr->t0;
- /* for the first frame, the previous frame is a copy of the first frame */
- if (bFirst)
- {
- std::memcpy(xa[prev], xa[cur], curr->ncoords*sizeof(xa[prev][0]));
- bFirst = FALSE;
- }
-
/* make the molecules whole */
if (bMol)
{
}
/* calculate the molecules' centers of masses and put them into xa */
+ // NOTE and WARNING! If above both COM removal and individual molecules have been
+ // requested, x and xa point to the same memory, and the coordinate
+ // data becomes overwritten by the molecule data.
if (bMol)
{
calc_mol_com(gnx[0], index[0], &top->mols, &top->atoms, x[cur], xa[cur]);
}
+ /* for the first frame, the previous frame is a copy of the first frame */
+ if (bFirst)
+ {
+ std::memcpy(xa[prev], xa[cur], curr->ncoords*sizeof(xa[prev][0]));
+ bFirst = FALSE;
+ }
+
/* first remove the periodic boundary condition crossings */
for (i = 0; i < curr->ngrp; i++)
{
/* calculate the center of mass */
if (gnx_com)
{
- prep_data(bMol, gnx_com[0], index_com[0], xa[cur], xa[prev], box);
calc_com(bMol, gnx_com[0], index_com[0], xa[cur], xa[prev], box,
&top->atoms, com);
}
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
int *nfwritten = nullptr;
int ndrop = 0, ncol, drop0 = 0, drop1 = 0, dropuse = 0;
double **dropval;
- real tshift = 0, t0 = -1, dt = 0.001, prec;
+ real tshift = 0, dt = -1, prec;
gmx_bool bFit, bPFit, bReset;
int nfitdim;
gmx_rmpbc_t gpbc = nullptr;
if (bHaveFirstFrame)
{
- set_trxframe_ePBC(&fr, ePBC);
+ if (bTDump)
+ {
+ // Determine timestep (assuming constant spacing for now) if we
+ // need to dump frames based on time. This is required so we do not
+ // skip the first frame if that was the one that should have been dumped
+ double firstFrameTime = fr.time;
+ if (read_next_frame(oenv, trxin, &fr))
+ {
+ dt = fr.time - firstFrameTime;
+ bDTset = TRUE;
+ if (dt <= 0)
+ {
+ fprintf(stderr, "Warning: Frame times are not incrementing - will dump first frame.\n");
+ }
+ }
+ // Now close and reopen so we are at first frame again
+ close_trx(trxin);
+ done_frame(&fr);
+ // Reopen at first frame (We already know it exists if we got here)
+ read_first_frame(oenv, &trxin, in_file, &fr, flags);
+ }
+ set_trxframe_ePBC(&fr, ePBC);
natoms = fr.natoms;
if (bSetTime)
frame = 0;
outframe = 0;
model_nr = 0;
- bDTset = FALSE;
/* Main loop over frames */
do
if (bTDump)
{
- /* determine timestep */
- if (t0 == -1)
+ // If we could not read two frames or times are not incrementing
+ // we have almost no idea what to do,
+ // but dump the first frame so output is not broken.
+ if (dt <= 0 || !bDTset)
{
- t0 = fr.time;
+ bDumpFrame = true;
}
else
{
- if (!bDTset)
- {
- dt = fr.time-t0;
- bDTset = TRUE;
- }
+ // Dump the frame if we are less than half a frame time
+ // below it. This will also ensure we at least dump a
+ // somewhat reasonable frame if the spacing is unequal
+ // and we have overrun the frame time. Once we dump one
+ // frame based on time we quit, so it does not matter
+ // that this might be true for all subsequent frames too.
+ bDumpFrame = (fr.time > tdump-0.5*dt);
}
- /* This is not very elegant, as one can not dump a frame after
- * a timestep with is more than twice as small as the first one. */
- bDumpFrame = (fr.time > tdump-0.5*dt) && (fr.time <= tdump+0.5*dt);
}
else
{
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2011,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2011,2014,2015,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "gromacs/gmxpreprocess/toputil.h"
#include "gromacs/topology/ifunc.h"
#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/smalloc.h"
/* #define DEBUG_NNB */
}
}
+/*! \brief Return true if neighbor is already present in some exclusion level
+ *
+ * To avoid exploding complexity when processing exclusions for highly
+ * connected molecules with lots of exclusions, this routine is used to
+ * check whether a particular neighbor has already been excluded at any lower
+ * bond distance, in which case we should not add it to avoid creating loops.
+ *
+ * \param nnb Valid initialized next-neighbor structure
+ * \param atom The host atom whose neighbors we are searching
+ * \param highest_order The highest-rank neighbor list to search.
+ * \param query Atom index to look for
+ *
+ * \return True if query is present as an exclusion of up to highest_order
+ * (inclusive) from atom. For instance, if highest_order is 2,
+ * the routine will return true if the query atom is already listed as
+ * first or second neighbor (exclusion) in nnb.
+ */
+static bool
+atom_is_present_in_nnb(const t_nextnb * nnb,
+ int atom,
+ int highest_order,
+ int query)
+{
+ GMX_RELEASE_ASSERT(highest_order < nnb->nrex, "Inconsistent nnb search parameters");
+
+ for (int order = 0; order <= highest_order; order++)
+ {
+ for (int m = 0; m < nnb->nrexcl[atom][order]; m++)
+ {
+ if (nnb->a[atom][order][m] == query)
+ {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
static void do_gen(int nrbonds, /* total number of bonds in s */
sortable *s, /* bidirectional list of bonds */
t_nextnb *nnb) /* the tmp storage for excl */
/* store all atoms in nb's n-th list into i's n+1-th list */
for (k = 0; (k < nnb->nrexcl[nb][n]); k++)
{
- if (i != nnb->a[nb][n][k])
+ // Only add if it is not already present as a closer neighbor
+ // to avoid exploding complexity for highly connected molecules
+ // with high exclusion order
+ if (!atom_is_present_in_nnb(nnb, i, n, nnb->a[nb][n][k]))
{
add_nnb(nnb, n+1, i, nnb->a[nb][n][k]);
}
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
const char *fn = opt2fn("-r", NFILE, fnm);
const char *fnB;
+ if (!gmx_fexist(fn))
+ {
+ gmx_fatal(FARGS,
+ "Cannot find position restraint file %s (option -r).\n"
+ "From GROMACS-2018, you need to specify the position restraint "
+ "coordinate files explicitly to avoid mistakes, although you can "
+ "still use the same file as you specify for the -c option.", fn);
+ }
+
if (opt2bSet("-rb", NFILE, fnm))
{
fnB = opt2fn("-rb", NFILE, fnm);
+ if (!gmx_fexist(fnB))
+ {
+ gmx_fatal(FARGS,
+ "Cannot find B-state position restraint file %s (option -rb).\n"
+ "From GROMACS-2018, you need to specify the position restraint "
+ "coordinate files explicitly to avoid mistakes, although you can "
+ "still use the same file as you specify for the -c option.", fnB);
+ }
}
else
{
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/smalloc.h"
+#include "gromacs/utility/stringutil.h"
/* Number of control atoms for each 'add' type.
*
aah = *ah;
while (fgets2(line, STRLEN-1, in))
{
+ // Skip lines that are only whitespace
+ if (gmx::countWords(line) == 0)
+ {
+ continue;
+ }
if (sscanf(line, "%s%n", buf, &n) != 1)
{
fprintf(stderr, "Error in hdb file: nah = %d\nline = '%s'\n",
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/* First the N terminus */
if (nNtdb > 0)
{
- tdblist = filter_ter(nrtp, restp, nNtdb, ntdb,
+ tdblist = filter_ter(nNtdb, ntdb,
*pdba->resinfo[cc->r_start[i]].name,
- *pdba->resinfo[cc->r_start[i]].rtp,
&ntdblist);
if (ntdblist == 0)
{
/* And the C terminus */
if (nCtdb > 0)
{
- tdblist = filter_ter(nrtp, restp, nCtdb, ctdb,
+ tdblist = filter_ter(nCtdb, ctdb,
*pdba->resinfo[cc->r_end[i]].name,
- *pdba->resinfo[cc->r_end[i]].rtp,
&ntdblist);
if (ntdblist == 0)
{
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
return ntb;
}
-t_hackblock **filter_ter(int nrtp, t_restp rtp[],
- int nb, t_hackblock tb[],
+t_hackblock **filter_ter(int nb, t_hackblock tb[],
const char *resname,
- const char *rtpname,
int *nret)
{
// TODO Four years later, no force fields have ever used this, so decide status of this feature
* Remember to free the list when you are done with it...
*/
- t_restp * restp;
int i, j, n, none_idx;
gmx_bool found;
- char *rtpname_match, *s;
+ char *s;
t_hackblock **list;
- rtpname_match = search_rtp(rtpname, nrtp, rtp);
- restp = get_restp(rtpname_match, nrtp, rtp);
-
n = 0;
list = nullptr;
found = FALSE;
do
{
- /* The residue name should appear in a tdb file with the same base name
- * as the file containing the rtp entry.
- * This makes termini selection for different molecule types
- * much cleaner.
- */
- if (gmx_strcasecmp(restp->filebase, tb[i].filebase) == 0 &&
- gmx_strncasecmp(resname, s, 3) == 0)
+ if (gmx_strncasecmp(resname, s, 3) == 0)
{
found = TRUE;
srenew(list, n+1);
for (i = 0; i < nb; i++)
{
s = tb[i].name;
- /* The residue name should appear in a tdb file with the same base name
- * as the file containing the rtp entry.
- * This makes termini selection for different molecule types
- * much cleaner.
- */
- if (gmx_strcasecmp(restp->filebase, tb[i].filebase) == 0)
+ if (!gmx_strcasecmp("None", s))
{
- if (!gmx_strcasecmp("None", s))
- {
- none_idx = i;
- }
- else
+ none_idx = i;
+ }
+ else
+ {
+ /* Time to see if there's a generic terminus that matches.
+ Is there a hyphen? */
+ char *c = strchr(s, '-');
+
+ /* A conjunction hyphen normally indicates a residue-specific
+ terminus, which is named like "GLY-COOH". A generic terminus
+ won't have a hyphen. */
+ bool bFoundAnyHyphen = (c != nullptr);
+ /* '-' as the last character indicates charge, so if that's
+ the only one found e.g. "COO-", then it was not a conjunction
+ hyphen, so this is a generic terminus */
+ bool bOnlyFoundChargeHyphen = (bFoundAnyHyphen &&
+ *(c+1) == '\0');
+ /* Thus, "GLY-COO-" is not recognized as a generic terminus. */
+ bool bFoundGenericTerminus = !bFoundAnyHyphen || bOnlyFoundChargeHyphen;
+ if (bFoundGenericTerminus)
{
- /* Time to see if there's a generic terminus that matches.
- Is there a hyphen? */
- char *c = strchr(s, '-');
-
- /* A conjunction hyphen normally indicates a residue-specific
- terminus, which is named like "GLY-COOH". A generic terminus
- won't have a hyphen. */
- bool bFoundAnyHyphen = (c != nullptr);
- /* '-' as the last character indicates charge, so if that's
- the only one found e.g. "COO-", then it was not a conjunction
- hyphen, so this is a generic terminus */
- bool bOnlyFoundChargeHyphen = (bFoundAnyHyphen &&
- *(c+1) == '\0');
- /* Thus, "GLY-COO-" is not recognized as a generic terminus. */
- bool bFoundGenericTerminus = !bFoundAnyHyphen || bOnlyFoundChargeHyphen;
- if (bFoundGenericTerminus)
+ /* Check that we haven't already added a residue-specific version
+ * of this terminus.
+ */
+ for (j = 0; j < n && strstr((*list[j]).name, s) == nullptr; j++)
{
- /* Check that we haven't already added a residue-specific version
- * of this terminus.
- */
- for (j = 0; j < n && strstr((*list[j]).name, s) == nullptr; j++)
- {
- ;
- }
- if (j == n)
- {
- srenew(list, n+1);
- list[n] = &(tb[i]);
- n++;
- }
+ ;
+ }
+ if (j == n)
+ {
+ srenew(list, n+1);
+ list[n] = &(tb[i]);
+ n++;
}
}
}
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2011,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2011,2014,2015,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
t_hackblock **tbptr, gpp_atomtype_t atype);
/* Read database for N&C terminal hacking */
-t_hackblock **filter_ter(int nrtp, t_restp rtp[],
- int nb, t_hackblock tb[],
+t_hackblock **filter_ter(int nb, t_hackblock tb[],
const char *resname,
- const char *rtpname,
int *nret);
/* Return a list of pointers to blocks that match residue name */
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2010,2011,2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2010,2011,2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "gromacs/hardware/gpu_hw_info.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
if (cudaErrorInvalidDeviceFunction == stat)
{
gmx_fatal(FARGS,
- "The %s binary was not compiled for the selected GPU "
- "(device ID #%d, compute capability %d.%d).\n"
- "When selecting target GPU architectures with GMX_CUDA_TARGET_SM, "
- "make sure to pass the appropriate architecture(s) corresponding to the "
- "device(s) intended to be used (see in the GPU info listing) or alternatively "
- "pass in GMX_CUDA_TARGET_COMPUTE an appropriate virtual architecture. ",
+ "The %s binary does not include support for the CUDA architecture "
+ "of the selected GPU (device ID #%d, compute capability %d.%d). "
+ "By default, GROMACS supports all common architectures, so your GPU "
+ "might be rare, or some architectures were disabled in the build. "
+ "Consult the install guide for how to use the GMX_CUDA_TARGET_SM and "
+ "GMX_CUDA_TARGET_COMPUTE CMake variables to add this architecture.",
gmx::getProgramContext().displayName(), devInfo->id,
devInfo->prop.major, devInfo->prop.minor);
}
}
}
-bool canDetectGpus()
+bool canDetectGpus(std::string *errorMessage)
{
cudaError_t stat;
int driverVersion = -1;
gmx::formatString("An unexpected value was returned from cudaDriverGetVersion %s: %s",
cudaGetErrorName(stat), cudaGetErrorString(stat)).c_str());
bool foundDriver = (driverVersion > 0);
- return foundDriver;
+ if (!foundDriver)
+ {
+ // Can't detect GPUs if there is no driver
+ if (errorMessage != nullptr)
+ {
+ errorMessage->assign("No valid CUDA driver found");
+ }
+ return false;
+ }
+
+ int numDevices;
+ stat = cudaGetDeviceCount(&numDevices);
+ if (stat != cudaSuccess)
+ {
+ if (errorMessage != nullptr)
+ {
+ /* cudaGetDeviceCount failed which means that there is
+ * something wrong with the machine: driver-runtime
+ * mismatch, all GPUs being busy in exclusive mode,
+ * invalid CUDA_VISIBLE_DEVICES, or some other condition
+ * which should result in GROMACS issuing a warning and
+ * falling back to CPUs. */
+ errorMessage->assign(cudaGetErrorString(stat));
+ }
+
+ // Consume the error now that we have prepared to handle
+ // it. This stops it reappearing next time we check for
+ // errors. Note that if CUDA_VISIBLE_DEVICES does not contain
+ // valid devices, then cudaGetLastError returns the
+ // (undocumented) cudaErrorNoDevice, but this should not be a
+ // problem as there should be no future CUDA API calls.
+ // NVIDIA bug report #2038718 has been filed.
+ cudaGetLastError();
+ // Can't detect GPUs
+ return false;
+ }
+
+ // We don't actually use numDevices here, that's not the job of
+ // this function.
+ return true;
}
-int detect_gpus(gmx_gpu_info_t *gpu_info, char *err_str)
+void findGpus(gmx_gpu_info_t *gpu_info)
{
- int i, ndev, checkres, retval;
+ int i, ndev, checkres;
cudaError_t stat;
cudaDeviceProp prop;
gmx_device_info_t *devs;
assert(gpu_info);
- assert(err_str);
gpu_info->n_dev_compatible = 0;
stat = cudaGetDeviceCount(&ndev);
if (stat != cudaSuccess)
{
- const char *s;
-
- /* cudaGetDeviceCount failed which means that there is something
- * wrong with the machine: driver-runtime mismatch, all GPUs being
- * busy in exclusive mode, or some other condition which should
- * result in us issuing a warning a falling back to CPUs. */
- retval = -1;
- s = cudaGetErrorString(stat);
- strncpy(err_str, s, STRLEN*sizeof(err_str[0]));
-
- // Consume the error now that we have prepared to handle
- // it. This stops it reappearing next time we check for errors.
- cudaGetLastError();
+ GMX_THROW(gmx::InternalError("Invalid call of findGpus() when CUDA API returned an error, perhaps "
+ "canDetectGpus() was not called appropriately beforehand."));
}
- else
+
+ snew(devs, ndev);
+ for (i = 0; i < ndev; i++)
{
- snew(devs, ndev);
- for (i = 0; i < ndev; i++)
- {
- checkres = is_gmx_supported_gpu_id(i, &prop);
+ checkres = is_gmx_supported_gpu_id(i, &prop);
- devs[i].id = i;
- devs[i].prop = prop;
- devs[i].stat = checkres;
+ devs[i].id = i;
+ devs[i].prop = prop;
+ devs[i].stat = checkres;
- if (checkres == egpuCompatible)
- {
- gpu_info->n_dev_compatible++;
- }
+ if (checkres == egpuCompatible)
+ {
+ gpu_info->n_dev_compatible++;
}
- retval = 0;
}
+ GMX_RELEASE_ASSERT(cudaSuccess == cudaPeekAtLastError(), "Should be cudaSuccess");
gpu_info->n_dev = ndev;
gpu_info->gpu_dev = devs;
-
- return retval;
}
std::vector<int> getCompatibleGpus(const gmx_gpu_info_t &gpu_info)
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2010, The GROMACS development team.
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include <cstdio>
+#include <string>
#include <vector>
#include "gromacs/gpu_utils/gpu_macros.h"
/*! \brief Return whether GPUs can be detected
*
* Returns true when this is a build of \Gromacs configured to support
- * GPU usage, and a valid device driver or ICD was detected by the GPU
- * runtime.
+ * GPU usage, and a valid device driver, ICD, and/or runtime was detected.
+ *
+ * \param[out] errorMessage When returning false and non-nullptr was passed,
+ * the string contains a descriptive message about
+ * why GPUs cannot be detected.
*
* Does not throw. */
GPU_FUNC_QUALIFIER
-bool canDetectGpus() GPU_FUNC_TERM_WITH_RETURN(false);
+bool canDetectGpus(std::string *GPU_FUNC_ARGUMENT(errorMessage)) GPU_FUNC_TERM_WITH_RETURN(false);
-/*! \brief Detect all GPUs in the system.
+/*! \brief Find all GPUs in the system.
*
- * Will detect every GPU supported by the device driver in use. If
- * the device driver is missing or unsuitable, returns the same error
- * as for "no valid devices detected," so generally calling code
- * should have checked the return value from canDetectGpus() first,
- * in order to understand the behaviour of this routine. This routine
+ * Will detect every GPU supported by the device driver in use. Must
+ * only be called if canDetectGpus() has returned true. This routine
* also checks for the compatibility of each and fill the
* gpu_info->gpu_dev array with the required information on each the
* device: ID, device properties, status.
*
* \param[in] gpu_info pointer to structure holding GPU information.
- * \param[out] err_str The error message of any GPU API error that caused
- * the detection to fail (if there was any). The memory
- * the pointer points to should be managed externally.
- * \returns non-zero if the detection encountered a failure, zero otherwise.
+ *
+ * \throws InternalError if a GPU API returns an unexpected failure (because
+ * the call to canDetectGpus() should always prevent this occurring)
*/
GPU_FUNC_QUALIFIER
-int detect_gpus(struct gmx_gpu_info_t *GPU_FUNC_ARGUMENT(gpu_info), char *GPU_FUNC_ARGUMENT(err_str)) GPU_FUNC_TERM_WITH_RETURN(-1)
+void findGpus(struct gmx_gpu_info_t *GPU_FUNC_ARGUMENT(gpu_info)) GPU_FUNC_TERM
/*! \brief Return a container of the detected GPUs that are compatible.
*
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"
-/*! \brief Helper macro for error handling */
-#define CALLOCLFUNC_LOGERROR(func, err_str, retval) { \
- cl_int opencl_ret = func; \
- if (CL_SUCCESS != opencl_ret) \
- { \
- sprintf(err_str, "OpenCL error %d", opencl_ret); \
- retval = -1; \
- } \
- else{ \
- retval = 0; } \
-}
-
-
/*! \brief Return true if executing on compatible OS for AMD OpenCL.
*
* This is assumed to be true for OS X version of at least 10.10.4 and
//! This function is documented in the header file
-bool canDetectGpus()
+bool canDetectGpus(std::string *errorMessage)
{
- cl_uint numPlatforms = -1;
+ cl_uint numPlatforms;
cl_int status = clGetPlatformIDs(0, nullptr, &numPlatforms);
GMX_ASSERT(status != CL_INVALID_VALUE, "Incorrect call of clGetPlatformIDs detected");
+#ifdef cl_khr_icd
if (status == CL_PLATFORM_NOT_FOUND_KHR)
{
// No valid ICDs found
+ if (errorMessage != nullptr)
+ {
+ errorMessage->assign("No valid OpenCL driver found");
+ }
return false;
}
+#endif
GMX_RELEASE_ASSERT(status == CL_SUCCESS,
gmx::formatString("An unexpected value was returned from clGetPlatformIDs %u: %s",
status, ocl_get_error_string(status).c_str()).c_str());
bool foundPlatform = (numPlatforms > 0);
+ if (!foundPlatform && errorMessage != nullptr)
+ {
+ errorMessage->assign("No OpenCL platforms found even though the driver was valid");
+ }
return foundPlatform;
}
//! This function is documented in the header file
-int detect_gpus(gmx_gpu_info_t *gpu_info, char *err_str)
+void findGpus(gmx_gpu_info_t *gpu_info)
{
- int retval;
cl_uint ocl_platform_count;
cl_platform_id *ocl_platform_ids;
cl_device_type req_dev_type = CL_DEVICE_TYPE_GPU;
- retval = 0;
ocl_platform_ids = NULL;
if (getenv("GMX_OCL_FORCE_CPU") != NULL)
while (1)
{
- CALLOCLFUNC_LOGERROR(clGetPlatformIDs(0, NULL, &ocl_platform_count), err_str, retval)
- if (0 != retval)
+ cl_int status = clGetPlatformIDs(0, NULL, &ocl_platform_count);
+ if (CL_SUCCESS != status)
{
- break;
+ GMX_THROW(gmx::InternalError(gmx::formatString("An unexpected value %u was returned from clGetPlatformIDs: ",
+ status) + ocl_get_error_string(status)));
}
if (1 > ocl_platform_count)
{
+ // TODO this should have a descriptive error message that we only support one OpenCL platform
break;
}
snew(ocl_platform_ids, ocl_platform_count);
- CALLOCLFUNC_LOGERROR(clGetPlatformIDs(ocl_platform_count, ocl_platform_ids, NULL), err_str, retval)
- if (0 != retval)
+ status = clGetPlatformIDs(ocl_platform_count, ocl_platform_ids, NULL);
+ if (CL_SUCCESS != status)
{
- break;
+ GMX_THROW(gmx::InternalError(gmx::formatString("An unexpected value %u was returned from clGetPlatformIDs: ",
+ status) + ocl_get_error_string(status)));
}
for (unsigned int i = 0; i < ocl_platform_count; i++)
}
sfree(ocl_platform_ids);
-
- return retval;
}
//! This function is documented in the header file
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include <cstdio>
+#include <algorithm>
#include <string>
#include <vector>
return includePathOption;
}
+/*! \brief Replace duplicated spaces with a single one in a string
+ *
+ * Only the first character will be kept for multiple adjacent characters that
+ * are both identical and where the first one returns true for isspace().
+ *
+ * \param str String that will be modified.
+ */
+static void
+removeExtraSpaces(std::string *str)
+{
+ GMX_RELEASE_ASSERT(str != nullptr, "A pointer to an actual string must be provided");
+ std::string::iterator newEnd =
+ std::unique( str->begin(), str->end(), [ = ](char a, char b){ return isspace(a) && (a == b); } );
+ str->erase(newEnd, str->end());
+}
+
/*! \brief Builds a string with build options for the OpenCL kernels
*
* \throws std::bad_alloc if out of memory. */
preprocessorOptions += ' ';
preprocessorOptions += makeKernelIncludePathOption(kernelRootPath);
+ // Mac OS (and maybe some other implementations) does not accept double spaces in options
+ removeExtraSpaces(&preprocessorOptions);
+
return preprocessorOptions;
}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2017, by the GROMACS development team, led by
+ * Copyright (c) 2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/hardware/gpu_hw_info.h"
-#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/smalloc.h"
namespace gmx
GpuTest::GpuTest()
{
snew(gpuInfo_, 1);
- char errorString[STRLEN];
- detect_gpus(gpuInfo_, errorString);
+ if (canDetectGpus(nullptr))
+ {
+ findGpus(gpuInfo_);
+ }
+ // Failing to find valid GPUs does not require further action
}
GpuTest::~GpuTest()
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2017, by the GROMACS development team, led by
+ * Copyright (c) 2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
TYPED_TEST(HostAllocatorTest, TransfersWithPinningWorkWithCuda)
{
+ if (!this->haveValidGpus())
+ {
+ return;
+ }
+
typename TestFixture::VectorType input;
changePinningPolicy(&input, PinningPolicy::CanBePinned);
this->fillInput(&input);
TYPED_TEST(HostAllocatorTest, ManualPinningOperationsWorkWithCuda)
{
+ if (!this->haveValidGpus())
+ {
+ return;
+ }
+
typename TestFixture::VectorType input;
changePinningPolicy(&input, PinningPolicy::CanBePinned);
EXPECT_FALSE(isPinned(input));
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2017, by the GROMACS development team, led by
+ * Copyright (c) 2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
TEST_F(PinnedMemoryCheckerTest, DefaultContainerIsRecognized)
{
+ if (!haveValidGpus())
+ {
+ return;
+ }
+
std::vector<real> dummy(3, 1.5);
EXPECT_FALSE(isHostMemoryPinned(dummy.data()));
}
TEST_F(PinnedMemoryCheckerTest, NonpinnedContainerIsRecognized)
{
+ if (!haveValidGpus())
+ {
+ return;
+ }
+
HostVector<real> dummy(3, 1.5);
changePinningPolicy(&dummy, PinningPolicy::CannotBePinned);
EXPECT_FALSE(isHostMemoryPinned(dummy.data()));
TEST_F(PinnedMemoryCheckerTest, PinnedContainerIsRecognized)
{
+ if (!haveValidGpus())
+ {
+ return;
+ }
+
HostVector<real> dummy(3, 1.5);
changePinningPolicy(&dummy, PinningPolicy::CanBePinned);
EXPECT_TRUE(isHostMemoryPinned(dummy.data()));
TEST_F(PinnedMemoryCheckerTest, DefaultCBufferIsRecognized)
{
+ if (!haveValidGpus())
+ {
+ return;
+ }
+
real *dummy;
snew(dummy, 3);
EXPECT_FALSE(isHostMemoryPinned(dummy));
TEST_F(PinnedMemoryCheckerTest, PinnedCBufferIsRecognized)
{
+ if (!haveValidGpus())
+ {
+ return;
+ }
+
real *dummy = nullptr;
pmalloc((void **)&dummy, 3 * sizeof(real));
EXPECT_TRUE(isHostMemoryPinned(dummy));
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2017, by the GROMACS development team, led by
+ * Copyright (c) 2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
static constexpr Architecture c_architecture =
#if GMX_IS_X86_32 || GMX_IS_X86_64
Architecture::X86;
-#elif defined __arm__ || defined __arm || defined _M_ARM || defined __aarch64_
+#elif defined __arm__ || defined __arm || defined _M_ARM || defined __aarch64__
Architecture::Arm;
#elif defined __powerpc__ || defined __ppc__ || defined __PPC__
Architecture::PowerPC;
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
{
*brand = cpuInfo.at("Processor");
}
+ else if (cpuInfo.count("model name"))
+ {
+ *brand = cpuInfo.at("model name");
+ }
+
if (cpuInfo.count("CPU architecture"))
{
*family = std::strtol(cpuInfo.at("CPU architecture").c_str(), nullptr, 10);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
bool gpusCanBeDetected = false;
if (isMasterRankOfNode || isOpenclPpRank)
{
- gpusCanBeDetected = canDetectGpus();
- // No need to tell the user anything at this point, they get a
- // hardware report later.
- }
-
- if (gpusCanBeDetected)
- {
- char detection_error[STRLEN] = "", sbuf[STRLEN];
-
- if (detect_gpus(&hwinfo_g->gpu_info, detection_error) != 0)
+ std::string errorMessage;
+ gpusCanBeDetected = canDetectGpus(&errorMessage);
+ if (!gpusCanBeDetected)
{
- if (detection_error[0] != '\0')
- {
- sprintf(sbuf, ":\n %s\n", detection_error);
- }
- else
- {
- sprintf(sbuf, ".");
- }
GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
- "NOTE: Error occurred during GPU detection%s"
+ "NOTE: GPUs cannot be detected:\n"
+ " %s\n"
" Can not use GPU acceleration, will fall back to CPU kernels.",
- sbuf);
+ errorMessage.c_str());
}
}
+ if (gpusCanBeDetected)
+ {
+ findGpus(&hwinfo_g->gpu_info);
+ // No need to tell the user anything at this point, they get a
+ // hardware report later.
+ }
+
#if GMX_LIB_MPI
if (!isOpenclPpRank)
{
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
const int nfa1 = 4;
int i, iu, s;
int type;
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) ai[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) aj[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) ak[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) coeff[2*GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ai[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t aj[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ak[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real coeff[2*GMX_SIMD_REAL_WIDTH];
SimdReal deg2rad_S(DEG2RAD);
SimdReal xi_S, yi_S, zi_S;
SimdReal xj_S, yj_S, zj_S;
SimdReal cik_S, cii_S, ckk_S;
SimdReal f_ix_S, f_iy_S, f_iz_S;
SimdReal f_kx_S, f_ky_S, f_kz_S;
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) pbc_simd[9*GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real pbc_simd[9*GMX_SIMD_REAL_WIDTH];
set_pbc_simd(pbc, pbc_simd);
int gmx_unused *global_atom_index)
{
constexpr int nfa1 = 4;
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) ai[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) aj[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) ak[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) coeff[4*GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) pbc_simd[9*GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ai[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t aj[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ak[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real coeff[4*GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real pbc_simd[9*GMX_SIMD_REAL_WIDTH];
set_pbc_simd(pbc, pbc_simd);
const int nfa1 = 5;
int i, iu, s;
int type;
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) ai[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) aj[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) ak[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) al[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) buf[3*GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ai[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t aj[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ak[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t al[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real buf[3*GMX_SIMD_REAL_WIDTH];
real *cp, *phi0, *mult;
SimdReal deg2rad_S(DEG2RAD);
SimdReal p_S, q_S;
SimdReal sin_S, cos_S;
SimdReal mddphi_S;
SimdReal sf_i_S, msf_l_S;
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) pbc_simd[9*GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real pbc_simd[9*GMX_SIMD_REAL_WIDTH];
/* Extract aligned pointer for parameters and variables */
cp = buf + 0*GMX_SIMD_REAL_WIDTH;
const int nfa1 = 5;
int i, iu, s, j;
int type;
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) ai[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) aj[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) ak[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) al[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) parm[NR_RBDIHS*GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ai[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t aj[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ak[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t al[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real parm[NR_RBDIHS*GMX_SIMD_REAL_WIDTH];
SimdReal p_S, q_S;
SimdReal phi_S;
SimdReal parm_S, c_S;
SimdReal sin_S, cos_S;
SimdReal sf_i_S, msf_l_S;
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) pbc_simd[9*GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real pbc_simd[9*GMX_SIMD_REAL_WIDTH];
SimdReal pi_S(M_PI);
SimdReal one_S(1.0);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
T twelve(12);
T ef(scale_factor);
- const int align = 16;
- GMX_ASSERT(pack_size <= align, "align should be increased");
- GMX_ALIGNED(int, align) ai[pack_size];
- GMX_ALIGNED(int, align) aj[pack_size];
- GMX_ALIGNED(real, align) coeff[3*pack_size];
+#if GMX_SIMD_HAVE_REAL
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ai[pack_size];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t aj[pack_size];
+ alignas(GMX_SIMD_ALIGNMENT) real coeff[3*pack_size];
+#else
+ std::int32_t ai[pack_size];
+ std::int32_t aj[pack_size];
+ real coeff[3*pack_size];
+#endif
/* nbonds is #pairs*nfa1, here we step pack_size pairs */
for (int i = 0; i < nbonds; i += pack_size*nfa1)
* at once for the angles and dihedrals as well.
*/
#if GMX_SIMD
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) pbc_simd[9*GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real pbc_simd[9*GMX_SIMD_REAL_WIDTH];
set_pbc_simd(pbc, pbc_simd);
do_pairs_simple<SimdReal, GMX_SIMD_REAL_WIDTH,
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
{
assert(b0 % GMX_SIMD_REAL_WIDTH == 0);
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) offset2[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset2[GMX_SIMD_REAL_WIDTH];
for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
{
SimdReal x1_S, y1_S, z1_S;
SimdReal rx_S, ry_S, rz_S, n2_S, il_S;
SimdReal fx_S, fy_S, fz_S, ip_S, rhs_S;
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) offset0[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) offset1[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset0[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset1[GMX_SIMD_REAL_WIDTH];
for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
{
* The only difference is that we always call pbc code, as with SIMD
* the overhead of pbc computation (when not needed) is small.
*/
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) pbc_simd[9*GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real pbc_simd[9*GMX_SIMD_REAL_WIDTH];
/* Convert the pbc struct for SIMD */
set_pbc_simd(pbc, pbc_simd);
real * gmx_restrict sol)
{
assert(b0 % GMX_SIMD_REAL_WIDTH == 0);
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) offset2[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset2[GMX_SIMD_REAL_WIDTH];
for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
{
SimdReal x1_S, y1_S, z1_S;
SimdReal rx_S, ry_S, rz_S, n2_S, il_S;
SimdReal rxp_S, ryp_S, rzp_S, ip_S, rhs_S;
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) offset0[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) offset1[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset0[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset1[GMX_SIMD_REAL_WIDTH];
for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
{
SimdReal x1_S, y1_S, z1_S;
SimdReal rx_S, ry_S, rz_S, n2_S;
SimdReal len_S, len2_S, dlen2_S, lc_S, blc_S;
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) offset0[GMX_SIMD_REAL_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) offset1[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset0[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset1[GMX_SIMD_REAL_WIDTH];
for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
{
* The only difference is that we always call pbc code, as with SIMD
* the overhead of pbc computation (when not needed) is small.
*/
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) pbc_simd[9*GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real pbc_simd[9*GMX_SIMD_REAL_WIDTH];
/* Convert the pbc struct for SIMD */
set_pbc_simd(pbc, pbc_simd);
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
if (settled->bUseSimd)
{
/* Convert the pbc struct for SIMD */
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) pbcSimd[9*GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real pbcSimd[9*GMX_SIMD_REAL_WIDTH];
set_pbc_simd(pbc, pbcSimd);
settleTemplateWrapper<SimdReal, SimdBool, GMX_SIMD_REAL_WIDTH,
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
GMX_LOG(mdlog.warning);
}
-/*! \brief Detect and warn about oversubscription of cores.
- *
- * \todo This could probably live elsewhere, since it is not specifc
- * to OpenMP, and only needs modth.gnth.
- *
- * \todo Enable this for separate PME nodes as well! */
-static void
-issueOversubscriptionWarning(const gmx::MDLogger &mdlog,
- const t_commrec *cr,
- int nthreads_hw_avail,
- int nppn,
- gmx_bool bSepPME)
-{
- char sbuf[STRLEN], sbuf1[STRLEN], sbuf2[STRLEN];
-
- if (bSepPME || 0 != cr->rank_pp_intranode)
- {
- return;
- }
-
- if (modth.gnth*nppn > nthreads_hw_avail)
- {
- sprintf(sbuf, "threads");
- sbuf1[0] = '\0';
- sprintf(sbuf2, "O");
-#if GMX_MPI
- if (modth.gnth == 1)
- {
-#if GMX_THREAD_MPI
- sprintf(sbuf, "thread-MPI threads");
-#else
- sprintf(sbuf, "MPI processes");
- sprintf(sbuf1, " per rank");
- sprintf(sbuf2, "On rank %d: o", cr->sim_nodeid);
-#endif
- }
-#endif
- GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
- "WARNING: %sversubscribing the available %d logical CPU cores%s with %d %s.\n"
- " This will cause considerable performance loss!",
- sbuf2, nthreads_hw_avail, sbuf1, nppn*modth.gnth, sbuf);
- }
-}
-
void gmx_omp_nthreads_init(const gmx::MDLogger &mdlog, t_commrec *cr,
int nthreads_hw_avail,
int omp_nthreads_req,
#endif
reportOpenmpSettings(mdlog, cr, bOMP, bFullOmpSupport, bSepPME);
- issueOversubscriptionWarning(mdlog, cr, nthreads_hw_avail, nppn, bSepPME);
}
int gmx_omp_nthreads_get(int mod)
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
if (bStopCM)
{
check_cm_grp(fplog, vcm, ir, 1);
- /* Don't pass x with linear modes to avoid correction of the initial
- * coordinates for the initial COM velocity.
+ /* At initialization, do not pass x with acceleration-correction mode
+ * to avoid (incorrect) correction of the initial coordinates.
*/
+ rvec *xPtr = nullptr;
+ if (vcm->mode == ecmANGULAR || (vcm->mode == ecmLINEAR_ACCELERATION_CORRECTION && !(flags & CGLO_INITIALIZATION)))
+ {
+ xPtr = as_rvec_array(state->x.data());
+ }
do_stopcm_grp(mdatoms->homenr, mdatoms->cVCM,
- vcm->mode == ecmANGULAR ? as_rvec_array(state->x.data()) : nullptr,
- as_rvec_array(state->v.data()), *vcm);
+ xPtr, as_rvec_array(state->v.data()), *vcm);
inc_nrnb(nrnb, eNR_STOPCM, mdatoms->homenr);
}
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
* passed to compute_globals in md.c and global_stat.
*/
+/* we are initializing and not yet in the actual MD loop */
+#define CGLO_INITIALIZATION (1<<1)
/* we are computing the kinetic energy from average velocities */
#define CGLO_EKINAVEVEL (1<<2)
/* we are removing the center of mass momenta */
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
if (write_xtc(of->fp_xtc, of->natoms_x_compressed, step, t,
state_local->box, xxtc, of->x_compression_precision) == 0)
{
- gmx_fatal(FARGS, "XTC error - maybe you are out of disk space?");
+ gmx_fatal(FARGS,
+ "XTC error. This indicates you are out of disk space, or a "
+ "simulation with major instabilities resulting in coordinates "
+ "that are NaN or too large to be represented in the XTC format.\n");
}
gmx_fwrite_tng(of->tng_low_prec,
TRUE,
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
nbat->nenergrp = n_energygroups;
if (!simple)
{
- /* Energy groups not supported yet for super-sub lists */
- if (n_energygroups > 1)
- {
- GMX_LOG(mdlog.warning).asParagraph().appendText("NOTE: With GPUs, reporting energy group contributions is not supported");
- }
- nbat->nenergrp = 1;
+ // We now check for energy groups already when starting mdrun
+ GMX_RELEASE_ASSERT(n_energygroups == 1, "GPU kernels do not support energy groups");
}
/* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
if (nbat->nenergrp > 64)
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#ifdef COUNT_PAIRS
{
int i, j;
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) tmp[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real tmp[GMX_SIMD_REAL_WIDTH];
for (i = 0; i < UNROLLI; i += 2)
{
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
SimdReal hsig_i_S2, seps_i_S2;
#else
#ifdef FIX_LJ_C
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) pvdw_c6[2*UNROLLI*UNROLLJ];
+ alignas(GMX_SIMD_ALIGNMENT) real pvdw_c6[2*UNROLLI*UNROLLJ];
real *pvdw_c12 = pvdw_c6 + UNROLLI*UNROLLJ;
#endif
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#ifdef COUNT_PAIRS
{
int i, j;
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) tmp[2*GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real tmp[2*GMX_SIMD_REAL_WIDTH];
for (i = 0; i < UNROLLI; i++)
{
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
x = nbat->x;
#ifdef FIX_LJ_C
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) pvdw_c6[2*UNROLLI*UNROLLJ];
+ alignas(GMX_SIMD_ALIGNMENT) real pvdw_c6[2*UNROLLI*UNROLLJ];
real *pvdw_c12 = pvdw_c6 + UNROLLI*UNROLLJ;
for (int jp = 0; jp < UNROLLJ; jp++)
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
{
" -DVDWNAME=_VdwLJ",
" -DLJ_COMB_GEOM -DVDWNAME=_VdwLJCombGeom",
- " -DLJ_COMB_LB -DVDWNAME=_VdwLJCombLB",
+ " -DLJ_COMB_LB -DVDWNAME=_VdwLJCombLB",
" -DLJ_FORCE_SWITCH -DVDWNAME=_VdwLJFsw",
" -DLJ_POT_SWITCH -DVDWNAME=_VdwLJPsw",
" -DLJ_EWALD_COMB_GEOM -DVDWNAME=_VdwLJEwCombGeom",
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
communication deadlocks, we always do the communication for the
report, even if we've decided not to write the report, because
how long it takes to finish the run is not important when we've
- decided not to report on the simulation performance. */
- bool printReport = SIMMASTER(cr);
+ decided not to report on the simulation performance.
- if (!walltime_accounting_get_valid_finish(walltime_accounting))
+ Further, we only report performance for dynamical integrators,
+ because those are the only ones for which we plan to
+ consider doing any optimizations. */
+ bool printReport = EI_DYNAMICS(inputrec->eI) && SIMMASTER(cr);
+
+ if (printReport && !walltime_accounting_get_valid_finish(walltime_accounting))
{
GMX_LOG(mdlog.warning).asParagraph().appendText("Simulation ended prematurely, no performance report will be written.");
printReport = false;
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
{
fprintf(fplog, "Will apply constraint COM pulling\n");
}
+ // Don't include the reference group 0 in output, so we report ngroup-1
+ GMX_RELEASE_ASSERT(pull->ngroup - 1 > 0, "The reference absolute position pull group should always be present");
fprintf(fplog, "with %d pull coordinate%s and %d group%s\n",
pull->ncoord, pull->ncoord == 1 ? "" : "s",
- pull->ngroup, pull->ngroup == 1 ? "" : "s");
+ (pull->ngroup - 1), (pull->ngroup - 1) == 1 ? "" : "s");
if (bAbs)
{
fprintf(fplog, "with an absolute reference\n");
}
bCos = FALSE;
- for (g = 0; g < pull->ngroup; g++)
+ // Don't include the reference group 0 in loop
+ for (g = 1; g < pull->ngroup; g++)
{
if (pull->group[g].params.nat > 1 &&
pull->group[g].params.pbcatom < 0)
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 4
#undef GMX_SIMD_DINT32_WIDTH
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 16 // Bytes (4*single)
#define GMX_SIMD_RSQRT_BITS 8
#define GMX_SIMD_RCP_BITS 8
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
SimdFloat * v2,
SimdFloat * v3)
{
- GMX_ALIGNED(int, GMX_SIMD_FINT32_WIDTH) ioffset[GMX_SIMD_FINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_FINT32_WIDTH];
assert(std::size_t(base) % 16 == 0);
assert(align % 4 == 0);
SimdFloat * v0,
SimdFloat * v1)
{
- GMX_ALIGNED(int, GMX_SIMD_FINT32_WIDTH) ioffset[GMX_SIMD_FINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_FINT32_WIDTH];
store(ioffset, offset);
gatherLoadTranspose<align>(base, ioffset, v0, v1);
SimdFloat * v0,
SimdFloat * v1)
{
- GMX_ALIGNED(int, GMX_SIMD_FINT32_WIDTH) ioffset[GMX_SIMD_FINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_FINT32_WIDTH];
store(ioffset, offset);
v0->simdInternal_ = vcombine_f32(vld1_f32( base + align * ioffset[0] ),
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 4
#define GMX_SIMD_DINT32_WIDTH 2
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 16 // Bytes (4*single or 2*double)
#define GMX_SIMD_RSQRT_BITS 8
#define GMX_SIMD_RCP_BITS 8
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
SimdDouble * v2,
SimdDouble * v3)
{
- GMX_ALIGNED(int, GMX_SIMD_DINT32_WIDTH) ioffset[GMX_SIMD_DINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_DINT32_WIDTH];
assert(std::size_t(base) % 16 == 0);
assert(align % 2 == 0);
SimdDouble * v0,
SimdDouble * v1)
{
- GMX_ALIGNED(int, GMX_SIMD_DINT32_WIDTH) ioffset[GMX_SIMD_DINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_DINT32_WIDTH];
assert(std::size_t(base) % 16 == 0);
assert(align % 2 == 0);
SimdDouble * v0,
SimdDouble * v1)
{
- GMX_ALIGNED(int, GMX_SIMD_DINT32_WIDTH) ioffset[GMX_SIMD_DINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_DINT32_WIDTH];
vst1_s32(ioffset, offset.simdInternal_);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 4
#define GMX_SIMD_DINT32_WIDTH 4
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 32 // Bytes (4*double)
#define GMX_SIMD_RSQRT_BITS 14
#define GMX_SIMD_RCP_BITS 14
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
SimdDInt32(std::int32_t i)
{
- GMX_ALIGNED(int, GMX_SIMD_DINT32_WIDTH) idata[GMX_SIMD_DINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t idata[GMX_SIMD_DINT32_WIDTH];
idata[0] = i;
simdInternal_ = vec_splat(vec_ldia(0, idata), 0);
}
static inline SimdDouble
frexp(SimdDouble value, SimdDInt32 * exponent)
{
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) rdata[GMX_SIMD_DOUBLE_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_DOUBLE_WIDTH) idata[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double rdata[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t idata[GMX_SIMD_DOUBLE_WIDTH];
vec_st(value.simdInternal_, 0, rdata);
static inline SimdDouble
ldexp(SimdDouble value, SimdDInt32 exponent)
{
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) rdata[GMX_SIMD_DOUBLE_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_DOUBLE_WIDTH) idata[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double rdata[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t idata[GMX_SIMD_DOUBLE_WIDTH];
vec_st(value.simdInternal_, 0, rdata);
vec_st(exponent.simdInternal_, 0, idata);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
SimdFInt32(std::int32_t i)
{
- GMX_ALIGNED(int, GMX_SIMD_FINT32_WIDTH) idata[GMX_SIMD_FINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t idata[GMX_SIMD_FINT32_WIDTH];
idata[0] = i;
simdInternal_ = vec_splat(vec_ldia(0, idata), 0);
}
static inline SimdFloat
frexp(SimdFloat value, SimdFInt32 * exponent)
{
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_FLOAT_WIDTH) idata[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t idata[GMX_SIMD_FLOAT_WIDTH];
vec_st(value.simdInternal_, 0, rdata);
static inline SimdFloat
ldexp(SimdFloat value, SimdFInt32 exponent)
{
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(int, GMX_SIMD_FLOAT_WIDTH) idata[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t idata[GMX_SIMD_FLOAT_WIDTH];
vec_st(value.simdInternal_, 0, rdata);
vec_st(exponent.simdInternal_, 0, idata);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
SimdDouble v1,
SimdDouble v2)
{
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) m0[GMX_SIMD_DOUBLE_WIDTH];
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) m1[GMX_SIMD_DOUBLE_WIDTH];
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) m2[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double m0[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double m1[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double m2[GMX_SIMD_DOUBLE_WIDTH];
store(m0, v0);
store(m1, v1);
}
else
{
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) m0[GMX_SIMD_DOUBLE_WIDTH];
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) m1[GMX_SIMD_DOUBLE_WIDTH];
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) m2[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double m0[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double m1[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double m2[GMX_SIMD_DOUBLE_WIDTH];
store(m0, v0);
store(m1, v1);
}
else
{
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) m0[GMX_SIMD_DOUBLE_WIDTH];
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) m1[GMX_SIMD_DOUBLE_WIDTH];
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) m2[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double m0[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double m1[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double m2[GMX_SIMD_DOUBLE_WIDTH];
store(m0, v0);
store(m1, v1);
SimdDouble * v2,
SimdDouble * v3)
{
- GMX_ALIGNED(int, GMX_SIMD_DOUBLE_WIDTH) ioffset[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_DOUBLE_WIDTH];
store(ioffset, simdoffset);
gatherLoadTranspose<align>(base, ioffset, v0, v1, v2, v3);
SimdDouble * v0,
SimdDouble * v1)
{
- GMX_ALIGNED(int, GMX_SIMD_DOUBLE_WIDTH) ioffset[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_DOUBLE_WIDTH];
store(ioffset, simdoffset);
gatherLoadTranspose<align>(base, ioffset, v0, v1);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
SimdFloat v1,
SimdFloat v2)
{
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) m0[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) m1[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) m2[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float m0[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float m1[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float m2[GMX_SIMD_FLOAT_WIDTH];
store(m0, v0);
store(m1, v1);
}
else
{
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) m0[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) m1[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) m2[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float m0[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float m1[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float m2[GMX_SIMD_FLOAT_WIDTH];
store(m0, v0);
store(m1, v1);
}
else
{
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) m0[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) m1[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) m2[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float m0[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float m1[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float m2[GMX_SIMD_FLOAT_WIDTH];
store(m0, v0);
store(m1, v1);
SimdFloat * v2,
SimdFloat * v3)
{
- GMX_ALIGNED(int, GMX_SIMD_FLOAT_WIDTH) ioffset[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) int ioffset[GMX_SIMD_FLOAT_WIDTH];
store(ioffset, simdoffset);
gatherLoadTranspose<align>(base, ioffset, v0, v1, v2, v3);
SimdFloat * v0,
SimdFloat * v1)
{
- GMX_ALIGNED(int, GMX_SIMD_FLOAT_WIDTH) ioffset[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) int ioffset[GMX_SIMD_FLOAT_WIDTH];
store(ioffset, simdoffset);
gatherLoadTranspose<align>(base, ioffset, v0, v1);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 4
#undef GMX_SIMD_DINT32_WIDTH
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 16 // Bytes (4*single)
#define GMX_SIMD_RSQRT_BITS 14
#define GMX_SIMD_RCP_BITS 14
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
}
else
{
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata0[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata1[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata2[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata0[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata1[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata2[GMX_SIMD_FLOAT_WIDTH];
vec_st(v0.simdInternal_, 0, rdata0);
vec_st(v1.simdInternal_, 0, rdata1);
}
else
{
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata0[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata1[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata2[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata0[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata1[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata2[GMX_SIMD_FLOAT_WIDTH];
vec_st(v0.simdInternal_, 0, rdata0);
vec_st(v1.simdInternal_, 0, rdata1);
SimdFloat * v2,
SimdFloat * v3)
{
- GMX_ALIGNED(int, GMX_SIMD_FINT32_WIDTH) ioffset[GMX_SIMD_FINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_FINT32_WIDTH];
vec_st( offset.simdInternal_, 0, ioffset);
gatherLoadTranspose<align>(base, ioffset, v0, v1, v2, v3);
SimdFloat * v0,
SimdFloat * v1)
{
- GMX_ALIGNED(int, GMX_SIMD_FINT32_WIDTH) ioffset[GMX_SIMD_FINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_FINT32_WIDTH];
vec_st( offset.simdInternal_, 0, ioffset);
gatherLoadTranspose<align>(base, ioffset, v0, v1);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#ifndef GMX_SIMD_IMPLEMENTATION_IBM_VSX_H
#define GMX_SIMD_IMPLEMENTATION_IBM_VSX_H
+// At high optimization levels, gcc 7.2 gives false
+// positives for -Wunused-but-set-parameter.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
+
// While we do our best to also test VSX with Power7, that depends on having
// access to big-endian hardware, so for the long term our focus will be
// little-endian Power8.
#include "impl_ibm_vsx_util_double.h"
#include "impl_ibm_vsx_util_float.h"
+#pragma GCC diagnostic pop
+
#endif // GMX_SIMD_IMPLEMENTATION_IBM_VSX_H
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 4
#define GMX_SIMD_DINT32_WIDTH 2
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 16 // Bytes (4*single or 2*double)
#define GMX_SIMD_RSQRT_BITS 14
#define GMX_SIMD_RCP_BITS 14
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
SimdDouble * v2,
SimdDouble * v3)
{
- GMX_ALIGNED(std::int32_t, GMX_SIMD_DINT32_WIDTH) ioffset[GMX_SIMD_DINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_DINT32_WIDTH];
store(ioffset, offset );
gatherLoadTranspose<align>(base, ioffset, v0, v1, v2, v3);
SimdDouble * v0,
SimdDouble * v1)
{
- GMX_ALIGNED(std::int32_t, GMX_SIMD_DINT32_WIDTH) ioffset[GMX_SIMD_DINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_DINT32_WIDTH];
store(ioffset, offset );
gatherLoadTranspose<align>(base, ioffset, v0, v1);
SimdDouble * v0,
SimdDouble * v1)
{
- GMX_ALIGNED(std::int32_t, GMX_SIMD_DINT32_WIDTH) ioffset[GMX_SIMD_DINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_DINT32_WIDTH];
store(ioffset, offset );
gatherLoadTranspose<align>(base, ioffset, v0, v1);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
SimdFloat * v2,
SimdFloat * v3)
{
- GMX_ALIGNED(std::int32_t, GMX_SIMD_FINT32_WIDTH) ioffset[GMX_SIMD_FINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_FINT32_WIDTH];
store(ioffset, offset );
gatherLoadTranspose<align>(base, ioffset, v0, v1, v2, v3);
SimdFloat * v0,
SimdFloat * v1)
{
- GMX_ALIGNED(std::int32_t, GMX_SIMD_FINT32_WIDTH) ioffset[GMX_SIMD_FINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_FINT32_WIDTH];
store(ioffset, offset );
gatherLoadTranspose<align>(base, ioffset, v0, v1);
SimdFloat * v0,
SimdFloat * v1)
{
- GMX_ALIGNED(std::int32_t, GMX_SIMD_FINT32_WIDTH) ioffset[GMX_SIMD_FINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_FINT32_WIDTH];
store(ioffset, offset );
gatherLoadTranspose<align>(base, ioffset, v0, v1);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2015, by the GROMACS development team, led by
+ * Copyright (c) 2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#undef GMX_SIMD_FINT32_WIDTH
#undef GMX_SIMD_DINT32_WIDTH
#undef GMX_SIMD4_WIDTH
+#define GMX_SIMD_ALIGNMENT 8 // 1*double
#undef GMX_SIMD_RSQRT_BITS
#undef GMX_SIMD_RCP_BITS
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
//! \brief The SIMD4 type is always four units wide, but this makes code more explicit
#define GMX_SIMD4_WIDTH 4
+//! \brief Required alignment in bytes for aligned load/store (always defined, even without SIMD)
+#define GMX_SIMD_ALIGNMENT 8 // 8 (1*double)
+
//! \brief Accuracy of SIMD 1/sqrt(x) lookup. Used to determine number of iterations.
#define GMX_SIMD_RSQRT_BITS 23
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 4
#define GMX_SIMD_DINT32_WIDTH 2
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 32 // Bytes (4*double for SIMD4)
#define GMX_SIMD_RSQRT_BITS 11
#define GMX_SIMD_RCP_BITS 11
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 8
#define GMX_SIMD_DINT32_WIDTH 4
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 32 // Bytes (8*single or 4*double)
#define GMX_SIMD_RSQRT_BITS 11
#define GMX_SIMD_RCP_BITS 11
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 4
#define GMX_SIMD_DINT32_WIDTH 2
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 32 // Bytes (4*double for SIMD4)
#define GMX_SIMD_RSQRT_BITS 11
#define GMX_SIMD_RCP_BITS 11
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 8
#define GMX_SIMD_DINT32_WIDTH 4
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 32 // Bytes (8*single or 4*double)
#define GMX_SIMD_RSQRT_BITS 11
#define GMX_SIMD_RCP_BITS 11
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
assert(std::size_t(base) % 32 == 0);
assert(align % 4 == 0);
- GMX_ALIGNED(int, GMX_SIMD_DINT32_WIDTH) ioffset[GMX_SIMD_DINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_DINT32_WIDTH];
_mm_store_si128( reinterpret_cast<__m128i *>(ioffset), offset.simdInternal_);
v0->simdInternal_ = _mm256_load_pd(base + align * ioffset[0]);
assert(std::size_t(base) % 16 == 0);
assert(align % 2 == 0);
- GMX_ALIGNED(int, GMX_SIMD_DINT32_WIDTH) ioffset[GMX_SIMD_DINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_DINT32_WIDTH];
_mm_store_si128( reinterpret_cast<__m128i *>(ioffset), offset.simdInternal_);
t1 = _mm_load_pd(base + align * ioffset[0]);
__m128d t1, t2, t3, t4;
__m256d tA, tB;
- GMX_ALIGNED(int, GMX_SIMD_DINT32_WIDTH) ioffset[GMX_SIMD_DINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_DINT32_WIDTH];
_mm_store_si128( reinterpret_cast<__m128i *>(ioffset), offset.simdInternal_);
t1 = _mm_loadu_pd(base + align * ioffset[0]);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
SimdFloat * v2,
SimdFloat * v3)
{
- GMX_ALIGNED(int, GMX_SIMD_FLOAT_WIDTH) offset[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset[GMX_SIMD_FLOAT_WIDTH];
_mm256_store_si256( reinterpret_cast<__m256i *>(offset), simdoffset.simdInternal_);
gatherLoadTranspose<align>(base, offset, v0, v1, v2, v3);
}
SimdFloat * v0,
SimdFloat * v1)
{
- GMX_ALIGNED(int, GMX_SIMD_FLOAT_WIDTH) offset[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset[GMX_SIMD_FLOAT_WIDTH];
_mm256_store_si256( reinterpret_cast<__m256i *>(offset), simdoffset.simdInternal_);
gatherLoadTranspose<align>(base, offset, v0, v1);
}
__m128 t1, t2, t3, t4, t5, t6, t7, t8;
__m256 tA, tB, tC, tD;
- GMX_ALIGNED(int, GMX_SIMD_FLOAT_WIDTH) offset[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset[GMX_SIMD_FLOAT_WIDTH];
_mm256_store_si256( reinterpret_cast<__m256i *>(offset), simdoffset.simdInternal_);
t1 = _mm_loadl_pi(_mm_setzero_ps(), reinterpret_cast<const __m64 *>( base + align * offset[0] ) );
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 16
#define GMX_SIMD_DINT32_WIDTH 8
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 64 // Bytes (16*single or 8*double)
#define GMX_SIMD_RSQRT_BITS 14
#define GMX_SIMD_RCP_BITS 14
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
SimdDouble v2)
{
__m512d t[4], t5, t6, t7, t8;
- GMX_ALIGNED(std::int64_t, 8) o[8];
+ alignas(GMX_SIMD_ALIGNMENT) std::int64_t o[8];
//TODO: should use fastMultiply
_mm512_store_epi64(o, _mm512_cvtepi32_epi64(_mm256_mullo_epi32(_mm256_load_si256((const __m256i*)(offset )), _mm256_set1_epi32(align))));
t5 = _mm512_unpacklo_pd(v0.simdInternal_, v1.simdInternal_);
SimdDouble v2)
{
__m512d t[4], t5, t6, t7, t8;
- GMX_ALIGNED(std::int64_t, 8) o[8];
+ alignas(GMX_SIMD_ALIGNMENT) std::int64_t o[8];
//TODO: should use fastMultiply
_mm512_store_epi64(o, _mm512_cvtepi32_epi64(_mm256_mullo_epi32(_mm256_load_si256((const __m256i*)(offset )), _mm256_set1_epi32(align))));
t5 = _mm512_unpacklo_pd(v0.simdInternal_, v1.simdInternal_);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
{
__m512 t[4], t5, t6, t7, t8;
int i;
- GMX_ALIGNED(std::int32_t, 16) o[16];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t o[16];
store(o, fastMultiply<align>(simdLoad(offset, SimdFInt32Tag())));
if (align < 4)
{
{
__m512 t[4], t5, t6, t7, t8;
int i;
- GMX_ALIGNED(std::int32_t, 16) o[16];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t o[16];
store(o, fastMultiply<align>(simdLoad(offset, SimdFInt32Tag())));
if (align < 4)
{
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 16
#define GMX_SIMD_DINT32_WIDTH 8
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 64 // Bytes (16*single or 8*double)
#define GMX_SIMD_RSQRT_BITS 28
#define GMX_SIMD_RCP_BITS 28
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 16
#define GMX_SIMD_DINT32_WIDTH 8
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 64 // Bytes (16*single or 8*double)
#define GMX_SIMD_RSQRT_BITS 23
#define GMX_SIMD_RCP_BITS 23
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
SimdDouble v1,
SimdDouble v2)
{
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) rdata0[GMX_SIMD_DOUBLE_WIDTH];
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) rdata1[GMX_SIMD_DOUBLE_WIDTH];
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) rdata2[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double rdata0[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double rdata1[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double rdata2[GMX_SIMD_DOUBLE_WIDTH];
store(rdata0, v0);
store(rdata1, v1);
SimdDouble v1,
SimdDouble v2)
{
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) rdata0[GMX_SIMD_DOUBLE_WIDTH];
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) rdata1[GMX_SIMD_DOUBLE_WIDTH];
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) rdata2[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double rdata0[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double rdata1[GMX_SIMD_DOUBLE_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double rdata2[GMX_SIMD_DOUBLE_WIDTH];
store(rdata0, v0);
store(rdata1, v1);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
SimdFloat v1,
SimdFloat v2)
{
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata0[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata1[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata2[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata0[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata1[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata2[GMX_SIMD_FLOAT_WIDTH];
store(rdata0, v0);
store(rdata1, v1);
SimdFloat v1,
SimdFloat v2)
{
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata0[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata1[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) rdata2[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata0[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata1[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float rdata2[GMX_SIMD_FLOAT_WIDTH];
store(rdata0, v0);
store(rdata1, v1);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 4
#define GMX_SIMD_DINT32_WIDTH 2
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 16 // Bytes (4*single or 2*double)
#define GMX_SIMD_RSQRT_BITS 11
#define GMX_SIMD_RCP_BITS 11
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
// This is likely because (a) the extract function is expensive, and (b)
// the alignment scaling can often be done as part of the load instruction
// (which is even cheaper than doing it in SIMD registers).
- GMX_ALIGNED(std::int32_t, GMX_SIMD_FINT32_WIDTH) ioffset[GMX_SIMD_FINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_FINT32_WIDTH];
_mm_store_si128( (__m128i *)ioffset, offset.simdInternal_);
gatherLoadTranspose<align>(base, ioffset, v0, v1, v2, v3);
}
// This is likely because (a) the extract function is expensive, and (b)
// the alignment scaling can often be done as part of the load instruction
// (which is even cheaper than doing it in SIMD registers).
- GMX_ALIGNED(std::int32_t, GMX_SIMD_FINT32_WIDTH) ioffset[GMX_SIMD_FINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_FINT32_WIDTH];
_mm_store_si128( (__m128i *)ioffset, offset.simdInternal_);
gatherLoadTranspose<align>(base, ioffset, v0, v1);
}
// This is likely because (a) the extract function is expensive, and (b)
// the alignment scaling can often be done as part of the load instruction
// (which is even cheaper than doing it in SIMD registers).
- GMX_ALIGNED(std::int32_t, GMX_SIMD_FINT32_WIDTH) ioffset[GMX_SIMD_FINT32_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_FINT32_WIDTH];
_mm_store_si128( (__m128i *)ioffset, offset.simdInternal_);
gatherLoadTranspose<align>(base, ioffset, v0, v1);
}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_SIMD_FINT32_WIDTH 4
#define GMX_SIMD_DINT32_WIDTH 2
#define GMX_SIMD4_WIDTH 4
+#define GMX_SIMD_ALIGNMENT 16 // Bytes (4*single or 2*double)
#define GMX_SIMD_RSQRT_BITS 11
#define GMX_SIMD_RCP_BITS 11
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
const SimdFloat sieve(SimdFloat(-5.965323564e+29f) | SimdFloat(7.05044434e-30f));
#else
const int isieve = 0xFFFFF000;
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) mem[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) float mem[GMX_SIMD_FLOAT_WIDTH];
union {
float f; int i;
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
* simdWidth elements at the beginning and end
* to test we are not polluting memory there either. Sum=4*simdWidth.
*/
-#if GMX_SIMD4_WIDTH > GMX_SIMD_REAL_WIDTH
- GMX_ALIGNED(T, GMX_SIMD4_WIDTH) src[simdWidth*4];
- GMX_ALIGNED(T, GMX_SIMD4_WIDTH) dst[simdWidth*4];
-#else
- GMX_ALIGNED(T, GMX_SIMD_REAL_WIDTH) src[simdWidth*4];
- GMX_ALIGNED(T, GMX_SIMD_REAL_WIDTH) dst[simdWidth*4];
-#endif
+ alignas(GMX_SIMD_ALIGNMENT) T src[simdWidth*4];
+ alignas(GMX_SIMD_ALIGNMENT) T dst[simdWidth*4];
// Make sure we have memory to check both before and after the test pointers
T * pCopySrc = src + simdWidth + loadOffset;
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#endif
#if GMX_SIMD_HAVE_REAL
+TEST(SimdTest, GmxAligned)
+{
+ // Test alignment with two variables that must be aligned, and one that
+ // doesn't have to be. The order of variables is up to the compiler, but
+ // if it ignores alignment it is highly unlikely that both r1/r3 still end
+ // up being aligned by mistake.
+ alignas(GMX_SIMD_ALIGNMENT) real r1;
+ real r2;
+ alignas(GMX_SIMD_ALIGNMENT) real r3;
+
+ std::uint64_t addr1 = reinterpret_cast<std::uint64_t>(&r1);
+ std::uint64_t addr2 = reinterpret_cast<std::uint64_t>(&r2);
+ std::uint64_t addr3 = reinterpret_cast<std::uint64_t>(&r3);
+
+ EXPECT_EQ(0, addr1 % GMX_SIMD_ALIGNMENT);
+ EXPECT_NE(0, addr2); // Just so r2 is not optimized away
+ EXPECT_EQ(0, addr3 % GMX_SIMD_ALIGNMENT);
+
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t i1;
+ std::int32_t i2;
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t i3;
+
+ addr1 = reinterpret_cast<std::uint64_t>(&i1);
+ addr2 = reinterpret_cast<std::uint64_t>(&i2);
+ addr3 = reinterpret_cast<std::uint64_t>(&i3);
+
+ EXPECT_EQ(0, addr1 % GMX_SIMD_ALIGNMENT);
+ EXPECT_NE(0, addr2); // Just so i2 is not optimized away
+ EXPECT_EQ(0, addr3 % GMX_SIMD_ALIGNMENT);
+}
+
+
::std::vector<real>
simdReal2Vector(const SimdReal simd)
{
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) mem[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real mem[GMX_SIMD_REAL_WIDTH];
store(mem, simd);
std::vector<real> v(mem, mem+GMX_SIMD_REAL_WIDTH);
SimdReal
vector2SimdReal(const std::vector<real> &v)
{
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) mem[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real mem[GMX_SIMD_REAL_WIDTH];
for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
{
return compareVectorEq(refExpr, tstExpr, simdReal2Vector(ref), simdReal2Vector(tst));
}
-std::vector<int>
+std::vector<std::int32_t>
simdInt2Vector(const SimdInt32 simd)
{
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) mem[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t mem[GMX_SIMD_REAL_WIDTH];
store(mem, simd);
- std::vector<int> v(mem, mem+GMX_SIMD_REAL_WIDTH);
+ std::vector<std::int32_t> v(mem, mem+GMX_SIMD_REAL_WIDTH);
return v;
}
SimdInt32
-vector2SimdInt(const std::vector<int> &v)
+vector2SimdInt(const std::vector<std::int32_t> &v)
{
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) mem[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t mem[GMX_SIMD_REAL_WIDTH];
for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
{
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* The returned vector will have the same length as the SIMD width.
*/
-std::vector<int> simdInt2Vector(const SimdInt32 simd);
+std::vector<std::int32_t> simdInt2Vector(const SimdInt32 simd);
/*! \brief Return 32-bit integer SIMD value from std::vector<int>.
*
* If the vector is longer than SIMD width, only the first elements will be used.
* If it is shorter, the contents will be repeated to fill the SIMD register.
*/
-SimdInt32 vector2SimdInt(const std::vector<int> &v);
+SimdInt32 vector2SimdInt(const std::vector<std::int32_t> &v);
/*! \brief Set SIMD register contents from three int values.
*
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
::std::vector<real>
simd4Real2Vector(const Simd4Real simd4)
{
- GMX_ALIGNED(real, GMX_SIMD4_WIDTH) mem[GMX_SIMD4_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real mem[GMX_SIMD4_WIDTH];
store4(mem, simd4);
std::vector<real> v(mem, mem+GMX_SIMD4_WIDTH);
Simd4Real
vector2Simd4Real(const std::vector<real> &v)
{
- GMX_ALIGNED(real, GMX_SIMD4_WIDTH) mem[GMX_SIMD4_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real mem[GMX_SIMD4_WIDTH];
for (int i = 0; i < GMX_SIMD4_WIDTH; i++)
{
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
Simd4Real v0, v1, v2, v3;
int i;
// aligned pointers
- GMX_ALIGNED(real, GMX_SIMD4_WIDTH) p0[4*GMX_SIMD4_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real p0[4*GMX_SIMD4_WIDTH];
real * p1 = p0 + GMX_SIMD4_WIDTH;
real * p2 = p0 + 2*GMX_SIMD4_WIDTH;
real * p3 = p0 + 3*GMX_SIMD4_WIDTH;
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#if GMX_SIMD_HAVE_FLOAT && GMX_SIMD_HAVE_DOUBLE
TEST_F(SimdFloatingpointTest, cvtFloat2Double)
{
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) f[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) d[GMX_SIMD_FLOAT_WIDTH]; // Yes, double array length should be same as float
+ alignas(GMX_SIMD_ALIGNMENT) float f[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double d[GMX_SIMD_FLOAT_WIDTH]; // Yes, double array length should be same as float
int i;
SimdFloat vf;
TEST_F(SimdFloatingpointTest, cvtDouble2Float)
{
- GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH) f[GMX_SIMD_FLOAT_WIDTH];
- GMX_ALIGNED(double, GMX_SIMD_DOUBLE_WIDTH) d[GMX_SIMD_FLOAT_WIDTH]; // Yes, double array length should be same as float
+ alignas(GMX_SIMD_ALIGNMENT) float f[GMX_SIMD_FLOAT_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) double d[GMX_SIMD_FLOAT_WIDTH]; // Yes, double array length should be same as float
int i;
SimdFloat vf;
SimdDouble vd0;
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
real data[GMX_SIMD_REAL_WIDTH/4];
std::iota(data, data+GMX_SIMD_REAL_WIDTH/4, 1);
-#if defined _ICC && __ICC == 1800 || defined __ICL && __ICL == 1800
+#if defined __ICC && __ICC == 1800 || defined __ICL && __ICL == 1800
#pragma novector /* Work-around for incorrect vectorization for AVX_512(_KNL) */
#endif
for (i = 0; i < GMX_SIMD_REAL_WIDTH / 4; i++)
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#if GMX_SIMD_HAVE_INT32_EXTRACT
TEST_F(SimdIntegerTest, extract)
{
- GMX_ALIGNED(int, GMX_SIMD_REAL_WIDTH) idata[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) std::int32_t idata[GMX_SIMD_REAL_WIDTH];
SimdInt32 simd;
for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
// First test the range where we get normalized (non-denormal) results,
// since we don't require denormal results to be reproduced correctly.
+ //
+ // For very small arguments that would produce results close to the
+ // smallest representable value, some of the intermediate values might
+ // trigger flush-to-zero denormals without FMA operations,
+ // e.g. for the icc compiler. Since we never use such values in Gromacs, we
+ // shrink the range a bit in that case instead of requiring the compiler to
+ // handle denormals (which might reduce performance).
#if GMX_DOUBLE
+#if GMX_SIMD_HAVE_FMA
setRange(-708.3, 709.1);
#else
+ setRange(-690, 709.1);
+#endif
+#else
+#if GMX_SIMD_HAVE_FMA
setRange(-87.3, 88.0);
+#else
+ setRange(-80, 88.0);
+#endif
#endif
GMX_EXPECT_SIMD_FUNC_NEAR(std::exp, exp);
// Then multiply with ln(2) to get our limit for exp().
// In this range we allow the value to be either correct (denormal) or 0.0
#if GMX_DOUBLE
- setRange(-746.0, -708.3);
+ setRange(-746.0, -708.4);
#else
setRange(-104.0, -87.3);
#endif
TEST_F(SimdMathTest, expUnsafe)
{
#if GMX_DOUBLE
+#if GMX_SIMD_HAVE_FMA
setRange(-708.3, 709.1);
#else
+ setRange(-690, 709.1);
+#endif
+#else
+#if GMX_SIMD_HAVE_FMA
setRange(-87.3, 88.0);
+#else
+ setRange(-80, 88.0);
+#endif
#endif
GMX_EXPECT_SIMD_FUNC_NEAR(std::exp, exp<MathOptimization::Unsafe>);
}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
ex.prependContext("Error generating cubic spline table for function '" + thisFuncInput.desc + "'");
throw;
}
- // Calculate the required table spacing h. The error we make with linear interpolation
- // of the derivative will be described by the third-derivative correction term.
- // This means we can compute the required spacing as h = sqrt(12*tolerance*min(f'/f''')),
- // where f'/f''' is the first and third derivative of the function, respectively.
+ // Calculate the required table spacing h. The error we make with a third order polynomial
+ // (second order for derivative) will be described by the fourth-derivative correction term.
+ //
+ // This means we can compute the required spacing as h = 0.5*cbrt(72*sqrt(3)*tolerance*min(f'/f'''')),
+ // where f'/f'''' is the first and fourth derivative of the function, respectively.
+ // Since we already have an analytical form of the derivative, we reduce the numerical
+ // errors by calculating the quotient of the function and third derivative of the
+ // input-derivative-analytical function instead.
double thisMinQuotient = internal::findSmallestQuotientOfFunctionAndThirdDerivative(thisFuncInput.derivative, range_);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2016, by the GROMACS development team, led by
+ * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
* \note There will be a small additional accuracy loss from the internal
* operation where we calculate the epsilon offset from the nearest table
* point, since the integer part we subtract can get large in those cases.
- *
* While this is technically possible to solve with extended precision
* arithmetics, that would introduce extra instructions in some highly
* performance-sensitive code parts. For typical GROMACS interaction
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
// of the derivative will be described by the third-derivative correction term.
// This means we can compute the required spacing as h = sqrt(12*tolerance*min(f'/f''')),
// where f'/f''' is the first and third derivative of the function, respectively.
+ // Since we already have an analytical form of the derivative, we reduce the numerical
+ // errors by calculating the quotient of the function and second derivative of the
+ // input-derivative-analytical function instead.
double thisMinQuotient = internal::findSmallestQuotientOfFunctionAndSecondDerivative(thisFuncInput.derivative, thisFuncInput.spacing, range_);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
}
-/*! \brief Update minQuotient if the ratio of this function value and its second derivative is smaller
+/*! \brief Calculate absolute quotient of function and its second derivative
*
* This is a utility function used in the functions to find the smallest quotient
* in a range.
* \param[in] thisPoint Value of function at x.
* \param[in] nextPoint Value of function at x+h.
* \param[in] spacing Value of h.
- * \param[inout] minQuotient Current minimum of such quotients, updated if this quotient is smaller.
+ *
+ * \return The absolute value of the quotient. If either the function or second
+ * derivative is smaller than sqrt(GMX_REAL_MIN), they will be set to
+ * that value.
*/
-static void
-updateMinQuotientOfFunctionAndSecondDerivative(double previousPoint,
- double thisPoint,
- double nextPoint,
- double spacing,
- double * minQuotient)
+static double
+quotientOfFunctionAndSecondDerivative(double previousPoint,
+ double thisPoint,
+ double nextPoint,
+ double spacing)
{
- double value = std::abs( thisPoint );
+ double lowerLimit = static_cast<double>(std::sqrt(GMX_REAL_MIN));
+ double value = std::max(std::abs( thisPoint ), lowerLimit );
double secondDerivative = std::abs( (previousPoint - 2.0 * thisPoint + nextPoint) / (spacing * spacing ) );
// Make sure we do not divide by zero. This limit is arbitrary,
// but it doesnt matter since this point will have a very large value,
// and the whole routine is searching for the smallest value.
- secondDerivative = std::max(secondDerivative, static_cast<double>(std::sqrt(GMX_REAL_MIN)));
+ secondDerivative = std::max(secondDerivative, lowerLimit);
- *minQuotient = std::min(*minQuotient, value / secondDerivative);
+ return (value / secondDerivative);
}
// outside the range specified.
double h = std::pow( GMX_DOUBLE_EPS, 0.25 );
std::pair<double, double> newRange(range.first + h, range.second - h);
- const int points = 1000; // arbitrary
+ const int points = 500; // arbitrary
double dx = (newRange.second - newRange.first) / points;
double minQuotient = GMX_REAL_MAX;
for (double x = newRange.first; x <= newRange.second; x += dx)
{
- updateMinQuotientOfFunctionAndSecondDerivative(f(x-h), f(x), f(x+h), h, &minQuotient);
+ minQuotient = std::min(minQuotient, quotientOfFunctionAndSecondDerivative(f(x-h), f(x), f(x+h), h));
}
+
return static_cast<real>(minQuotient);
}
for (std::size_t i = firstIndex + 1; (i + 1) < lastIndex; i++)
{
- updateMinQuotientOfFunctionAndSecondDerivative(function[i-1], function[i], function[i+1], inputSpacing, &minQuotient);
+ minQuotient = std::min(minQuotient, quotientOfFunctionAndSecondDerivative(function[i-1], function[i], function[i+1], inputSpacing));
}
return static_cast<real>(minQuotient);
}
-/*! \brief Update minQuotient if the ratio of this function value and its third derivative is smaller
+/*! \brief Calculate absolute quotient of function and its third derivative
*
* This is a utility function used in the functions to find the smallest quotient
* in a range.
* \param[in] nextPoint Value of function at x+h.
* \param[in] nextNextPoint Value of function at x+2h.
* \param[in] spacing Value of h.
- * \param[inout] minQuotient Current minimum of such quotients, updated if this quotient is smaller.
+ *
+ * \return The absolute value of the quotient. If either the function or third
+ * derivative is smaller than sqrt(GMX_REAL_MIN), they will be set to
+ * that value.
*/
-static void
-updateMinQuotientOfFunctionAndThirdDerivative(double previousPreviousPoint,
- double previousPoint,
- double thisPoint,
- double nextPoint,
- double nextNextPoint,
- double spacing,
- double * minQuotient)
+static double
+quotientOfFunctionAndThirdDerivative(double previousPreviousPoint,
+ double previousPoint,
+ double thisPoint,
+ double nextPoint,
+ double nextNextPoint,
+ double spacing)
{
- double value = std::abs( thisPoint );
+ double lowerLimit = static_cast<double>(std::sqrt(GMX_REAL_MIN));
+ double value = std::max(std::abs( thisPoint ), lowerLimit );
double thirdDerivative = std::abs((nextNextPoint - 2 * nextPoint + 2 * previousPoint - previousPreviousPoint) / (2 * spacing * spacing * spacing));
// Make sure we do not divide by zero. This limit is arbitrary,
// but it doesnt matter since this point will have a very large value,
// and the whole routine is searching for the smallest value.
- thirdDerivative = std::max(thirdDerivative, static_cast<double>(std::sqrt(GMX_REAL_MIN)));
+ thirdDerivative = std::max(thirdDerivative, lowerLimit);
- *minQuotient = std::min(*minQuotient, value / thirdDerivative);
+ return (value / thirdDerivative);
}
// outside the range specified.
double h = std::pow( GMX_DOUBLE_EPS, 0.2 ); // optimal spacing for 3rd derivative
std::pair<double, double> newRange(range.first + 2*h, range.second - 2*h);
- const int points = 1000; // arbitrary
+ const int points = 500; // arbitrary
double dx = (newRange.second - newRange.first) / points;
double minQuotient = GMX_REAL_MAX;
for (double x = newRange.first; x <= newRange.second; x += dx)
{
- updateMinQuotientOfFunctionAndThirdDerivative(f(x-2*h), f(x-h), f(x), f(x+h), f(x+2*h), h, &minQuotient);
+ minQuotient = std::min(minQuotient, quotientOfFunctionAndThirdDerivative(f(x-2*h), f(x-h), f(x), f(x+h), f(x+2*h), h));
}
return static_cast<real>(minQuotient);
}
for (std::size_t i = firstIndex + 2; (i + 2) < lastIndex; i++)
{
- updateMinQuotientOfFunctionAndThirdDerivative(function[i-2], function[i-1], function[i], function[i+1], function[i+2], inputSpacing, &minQuotient);
+ minQuotient = std::min(minQuotient, quotientOfFunctionAndThirdDerivative(function[i-2], function[i-1], function[i], function[i+1], function[i+2], inputSpacing));
}
return static_cast<real>(minQuotient);
}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
table.template evaluateDerivative<numFuncInTable, funcIndex>(x, &tmpDer);
- if (testFuncValue != tmpFunc)
+ // Before we even start to think about errors related to the table interpolation
+ // accuracy, we want to test that the interpolations are consistent whether we
+ // call the routine that evaluates both the function and derivative or only one
+ // of them.
+ // Note that for these tests the relevant tolerance is NOT the default one
+ // provided based on the requested accuracy of the table, but a tolerance related
+ // to the floating-point precision used. For now we only allow deviations up
+ // to 4 ulp (one for the FMA order, and then some margin).
+ FloatingPointTolerance consistencyTolerance(ulpTolerance(4));
+
+ FloatingPointDifference evaluateFuncDiff(tmpFunc, testFuncValue);
+ if (!consistencyTolerance.isWithin(evaluateFuncDiff))
{
ADD_FAILURE()
<< "Interpolation inconsistency for table " << desc << std::endl
<< "Function value when evaluating only function: " << tmpFunc << std::endl;
return;
}
- if (testDerValue != tmpDer)
+
+ FloatingPointDifference evaluateDerDiff(tmpDer, testDerValue);
+ if (!consistencyTolerance.isWithin(evaluateDerDiff))
{
ADD_FAILURE()
<< "Interpolation inconsistency for table " << desc << std::endl
return;
}
+ // Next, we should examine that the table is exact enough relative
+ // to the requested accuracy in the interpolation.
+ //
// There are two sources of errors that we need to account for when checking the values,
// and we only fail the test if both of these tolerances are violated:
//
<< "First failure at x = " << x << std::endl
<< "Reference function = " << refFuncValue << std::endl
<< "Test table function = " << testFuncValue << std::endl
+ << "Allowed abs func err. = " << allowedAbsFuncErr << std::endl
<< "Reference derivative = " << refDerValue << std::endl
- << "Test table derivative = " << testDerValue << std::endl;
+ << "Test table derivative = " << testDerValue << std::endl
+ << "Allowed abs der. err. = " << allowedAbsDerErr << std::endl
+ << "Actual abs der. err. = " << derDiff.asAbsolute() << std::endl;
return;
}
}
TYPED_TEST(SplineTableTest, Sinc)
{
- std::pair<real, real> range(0.1, 10);
+ // Sinc hits some sensitive parts of the table construction code which means
+ // we will not have full relative accuracy close to the zeros in the
+ // derivative. Since this is intentionally a pathological function we reduce
+ // the interval slightly for now.
+ std::pair<real, real> range(0.1, 3.1);
TypeParam sincTable( {{"Sinc", sincFunction, sincDerivative}}, range);
real refDer = lj12Derivative(x);
SimdReal tstFunc, tstDer;
real funcErr, derErr;
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) alignedMem[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real alignedMem[GMX_SIMD_REAL_WIDTH];
table.evaluateFunctionAndDerivative(SimdReal(x), &tstFunc, &tstDer);
SimdReal tstFunc1, tstDer1;
real funcErr0, derErr0;
real funcErr1, derErr1;
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) alignedMem[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real alignedMem[GMX_SIMD_REAL_WIDTH];
table.evaluateFunctionAndDerivative(SimdReal(x), &tstFunc0, &tstDer0, &tstFunc1, &tstDer1);
TypeParam table( {{"LJ12", lj12Function, lj12Derivative}}, range);
SimdReal x, func, der;
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) alignedMem[GMX_SIMD_REAL_WIDTH];
+ alignas(GMX_SIMD_ALIGNMENT) real alignedMem[GMX_SIMD_REAL_WIDTH];
// Test all values between 0 and range.second
for (std::size_t i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
if (nonbondedTarget == TaskTarget::Gpu)
{
GMX_THROW(InconsistentInputError
- ("Nonbonded interactions on the GPU were required, but this would not be "
- "useful. Probably you should not require using GPUs."));
+ ("Nonbonded interactions on the GPU were required, but not supported for these "
+ "simulation settings. Change your settings, or do not require using GPUs."));
}
return false;
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*/
if (ngpu > 0)
{
+ if (hw_opt.nthreads_omp > 0)
+ {
+ /* In this case it is unclear if we should use 1 rank per GPU
+ * or more or less, so we require also setting the number of ranks.
+ */
+ gmx_fatal(FARGS, "When using GPUs, setting the number of OpenMP threads without specifying the number of ranks can lead to conflicting demands. Please specify the number of thread-MPI ranks as well (option -ntmpi).");
+ }
+
nrank = ngpu;
/* When the user sets nthreads_omp, we can end up oversubscribing CPU cores
print_hw_opt(debug, hw_opt);
}
}
+
+void checkHardwareOversubscription(int numThreadsOnThisRank,
+ const gmx::HardwareTopology &hwTop,
+ const t_commrec *cr,
+ const gmx::MDLogger &mdlog)
+{
+ if (hwTop.supportLevel() < gmx::HardwareTopology::SupportLevel::LogicalProcessorCount)
+ {
+ /* There is nothing we can check */
+ return;
+ }
+
+ int numRanksOnThisNode = 1;
+ int numThreadsOnThisNode = numThreadsOnThisRank;
+#if GMX_MPI
+ if (PAR(cr) || MULTISIM(cr))
+ {
+ /* Count the threads within this physical node */
+ MPI_Comm_size(cr->mpi_comm_physicalnode, &numRanksOnThisNode);
+ MPI_Allreduce(&numThreadsOnThisRank, &numThreadsOnThisNode, 1, MPI_INT, MPI_SUM, cr->mpi_comm_physicalnode);
+ }
+#endif
+
+ if (numThreadsOnThisNode > hwTop.machine().logicalProcessorCount)
+ {
+ std::string mesg = "WARNING: ";
+ if (GMX_LIB_MPI)
+ {
+ mesg += gmx::formatString("On rank %d: o", cr->sim_nodeid);
+ }
+ else
+ {
+ mesg += "O";
+ }
+ mesg += gmx::formatString("versubscribing the available %d logical CPU cores", hwTop.machine().logicalProcessorCount);
+ if (GMX_LIB_MPI)
+ {
+ mesg += " per node";
+ }
+ mesg += gmx::formatString(" with %d ", numThreadsOnThisNode);
+ if (numRanksOnThisNode == numThreadsOnThisNode)
+ {
+ if (GMX_THREAD_MPI)
+ {
+ mesg += "thread-MPI threads.";
+ }
+ else
+ {
+ mesg += "MPI processes.";
+ }
+ }
+ else
+ {
+ mesg += "threads.";
+ }
+ mesg += "\n This will cause considerable performance loss.";
+ /* Note that only the master rank logs to stderr and only ranks
+ * with an open log file write to log.
+ * TODO: When we have a proper parallel logging framework,
+ * the framework should add the rank and node numbers.
+ */
+ GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(mesg.c_str());
+ }
+}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
namespace gmx
{
+class HardwareTopology;
class MDLogger;
}
PmeRunMode pmeRunMode,
const gmx_mtop_t &mtop);
+/*! \brief Warns for oversubscribing the hardware threads, when that is the case
+ */
+void checkHardwareOversubscription(int numThreadsOnThisRank,
+ const gmx::HardwareTopology &hwTop,
+ const t_commrec *cr,
+ const gmx::MDLogger &mdlog);
+
#endif
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2013, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
"virtual sites. With these flags, [TT]gmx check[tt] provides a quick check for such problems.[PAR]",
"The program can compare two run input ([REF].tpr[ref])",
"files",
- "when both [TT]-s1[tt] and [TT]-s2[tt] are supplied.",
+ "when both [TT]-s1[tt] and [TT]-s2[tt] are supplied. When comparing",
+ "run input files this way, the default relative tolerance is reduced",
+ "to 0.000001 and the absolute tolerance set to zero to find any differences",
+ "not due to minor compiler optimization differences, although you can",
+ "of course still set any other tolerances through the options.",
"Similarly a pair of trajectory files can be compared (using the [TT]-f2[tt]",
"option), or a pair of energy files (using the [TT]-e2[tt] option).[PAR]",
"For free energy simulations the A and B state topology from one",
}
fn2 = nullptr;
}
+
+ fprintf(stderr, "Note: When comparing run input files, default tolerances are reduced.\n");
+ if (!opt2parg_bSet("-tol", asize(pa), pa))
+ {
+ ftol = 0.000001;
+ }
+ if (!opt2parg_bSet("-abstol", asize(pa), pa))
+ {
+ abstol = 0;
+ }
comp_tpx(fn1, fn2, bRMSD, ftol, abstol);
}
else if (fn1 && tex)
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#endif
#endif
-/*! \def GMX_ALIGNED(type, alignment)
- * \brief
- * Declare variable with data alignment
- *
- * \param[in] type Type of variable
- * \param[in] alignment Alignment in multiples of type
- *
- * Typical usage:
- * \code
- GMX_ALIGNED(real, GMX_SIMD_REAL_WIDTH) buf[...];
- \endcode
- */
-
-// We rely on C++11. This will for instance work for MSVC2015 and later.
-// If you get an error here, find out what attribute to use to get your compiler to align
-// data properly and add it as a case.
-#define GMX_ALIGNED(type, alignment) alignas(alignment*sizeof(type)) type
-
/*! \brief
* Macro to explicitly ignore an unused value.
*
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
{ "An intellectual is someone who has found something more interesting than sex.", "Edgar Wallace" },
{ "Base eight is just like base ten really, if you’re missing two fingers.", "Tom Lehrer" },
{ "If 10 years from now, when you are doing something quick and dirty, you suddenly visualize that I am looking over your shoulders and say to yourself: ‘Dijkstra would not have liked this’, well that would be enough immortality for me.", "Edsger Dijkstra" },
- { "Memory is like an orgasm. It’s a lot better of you don’t have to fake it.", "Seymour Cray, on virtual memory" },
+ { "Memory is like an orgasm. It’s a lot better if you don’t have to fake it.", "Seymour Cray, on virtual memory" },
{ "A computer once beat me at chess, but it was no match for me at kick boxing.", "Emo Philips" },
{ "Home computers are being called upon to perform many new functions, including the consumption of homework formerly eaten by the dog.", "Doug Larson" },
{ "Forcefields are like dating; things go fine for a while and then sometimes it goes really bad.", "Alex MacKerell" },
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include <cstdlib>
+#include <set>
#include <string>
#include <vector>
const char *const lib = getenv(envVarName);
if (!isNullOrEmpty(lib))
{
+ std::vector<std::string> &path = impl_->searchPath_; // convenience
+ const std::string defaultPath = impl_->getDefaultPath();
+ std::vector<std::string> tmpPath;
+ Path::splitPathEnvironment(lib, &tmpPath);
+ std::set<std::string> pathsSeen;
+ pathsSeen.insert(defaultPath);
+ for (auto &d : tmpPath)
+ {
+ if (!pathsSeen.count(d))
+ {
+ path.push_back(d);
+ pathsSeen.insert(d);
+ }
+ }
impl_->bEnvIsSet_ = true;
- Path::splitPathEnvironment(lib, &impl_->searchPath_);
}
}
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+# Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
gmx_add_unit_test(UtilityUnitTests utility-test
alignedallocator.cpp
arrayref.cpp
- basedefinitions.cpp
bitmask32.cpp bitmask64.cpp bitmask128.cpp
keyvaluetreeserializer.cpp
keyvaluetreetransform.cpp
+++ /dev/null
-/*
- * This file is part of the GROMACS molecular simulation package.
- *
- * Copyright (c) 2015, by the GROMACS development team, led by
- * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
- * and including many others, as listed in the AUTHORS file in the
- * top-level source directory and at http://www.gromacs.org.
- *
- * GROMACS is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * GROMACS is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with GROMACS; if not, see
- * http://www.gnu.org/licenses, or write to the Free Software Foundation,
- * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * If you want to redistribute modifications to GROMACS, please
- * consider that scientific software is very special. Version
- * control is crucial - bugs must be traceable. We will be happy to
- * consider code for inclusion in the official distribution, but
- * derived work must not be called official GROMACS. Details are found
- * in the README & COPYING files - if they are missing, get the
- * official version at http://www.gromacs.org.
- *
- * To help us fund GROMACS development, we humbly ask that you cite
- * the research papers on the package. Check out http://www.gromacs.org.
- */
-/*! \internal \file
- * \brief Tests for base definitions (only alignment attributes for now)
- *
- * \author Erik Lindahl <erik.lindahl@gmail.com>
- * \ingroup module_utility
- */
-
-#include "gmxpre.h"
-
-#include "gromacs/utility/basedefinitions.h"
-
-#include <cstdint>
-
-#include <gtest/gtest.h>
-
-#include "gromacs/utility/real.h"
-
-namespace gmx
-{
-
-TEST(BasedefinitionsTest, GmxAlignedDeclaresAlignedVariable)
-{
- GMX_ALIGNED(real, 2) r1;
- GMX_ALIGNED(real, 4) r2;
- GMX_ALIGNED(real, 8) r3;
-
- std::uint64_t addr1 = reinterpret_cast<std::uint64_t>(&r1);
- std::uint64_t addr2 = reinterpret_cast<std::uint64_t>(&r2);
- std::uint64_t addr3 = reinterpret_cast<std::uint64_t>(&r3);
-
- EXPECT_EQ(0, addr1 % 2);
- EXPECT_EQ(0, addr2 % 4);
- EXPECT_EQ(0, addr3 % 8);
-
- GMX_ALIGNED(int, 2) i1;
- GMX_ALIGNED(int, 4) i2;
- GMX_ALIGNED(int, 8) i3;
-
- addr1 = reinterpret_cast<std::uint64_t>(&i1);
- addr2 = reinterpret_cast<std::uint64_t>(&i2);
- addr3 = reinterpret_cast<std::uint64_t>(&i3);
-
- EXPECT_EQ(0, addr1 % 2);
- EXPECT_EQ(0, addr2 % 4);
- EXPECT_EQ(0, addr3 % 8);
-}
-
-}
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2011,2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2011,2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
restore_ekinstate_from_state(cr, ekind, &state_global->ekinstate);
}
- cglo_flags = (CGLO_TEMPERATURE | CGLO_GSTAT
+ cglo_flags = (CGLO_INITIALIZATION | CGLO_TEMPERATURE | CGLO_GSTAT
| (EI_VV(ir->eI) ? CGLO_PRESSURE : 0)
| (EI_VV(ir->eI) ? CGLO_CONSTRAINT : 0)
| (continuationOptions.haveReadEkin ? CGLO_READEKIN : 0));
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2010,2011,2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2010,2011,2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
"you can increase the maxwarn setting in the membed input file.");
}
+ // Re-establish the invariants of the derived values within
+ // mtop.
+ gmx_mtop_finalize(mtop);
+
if (ftp2bSet(efTOP, nfile, fnm))
{
top_update(opt2fn("-mp", nfile, fnm), rm_p, mtop);
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2011,2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2011,2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
namespace gmx
{
-/*! \brief Return whether GPU acceleration of nonbondeds is useful with the given settings.
+/*! \brief Return whether GPU acceleration of nonbondeds is supported with the given settings.
*
- * If not, logs a message about falling back to CPU code. */
+ * If not, and if a warning may be issued, logs a warning about
+ * falling back to CPU code. With thread-MPI, only the first
+ * call to this function should have \c issueWarning true. */
static bool gpuAccelerationOfNonbondedIsUseful(const MDLogger &mdlog,
const t_inputrec *ir,
- bool doRerun)
+ bool issueWarning)
{
- if (doRerun && ir->opts.ngener > 1)
- {
- /* Rerun execution time is dominated by I/O and pair search,
- * so GPUs are not very useful, plus they do not support more
- * than one energy group. If the user requested GPUs
- * explicitly, a fatal error is given later. With non-reruns,
- * we fall back to a single whole-of system energy group
- * (which runs much faster than a multiple-energy-groups
- * implementation would), and issue a note in the .log
- * file. Users can re-run if they want the information. */
- GMX_LOG(mdlog.warning).asParagraph().appendText("Multiple energy groups is not implemented for GPUs, so is not useful for this rerun, so falling back to the CPU");
+ if (ir->opts.ngener > 1)
+ {
+ /* The GPU code does not support more than one energy group.
+ * If the user requested GPUs explicitly, a fatal error is given later.
+ */
+ if (issueWarning)
+ {
+ GMX_LOG(mdlog.warning).asParagraph()
+ .appendText("Multiple energy groups is not implemented for GPUs, falling back to the CPU. "
+ "For better performance, run on the GPU without energy groups and then do "
+ "gmx mdrun -rerun option on the trajectory with an energy group .tpr file.");
+ }
return false;
}
-
return true;
}
useGpuForNonbonded = decideWhetherToUseGpusForNonbondedWithThreadMpi
(nonbondedTarget, gpuIdsToUse, userGpuTaskAssignment, emulateGpuNonbonded,
inputrec->cutoff_scheme == ecutsVERLET,
- gpuAccelerationOfNonbondedIsUseful(mdlog, inputrec, doRerun),
+ gpuAccelerationOfNonbondedIsUseful(mdlog, inputrec, GMX_THREAD_MPI),
hw_opt.nthreads_tmpi);
auto inputSystemHasPme = EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype);
auto canUseGpuForPme = inputSystemHasPme && pme_gpu_supports_input(inputrec, nullptr);
useGpuForPme = decideWhetherToUseGpusForPmeWithThreadMpi
(useGpuForNonbonded, pmeTarget, gpuIdsToUse, userGpuTaskAssignment,
canUseGpuForPme, hw_opt.nthreads_tmpi, domdecOptions.numPmeRanks);
+
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+
/* Determine how many thread-MPI ranks to start.
*
* TODO Over-writing the user-supplied value here does
bool gpusWereDetected = hwinfo->ngpu_compatible_tot > 0;
useGpuForNonbonded = decideWhetherToUseGpusForNonbonded(nonbondedTarget, userGpuTaskAssignment,
emulateGpuNonbonded, inputrec->cutoff_scheme == ecutsVERLET,
- gpuAccelerationOfNonbondedIsUseful(mdlog, inputrec, doRerun),
+ gpuAccelerationOfNonbondedIsUseful(mdlog, inputrec, !GMX_THREAD_MPI),
gpusWereDetected);
auto inputSystemHasPme = EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype);
auto canUseGpuForPme = inputSystemHasPme && pme_gpu_supports_input(inputrec, nullptr);
useGpuForPme = decideWhetherToUseGpusForPme(useGpuForNonbonded, pmeTarget, userGpuTaskAssignment,
canUseGpuForPme, cr->nnodes, domdecOptions.numPmeRanks,
gpusWereDetected);
+
pmeRunMode = (useGpuForPme ? PmeRunMode::GPU : PmeRunMode::CPU);
if (pmeRunMode == PmeRunMode::GPU)
{
*/
nthreads_pme = gmx_omp_nthreads_get(emntPME);
+ int numThreadsOnThisRank;
+ /* threads on this MPI process or TMPI thread */
+ if (thisRankHasDuty(cr, DUTY_PP))
+ {
+ numThreadsOnThisRank = gmx_omp_nthreads_get(emntNonbonded);
+ }
+ else
+ {
+ numThreadsOnThisRank = nthreads_pme;
+ }
+
+ checkHardwareOversubscription(numThreadsOnThisRank,
+ *hwinfo->hardwareTopology,
+ cr, mdlog);
+
+ if (hw_opt.thread_affinity != threadaffOFF)
+ {
+ /* Before setting affinity, check whether the affinity has changed
+ * - which indicates that probably the OpenMP library has changed it
+ * since we first checked).
+ */
+ gmx_check_thread_affinity_set(mdlog, cr,
+ &hw_opt, hwinfo->nthreads_hw_avail, TRUE);
+
+ /* Set the CPU affinity */
+ gmx_set_thread_affinity(mdlog, cr, &hw_opt, *hwinfo->hardwareTopology,
+ numThreadsOnThisRank, nullptr);
+ }
+
wcycle = wallcycle_init(fplog, mdrunOptions.timingOptions.resetStep, cr);
if (PAR(cr))
GMX_ASSERT(thisRankHasDuty(cr, DUTY_PP) == (fr != nullptr), "Double-checking that only PME-only ranks have no forcerec");
gmx_pme_t * &pmedata = fr ? fr->pmedata : sepPmeData;
- if (hw_opt.thread_affinity != threadaffOFF)
- {
- /* Before setting affinity, check whether the affinity has changed
- * - which indicates that probably the OpenMP library has changed it
- * since we first checked).
- */
- gmx_check_thread_affinity_set(mdlog, cr,
- &hw_opt, hwinfo->nthreads_hw_avail, TRUE);
-
- int nthread_local;
- /* threads on this MPI process or TMPI thread */
- if (thisRankHasDuty(cr, DUTY_PP))
- {
- nthread_local = gmx_omp_nthreads_get(emntNonbonded);
- }
- else
- {
- nthread_local = gmx_omp_nthreads_get(emntPME);
- }
-
- /* Set the CPU affinity */
- gmx_set_thread_affinity(mdlog, cr, &hw_opt, *hwinfo->hardwareTopology,
- nthread_local, nullptr);
- }
-
/* Initiate PME if necessary,
* either on all nodes or on dedicated PME nodes only. */
if (EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
namespace
{
-//! A basic PME runner
+/*! \brief A basic PME runner
+ *
+ * \todo Consider also using GpuTest class. */
class PmeTest : public MdrunTestFixture
{
public:
void PmeTest::SetUpTestCase()
{
gmx_gpu_info_t gpuInfo {};
- char detection_error[STRLEN];
- GMX_UNUSED_VALUE(detection_error); //TODO
// It would be nicer to do this detection once and have mdrun
// re-use it, but this is OK. Note that this also caters for when
// there is no GPU support in the build.
+ //
+ // TODO report any error messages gracefully.
if (GMX_GPU == GMX_GPU_CUDA &&
- (detect_gpus(&gpuInfo, detection_error) >= 0) &&
- gpuInfo.n_dev_compatible > 0)
+ canDetectGpus(nullptr))
{
- s_hasCompatibleCudaGpus = true;
+ findGpus(&gpuInfo);
+ s_hasCompatibleCudaGpus = (gpuInfo.n_dev_compatible > 0);
}
free_gpu_info(&gpuInfo);
}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
return impl_->data_.rootChecker();
}
+void CommandLineTestBase::setDefaultTolerance(const FloatingPointTolerance &tolerance)
+{
+ impl_->data_.rootChecker().setDefaultTolerance(tolerance);
+}
+
void CommandLineTestBase::testWriteHelp(ICommandLineModule *module)
{
StringOutputStream stream;
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
namespace test
{
+class FloatingPointTolerance;
class IFileMatcherSettings;
class ITextBlockMatcherSettings;
class TestFileManager;
* file contents.
*/
TestReferenceChecker rootChecker();
-
+ /*! \brief
+ * Sets the tolerance for floating-point comparisons.
+ *
+ * All following floating-point comparisons using the checker will use
+ * the new tolerance.
+ *
+ * Does not throw.
+ */
+ void setDefaultTolerance(const FloatingPointTolerance &tolerance);
/*! \brief
* Checks the output of writeHelp() against reference data.
*/
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
return relativeToleranceAsPrecisionDependentUlp(magnitude, ulpDiff, ulpDiff);
}
+namespace
+{
+//! Default tolerance in ULPs for two floating-point values to compare equal.
+static gmx_uint64_t g_defaultUlpTolerance = 4;
+}
+
/*! \brief
* Returns the default tolerance for comparing `real` numbers.
*
*/
static inline FloatingPointTolerance defaultRealTolerance()
{
- return relativeToleranceAsUlp(1.0, 4);
+ return relativeToleranceAsUlp(1.0, g_defaultUlpTolerance);
+}
+
+
+/*! \brief
+ * Returns the default tolerance for comparing single-precision numbers when
+ * compared by \Gromacs built in either precision mode.
+ *
+ * This permits a checker compiled with any \Gromacs precision to compare
+ * equal or not in the same way.
+ *
+ * \related FloatingPointTolerance
+ */
+static inline FloatingPointTolerance defaultFloatTolerance()
+{
+ return relativeToleranceAsPrecisionDependentUlp
+ (1.0, g_defaultUlpTolerance, g_defaultUlpTolerance * (GMX_FLOAT_EPS / GMX_DOUBLE_EPS));
}
/*! \name Assertions for floating-point comparison
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014, by the GROMACS development team, led by
+ * Copyright (c) 2014,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
EXPECT_FALSE(relativeToleranceAsUlp(1.0, 4).isWithin(dsmall2));
}
+TEST(FloatingPointToleranceTest, DefaultFloatTolerance)
+{
+ using gmx::test::defaultFloatTolerance;
+
+ // Differences within 4 single-precision ULPs are within the tolerance
+ FloatingPointDifference fequal(1.0f, 1.0f);
+ FloatingPointDifference fulp4(1.0f, addUlps(1.0f, 4));
+ FloatingPointDifference fulp8(1.0f, addUlps(1.0f, 8));
+ FloatingPointDifference fsmall(0.1f, addUlps(1.0f, 2) - 0.9f);
+ FloatingPointDifference fsmall2(0.1f, addUlps(1.0f, 6) - 0.9f);
+ EXPECT_TRUE(defaultFloatTolerance().isWithin(fequal));
+ EXPECT_TRUE(defaultFloatTolerance().isWithin(fulp4));
+ EXPECT_FALSE(defaultFloatTolerance().isWithin(fulp8));
+ EXPECT_TRUE(defaultFloatTolerance().isWithin(fsmall));
+ EXPECT_FALSE(defaultFloatTolerance().isWithin(fsmall2));
+
+ // Differences within 4 single-precision ULPs are still within the
+ // tolerance, even when expressed as double-precision values.
+ FloatingPointDifference dequal(1.0, 1.0);
+ FloatingPointDifference dulp4(1.0, addUlps(1.0, 4));
+ FloatingPointDifference dulp8(1.0, addUlps(1.0, 8));
+ FloatingPointDifference dulp4f(1.0, static_cast<double>(addUlps(1.0f, 4)));
+ FloatingPointDifference dulp8f(1.0, static_cast<double>(addUlps(1.0f, 8)));
+ FloatingPointDifference dsmallf(0.1, static_cast<double>(addUlps(1.0f, 2) - 0.9f));
+ FloatingPointDifference dsmall2f(0.1, static_cast<double>(addUlps(1.0f, 6) - 0.9f));
+ EXPECT_TRUE(defaultFloatTolerance().isWithin(dequal));
+ EXPECT_TRUE(defaultFloatTolerance().isWithin(dulp4));
+ EXPECT_TRUE(defaultFloatTolerance().isWithin(dulp8));
+ EXPECT_TRUE(defaultFloatTolerance().isWithin(dulp4f));
+ EXPECT_FALSE(defaultFloatTolerance().isWithin(dulp8f));
+ EXPECT_TRUE(defaultFloatTolerance().isWithin(dsmallf));
+ EXPECT_FALSE(defaultFloatTolerance().isWithin(dsmall2f));
+}
+
} // namespace
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
+# Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
GMX_PHYSICAL_VALIDATION=OFF to disable the physical validation tests.")
endif()
#
- foreach(module numpy scipy) # add further modules if necessary
+ foreach(module numpy scipy pymbar) # add further modules if necessary
find_python_module(${module})
string(TOUPPER ${module} module_upper)
if(NOT PYTHONMODULE_${module_upper})
# Hook in our own tests
# Read them from json file to make every system a separate test
#
- if (GMX_DOUBLE)
- set(PHYSVALTEST_JSON "${PHYSVALTEST_SOURCE_PATH}/systems_d.json")
- else()
- set(PHYSVALTEST_JSON "${PHYSVALTEST_SOURCE_PATH}/systems.json")
- endif()
+ if (GMX_DOUBLE)
+ set(PHYSVALTEST_JSON "${PHYSVALTEST_SOURCE_PATH}/systems_d.json")
+ else()
+ set(PHYSVALTEST_JSON "${PHYSVALTEST_SOURCE_PATH}/systems.json")
+ endif()
file(STRINGS "${PHYSVALTEST_JSON}" json)
string(REPLACE "\"" "" json ${json})
string(REPLACE "," "" json ${json})
string(REPLACE "}" "" json ${json})
string(REPLACE "<<>>" ";" json ${json})
foreach(line ${json})
- if("${line}" MATCHES "dir:")
- string(REPLACE "dir:" "" dirname ${line})
- add_test(NAME physicalvalidationtests/${dirname}
- COMMAND ${PYTHON_EXECUTABLE} "${PHYSVALTEST_SOURCE_PATH}/gmx_physicalvalidation.py" "${PHYSVALTEST_JSON}" -s ${dirname} -a ${PARGS})
- set_tests_properties(physicalvalidationtests/${dirname} PROPERTIES
+ if("${line}" MATCHES "name:")
+ string(REPLACE "name:" "" testname ${line})
+ add_test(NAME physicalvalidationtests/${testname}
+ COMMAND ${PYTHON_EXECUTABLE} "${PHYSVALTEST_SOURCE_PATH}/gmx_physicalvalidation.py" "${PHYSVALTEST_JSON}" -s ${testname} -a ${PARGS})
+ set_tests_properties(physicalvalidationtests/${testname} PROPERTIES
ENVIRONMENT "PATH=${PATH}"
LABELS "PhysicalValidationTest")
endif()
# Create prepare and run targets while all variables are set
# Will be referenced in CheckTarget.cmake
#
- # "prepare-phys-tests" prepares the systems needed for physical validation for external running
- add_custom_target(prepare-phys-tests
- COMMAND ${PYTHON_EXECUTABLE} "${PHYSVALTEST_SOURCE_PATH}/gmx_physicalvalidation.py" "${PHYSVALTEST_SOURCE_PATH}/systems.json" -p ${PARGS})
- # "run-phys-tests" prepares and runs the systems needed for physical validation
- add_custom_target(run-phys-tests
- COMMAND ${PYTHON_EXECUTABLE} "${PHYSVALTEST_SOURCE_PATH}/gmx_physicalvalidation.py" "${PHYSVALTEST_SOURCE_PATH}/systems.json" -r ${PARGS})
+ # "check-phys-prepare" prepares the systems needed for physical validation for external running
+ add_custom_target(check-phys-prepare
+ COMMAND ${PYTHON_EXECUTABLE} "${PHYSVALTEST_SOURCE_PATH}/gmx_physicalvalidation.py" "${PHYSVALTEST_JSON}" -p ${PARGS}
+ COMMENT "Preparing systems for physical validation"
+ DEPENDS gmx)
+ # "run-physval-sims" prepares and runs the systems needed for physical validation
+ add_custom_target(run-physval-sims
+ COMMAND ${PYTHON_EXECUTABLE} "${PHYSVALTEST_SOURCE_PATH}/gmx_physicalvalidation.py" "${PHYSVALTEST_JSON}" -r ${PARGS}
+ COMMENT "Preparing and running systems for physical validation"
+ DEPENDS gmx)
endif()
else()
#
# Create dummy prepare and run targets
# Will be referenced in CheckTarget.cmake
#
- # "prepare-phys-tests" prepares the systems needed for physical validation for external running
- add_custom_target(prepare-phys-tests
- COMMAND ${CMAKE_COMMAND} -E echo "NOTE: You called the target `prepare-phys-tests`, but ran cmake with\
+ # "check-phys-prepare" prepares the systems needed for physical validation for external running
+ add_custom_target(check-phys-prepare
+ COMMAND ${CMAKE_COMMAND} -E echo "NOTE: You called the target `check-phys-prepare`, but ran cmake with\
`-DGMX_PHYSICAL_VALIDATION=OFF`. The physical validation tests are therefore unavailable,\
and this target is not doing anything."
COMMENT "No physical validation" VERBATIM)
- # "run-phys-tests" prepares and runs the systems needed for physical validation
- add_custom_target(run-phys-tests
- COMMAND ${CMAKE_COMMAND} -E echo "NOTE: You called the target `run-phys-tests`, but ran cmake with\
+ # "run-physval-sims" prepares and runs the systems needed for physical validation
+ add_custom_target(run-physval-sims
+ COMMAND ${CMAKE_COMMAND} -E echo "NOTE: You called the target `run-physval-sims`, but ran cmake with\
`-DGMX_PHYSICAL_VALIDATION=OFF`. The physical validation tests are therefore unavailable,\
and this target is not doing anything."
COMMENT "No physical validation" VERBATIM)
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2014,2016,2017, by the GROMACS development team, led by
+# Copyright (c) 2014,2016,2017,2018, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
add_custom_target(run-ctest
COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure
COMMENT "Running all tests"
- USES_TERMINAL VERBATIM)
+ USES_TERMINAL VERBATIM
+ DEPENDS run-physval-sims)
add_dependencies(run-ctest tests)
-# "check-all" target builds and runs all tests.
+# "check-all" target builds and runs all tests, simulating the physical validation systems first.
add_custom_target(check-all DEPENDS run-ctest)
-# "check-all-run" target builds and runs all tests, simulating the physical validation systems first.
-add_custom_target(check-all-run DEPENDS run-phys-tests check-all)
-# "run-ctest-nophys" is an internal target that actually runs the tests in analogy to "run-ctest".
+# "run-ctest-nophys" is an internal target that actually runs the tests analogously to "run-ctest".
# It runs all tests except the physical validation tests.
add_custom_target(run-ctest-nophys
COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure -E physicalvalidationtests
# "check" target builds and runs all tests except physical validation .
add_custom_target(check DEPENDS run-ctest-nophys)
-# "run-ctest-phys" is an internal target that actually runs the tests in analogy to "run-ctest".
+# "run-ctest-phys" is an internal target that actually runs the tests analogously to "run-ctest".
# It only runs the physical validation tests.
add_custom_target(run-ctest-phys
COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure -R physicalvalidationtests
COMMENT "Running physical validation tests"
- USES_TERMINAL VERBATIM)
-# "check-phys" target runs only physical validation tests.
+ USES_TERMINAL VERBATIM
+ DEPENDS run-physval-sims)
+# "run-ctest-phys-analyze" is the equivalent to "run-ctest-phys" not running the physical validation simulations.
+add_custom_target(run-ctest-phys-analyze
+ COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure -R physicalvalidationtests
+ COMMENT "Running physical validation tests"
+ USES_TERMINAL VERBATIM
+ DEPENDS gmx)
+# "check-phys" target runs only physical validation tests, simulating the systems first.
add_custom_target(check-phys DEPENDS run-ctest-phys)
-# "check-phys-run" target runs only physical validation tests, simulating the systems first
-add_custom_target(check-phys-run DEPENDS run-phys-tests check-phys)
-# "check-phys-prepare" target does only prepare physical validation runs, to be ran externally.
-add_custom_target(check-phys-prepare DEPENDS prepare-phys-tests)
-
+# "check-phys-analyze" target runs only physical validation tests, without simulating the systems first.
+add_custom_target(check-phys-analyze DEPENDS run-ctest-phys-analyze)
# Calling targets "check-all" and "check-phys" does not make much sense if -DGMX_PHYSICAL_VALIDATION=OFF
if(NOT GMX_PHYSICAL_VALIDATION)
DEPENDS run-ctest-phys
COMMENT "No physical validation" VERBATIM)
add_dependencies(check-phys missing-phys-val-phys)
+ add_custom_target(missing-phys-val-phys-analyze
+ COMMAND ${CMAKE_COMMAND} -E echo "NOTE: You called the target `check-phys-analyze`, but ran cmake with\
+ `-DGMX_PHYSICAL_VALIDATION=OFF`. The physical validation tests are therefore unavailable,\
+ and this target is not testing anything."
+ DEPENDS run-ctest-phys-analyze
+ COMMENT "No physical validation" VERBATIM)
+ add_dependencies(check-phys-analyze missing-phys-val-phys-analyze)
add_custom_target(missing-phys-val-all
COMMAND ${CMAKE_COMMAND} -E echo "NOTE: You called the target `check-all`, but ran cmake with\
`-DGMX_PHYSICAL_VALIDATION=OFF`. The physical validation tests are therefore unavailable,\
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+# Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
${CMAKE_SOURCE_DIR}/src/gromacs/selection/scanner.cpp
${CMAKE_SOURCE_DIR}/src/gromacs/selection/parser.cpp
${CMAKE_SOURCE_DIR}/src/gromacs/gpu_utils/ocl_compiler.cpp
+ ${CMAKE_SOURCE_DIR}/src/*\#* # ignore emacs lock files throwing off cmake
)
list(REMOVE_ITEM _inputfiles ${_files_to_ignore})
from __future__ import print_function, division, absolute_import
+
import sys
import os
import shutil
import json
import argparse
import re
+import math
from collections import OrderedDict
from physical_validation import integrator, ensemble, kinetic_energy
return [
'oldpath=$PWD',
'cd ' + directory,
- grompp,
- mdrun,
+ grompp + ' && ' + mdrun,
'cd $oldpath'
]
raise NotImplementedError
@classmethod
- def prepare(cls, input_dir, target_dir, system_name):
+ def prepare(cls, input_dir, target_dir, system_name, nobackup):
raise NotImplementedError
@classmethod
message = ''
max_quantiles = -1
for result, dt, dp in zip(results, dtemp, dpress):
- quantiles = ensemble.check(base_result, result, quiet=(verbosity == 0))
+ quantiles = ensemble.check(base_result, result, verbosity=verbosity)
# filename=os.path.join(system_dir, system_name + '_ens'))
- if any(q > tolerance for q in quantiles):
+ if any(q > tolerance or math.isnan(q) for q in quantiles):
passed = False
if len(quantiles) == 1:
message += '\n --dtemp={:.1f} --dpress={:.1f} : FAILED ({:.1f} quantiles off)'.format(
@classmethod
def prepare_parser(cls, input_dir, target_dir, system_name, nobackup, args):
- return cls.prepare(input_dir, target_dir, system_name)
+ return cls.prepare(input_dir, target_dir, system_name, nobackup)
@classmethod
def analyze_parser(cls, gmx_parser, system_dir, system_name, base_data, verbosity, args):
alpha=args.tolerance)
@classmethod
- def prepare(cls, input_dir, target_dir, system_name):
+ def prepare(cls, input_dir, target_dir, system_name, nobackup):
# no additional sims needed, base is enough
# could check energy writing settings
return []
)
base_result = base_data['reduced']
- p = kinetic_energy.mb_ensemble(base_result, verbose=(verbosity > 0))
+ p = kinetic_energy.mb_ensemble(base_result, verbosity=verbosity)
# filename=os.path.join(system_dir, system_name + '_mb'))
if p >= alpha:
return parser
@classmethod
- def prepare(cls, input_dir, target_dir, system_name):
+ def prepare(cls, input_dir, target_dir, system_name, nobackup):
# no additional sims needed, base is enough
# could check position, velocity & energy writing settings
return []
@classmethod
def prepare_parser(cls, input_dir, target_dir, system_name, nobackup, args):
- return cls.prepare(input_dir, target_dir, system_name)
+ return cls.prepare(input_dir, target_dir, system_name, nobackup)
@classmethod
def analyze_parser(cls, gmx_parser, system_dir, system_name, base_data, verbosity, args):
return parser
@classmethod
- def prepare(cls, input_dir, target_dir, system_name):
+ def prepare(cls, input_dir, target_dir, system_name, nobackup):
# no additional sims needed, base is enough
# could check if there are any constraints in the system
return []
@classmethod
def prepare_parser(cls, input_dir, target_dir, system_name, nobackup, args):
- return cls.prepare(input_dir, target_dir, system_name)
+ return cls.prepare(input_dir, target_dir, system_name, nobackup)
@classmethod
def analyze_parser(cls, gmx_parser, system_dir, system_name, base_data, verbosity, args):
])
-def parse_systems(systems_json, systems_user, source_path):
+def parse_systems(systems_json, systems_user, source_path,
+ analyze_only):
# Parse json
# As the order of the systems and the tests
# might be meaningful, we need ordered dicts!
system_list = json.load(systems_json)
system_dict = OrderedDict()
for system in system_list:
- system_name = system['dir']
- # do the input files exist?
- input_dir = os.path.join(source_path, system_name, 'input')
- if not (os.path.isdir(input_dir) and
- os.path.exists(os.path.join(input_dir, 'system.mdp')) and
- os.path.exists(os.path.join(input_dir, 'system.top')) and
- os.path.exists(os.path.join(input_dir, 'system.gro'))):
- raise ValueError('System ' + system_name + ' in ' +
- systems_json.name + ': Input files not found')
+ system_name = system['name']
+ system_dir = system['dir']
+ # do the input files exist? (only relevant if we're not only analyzing)
+ if not analyze_only:
+ input_dir = os.path.join(source_path, system_dir, 'input')
+ if not (os.path.isdir(input_dir) and
+ os.path.exists(os.path.join(input_dir, 'system.mdp')) and
+ os.path.exists(os.path.join(input_dir, 'system.top')) and
+ os.path.exists(os.path.join(input_dir, 'system.gro'))):
+ raise ValueError('System ' + system_name + ' in ' +
+ systems_json.name + ': Input files not found')
# no need to run systems that we don't test
if 'tests' not in system:
raise ValueError('System ' + system_name + ' in ' +
# delete systems not selected by user
for user_system in systems_user:
if re.match(user_system + '$', system):
- user_key = user_system
break
else:
system_dict.pop(system)
os.makedirs(args.wd)
target_path = args.wd
- # get ordered dict of systems from combination of json file and user choices
- systems = parse_systems(args.json, args.systems, source_path)
-
# parse simulation stage to perform
do_all = not (args.prepare or args.run or args.analyze)
do_prepare = do_all or args.prepare or args.run
do_run = do_all or args.run
do_analysis = do_all or args.analyze
+ # get ordered dict of systems from combination of json file and user choices
+ systems = parse_systems(args.json, args.systems, source_path, args.analyze)
+
# prepare GROMACS interface
if args.gmx:
gmx = args.gmx
gmx_parser = GromacsParser(exe=gmx)
if do_prepare:
+ nsystems = len(systems)
+ n = 0
runs = [] # this will contain all information needed to run the system
for system_name, system in systems.items():
+ n += 1
+ print('\rPreparing run files for systems... [{:d}/{:d}] '.format(n, nsystems), end='')
+ sys.stdout.flush() # py2 compatibility
+ system_dir = system['dir']
system_dirs = [] # list of directories with subsystems
# prepare the base system
- input_dir = os.path.join(source_path, system_name, 'input')
- target_dir = os.path.join(target_path, system_name)
+ input_dir = os.path.join(source_path, system_dir, 'input')
+ target_dir = os.path.join(target_path, system_dir)
mkdir_bk(target_dir, nobackup=args.nobackup)
basedir = os.path.join(target_dir, 'base')
mkdir_bk(basedir, nobackup=args.nobackup)
'mdrun_args': system['mdrun_args']
})
# end of loop over systems
+ print('-- done.')
if write_script:
+ print('Writing run script... ', end='')
+ sys.stdout.flush() # py2 compatibility
script_file = os.path.join(target_path, 'run_simulations.sh')
if not args.nobackup:
file_bk(script_file)
with open(script_file, 'w') as f:
f.write('# This file was created by the physical validation suite for GROMACS.\n')
f.write('\n# Define run variables\n')
- f.write('WORKDIR=' + target_path + '\n')
- f.write('GROMPPCMD="' + gmx + ' grompp"\n')
- f.write('MDRUNCMD="' + gmx + ' mdrun"\n')
+ f.write('WORKDIR=' + os.path.abspath(target_path) + '\n')
+ f.write('GROMPPCMD="' + os.path.abspath(gmx) + ' grompp"\n')
+ f.write('MDRUNCMD="' + os.path.abspath(gmx) + ' mdrun"\n')
f.write('\n# Run systems\n')
f.write('startpath=$PWD\n')
f.write('cd $WORKDIR\n')
for run in runs:
- for cmd in basic_run_cmds(directory=run['dir'],
+ for cmd in basic_run_cmds(directory=os.path.relpath(os.path.abspath(run['dir']),
+ os.path.abspath(target_path)),
grompp_args=run['grompp_args'],
mdrun_args=run['mdrun_args']):
f.write(cmd + '\n')
f.write('\n')
f.write('cd $startpath\n')
+ print('-- done.')
+ print('Run script written to ' + script_file)
+ print('Adapt script as necessary and run simulations. Make sure to preserve the folder structure!')
+ print('Once all simulations have run, analyze the results using `make check-phys-analyze` or '
+ 'using the `-a` flag of `gmx_physicalvalidation.py`.')
# end if write_script
if do_run:
+ nruns = len(runs)
# send messages from GROMACS to log
gmx_log = open(os.path.join(target_path, 'physicalvalidation_gmx.log'), 'w')
- for run in runs:
+ for n, run in enumerate(runs):
+ print('\rRunning (sub)systems... [{:d}/{:d}] '.format(n+1, nruns), end='')
+ sys.stdout.flush() # py2 compatibility
gmx_interface.grompp(mdp='system.mdp',
top='system.top',
gro='system.gro',
stderr=gmx_log,
mpicmd=args.mpicmd)
gmx_log.close()
+ print('-- done.')
# end if do_run
# end if do_prepare
print()
passed = True
for system_name, system in systems.items():
+ system_dir = system['dir']
# save system data if re-used for different test
# massively reduces run time of multiple tests
system_data = {
'full': None
}
# system directory
- target_dir = os.path.join(target_path, system_name)
+ target_dir = os.path.join(target_path, system_dir)
print('Analyzing system ' + system_name)
except Exception as err:
print(' ' + all_tests[test_name].__name__ + ' FAILED (Exception in evaluation)')
print(' '*2 + type(err).__name__ + ': ' + str(err))
+ passed = False
else:
for line in result['message'].split('\n'):
print(' ' + line)
`physical_validation` is a package aimed at testing results obtained
by molecular dynamics simulations for their physical validity.
-Please check [https://shirtsgroup.github.io/physical-validation](https://shirtsgroup.github.io/physical-validation)
+Please check [http://physical-validation.readthedocs.io](http://physical-validation.readthedocs.io)
for the full reference.
`physical_validation` largely incorporates the functionality of
GROMACS-bundled version
-----------------------
The version of physical_validation shipped with GROMACS
-corresponds to commit 045370b.
+corresponds to commit 8b80492.
+It requires
+* numpy
+* scipy
+* pymbar
length_conversion=1.0,
volume_str='nm^3',
volume_conversion=1.0,
+ temperature_str='K',
+ temperature_conversion=1.0,
pressure_str='bar',
pressure_conversion=1.0,
time_str='ps',
time_conversion=1.0)
def __init__(self, exe=None, includepath=None):
+ r"""
+ Create a GromacsParser object
+
+ Parameters
+ ----------
+ exe: str, optional
+ Path to a gmx executable (or simply the executable name, if it is in the path)
+ Default: Looks for `gmx`, then for `gmx_d` in the path. If neither is found, `exe` is
+ set to None, and any parsing including simulation trajectories (`edr`, `trr`
+ and `gro` arguments in `get_simulation_data()`) will fail.
+ includepath: str or List[str], optional
+ Path or list of paths to location(s) of topology file. Is used for the lookup of
+ `#include` statements in topologies.
+ Default: None - no additional topology location. Lookup will be restricted to current
+ directory and location of the `top` file given to `get_simulation_data()`,
+ plus any include locations added to the `mdp` file.
+ """
super(GromacsParser, self).__init__()
self.__interface = GromacsInterface(exe=exe, includepath=includepath)
# gmx energy codes
Returns
-------
result: SimulationData
- A SimulationData filled with the provided ensemble and
- system objects as well as the trajectory data found in the
- edr and trr / gro files.
+ A SimulationData filled with the results of the simulation as described by
+ the provided GROMACS files.
"""
result = SimulationData()
molec_bonds.extend([all_bonds] * molecule['nmolecs'])
molec_bonds_constrained.extend([constrained_bonds] * molecule['nmolecs'])
- topology = SystemData()
- topology.natoms = natoms
- topology.mass = mass
- topology.molecule_idx = molecule_idx
- topology.nconstraints = np.sum(constraints_per_molec)
- topology.nconstraints_per_molecule = constraints_per_molec
- topology.ndof_reduction_tra = 3
- topology.ndof_reduction_rot = 0
+ system = SystemData()
+ system.natoms = natoms
+ system.mass = mass
+ system.molecule_idx = molecule_idx
+ system.nconstraints = np.sum(constraints_per_molec)
+ system.nconstraints_per_molecule = constraints_per_molec
+ system.ndof_reduction_tra = 3
+ system.ndof_reduction_rot = 0
if 'comm-mode' in mdp_options:
- if mdp_options['comm-mode'] == 'Linear':
- topology.ndof_reduction_tra = 3
- elif mdp_options['comm-mode'] == 'Angular':
- topology.ndof_reduction_tra = 3
- topology.ndof_reduction_rot = 3
- if mdp_options['comm-mode'] == 'None':
- topology.ndof_reduction_tra = 0
- topology.bonds = molec_bonds
- topology.constrained_bonds = molec_bonds_constrained
- result.system = topology
+ if mdp_options['comm-mode'] == 'linear':
+ system.ndof_reduction_tra = 3
+ elif mdp_options['comm-mode'] == 'angular':
+ system.ndof_reduction_tra = 3
+ system.ndof_reduction_rot = 3
+ if mdp_options['comm-mode'] == 'none':
+ system.ndof_reduction_tra = 0
+ system.bonds = molec_bonds
+ system.constrained_bonds = molec_bonds_constrained
+ result.system = system
thermostat = ('tcoupl' in mdp_options and
mdp_options['tcoupl'] and
- mdp_options['tcoupl'] != 'no' and
- mdp_options['tcoupl'] != 'No')
+ mdp_options['tcoupl'] != 'no')
stochastic_dyn = ('integrator' in mdp_options and
mdp_options['integrator'] in ['sd', 'sd2', 'bd'])
constant_temp = thermostat or stochastic_dyn
temperature = None
if constant_temp:
- ref_t_key = 'ref-t'
- if ref_t_key not in mdp_options and 'ref_t' in mdp_options:
- ref_t_key = 'ref_t'
- ref_t = [float(t) for t in mdp_options[ref_t_key].split()]
+ ref_t = [float(t) for t in mdp_options['ref-t'].split()]
if len(ref_t) == 1 or np.allclose(ref_t, [ref_t[0]]*len(ref_t)):
temperature = ref_t[0]
else:
raise pv_error.InputError('mdp',
- 'Ensemble definition ambiguous.')
+ 'Ensemble definition ambiguous: Different t-ref values found.')
constant_press = ('pcoupl' in mdp_options and
mdp_options['pcoupl'] and
- mdp_options['pcoupl'] != 'no' and
- mdp_options['pcoupl'] != 'No')
+ mdp_options['pcoupl'] != 'no')
volume = None
pressure = None
if constant_press:
- ref_p_key = 'ref-p'
- if ref_p_key not in mdp_options and 'ref_p' in mdp_options:
- ref_p_key = 'ref_p'
- pressure = float(mdp_options[ref_p_key])
+ ref_p = [float(p) for p in mdp_options['ref-p'].split()]
+ if len(ref_p) == 1 or np.allclose(ref_p, [ref_p[0]]*len(ref_p)):
+ pressure = ref_p[0]
+ else:
+ raise pv_error.InputError('mdp',
+ 'Ensemble definition ambiguous: Different p-ref values found.')
else:
if trajectory_dict is not None:
box = trajectory_dict['box'][0]
r"""
Parser base class
"""
+ @staticmethod
+ def units():
+ raise NotImplementedError
+
def get_simulation_data(self):
raise NotImplementedError
self.__dt = 0
if dt is not None:
self.dt = dt
- self.__topology = None
+ self.__system = None
if system is not None:
self.system = system
self.__ensemble = None
@ensemble.setter
def ensemble(self, ensemble):
if not isinstance(ensemble, EnsembleData):
- raise TypeError('No known conversion from ' + type(ensemble) +
+ raise TypeError('No known conversion from ' + str(type(ensemble)) +
'to EnsembleData')
self.__ensemble = ensemble
@units.setter
def units(self, units):
if not isinstance(units, UnitData):
- raise TypeError('No known conversion from ' + type(units) +
+ raise TypeError('No known conversion from ' + str(type(units)) +
'to UnitData')
self.__units = units
@observables.setter
def observables(self, observables):
if not isinstance(observables, ObservableData):
- raise TypeError('No known conversion from ' + type(observables) +
+ raise TypeError('No known conversion from ' + str(type(observables)) +
'to ObservableData')
self.__observables = observables
@trajectory.setter
def trajectory(self, trajectory):
if not isinstance(trajectory, TrajectoryData):
- raise TypeError('No known conversion from ' + type(trajectory) +
+ raise TypeError('No known conversion from ' + str(type(trajectory)) +
'to TrajectoryData')
self.__trajectory = trajectory
-------
system : SystemData
"""
- return self.__topology
+ return self.__system
@system.setter
- def system(self, topology):
- if not isinstance(topology, SystemData):
- raise TypeError('No known conversion from ' + type(topology) +
+ def system(self, system):
+ if not isinstance(system, SystemData):
+ raise TypeError('No known conversion from ' + str(type(system)) +
'to SystemData')
- self.__topology = topology
+ self.__system = system
@property
def dt(self):
in the system as far as the physical validation tests need it.
The system is described in terms of
- natoms: the total number of atoms in the system
- nconstraints: the total number of constraints in the system
- ndof_reduction_tra: global reduction of translational degrees of freedom (e.g.
- due to constraining the center of mass of the system)
- ndof_reduction_rot: global reduction of rotational degrees of freedom (e.g.
- due to constraining the center of mass of the system)
+
+ * natoms: the total number of atoms in the system
+ * nconstraints: the total number of constraints in the system
+ * ndof_reduction_tra: global reduction of translational degrees of freedom (e.g.
+ due to constraining the center of mass of the system)
+ * ndof_reduction_rot: global reduction of rotational degrees of freedom (e.g.
+ due to constraining the center of mass of the system)
The atoms are described in terms of
- mass: a list of the mass of every atom in the system
+
+ * mass: a list of the mass of every atom in the system
The molecules are described by
- molecule_idx: a list with the indices first atoms of every molecule (this assumes
- that the atoms are sorted by molecule)
- nconstraints_per_molecule: a list with the number of constraints in every molecule
+
+ * molecule_idx: a list with the indices of the first atoms of every molecule (this assumes
+ that the atoms are sorted by molecule)
+ * nconstraints_per_molecule: a list with the number of constraints in every molecule
Only used internally:
- ndof_per_molecule: a list with the number of degrees of freedom of every molecule
+
+ * ndof_per_molecule: a list with the number of degrees of freedom of every molecule
Reserved for future use:
- bonds
- constrained_bonds
-
- Notes:
- ------
- kinetic_energy.mb_ensemble() only requires information on the system
- (natoms, nconstraints, ndof_reduction_tra, ndof_reduction_rot)
- kinetic_energy.equipartition() additionally requires information on the atoms and molecules
- (mass, molecule_idx, nconstraints_per_molecule)
+
+ * bonds
+ * constrained_bonds
+
+ *Notes:*
+
+ * kinetic_energy.mb_ensemble() only requires information on the system
+ (natoms, nconstraints, ndof_reduction_tra, ndof_reduction_rot)
+ * kinetic_energy.equipartition() additionally requires information on the atoms and molecules
+ (mass, molecule_idx, nconstraints_per_molecule)
+
All other tests do not require and information from SystemData.
+
"""
def __init__(self,
The position and velocity trajectories can be accessed either using the getters
of an object, as in
- trajectory.position
- trajectory.velocity
+
+ * trajectory.position
+ * trajectory.velocity
+
or using the key notation, as in
- trajectory['position']
- trajectory['velocity']
+
+ * trajectory['position']
+ * trajectory['velocity']
+
"""
@staticmethod
r"""UnitData: Information about the units used
The information about units consists of different parts:
- * The name of the units (energy_str, length_str, volume_str, pressure_str, time_str),
+
+ * The name of the units (energy_str, length_str, volume_str,
+ temperature_str, pressure_str, time_str),
* the value of kB in the used energy units, and
- * the conversion factor to GROMACS units (kJ/mol, nm, nm^3, bar, ps).
+ * the conversion factor to GROMACS units (kJ/mol, nm, nm^3, K, bar, ps).
+
The names are only used for output (console printing and plotting), and are optional.
The conversion factors and kB are, on the other hand, used in computations and need
to be given.
"""
- def __init__(self, kb, energy_conversion, length_conversion,
- volume_conversion, pressure_conversion, time_conversion,
- energy_str='ENE', length_str='LEN',
- volume_str='VOL', pressure_str='PRESS', time_str='TIME'):
+ def __init__(self, kb, energy_conversion, length_conversion, volume_conversion,
+ temperature_conversion, pressure_conversion, time_conversion,
+ energy_str='ENE', length_str='LEN', volume_str='VOL',
+ temperature_str='TEMP', pressure_str='PRESS', time_str='TIME'):
self.__kb = float(kb)
self.__energy_str = str(energy_str)
self.__length_conversion = float(length_conversion)
self.__volume_str = str(volume_str)
self.__volume_conversion = float(volume_conversion)
+ self.__temperature_str = str(temperature_str)
+ self.__temperature_conversion = float(temperature_conversion)
self.__pressure_str = str(pressure_str)
self.__pressure_conversion = float(pressure_conversion)
self.__time_str = str(time_str)
self.__time_conversion = float(time_conversion)
+ @staticmethod
+ def __parsers():
+ from . import GromacsParser
+ return {
+ 'GROMACS': GromacsParser
+ }
+
+ @classmethod
+ def units(cls, name=None):
+ if name is None:
+ return cls.__parsers().keys()
+
+ if name in cls.__parsers():
+ return cls.__parsers()[name].units()
+ else:
+ raise KeyError('Name ' + name + ' does not match a registred unit type.')
+
def __eq__(self, other):
if not isinstance(other, UnitData):
return False
self.energy_conversion == other.energy_conversion and
self.length_conversion == other.length_conversion and
self.volume_conversion == other.volume_conversion and
+ self.temperature_conversion == other.temperature_conversion and
self.pressure_conversion == other.pressure_conversion and
self.time_conversion == other.time_conversion)
"""str: Volume unit"""
return self.__volume_str
+ @property
+ def temperature_str(self):
+ """str: Temperature unit"""
+ return self.__temperature_str
+
@property
def pressure_str(self):
"""str: Pressure unit"""
@property
def energy_conversion(self):
- """float: Energy conversion factor: 1 ene_unit = energy_conversion * kJ/mol"""
+ """float: Energy conversion factor, 1 energy_unit = energy_conversion * kJ/mol"""
return self.__energy_conversion
@property
def length_conversion(self):
- """float: Length conversion factor: 1 length_unit = length_conversion * nm"""
+ """float: Length conversion factor, 1 length_unit = length_conversion * nm"""
return self.__length_conversion
@property
def volume_conversion(self):
- """float: Volume conversion factor: 1 volume_unit = volume_conversion * nm^3"""
+ """float: Volume conversion factor, 1 volume_unit = volume_conversion * nm^3"""
return self.__volume_conversion
+ @property
+ def temperature_conversion(self):
+ """float: Temperature conversion factor, 1 temperature_unit = temperature_conversion * K"""
+ return self.__temperature_conversion
+
@property
def pressure_conversion(self):
- """float: Pressure conversion factor: 1 pressure_unit = pressure_conversion * bar"""
+ """float: Pressure conversion factor, 1 pressure_unit = pressure_conversion * bar"""
return self.__pressure_conversion
@property
def time_conversion(self):
- """float: Time conversion factor: 1 time_unit = time_conversion * ps"""
+ """float: Time conversion factor, 1 time_unit = time_conversion * ps"""
return self.__time_conversion
import numpy as np
-from .util import timeseries
-from .util import checkensemble
+from .util import ensemble
from .data import SimulationData
from .util import error as pv_error
def check(data_sim_one, data_sim_two,
total_energy=False,
screen=False, filename=None,
- quiet=False):
+ verbosity=1):
r"""
Check the ensemble. The correct check is inferred from the
simulation data given.
Plot distributions on screen. Default: False.
filename : string
Plot distributions to `filename`.pdf. Default: None.
- quiet : bool
- Turns off nearly all messages. Default: False.
+ verbosity : int
+ Level of verbosity, from 0 (quiet) to 3 (very verbose).
+ Default: 1
Returns
-------
+ quantiles : List[float]
+ The number of quantiles the computed result is off the analytical one.
"""
if not SimulationData.compatible(data_sim_one,
'(e.g. target temperature, target pressure), but not '
'in their sampled ensemble (e.g. NVT, NPT).')
- ensemble = data_sim_one.ensemble.ensemble
+ sampled_ensemble = data_sim_one.ensemble.ensemble
- if ensemble == 'NVE' or ensemble == 'muVE':
+ if sampled_ensemble == 'NVE' or sampled_ensemble == 'muVE':
raise pv_error.InputError(['data_sim_one', 'data_sim_two'],
- 'Test of ensemble ' + ensemble + ' is not implemented '
+ 'Test of ensemble ' + sampled_ensemble + ' is not implemented '
'(yet).')
- n1 = data_sim_one.observables.nframes
- n2 = data_sim_two.observables.nframes
-
if total_energy:
+ eneq = 'E'
e1 = data_sim_one.observables.total_energy
e2 = data_sim_two.observables.total_energy
else:
+ eneq = 'U'
e1 = data_sim_one.observables.potential_energy
e2 = data_sim_two.observables.potential_energy
- # padding the array - checkensemble requires same length
- if n1 < n2:
- e1 = np.append(e1, np.zeros(n2-n1))
- if n2 < n1:
- e2 = np.append(e2, np.zeros(n1-n2))
-
- number_of_samples = np.array([n1, n2])
- energy = np.array([e1, e2])
-
- do_linear_fit = True
- do_non_linear_fit = False
- do_max_likelhood = True
- do_maxwell = False
-
quantiles = None
- if ensemble == 'NVT':
- temperatures = np.array([data_sim_one.ensemble.temperature,
- data_sim_two.ensemble.temperature])
-
- analysis_type = 'dbeta-constV'
-
- ge = []
- for e in energy:
- ge.append(timeseries.statisticalInefficiency(e, fast=False))
-
- quantiles = checkensemble.ProbabilityAnalysis(
- number_of_samples, type=analysis_type,
- T_k=temperatures, P_k=None, mu_k=None,
- U_kn=energy, V_kn=None, N_kn=None,
- nbins=40, reptype=None, g=ge,
- bMaxwell=do_maxwell, bLinearFit=do_linear_fit,
- bNonLinearFit=do_non_linear_fit, bMaxLikelihood=do_max_likelhood,
- kB=data_sim_one.units.kb, units=data_sim_one.units,
- filename=filename, screen=screen, quiet=quiet
+ if sampled_ensemble == 'NVT':
+ quantiles = ensemble.check_1d(
+ traj1=e1, traj2=e2,
+ param1=data_sim_one.ensemble.temperature,
+ param2=data_sim_two.ensemble.temperature,
+ kb=data_sim_one.units.kb,
+ quantity=eneq,
+ dtemp=True, dpress=False,
+ verbosity=verbosity,
+ filename=filename, screen=screen
)
- elif ensemble == 'NPT':
+ elif sampled_ensemble == 'NPT':
temperatures = np.array([data_sim_one.ensemble.temperature,
data_sim_two.ensemble.temperature])
pressures = np.array([data_sim_one.ensemble.pressure,
v1 = data_sim_one.observables.volume
v2 = data_sim_two.observables.volume
- # padding the array - checkensemble requires same length
- if n1 < n2:
- v1 = np.append(v1, np.zeros(n2-n1))
- if n2 < n1:
- v2 = np.append(v2, np.zeros(n1-n2))
- volume = np.array([v1, v2])
+
+ # Calculate conversion from p*V to energy units
+ #
+ # GROMACS standard units are
+ # energy: kJ/mol
+ # volume: nm^3
+ # pressure: bar
+ # => pV-term: bar * nm^3 == 1e-25 kJ == 6.022140857e-2 kJ/mol
+ # => pvconvert = 6.022140857e-2
+ # UnitData stores conversion factors relative to GROMACS units
+ # energy: energy_conversion * kJ/mol
+ # volume: volume_conversion * nm^3
+ # pressure: pressure_conversion * bar
+ # => pV-term: [p]*[V] == pressure_conversion * volume_conversion bar * nm^3
+ # Units were checked earlier, so we can use either simulation data structure
+ pvconvert = 6.022140857e-2
+ pvconvert *= (data_sim_one.units.pressure_conversion *
+ data_sim_one.units.volume_conversion)
+ pvconvert /= data_sim_one.units.energy_conversion
if equal_press and not equal_temps:
- analysis_type = 'dbeta-constP'
+ e1 = e1 + pvconvert * pressures[0] * v1
+ e2 = e2 + pvconvert * pressures[1] * v2
+ if eneq == 'U':
+ eneq = 'H'
+ quantiles = ensemble.check_1d(
+ traj1=e1, traj2=e2,
+ param1=temperatures[0],
+ param2=temperatures[1],
+ kb=data_sim_one.units.kb,
+ quantity=eneq,
+ dtemp=True, dpress=False,
+ verbosity=verbosity,
+ filename=filename, screen=screen
+ )
elif equal_temps and not equal_press:
- analysis_type = 'dpressure-constB'
+ quantiles = ensemble.check_1d(
+ traj1=v1, traj2=v2,
+ param1=pressures[0],
+ param2=pressures[1],
+ kb=data_sim_one.units.kb,
+ quantity='V',
+ dtemp=False, dpress=True,
+ temp=temperatures[0],
+ pvconvert=pvconvert,
+ verbosity=verbosity,
+ filename=filename, screen=screen
+ )
else:
- analysis_type = 'dbeta-dpressure'
- do_linear_fit = False
- do_non_linear_fit = False
-
- ge = []
- for e in energy:
- ge.append(timeseries.statisticalInefficiency(e, fast=False))
- gv = []
- for v in volume:
- gv.append(timeseries.statisticalInefficiency(v, fast=False))
- g = np.maximum(ge, gv)
-
- quantiles = checkensemble.ProbabilityAnalysis(
- number_of_samples, type=analysis_type,
- T_k=temperatures, P_k=pressures, mu_k=None,
- U_kn=energy, V_kn=volume, N_kn=None,
- kB=data_sim_one.units.kb, nbins=40,
- bMaxLikelihood=do_max_likelhood, bLinearFit=do_linear_fit,
- bNonLinearFit=do_non_linear_fit, reptype=None,
- g=g,
- bMaxwell=do_maxwell,
- units=data_sim_one.units,
- screen=screen, filename=filename, quiet=quiet
- )
+ traj1 = np.array([e1, v1])
+ traj2 = np.array([e2, v2])
+ param1 = np.array([temperatures[0], pressures[0]])
+ param2 = np.array([temperatures[1], pressures[1]])
+ quantiles = ensemble.check_2d(
+ traj1=traj1, traj2=traj2,
+ param1=param1, param2=param2,
+ kb=data_sim_one.units.kb,
+ pvconvert=pvconvert,
+ quantity=[eneq, 'V'],
+ dtempdpress=True,
+ verbosity=verbosity,
+ filename=filename, screen=screen
+ )
return quantiles
+
+
+def estimate_interval(data, verbosity=1, total_energy=False):
+ r"""
+ In order to perform an ensemble check, two simulations at distinct state
+ point are needed. Choosing two state points too far apart will result
+ in poor or zero overlap between the distributions, leading to very noisy
+ results (due to sample errors in the tails) or a breakdown of the method,
+ respectively. Choosing two state points very close to each others, on the
+ other hand, makes it difficult to distinguish the slope from statistical
+ error in the samples.
+
+ This function implements a rule of thumb based on the standard deviations
+ of distributions. It takes a single simulation and suggests appropriate
+ intervals for a second simulation to be used for ensemble checking.
+
+ Parameters
+ ----------
+ data : SimulationData
+ The performed simulation.
+ verbosity : int, optional
+ If 0, no output is printed on screen. If 1, estimated intervals are
+ printed. If larger, additional information during calculation are
+ printed.
+ Default: 1
+ total_energy : bool, optional
+ Use total energy instead of potential energy only.
+ Default: False
+
+ Returns
+ -------
+ intervals : Dict
+ If `data` was performed under NVT conditions, `intervals` contains only
+ one entry:
+
+ * `'dT'`, containing the suggested temperature interval.
+
+ If `data` was performed under NPT conditions, `intervals` contains three
+ entries:
+
+ * `'dT'`: Suggested temperature interval at constant pressure
+ * `'dP'`: Suggested pressure interval at constant temperature
+ * `'dTdP'`: Suggested combined temperature and pressure interval
+
+ """
+
+ if total_energy:
+ ene = data.observables.total_energy
+ else:
+ ene = data.observables.potential_energy
+
+ if data.ensemble.ensemble == 'NVT':
+ result = ensemble.estimate_interval(
+ ens_string='NVT',
+ ens_temp=data.ensemble.temperature,
+ energy=ene,
+ kb=data.units.kb,
+ verbosity=verbosity,
+ tunit=data.units.temperature_str
+ )
+ elif data.ensemble.ensemble == 'NPT':
+ pvconvert = 6.022140857e-2
+ pvconvert *= (data.units.pressure_conversion *
+ data.units.volume_conversion)
+ pvconvert /= data.units.energy_conversion
+ result = ensemble.estimate_interval(
+ ens_string='NPT',
+ ens_temp=data.ensemble.temperature,
+ energy=ene,
+ kb=data.units.kb,
+ ens_press=data.ensemble.pressure,
+ volume=data.observables.volume,
+ pvconvert=pvconvert,
+ verbosity=verbosity,
+ tunit=data.units.temperature_str,
+ punit=data.units.pressure_str
+ )
+ else:
+ raise NotImplementedError('estimate_interval() not implemented for ensemble ' +
+ data.ensemble.ensemble)
+
+ return result
from .data import SimulationData
-def mb_ensemble(data, alpha=None, verbose=False,
+def mb_ensemble(data, alpha=None, verbosity=1,
screen=False, filename=None):
r"""Checks if a kinetic energy trajectory is Maxwell-Boltzmann distributed.
alpha : float, optional
If a confidence interval is given and verbose=True, the test outputs
a passed / failed message.
- verbose : bool, optional
- Print result details. Default: False.
+ verbosity : int, optional
+ Verbosity level, where 0 is quiet and 3 very chatty. Default: 1.
screen : bool, optional
Plot distributions on screen. Default: False.
filename : string, optional
data.system.nconstraints -
data.system.ndof_reduction_tra -
data.system.ndof_reduction_rot)
- return util_kin.check_mb_ensemble(kin=data.observables['kinetic_energy'],
+
+ return util_kin.check_mb_ensemble(kin=data.observables.kinetic_energy,
temp=data.ensemble.temperature,
ndof=ndof, alpha=alpha,
- kb=data.units.kb, verbose=verbose,
+ kb=data.units.kb, verbosity=verbosity,
screen=screen, filename=filename,
ene_unit=data.units.energy_str)
List of 1d arrays containing molecule indeces defining groups. Useful to pre-define
groups of molecules (e.g. solute / solvent, liquid mixture species, ...). If None,
no pre-defined molecule groups will be tested. Default: None.
- Note: If an empty 1d array is found as last element in the list, the remaining
- molecules are collected in this array. This allows, for example, to only
- specify the solute, and indicate the solvent by giving an empty array.
+
+ *Note:* If an empty 1d array is found as last element in the list, the remaining
+ molecules are collected in this array. This allows, for example, to only
+ specify the solute, and indicate the solvent by giving an empty array.
random_divisions : int, optional
Number of random division tests attempted. Default: 0 (random division tests off).
random_groups : int, optional
# Boston, MA 02110-1301 USA #
# #
###########################################################################
+
+# low-level implementations
+from . import kinetic_energy
+from . import ensemble
+from . import integrator
+
+# helper modules
+from . import trajectory
+from . import plot
+from . import error
+from . import gromacs_interface
+++ /dev/null
-###########################################################################
-# #
-# physical_validation, #
-# a python package to test the physical validity of MD results #
-# #
-# Written by Michael R. Shirts <michael.shirts@colorado.edu> #
-# Pascal T. Merz <pascal.merz@colorado.edu> #
-# #
-# Copyright (C) 2012 University of Virginia #
-# (C) 2017 University of Colorado Boulder #
-# #
-# This library is free software; you can redistribute it and/or #
-# modify it under the terms of the GNU Lesser General Public #
-# License as published by the Free Software Foundation; either #
-# version 2.1 of the License, or (at your option) any later version. #
-# #
-# This library is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
-# Lesser General Public License for more details. #
-# #
-# You should have received a copy of the GNU Lesser General Public #
-# License along with this library; if not, write to the #
-# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
-# Boston, MA 02110-1301 USA #
-# #
-###########################################################################
-r"""
-This file largely corresponds to the checkensemble.py code originally
-published on https://github.com/shirtsgroup/checkensemble. It now serves
-as the low-level functionality of the high-level module
-:mod:`physical_validation.ensemble`.
-"""
-from __future__ import division
-from __future__ import print_function
-import numpy
-import numpy.random
-import scipy
-import scipy.optimize
-import scipy.stats
-
-from . import plot
-
-#==========================
-# HELPER FUNCTIONS
-#=========================
-
-def check_twodtype(type): # check if it's a valid type
- if (type=='dbeta-dpressure') or (type=='dbeta-dmu') or (type=='dbeta-ddmu'):
- #print 'Warning: can\'t do 3D fits currently'
- # throw an exception?
- return False
- else:
- return True
-
-def PrepConversionFactors(eunits='kJ/mol',punits='bar',vunits='nm^3'):
-
- if (vunits == 'nm^3') and (punits == 'bar'):
- # default conversion is gromacs nm3*bar to kJ/mol
- # 1 nm3.bar = 0.00060221415 m3.bar / mol, 0.01 m3.bar/mol = 1 kJ/mol --> 6.0221415x10^-2 kJ/mol / nm3/bar
- pvconvert = 0.06221415
- elif (vunits == 'kT' and punits == 'kT'):
- pvconvert = 1
- else:
- print("I don't know the conversion factor for %s volume units and %s pressure units" % (vunits,punits))
- if (eunits == 'kJ/mol') or (eunits == 'kT'):
- econvert = 1;
- elif (eunits == 'kcal/mol'):
- pvconvert /= 4.184 # if eunits are in kcal/mol, then we need to change pconvert to kcal/mol / nm3/bar
- else:
- print("I don't know those energy units")
-
- muconvert = -1*econvert
- return econvert,pvconvert,muconvert
-
-
-def prepare_conversion_factors(units):
- # GROMACS standard units are
- # energy: kJ/mol
- # volume: nm^3
- # pressure: bar
- # => pV-term: bar * nm^3 == 1e-25 kJ == 6.022140857e-2 kJ/mol
- # => pvconvert = 6.022140857e-2
-
- econvert = 1
- pvconvert = 6.022140857e-2
-
- # UnitData stores conversion factors relative to GROMACS units
- # energy: energy_conversion * kJ/mol
- # volume: volume_conversion * nm^3
- # pressure: pressure_conversion * bar
- # => pV-term: [p]*[V] == pressure_conversion * volume_conversion bar * nm^3
- pvconvert *= units.pressure_conversion * units.volume_conversion
- pvconvert /= units.energy_conversion
-
- # TODO: check this once muVT is implemented
- muconvert = -econvert
-
- return econvert, pvconvert, muconvert
-
-
-def PrepStrings(type,vunits='kT'):
-
- if (type == 'dbeta-constV'):
- vt = 'E'
- plinfit = r'$-(\beta_2-\beta_1)E$'
- pnlfit = r'$\exp(-(\beta_2-\beta_1)E)$'
- varstring = r'$E (kT)$'
- legend_location = 'upper left'
-
- elif (type == 'dbeta-constP'):
- vt = 'H'
- plinfit = r'$-(\beta_2-\beta_1)H$'
- pnlfit = r'$\exp(-(\beta_2-\beta_1)H)$'
- varstring = r'$H (kT)$'
- legend_location = 'upper left'
-
- elif (type == 'dbeta-constmu'):
- #averages are A = <E>-mu<N>.
- vt = 'A'
- plinfit = r'$-(\beta_2-\beta_1)A$'
- pnlfit = r'$\exp(-(\beta_2-\beta_1)A)$'
- varstring = r'$A (kT)$'
- legend_location = 'upper left'
-
- elif (type == 'dbeta-constdmu'):
- #averages are <E>-dmu<N>.
- vt = '\langle E \rangle - \Delta \mu \langle N_2\rangle'
- plinfit = r'$-(\beta_2-\beta_1)A$'
- pnlfit = r'$\exp(-(\beta_2-\beta_1)A)$'
- varstring = r'$A (kT)$'
- legend_location = 'upper left'
-
- elif (type == 'dpressure-constB'):
- vt = 'V'
- plinfit = r'$-\beta(P_2-P_1)V$'
- pnlfit = r'$\exp(-\beta(P_2-P_1)V)$'
- varstring = r'$V (' + vunits + r')$'
- legend_location = 'upper right'
-
- elif (type == 'dmu-constB'):
- vt = 'N'
- plinfit = r'$\beta(\mu_2-\mu_1)N$'
- pnlfit = r'$\exp(\beta(\mu_2-\mu_1)N)$'
- varstring = r'$N (' + 'number' + r')$'
- legend_location = 'upper left'
-
- elif (type == 'ddmu-constB'):
- vt = 'N_2'
- plinfit = r'$\beta(\Delta \mu_2-\ Delta \mu_1)N_2$'
- pnlfit = r'$\exp(\beta(\Delta \mu_2-\Delta \mu_1)N)_2$'
- varstring = r'$N (' + 'number' + r')$'
- legend_location = 'upper left'
-
- elif (type == 'dbeta-dpressure') or (type == 'dbeta-dmu') or (type == 'dbeta-ddmu'):
- vt = ''
- plinfit = ''
- pnlfit = ''
- varstring = ''
- legend_location = ''
- else:
- print("Type is not defined for plotting!")
-
- pstring = 'ln(P_2(' + vt + ')/P_1(' + vt + '))'
-
- return vt,pstring,plinfit,pnlfit,varstring,legend_location
-
-
-def PrepInputs(N_k,conversions,type='dbeta-constV',beta=None,beta_ave=None,P=None,P_ave=None,mu=None,mu_ave=None,U_kn=None,V_kn=None,N_kn=None):
-
- """
- useg can be either "scale", where uncertainties are scaled, or "subsample" resulting in subsampled data.
- """
-
- pvconvert = conversions[1]
- # convenience variables
- N0 = N_k[0]
- N1 = N_k[1]
- maxN = numpy.max(N_k);
-
- # Currently seven types; fitting parameters are:
- # NVT
- # 1) free energy, dbeta, no P, N - constants are beta_ave, variables (vectors) are E
- # NPT
- # 2) free energy, dpressure - constants are p_ave, variables (vectors) are V
- # 3) free energy, dbeta, constP - constants are beta_ave, variables (vectors) are H
- # 4) free energy, dbeta, dpressure - constants are beta_ave, p_ave, variables (vectors) are E and V
- # muPT
- # 5) free energy, dmu = constants are mu_ave, variables (vectors) are N
- # 6) free energy, dbeta, constmu - constants are beta_ave, variables (vectors) are A
- # 7) free energy, dbeta, dmu - constants are beta_ave, mu_ave, variables (vectors) are E and N
- #
- # 8) free energy, ddmu = constants are dmu_ave, variables (vectors) are N_2
- # 9) free energy, dbeta, constdmu = constants are beta_ave, variables (vectors) are E-\delta\mu\N
- # 10) free energy, dbeta, ddmu - constants are beta_ave, dmu_ave, variables (vectors) are E and N_2
-
- # NVT types
- if (type == 'dbeta-constV'):
- # allocate space
- v = numpy.zeros([1,2,maxN],float) # the variables
- vr = numpy.zeros([1,2,maxN],float)
- const = numpy.zeros(1,float) # parameter constants
- dp = numpy.zeros(1,float) # "true" change in constants
-
- v[0,0,0:N0] = U_kn[0,0:N0]
- v[0,1,0:N1] = U_kn[1,0:N1]
- const[0] = 0.5*(beta[0] + beta[1])
- dp[0] = beta[0] - beta[1]
-
- # NPT types
- elif (type == 'dbeta-constP'):
- # allocate space
- v = numpy.zeros([1,2,maxN],float) # the variables
- vr = numpy.zeros([1,2,maxN],float)
- const = numpy.zeros(1,float) # parameter constants
- dp = numpy.zeros(1,float) # "true" change in constants
-
- v[0,0,0:N0] = U_kn[0,0:N0] + conversions[1]*P_ave*V_kn[0,0:N0] # everything goes into energy units
- v[0,1,0:N1] = U_kn[1,0:N1] + conversions[1]*P_ave*V_kn[1,0:N1]
- const[0] = 0.5*(beta[0] + beta[1])
- dp[0] = beta[0] - beta[1]
-
- elif (type == 'dpressure-constB'):
- # allocate space
- v = numpy.zeros([1,2,maxN],float)
- vr = numpy.zeros([1,2,maxN],float)
- const = numpy.zeros(1,float)
- dp = numpy.zeros(1,float)
-
- v[0,0,0:N0] = V_kn[0,0:N0]
- v[0,1,0:N1] = V_kn[1,0:N1]
- const[0] = 0.5*pvconvert*beta_ave*(P[0] + P[1]) # units of 1/volume
- dp[0] = pvconvert*beta_ave*(P[0] - P[1]) # units of 1/volume
-
- elif (type == 'dbeta-dpressure'):
- # allocate space
- v = numpy.zeros([2,2,maxN],float)
- vr = numpy.zeros([2,2,maxN],float)
- const = numpy.zeros(2,float)
- dp = numpy.zeros(2,float)
- v[0,0,0:N0] = U_kn[0,0:N0]
- v[0,1,0:N1] = U_kn[1,0:N1]
- v[1,0,0:N0] = V_kn[0,0:N0]
- v[1,1,0:N1] = V_kn[1,0:N1]
- const[0] = 0.5*(beta[0] + beta[1]) # units of 1/E
- const[1] = 0.5*pvconvert*(P[0] + P[1]) # units of E/V?
- dp[0] = beta[0] - beta[1] # units of 1/Energy
- dp[1] = pvconvert*(beta[0]*P[0] - beta[1]*P[1]) # units of 1/Volume
-
-# mu V T types
-
- elif (type == 'dbeta-constmu'):
- # allocate space
- v = numpy.zeros([1,2,maxN],float) # the variables
- vr = numpy.zeros([1,2,maxN],float)
- const = numpy.zeros(1,float) # parameter constants
- dp = numpy.zeros(1,float) # "true" change in constants
-
- v[0,0,0:N0] = U_kn[0,0:N0] - mu_ave*N_kn[0,0:N0] # everything goes into energy units
- v[0,1,0:N1] = U_kn[1,0:N1] - mu_ave*N_kn[1,0:N1] # everything is in energy units.
- const[0] = 0.5*(beta[0] + beta[1])
- dp[0] = beta[0] - beta[1]
-
- elif (type == 'dmu-constB'):
- # allocate space
- v = numpy.zeros([1,2,maxN],float)
- vr = numpy.zeros([1,2,maxN],float)
- const = numpy.zeros(1,float)
- dp = numpy.zeros(1,float)
-
- v[0,0,0:N0] = N_kn[0,0:N0]
- v[0,1,0:N1] = N_kn[1,0:N1]
- const[0] = -1*(beta_ave*(mu[0] + mu[1])) # units of 1/energy?
- dp[0] = -1*(beta_ave*(mu[0] - mu[1])) # units of 1/energy?
-
- elif (type == 'dbeta-dmu'):
- # allocate space
- v = numpy.zeros([2,2,maxN],float)
- vr = numpy.zeros([2,2,maxN],float)
- const = numpy.zeros(2,float)
- dp = numpy.zeros(2,float)
- v[0,0,0:N0] = U_kn[0,0:N0]
- v[0,1,0:N1] = U_kn[1,0:N1]
- v[1,0,0:N0] = N_kn[0,0:N0]
- v[1,1,0:N1] = N_kn[1,0:N1]
- const[0] = 0.5*(beta[0] + beta[1]) # units of 1/E
- const[1] = -0.5*(mu[0] + mu[1]) # units of E/V?
- dp[0] = beta[0] - beta[1] # units of 1/Energy
- dp[1] = -(beta[0]*mu[0] - beta[1]*mu[1]) # units of 1/number?
-
- else:
- print("Warning: Type of analysis %s is not defined!" % (type))
-
- return dp,const,v,vr
-
-def LogLikelihood(x,N_k,const,v):
-
- L = len(x)
-
- N0 = N_k[0]
- N1 = N_k[1]
- N = N0+N1
-
- M = numpy.log((1.0*N1)/(1.0*N0))
-
- #D0 = M + beta_ave*x[0] + U0*x[1]
- #D1 = M + beta_ave*x[0] + U1*x[1]
-
- D0 = D1 = M + const[0]*x[0]
- for i in range(L-1):
- D0 = D0 + v[i,0,0:N0]*x[i+1]
- D1 = D1 + v[i,1,0:N1]*x[i+1]
-
- # old version:
- # E0 = 1 + numpy.exp(D0)
- # E1 = 1 + numpy.exp(-D1)
- #
- # # this is the negative of the log likelihood, since we want to maximize it using fmin
- #
- # of = ((numpy.sum(numpy.log(E0)) + numpy.sum(numpy.log(E1)))) / N
-
- def log_1_plus_exp(y):
- def f(yy):
- with numpy.errstate(over='raise'):
- try:
- xx = numpy.log(1 + numpy.exp(yy))
- except FloatingPointError:
- xx = yy + numpy.log(1 + numpy.exp(-yy))
- return xx
- return numpy.vectorize(f)(y)
-
- E0 = log_1_plus_exp(D0)
- E1 = log_1_plus_exp(-D1)
-
- # this is the negative of the log likelihood, since we want to maximize it using fmin
-
- of = (numpy.sum(E0) + numpy.sum(E1)) / N
-
- return of
-
-def dLogLikelihood(x,N_k,const,v):
- """
- Derivative with respect to the parameters, to aid the minimization.
-
- """
-
- L = len(x)
-
- N0 = N_k[0]
- N1 = N_k[1]
- N = N0+N1
-
- M = numpy.log((1.0*N1)/(1.0*N0))
-
- D0 = D1 = M + const[0]*x[0]
- for i in range(L-1):
- D0 = D0 + v[i,0,0:N0]*x[i+1]
- D1 = D1 + v[i,1,0:N1]*x[i+1]
-
- # old version:
- # E0 = 1/(1 + numpy.exp(-D0))
- # E1 = 1/(1 + numpy.exp(D1))
-
- def inv_1_plus_exp(y):
- def f(yy):
- with numpy.errstate(over='raise'):
- try:
- xx = 1. / (1 + numpy.exp(yy))
- except FloatingPointError:
- xx = 0.
- return xx
- return numpy.vectorize(f)(y)
-
- E0 = inv_1_plus_exp(-D0)
- E1 = inv_1_plus_exp(D1)
-
- g = numpy.zeros(L,dtype=numpy.float64)
-
- #this is the gradient of -log likelihood
- #g[0] = (1.0/N)*(numpy.sum(beta*E0) - numpy.sum(beta*E1))
- #g[1] = (1.0/N)*(numpy.sum(U0*E0) - numpy.sum(U1*E1))
- #g[2] = (1.0/N)*(numpy.sum(V0*E0) - numpy.sum(V1*E1))
-
- g[0] = const[0]*(numpy.sum(E0) - numpy.sum(E1))
- for i in range(L-1):
- g[i+1] = numpy.sum(v[i,0,0:N0]*E0) - numpy.sum(v[i,1,0:N1]*E1)
- return (1.0/N)*g
-
-def d2LogLikelihood(x,N_k,const,v):
-
- """
-
- beta = const[0]
- pave = const[1]
-
- if D = M + beta*x[0] + x[1]*U
- I = \sum_{i=1}^N [[-beta^2/S,-beta*U/S],[-beta*U/S,-U^2/S]] where S = [(1+exp(-D))*(1+exp(D))]
-
- if D = M + beta*x[0] + x[1]*U + x[2]*V
- I = \sum_{i=1}^N [[-beta^2/S,-beta*U/S,-beta*V/S],[-beta*U/S,-U^2/S,-U*V/S],[-beta*V/S,-U*V^2/S,-V^2/S]] where S = [(1+exp(-D))*(1+exp(D))]
- """
-
- L = len(x)
-
- N0 = N_k[0]
- N1 = N_k[1]
- N = N0+N1
- M = numpy.log((1.0*N1)/(1.0*N0))
-
- vall = numpy.zeros([L-1,N],dtype=numpy.float64)
- for i in range(L-1):
- vall[i,0:N0] = v[i,0,0:N0]
- vall[i,N0:N] = v[i,1,0:N1]
-
- D = M + const[0]*x[0]
- for i in range(L-1):
- D = D + vall[i,:]*x[i+1]
-
- hf = numpy.zeros([L,L,N],dtype=numpy.float64)
-
- cones = const[0] * numpy.ones(N,dtype=numpy.float64)
-
- # fix this to match the
- for i in range(L):
- if (i == 0):
- a = cones
- else:
- a = vall[i-1,:]
- for j in range(L):
- if (j == 0):
- b = cones
- else:
- b = vall[j-1,:]
- hf[i,j,:] = a*b
-
- def log_E(y):
- def f(yy):
- with numpy.errstate(over='raise'):
- try:
- numpy.exp(yy)
- except FloatingPointError:
- return yy + numpy.log(1 + 2*numpy.exp(-yy) + numpy.exp(-2*yy))
- try:
- numpy.exp(-yy)
- except FloatingPointError:
- return -yy + numpy.log(1 + 2*numpy.exp(yy) + numpy.exp(2*yy))
- return numpy.log(2 + numpy.exp(yy) + numpy.exp(-yy))
- return numpy.vectorize(f)(y)
-
- #E = (1 + numpy.exp(-D)) * (1 + numpy.exp(D))
- hf_E = hf*numpy.exp(-log_E(D))
-
- # this is the hessian of the minimum function (not the max)
- h = -numpy.sum(hf_E,axis=2)/N
-
- return h
-
-def SolveMaxLike(x, N_k, const, v, tol = 1e-10, maxiter=20, quiet=False):
-
- converge = False
- itol = 1e-2
- rtol = 1e-2
- lasttol = 100000;
-
- for i in range(maxiter):
- lastx = x
- gx = numpy.transpose(dLogLikelihood(x,N_k,const,v))
- nh = d2LogLikelihood(x,N_k,const,v)
- dx = numpy.linalg.solve(nh,gx)
- if numpy.any(numpy.isinf(dx)):
- checktol = numpy.inf
- checkrtol = numpy.inf
- else:
- x += dx # adding, not subtracting because of the handling of negatives
- rx = dx/x
- checktol = numpy.sqrt(numpy.dot(dx,dx))
- checkrtol = numpy.sqrt(numpy.dot(rx,rx))
- if (checkrtol < tol):
- break
- converge = True
- if (checkrtol > 1.0) and (checktol > lasttol): # we are possibly diverging. Switch to cg for a bit.
- x = scipy.optimize.fmin_cg(LogLikelihood,lastx,fprime=dLogLikelihood,gtol=itol,args=(N_k,const,v),disp=(not quiet))
- itol *= rtol
- dx = x - lastx
- checktol = numpy.sqrt(numpy.dot(dx,dx))
- lasttol = checktol
-
- if (i == maxiter) and (converge == False):
- print("Too many iterations, convergence failing")
-
- return x
-
-def MaxLikeUncertain(x,N_k,const,v,vave):
-
- L = len(x)
- d = numpy.zeros(L,float)
-
- # multiply back by N, since we were dealing with smaller numbers for numerical robustness.
- fi = -(N_k[0] + N_k[1])*d2LogLikelihood(x,N_k,const,v)
-
- d2 = numpy.linalg.inv(fi)
-
- # We have a fit to the line y = m(x-Uave) + b, so to get the uncertainty in the free energy back, we need
- # to add M*Uave back to f. The uncertainty in cov(b + m*Uave,b+m*Uave) = var(b) + Uave**2*var(m) + Uave*cov(v,m)
-
- # For two dimensioms, we have the line y = m1(x1-vave1) + m2(x2-vave2) + b
- # Uncertainty will be cov(b + m1vave1 + m2vave2) = var(b) + vave1^2 var(m1) + vave2^2 var(m2)
- # + 2vave1 cov(m1,b) + 2vave2 cov(m2,b)
- # + 2vave1 cov(m1,m2)
- d[0] = const[0]**2*d2[0,0]
- for i in range(1,L):
- d[0] += vave[i-1]**2*d2[i,i] + 2*vave[i-1]*d2[0,i] # should this last one be plus or minus
- d[i] = d2[i,i]
- for j in range(i+1,L-1):
- d[0] += 2*vave[i-1]*vave[j-1]*d2[i,j]
- d = numpy.sqrt(d)
- return d
-
-def MaxLikeParams(N_k,dp,const,v,df=0,analytic_uncertainty=False,g=1,quiet=False):
-
- L = len(const)
- optimum = numpy.zeros(L+1,float)
- vave = numpy.zeros(L,dtype=numpy.float64)
- vmod = numpy.zeros([L,2,numpy.max(N_k)],dtype=numpy.float64)
- # for numerical stability, we need to translate the curve
- for i in range(L):
- vave[i] = (numpy.sum(v[i,0,0:N_k[0]]) + numpy.sum(v[i,1,0:N_k[1]]))/numpy.sum(N_k)
- vmod[i,0,0:N_k[0]] = v[i,0,0:N_k[0]] - vave[i]
- vmod[i,1,0:N_k[1]] = v[i,1,0:N_k[1]] - vave[i]
-
- xstart = numpy.zeros(L+1,float)
- for i in range(L):
- xstart[0] += vave[i]*dp[i]
- xstart[i+1] = dp[i]
- xstart[0] += df
- xstart[0] /= const[0]
-
- ofit = SolveMaxLike(xstart,N_k,const,vmod,tol=1e-10,quiet=quiet)
-
- optimum[0] = ofit[0]*const[0]
- for i in range(L):
- optimum[i+1] = ofit[i+1]
- optimum[0] -= (vave[i]*ofit[i+1])
-
- results = []
- results.append(optimum)
- if (analytic_uncertainty):
- doptimum = MaxLikeUncertain(ofit,N_k,const,vmod,vave)*numpy.sqrt(numpy.average(g))
- results.append(doptimum)
-
- return results
-
-#========================================================================================
-# Functions for computing Bennett acceptance ratio
-#==========================================================================================
-def logsum(a_n):
- """
- Compute the log of a sum of exponentiated terms exp(a_n) in a numerically-stable manner:
-
- logsum a_n = max_arg + \log \sum_{n=1}^N \exp[a_n - max_arg]
-
- where max_arg = max_n a_n. This is mathematically (but not numerically) equivalent to
-
- logsum a_n = \log \sum_{n=1}^N \exp[a_n]
-
- ARGUMENTS
- a_n (numpy array) - a_n[n] is the nth exponential argument
-
- RETURNS
- log_sum (float) - the log of the sum of exponentiated a_n, log (\sum_n exp(a_n))
-
- EXAMPLE
-
- """
-
- # Compute the maximum argument.
- max_log_term = numpy.max(a_n)
-
- # Compute the reduced terms.
- terms = numpy.exp(a_n - max_log_term)
-
- # Compute the log sum.
- log_sum = numpy.log(sum(terms)) + max_log_term
-
- return log_sum
-
-#=============================================================================================
-# Bennett acceptance ratio function to be zeroed to solve for BAR.
-#=============================================================================================
-def BARzero(w_F,w_R,DeltaF):
- """
- ARGUMENTS
- w_F (numpy.array) - w_F[t] is the forward work value from snapshot t.
- t = 0...(T_F-1) Length T_F is deduced from vector.
- w_R (numpy.array) - w_R[t] is the reverse work value from snapshot t.
- t = 0...(T_R-1) Length T_R is deduced from vector.
-
- DeltaF (float) - Our current guess
-
- RETURNS
-
- fzero - a variable that is zeroed when DeltaF satisfies BAR.
- """
-
- # Recommended stable implementation of BAR.
-
- # Determine number of forward and reverse work values provided.
- T_F = float(w_F.size) # number of forward work values
- T_R = float(w_R.size) # number of reverse work values
-
- # Compute log ratio of forward and reverse counts.
- M = numpy.log(T_F / T_R)
-
- # Compute log numerator.
- # log f(W) = - log [1 + exp((M + W - DeltaF))]
- # = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )
- # = - maxarg - log[exp[-maxarg] + (T_F/T_R) exp[(M + W - DeltaF) - maxarg]]
- # where maxarg = max( (M + W - DeltaF) )
- exp_arg_F = (M + w_F - DeltaF)
- max_arg_F = numpy.choose(numpy.greater(0.0, exp_arg_F), (0.0, exp_arg_F))
- log_f_F = - max_arg_F - numpy.log( numpy.exp(-max_arg_F) + numpy.exp(exp_arg_F - max_arg_F) )
- log_numer = logsum(log_f_F) - numpy.log(T_F)
-
- # Compute log_denominator.
- # log_denom = log < f(-W) exp[-W] >_R
- # NOTE: log [f(-W) exp(-W)] = log f(-W) - W
- exp_arg_R = (M - w_R - DeltaF)
- max_arg_R = numpy.choose(numpy.greater(0.0, exp_arg_R), (0.0, exp_arg_R))
- log_f_R = - max_arg_R - numpy.log( numpy.exp(-max_arg_R) + numpy.exp(exp_arg_R - max_arg_R) ) - w_R
- log_denom = logsum(log_f_R) - numpy.log(T_R)
-
- # This function must be zeroed to find a root
- fzero = DeltaF - (log_denom - log_numer)
-
- return fzero
-
-def BAR(w_F, w_R, DeltaF=0.0, maximum_iterations=500, relative_tolerance=1.0e-10, verbose=False):
- """
- Compute free energy difference using the Bennett acceptance ratio (BAR) method using false position
-
- ARGUMENTS
- w_F (numpy.array) - w_F[t] is the forward work value from snapshot t.
- t = 0...(T_F-1) Length T_F is deduced from vector.
- w_R (numpy.array) - w_R[t] is the reverse work value from snapshot t.
- t = 0...(T_R-1) Length T_R is deduced from vector.
-
- OPTIONAL ARGUMENTS
-
- DeltaF (float) - DeltaF can be set to initialize the free energy difference with a guess (default 0.0)
- maximum_iterations (int) - can be set to limit the maximum number of iterations performed (default 500)
- relative_tolerance (float) - can be set to determine the relative tolerance convergence criteria (defailt 1.0e-5)
- verbose (boolean) - should be set to True if verbse debug output is desired (default False)
-
- RETURNS
-
- [DeltaF, dDeltaF] where dDeltaF is the estimated std dev uncertainty
-
- REFERENCE
-
- [1] Shirts MR, Bair E, Hooker G, and Pande VS. Equilibrium free energies from nonequilibrium
- measurements using maximum-likelihood methods. PRL 91(14):140601, 2003.
-
- EXAMPLES
-
- Compute free energy difference between two specified samples of work values.
-
- """
-
- UpperB = numpy.average(w_F)
- LowerB = -numpy.average(w_R)
-
- FUpperB = BARzero(w_F,w_R,UpperB)
- FLowerB = BARzero(w_F,w_R,LowerB)
- nfunc = 2;
-
- if (numpy.isnan(FUpperB) or numpy.isnan(FLowerB)):
- # this data set is returning NAN -- will likely not work. Return 0, print a warning:
- print("Warning: BAR is likely to be inaccurate because of poor sampling. Guessing 0.")
- return [0.0, 0.0]
-
- while FUpperB*FLowerB > 0:
- # if they have the same sign, they do not bracket. Widen the bracket until they have opposite signs.
- # There may be a better way to do this, and the above bracket should rarely fail.
- if verbose:
- print('Initial brackets did not actually bracket, widening them')
- FAve = (UpperB+LowerB)/2
- UpperB = UpperB - max(abs(UpperB-FAve),0.1)
- LowerB = LowerB + max(abs(LowerB-FAve),0.1)
- FUpperB = BARzero(w_F,w_R,UpperB)
- FLowerB = BARzero(w_F,w_R,LowerB)
- nfunc += 2
-
- # Iterate to convergence or until maximum number of iterations has been exceeded.
-
- for iteration in range(maximum_iterations):
-
- DeltaF_old = DeltaF
-
- # Predict the new value
- if (LowerB==0.0) and (UpperB==0.0):
- DeltaF = 0.0
- FNew = 0.0
- else:
- DeltaF = UpperB - FUpperB*(UpperB-LowerB)/(FUpperB-FLowerB)
- FNew = BARzero(w_F,w_R,DeltaF)
- nfunc += 1
-
- if FNew == 0:
- # Convergence is achieved.
- if verbose:
- print("Convergence achieved.")
- relative_change = 10^(-15)
- break
-
- # Check for convergence.
- if (DeltaF == 0.0):
- # The free energy difference appears to be zero -- return.
- if verbose: print("The free energy difference appears to be zero.")
- return [0.0, 0.0]
-
- relative_change = abs((DeltaF - DeltaF_old)/DeltaF)
- if verbose:
- print("relative_change = %12.3f" % relative_change)
-
- if ((iteration > 0) and (relative_change < relative_tolerance)):
- # Convergence is achieved.
- if verbose:
- print("Convergence achieved.")
- break
-
- if FUpperB*FNew < 0:
- # these two now bracket the root
- LowerB = DeltaF
- FLowerB = FNew
- elif FLowerB*FNew <= 0:
- # these two now bracket the root
- UpperB = DeltaF
- FUpperB = FNew
- else:
- message = 'WARNING: Cannot determine bound on free energy'
- raise BoundsError(message)
-
- if verbose:
- print("iteration %5d : DeltaF = %16.3f" % (iteration, DeltaF))
-
- # Report convergence, or warn user if not achieved.
- if iteration < maximum_iterations:
- if verbose:
- print('Converged to tolerance of %e in %d iterations (%d function evaluations)' % (relative_change, iteration,nfunc))
- else:
- message = 'WARNING: Did not converge to within specified tolerance. max_delta = %f, TOLERANCE = %f, MAX_ITS = %d' % (relative_change, tolerance, maximum_iterations)
- raise ConvergenceException(message)
-
- # Compute asymptotic variance estimate using Eq. 10a of Bennett, 1976 (except with n_1<f>_1^2 in
- # the second denominator, it is an error in the original
- # NOTE: The numerical stability of this computation may need to be improved.
-
- # Determine number of forward and reverse work values provided.
-
- T_F = float(w_F.size) # number of forward work values
- T_R = float(w_R.size) # number of reverse work values
-
- # Compute log ratio of forward and reverse counts.
- M = numpy.log(T_F / T_R)
-
- T_tot = T_F + T_R
-
- C = M-DeltaF
-
- fF = 1/(1+numpy.exp(w_F + C))
- fR = 1/(1+numpy.exp(w_R - C))
-
- afF2 = (numpy.average(fF))**2
- afR2 = (numpy.average(fR))**2
-
- vfF = numpy.var(fF)/T_F
- vfR = numpy.var(fR)/T_R
-
- variance = vfF/afF2 + vfR/afR2
-
- dDeltaF = numpy.sqrt(variance)
- if verbose:
- print("DeltaF = %8.3f +- %8.3f" % (DeltaF, dDeltaF))
- return (DeltaF, dDeltaF)
-
-def Print1DStats(title,type,fitvals,convert,trueslope,const,dfitvals='N/A'):
-
- # if dB, 'convert' is kB
- # if dP, 'convert' is beta*PV_convert
- # first element in fitvals is free energies df
- dfs = fitvals[0]
- # second element in fitvals is the slope
- slopes = fitvals[1]
-
- # Need to fix this so that uncertainties aren't printed when ddf is 'N/A'
-
- df = numpy.average(dfs) # true even if there is only one per slope
- if (numpy.size(dfs) > 1):
- ddf = numpy.std(dfs)
- else:
- ddf = dfitvals[0]
-
- slope = numpy.average(slopes) # true even if there is only one
- if (numpy.size(slopes) > 1):
- dslope = numpy.std(slopes)
- else:
- dslope = dfitvals[1]
-
- print("")
- print("---------------------------------------------")
- print(" %20s " % (title))
- print("---------------------------------------------")
- print(" df = %.5f +/- %.5f " % (df,ddf))
- print("---------------------------------------------")
- print(" Estimated slope vs. True slope")
- print("---------------------------------------------")
- print("%11.6f +/- %11.6f | %11.6f" % (slope, dslope, trueslope))
- print("---------------------------------------------")
-
- quant = numpy.abs((slope-trueslope)/dslope)
- print("")
- print("(That's %.2f quantiles from true slope=%5f, FYI.)" % (quant,trueslope))
- if (quant > 5):
- print(" (Ouch!)")
- else:
- print("")
-
- if (type[0:5] == 'dbeta'):
- #trueslope = B1 - B0, const = (B1 + B0)/2, B = 1/(k_B T)
- # so B0 = (const-trueslope/2), T0 = 1/(k_B*B0)
- # so B1 = (const+trueslope/2), T1 = 1/(k_B*B1)
- T0 = (convert*(const-trueslope/2))**(-1)
- T1 = (convert*(const+trueslope/2))**(-1)
-
- print("---------------------------------------------")
- print(" True dT = %7.3f, Eff. dT = %7.3f+/-%.3f" % (T0-T1, convert*T0*T1*slope,convert*dslope*T0*T1))
- print("---------------------------------------------")
-
- elif (type == 'dpressure-constB'):
- # trueslope = B*PV_conv*(P1-P0), const = B*PV_conv*(P1+P0)/2,
- # we need to convert this slope to a pressure. This should just be dividing by pvconvert*beta
- #
- print("---------------------------------------------")
- print(" True dP = %7.3f, Eff. dP = %7.3f+/-%.3f" % (-trueslope/convert, -slope/convert, numpy.abs(dslope/convert)))
- print("---------------------------------------------")
-
- elif (type == 'dmu-constB'):
- # trueslope = B*(mu1-mu0), const = B*(mu1+mu0)/2,
- # we need to convert this slope to a chemical potential. This should just be dividing by beta
- #
- print("---------------------------------------------")
- print(" True dmu = %7.3f, Eff. dmu = %7.3f+/-%.3f" % (-trueslope/convert, -slope/convert, numpy.abs(dslope/convert)))
- print("---------------------------------------------")
-
- elif (type == 'ddmu-constB'):
- # trueslope = B*(dmu1-dmu0), const = B*(dmu1+dmu0)/2,
- # we need to convert this slope to a chemical potential. This should just be dividing by beta
- #
- print("---------------------------------------------")
- print(" True ddmu = %7.3f, Eff. ddmu = %7.3f+/-%.3f" % (-trueslope/convert, -slope/convert, numpy.abs(dslope/convert)))
- print("---------------------------------------------")
-
-def Print2DStats(title,type,fitvals,kB,convertback,trueslope,const,dfitvals='N/A'):
-
- # first element in fitvals is free energies df
- dfs = fitvals[0]
- # Need to fix this so that uncertainties aren't printed when ddf is 'N/A'
- df = numpy.average(dfs) # true even if there is only one per slope
- if (numpy.size(dfs) > 1):
- ddf = numpy.std(dfs)
- else:
- ddf = dfitvals[0]
-
- slopes = []
- # second element in fitvals is the energy slope
- # third element in fitvals is the PV slope
- for i in range(2):
- slopes.append(fitvals[i+1])
-
- slope = numpy.zeros(2,float)
- dslope = numpy.zeros(2,float)
- for i in range(2):
- slope[i] = numpy.average(slopes[i]) # true even if there is only one
- if (numpy.size(slopes[i]) > 1):
- dslope[i] = numpy.std(slopes[i])
- else:
- dslope[i] = dfitvals[i+1]
-
- print("")
- print("---------------------------------------------------")
- print(" %20s " % (title))
- print("---------------------------------------------------")
- print(" df = %.5f +/- %.5f " % (df,ddf))
- for i in range(2):
- print("---------------------------------------------------")
- print(" Estimated slope[%d] vs. True slope[%d]" % (i,i))
- print("---------------------------------------------------")
- print("%11.6f +/- %11.6f | %11.6f" % (slope[i], dslope[i], trueslope[i]))
-
- quant = numpy.abs((slope[i]-trueslope[i])/dslope[i])
- print("")
- print("(That's %.2f quantiles from true slope=%5f, FYI.)" % (quant,trueslope[i]))
- if (quant > 5):
- print(" (Ouch!)")
- else:
- print("")
-
- #dp = B1 - B0, const = (B1 + B0)/2, B = 1/kbT
- # so B0 = (const[0]-trueslope[0]/2), T0 = 1/(kB*B0)
- # so B1 = (const[0]+trueslope[0]/2), T1 = 1/(kB*B1)
- T0 = (kB*(const[0]-trueslope[0]/2))**(-1)
- T1 = (kB*(const[0]+trueslope[0]/2))**(-1)
-
- print("---------------------------------------------")
- print(" True dT = %7.3f, Eff. dT = %7.3f+/-%.3f" % (T0-T1, kB*T0*T1*slope[0],kB*dslope[0]*T0*T1))
- print("---------------------------------------------")
-
- if (type == 'dbeta-dpressure'):
- text = 'dP'
- elif (type == 'dbeta-dmu'):
- text = 'dmu'
- elif (type == 'dbeta-ddmu'):
- text = 'ddmu'
-
- print("---------------------------------------------")
- print(" True %s = %7.3f, Eff. %s = %7.3f+/-%.3f" % (text,-trueslope[1]/convertback,text, -slope[1]/convertback, dslope[1]/convertback))
-
-
-def PrintPicture(xaxis,true,y,dy,fit,type,name,figname,fittype,vunits='kT',show=False):
-
- try:
- import matplotlib
- except:
- print('*************')
- print('Note: Figures not generated because matplotlib not found. ', end=' ')
- print('Please install matplotlib to allow generation of pictures')
- return
-
- import matplotlib.pyplot as plt
-
- matplotlib.rc('lines',lw=2)
- font = {'family' : 'serif',
- 'weight' : 'bold',
- 'size' : '14'}
- matplotlib.rc('font',**font)
-
- [vt,pstring,plinfit,pnlfit,varstring,legend_location] = PrepStrings(type,vunits=vunits)
-
- pstringtex = r'$\frac{P_2(' + vt + r')}{P_1(' + vt + r')}$'
- pstringlntex = r'$\ln\frac{P_2(' + vt + r')}{P_1(' + vt + r')}$'
-
- print("Now printing figure %s" % (figname))
- plt.clf()
- plt.xlabel(varstring)
- if (fittype == 'linear'):
- plt.title(vt + ' vs. log probability ratio \n for ' + name)
- plt.errorbar(xaxis,y,fmt='b-',yerr=dy,label = pstringlntex) # make this general!
- plt.errorbar(xaxis,true,fmt='k-',label = plinfit)
- plt.errorbar(xaxis,fit,fmt='r-',label = 'Fit to $y = b+aB$')
- plt.ylabel(pstringlntex)
- elif (fittype == 'nonlinear'):
- plt.title(vt + ' vs. probability ratio \n for ' + name)
- plt.errorbar(xaxis,y,fmt='b-',yerr=dy,label = pstringtex)
- plt.errorbar(xaxis,true,fmt='k-',label = pnlfit)
- plt.errorbar(xaxis,fit,fmt='r-',label = 'Fit to $y = \exp(b+aE)$')
- plt.ylabel(pstringtex)
- elif (fittype == 'maxwell'):
- # only valid for kinetic energy
- plt.title('E_kin vs. probability \n for' + name)
- plt.errorbar(xaxis,y,fmt='b-',yerr=dy,label = r'$P(E_{\mathrm{kin}})$')
- if (true is not None): # sometimes, true will be none.
- plt.errorbar(xaxis,true,fmt='k-',label = 'Fit to Analytical')
- plt.errorbar(xaxis,fit,fmt='r-',label = 'Fit to Normal')
- plt.ylabel(r'$P(E_{\mathrm{kin}})$')
- else:
- print("I'm crying foul! %s is not an allowed chart type!" % (fittype))
-
- plt.legend(loc=legend_location)
- if show:
- plt.show()
- plt.savefig(figname + '.pdf')
-
-def GenHistogramProbs(N_k,bins,v,g):
-
- K = len(N_k)
-
- hlist = []
- dhlist = []
-
- # check for zero bins
- left = 0
- right = len(bins)
- center = int(len(bins) / 2)
- for k in range(0,K):
- hstat = numpy.histogram(v[0, k, 0:N_k[k]], bins=bins)
- empty = numpy.where(hstat[0] == 0)[0]
- for e in empty:
- if e < center:
- if e >= left:
- left = e + 1
- else:
- if e < right:
- right = e
- bins = bins[left:right]
- for k in range(0,K):
- hstat = numpy.histogram(v[0,k,0:N_k[k]], bins = bins)
- h = (1.0*hstat[0])/N_k[k]
- hlist.append(h)
- dh = numpy.sqrt(g[k]*h*(1.0-h)/N_k[k])
- dhlist.append(dh)
-
- return hlist,dhlist,bins
-
-def LinFit(bins, N_k, dp, const, v, df=0,
- analytic_uncertainty=False, name="", g=(1, 1),
- screen=False, filename=None, eunits=None):
-
- [hlist,dhlist,bins] = GenHistogramProbs(N_k,bins,v,g)
-
- ratio = numpy.log(hlist[1]/hlist[0]) # this should have the proper exponential distribution
- dratio = numpy.sqrt((dhlist[0]/hlist[0])**2 + (dhlist[1]/hlist[1])**2)
-
- usedat = numpy.isfinite(ratio)
- y = ratio[usedat]
- nuse = len(y)
- weights = 1.0/dratio[usedat]
-
- xaxis = (bins[0:len(bins)-1] + bins[1:len(bins)])/2
- x = xaxis[usedat]
-
- X = numpy.ones([nuse,2],float)
- X[:,1] = x
-
- w = numpy.diag(weights)
- WX = numpy.dot(w,X)
- WY = numpy.dot(w,y)
- WXT = numpy.transpose(WX)
- Z = numpy.dot(WXT,WX)
- WXY = numpy.dot(WXT,WY)
-
- a = numpy.linalg.solve(Z,WXY)
- da_matrix = numpy.transpose(numpy.linalg.inv(Z))
- da = numpy.zeros(2,float)
- da[0] = numpy.sqrt(da_matrix[0,0])
- da[1] = numpy.sqrt(da_matrix[1,1])
-
- # the true line is y = df + dp*x, where y is ln P_1(X)/P_2(X)
-
- do_plot = screen or filename is not None
- if do_plot:
- trueslope = dp
- true = df+trueslope*xaxis
- fit = a[0] + a[1]*xaxis
-
- #PrintData(xaxis,true,fit,ratio,dratio,'linear')
-
- name = name + ' (linear)'
- #PrintPicture(xaxis,true,ratio,dratio,fit,type,name,figname,'linear',vunits)
-
- data = [{'x': xaxis,
- 'y': ratio,
- 'y_err': dratio,
- 'name': 'Simulation'},
- {'x': xaxis,
- 'y': fit,
- 'name': 'Fit to simulation'},
- {'x': xaxis,
- 'y': true,
- 'name': 'Analytical ratio'}]
-
- units = ''
- if eunits is not None:
- units = ' [' + eunits + ']'
-
- annot = None
- if analytic_uncertainty:
- annot = ('{:.1f}'.format(abs((a[1] - dp[0]) / da[1])) +
- ' quantiles')
-
- plot.plot(data,
- legend='best',
- title='Log probability ratio',
- xlabel='Energy' + units,
- ylabel=r'$\log\frac{P_2(E)}{P_1(E)}$',
- filename=filename,
- screen=screen,
- axtext=annot)
-
-
-
- results = []
- results.append(a)
- if (analytic_uncertainty):
- results.append(da)
-
- return results
-
-def SolveNonLin(f,df,a,data,ddata,xaxis,maxiter=20,tol=1e-10):
-
- K = numpy.size(a)
- usedat = numpy.isfinite(data)
- y = data[usedat]
- nuse = len(y)
- weights = 1.0/ddata[usedat]
- w = numpy.diag(weights)
- x = xaxis[usedat]
- J = numpy.zeros([nuse,K],dtype = numpy.float64)
-
- # do the newton-raphson solution
- endnext = False
- for n in range(maxiter):
-
- expt = f(a,x)
-
- J = numpy.transpose(df(a,x))
- WJ = numpy.dot(w,J)
- JTW = numpy.transpose(WJ)
- dy = y - expt
- Z = numpy.dot(JTW,WJ)
- incr_a = numpy.linalg.solve(Z,numpy.dot(JTW,dy))
- a += incr_a
- ra = incr_a/a
- chtol = numpy.sqrt(numpy.dot(ra,ra))
- if (chtol < tol):
- if (endnext == True) or (analytical_estimate == False):
- endnow == True # we put in this logic so that we calculate the matrix at the minimum
- # if we want the analytic uncertainty
- endnext = True
- if (endnow == True):
- break
-
- if (n == maxiter):
- print("Too many iterations for nonlinear least squares")
-
- da_matrix = numpy.linalg.inv(Z)
- da = numpy.zeros(K,float)
- for k in range(K):
- da[k] = numpy.sqrt(da_matrix[k,k])
-
- return a,da
-
-def ExpFit(a,x): # assume only 2D, since we are not generating histograms
- return numpy.exp(a[0] + a[1]*x)
-
-def dExpFit(a,x):
- s = a[0] + a[1]*x
- e = numpy.exp(s)
- return numpy.array([e,x*e])
-
-def NonLinFit(bins,N_k,dp,const,v,df=0,analytic_uncertainty=False,bGraph=False,name="",
- figname='nonlin_figure', tol=1e-10,g=[1,1], type = 'dbeta-constV',vunits='kT'):
-
- # nonlinear model is exp(A + B*E_i), where the i are the bin energies.
- # residuals are y_i - exp(A + B*E_i)
- # dS/dbeta_j = 2\sum_i r_i dr_i/dB_j = 0
- #
- # dr_i/dA = exp(A + B*E_i)
- # dr_i/dB = E_i*exp(A + B*E_i)
-
- [hlist,dhlist,bins] = GenHistogramProbs(N_k,bins,v,g)
-
- ratio = (hlist[1]/hlist[0]) # this should have the proper exponential distribution
- dratio = ratio*(numpy.sqrt((dhlist[0]/hlist[0])**2 + (dhlist[1]/hlist[1])**2))
-
- xaxis = (bins[0:len(bins)-1] + bins[1:len(bins)])/2
-
- # starting point for nonlinear fit
- L = numpy.size(dp)+1
- a = numpy.zeros(L)
- a[0] = df
- a[1:L] = dp[:]
-
- (a,da) = SolveNonLin(ExpFit,dExpFit,a,ratio,dratio,xaxis,tol=tol)
-
- if (bGraph):
- trueslope = dp
- true = numpy.exp(df+trueslope*xaxis)
- fit = ExpFit(a,xaxis)
-
- PrintData(xaxis,true,fit,ratio,dratio,'nonlinear')
-
- name = name + ' (nonlinear)'
- PrintPicture(xaxis,true,ratio,dratio,fit,type,name,figname,'nonlinear',vunits=vunits)
-
- results = []
- results.append(a)
- if (analytic_uncertainty):
- results.append(da)
-
- return results
-
-def MaxwellBoltzFit(bins,U,N,kT,figname,name="",ndof=None,g=1):
-
- # generate histogram
- hstat = numpy.histogram(U, bins = bins)
- # normalize the histogram
- h = (1.0*hstat[0])/N
- # compute the error bars
- dh = numpy.sqrt(g*h*(1.0-h)/N)
- xaxis = (bins[0:len(bins)-1] + bins[1:len(bins)])/2
-
- # we assume we have the correct mean for now, since presumably the temperature works
- mean = numpy.mean(U)
- std_fit = numpy.std(U)
- std_true = numpy.sqrt(mean*kT)
- if (mean > 50*kT): #if too big, we use a normal distribution -- we'll use limit of 50 DOF as suggest (by Wikipedia!)
- # note that for a normal distribution, the sample mean and standard deviation give the maximum likelihood information.
- fit = numpy.exp(-(xaxis-mean)**2/(2*std_fit**2))/(numpy.sqrt(2*numpy.pi*std_fit**2))
- true = numpy.exp(-(xaxis-mean)**2/(2*std_true**2))/(numpy.sqrt(2*numpy.pi*std_true**2))
- # check this with paper?
- else:
- # should be a gamma distribution; no std fit
- fit = (xaxis/kT)**(mean/kT-1)*numpy.exp(-xaxis/kT)/(kT*scipy.special.gamma(mean/kT))
- if (ndof is not None):
- mean_true = 0.5*ndof*kT
- true = (xaxis/kT)**(mean_true/kT-1)*numpy.exp(-xaxis/kT)/(kT*scipy.special.gamma(mean/kT))
- else:
- true = None # unless we know the number of DOF, we don't know the true distribution:
-
- print("--- Kinetic energy analysis ---")
- print("")
- print("kT = %10.4f" % (kT))
- if (ndof is None):
- print("Effective # of DOF = %10.4f" % (2*mean/kT))
- else:
- print("Reported # of DOF = %10.4f" % ndof)
- if (mean > 25*kT):
- "Approximating the Maxwell-Boltzmann with a normal distribution, as # DOF > 50"
- print("Direct Std = %10.4f, Std from sqrt(U*kT) = %10.4f" % (std_fit,std_true))
- print("")
-
- # name = name + str
- # normalize histogram and error bars
- width = bins[1]-bins[0] # assumes equal spacing (currently true)
- h /= width
- dh /= width
- PrintPicture(xaxis,true,h,dh,fit,'dbeta-constV',name,figname,'maxwell')
-
-def PrintData(xaxis,true,fit,collected,dcollected,type):
-
- if (type == 'linear'):
- print("---- Linear Fit ----")
- elif (type == 'nonlinear'):
- print("---- Nonlinear Fit ----")
- elif (type == 'maxwell'):
- print("---- fit to Maxwell-Boltzmann ----")
- else:
- quit("Sorry, no allowed type of fit (%s) specified!" % (type))
-
- print(" X True Observed Error d(true/obs) sig(true/obs) Fit ")
- print("---------------------------------------------------------------------------------------")
- for i in range(len(collected)):
- diff = collected[i]-true[i]
- sig = numpy.abs(collected[i]-true[i])/dcollected[i]
- print("%10.3f %10.3f %10.3f %10.3f %10.3f %10.3f %10.3f" % (xaxis[i],true[i],collected[i],dcollected[i],diff,sig,fit[i]))
-
-
-def ProbabilityAnalysis(N_k, type='dbeta-constV',
- T_k=None, P_k=None, mu_k=None, U_kn=None, V_kn=None, N_kn=None,
- kB=0.0083144624, nbins=40,
- bMaxLikelihood=True, bLinearFit=True, bNonLinearFit=True, reptype=None, nboots=200,
- g=[1,1], reps=None, cuttails=0.001, bMaxwell=False,
- units=None, seed=None,
- screen=False, filename=None, quiet=False):
-
- K = len(N_k) # should be 2 pretty much always . . .
-
- # decide if we are printing figures:
- do_plot = screen or filename is not None
-
- # get correct conversion terms between different units.
- conversions = 1, 1, 1
- if units is not None:
- conversions = prepare_conversion_factors(units)
-
- if (seed):
- numpy.random.seed(seed) # so there is the ability to make the RNG repeatable
- if not quiet:
- print("setting random number seed for bootstrapping %d" % (seed))
- # initialize constant terms
- beta_ave = None
- P_ave = None
- mu_ave = None
-
- if (T_k is None):
- T_k = numpy.zeros(2,float)
- else:
- beta_k = (1.0/(kB*T_k))
- beta_ave = numpy.average(beta_k)
-
- if (P_k is None):
- P_k = numpy.zeros(2,float)
- else:
- P_ave = numpy.average(P_k)
-
- if (mu_k is None):
- mu_k = numpy.zeros(2,float)
- else:
- mu_ave = numpy.average(mu_k)
-
- # prepare the variables we are going to work with
- [dp,const,v,vr] = PrepInputs(N_k,conversions,type,beta_k,beta_ave,P_k,P_ave,mu_k,mu_ave,U_kn,V_kn,N_kn)
- [vt,pstring,plinfit,pnlfit,varstring,legend_location] = PrepStrings(type)
-
- if (check_twodtype(type)): # if it's 2D, we can graph, otherwise, there is too much histogram error
-
- # determine the bin widths
- maxk = numpy.zeros(K,float)
- mink = numpy.zeros(K,float)
-
- # cuttails indicates how many we leave out on each tail
- # for now, we choose the range that cuts 0.1% from the tails of the smallest distribution.
- prange = cuttails
-
- for k in range(K):
- maxk[k] = scipy.stats.scoreatpercentile(v[0,k,0:N_k[k]],100*(1-prange))
- mink[k] = scipy.stats.scoreatpercentile(v[0,k,0:N_k[k]],100*(prange))
-
- binmax = numpy.min(maxk)
- binmin = numpy.max(mink)
-
- if (binmax < binmin):
- quit("\nThere is no overlap between the two ensembles; high distribution min is %f and low distribution max is %f; quitting!" %(binmin,binmax));
-
- if (type == 'dmu-constB'): # special code for N, since it's discrete
- # if the spread is greater than
- if ((binmax-binmin) < nbins):
- bins = numpy.arange(binmin-0.5,binmax+0.6,1)
- nbins = len(bins)
- else:
- print("Warning: since the range of particle number is greater than the ", end=' ')
- print("number of bins specified, particle number is not discrete for ", end=' ')
- print("methods using histograms. Set nbins larger (using --nbins) to ")
- print("obtain discrete N distributions")
- bins = numpy.zeros(nbins+1,float)
- for i in range(nbins+1):
- bins[i] = binmin + (binmax-binmin)*(i/(1.0*nbins))
- else:
- bins = numpy.zeros(nbins+1,float)
- for i in range(nbins+1):
- bins[i] = binmin + (binmax-binmin)*(i/(1.0*nbins))
-
- #===================================================================================================
- # Calculate free energies with different methods
- #===================================================================================================
-
- if (type == 'dbeta-dpressure') or (type == 'dbeta-dmu'):
- if (dp[0] == 0):
- quit("Warning: two input temperatures are equal, can't do joint variable fit!");
- if (dp[1] == 0):
- if (type == 'dbeta-dpressure'):
- quit("Warning: two input pressures are equal, can't do joint E,V fit!");
- elif (type == 'dbeta-dmu'):
- quit("Warning: two input chemical potentials are equal, can't do joint E,N fit!");
- else:
- trueslope = dp
- if not quiet:
- print("True slope of %s should be %.8f" % (pstring,trueslope))
-
- if type == 'dpressure-constB' or type == 'dbeta-dpressure':
- convertback = beta_ave*conversions[1] # this is the pv component
- elif type == 'dmu-constB' or type == 'dbeta-dmu':
- convertback = -1*kB
- else:
- convertback = kB
-
- w_F = (beta_k[1]-beta_k[0])*U_kn[0,0:N_k[0]]
- w_R = -((beta_k[1]-beta_k[0])*U_kn[1,0:N_k[1]])
-
- if (type == 'dbeta-constP') or (type == 'dpressure-constB') or (type == 'dbeta-dpressure'):
- w_F += conversions[1]*(beta_k[1]*P_k[1]-beta_k[0]*P_k[0])*V_kn[0,0:N_k[0]]
- w_R += -conversions[1]*(beta_k[1]*P_k[1]-beta_k[0]*P_k[0])*V_kn[1,0:N_k[1]]
-
- if (type == 'dbeta-constmu') or (type == 'dmu-constB') or (type == 'dbeta-dmu'):
- w_F += -(beta_k[1]*mu_k[1]-beta_k[0]*mu_k[0])*N_kn[0,0:N_k[0]]
- w_R += (beta_k[1]*mu_k[1]-beta_k[0]*mu_k[0])*N_kn[1,0:N_k[1]]
- # it's not entirely clear if this right because of lack of overlap in phase space between different N's!
-
- if not quiet:
- print("Now computing log of partition functions using BAR")
-
- (df,ddf) = BAR(w_F,w_R)
-
- if not quiet:
- print("using %.5f for log of partition functions computed from BAR" % (df))
- print("Uncertainty in quantity is %.5f" % (ddf))
- print("Assuming this is negligible compared to sampling error at individual points")
-
- if (bMaxwell): # only applies for kinetic energies
- if not quiet:
- print("Now fitting to a Maxwell-Boltzmann distribution")
- for k in range(2):
- fn = filename + '_maxboltz' + str(T_k[k])
- MaxwellBoltzFit(bins,U_kn[k,0:N_k[k]],N_k[k],kB*T_k[k],fn)
-
- quant = {}
-
- if (bLinearFit and check_twodtype(type)):
- if not quiet:
- print("Now computing the linear fit parameters")
- if filename is not None:
- fn = filename + '_linear'
- else:
- fn = None
- (fitvals,dfitvals) = LinFit(bins, N_k, dp, const, v, df=df,
- analytic_uncertainty=True, name='', g=g,
- filename=fn, screen=screen)
- slope = fitvals[1]
- dslope = dfitvals[1]
- quant['linear'] = [abs((slope - dp[0])/dslope)]
- if not quiet:
- Print1DStats('Linear Fit Analysis (analytical error)',type,fitvals,convertback,dp,const,dfitvals=dfitvals)
-
- if (bNonLinearFit and check_twodtype(type)):
- if not quiet:
- print("Now computing the nonlinear fit parameters")
- fn = filename + '_nonlinear'
- (fitvals,dfitvals) = NonLinFit(bins,N_k,dp,const,v,df=df,name='',figname=fn,bGraph=do_plot,analytic_uncertainty=True,g=g,type=type,vunits=vunits)
- slope = fitvals[1]
- dslope = dfitvals[1]
- quant['nonlinear'] = [abs((slope - dp[0])/dslope)]
- if not quiet:
- Print1DStats('Nonlinear Fit Analysis (analytical error)',type,fitvals,convertback,dp,const,dfitvals=dfitvals)
-
- if (bMaxLikelihood):
- if not quiet:
- print("Now computing the maximum likelihood parameters")
- (fitvals,dfitvals) = MaxLikeParams(N_k,dp,const,v,df=df,analytic_uncertainty=True,g=numpy.average(g),quiet=quiet)
- if (check_twodtype(type)):
- if not quiet:
- Print1DStats('Maximum Likelihood Analysis (analytical error)',type,fitvals,convertback,dp,const,dfitvals=dfitvals)
- slope = fitvals[1]
- dslope = dfitvals[1]
- quant['maxLikelihood'] = [abs((slope - dp[0])/dslope)]
- else:
- if not quiet:
- Print2DStats('2D-Maximum Likelihood Analysis (analytical error)',type,fitvals,kB,convertback,dp,const,dfitvals=dfitvals)
- eneslope = fitvals[1]
- pvslope = fitvals[2]
- deneslope = dfitvals[1]
- dpvslope = dfitvals[2]
- quant['maxLikelihood'] = [abs((eneslope - dp[0])/deneslope),
- abs((pvslope - dp[1]) / dpvslope)]
-
- returnvalue = None
- if bMaxLikelihood:
- returnvalue = quant['maxLikelihood']
- elif bLinearFit:
- returnvalue = quant['linear']
- elif bNonLinearFit:
- returnvalue = quant['nonlinear']
-
- if (reptype is None):
- return returnvalue
-
- if (reptype == 'bootstrap'):
- nreps = nboots
- if (nreps < 50):
- if (nreps > 1):
- print("Warning, less than 50 bootstraps (%d requested) is likely not a good statistical idea" % (nreps))
- else:
- print("Cannot provide bootstrap statisics, only %d requested" % (nreps))
- return returnvalue
-
- print("Now bootstrapping (n=%d) for uncertainties . . . could take a bit of time!" % (nreps))
- elif (reptype == 'independent'):
- nreps = len(reps)
- print("Now analyzing %d independent samples . . . could take a bit of time!" % (nreps))
- else:
- quit("Don't understand reptype = %s; quitting" % (reptype))
-
- if check_twodtype(type): # how many values do we have to deal with?
- rval = 2
- else:
- rval = 3
-
- linvals = numpy.zeros([rval,nreps],float)
- nlvals = numpy.zeros([rval,nreps],float)
- mlvals = numpy.zeros([rval,nreps],float)
-
- for n in range(nreps):
- if n % 10 == 0 and not quiet:
- print("Finished %d samples . . ." % (n))
-
- if (reptype == 'bootstrap'):
- for k in range(K):
- if ((g is None) or (g[0]==1 and g[1]==1)):
- #do normal bootstrap
- rindex = numpy.random.randint(0,high=N_k[k],size=N_k[k]); # bootstrap it
- for i in range(len(const)):
- vr[i,k,0:N_k[k]] = v[i,k,rindex]
- else:
- # we are given correlation times. Do block bootstrapping.
- gk = int(numpy.ceil(g[k]))
- nblocks = int(numpy.floor(N_k[k]/gk))
- # moving blocks bootstrap; all contiguous segments of length gk
-
- rindex = numpy.random.randint(0,high=gk*(nblocks-1),size=nblocks);
- for nb in range(nblocks):
- for i in range(len(const)):
- vr[i,k,nb*gk:(nb+1)*gk] = v[i,k,rindex[nb]:rindex[nb]+gk]
- N_k[k] = nblocks*gk # we could have a few samples less now
-
- if (reptype == 'independent'):
- for k in range(K):
- for i in range(len(const)):
- vr[i,k,0:N_k[k]] = reps[n][i][k,0:N_k[k]]
-
- if (bLinearFit and check_twodtype(type)):
- fitvals = LinFit(bins,N_k,dp,const,vr)
- for i in range(rval):
- linvals[i,n] = fitvals[0][i]
-
- if (bNonLinearFit and check_twodtype(type)):
- fitvals = NonLinFit(bins,N_k,dp,const,vr,df=df)
- for i in range(rval):
- nlvals[i,n] = fitvals[0][i]
-
- if (bMaxLikelihood):
- fitvals = MaxLikeParams(N_k,dp,const,vr,df=df,quiet=quiet)
- for i in range(rval):
- mlvals[i,n] = fitvals[0][i]
-
- if (bLinearFit and check_twodtype(type)):
- if not quiet:
- Print1DStats('Linear Fit Analysis',type,[linvals[0],linvals[1]],convertback,dp,const)
- slopes = linvals[1]
- slope = numpy.average(slopes)
- dslope = numpy.std(slopes)
- quant['linear'] = [abs((slope - dp[0])/dslope)]
-
- if (bNonLinearFit and check_twodtype(type)):
- if not quiet:
- Print1DStats('Nonlinear Fit Analysis',type,[nlvals[0],nlvals[1]],convertback,dp,const)
- slopes = nlvals[1]
- slope = numpy.average(slopes)
- dslope = numpy.std(slopes)
- quant['nonlinear'] = [abs((slope - dp[0]) / dslope)]
-
- if (bMaxLikelihood):
- if check_twodtype(type):
- if not quiet:
- Print1DStats('Maximum Likelihood Analysis',type,[mlvals[0],mlvals[1]],convertback,dp,const)
- slopes = nlvals[1]
- slope = numpy.average(slopes)
- dslope = numpy.std(slopes)
- quant['maxLikelihood'] = [abs((slope - dp[0]) / dslope)]
- else:
- if not quiet:
- Print2DStats('2D-Maximum Likelihood Analysis',type,[mlvals[0],mlvals[1],mlvals[2]],kB,convertback,dp,const)
- eneslopes = mlvals[1]
- pvslopes = mlvals[2]
- eneslope = numpy.average(eneslopes)
- deneslope = numpy.average(deneslopes)
- pvslope = numpy.average(pvslopes)
- dpvslope = numpy.average(dpvslopes)
- quant['maxLikelihood'] = [abs((eneslope - dp[0]) / deneslope),
- abs((pvslope - dp[1]) / dpvslope)]
-
- returnvalue = None
- if bMaxLikelihood:
- returnvalue = quant['maxLikelihood']
- elif bLinearFit:
- returnvalue = quant['linear']
- elif bNonLinearFit:
- returnvalue = quant['nonlinear']
-
- return returnvalue
-
-# to do: fix the drawing directions so that correct data has the legend in the right place.
--- /dev/null
+###########################################################################
+# #
+# physical_validation, #
+# a python package to test the physical validity of MD results #
+# #
+# Written by Michael R. Shirts <michael.shirts@colorado.edu> #
+# Pascal T. Merz <pascal.merz@colorado.edu> #
+# #
+# Copyright (C) 2012 University of Virginia #
+# (C) 2017 University of Colorado Boulder #
+# #
+# This library is free software; you can redistribute it and/or #
+# modify it under the terms of the GNU Lesser General Public #
+# License as published by the Free Software Foundation; either #
+# version 2.1 of the License, or (at your option) any later version. #
+# #
+# This library is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
+# Lesser General Public License for more details. #
+# #
+# You should have received a copy of the GNU Lesser General Public #
+# License along with this library; if not, write to the #
+# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
+# Boston, MA 02110-1301 USA #
+# #
+###########################################################################
+r"""
+This file reimplements most functionality of the checkensemble.py code
+originally published on https://github.com/shirtsgroup/checkensemble. It
+serves as the low-level functionality of the high-level module
+:mod:`physical_validation.ensemble`.
+"""
+from __future__ import division
+import numpy as np
+import scipy.optimize
+
+import pymbar
+
+from . import trajectory
+from . import error as pv_error
+from . import plot
+
+
+def generate_histograms(traj1, traj2, g1, g2, bins):
+
+ n1 = np.size(traj1)
+ n2 = np.size(traj2)
+
+ h1 = np.histogram(traj1, bins=bins)[0]/n1
+ h2 = np.histogram(traj2, bins=bins)[0]/n2
+ dh1 = np.sqrt(g1 * h1 * (1 - h1) / n1)
+ dh2 = np.sqrt(g2 * h2 * (1 - h2) / n2)
+
+ return h1, h2, dh1, dh2
+
+
+def do_linear_fit(traj1, traj2, g1, g2, bins,
+ screen=False, filename=None,
+ trueslope=0.0, trueoffset=0.0,
+ units=None):
+
+ h1, h2, dh1, dh2 = generate_histograms(traj1, traj2, g1, g2, bins)
+
+ # v copied from checkensemble.py v
+ ratio = np.log(h2 / h1)
+ dratio = np.sqrt((dh1/h1)**2 + (dh2/h2)**2)
+
+ usedat = np.isfinite(ratio)
+ y = ratio[usedat]
+ nuse = len(y)
+ weights = 1.0/dratio[usedat]
+
+ xaxis = (bins[:-1] + bins[1:])/2
+ x = xaxis[usedat]
+
+ x_mat = np.ones([nuse, 2])
+ x_mat[:, 1] = x
+
+ w = np.diag(weights)
+ wx = np.dot(w, x_mat)
+ wy = np.dot(w, y)
+ wx_t = np.transpose(wx)
+ z = np.dot(wx_t, wx)
+ wxy = np.dot(wx_t, wy)
+
+ a = np.linalg.solve(z, wxy)
+ da_matrix = np.transpose(np.linalg.inv(z))
+ da = np.zeros(2)
+ da[0] = np.sqrt(da_matrix[0, 0])
+ da[1] = np.sqrt(da_matrix[1, 1])
+
+ # the true line is y = df + dp*x, where y is ln P_1(X)/P_2(X)
+ # ^ end copied from checkensemble.py ^
+
+ do_plot = screen or filename is not None
+ if do_plot:
+ true = trueoffset+trueslope*xaxis
+ fit = a[0] + a[1]*xaxis
+
+ data = [{'x': xaxis,
+ 'y': ratio,
+ 'y_err': dratio,
+ 'name': 'Simulation'},
+ {'x': xaxis,
+ 'y': fit,
+ 'name': 'Fit to simulation'},
+ {'x': xaxis,
+ 'y': true,
+ 'name': 'Analytical ratio'}]
+
+ if units is not None:
+ units = ' [' + units + ']'
+ else:
+ units = ''
+
+ annot = ('{:.1f}'.format(abs((a[1] - trueslope) / da[1])) +
+ ' quantiles')
+
+ plot.plot(data,
+ legend='best',
+ title='Log probability ratio',
+ xlabel='Energy' + units,
+ ylabel=r'$\log\frac{P_2(E)}{P_1(E)}$',
+ filename=filename,
+ screen=screen,
+ axtext=annot)
+
+ return a, da
+
+
+def do_max_likelihood_fit(traj1, traj2, g1, g2,
+ init_params=None,
+ verbose=False):
+
+ # ============================================================= #
+ # Define (negative) log-likelihood function and its derivatives #
+ # ============================================================= #
+ def log_likelihood(a, ene1, ene2):
+ # Returns negative of eq (8) of check_ensemble paper
+ #
+ # Uses log (1/f(x)) == -log(f(x))
+ # and log(1 + e^x) == log(e^x (e^-x + 1)) == x + log(1 + e^-x)
+ # ^(a) ^(b)
+ # form (a) -> 0 for x->-inf, -> inf for x->inf
+ # form (b) -> NaN for x->-inf, -> x for x->inf
+ # combined: -> 0 for x-> -inf, -> x for x-> inf
+ def log_1_plus_exp(y):
+ def f(yy):
+ with np.errstate(over='raise'):
+ try:
+ xx = np.log(1 + np.exp(yy))
+ except FloatingPointError:
+ xx = yy + np.log(1 + np.exp(-yy))
+ return xx
+ return np.vectorize(f)(y)
+
+ if a.size == 2:
+ return (np.sum(log_1_plus_exp(a[0] + a[1]*ene1)) +
+ np.sum(log_1_plus_exp(-a[0] - a[1]*ene2)))
+ else:
+ return (np.sum(log_1_plus_exp(a[0] + a[1]*ene1[0] + a[2]*ene1[1])) +
+ np.sum(log_1_plus_exp(-a[0] - a[1]*ene2[0] - a[2]*ene2[1])))
+
+ def da_log_likelihood(a, ene1, ene2):
+ # Returns the first derivative wrt the parameters a of log_likelihood
+ #
+ # d/da0 log(1 + exp(a0 + a1*E)) == exp(a0 + a1*E) / (1 + exp(a0 + a1*E))
+ # == 1 / (1 + exp(-a0 - a1*E))
+ # d/da1 log(1 + exp(a0 + a1*E)) == E * exp(a0 + a1*E) / (1 + exp(a0 + a1*E))
+ # == E / (1 + exp(-a0 - a1*E))
+ def inv_1_plus_exp(y):
+ def f(yy):
+ with np.errstate(over='raise'):
+ try:
+ xx = 1. / (1 + np.exp(yy))
+ except FloatingPointError:
+ xx = 0.
+ return xx
+ return np.vectorize(f)(y)
+
+ if a.size == 2:
+ d = np.zeros(2)
+ d[0] = (np.sum(inv_1_plus_exp(-a[0] - a[1]*ene1)) -
+ np.sum(inv_1_plus_exp(a[0] + a[1]*ene2)))
+ d[1] = (np.sum(inv_1_plus_exp(-a[0] - a[1]*ene1) * ene1) -
+ np.sum(inv_1_plus_exp(a[0] + a[1]*ene2) * ene2))
+ else:
+ d = np.zeros(3)
+ d[0] = (np.sum(inv_1_plus_exp(-a[0] - a[1]*ene1[0] - a[2]*ene1[1])) -
+ np.sum(inv_1_plus_exp(a[0] + a[1]*ene2[0] + a[2]*ene2[1])))
+ d[1] = (np.sum(inv_1_plus_exp(-a[0] - a[1]*ene1[0] - a[2]*ene1[1]) * ene1[0]) -
+ np.sum(inv_1_plus_exp(a[0] + a[1]*ene2[0] + a[2]*ene2[1]) * ene2[0]))
+ d[2] = (np.sum(inv_1_plus_exp(-a[0] - a[1]*ene1[0] - a[2]*ene1[1]) * ene1[1]) -
+ np.sum(inv_1_plus_exp(a[0] + a[1]*ene2[0] + a[2]*ene2[1]) * ene2[1]))
+
+ return d
+
+ def hess_log_likelihood(a, ene1, ene2):
+ # Returns the hessian wrt the parameters a of log_likelihood
+ # fac1 = 1 / (2 + 2*cosh(a0 + a1*ene1))
+ # h1 = [[ fac1, ene1*fac1 ],
+ # [ ene1*fac1, ene1**2*fac1 ]]
+ # fac2 = 1 / (2 + 2*cosh(a0 + a1*ene2))
+ # h2 = [[ fac2, ene2*fac2 ],
+ # [ ene2*fac2, ene2**2*fac2 ]]
+ # h = h1 + h2
+
+ if a.size == 2:
+ fac1 = 1 / (2 + 2*np.cosh(a[0] + a[1]*ene1))
+ fac2 = 1 / (2 + 2*np.cosh(a[0] + a[1]*ene2))
+
+ h = np.zeros((2, 2))
+
+ h[0, 0] = np.sum(fac1) + np.sum(fac2)
+ h[0, 1] = h[1, 0] = np.sum(ene1 * fac1) + np.sum(ene2 * fac2)
+ h[1, 1] = np.sum(ene1 * ene1 * fac1) + np.sum(ene2 * ene2 * fac2)
+
+ else:
+ fac1 = 1 / (2 + 2*np.cosh(a[0] + a[1]*ene1[0] + a[2]*ene1[1]))
+ fac2 = 1 / (2 + 2*np.cosh(a[0] + a[1]*ene2[0] + a[2]*ene2[1]))
+
+ h = np.zeros((3, 3))
+
+ h[0, 0] = np.sum(fac1) + np.sum(fac2)
+ h[1, 1] = np.sum(ene1[0] * ene1[0] * fac1) + np.sum(ene2[0] * ene2[0] * fac2)
+ h[2, 2] = np.sum(ene1[1] * ene1[1] * fac1) + np.sum(ene2[1] * ene2[1] * fac2)
+
+ h[0, 1] = h[1, 0] = np.sum(ene1[0] * fac1) + np.sum(ene2[0] * fac2)
+ h[0, 2] = h[2, 0] = np.sum(ene1[1] * fac1) + np.sum(ene2[1] * fac2)
+ h[1, 2] = h[2, 1] = (np.sum(ene1[0] * ene1[1] * fac1) +
+ np.sum(ene2[0] * ene2[1] * fac2))
+
+ return h
+
+ # ==================================================== #
+ # Minimize the negative of the log likelihood function #
+ # ==================================================== #
+ if init_params is None:
+ init_params = np.zeros(traj1.ndim + 1)
+ else:
+ init_params = np.array(init_params)
+
+ min_res = scipy.optimize.minimize(
+ log_likelihood,
+ x0=init_params,
+ args=(traj1, traj2),
+ method='dogleg',
+ jac=da_log_likelihood,
+ hess=hess_log_likelihood
+ )
+
+ # fallback options
+ if not min_res.success:
+ if verbose:
+ print('Note: Max-Likelihood minimization failed using \'dogleg\' method. '
+ 'Trying to vary initial parameters.')
+ min_res_1 = scipy.optimize.minimize(
+ log_likelihood,
+ x0=init_params * 0.9,
+ args=(traj1, traj2),
+ method='dogleg',
+ jac=da_log_likelihood,
+ hess=hess_log_likelihood
+ )
+ min_res_2 = scipy.optimize.minimize(
+ log_likelihood,
+ x0=init_params * 1.1,
+ args=(traj1, traj2),
+ method='dogleg',
+ jac=da_log_likelihood,
+ hess=hess_log_likelihood
+ )
+ if min_res_1.success and min_res_2.success and np.allclose(min_res_1.x, min_res_2.x):
+ min_res = min_res_1
+
+ if not min_res.success:
+ # dogleg was unsuccessful using alternative starting point
+ if verbose:
+ print('Note: Max-Likelihood minimization failed using \'dogleg\' method. '
+ 'Trying method \'nelder-mead\'.')
+ min_res = scipy.optimize.minimize(
+ log_likelihood,
+ x0=init_params * 0.9,
+ args=(traj1, traj2),
+ method='nelder-mead'
+ )
+
+ if not min_res.success:
+ raise RuntimeError('MaxLikelihood: Unable to minimize function.')
+
+ final_params = min_res.x
+
+ # ======================= #
+ # Calculate uncertainties #
+ # ======================= #
+ cov = np.linalg.inv(hess_log_likelihood(final_params, traj1, traj2))
+ final_error = np.sqrt(np.diag(cov))*np.sqrt(np.average([g1, g2]))
+
+ return final_params, final_error
+
+
+def check_bins(traj1, traj2, bins):
+ # check for empty bins
+ h1, _ = np.histogram(traj1, bins=bins)
+ h2, _ = np.histogram(traj2, bins=bins)
+ empty = np.where((h1 == 0) | (h2 == 0))[0]
+
+ if np.size(empty) == 0:
+ return bins
+ elif np.size(empty) == 1:
+ empty = empty[0]
+ if empty > np.size(bins) / 2:
+ return bins[:empty]
+ else:
+ return bins[empty+1:]
+ else:
+ # find longest non-empty interval
+ empty = np.insert(np.append(empty, [40]), 0, [-1])
+ max_interval = np.argmax(empty[1:] - empty[:-1])
+ left = empty[max_interval] + 1
+ right = empty[max_interval + 1]
+ return bins[left:right]
+
+
+def print_stats(title,
+                fitvals, dfitvals,
+                kb, param1, param2, trueslope,
+                temp=None, pvconvert=None,
+                dtemp=False, dpress=False, dmu=False,
+                dtempdpress=False, dtempdmu=False):
+
+    # Pretty-print fit results (free energy offset and slope(s)) next to the
+    # analytically expected slope(s), and translate the slopes into physical
+    # interval estimates (dT / dP) where applicable. Printing only; returns
+    # nothing.
+    #
+    # Expected input shapes:
+    # if simple 1d:
+    # fitvals = [df, slope]
+    # dfitvals = [ddf, dslope]
+    # if simple 2d:
+    # fitvals = [df, slope0, slope1]
+    # dfitvals = [ddf, dslope0, dslope1]
+    # if bootstrapped 1d:
+    # fitvals = [[df, slope], [df, slope], ...]
+    # dfitvals = None
+    # if bootstrapped 2d:
+    # fitvals = [[df, slope0, slope1], [df, slope0, slope1], ...]
+    # dfitvals = None
+    # Bootstrapped input (one fit per bootstrap sample): reduce the ensemble
+    # of fits to its mean and standard deviation before printing.
+    if fitvals.ndim > 1:
+        dfitvals = np.std(fitvals, axis=0)
+        fitvals = np.average(fitvals, axis=0)
+
+    # Accept a scalar (1d check) or vector (2d check) true slope.
+    if np.ndim(trueslope) == 0:
+        trueslopes = np.array([trueslope])
+    else:
+        trueslopes = trueslope
+
+    free_energy = fitvals[0]
+    slopes = fitvals[1:]
+    dfree_energy = dfitvals[0]
+    dslopes = dfitvals[1:]
+
+    print('='*50)
+    print(title)
+    print('='*50)
+    print('Free energy')
+    print('    {:.5f} +/- {:.5f}'.format(free_energy, dfree_energy))
+    print('{:27s} | {:s}'.format('Estimated slope', 'True slope'))
+    for slope, dslope, trueslope in zip(slopes, dslopes, trueslopes):
+        print('    {:<9.6f} +/- {:<9.6f} | {:<9.6f}'.format(slope, dslope, trueslope))
+        # Deviation from the true slope, in units of the estimated error.
+        quant = np.abs((slope-trueslope)/dslope)
+        print('    ({:.2f} quantiles from true slope)'.format(quant))
+
+    if dtemp or dtempdpress or dtempdmu:
+        # slope is estimated beta2 - beta1
+        # kb * slope == 1/T1' - 1/T2' == (T2' - T1')/(T1'*T2')
+        # So we'll assume dT' == T2' - T1' ~= kb * slope * T1*T2
+        slope = slopes[0]
+        dslope = dslopes[0]
+        if dtemp:
+            # 1d temperature check: params are scalar temperatures.
+            t1 = param1
+            t2 = param2
+        else:
+            # 2d check: params are [temperature, pressure] pairs.
+            t1 = param1[0]
+            t2 = param2[0]
+        print('{:27s} | {:s}'.format('Estimated dT', 'True dT'))
+        print('    {:<6.1f} +/- {:<6.1f} | {:<6.1f}'.format(
+            kb * slope * t1 * t2,
+            kb * dslope * t1 * t2,
+            t2 - t1
+        ))
+    if dpress or dtempdpress:
+        # slope is estimated (P1 - P2)/beta*pvconvert (1d), or
+        # (P1/b1 - P2/b2)*pvconvert (2d)
+        if temp is None and dtempdpress:
+            # No explicit temperature given: use the mean of the two targets.
+            temp = .5*(param1[0] + param2[0])
+        if dpress:
+            press = -slopes[0] * (kb*temp) / pvconvert
+            ddpress = -dslopes[0] * (kb*temp) / pvconvert
+            truepress = -trueslopes[0] * (kb*temp) / pvconvert
+        else:
+            press = -slopes[1] * (kb*temp) / pvconvert
+            ddpress = -dslopes[1] * (kb*temp) / pvconvert
+            truepress = -trueslopes[1] * (kb*temp) / pvconvert
+        print('{:27s} | {:s}'.format('Estimated dP', 'True dP'))
+        print('    {:<6.1f} +/- {:<6.1f} | {:<6.1f}'.format(
+            press, np.abs(ddpress), truepress
+        ))
+    if dmu or dtempdmu:
+        # Chemical-potential output not implemented yet.
+        pass
+    print('='*50)
+
+
+def estimate_interval(ens_string, ens_temp,
+                      energy, kb,
+                      ens_press=None,
+                      volume=None, pvconvert=None,
+                      verbosity=1, cutoff=0.001,
+                      tunit='', punit=''):
+    r"""
+    Estimates a suitable state point interval for ensemble-check simulations,
+    based on a single equilibrium trajectory (rule of thumb: separate the
+    distributions by about two standard deviations).
+
+    Parameters
+    ----------
+    ens_string : str
+        Ensemble of the reference trajectory, 'NVT' or 'NPT'.
+    ens_temp : float
+        Target temperature of the reference trajectory.
+    energy : array-like
+        Energy trajectory (potential energy for NVT; combined with the
+        volume trajectory for NPT).
+    kb : float
+        Boltzmann constant in same units as the energy trajectory.
+    ens_press : float, optional
+        Target pressure of the reference trajectory (NPT only).
+    volume : array-like, optional
+        Volume trajectory (NPT only).
+    pvconvert : float, optional
+        Conversion from pressure * volume to energy units (NPT only).
+    verbosity : int, optional
+        Verbosity level. Default: 1.
+    cutoff : float, optional
+        Tail cutoff of distributions. Default: 0.001 (0.1%).
+    tunit : str, optional
+        Temperature unit (printing only).
+    punit : str, optional
+        Pressure unit (printing only).
+
+    Returns
+    -------
+    dict
+        Suggested intervals: key 'dT' (NVT and NPT); keys 'dP' and 'dTdP'
+        (NPT only).
+
+    Raises
+    ------
+    pv_error.InputError
+        If `ens_string` is neither 'NVT' nor 'NPT'.
+    """
+    result = {}
+    if ens_string == 'NVT':
+        # Discard burn-in period and time-correlated frames
+        energy = trajectory.equilibrate(energy, verbose=(verbosity > 1), name='Energy')
+        energy = trajectory.decorrelate(energy, verbose=(verbosity > 1), name='Energy')
+        energy = trajectory.cut_tails(energy, cut=cutoff, verbose=(verbosity > 2), name='Energy')
+
+        # dT
+        sig = np.std(energy)
+        result['dT'] = 2*kb*ens_temp*ens_temp/sig
+    elif ens_string == 'NPT':
+        enthalpy = energy + pvconvert * ens_press * volume
+        traj_2d = np.array([energy, volume])
+        # Discard burn-in period and time-correlated frames
+        enthalpy = trajectory.equilibrate(enthalpy, verbose=(verbosity > 1), name='Enthalpy')
+        enthalpy = trajectory.decorrelate(enthalpy, verbose=(verbosity > 1), name='Enthalpy')
+        enthalpy = trajectory.cut_tails(enthalpy, cut=cutoff, verbose=(verbosity > 2), name='Enthalpy')
+        volume_1d = trajectory.equilibrate(volume, verbose=(verbosity > 1), name='Volume')
+        volume_1d = trajectory.decorrelate(volume_1d, verbose=(verbosity > 1), name='Volume')
+        volume_1d = trajectory.cut_tails(volume_1d, cut=cutoff, verbose=(verbosity > 2), name='Volume')
+        traj_2d = trajectory.equilibrate(traj_2d, verbose=(verbosity > 1), name='2D-Trajectory')
+        traj_2d = trajectory.decorrelate(traj_2d, facs=[1, pvconvert * ens_press], verbose=(verbosity > 1), name='2D-Trajectory')
+        traj_2d = trajectory.cut_tails(traj_2d, cut=cutoff, verbose=(verbosity > 2), name='2D-Trajectory')
+
+        # dT
+        sig = np.std(enthalpy)
+        result['dT'] = 2*kb*ens_temp*ens_temp/sig
+        # dP
+        sig = np.std(volume_1d)*pvconvert
+        result['dP'] = 2*kb*ens_temp/sig
+        # dTdP
+        cov = np.cov(traj_2d)
+        sig = np.sqrt(np.diag(cov))
+        sig[1] *= pvconvert
+        result['dTdP'] = [2*kb*ens_temp*ens_temp/sig[0],
+                          2*kb*ens_temp/sig[1]]
+    else:
+        raise pv_error.InputError('ens_str', 'Unrecognized ensemble string.')
+
+    if verbosity > 0:
+        print('A rule of thumb states that good error recognition can be expected when\n'
+              'spacing the tip of the distributions by about two standard deviations.\n'
+              'Based on this rule, and the assumption that the standard deviation of the\n'
+              'distributions is largely independent of the state point, here\'s an estimate\n'
+              'for the interval given the current simulation:')
+        if ens_string == 'NVT':
+            print('Current trajectory: NVT, T = {:.2f} {:s}'.format(ens_temp, tunit))
+            print('Suggested interval: dT = {:.1f} {:s}'.format(result['dT'], tunit))
+        if ens_string == 'NPT':
+            print('Current trajectory: NPT, T = {:.2f} {:s}, P = {:.2f} {:s}'.format(
+                ens_temp, tunit, ens_press, punit))
+            print('Suggested interval:')
+            print('  Temperature-only: dT = {:.1f} {:s}'.format(result['dT'], tunit))
+            print('  Pressure-only: dP = {:.1f} {:s}'.format(result['dP'], punit))
+            print('  Combined: dT = {:.1f} {:s}, dP = {:.1f} {:s}'.format(
+                result['dTdP'][0], tunit, result['dTdP'][1], punit))
+
+    # Previously the computed intervals were only printed and then dropped;
+    # return them so callers (e.g. running with verbosity=0) can use the
+    # estimate programmatically. Backward-compatible: callers ignoring the
+    # return value are unaffected.
+    return result
+
+
+def check_1d(traj1, traj2, param1, param2, kb,
+             quantity, dtemp=False, dpress=False, dmu=False,
+             temp=None, pvconvert=None,
+             nbins=40, cutoff=0.001, seed=None,
+             verbosity=1, screen=False, filename=None):
+    r"""
+    Checks whether the energy trajectories of two simulation performed at
+    different temperatures have sampled distributions at the analytically
+    expected ratio.
+
+    Parameters
+    ----------
+    traj1 : array-like
+        Trajectory of the first simulation
+        If dtemp:
+
+            * NVT: Potential energy U or total energy E = U + K
+            * NPT: Enthalpy H = U + pV or total energy E = H + K
+
+        If dpress:
+
+            * NPT: Volume V
+
+    traj2 : array-like
+        Trajectory of the second simulation
+        If dtemp:
+
+            * NVT: Potential energy U or total energy E = U + K
+            * NPT: Enthalpy H = U + pV or total energy E = H + K
+
+        If dpress:
+
+            * NPT: Volume V
+
+    param1 : float
+        Target temperature or pressure of the first simulation
+    param2 : float
+        Target temperature or pressure of the second simulation
+    kb : float
+        Boltzmann constant in same units as the energy trajectories
+    quantity : str
+        Name of quantity analyzed (used for printing only)
+    dtemp : bool, optional
+        Set to True if trajectories were simulated at different temperature
+        Default: False.
+    dpress : bool, optional
+        Set to True if trajectories were simulated at different pressure
+        Default: False.
+    temp : float, optional
+        The temperature in equal temperature, differing pressure NPT simulations.
+        Needed to print optimal dP.
+    pvconvert : float, optional
+        Conversion from pressure * volume to energy units.
+        Needed to print optimal dP.
+    dmu : bool, optional
+        Set to True if trajectories were simulated at different chemical potential
+        Default: False.
+    nbins : int, optional
+        Number of bins used to assess distributions of the trajectories
+        Default: 40
+    cutoff : float, optional
+        Tail cutoff of distributions.
+        Default: 0.001 (0.1%)
+    seed : int, optional
+        If set, bootstrapping will be reproducible.
+        Default: None, bootstrapping non-reproducible.
+    verbosity : int, optional
+        Verbosity level.
+        Default: 1 (only most important output)
+    screen : bool, optional
+        Plot distributions on screen.
+        Default: False.
+    filename : string, optional
+        Plot distributions to `filename`.pdf.
+        Default: None.
+
+    Returns
+    -------
+
+    """
+
+    # Exactly one of the three ensemble-difference flags must be set.
+    if (not (dtemp or dpress or dmu) or
+            (dtemp and dpress) or
+            (dtemp and dmu) or
+            (dpress and dmu)):
+        raise pv_error.InputError(['dtemp', 'dpress', 'dmu'],
+                                  'Need to specify exactly one of `dtemp`, `dpress` and `dmu`.')
+
+    if dmu:
+        raise NotImplementedError('check_1d: Testing of `dmu` not implemented.')
+
+    if seed is not None:
+        raise NotImplementedError('check_1d: Bootstrapping not implemented.')
+
+    if dpress and (temp is None or pvconvert is None):
+        raise pv_error.InputError(['dpress', 'temp', 'pvconvert'],
+                                  '`ensemble.check_1d` with `dpress=True` requires `temp` and `pvconvert`.')
+
+    # =============================== #
+    # prepare constants, strings etc. #
+    # =============================== #
+    pstring = 'ln(P_2(' + quantity + ')/P_1(' + quantity + '))'
+    trueslope = 0
+    if dtemp:
+        trueslope = 1/(kb * param1) - 1/(kb * param2)
+    elif dpress:
+        trueslope = (param1 - param2) / (kb * temp) * pvconvert
+
+    if verbosity > 1:
+        print('Analytical slope of {:s}: {:.8f}'.format(pstring, trueslope))
+
+    quant = {}
+
+    # ==================== #
+    # prepare trajectories #
+    # ==================== #
+    # Discard burn-in period and time-correlated frames
+    traj1 = trajectory.prepare(traj1, cut=cutoff, verbosity=verbosity, name='Trajectory 1')
+    traj2 = trajectory.prepare(traj2, cut=cutoff, verbosity=verbosity, name='Trajectory 2')
+
+    # calculate inefficiency
+    g1 = pymbar.timeseries.statisticalInefficiency(traj1)
+    g2 = pymbar.timeseries.statisticalInefficiency(traj2)
+
+    # calculate overlap
+    traj1_full = traj1
+    traj2_full = traj2
+    traj1, traj2, min_ene, max_ene = trajectory.overlap(
+        traj1=traj1_full, traj2=traj2_full,
+    )
+    if verbosity > 0:
+        print('Overlap is {:.1%} of trajectory 1 and {:.1%} of trajectory 2.'.format(
+            traj1.shape[0] / traj1_full.shape[0],
+            traj2.shape[0] / traj2_full.shape[0]
+        ))
+    if verbosity > 0 and dtemp:
+        sig1 = np.std(traj1_full)
+        sig2 = np.std(traj2_full)
+        dt1 = 2*kb*param1*param1/sig1
+        dt2 = 2*kb*param2*param2/sig2
+        if verbosity > 1:
+            print('A rule of thumb states that a good overlap is found when dT/T = (2*kB*T)/(sig),\n'
+                  'where sig is the standard deviation of the energy distribution.\n'
+                  'For the current trajectories, dT = {:.1f}, sig1 = {:.1f} and sig2 = {:.1f}.\n'
+                  'According to the rule of thumb, given T1, a good dT is dT = {:.1f}, and\n'
+                  '                                given T2, a good dT is dT = {:.1f}.'.format(
+                      param2-param1, sig1, sig2, dt1, dt2)
+                  )
+        print('Rule of thumb estimates that dT = {:.1f} would be optimal '
+              '(currently, dT = {:.1f})'.format(.5*(dt1+dt2), param2-param1))
+    if verbosity > 0 and dpress:
+        sig1 = np.std(traj1_full)*pvconvert
+        sig2 = np.std(traj2_full)*pvconvert
+        dp1 = 2*kb*temp/sig1
+        dp2 = 2*kb*temp/sig2
+        if verbosity > 1:
+            print('A rule of thumb states that a good overlap is found when dP = (2*kB*T)/(sig),\n'
+                  'where sig is the standard deviation of the volume distribution.\n'
+                  'For the current trajectories, dP = {:.1f}, sig1 = {:.1g} and sig2 = {:.1g}.\n'
+                  'According to the rule of thumb, given P1, a good dP is dP = {:.1f}, and\n'
+                  '                                given P2, a good dP is dP = {:.1f}.'.format(
+                      param2-param1, sig1, sig2, dp1, dp2)
+                  )
+        print('Rule of thumb estimates that dP = {:.1f} would be optimal '
+              '(currently, dP = {:.1f})'.format(.5*(dp1+dp2), param2-param1))
+    # Compare to None explicitly (as `check_2d` does): a trajectory whose
+    # overlap region happens to start at exactly 0.0 is a valid overlap and
+    # must not be mistaken for "no overlap" by truthiness.
+    if min_ene is None:
+        raise pv_error.InputError(['traj1', 'traj2'],
+                                  'No overlap between trajectories.')
+    # calculate bins
+    bins = np.linspace(min_ene, max_ene, nbins+1)
+    bins = check_bins(traj1, traj2, bins)
+    if np.size(bins) < 3:
+        raise pv_error.InputError(['traj1', 'traj2', 'nbins', 'cutoff'],
+                                  'Less than 3 bins were filled in the overlap region.\n'
+                                  'Ensure sufficient overlap between the trajectories, and '
+                                  'consider increasing `cutoff` or `nbins` if there is '
+                                  'sufficient overlap but unusually long tails.')
+
+    # Work values for BAR: reduced energy differences under the true slope.
+    w_f = -trueslope * traj1_full
+    w_r = trueslope * traj2_full
+
+    if verbosity > 2:
+        print('Computing log of partition functions using pymbar.BAR...')
+    df, ddf = pymbar.BAR(w_f, w_r)
+    if verbosity > 2:
+        print('Using {:.5f} for log of partition functions as computed from BAR.'.format(df))
+        print('Uncertainty in quantity is {:.5f}.'.format(ddf))
+        print('Assuming this is negligible compared to sampling error at individual points.')
+
+    # ========== #
+    # linear fit #
+    # ========== #
+    if verbosity > 2:
+        print('Computing linear fit parameters (for plotting / comparison)')
+
+    fitvals, dfitvals = do_linear_fit(
+        traj1=traj1_full, traj2=traj2_full, g1=g1, g2=g2, bins=bins,
+        screen=screen, filename=filename,
+        trueslope=trueslope, trueoffset=df,
+        units=None
+    )
+
+    slope = fitvals[1]
+    dslope = dfitvals[1]
+    quant['linear'] = [abs((slope - trueslope)/dslope)]
+    if verbosity > 1:
+        print_stats(
+            title='Linear Fit Analysis (analytical error)',
+            fitvals=fitvals,
+            dfitvals=dfitvals,
+            kb=kb,
+            param1=param1,
+            param2=param2,
+            trueslope=trueslope,
+            temp=temp, pvconvert=pvconvert,
+            dtemp=dtemp, dpress=dpress, dmu=dmu
+        )
+
+    # ================== #
+    # max-likelihood fit #
+    # ================== #
+    if verbosity > 2:
+        print('Computing the maximum likelihood parameters')
+
+    fitvals, dfitvals = do_max_likelihood_fit(traj1_full, traj2_full, g1, g2,
+                                              init_params=[df, trueslope],
+                                              verbose=(verbosity > 1))
+
+    slope = fitvals[1]
+    dslope = dfitvals[1]
+    quant['maxLikelihood'] = [abs((slope - trueslope)/dslope)]
+    if verbosity > 0:
+        print_stats(
+            title='Maximum Likelihood Analysis (analytical error)',
+            fitvals=fitvals,
+            dfitvals=dfitvals,
+            kb=kb,
+            param1=param1,
+            param2=param2,
+            trueslope=trueslope,
+            temp=temp, pvconvert=pvconvert,
+            dtemp=dtemp, dpress=dpress, dmu=dmu
+        )
+
+    return quant['maxLikelihood']
+
+
+def check_2d(traj1, traj2, param1, param2, kb, pvconvert,
+             quantity, dtempdpress=False, dtempdmu=False,
+             cutoff=0.001, seed=None,
+             verbosity=1, screen=False, filename=None):
+    r"""
+    Checks whether the energy trajectories of two simulation performed at
+    different temperatures have sampled distributions at the analytically
+    expected ratio.
+
+    Parameters
+    ----------
+    traj1 : array-like, 2d
+        Trajectory of the first simulation
+        If dtempdpress:
+
+            * traj[0,:]: Potential energy U or total energy E = U + K
+            * traj[1,:]: Volume V
+    traj2 : array-like, 2d
+        Trajectory of the second simulation
+        If dtempdpress:
+
+            * traj[0,:]: Potential energy U or total energy E = U + K
+            * traj[1,:]: Volume V
+    param1 : array-like
+        If dtempdpress:
+            Target temperature and pressure of the first simulation
+    param2 : array-like
+        If dtempdpress:
+            Target temperature and pressure of the second simulation
+    kb : float
+        Boltzmann constant in same units as the energy trajectories
+    pvconvert : float
+        Conversion from pressure * volume to energy units
+    quantity : List[str]
+        Names of quantities analyzed (used for printing only)
+    dtempdpress : bool, optional
+        Set to True if trajectories were simulated at different
+        temperature and pressure
+        Default: False.
+    dtempdmu : bool, optional
+        Set to True if trajectories were simulated at different
+        temperature and chemical potential
+        Default: False.
+    cutoff : float, optional
+        Tail cutoff of distributions.
+        Default: 0.001 (0.1%)
+    seed : int, optional
+        If set, bootstrapping will be reproducible.
+        Default: None, bootstrapping non-reproducible.
+    verbosity : int, optional
+        Verbosity level.
+        Default: 1 (only most important output)
+    screen : bool, optional
+        Plot distributions on screen.
+        Default: False.
+    filename : string, optional
+        Plot distributions to `filename`.pdf.
+        Default: None.
+
+    Returns
+    -------
+
+    """
+
+    # Exactly one of the two ensemble-difference flags must be set.
+    if not (dtempdpress or dtempdmu) or (dtempdpress and dtempdmu):
+        raise pv_error.InputError(['dtempdpress', 'dtempdmu'],
+                                  'Need to specify exactly one of `dtempdpress` and `dtempdmu`.')
+
+    if dtempdmu:
+        raise NotImplementedError('check_2d: Testing of `dtempdmu` not implemented.')
+
+    if seed is not None:
+        raise NotImplementedError('check_2d: Bootstrapping not implemented.')
+
+    if screen or filename is not None:
+        raise NotImplementedError('check_2d: Plotting not implemented.')
+
+    # =============================== #
+    # prepare constants, strings etc. #
+    # =============================== #
+    pstring = ('ln(P_2(' + quantity[0] + ', ' + quantity[1] + ')/' +
+               'P_1(' + quantity[0] + ', ' + quantity[1] + '))')
+    trueslope = np.zeros(2)
+    facs = [None, None]
+    if dtempdpress:
+        trueslope = np.array([
+            1/(kb * param1[0]) - 1/(kb * param2[0]),
+            pvconvert*(1/(kb * param1[0]) * param1[1] - 1/(kb * param2[0]) * param2[1])
+        ])
+        facs = [[1, param1[1]], [1, param2[1]]]
+
+    if verbosity > 1:
+        print('Analytical slope of {:s}: {:.8f}, {:.8f}'.format(
+            pstring, trueslope[0], trueslope[1]
+        ))
+
+    quant = {}
+
+    # ==================== #
+    # prepare trajectories #
+    # ==================== #
+    # Discard burn-in period and time-correlated frames
+    traj1 = trajectory.prepare(traj1, cut=cutoff, facs=facs[0],
+                               verbosity=verbosity, name='Trajectory 1')
+    traj2 = trajectory.prepare(traj2, cut=cutoff, facs=facs[1],
+                               verbosity=verbosity, name='Trajectory 2')
+
+    # calculate inefficiency (per dimension: energy row 0, volume row 1)
+    g1 = np.array([
+        pymbar.timeseries.statisticalInefficiency(traj1[0]),
+        pymbar.timeseries.statisticalInefficiency(traj1[1])
+    ])
+    g2 = np.array([
+        pymbar.timeseries.statisticalInefficiency(traj2[0]),
+        pymbar.timeseries.statisticalInefficiency(traj2[1])
+    ])
+
+    # calculate overlap
+    traj1_full = traj1
+    traj2_full = traj2
+    traj1, traj2, min_ene, max_ene = trajectory.overlap(
+        traj1=traj1_full, traj2=traj2_full,
+    )
+    if verbosity > 0:
+        print('Overlap is {:.1%} of trajectory 1 and {:.1%} of trajectory 2.'.format(
+            traj1.shape[1] / traj1_full.shape[1],
+            traj2.shape[1] / traj2_full.shape[1]
+        ))
+    if verbosity > 0 and dtempdpress:
+        cov1 = np.cov(traj1_full)
+        sig1 = np.sqrt(np.diag(cov1))
+        sig1[1] *= pvconvert
+        cov2 = np.cov(traj2_full)
+        sig2 = np.sqrt(np.diag(cov2))
+        sig2[1] *= pvconvert
+        dt1 = 2*kb*param1[0]*param1[0]/sig1[0]
+        dt2 = 2*kb*param2[0]*param2[0]/sig2[0]
+        dp1 = 2*kb*param1[0]/sig1[1]
+        dp2 = 2*kb*param2[0]/sig2[1]
+        if verbosity > 1:
+            print('A rule of thumb states that a good overlap can be expected when choosing state\n'
+                  'points separated by about 2 standard deviations.\n'
+                  'For the current trajectories, dT = {:.1f}, and dP = {:.1f},\n'
+                  'with standard deviations sig1 = [{:.1f}, {:.1g}], and sig2 = [{:.1f}, {:.1g}].\n'
+                  'According to the rule of thumb, given point 1, the estimate is dT = {:.1f}, dP = {:.1f}, and\n'
+                  '                                given point 2, the estimate is dT = {:.1f}, dP = {:.1f}.'.format(
+                      param2[0]-param1[0], param2[1]-param1[1],
+                      sig1[0], sig1[1], sig2[0], sig2[1],
+                      dt1, dt2, dp1, dp2)
+                  )
+        print('Rule of thumb estimates that (dT,dP) = ({:.1f},{:.1f}) would be optimal '
+              '(currently, (dT,dP) = ({:.1f},{:.1f}))'.format(.5*(dt1+dt2), .5*(dp1+dp2),
+                                                              param2[0]-param1[0], param2[1]-param1[1]))
+    if min_ene is None:
+        raise pv_error.InputError(['traj1', 'traj2'],
+                                  'No overlap between trajectories.')
+
+    # Work values for BAR: reduced energy differences under the true slopes.
+    w_f = -trueslope[0] * traj1_full[0] - trueslope[1] * traj1_full[1]
+    w_r = trueslope[0] * traj2_full[0] + trueslope[1] * traj2_full[1]
+
+    if verbosity > 2:
+        print('Computing log of partition functions using pymbar.BAR...')
+    df, ddf = pymbar.BAR(w_f, w_r)
+    if verbosity > 2:
+        print('Using {:.5f} for log of partition functions as computed from BAR.'.format(df))
+        print('Uncertainty in quantity is {:.5f}.'.format(ddf))
+        print('Assuming this is negligible compared to sampling error at individual points.')
+
+    # ================== #
+    # max-likelihood fit #
+    # ================== #
+    if verbosity > 2:
+        print('Computing the maximum likelihood parameters')
+
+    fitvals, dfitvals = do_max_likelihood_fit(traj1_full, traj2_full, g1, g2,
+                                              init_params=[df, trueslope[0], trueslope[1]],
+                                              verbose=(verbosity > 1))
+
+    slope = fitvals[1:]
+    dslope = dfitvals[1:]
+    quant['maxLikelihood'] = np.abs((slope - trueslope)/dslope)
+    if verbosity > 0:
+        print_stats(
+            title='Maximum Likelihood Analysis (analytical error)',
+            fitvals=fitvals,
+            dfitvals=dfitvals,
+            kb=kb,
+            param1=param1,
+            param2=param2,
+            trueslope=trueslope,
+            pvconvert=pvconvert,
+            dtempdpress=dtempdpress, dtempdmu=dtempdmu
+        )
+
+    return quant['maxLikelihood']
self.exe = exe
if includepath is not None:
- self._includepath = includepath
+ self.includepath = includepath
@property
def exe(self):
@includepath.setter
def includepath(self, path):
+ try: # py2/3 compatibility
+ basestring
+ except NameError:
+ basestring = str
+ if isinstance(path, basestring):
+ path = [path]
self._includepath = path
def get_quantities(self, edr, quantities, cwd=None,
if not line:
continue
line = line.split('=')
- option = line[0].strip()
- value = line[1].strip()
+ # unify mdp options - all lower case, only dashes
+ option = line[0].strip().replace('_', '-').lower()
+ if option not in ['include', 'define']:
+ value = line[1].strip().replace('_', '-').lower()
+ else:
+ value = line[1].strip()
result[option] = value
return result
def _run(self, cmd, args, cwd=None, stdin=None, stdout=None, stderr=None, mpicmd=None):
if self.exe is None:
- print('ERROR: No gmx executable defined. Set before attempting to run!')
+ raise RuntimeError('Tried to use GromacsParser before setting gmx executable.')
if mpicmd:
command = [mpicmd, self.exe, cmd]
else:
content = []
include_dirs = include
if self.includepath:
- include_dirs += [self.includepath]
+ include_dirs += self.includepath
for line in filehandler:
line = line.split(';')[0].strip()
+ line = line.split('*')[0].strip()
if not line:
continue
if line[0] == '#':
import scipy.stats as stats
import numpy as np
+import multiprocessing as mproc
+from ..util import trajectory
from . import plot
return 2 * float(kin) / (float(ndof) * float(kb))
-def check_mb_ensemble(kin, temp, ndof, alpha, kb=8.314e-3, verbose=False,
+def check_mb_ensemble(kin, temp, ndof, alpha, kb=8.314e-3, verbosity=1,
screen=False, filename=None, ene_unit=None):
r"""
Checks if a kinetic energy trajectory is Maxwell-Boltzmann distributed.
Confidence. TODO: Check proper statistical definition.
kb : float
Boltzmann constant :math:`k_B`. Default: 8.314e-3 (kJ/mol).
- verbose : bool
- Print result details. Default: False.
+ verbosity : int
+ 0: Silent.
+ 1: Print result details.
+ 2: Print additional information.
+        Default: 1.
screen : bool
Plot distributions on screen. Default: False.
filename : string
physical_validation.kinetic_energy.check_mb_ensemble : High-level version
"""
+ # Discard burn-in period and time-correlated frames
+ kin = trajectory.prepare(kin, verbosity=verbosity, name='Kinetic energy')
+
kt = kb * temp
d, p = stats.kstest(kin, 'chi2', (ndof, 0, kt/2))
title='Simulation vs. Maxwell-Boltzmann',
xlabel='Kinetic energy' + unit,
ylabel='Probability [%]',
+ sci_x=True,
filename=filename,
screen=screen)
- if verbose:
+ if verbosity > 0:
message = ('Kolmogorov-Smirnov test result: p = {:g}\n'
'Null hypothesis: Kinetic energy is Maxwell-Boltzmann distributed'.format(p))
if alpha is not None:
Useful to pre-define groups of molecules (e.g. solute / solvent,
liquid mixture species, ...). If None, no pre-defined molecule
groups will be tested. Default: None.
- Note: If an empty 1d array is found as last element in the list, the remaining
- molecules are collected in this array. This allows, for example, to only
- specify the solute, and indicate the solvent by giving an empty array.
+
+ *Note:* If an empty 1d array is found as last element in the list, the remaining
+ molecules are collected in this array. This allows, for example, to only
+ specify the solute, and indicate the solvent by giving an empty array.
random_divisions : int, optional
Number of random division tests attempted. Default: 0 (random
division tests off).
# for each frame, calculate total / translational / rotational & internal /
# rotational / internal kinetic energy for each molecule
if kin_molec is None:
- kin_molec = []
- for r, v in zip(positions, velocities):
- kin_molec.append(calc_molec_kinetic_energy(r, v, masses,
- molec_idx, natoms, nmolecs))
+ try:
+ with mproc.Pool() as p:
+ kin_molec = p.starmap(calc_molec_kinetic_energy,
+ [(r, v, masses, molec_idx, natoms, nmolecs)
+ for r, v in zip(positions, velocities)])
+ except AttributeError:
+ # Parallel execution doesn't work in py2.7 for quite a number of reasons.
+ # Attribute error when opening the `with` region is the first error (and
+ # an easy one), but by far not the last. So let's just resort to non-parallel
+ # execution:
+ kin_molec = [calc_molec_kinetic_energy(r, v, masses, molec_idx, natoms, nmolecs)
+ for r, v in zip(positions, velocities)]
result = []
for key in dict_keys:
p = check_mb_ensemble(kin=group_kin[key], temp=temp, ndof=ndof[key],
- alpha=alpha, verbose=(verbosity > 2),
+ alpha=alpha, verbosity=verbosity > 2,
screen=screen, filename=filename+'_'+key,
ene_unit=ene_unit)
result.append(p)
title='Temperature trajectories',
xlabel='Frames',
ylabel='Temperature' + unit,
+ sci_x=True,
filename=filename,
screen=screen)
###########################################################################
import numpy as np
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-from matplotlib.ticker import AutoMinorLocator
+import warnings
def plot(res, legend=None, title=None,
xlabel=None, ylabel=None, xlim=None, ylim=None,
- inv_x=False, inv_y=False,
+ inv_x=False, inv_y=False, sci_x=False, sci_y=False,
axtext=None, annotation_location=None,
filename=None, screen=True):
+ try:
+ import matplotlib as mpl
+ import matplotlib.pyplot as plt
+ from matplotlib.ticker import AutoMinorLocator
+ except ImportError:
+ warnings.warn('Install matplotlib to enable plotting.')
+ return
+
font = {'family': 'serif',
'weight': 'normal',
'size': 16}
ax.text(loc[0], loc[1], t,
bbox=bbox)
- ax.ticklabel_format(style='sci', axis='x', scilimits=(-3, 4))
+ if sci_x:
+ ax.ticklabel_format(style='sci', axis='x', scilimits=(-3, 4))
+ if sci_y:
+ ax.ticklabel_format(style='sci', axis='y', scilimits=(-3, 4))
ax.xaxis.major.formatter._useMathText = True
if filename is not None:
+++ /dev/null
-#!/usr/local/bin/env python
-
-"""
-A module for extracting uncorrelated samples from correlated timeseries data.
-
-This module provides various tools that allow one to examine the correlation functions and
-integrated autocorrelation times in correlated timeseries data, compute statistical inefficiencies,
-and automatically extract uncorrelated samples for data analysis.
-
-REFERENCES
-
-[1] Shirts MR and Chodera JD. Statistically optimal analysis of samples from multiple equilibrium states.
-J. Chem. Phys. 129:124105, 2008
-http://dx.doi.org/10.1063/1.2978177
-
-[2] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
-histogram analysis method for the analysis of simulated and parallel tempering simulations.
-JCTC 3(1):26-41, 2007.
-
-"""
-from __future__ import print_function
-from __future__ import division
-
-#=============================================================================================
-# COPYRIGHT NOTICE
-#
-# Written by John D. Chodera <jchodera@gmail.com> and Michael R. Shirts <mrshirts@gmail.com>.
-#
-# Copyright (c) 2007 The Regents of the University of California. All Rights Reserved.
-# Portions of this software are Copyright (c) 2007 Stanford University and Columbia University.
-#
-# This program is free software; you can redistribute it and/or modify it under the terms of
-# the GNU General Public License as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
-# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along with this program;
-# if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
-# Boston, MA 02110-1301, USA.
-#=============================================================================================
-
-#=============================================================================================
-# TODO
-# * Implement unit tests that generate timeseries with various levels of Gaussian correlation to test all methods.
-# * Add Zwanzig procedure for estimating statistical uncertainties in correlation functions
-# (by making Gaussian process assumptions).
-#=============================================================================================
-
-#=============================================================================================
-# VERSION CONTROL INFORMATION
-#=============================================================================================
-
-
-__version__ = "2.0beta"
-__authors__ = "Michael R. Shirts and John D. Chodera."
-__licanse__ = "GPL 2.0"
-
-#=============================================================================================
-# IMPORTS
-#=============================================================================================
-
-import math
-import numpy
-import numpy.linalg
-import sys
-
-#=============================================================================================
-# Exception class.
-#=============================================================================================
-
-class ParameterError(Exception):
- """An error in the input parameters has been detected.
-
- """
-
-#=============================================================================================
-# Issue warning on import.
-#=============================================================================================
-
-LongWarning = "Warning: If the inherent timescales of the system are long compared to those being analyzed, this statistical inefficiency may be an underestimate. The estimate presumes the use of many statistically independent samples. Tests should be performed to assess whether this condition is satisfied. Be cautious in the interpretation of the data."
-
-# sys.stderr.write(LongWarning + '\n')
-
-#=============================================================================================
-# METHODS
-#=============================================================================================
-
-#=============================================================================================
-def statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3):
- """
- Compute the (cross) statistical inefficiency of (two) timeseries.
-
- REQUIRED ARGUMENTS
- A_n (numpy array) - A_n[n] is nth value of timeseries A. Length is deduced from vector.
-
- OPTIONAL ARGUMENTS
- B_n (numpy array) - B_n[n] is nth value of timeseries B. Length is deduced from vector.
- If supplied, the cross-correlation of timeseries A and B will be estimated instead of the
- autocorrelation of timeseries A.
- fast (boolean) - if True, will use faster (but less accurate) method to estimate correlation
- time, described in Ref. [1] (default: False)
- mintime (int) - minimum amount of correlation function to compute (default: 3)
- The algorithm terminates after computing the correlation time out to mintime when the
- correlation function furst goes negative. Note that this time may need to be increased
- if there is a strong initial negative peak in the correlation function.
-
- RETURNS
- g is the estimated statistical inefficiency (equal to 1 + 2 tau, where tau is the correlation time).
- We enforce g >= 1.0.
-
- NOTES
- The same timeseries can be used for both A_n and B_n to get the autocorrelation statistical inefficiency.
- The fast method described in Ref [1] is used to compute g.
-
- REFERENCES
- [1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
- histogram analysis method for the analysis of simulated and parallel tempering simulations.
- JCTC 3(1):26-41, 2007.
-
- EXAMPLES
-
- Compute statistical inefficiency of timeseries data with known correlation time.
-
- >>> import testsystems
- >>> A_n = testsystems.generateCorrelatedTimeseries(N=100000, tau=5.0)
- >>> g = statisticalInefficiency(A_n, fast=True)
-
- """
-
- # Create numpy copies of input arguments.
- A_n = numpy.array(A_n)
- if B_n is not None:
- B_n = numpy.array(B_n)
- else:
- B_n = numpy.array(A_n)
-
- # Get the length of the timeseries.
- N = A_n.size
-
- # Be sure A_n and B_n have the same dimensions.
- if(A_n.shape != B_n.shape):
- raise ParameterError('A_n and B_n must have same dimensions.')
-
- # Initialize statistical inefficiency estimate with uncorrelated value.
- g = 1.0
-
- # Compute mean of each timeseries.
- mu_A = A_n.mean()
- mu_B = B_n.mean()
-
- # Make temporary copies of fluctuation from mean.
- dA_n = A_n.astype(numpy.float64) - mu_A
- dB_n = B_n.astype(numpy.float64) - mu_B
-
- # Compute estimator of covariance of (A,B) using estimator that will ensure C(0) = 1.
- sigma2_AB = (dA_n * dB_n).mean() # standard estimator to ensure C(0) = 1
-
- # Trap the case where this covariance is zero, and we cannot proceed.
- if(sigma2_AB == 0):
- raise ParameterException('Sample covariance sigma_AB^2 = 0 -- cannot compute statistical inefficiency')
-
- # Accumulate the integrated correlation time by computing the normalized correlation time at
- # increasing values of t. Stop accumulating if the correlation function goes negative, since
- # this is unlikely to occur unless the correlation function has decayed to the point where it
- # is dominated by noise and indistinguishable from zero.
- t = 1
- increment = 1
- while (t < N-1):
- # compute normalized fluctuation correlation function at time t
- C = numpy.sum( dA_n[0:(N-t)]*dB_n[t:N] + dB_n[0:(N-t)]*dA_n[t:N] ) / (2.0 * float(N-t) * sigma2_AB)
- # Terminate if the correlation function has crossed zero and we've computed the correlation
- # function at least out to 'mintime'.
- if (C <= 0.0) and (t > mintime):
- break
-
- # Accumulate contribution to the statistical inefficiency.
- g += 2.0 * C * (1.0 - float(t)/float(N)) * float(increment)
- # Increment t and the amount by which we increment t.
- t += increment
-
- # Increase the interval if "fast mode" is on.
- if fast: increment += 1
-
- # g must be at least unity
- if (g < 1.0): g = 1.0
-
- # Return the computed statistical inefficiency.
- return g
-#=============================================================================================
-def statisticalInefficiencyMultiple(A_kn, fast=False, return_correlation_function=False):
- """
- Estimate the statistical inefficiency from multiple stationary timeseries (of potentially differing lengths).
-
- REQUIRED ARGUMENTS
- A_kn (Python list of numpy arrays) - A_kn[k] is the kth timeseries, and A_kn[k][n] is nth value of timeseries k. Length is deduced from arrays.
-
- OPTIONAL ARGUMENTS
- fast can be set to True to give a less accurate but very quick estimate (default False)
- return_correlation_function - if True, will also return estimates of normalized fluctuation correlation function that were computed (default: False)
-
- RETURNS
- g is the statistical inefficiency (equal to 1 + 2 tau, where tau is the integrated autocorrelation time).
- Ct (list of tuples) - Ct[n] = (t, C) with time t and normalized correlation function estimate C is returned as well if return_correlation_function is set to True
-
- NOTES
- The autocorrelation of the timeseries is used to compute the statistical inefficiency.
- The normalized fluctuation autocorrelation function is computed by averaging the unnormalized raw correlation functions.
- The fast method described in Ref [1] is used to compute g.
-
- REFERENCES
- [1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
- histogram analysis method for the analysis of simulated and parallel tempering simulations.
- JCTC 3(1):26-41, 2007.
-
- EXAMPLES
-
- Estimate statistical efficiency from multiple timeseries of different lengths.
-
- >>> import testsystems
- >>> N_k = [1000, 2000, 3000, 4000, 5000]
- >>> tau = 5.0 # exponential relaxation time
- >>> A_kn = [ testsystems.generateCorrelatedTimeseries(N=N, tau=tau) for N in N_k ]
- >>> g = statisticalInefficiencyMultiple(A_kn)
-
- Also return the values of the normalized fluctuation autocorrelation function that were computed.
-
- >>> [g, Ct] = statisticalInefficiencyMultiple(A_kn, return_correlation_function=True)
-
- """
-
- # Convert A_kn into a list of arrays if it is not in this form already.
- if (type(A_kn) == numpy.ndarray):
- A_kn_list = list()
- if A_kn.ndim == 1:
- A_kn_list.append(A_kn.copy())
- else:
- [K,N] = A_kn.shape
- for k in range(K):
- A_kn_list.append(A_kn[k,:].copy())
- A_kn = A_kn_list
-
- # Determine number of timeseries.
- K = len(A_kn)
-
- # Get the length of each timeseries.
- N_k = numpy.zeros([K], numpy.int32)
- for k in range(K):
- N_k[k] = A_kn[k].size
-
- # Compute average timeseries length.
- Navg = numpy.array(N_k, numpy.float64).mean()
-
- # Determine total number of samples.
- N = numpy.sum(N_k)
-
- # Initialize statistical inefficiency estimate with uncorrelated value.
- g = 1.0
-
- # Compute sample mean.
- mu = 0.0
- for k in range(K):
- mu += A_kn[k].sum()
- mu /= float(N)
-
- # Construct and store fluctuation timeseries.
- dA_kn = list()
- for k in range(K):
- dA_n = A_kn[k] - mu
- dA_kn.append(dA_n.copy())
-
- # Compute sample variance from mean of squared fluctuations, to ensure that C(0) = 1.
- sigma2 = 0.0
- for k in range(K):
- sigma2 += (dA_kn[k]**2).sum()
- sigma2 /= float(N)
-
- # Initialize statistical inefficiency estimate with uncorrelated value.
- g = 1.0
-
- # Initialize storage for correlation function.
- Ct = list() # Ct[n] is a tuple (t, C) of the time lag t and estimate of normalized fluctuation correlation function C
-
- # Accumulate the integrated correlation time by computing the normalized correlation time at
- # increasing values of t. Stop accumulating if the correlation function goes negative, since
- # this is unlikely to occur unless the correlation function has decayed to the point where it
- # is dominated by noise and indistinguishable from zero.
- t = 1
- increment = 1
- while (t < N_k.max()-1):
- # compute unnormalized correlation function
- numerator = 0.0
- denominator = 0.0
- for k in range(K):
- if (t >= N_k[k]): continue # skip trajectory if lag time t is greater than its length
- dA_n = dA_kn[k] # retrieve trajectory
- x = dA_n[0:(N_k[k]-t)] * dA_n[t:N_k[k]]
- numerator += x.sum() # accumulate contribution from trajectory k
- denominator += float(x.size) # count how many overlapping time segments we've included
-
- C = numerator / denominator
-
- # compute normalized fluctuation correlation function at time t
- C = C / sigma2
- #print "C[%5d] = %16f (%16f / %16f)" % (t, C, numerator, denominator)
-
- # Store estimate of correlation function.
- Ct.append( (t,C) )
-
- # Terminate if the correlation function has crossed zero.
- # Note that we've added a hack (t > 10) condition to avoid terminating too early in correlation functions that have a strong negative peak at
- if (C <= 0.0) and (t > 10):
- break
-
- # Accumulate contribution to the statistical inefficiency.
- g += 2.0 * C * (1.0 - float(t)/Navg) * float(increment)
-
- # Increment t and the amount by which we increment t.
- t += increment
-
- # Increase the interval if "fast mode" is on.
- if fast: increment += 1
-
- # g must be at least unity
- if (g < 1.0): g = 1.0
-
- # Return statistical inefficency and correlation function estimate, if requested.
- if return_correlation_function:
- return (g, Ct)
-
- # Return the computed statistical inefficiency.
- return g
-#=============================================================================================
-def integratedAutocorrelationTime(A_n, B_n=None, fast=False, mintime=3):
- """
- Estimate the integrated autocorrelation time.
-
- """
-
- g = statisticalInefficiency(A_n, B_n, fast, mintime)
- tau = (g-1.0)/2.0
- return tau
-#=============================================================================================
-def integratedAutocorrelationTimeMultiple(A_kn, fast=False):
- """
- Estimate the integrated autocorrelation time from multiple timeseries.
-
- """
-
- g = statisticalInefficiencyMultiple(A_kn, fast, False)
- tau = (g-1.0)/2.0
- return tau
-#=============================================================================================
-def normalizedFluctuationCorrelationFunction(A_n, B_n=None, N_max=None):
- """
- Compute the normalized fluctuation (cross) correlation function of (two) stationary timeseries.
-
- C(t) = (<A(t) B(t)> - <A><B>) / (<AB> - <A><B>)
-
- This may be useful in diagnosing odd time-correlations in timeseries data.
-
- REQUIRED ARGUMENTS
- A_n[n] is nth value of timeseries A. Length is deduced from vector.
- B_n[n] is nth value of timeseries B. Length is deduced from vector.
-
- OPTIONAL ARGUMENTS
- N_max - if specified, will only compute correlation function out to time lag of N_max
-
- RETURNS
- C_n[n] is the normalized fluctuation auto- or cross-correlation function for timeseries A(t) and B(t).
-
- NOTES
- The same timeseries can be used for both A_n and B_n to get the autocorrelation statistical inefficiency.
- This procedure may be slow.
- The statistical error in C_n[n] will grow with increasing n. No effort is made here to estimate the uncertainty.
-
- REFERENCES
- [1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
- histogram analysis method for the analysis of simulated and parallel tempering simulations.
- JCTC 3(1):26-41, 2007.
-
- EXAMPLES
-
- Estimate normalized fluctuation correlation function.
-
- >>> import testsystems
- >>> A_t = testsystems.generateCorrelatedTimeseries(N=10000, tau=5.0)
- >>> C_t = normalizedFluctuationCorrelationFunction(A_t, N_max=25)
-
- """
-
- # If B_n is not specified, set it to be identical to A_n.
- if B_n is None:
- B_n = A_n
-
- # Create numpy copies of input arguments.
- A_n = numpy.array(A_n)
- B_n = numpy.array(B_n)
-
- # Get the length of the timeseries.
- N = A_n.size
-
- # Set maximum time to compute correlation functon for.
- if (not N_max) or (N_max > N-1):
- N_max = N-1
-
- # Be sure A_n and B_n have the same dimensions.
- if(A_n.shape != B_n.shape):
- raise ParameterError('A_n and B_n must have same dimensions.')
-
- # Initialize statistical inefficiency estimate with uncorrelated value.
- g = 1.0
-
- # Compute means and variance.
- mu_A = A_n.mean()
- mu_B = B_n.mean()
-
- # Make temporary copies at high precision with means subtracted off.
- dA_n = A_n.astype(numpy.float64) - mu_A
- dB_n = B_n.astype(numpy.float64) - mu_B
-
- # sigma2_AB = sum((A_n-mu_A) * (B_n-mu_B)) / (float(N)-1.0) # unbiased estimator
- sigma2_AB = (dA_n * dB_n).mean() # standard estimator to ensure C(0) = 1
- if(sigma2_AB == 0):
- raise ParameterException('Sample covariance sigma_AB^2 = 0 -- cannot compute statistical inefficiency')
-
- # allocate storage for normalized fluctuation correlation function
- C_n = numpy.zeros([N_max+1], numpy.float64)
-
- # Compute normalized correlation funtion.
- t = 0
- for t in range(0,N_max+1):
- # compute normalized fluctuation correlation function at time t
- C_n[t] = sum( dA_n[0:(N-t)]*dB_n[t:N] + dB_n[0:(N-t)]*dA_n[t:N] ) / (2.0 * float(N-t) * sigma2_AB)
-
- # Return the computed correlation function
- return C_n
-#=============================================================================================
-def normalizedFluctuationCorrelationFunctionMultiple(A_kn, B_kn=None, N_max=None, suppress_warning=False):
- """
- Compute the normalized fluctuation (cross) correlation function of (two) timeseries from multiple timeseries samples.
-
- C(t) = (<A(t) B(t)> - <A><B>) / (<AB> - <A><B>)
-
- This may be useful in diagnosing odd time-correlations in timeseries data.
-
- REQUIRED ARGUMENTS
- A_kn (Python list of numpy arrays) - A_kn[k] is the kth timeseries, and A_kn[k][n] is nth value of timeseries k. Length is deduced from arrays.
- B_kn (Python list of numpy arrays) - B_kn[k] is the kth timeseries, and B_kn[k][n] is nth value of timeseries k. B_kn[k] must have same length as A_kn[k]
-
- OPTIONAL ARGUMENTS
- N_max - if specified, will only compute correlation function out to time lag of N_max
- suppress_warning - if we are calculating a lot of these, the warning could get a little annoying. Make it possible to suppress it, but don't make that the default.
-
- RETURNS
- C_n[n] is the normalized fluctuation auto- or cross-correlation function for timeseries A(t) and B(t).
-
- NOTES
- The same timeseries can be used for both A_n and B_n to get the autocorrelation statistical inefficiency.
- This procedure may be slow.
- The statistical error in C_n[n] will grow with increasing n. No effort is made here to estimate the uncertainty.
-
- REFERENCES
- [1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
- histogram analysis method for the analysis of simulated and parallel tempering simulations.
- JCTC 3(1):26-41, 2007.
-
- EXAMPLES
-
- Estimate a portion of the normalized fluctuation autocorrelation function from multiple timeseries of different length.
-
- >>> import testsystems
- >>> N_k = [1000, 2000, 3000, 4000, 5000]
- >>> tau = 5.0 # exponential relaxation time
- >>> A_kn = [ testsystems.generateCorrelatedTimeseries(N=N, tau=tau) for N in N_k ]
- >>> C_n = normalizedFluctuationCorrelationFunctionMultiple(A_kn, N_max=25)
-
- """
-
- # If B_kn is not specified, define it to be identical with A_kn.
- if B_kn is None:
- B_kn = A_kn
-
- # TODO: Change this to support other iterable types, like sets.
- # Make sure A_kn and B_kn are both lists
- if (type(A_kn) is not list) or (type(B_kn) is not list):
- raise ParameterError("A_kn and B_kn must each be a list of numpy arrays.")
-
- # Ensure the same number of timeseries are stored in A_kn and B_kn.
- if (len(A_kn) != len(B_kn)):
- raise ParameterError("A_kn and B_kn must contain corresponding timeseries -- different numbers of timeseries detected in each.")
-
- # Determine number of timeseries stored.
- K = len(A_kn)
-
- # Ensure both observable trajectories in each timeseries are of the same length.
- for k in range(K):
- A_n = A_kn[k]
- B_n = B_kn[k]
- if A_n.size != B_n.size:
- raise Exception("A_kn and B_kn must contain corresponding timeseries -- lack of correspondence in timeseries lenghts detected.")
-
- # Get the length of each timeseries.
- N_k = numpy.zeros([K], numpy.int32)
- for k in range(K):
- N_k[k] = A_kn[k].size
-
- # Determine total number of samples.
- N = sum(N_k)
-
- # Set maximum time to compute correlation functon for.
- if (not N_max) or (N_max > max(N_k) - 1):
- N_max = max(N_k) - 1
-
- # Compute means.
- mu_A = 0.0
- mu_B = 0.0
- for k in range(K):
- mu_A += A_kn[k].sum()
- mu_B += B_kn[k].sum()
- mu_A /= float(N)
- mu_B /= float(N)
-
- # Compute fluctuation timeseries.
- dA_kn = list()
- dB_kn = list()
- for k in range(K):
- dA_n = A_kn[k] - mu_A
- dB_n = B_kn[k] - mu_B
- dA_kn.append(dA_n)
- dB_kn.append(dB_n)
-
- # Compute covariance.
- sigma2_AB = 0.0
- for k in range(K):
- sigma2_AB += (dA_kn[k] * dB_kn[k]).sum()
- sigma2_AB /= float(N)
-
- # allocate storage for normalized fluctuation correlation function
- C_n = numpy.zeros([N_max+1], numpy.float64)
-
- # Accumulate the integrated correlation time by computing the normalized correlation time at
- # increasing values of t. Stop accumulating if the correlation function goes negative, since
- # this is unlikely to occur unless the correlation function has decayed to the point where it
- # is dominated by noise and indistinguishable from zero.
- t = 0
- for t in range(0,N_max+1):
- # compute unnormalized correlation function
- numerator = 0.0
- denominator = 0.0
- for k in range(K):
- if (t >= N_k[k]): continue # skip this trajectory if t is longer than the timeseries
- numerator += (dA_kn[k][0:(N_k[k]-t)] * dB_kn[k][t:N_k[k]]).sum()
- denominator += float(N_k[k]-t)
- C = numerator / denominator
-
- # compute normalized fluctuation correlation function at time t
- C /= sigma2_AB
-
- # Store correlation function.
- C_n[t] = C
-
- # Return the computed fluctuation correlation function.
- return C_n
-#=============================================================================================
-def subsampleCorrelatedData(A_t, g=None, fast=False, conservative=False, verbose=False):
- """Determine the indices of an uncorrelated subsample of the data.
-
- REQUIRED ARGUMENTS
- A_t (T array) - A_t[t] is the t-th value of timeseries A(t). Length is deduced from vector.
-
- OPTIONAL ARGUMENTS
- g (float) - if provided, the statistical inefficiency g is used to subsample the timeseries -- otherwise it will be computed (default: None)
- fast (logical) - fast can be set to True to give a less accurate but very quick estimate (default: False)
- conservative (logical) - if set to True, uniformly-spaced indices are chosen with interval ceil(g), where g is the statistical inefficiency. Otherwise, indices are chosen non-uniformly with interval of approximately g in order to end up with approximately T/g total indices
- verbose (logical) - if True, some output is printed
-
- RETURNS
- indices (list of int) - the indices of an uncorrelated subsample of the data
-
- NOTES
- The statistical inefficiency is computed with the function computeStatisticalInefficiency().
-
- TODO
- Instead of using regular stride, use irregular stride so more data can be fit in when g is non-integral.
-
- EXAMPLES
-
- Subsample a correlated timeseries to extract an effectively uncorrelated dataset.
-
- >>> import testsystems
- >>> A_t = testsystems.generateCorrelatedTimeseries(N=10000, tau=5.0) # generate a test correlated timeseries
- >>> indices = subsampleCorrelatedData(A_t) # compute indices of uncorrelated timeseries
- >>> A_n = A_t[indices] # extract uncorrelated samples
-
- Extract uncorrelated samples from multiple timeseries data from the same process.
-
- >>> # Generate multiple correlated timeseries data of different lengths.
- >>> T_k = [1000, 2000, 3000, 4000, 5000]
- >>> K = len(T_k) # number of timeseries
- >>> tau = 5.0 # exponential relaxation time
- >>> A_kt = [ testsystems.generateCorrelatedTimeseries(N=T, tau=tau) for T in T_k ] # A_kt[k] is correlated timeseries k
- >>> # Estimate statistical inefficiency from all timeseries data.
- >>> g = statisticalInefficiencyMultiple(A_kt)
- >>> # Count number of uncorrelated samples in each timeseries.
- >>> N_k = numpy.array([ len(subsampleCorrelatedData(A_t, g=g)) for A_t in A_kt ]) # N_k[k] is the number of uncorrelated samples in timeseries k
- >>> N = N_k.sum() # total number of uncorrelated samples
- >>> # Subsample all trajectories to produce uncorrelated samples
- >>> A_kn = [ A_t[subsampleCorrelatedData(A_t, g=g)] for A_t in A_kt ] # A_kn[k] is uncorrelated subset of trajectory A_kt[t]
- >>> # Concatenate data into one timeseries.
- >>> A_n = numpy.zeros([N], numpy.float32) # A_n[n] is nth sample in concatenated set of uncorrelated samples
- >>> A_n[0:N_k[0]] = A_kn[0]
- >>> for k in range(1,K): A_n[N_k[0:k].sum():N_k[0:k+1].sum()] = A_kn[k]
-
- """
-
- # Create numpy copy of arrays.
- A_t = numpy.array(A_t)
-
- # Get the length of the timeseries.
- T = A_t.size
-
- # Compute the statistical inefficiency for the timeseries.
- if not g:
- if verbose: print("Computing statistical inefficiency...")
- g = statisticalInefficiency(A_t, A_t, fast = fast)
- if verbose: print("g = %f" % g)
-
- if conservative:
- # Round g up to determine the stride we can use to pick out regularly-spaced uncorrelated samples.
- import math
- stride = int(math.ceil(g))
- if verbose: print("conservative subsampling: using stride of %d" % stride)
-
- # Assemble list of indices of uncorrelated snapshots.
- indices = list(range(0, T, stride))
- else:
- # Choose indices as floor(n*g), with n = 0,1,2,..., until we run out of data.
- import math
- indices = []
- n = 0
- while int(round(n*g)) < T:
- t = int(round(n*g))
- # ensure we don't sample the same point twice
- if (n == 0) or (t != indices[n-1]):
- indices.append(t)
- n += 1
- if verbose: print("standard subsampling: using average stride of %f" % g)
-
- # Number of samples in subsampled timeseries.
- N = len(indices)
-
- if verbose: print("The resulting subsampled set has %d samples (original timeseries had %d)." % (N, T))
-
- # Return the list of indices of uncorrelated snapshots.
- return indices
-
-#=============================================================================================
-# MAIN AND TESTS
-#=============================================================================================
-
-if __name__ == "__main__":
- import doctest
- doctest.testmod()
-
--- /dev/null
+###########################################################################
+# #
+# physical_validation, #
+# a python package to test the physical validity of MD results #
+# #
+# Written by Michael R. Shirts <michael.shirts@colorado.edu> #
+# Pascal T. Merz <pascal.merz@colorado.edu> #
+# #
+# Copyright (C) 2012 University of Virginia #
+# (C) 2017 University of Colorado Boulder #
+# #
+# This library is free software; you can redistribute it and/or #
+# modify it under the terms of the GNU Lesser General Public #
+# License as published by the Free Software Foundation; either #
+# version 2.1 of the License, or (at your option) any later version. #
+# #
+# This library is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
+# Lesser General Public License for more details. #
+# #
+# You should have received a copy of the GNU Lesser General Public #
+# License along with this library; if not, write to the #
+# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
+# Boston, MA 02110-1301 USA #
+# #
+###########################################################################
+
+from __future__ import division
+
+import numpy as np
+from scipy import stats
+
+from pymbar import timeseries
+
+from . import error as pv_error
+
+
+def equilibrate(traj, verbose=False, name=None):
+ traj = np.array(traj)
+ if traj.ndim == 1:
+ t0, g, n_eff = timeseries.detectEquilibration(traj)
+ if t0 == 0 and traj.size > 10:
+ # See https://github.com/choderalab/pymbar/issues/277
+ t0x, gx, n_effx = timeseries.detectEquilibration(traj[10:])
+ if t0x != 0:
+ t0 = t0x + 10
+ n = traj.size
+ res = traj[t0:]
+
+ elif traj.ndim == 2 and traj.shape[0] == 2:
+ t01, g1, n_eff1 = timeseries.detectEquilibration(traj[0])
+ t02, g2, n_eff2 = timeseries.detectEquilibration(traj[1])
+ t0 = max(t01, t02)
+ if t0 == 0 and traj.shape[1] > 10:
+ # See https://github.com/choderalab/pymbar/issues/277
+ t01x, g1x, n_eff1x = timeseries.detectEquilibration(traj[0, 10:])
+ t02x, g2x, n_eff2x = timeseries.detectEquilibration(traj[1, 10:])
+ t0x = max(t01x, t02x)
+ if t0x != 0:
+ t0 = t0x + 10
+ n = traj.shape[1]
+ res = traj[:, t0:]
+ elif traj.ndim == 2:
+ raise NotImplementedError('trajectory.equilibrate() in 2 dimensions is only '
+ 'implemented for exactly two timeseries.')
+ else:
+ raise NotImplementedError('trajectory.equilibrate() is not implemented for '
+ 'trajectories with more than 2 dimensions.')
+
+ if verbose:
+ if not name:
+ name = 'Trajectory'
+ if t0 == 0:
+ print('{:s} equilibration: No frames discarded for burn-in.'.format(name))
+ elif t0 == 1:
+ print('{:s} equilibration: First frame ({:.1%} of '
+ 'trajectory) discarded for burn-in.'.format(name, 1 / n))
+ else:
+ print('{:s} equilibration: First {:d} frames ({:.1%} of '
+ 'trajectory) discarded for burn-in.'.format(name, t0, t0 / n))
+
+ return res
+
+
+def decorrelate(traj, facs=None, verbose=False, name=None):
+ traj = np.array(traj)
+ if traj.ndim == 1:
+ idx = timeseries.subsampleCorrelatedData(traj)
+ n0 = traj.size
+ n1 = len(idx)
+ res = traj[idx]
+ elif facs is not None:
+ # The cleanest way to decorrelate multi-dimensional trajectories would probably
+ # be a sort of "parallel-decorrelation", taking frames in a way that both trajectories
+ # are independently decorrelated. pymbar does not offer this functionality, so for
+ # now, here's a work-around: We'll decorrelate such that
+ # traj_sum = facs[0]*traj[0, :] + facs[1]*traj[1, :] + ...
+ # is decorrelated.
+ # Use case:
+ # traj_sum = 1.0 * U + P * V
+ traj_sum = np.zeros(traj.shape[1])
+ for n, f in enumerate(facs):
+ traj_sum += f * traj[n]
+ idx = timeseries.subsampleCorrelatedData(traj_sum)
+ n0 = traj.shape[1]
+ n1 = len(idx)
+ res = traj[:, idx]
+ else:
+ raise NotImplementedError('trajectory.decorrelate() is not implemented for '
+ 'trajectories with more than 1 dimension.')
+ if verbose:
+ n = n0 - n1
+ if not name:
+ name = 'Trajectory'
+ if n == 0:
+ print('{:s} decorrelation: No frames discarded for decorrelation.'.format(name))
+ elif n == 1:
+ print('{:s} decorrelation: 1 frame ({:.1%} of '
+ 'trajectory) discarded for decorrelation.'.format(name, 1/n0))
+ else:
+ print('{:s} decorrelation: {:d} frames ({:.1%} of '
+ 'trajectory) discarded for decorrelation.'.format(name, n, n/n0))
+
+ return res
+
+
+def cut_tails(traj, cut, verbose=False, name=None):
+ traj = np.array(traj)
+ dc = 100 * cut
+ if traj.ndim == 1:
+ tmax = stats.scoreatpercentile(traj, 100 - dc)
+ tmin = stats.scoreatpercentile(traj, dc)
+ t = traj[(tmin <= traj) * (traj <= tmax)]
+ n0 = traj.size
+ n = t.size
+ elif traj.ndim == 2:
+ tmax = stats.scoreatpercentile(traj, 100 - dc, axis=1)
+ tmin = stats.scoreatpercentile(traj, dc, axis=1)
+ t = traj[:,
+ (tmin[0] <= traj[0]) * (tmin[1] <= traj[1]) *
+ (tmax[0] >= traj[0]) * (tmax[1] >= traj[1])]
+ n0 = traj.shape[1]
+ n = t.shape[1]
+ else:
+ raise NotImplementedError('trajectory.cut_tails() is not implemented for '
+ 'trajectories with more than 2 dimension.')
+
+ if verbose:
+ if not name:
+ name = 'Trajectory'
+ print('{:s} tails (cut = {:.2%}): {:n} frames ({:.2%} of trajectory) were cut'.format(
+ name, cut, n0 - n, (n0-n)/n0))
+
+ return t
+
+
+def prepare(traj, cut=None, facs=None, verbosity=1, name=None):
+ traj = np.array(traj)
+ if not name:
+ name = 'Trajectory'
+
+ def traj_length(t):
+ if t.ndim == 1:
+ return t.size
+ else:
+ return t.shape[1]
+
+ if traj.ndim > 2:
+ raise NotImplementedError('trajectory.prepare() is not implemented for '
+ 'trajectories with more than 2 dimensions.')
+
+ # original length
+ n0 = traj_length(traj)
+ # equilibrate
+ res = equilibrate(traj, verbose=False)
+ n1 = traj_length(res)
+ if verbosity > 2:
+ print('{:s} equilibration: First {:d} frames ({:.1%} of '
+ 'trajectory) discarded for burn-in.'.format(name, n0 - n1, (n0 - n1) / n0))
+ # decorrelate
+ res = decorrelate(res, facs=facs, verbose=False)
+ n2 = traj_length(res)
+ if verbosity > 2:
+ print('{:s} decorrelation: {:d} frames ({:.1%} of equilibrated '
+ 'trajectory) discarded for decorrelation.'.format(name, n1 - n2, (n1 - n2)/n1))
+ # cut tails
+ if cut is not None:
+ res = cut_tails(res, cut, verbose=False)
+ n3 = traj_length(res)
+ if verbosity > 2:
+ print('{:s} tails (cut = {:.2%}): {:n} frames ({:.2%} of equilibrated and '
+ 'decorrelated trajectory) were cut'.format(name, cut, n2 - n3, (n2 - n3)/n2))
+ # end length
+ nn = traj_length(res)
+
+ if verbosity > 0:
+ print('After equilibration, decorrelation and tail pruning, {:.2%} ({:n} frames) '
+ 'of original {:s} remain.'.format(nn/n0, nn, name))
+
+ return res
+
+
+def overlap(traj1, traj2, cut=None, verbose=False, name=None):
+ traj1 = np.array(traj1)
+ traj2 = np.array(traj2)
+ if traj1.ndim == traj2.ndim and traj2.ndim == 1:
+ if cut:
+ dc = 100 * cut
+ max1 = stats.scoreatpercentile(traj1, 100 - dc)
+ min1 = stats.scoreatpercentile(traj1, dc)
+ max2 = stats.scoreatpercentile(traj2, 100 - dc)
+ min2 = stats.scoreatpercentile(traj2, dc)
+ else:
+ max1 = traj1.max()
+ min1 = traj1.min()
+ max2 = traj2.max()
+ min2 = traj2.min()
+
+ tmin = max(min1, min2)
+ tmax = min(max1, max2)
+
+ t1 = traj1[(tmin <= traj1) * (traj1 <= tmax)]
+ t2 = traj2[(tmin <= traj2) * (traj2 <= tmax)]
+ elif traj1.ndim == traj2.ndim and traj2.ndim == 2:
+ if traj1.shape[0] != 2 or traj2.shape[0] != 2:
+ raise NotImplementedError('trajectory.overlap() in 2 dimensions is only '
+ 'implemented for exactly two timeseries per trajectory.')
+ if cut:
+ dc = 100 * cut
+ max1 = stats.scoreatpercentile(traj1, 100 - dc, axis=1)
+ min1 = stats.scoreatpercentile(traj1, dc, axis=1)
+ max2 = stats.scoreatpercentile(traj2, 100 - dc, axis=1)
+ min2 = stats.scoreatpercentile(traj2, dc, axis=1)
+ else:
+ max1 = traj1.max(axis=1)
+ min1 = traj1.min(axis=1)
+ max2 = traj2.max(axis=1)
+ min2 = traj2.min(axis=1)
+
+ tmin = np.max([min1, min2], axis=0)
+ tmax = np.min([max1, max2], axis=0)
+
+ t1 = traj1[:,
+ (tmin[0] <= traj1[0]) * (tmin[1] <= traj1[1]) *
+ (tmax[0] >= traj1[0]) * (tmax[1] >= traj1[1])]
+ t2 = traj2[:,
+ (tmin[0] <= traj2[0]) * (tmin[1] <= traj2[1]) *
+ (tmax[0] >= traj2[0]) * (tmax[1] >= traj2[1])]
+ elif traj1.ndim != traj2.ndim:
+ raise pv_error.InputError(['traj1', 'traj2'],
+ 'Trajectories don\'t have the same number of dimensions')
+ else:
+ raise NotImplementedError('trajectory.overlap() is not implemented for '
+ 'trajectories with more than 2 dimensions.')
+
+ if np.any(max1 < min2) or np.any(max2 < min1):
+ if verbose:
+ if not name:
+ name = 'Trajectory'
+ print('{:s} overlap: No overlap found between trajectories'.format(name))
+ return np.array([]), np.array([]), None, None
+
+ if verbose:
+ if not name:
+ name = 'Trajectory'
+ print('{:s} overlap: {:.1%} of trajectory 1, and {:.1%} of trajectory 2 '
+ 'were found within overlap region.\n'
+ ' That corresponds to {:n} frames and {:n} frames, '
+ 'respectively'.format(name, len(traj1)/len(t1), len(traj2)/len(t2),
+ len(t1), len(t2)))
+
+ return t1, t2, tmin, tmax
[
{
+ "name": "int_water_md_verlet_none_pme_pme",
"dir": "int_water_md_verlet_none_pme_pme",
"tests": [
{"test": "integrator", "args": "-n 3 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md_verlet_none_pme_switch",
"dir": "int_water_md_verlet_none_pme_switch",
"tests": [
{"test": "integrator", "args": "-n 3 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md-vv_verlet_none_pme_pme",
"dir": "int_water_md-vv_verlet_none_pme_pme",
"tests": [
{"test": "integrator", "args": "-n 2 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md-vv_verlet_none_pme_switch",
"dir": "int_water_md-vv_verlet_none_pme_switch",
"tests": [
{"test": "integrator", "args": "-n 2 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "ens_argon_md_verlet_pme_vr",
"dir": "ens_argon_md_verlet_pme_vr",
"tests": [
- {"test": "ensemble", "args": "--dtemp 5"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "-t 4 --dtemp 6"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_argon_md_verlet_pme_vr_pr",
"dir": "ens_argon_md_verlet_pme_vr_pr",
"tests": [
- {"test": "ensemble", "args": "--dtemp 5 --dpress 100"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "-t 4 --dtemp 3 0 3 --dpress 0 75 75"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_argon_md-vv_verlet_pme_nh_mttk",
"dir": "ens_argon_md-vv_verlet_pme_nh_mttk",
"tests": [
- {"test": "ensemble", "args": "--dtemp 5 --dpress 100"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "-t 4 --dtemp 3 0 3 --dpress 0 75 75"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_argon_md-vv_verlet_pme_vr",
"dir": "ens_argon_md-vv_verlet_pme_vr",
"tests": [
- {"test": "ensemble", "args": "--dtemp 5"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "-t 4 --dtemp 6"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_water_md_verlet_settle_pme_vr",
"dir": "ens_water_md_verlet_settle_pme_vr",
"tests": [
- {"test": "ensemble", "args": "--dtemp 10"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "-t 4 --dtemp 9"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_water_md_verlet_settle_pme_vr_pr",
"dir": "ens_water_md_verlet_settle_pme_vr_pr",
"tests": [
- {"test": "ensemble", "args": "--dtemp 10 --dpress 100"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "-t 4 --dtemp 8 0 8 --dpress 0 300 300"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_water_md-vv_verlet_none_pme_nh_mttk",
"dir": "ens_water_md-vv_verlet_none_pme_nh_mttk",
"tests": [
- {"test": "ensemble", "args": "--dtemp 10 --dpress 100"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "-t 4 --dtemp 8 0 8 --dpress 0 350 350"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_water_md-vv_verlet_settle_pme_vr",
"dir": "ens_water_md-vv_verlet_settle_pme_vr",
"tests": [
- {"test": "ensemble", "args": "--dtemp 10"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "-t 4 --dtemp 9"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
}
]
integrator = md-vv
vdwtype = cut-off
vdw-modifier = force-switch
+rvdw-switch = 0.8
integrator = md
vdwtype = cut-off
vdw-modifier = force-switch
+rvdw-switch = 0.8
[
{
+ "name": "int_argon_md-vv_verlet_pme",
"dir": "int_argon_md-vv_verlet_pme",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
]
},
{
+ "name": "int_argon_md-vv_verlet_switch",
"dir": "int_argon_md-vv_verlet_switch",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
]
},
{
+ "name": "int_argon_md_verlet_pme",
"dir": "int_argon_md_verlet_pme",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
]
},
{
+ "name": "int_argon_md_verlet_switch",
"dir": "int_argon_md_verlet_switch",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
]
},
{
+ "name": "int_water_md_verlet_lincs_pme_pme",
"dir": "int_water_md_verlet_lincs_pme_pme",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md_verlet_lincs_pme_switch",
"dir": "int_water_md_verlet_lincs_pme_switch",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md_verlet_none_pme_pme",
"dir": "int_water_md_verlet_none_pme_pme",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md_verlet_none_pme_switch",
"dir": "int_water_md_verlet_none_pme_switch",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md_verlet_settle_pme_pme",
"dir": "int_water_md_verlet_settle_pme_pme",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md_verlet_settle_pme_switch",
"dir": "int_water_md_verlet_settle_pme_switch",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md-vv_verlet_lincs_pme_pme",
"dir": "int_water_md-vv_verlet_lincs_pme_pme",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md-vv_verlet_lincs_pme_switch",
"dir": "int_water_md-vv_verlet_lincs_pme_switch",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md-vv_verlet_none_pme_pme",
"dir": "int_water_md-vv_verlet_none_pme_pme",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md-vv_verlet_none_pme_switch",
"dir": "int_water_md-vv_verlet_none_pme_switch",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md-vv_verlet_settle_pme_pme",
"dir": "int_water_md-vv_verlet_settle_pme_pme",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "int_water_md-vv_verlet_settle_pme_switch",
"dir": "int_water_md-vv_verlet_settle_pme_switch",
"tests": [
{"test": "integrator", "args": "-n 5 -t 0.25"}
- ],
- "grompp_args": "-maxwarn 5"
+ ]
},
{
+ "name": "ens_argon_md_verlet_pme_vr",
"dir": "ens_argon_md_verlet_pme_vr",
"tests": [
- {"test": "ensemble", "args": "--dtemp 5"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "--dtemp 6"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_argon_md_verlet_pme_vr_pr",
"dir": "ens_argon_md_verlet_pme_vr_pr",
"tests": [
- {"test": "ensemble", "args": "--dtemp 5 --dpress 100"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "--dtemp 3 0 3 --dpress 0 75 75"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_argon_md-vv_verlet_pme_nh_mttk",
"dir": "ens_argon_md-vv_verlet_pme_nh_mttk",
"tests": [
- {"test": "ensemble", "args": "--dtemp 5 --dpress 100"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "--dtemp 3 0 3 --dpress 0 75 75"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_argon_md-vv_verlet_pme_vr",
"dir": "ens_argon_md-vv_verlet_pme_vr",
"tests": [
- {"test": "ensemble", "args": "--dtemp 5"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "--dtemp 6"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_water_md_verlet_settle_pme_vr",
"dir": "ens_water_md_verlet_settle_pme_vr",
"tests": [
- {"test": "ensemble", "args": "--dtemp 10"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "--dtemp 9"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_water_md_verlet_settle_pme_vr_pr",
"dir": "ens_water_md_verlet_settle_pme_vr_pr",
"tests": [
- {"test": "ensemble", "args": "--dtemp 10 --dpress 100"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "--dtemp 8 0 8 --dpress 0 300 300"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_water_md-vv_verlet_none_pme_nh_mttk",
"dir": "ens_water_md-vv_verlet_none_pme_nh_mttk",
"tests": [
- {"test": "ensemble", "args": "--dtemp 10 --dpress 100"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "--dtemp 8 0 8 --dpress 0 350 350"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
},
{
+ "name": "ens_water_md-vv_verlet_settle_pme_vr",
"dir": "ens_water_md-vv_verlet_settle_pme_vr",
"tests": [
- {"test": "ensemble", "args": "--dtemp 10"},
- {"test": "kin_mb", "args": "-t 0.05"}
- ],
- "grompp_args": "-maxwarn 5"
+ {"test": "ensemble", "args": "--dtemp 9"},
+ {"test": "kin_mb", "args": "-t 0.01"}
+ ]
}
]