ERROR_QUIET)
if (RESULT_VAR EQUAL 0)
string(STRIP "${OUTPUT_VAR_TEMP}" OUTPUT_VAR)
- set(${RESULT} ${OUTPUT_VAR_TEMP} CACHE INTERNAL "Result of test for number of AVX-512 FMA units")
+ set(${RESULT} ${OUTPUT_VAR} CACHE INTERNAL "Result of test for number of AVX-512 FMA units")
else()
message(STATUS "Could not identify number of AVX-512 units - detection program did run successfully")
set(${RESULT} -1 CACHE INTERNAL "Result of test for number of AVX-512 FMA units")
# code. Set the CMake variable GMX_NVCC_WORKS on if you want to
# bypass this check.
if((_cuda_nvcc_executable_or_flags_changed OR CUDA_HOST_COMPILER_CHANGED OR NOT GMX_NVCC_WORKS) AND NOT WIN32)
- message(STATUS "Check for working NVCC/C compiler combination")
+ message(STATUS "Check for working NVCC/C++ compiler combination with nvcc '${CUDA_NVCC_EXECUTABLE}'")
execute_process(COMMAND ${CUDA_NVCC_EXECUTABLE} -ccbin ${CUDA_HOST_COMPILER} -c ${CUDA_NVCC_FLAGS} ${CUDA_NVCC_FLAGS_${_build_type}} ${CMAKE_SOURCE_DIR}/cmake/TestCUDA.cu
RESULT_VARIABLE _cuda_test_res
OUTPUT_VARIABLE _cuda_test_out
message(STATUS "${CUDA_NVCC_EXECUTABLE} standard output: '${_cuda_test_out}'")
message(STATUS "${CUDA_NVCC_EXECUTABLE} standard error: '${_cuda_test_err}'")
if(${_cuda_test_err} MATCHES "nsupported")
- message(FATAL_ERROR "NVCC/C compiler combination does not seem to be supported. CUDA frequently does not support the latest versions of the host compiler, so you might want to try an earlier C/C++ compiler version and make sure your CUDA compiler and driver are as recent as possible.")
+ message(FATAL_ERROR "NVCC/C++ compiler combination does not seem to be supported. CUDA frequently does not support the latest versions of the host compiler, so you might want to try an earlier C++ compiler version and make sure your CUDA compiler and driver are as recent as possible.")
else()
message(FATAL_ERROR "CUDA compiler does not seem to be functional.")
endif()
elseif(NOT GMX_CUDA_TEST_COMPILER_QUIETLY)
- message(STATUS "Check for working NVCC/C compiler combination - works")
+ message(STATUS "Check for working NVCC/C++ compiler combination - works")
set(GMX_NVCC_WORKS TRUE CACHE INTERNAL "Nvcc can compile a trivial test program")
endif()
endif() # GMX_CHECK_NVCC
release-notes/2020/major/deprecated-functionality.rst
release-notes/2020/major/portability.rst
release-notes/2020/major/miscellaneous.rst
+ release-notes/2019/2019.4.rst
+ release-notes/2019/2019.3.rst
release-notes/2019/2019.2.rst
release-notes/2019/2019.1.rst
release-notes/2019/major/highlights.rst
release-notes/2019/major/deprecated-functionality.rst
release-notes/2019/major/portability.rst
release-notes/2019/major/miscellaneous.rst
+ release-notes/2018/2018.7.rst
release-notes/2018/2018.6.rst
release-notes/2018/2018.5.rst
release-notes/2018/2018.4.rst
else()
set(MANUAL_BUILD_IS_POSSIBLE OFF)
- set(MANUAL_BUILD_NOT_POSSIBLE_REASON "Sphinx version ${EXPECTED_SPHINX_VERSION} is not available")
+ set(MANUAL_BUILD_NOT_POSSIBLE_REASON "Sphinx expected minimum version ${EXPECTED_SPHINX_VERSION} is not available")
add_custom_target(webpage-sphinx
COMMAND ${CMAKE_COMMAND} -E echo
- "HTML pages cannot be built because Sphinx version ${EXPECTED_SPHINX_VERSION} is not available"
+ "HTML pages cannot be built because Sphinx expected minimum version ${EXPECTED_SPHINX_VERSION} is not available"
VERBATIM)
add_custom_target(install-guide
COMMAND ${CMAKE_COMMAND} -E echo
- "INSTALL cannot be built because Sphinx version ${EXPECTED_SPHINX_VERSION} is not available"
+ "INSTALL cannot be built because Sphinx expected minimum version ${EXPECTED_SPHINX_VERSION} is not available"
VERBATIM)
add_custom_target(man
COMMAND ${CMAKE_COMMAND} -E echo
- "man pages cannot be built because Sphinx version ${EXPECTED_SPHINX_VERSION} is not available"
+ "man pages cannot be built because Sphinx expected minimum version ${EXPECTED_SPHINX_VERSION} is not available"
VERBATIM)
add_custom_target(sphinx-create-texman
COMMAND ${CMAKE_COMMAND} -E echo
- "Cannot prepare LaTeX input files because Sphinx version ${EXPECTED_SPHINX_VERSION} is not available"
+ "Cannot prepare LaTeX input files because Sphinx expected minimum version ${EXPECTED_SPHINX_VERSION} is not available"
VERBATIM)
+ add_custom_target(manual
+ COMMAND ${CMAKE_COMMAND} -E echo
+ "manual cannot be built because Sphinx expected minimum version ${EXPECTED_SPHINX_VERSION} is not available")
endif()
if (MAN_PAGE_DIR)
elseif (NOT SPHINX_FOUND)
# Hardly anything gets built if Sphinx is not available, so don't bother.
set(HTML_BUILD_IS_POSSIBLE OFF)
- set(HTML_BUILD_NOT_POSSIBLE_REASON "Sphinx version ${EXPECTED_SPHINX_VERSION} is required")
+ set(HTML_BUILD_NOT_POSSIBLE_REASON "Sphinx expected minimum version ${EXPECTED_SPHINX_VERSION} is required")
endif()
if (NOT MANUAL_BUILD_IS_POSSIBLE)
list(APPEND HTML_BUILD_WARNINGS
library. LAM-MPI_ might work, but since it has
been deprecated for years, it is not supported.
+For example, depending on your actual MPI library, use ``cmake
+-DCMAKE_C_COMPILER=mpicc -DCMAKE_CXX_COMPILER=mpicxx -DGMX_MPI=on``.
+
+
CMake
^^^^^
.. math:: \begin{aligned}
\exp(iL{\Delta t}) &\approx& \exp\left(iL_{\mathrm{NHC}}{\Delta t}/2\right) \exp\left(iL_2 {\Delta t}/2\right) \nonumber \\
&&\exp\left(iL_1 {\Delta t}\right) \exp\left(iL_2 {\Delta t}/2\right) \exp\left(iL_{\mathrm{NHC}}{\Delta t}/2\right).\end{aligned}
- :label:eqnVVNHTrotter
+ :label: eqnVVNHTrotter
If the Nosé-Hoover chain is sufficiently slow with respect to the
motions of the system, we can write an alternate integrator over
applied to a special fixed list of atoms. Such a list is usually
generated by the :ref:`pdb2gmx <gmx pdb2gmx>` program.
+.. _reference-manual-position-restraints:
+
+Note that position restraints make the potential dependent on absolute
+coordinates in space. Therefore, in general the pressure (and virial)
+is not well defined, as the pressure is the derivative of the free-energy
+of the system with respect to the volume. When the reference coordinates
+are scaled along with the system, which can be selected with the mdp option
+:mdp-value:`refcoord-scaling=all`, the pressure and virial are well defined.
+
Flat-bottomed position restraints
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. math:: V_{dihr}(\phi') ~=~ \left\{
\begin{array}{lcllll}
- {\frac{1}{2}}k_{dihr}(\phi'-\phi_0-\Delta\phi)^2
- &\mbox{for}& \phi' & > & \Delta\phi \\[1.5ex]
- 0 &\mbox{for}& \phi' & \le & \Delta\phi \\[1.5ex]
+ {\frac{1}{2}}k_{dihr}(\phi'-\Delta\phi)^2
+ &\mbox{for}& \|\phi'\| & > & \Delta\phi \\[1.5ex]
+ 0 &\mbox{for}& \|\phi'\| & \le & \Delta\phi \\[1.5ex]
\end{array}\right.
:label: eqndihre
</div>
+.. raw:: html
+
+ <div id="ref-GroenhofEwaldArtefact">
+
+.. _refGroenhofEwaldArtefact:
+
+:sup:`181` Hub, J. S., de Groot, B. L., Grubmüller, H., Groenhof, G.,
+"Quantifying artifacts in Ewald simulations of inhomogeneous systems with a net charge,"
+*J. Chem. Theory Comput.*, **10**, 381–390 (2014).
+
+.. raw:: html
+
+ </div>
+
+
.. raw:: html
</div>
GROMACS 2018.6 release notes
----------------------------
-This version was released on TODO, 2019. These release notes document
+This version was released on February 22, 2019. These release notes document
the changes that have taken place in GROMACS since version 2018.5, to fix known
-issues. It also incorporates all fixes made in version TODO and
-earlier, which you can find described in the :ref:`release-notes`.
+issues. It also incorporates all fixes made in previous versions,
+which you can find described in the :ref:`release-notes`.
Fixes where mdrun could behave incorrectly
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Correct free-energy Delta H output with mass lambda's
+"""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+When separate lambda parameters were used for perturbed mass
+free-energy contributions, these contributions were double counted
+in the Delta H output used for BAR calculations. Note that dH/dlambda
+was always correct.
+
+:issue:`2703`
+:issue:`2849`
+
+.. _release-notes-2018-6-gpu:
+
+Fix incorrect LJ repulsion force switching on GPUs
+""""""""""""""""""""""""""""""""""""""""""""""""""
+
+When using a CUDA or OpenCL GPU, the coefficient for the second order
+term for the LJ repulsion in the force switching function, called 'A'
+in the manual, had the wrong sign. This led to very small errors in
+the forces and the pressure. Note that the dispersion force switching
+was correct. The effects of this bug on any physical results seem to
+be negligible. Note that force switching is usually only used in
+combination with the CHARMM force field.
+
+:issue:`2845`
+
Fixes for ``gmx`` tools
^^^^^^^^^^^^^^^^^^^^^^^
Fixes to improve portability
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Fix compiler flags for Power8
+""""""""""""""""""""""""""""""""""""""""""""""""""
+
+A compiler flag for Power8 processors led to errors in the code and was removed.
+
+:issue:`2747`
+:issue:`2746`
+:issue:`2734`
+
Miscellaneous
^^^^^^^^^^^^^
--- /dev/null
+GROMACS 2018.7 release notes
+----------------------------
+
+This version was released on May 29, 2019. These release notes document
+the changes that have taken place in GROMACS since version 2018.6, to fix known
+issues. It also incorporates all fixes made in previous versions,
+which you can find described in the :ref:`release-notes`.
+
+Fixes where mdrun could behave incorrectly
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Reverted broken change made in 2018.1
+"""""""""""""""""""""""""""""""""""""""""""""""""
+
+Reverted a change made in 2018.1 that broke simulations that used the
+SHAKE constraint algorithm.
+
+:issue:`2879`
+
+Work around gcc 7 AVX512 compiler bug
+"""""""""""""""""""""""""""""""""""""""
+
+With gcc version 7 a compiler bug caused a large part of non-bonded
+interactions to be ignored when compiled with AVX512 and running on more
+than 16 OpenMP threads.
+
+:issue:`2762`
+
+Fixes for ``gmx`` tools
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Fixes to improve portability
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Miscellaneous
+^^^^^^^^^^^^^
+
+Updated release notes for 2018.6
+""""""""""""""""""""""""""""""""
+
+A :ref:`fix <release-notes-2018-6-gpu>` made to GPU kernels in 2018.6 was
+thought to resolve :issue:`2845` but further investigation suggests that
+the real cause is not yet known.
+
:issue:`2830`
+.. _release-notes-2019-1-gpu:
+
Fix incorrect LJ repulsion force switching on GPUs
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
term for the LJ repulsion in the force switching function, called 'A'
in the manual, had the wrong sign. This led to very small errors in
the forces and the pressure. Note that the dispersion force switching
-was correct. Although the effect on individual atoms pairs was negligible,
-their combined effect on the pressure could lead to deformation of
-CHARMM membrane systems, where LJ force switching is regularly applied.
+was correct. The effects of this bug on any physical results seem to
+be negligible. Note that force switching is usually only used in
+combination with the CHARMM force field.
:issue:`2845`
GROMACS 2019.2 release notes
----------------------------
-This version was released on TODO, 2019. These release notes
+This version was released on April 16th, 2019. These release notes
document the changes that have taken place in GROMACS since the
-initial version 2019.1, to fix known issues. It also incorporates all
-fixes made in version 2018.5 and earlier, which you can find described
+previous 2019.1 version, to fix known issues. It also incorporates all
+fixes made in version 2018.6 and earlier, which you can find described
in the :ref:`release-notes`.
.. Note to developers!
Fixes where mdrun could behave incorrectly
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Fix L-BGFS minimizer
+""""""""""""""""""""""""""""""""""""""""""""""""
+
+The minimizer could fail on a number of systems.
+
+:issue:`2641`
+
+Disallow pull geometry direction-periodic with AWH
+""""""""""""""""""""""""""""""""""""""""""""""""""
+
+This could lead to incorrect behavior or a cryptic error message.
+
+:issue:`2923`
+
+Fixed mdrun -nsteps option
+""""""""""""""""""""""""""
+
+Fixed that the deprecated mdrun option -nsteps only allowed extension
+of the simulation under certain conditions.
+
+:issue:`2881`
+
Fixes for ``gmx`` tools
^^^^^^^^^^^^^^^^^^^^^^^
+gmx cluster -clndx indices now correct
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+The reported indices of trajectory frames in clusters were
+too small by one.
+
+:issue:`2926`
+
+gmx editconf -f in.pdb -o out.pdb again preserves chain IDs
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+This had been inadvertently broken and is now fixed.
+
+:issue:`2900`
+
+
+Tools again accept .tpr files as input
+"""""""""""""""""""""""""""""""""""""""
+
+The pdb2gmx, solvate, and insert-molecules tools could no longer
+accept input configurations contained in .tpr format files. This
+is now fixed.
+
+:issue:`2900`
+
+Fix segmentation fault when preparing simulated annealing inputs
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+grompp was unable to prepare tpr files for inputs containing simulated annealing
+procedures. The code has been fixed to allow the generation of those files again.
+
+:issue:`2871`
+
Fixes that affect portability
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Fix error in AVX 512 detection code
+"""""""""""""""""""""""""""""""""""
+
+The CMake detection code had a typo that could lead to wrong detection results.
+
Miscellaneous
^^^^^^^^^^^^^
:issue:`2884`
+Prevented internal build of FFTW with clang and AVX-512 SIMD
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+Prevented the internal build of FFTW with clang from attempting to
+configure FFTW to compile with AVX-512 support. That SIMD level is not
+supported by FFTW with the clang compiler, and compilation fails.
+
+:issue:`2892`
+
+Updated performance guide for recent Intel processors with AVX512 instruction support
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+Noted the tradeoffs between CPU frequency and SIMD throughput and advised users to
+prefer AVX2 over AVX512 in GPU-offload or highly parallel MPI cases.
+
+Updated release notes for 2019.1
+""""""""""""""""""""""""""""""""
+
+A :ref:`fix <release-notes-2019-1-gpu>` made to GPU kernels in 2019.1 was
+thought to resolve :issue:`2845` but further investigation suggests that
+the real cause is not yet known.
+
--- /dev/null
+GROMACS 2019.3 release notes
+----------------------------
+
+This version was released on June 14, 2019. These release notes
+document the changes that have taken place in GROMACS since the
+previous 2019.2 version, to fix known issues. It also incorporates all
+fixes made in version 2018.7 and earlier, which you can find described
+in the :ref:`release-notes`.
+
+.. Note to developers!
+ Please use """"""" to underline the individual entries for fixed issues in the subfolders,
+ otherwise the formatting on the webpage is messed up.
+   Also, please use the syntax :issue:`number` to reference issues on redmine, without
+ a space between the colon and number!
+
+Fixes where mdrun could behave incorrectly
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Fix missing interactions with domain decomposition
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+When running with domain decomposition, any interactions described by
+the rarely-used topology file section
+``[ intermolecular_interactions ]`` were ignored. This did not
+affect normal non-bonded or intra-molecular interactions.
+
+:issue:`2953`
+
+Fix possible floating point exception during minimization.
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+It was possible that very small forces during minimization could lead to
+a crash due to a divide by zero error. Fixed by introducing a check.
+
+:issue:`2917`
+
+Fix segmentation fault when using membrane embedding
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+:issue:`2947`
+
+Allow AWH with pull-geometry 'direction' to be periodic
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+When applying AWH to a pull coordinate with geometry 'direction'
+with an AWH interval length of more than 95% of the box size,
+the dimension is now made periodic.
+
+:issue:`2946`
+
+Fixes for ``gmx`` tools
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Fixed residue and molecule indexing in selections
+"""""""""""""""""""""""""""""""""""""""""""""""""
+
+:issue:`2951`
+
+Fix PQR formatting
+""""""""""""""""""""
+
+The formatting was incorrect for some tools that use PQR files.
+
+:issue:`2955`
+
+Fix gmx wham with angle geometries
+""""""""""""""""""""""""""""""""""
+
+gmx wham would mix up degree and radian units leading to no overlap
+or not-a-number output.
+
+:issue:`2609`
+
+Add some information for grompp error with wrong line endings
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+Give meaningful error with too large grid in hbond
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+When using a grid that is too large :ref:`hbond <gmx hbond>` could try to
+allocate enough memory to cause a crash.
+
+:issue:`2962`
+
+Add some information for syntax errors with include delimiters in grompp
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+:issue:`2911`
+
+Fixes that affect portability
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Fixed wider reference SIMD setups
+"""""""""""""""""""""""""""""""""
+
+The reference SIMD builds could use a too small memory alignment,
+leading to mdrun exiting with an alignment error.
+
+:issue:`2952`
+
+Fixed build failure with Apple Clang
+""""""""""""""""""""""""""""""""""""
+
+Builds would fail because of qsort being undefined.
+
+Miscellaneous
+^^^^^^^^^^^^^
+
+Removed non-existent mdp option awh1-dim1-period from user guide
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+:issue:`2940`
+
+Add checks for too many interactions during memory allocation
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+:issue:`2932`
+
--- /dev/null
+GROMACS 2019.4 release notes
+----------------------------
+
+This version was released on TODO, 2019. These release notes
+document the changes that have taken place in GROMACS since the
+previous 2019.3 version, to fix known issues. It also incorporates all
+fixes made in version 2018.7 and earlier, which you can find described
+in the :ref:`release-notes`.
+
+.. Note to developers!
+ Please use """"""" to underline the individual entries for fixed issues in the subfolders,
+ otherwise the formatting on the webpage is messed up.
+   Also, please use the syntax :issue:`number` to reference issues on redmine, without
+ a space between the colon and number!
+
+Fixes where mdrun could behave incorrectly
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Fix segmentation fault in grompp and mdrun with cosine COM pulling
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+:issue:`3023`
+
+
+Fixes for ``gmx`` tools
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Fix bug in gmx xpm2ps
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+The tool would fail when not being provided with a library file to read in.
+
+:issue:`3012`
+
+
+Fix bug in gmx anaeig
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+An issue was noted when reading a second
+set of eigenvectors that could lead to problems when the number
+of eigenvectors was less than three times the number of atoms.
+
+:issue:`2972`
+
+Fixes that affect portability
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Disable PME OpenCL on Apple
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+The Apple OpenCL compilers fail to produce a functional clFFT build.
+The OpenCL PME support is therefore disabled on Apple platforms.
+
+:issue:`2941`
+
+Miscellaneous
+^^^^^^^^^^^^^
+
.. toctree::
:maxdepth: 1
+ 2019/2019.4
+ 2019/2019.3
2019/2019.2
2019/2019.1
.. toctree::
:maxdepth: 1
+ 2018/2018.7
2018/2018.6
2018/2018.5
2018/2018.4
The reference coordinates for position restraints are not
modified. Note that with this option the virial and pressure
- will depend on the absolute positions of the reference
- coordinates.
+ might be ill defined, see :ref:`here <reference-manual-position-restraints>`
+ for more details.
.. mdp-value:: all
one COM is used, even when there are multiple molecules with
position restraints. For calculating the COM of the reference
coordinates in the starting configuration, periodic boundary
- conditions are not taken into account.
+ conditions are not taken into account. Note that with this option
+ the virial and pressure might be ill defined, see
+ :ref:`here <reference-manual-position-restraints>` for more details.
Simulated annealing
multidimensional and is defined by mapping each dimension to a pull coordinate index.
This is only allowed if :mdp-value:`pull-coord1-type=external-potential` and
:mdp:`pull-coord1-potential-provider` = ``awh`` for the concerned pull coordinate
- indices.
+ indices. Pull geometry 'direction-periodic' is not supported by AWH.
.. mdp:: awh-potential
(0.0) [nm] or [rad]
Start value of the sampling interval along this dimension. The range of allowed
values depends on the relevant pull geometry (see :mdp:`pull-coord1-geometry`).
- For periodic geometries :mdp:`awh1-dim1-start` greater than :mdp:`awh1-dim1-end`
+ For dihedral geometries :mdp:`awh1-dim1-start` greater than :mdp:`awh1-dim1-end`
is allowed. The interval will then wrap around from +period/2 to -period/2.
+ For the direction geometry, the dimension is made periodic when
+ the direction is along a box vector and covers more than 95%
+ of the box length. Note that one should not apply pressure coupling
+ along a periodic dimension.
.. mdp:: awh1-dim1-end
(0.0) [nm] or [rad]
End value defining the sampling interval together with :mdp:`awh1-dim1-start`.
-.. mdp:: awh1-dim1-period
-
- (0.0) [nm] or [rad]
- The period of this reaction coordinate, use 0 when the coordinate is not periodic.
-
.. mdp:: awh1-dim1-diffusion
(10\ :sup:`-5`) [nm\ :sup:`2`/ps] or [rad\ :sup:`2`/ps]
There are multiple parallelization schemes available, therefore a simulation can be run on a
given hardware with different choices of run configuration.
-Core level parallelization via SIMD: SSE, AVX, etc.
+.. _intra-core-parallelization:
+
+Intra-core parallelization via SIMD: SSE, AVX, etc.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
One level of performance improvement available in |Gromacs| is through the use of
By default, the build system will detect the highest supported
acceleration of the host where the compilation is carried out. For cross-compiling for
a machine with a different highest SIMD instructions set, in order to set the target acceleration,
-the ``-DGMX_SIMD`` CMake option can be used. For best performance always pick the highest
-(latest) SIMD instruction set supported by the target architecture (and |Gromacs|). To use a single
+the ``-DGMX_SIMD`` CMake option can be used.
+To use a single
installation on multiple different machines, it is convenient to compile the analysis tools with
the lowest common SIMD instruction set (as these rely little on SIMD acceleration), but for best
-performance :ref:`mdrun <gmx mdrun>` should be compiled separately for each machine.
+performance :ref:`mdrun <gmx mdrun>` should be compiled separately with the
+highest (latest) ``native`` SIMD instruction set of the target architecture (supported by |Gromacs|).
-.. TODO add a note on AVX throttle and its impact on MPI-parallel and GPU accelerated runs
+Recent Intel CPU architectures bring tradeoffs between the maximum clock frequency of the
+CPU (i.e. its speed), and the width of the SIMD instructions it executes (i.e. its throughput
+at a given speed). In particular, the Intel ``Skylake`` and ``Cascade Lake`` processors
+(e.g. Xeon SP Gold/Platinum), can offer better throughput when using narrower SIMD because
+of the better clock frequency available. Consider building :ref:`mdrun <gmx mdrun>`
+configured with ``GMX_SIMD=AVX2_256`` instead of ``GMX_SIMD=AVX512`` for better
+performance in GPU accelerated or highly parallel MPI runs.
Process(-or) level parallelization via OpenMP
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
default, 0, will start one thread on each available core.
Alternatively, :ref:`mdrun <gmx mdrun>` will honor the appropriate system
environment variable (e.g. ``OMP_NUM_THREADS``) if set.
+ Note that the maximum number of OpenMP threads (per rank) is,
+ for efficiency reasons, limited to 64. While it is rarely beneficial to use
+ a number of threads higher than this, the GMX_OPENMP_MAX_THREADS CMake variable
+ can be used to increase the limit.
``-npme``
The total number of ranks to dedicate to the long-ranged
**Please note again the limitations outlined below!**
-- Only compilation with CUDA is supported.
+- PME GPU offload is supported on NVIDIA hardware with CUDA and AMD hardware with OpenCL.
- Only a PME order of 4 is supported on GPUs.
* If you have GPUs that support either CUDA or OpenCL, use them.
* Configure with ``-DGMX_GPU=ON`` (add ``-DGMX_USE_OPENCL=ON`` for OpenCL).
- * For CUDA, use the newest CUDA availabe for your GPU to take advantage of the
+ * For CUDA, use the newest CUDA available for your GPU to take advantage of the
latest performance enhancements.
* Use a recent GPU driver.
+ * Make sure you use an :ref:`gmx mdrun` with ``GMX_SIMD`` appropriate for the CPU
+ architecture; the log file will contain a warning note if suboptimal setting is used.
+    However, prefer ``AVX2`` over ``AVX512`` in GPU or highly parallel MPI runs (for more
+ information see the :ref:`intra-core parallelization information <intra-core-parallelization>`).
* If compiling on a cluster head node, make sure that ``GMX_SIMD``
is appropriate for the compute nodes.
Note for PME users: It is possible to use a uniform neutralizing background
charge in PME to compensate for a system with a net background charge.
This may however, especially for non-homogeneous systems, lead to unwanted artifacts, as
-shown in `Hub, J. S., de Groot, B. L., Grubmüller, H. & Groenhof, G. Quantifying
-artifacts in Ewald simulations of inhomogeneous systems with a net charge.
-*J. Chem. Theory Comput.* **10**, 381–390 (2014) <http://pubs.acs.org/doi/abs/10.1021/ct400626b>`.
-Nevertheless, it is a standard
-practice to actually add counter-ions to make the system net neutral.
+shown in \ :ref:`181 <refGroenhofEwaldArtefact>` (http://pubs.acs.org/doi/abs/10.1021/ct400626b).
+Nevertheless, it is a standard practice to actually add counter-ions to make the system net neutral.
Incorrect number of parameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
favourite text editor in concert with chapter 5 of the |Gromacs|
`Reference Manual`_. For the AMBER force fields, `antechamber
<http://amber.scripps.edu/antechamber/antechamber.html>`__ or
- `acpype <https://github.com/choderalab/mmtools/blob/master/converters/acpype.py>`__
+ `acpype <https://github.com/alanwilter/acpype>`__
might be appropriate.
6. Describe a simulation box (e.g. using :ref:`gmx editconf`) whose
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2018, by the GROMACS development team, led by
+ * Copyright (c) 2018,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*/
void makeTprFile(int steps)
{
- runner_.useTopGroAndNdxFromDatabase("spc-and-methanol");
+ runner_.useTopGroAndNdxFromDatabase("spc_and_methane");
runner_.useStringAsMdpFile(gmx::formatString("integrator = md\n"
"cutoff-scheme = Verlet\n"
"nsteps = %d\n"
set(_fftw_simd_support_level --enable-sse2;--enable-avx;--enable-avx2)
elseif(${GMX_SIMD_ACTIVE} MATCHES "^(AVX_512)")
# MSVC, GCC < 4.9, Clang < 3.9 do not support AVX-512, so
- # we should not enable it.
+ # we should not enable it there. FFTW does not support clang with
+ # AVX-512, so we should not enable that either.
if(MSVC OR (CMAKE_COMPILER_IS_GNUCC AND CMAKE_C_COMPILER_VERSION VERSION_LESS 4.9.0) OR
- (CMAKE_C_COMPILER_ID MATCHES "Clang" AND CMAKE_C_COMPILER_VERSION VERSION_LESS 3.9.0))
+ (CMAKE_C_COMPILER_ID MATCHES "Clang" AND CMAKE_C_COMPILER_VERSION VERSION_LESS 3.9.0) OR
+ (CMAKE_C_COMPILER_ID MATCHES "Clang" AND ${GMX_SIMD_ACTIVE} MATCHES "^(AVX_512)"))
set(_fftw_simd_support_level --enable-sse2;--enable-avx;--enable-avx2)
else()
set(_fftw_simd_support_level --enable-sse2;--enable-avx;--enable-avx2;--enable-avx512)
# include <cstddef>
#endif
-#if defined(__GNUC__) && __GNUC__>=7
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && __GNUC__>=7
#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
#endif
# endif
#endif
-#if defined(__GNUC__) && __GNUC__>=7
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && __GNUC__>=7
#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
#endif
const AwhDimParams &awhDimParams = awhBiasParams.dimParams[d];
GMX_RELEASE_ASSERT(awhDimParams.eCoordProvider == eawhcoordproviderPULL, "Currently only the pull code is supported as coordinate provider");
const t_pull_coord &pullCoord = inputRecord.pull->coord[awhDimParams.coordIndex];
+ GMX_RELEASE_ASSERT(pullCoord.eGeom != epullgDIRPBC, "Pull geometry 'direction-periodic' is not supported by AWH");
double conversionFactor = pull_coordinate_is_angletype(&pullCoord) ? DEG2RAD : 1;
dimParams.emplace_back(conversionFactor, awhDimParams.forceConstant, beta);
/*! \brief
* Gets the period of a pull coordinate.
*
- * \param[in] pull_params Pull parameters.
- * \param[in] coord_ind Pull coordinate index.
- * \param[in] box Box vectors.
+ * \param[in] pullCoordParams The parameters for the pull coordinate.
+ * \param[in] pbc The PBC setup
+ * \param[in] intervalLength The length of the AWH interval for this pull coordinate
* \returns the period (or 0 if not periodic).
*/
-static double get_pull_coord_period(const pull_params_t *pull_params,
- int coord_ind,
- const matrix box)
+static double get_pull_coord_period(const t_pull_coord &pullCoordParams,
+ const t_pbc &pbc,
+ const real intervalLength)
{
- double period;
- t_pull_coord *pcrd_params = &pull_params->coord[coord_ind];
-
- if (pcrd_params->eGeom == epullgDIRPBC)
- {
- /* For direction periodic, we need the pull vector to be one of the box vectors
- (or more generally I guess it could be an integer combination of boxvectors).
- This boxvector should to be orthogonal to the (periodic) plane spanned by the other two box vectors.
- Here we assume that the pull vector is either x, y or z.
- * E.g. for pull vec = (1, 0, 0) the box vector tensor should look like:
- * | x 0 0 |
- * | 0 a c |
- * | 0 b d |
- *
- The period is then given by the box length x.
-
- Note: we make these checks here for AWH and not in pull because we allow pull to be more general.
- */
- int m_pullvec = -1, count_nonzeros = 0;
-
- /* Check that pull vec has only one component and which component it is. This component gives the relevant box vector */
- for (int m = 0; m < DIM; m++)
- {
- if (pcrd_params->vec[m] != 0)
- {
- m_pullvec = m;
- count_nonzeros++;
- }
- }
- if (count_nonzeros != 1)
- {
- gmx_fatal(FARGS, "For AWH biasing pull coordinate %d with pull geometry %s, the pull vector needs to be parallel to "
- "a box vector that is parallel to either the x, y or z axis and is orthogonal to the other box vectors.",
- coord_ind + 1, EPULLGEOM(epullgDIRPBC));
- }
+ double period = 0;
- /* Check that there is a box vec parallel to pull vec and that this boxvec is orthogonal to the other box vectors */
- for (int m = 0; m < DIM; m++)
+ if (pullCoordParams.eGeom == epullgDIR)
+ {
+ const real margin = 0.001;
+ // Make dims periodic when the interval covers > 95%
+ const real periodicFraction = 0.95;
+
+ // Check if the pull direction is along a box vector
+ for (int dim = 0; dim < pbc.ndim_ePBC; dim++)
{
- for (int n = 0; n < DIM; n++)
+ const real boxLength = norm(pbc.box[dim]);
+ const real innerProduct = iprod(pullCoordParams.vec, pbc.box[dim]);
+ if (innerProduct >= (1 - margin)*boxLength &&
+ innerProduct <= (1 + margin)*boxLength)
{
- if ((n != m) && (n == m_pullvec || m == m_pullvec) && box[m][n] > 0)
+ GMX_RELEASE_ASSERT(intervalLength < (1 + margin)*boxLength,
+ "We have checked before that interval <= period");
+ if (intervalLength > periodicFraction*boxLength)
{
- gmx_fatal(FARGS, "For AWH biasing pull coordinate %d with pull geometry %s, there needs to be a box vector parallel to the pull vector that is "
- "orthogonal to the other box vectors.",
- coord_ind + 1, EPULLGEOM(epullgDIRPBC));
+ period = boxLength;
}
}
}
-
- /* If this box vector only has one component as we assumed the norm should be equal to the absolute value of that component */
- period = static_cast<double>(norm(box[m_pullvec]));
}
- else if (pcrd_params->eGeom == epullgDIHEDRAL)
+ else if (pullCoordParams.eGeom == epullgDIHEDRAL)
{
/* The dihedral angle is periodic in -180 to 180 deg */
period = 360;
}
- else
- {
- period = 0;
- }
return period;
}
void setStateDependentAwhParams(AwhParams *awhParams,
const pull_params_t *pull_params, pull_t *pull_work,
- const matrix box, int ePBC,
+ const matrix box, int ePBC, const tensor &compressibility,
const t_grpopts *inputrecGroupOptions, warninp_t wi)
{
/* The temperature is not really state depenendent but is not known
AwhBiasParams *awhBiasParams = &awhParams->awhBiasParams[k];
for (int d = 0; d < awhBiasParams->ndim; d++)
{
- AwhDimParams *dimParams = &awhBiasParams->dimParams[d];
+ AwhDimParams *dimParams = &awhBiasParams->dimParams[d];
+ const t_pull_coord &pullCoordParams = pull_params->coord[dimParams->coordIndex];
- /* The periodiciy of the AWH grid in certain cases depends on the simulation box */
- dimParams->period = get_pull_coord_period(pull_params, dimParams->coordIndex, box);
+ if (pullCoordParams.eGeom == epullgDIRPBC)
+ {
+ gmx_fatal(FARGS, "AWH does not support pull geometry '%s'. "
+ "If the maximum distance between the groups is always less than half the box size, "
+ "you can use geometry '%s' instead.",
+ EPULLGEOM(epullgDIRPBC),
+ EPULLGEOM(epullgDIR));
+
+ }
+
+ dimParams->period = get_pull_coord_period(pullCoordParams, pbc, dimParams->end - dimParams->origin);
+ // We would like to check for scaling, but we don't have the full inputrec available here
+ if (dimParams->period > 0 && !(pullCoordParams.eGeom == epullgANGLE ||
+ pullCoordParams.eGeom == epullgDIHEDRAL))
+ {
+ bool coordIsScaled = false;
+ for (int d2 = 0; d2 < DIM; d2++)
+ {
+ if (pullCoordParams.vec[d2] != 0 && norm2(compressibility[d2]) != 0)
+ {
+ coordIsScaled = true;
+ }
+ }
+ if (coordIsScaled)
+ {
+ std::string mesg = gmx::formatString("AWH dimension %d of bias %d is periodic with pull geometry '%s', "
+ "while you should are applying pressure scaling to the corresponding box vector, this is not supported.",
+ d + 1, k + 1, EPULLGEOM(pullCoordParams.eGeom));
+ warning(wi, mesg.c_str());
+ }
+ }
/* The initial coordinate value, converted to external user units. */
dimParams->coordValueInit =
get_pull_coord_value(pull_work, dimParams->coordIndex, &pbc);
- t_pull_coord *pullCoord = &pull_params->coord[dimParams->coordIndex];
- dimParams->coordValueInit *= pull_conversion_factor_internal2userinput(pullCoord);
+ dimParams->coordValueInit *= pull_conversion_factor_internal2userinput(&pullCoordParams);
}
}
checkInputConsistencyInterval(awhParams, wi);
* \param[in,out] pull_work Pull working struct to register AWH bias in.
* \param[in] box Box vectors.
* \param[in] ePBC Periodic boundary conditions enum.
+ * \param[in] compressibility Compressibility matrix for pressure coupling; pass all zeros when pressure coupling is not used
* \param[in] inputrecGroupOptions Parameters for atom groups.
* \param[in,out] wi Struct for bookeeping warnings.
*
pull_t *pull_work,
const matrix box,
int ePBC,
+ const tensor &compressibility,
const t_grpopts *inputrecGroupOptions,
warninp_t wi);
fprintf(debug, "Making load communicators\n");
}
- snew(dd->comm->load, std::max(dd->ndim, 1));
+ dd->comm->load = new domdec_load_t[std::max(dd->ndim, 1)];
snew(dd->comm->mpi_comm_load, std::max(dd->ndim, 1));
if (dd->ndim == 0)
struct BalanceRegion;
+//! Indices to communicate in a dimension
struct gmx_domdec_ind_t
{
- /* The numbers of charge groups to send and receive for each cell
- * that requires communication, the last entry contains the total
+ //! @{
+ /*! \brief The numbers of charge groups to send and receive for each
+ * cell that requires communication, the last entry contains the total
* number of atoms that needs to be communicated.
*/
- int nsend[DD_MAXIZONE+2];
- int nrecv[DD_MAXIZONE+2];
- /* The charge groups to send */
+ int nsend[DD_MAXIZONE+2] = {};
+ int nrecv[DD_MAXIZONE+2] = {};
+ //! @}
+ //! The charge groups to send
std::vector<int> index;
+ //! @{
/* The atom range for non-in-place communication */
- int cell2at0[DD_MAXIZONE];
- int cell2at1[DD_MAXIZONE];
+ int cell2at0[DD_MAXIZONE] = {};
+ int cell2at1[DD_MAXIZONE] = {};
+ //! @}
};
+//! Things relating to index communication
struct gmx_domdec_comm_dim_t
{
/* Returns the number of grid pulses (the number of domains in the halo along this dimension) */
return ind.size();
}
- int np_dlb; /* For dlb, for use with edlbAUTO */
- std::vector<gmx_domdec_ind_t> ind; /* The indices to communicate, size np */
- bool receiveInPlace; /* Can we receive data in place? */
+ /**< For dlb, for use with edlbAUTO */
+ int np_dlb = 0;
+ /**< The indices to communicate, size np */
+ std::vector<gmx_domdec_ind_t> ind;
+ /**< Can we receive data in place? */
+ bool receiveInPlace = false;
};
/*! \brief Load balancing data along a dim used on the master rank of that dim */
{
struct Bounds
{
- real cellFracLowerMax; /**< State var.: max lower bound., incl. neighbors */
- real cellFracUpperMin; /**< State var.: min upper bound., incl. neighbors */
- real boundMin; /**< Temp. var.: lower limit for cell boundary */
- real boundMax; /**< Temp. var.: upper limit for cell boundary */
+ /**< State var.: max lower bound., incl. neighbors */
+ real cellFracLowerMax = 0;
+ /**< State var.: min upper bound., incl. neighbors */
+ real cellFracUpperMin = 0;
+ /**< Temp. var.: lower limit for cell boundary */
+ real boundMin = 0;
+ /**< Temp. var.: upper limit for cell boundary */
+ real boundMax = 0;
};
- std::vector<bool> isCellMin; /**< Temp. var.: is this cell size at the limit */
- std::vector<real> cellFrac; /**< State var.: cell boundaries, box relative */
- std::vector<real> oldCellFrac; /**< Temp. var.: old cell size */
- std::vector<Bounds> bounds; /**< Cell bounds */
- bool dlbIsLimited; /**< State var.: is DLB limited in this row */
- std::vector<real> buf_ncd; /**< Temp. var. */
+ /**< Temp. var.: is this cell size at the limit */
+ std::vector<bool> isCellMin;
+ /**< State var.: cell boundaries, box relative */
+ std::vector<real> cellFrac;
+ /**< Temp. var.: old cell size */
+ std::vector<real> oldCellFrac;
+ /**< Cell bounds */
+ std::vector<Bounds> bounds;
+ /**< State var.: is DLB limited in this row */
+ bool dlbIsLimited = false;
+ /**< Temp. var. */
+ std::vector<real> buf_ncd;
};
/*! \brief Struct for managing cell sizes with DLB along a dimension */
struct DDCellsizesWithDlb
{
- /* Cell sizes for dynamic load balancing */
- std::unique_ptr<RowMaster> rowMaster; /**< Cell row root struct, only available on the first rank in a row */
- std::vector<real> fracRow; /**< The cell sizes, in fractions, along a row, not available on the first rank in a row */
- real fracLower; /**< The lower corner, in fractions, in triclinic space */
- real fracUpper; /**< The upper corner, in fractions, in triclinic space */
- real fracLowerMax; /**< The maximum lower corner among all our neighbors */
- real fracUpperMin; /**< The minimum upper corner among all our neighbors */
+ /**< Cell row root struct, only available on the first rank in a row */
+ std::unique_ptr<RowMaster> rowMaster;
+ /**< The cell sizes, in fractions, along a row, not available on the first rank in a row */
+ std::vector<real> fracRow;
+ /**< The lower corner, in fractions, in triclinic space */
+ real fracLower = 0;
+ /**< The upper corner, in fractions, in triclinic space */
+ real fracUpper = 0;
+ /**< The maximum lower corner among all our neighbors */
+ real fracLowerMax = 0;
+ /**< The minimum upper corner among all our neighbors */
+ real fracUpperMin = 0;
};
/*! \brief Struct for compute load commuication
*/
typedef struct
{
- int nload; /**< The number of load recordings */
- float *load; /**< Scan of the sum of load over dimensions */
- float sum; /**< The sum of the load over the ranks up to our current dimension */
- float max; /**< The maximum over the ranks contributing to \p sum */
- float sum_m; /**< Like \p sum, but takes the maximum when the load balancing is limited */
- float cvol_min; /**< Minimum cell volume, relative to the box */
- float mdf; /**< The PP time during which PME can overlap */
- float pme; /**< The PME-only rank load */
- int flags; /**< Bit flags that tell if DLB was limited, per dimension */
+ /**< The number of load recordings */
+ int nload = 0;
+ /**< Scan of the sum of load over dimensions */
+ float *load = nullptr;
+ /**< The sum of the load over the ranks up to our current dimension */
+ float sum = 0;
+ /**< The maximum over the ranks contributing to \p sum */
+ float max = 0;
+ /**< Like \p sum, but takes the maximum when the load balancing is limited */
+ float sum_m = 0;
+ /**< Minimum cell volume, relative to the box */
+ float cvol_min = 0;
+ /**< The PP time during which PME can overlap */
+ float mdf = 0;
+ /**< The PME-only rank load */
+ float pme = 0;
+ /**< Bit flags that tell if DLB was limited, per dimension */
+ int flags = 0;
} domdec_load_t;
/*! \brief Data needed to sort an atom to the desired location in the local state */
typedef struct
{
- int nsc; /**< Neighborsearch grid cell index */
- int ind_gl; /**< Global atom/charge group index */
- int ind; /**< Local atom/charge group index */
+ /**< Neighborsearch grid cell index */
+ int nsc = 0;
+ /**< Global atom/charge group index */
+ int ind_gl = 0;
+ /**< Local atom/charge group index */
+ int ind = 0;
} gmx_cgsort_t;
/*! \brief Temporary buffers for sorting atoms */
typedef struct
{
- std::vector<gmx_cgsort_t> sorted; /**< Sorted array of indices */
- std::vector<gmx_cgsort_t> stationary; /**< Array of stationary atom/charge group indices */
- std::vector<gmx_cgsort_t> moved; /**< Array of moved atom/charge group indices */
- std::vector<int> intBuffer; /**< Integer buffer for sorting */
+ /**< Sorted array of indices */
+ std::vector<gmx_cgsort_t> sorted;
+ /**< Array of stationary atom/charge group indices */
+ std::vector<gmx_cgsort_t> stationary;
+ /**< Array of moved atom/charge group indices */
+ std::vector<gmx_cgsort_t> moved;
+ /**< Integer buffer for sorting */
+ std::vector<int> intBuffer;
} gmx_domdec_sort_t;
/*! \brief Manages atom ranges and order for the local state atom vectors */
/*! \brief The PME domain decomposition for one dimension */
typedef struct
{
- int dim; /**< The dimension */
- gmx_bool dim_match; /**< Tells if DD and PME dims match */
- int nslab; /**< The number of PME ranks/domains in this dimension */
- real *slb_dim_f; /**< Cell sizes for determining the PME comm. with SLB */
- int *pp_min; /**< The minimum pp node location, size nslab */
- int *pp_max; /**< The maximum pp node location, size nslab */
- int maxshift; /**< The maximum shift for coordinate redistribution in PME */
+ /**< The dimension */
+ int dim = 0;
+ /**< Tells if DD and PME dims match */
+ gmx_bool dim_match = false;
+ /**< The number of PME ranks/domains in this dimension */
+ int nslab = 0;
+ /**< Cell sizes for determining the PME comm. with SLB */
+ real *slb_dim_f = nullptr;
+ /**< The minimum pp node location, size nslab */
+ int *pp_min = nullptr;
+ /**< The maximum pp node location, size nslab */
+ int *pp_max = nullptr;
+ /**< The maximum shift for coordinate redistribution in PME */
+ int maxshift = 0;
} gmx_ddpme_t;
struct gmx_ddzone_t
{
- real min0; /* The minimum bottom of this zone */
- real max1; /* The maximum top of this zone */
- real min1; /* The minimum top of this zone */
- real mch0; /* The maximum bottom communicaton height for this zone */
- real mch1; /* The maximum top communicaton height for this zone */
- real p1_0; /* The bottom value of the first cell in this zone */
- real p1_1; /* The top value of the first cell in this zone */
- real dataSet; /* Bool disguised as a real, 1 when the above data has been set. 0 otherwise */
+ /**< The minimum bottom of this zone */
+ real min0 = 0;
+ /**< The maximum top of this zone */
+ real max1 = 0;
+ /**< The minimum top of this zone */
+ real min1 = 0;
+ /**< The maximum bottom communication height for this zone */
+ real mch0 = 0;
+ /**< The maximum top communication height for this zone */
+ real mch1 = 0;
+ /**< The bottom value of the first cell in this zone */
+ real p1_0 = 0;
+ /**< The top value of the first cell in this zone */
+ real p1_1 = 0;
+ /**< Bool disguised as a real, 1 when the above data has been set. 0 otherwise */
+ real dataSet = 0;
};
/*! \brief The number of reals in gmx_ddzone_t */
gmx::ArrayRef<T> buffer; /**< The access to the memory buffer */
};
-/*! brief Temporary buffer for setting up communiation over one pulse and all zones in the halo */
+/*! \brief Temporary buffer for setting up communication over one pulse and all zones in the halo */
struct dd_comm_setup_work_t
{
- std::vector<int> localAtomGroupBuffer; /**< The local atom group indices to send */
- std::vector<int> atomGroupBuffer; /**< Buffer for collecting the global atom group indices to send */
- std::vector<gmx::RVec> positionBuffer; /**< Buffer for collecting the atom group positions to send */
- int nat; /**< The number of atoms contained in the atom groups to send */
- int nsend_zone; /**< The number of atom groups to send for the last zone */
+ /**< The local atom group indices to send */
+ std::vector<int> localAtomGroupBuffer;
+ /**< Buffer for collecting the global atom group indices to send */
+ std::vector<int> atomGroupBuffer;
+ /**< Buffer for collecting the atom group positions to send */
+ std::vector<gmx::RVec> positionBuffer;
+ /**< The number of atoms contained in the atom groups to send */
+ int nat = 0;
+ /**< The number of atom groups to send for the last zone */
+ int nsend_zone = 0;
};
/*! \brief Struct for domain decomposition communication
struct gmx_domdec_comm_t // NOLINT (clang-analyzer-optin.performance.Padding)
{
/* PME and Cartesian communicator stuff */
- int npmedecompdim; /**< The number of decomposition dimensions for PME, 0: no PME */
- int npmenodes; /**< The number of ranks doing PME (PP/PME or only PME) */
- int npmenodes_x; /**< The number of PME ranks/domains along x */
- int npmenodes_y; /**< The number of PME ranks/domains along y */
- gmx_bool bCartesianPP_PME; /**< Use Cartesian communication between PP and PME ranks */
- ivec ntot; /**< Cartesian grid for combinted PP+PME ranks */
- int cartpmedim; /**< The number of dimensions for the PME setup that are Cartesian */
- int *pmenodes; /**< The PME ranks, size npmenodes */
- int *ddindex2simnodeid; /**< The Cartesian index to sim rank conversion, used with bCartesianPP_PME */
- gmx_ddpme_t ddpme[2]; /**< The 1D or 2D PME domain decomposition setup */
+ /**< The number of decomposition dimensions for PME, 0: no PME */
+ int npmedecompdim = 0;
+ /**< The number of ranks doing PME (PP/PME or only PME) */
+ int npmenodes = 0;
+ /**< The number of PME ranks/domains along x */
+ int npmenodes_x = 0;
+ /**< The number of PME ranks/domains along y */
+ int npmenodes_y = 0;
+ /**< Use Cartesian communication between PP and PME ranks */
+ gmx_bool bCartesianPP_PME = false;
+ /**< Cartesian grid for combined PP+PME ranks */
+ ivec ntot = { };
+ /**< The number of dimensions for the PME setup that are Cartesian */
+ int cartpmedim = 0;
+ /**< The PME ranks, size npmenodes */
+ int *pmenodes = nullptr;
+ /**< The Cartesian index to sim rank conversion, used with bCartesianPP_PME */
+ int *ddindex2simnodeid = nullptr;
+ /**< The 1D or 2D PME domain decomposition setup */
+ gmx_ddpme_t ddpme[2];
/* The DD particle-particle nodes only */
- gmx_bool bCartesianPP; /**< Use a Cartesian communicator for PP */
- int *ddindex2ddnodeid; /**< The Cartesian index to DD rank conversion, used with bCartesianPP */
+ /**< Use a Cartesian communicator for PP */
+ gmx_bool bCartesianPP = false;
+ /**< The Cartesian index to DD rank conversion, used with bCartesianPP */
+ int *ddindex2ddnodeid = nullptr;
/* The DLB state, used for reloading old states, during e.g. EM */
- t_block cgs_gl; /**< The global charge groups, this defined the DD state (except for the DLB state) */
+ /**< The global charge groups, this defines the DD state (except for the DLB state) */
+ t_block cgs_gl = { };
/* Charge group / atom sorting */
- std::unique_ptr<gmx_domdec_sort_t> sort; /**< Data structure for cg/atom sorting */
+ /**< Data structure for cg/atom sorting */
+ std::unique_ptr<gmx_domdec_sort_t> sort;
//! True when update groups are used
- bool useUpdateGroups;
+ bool useUpdateGroups = false;
//! Update atom grouping for each molecule type
std::vector<gmx::RangePartitioning> updateGroupingPerMoleculetype;
//! Centers of mass of local update groups
std::unique_ptr<gmx::UpdateGroupsCog> updateGroupsCog;
/* Are there charge groups? */
- bool haveInterDomainBondeds; /**< Are there inter-domain bonded interactions? */
- bool haveInterDomainMultiBodyBondeds; /**< Are there inter-domain multi-body interactions? */
+ bool haveInterDomainBondeds = false; /**< Are there inter-domain bonded interactions? */
+ bool haveInterDomainMultiBodyBondeds = false; /**< Are there inter-domain multi-body interactions? */
/* Data for the optional bonded interaction atom communication range */
- gmx_bool bBondComm; /**< Only communicate atoms beyond the non-bonded cut-off when they are involved in bonded interactions with non-local atoms */
- t_blocka *cglink; /**< Links between cg's through bonded interactions */
- char *bLocalCG; /**< Local cg availability, TODO: remove when group scheme is removed */
+ /**< Only communicate atoms beyond the non-bonded cut-off when they are involved in bonded interactions with non-local atoms */
+ gmx_bool bBondComm = false;
+ /**< Links between cg's through bonded interactions */
+ t_blocka *cglink = nullptr;
+ /**< Local cg availability, TODO: remove when group scheme is removed */
+ char *bLocalCG = nullptr;
/* The DLB state, possible values are defined above */
DlbState dlbState;
/* With dlbState=DlbState::offCanTurnOn, should we check if to DLB on at the next DD? */
- gmx_bool bCheckWhetherToTurnDlbOn;
+ gmx_bool bCheckWhetherToTurnDlbOn = false;
/* The first DD count since we are running without DLB */
int ddPartioningCountFirstDlbOff = 0;
/* Cell sizes for static load balancing, first index cartesian */
- real **slb_frac;
+ real **slb_frac = nullptr;
/* The width of the communicated boundaries */
- real cutoff_mbody; /**< Cut-off for multi-body interactions, also 2-body bonded when \p cutoff_mody > \p cutoff */
- real cutoff; /**< Cut-off for non-bonded/2-body interactions */
- rvec cellsize_min; /**< The minimum guaranteed cell-size, Cartesian indexing */
- rvec cellsize_min_dlb; /**< The minimum guaranteed cell-size with dlb=auto */
- real cellsize_limit; /**< The lower limit for the DD cell size with DLB */
- gmx_bool bVacDLBNoLimit; /**< Effectively no NB cut-off limit with DLB for systems without PBC? */
+ /**< Cut-off for multi-body interactions, also 2-body bonded when \p cutoff_mbody > \p cutoff */
+ real cutoff_mbody = 0;
+ /**< Cut-off for non-bonded/2-body interactions */
+ real cutoff = 0;
+ /**< The minimum guaranteed cell-size, Cartesian indexing */
+ rvec cellsize_min = { };
+ /**< The minimum guaranteed cell-size with dlb=auto */
+ rvec cellsize_min_dlb = { };
+ /**< The lower limit for the DD cell size with DLB */
+ real cellsize_limit = 0;
+ /**< Effectively no NB cut-off limit with DLB for systems without PBC? */
+ gmx_bool bVacDLBNoLimit = false;
/** With PME load balancing we set limits on DLB */
- gmx_bool bPMELoadBalDLBLimits;
+ gmx_bool bPMELoadBalDLBLimits = false;
/** DLB needs to take into account that we want to allow this maximum
* cut-off (for PME load balancing), this could limit cell boundaries.
*/
- real PMELoadBal_max_cutoff;
+ real PMELoadBal_max_cutoff = 0;
- ivec tric_dir; /**< tric_dir from \p gmx_ddbox_t is only stored here because dd_get_ns_ranges needs it */
- rvec box0; /**< box lower corner, required with dim's without pbc when avoiding communication */
- rvec box_size; /**< box size, required with dim's without pbc when avoiding communication */
+ /**< tric_dir from \p gmx_ddbox_t is only stored here because dd_get_ns_ranges needs it */
+ ivec tric_dir = { };
+ /**< box lower corner, required with dims without pbc and -gcom */
+ rvec box0 = { };
+ /**< box size, required with dims without pbc and -gcom */
+ rvec box_size = { };
- rvec cell_x0; /**< The DD cell lower corner, in triclinic space */
- rvec cell_x1; /**< The DD cell upper corner, in triclinic space */
+ /**< The DD cell lower corner, in triclinic space */
+ rvec cell_x0 = { };
+ /**< The DD cell upper corner, in triclinic space */
+ rvec cell_x1 = { };
- rvec old_cell_x0; /**< The old \p cell_x0, to check cg displacements */
- rvec old_cell_x1; /**< The old \p cell_x1, to check cg displacements */
+ /**< The old \p cell_x0, to check cg displacements */
+ rvec old_cell_x0 = { };
+ /**< The old \p cell_x1, to check cg displacements */
+ rvec old_cell_x1 = { };
/** The communication setup and charge group boundaries for the zones */
gmx_domdec_zones_t zones;
* cell boundaries of neighboring cells for staggered grids when using
* dynamic load balancing.
*/
- gmx_ddzone_t zone_d1[2]; /**< Zone limits for dim 1 with staggered grids */
- gmx_ddzone_t zone_d2[2][2]; /**< Zone limits for dim 2 with staggered grids */
+ /**< Zone limits for dim 1 with staggered grids */
+ gmx_ddzone_t zone_d1[2];
+ /**< Zone limits for dim 2 with staggered grids */
+ gmx_ddzone_t zone_d2[2][2];
/** The coordinate/force communication setup and indices */
gmx_domdec_comm_dim_t cd[DIM];
/** The maximum number of cells to communicate with in one dimension */
- int maxpulse;
+ int maxpulse = 0;
/** Which cg distribution is stored on the master node,
* stored as DD partitioning call count.
*/
- int64_t master_cg_ddp_count;
+ int64_t master_cg_ddp_count = 0;
/** The number of cg's received from the direct neighbors */
- int zone_ncg1[DD_MAXZONE];
+ int zone_ncg1[DD_MAXZONE] = {0};
/** The atom ranges in the local state */
DDAtomRanges atomRanges;
DDBuffer<gmx::RVec> rvecBuffer;
/* Temporary storage for thread parallel communication setup */
- std::vector<dd_comm_setup_work_t> dth; /**< Thread-local work data */
+ /**< Thread-local work data */
+ std::vector<dd_comm_setup_work_t> dth;
/* Communication buffer only used with multiple grid pulses */
- DDBuffer<gmx::RVec> rvecBuffer2; /**< Another rvec comm. buffer */
+ /**< Another rvec comm. buffer */
+ DDBuffer<gmx::RVec> rvecBuffer2;
/* Communication buffers for local redistribution */
- std::array<std::vector<int>, DIM*2> cggl_flag; /**< Charge group flag comm. buffers */
- std::array<std::vector<gmx::RVec>, DIM*2> cgcm_state; /**< Charge group center comm. buffers */
+ /**< Charge group flag comm. buffers */
+ std::array<std::vector<int>, DIM*2> cggl_flag;
+ /**< Charge group center comm. buffers */
+ std::array<std::vector<gmx::RVec>, DIM*2> cgcm_state;
/* Cell sizes for dynamic load balancing */
std::vector<DDCellsizesWithDlb> cellsizesWithDlb;
/* Stuff for load communication */
- gmx_bool bRecordLoad; /**< Should we record the load */
- domdec_load_t *load; /**< The recorded load data */
- int nrank_gpu_shared; /**< The number of MPI ranks sharing the GPU our rank is using */
+ /**< Should we record the load */
+ gmx_bool bRecordLoad = false;
+ /**< The recorded load data */
+ domdec_load_t *load = nullptr;
+ /**< The number of MPI ranks sharing the GPU our rank is using */
+ int nrank_gpu_shared = 0;
#if GMX_MPI
- MPI_Comm *mpi_comm_load; /**< The MPI load communicator */
- MPI_Comm mpi_comm_gpu_shared; /**< The MPI load communicator for ranks sharing a GPU */
+ /**< The MPI load communicator */
+ MPI_Comm *mpi_comm_load = nullptr;
+ /**< The MPI load communicator for ranks sharing a GPU */
+ MPI_Comm mpi_comm_gpu_shared;
#endif
/* Information for managing the dynamic load balancing */
- int dlb_scale_lim; /**< Maximum DLB scaling per load balancing step in percent */
+ /**< Maximum DLB scaling per load balancing step in percent */
+ int dlb_scale_lim = 0;
- BalanceRegion *balanceRegion; /**< Struct for timing the force load balancing region */
+ /**< Struct for timing the force load balancing region */
+ BalanceRegion *balanceRegion = nullptr;
/* Cycle counters over nstlist steps */
- float cycl[ddCyclNr]; /**< Total cycles counted */
- int cycl_n[ddCyclNr]; /**< The number of cycle recordings */
- float cycl_max[ddCyclNr]; /**< The maximum cycle count */
+ /**< Total cycles counted */
+ float cycl[ddCyclNr] = { };
+ /**< The number of cycle recordings */
+ int cycl_n[ddCyclNr] = { };
+ /**< The maximum cycle count */
+ float cycl_max[ddCyclNr] = { };
/** Flop counter (0=no,1=yes,2=with (eFlop-1)*5% noise */
- int eFlop;
- double flop; /**< Total flops counted */
- int flop_n; /**< The number of flop recordings */
+ int eFlop = 0;
+ /**< Total flops counted */
+ double flop = 0.0;
+ /**< The number of flop recordings */
+ int flop_n = 0;
/** How many times did we have load measurements */
- int n_load_have;
+ int n_load_have = 0;
/** How many times have we collected the load measurements */
- int n_load_collect;
+ int n_load_collect = 0;
/* Cycle count history for DLB checks */
- float cyclesPerStepBeforeDLB; /**< The averaged cycles per step over the last nstlist step before turning on DLB */
- float cyclesPerStepDlbExpAverage; /**< The running average of the cycles per step during DLB */
- bool haveTurnedOffDlb; /**< Have we turned off DLB (after turning DLB on)? */
- int64_t dlbSlowerPartitioningCount; /**< The DD step at which we last measured that DLB off was faster than DLB on, 0 if there was no such step */
+ /**< The averaged cycles per step over the last nstlist step before turning on DLB */
+ float cyclesPerStepBeforeDLB = 0;
+ /**< The running average of the cycles per step during DLB */
+ float cyclesPerStepDlbExpAverage = 0;
+ /**< Have we turned off DLB (after turning DLB on)? */
+ bool haveTurnedOffDlb = false;
+ /**< The DD step at which we last measured that DLB off was faster than DLB on, 0 if there was no such step */
+ int64_t dlbSlowerPartitioningCount = 0;
/* Statistics for atoms */
- double sum_nat[static_cast<int>(DDAtomRanges::Type::Number)]; /**< The atoms per range, summed over the steps */
+ /**< The atoms per range, summed over the steps */
+ double sum_nat[static_cast<int>(DDAtomRanges::Type::Number)] = { };
/* Statistics for calls and times */
- int ndecomp; /**< The number of partioning calls */
- int nload; /**< The number of load recordings */
- double load_step; /**< Total MD step time */
- double load_sum; /**< Total PP force time */
- double load_max; /**< Max \p load_sum over the ranks */
- ivec load_lim; /**< Was load balancing limited, per DD dim */
- double load_mdf; /**< Total time on PP done during PME overlap time */
- double load_pme; /**< Total time on our PME-only rank */
+ /**< The number of partitioning calls */
+ int ndecomp = 0;
+ /**< The number of load recordings */
+ int nload = 0;
+ /**< Total MD step time */
+ double load_step = 0.0;
+ /**< Total PP force time */
+ double load_sum = 0.0;
+ /**< Max \p load_sum over the ranks */
+ double load_max = 0.0;
+ /**< Was load balancing limited, per DD dim */
+ ivec load_lim = { };
+ /**< Total time on PP done during PME overlap time */
+ double load_mdf = 0.0;
+ /**< Total time on our PME-only rank */
+ double load_pme = 0.0;
/** The last partition step */
- int64_t partition_step;
+ int64_t partition_step = 0;
/* Debugging */
- int nstDDDump; /**< Step interval for dumping the local+non-local atoms to pdb */
- int nstDDDumpGrid; /**< Step interval for duming the DD grid to pdb */
- int DD_debug; /**< DD debug print level: 0, 1, 2 */
+ /**< Step interval for dumping the local+non-local atoms to pdb */
+ int nstDDDump = 0;
+ /**< Step interval for dumping the DD grid to pdb */
+ int nstDDDumpGrid = 0;
+ /**< DD debug print level: 0, 1, 2 */
+ int DD_debug = 0;
};
/*! \brief DD zone permutation
}
typedef struct {
- int j0; /* j-zone start */
- int j1; /* j-zone end */
- int cg1; /* i-charge-group end */
- int jcg0; /* j-charge-group start */
- int jcg1; /* j-charge-group end */
- ivec shift0; /* Minimum shifts to consider */
- ivec shift1; /* Maximum shifts to consider */
+ /* j-zone start */
+ int j0 = 0;
+ /* j-zone end */
+ int j1 = 0;
+ /* i-charge-group end */
+ int cg1 = 0;
+ /* j-charge-group start */
+ int jcg0 = 0;
+ /* j-charge-group end */
+ int jcg1 = 0;
+ /* Minimum shifts to consider */
+ ivec shift0 = { };
+ /* Maximum shifts to consider */
+ ivec shift1 = { };
} gmx_domdec_ns_ranges_t;
typedef struct {
- rvec x0; /* Zone lower corner in triclinic coordinates */
- rvec x1; /* Zone upper corner in triclinic coordinates */
- rvec bb_x0; /* Zone bounding box lower corner in Cartesian coords */
- rvec bb_x1; /* Zone bounding box upper corner in Cartesian coords */
+ /* Zone lower corner in triclinic coordinates */
+ rvec x0 = { };
+ /* Zone upper corner in triclinic coordinates */
+ rvec x1 = { };
+ /* Zone bounding box lower corner in Cartesian coords */
+ rvec bb_x0 = { };
+ /* Zone bounding box upper corner in Cartesian coords */
+ rvec bb_x1 = { };
} gmx_domdec_zone_size_t;
struct gmx_domdec_zones_t {
/* The number of zones including the home zone */
- int n;
+ int n = 0;
/* The shift of the zones with respect to the home zone */
- ivec shift[DD_MAXZONE];
+ ivec shift[DD_MAXZONE] = { };
/* The charge group boundaries for the zones */
- int cg_range[DD_MAXZONE+1];
+ int cg_range[DD_MAXZONE+1] = { };
/* The number of neighbor search zones with i-particles */
- int nizone;
+ int nizone = 0;
/* The neighbor search charge group ranges for each i-zone */
gmx_domdec_ns_ranges_t izone[DD_MAXIZONE];
/* Boundaries of the zones */
gmx_domdec_zone_size_t size[DD_MAXZONE];
/* The cg density of the home zone */
- real dens_zone0;
+ real dens_zone0 = 0;
};
struct gmx_ddbox_t {
{
errorReasons.emplace_back("non-AMD devices");
}
+#ifdef __APPLE__
+ errorReasons.emplace_back("Apple OS X operating system");
+#endif
}
return addMessageIfNotSupported(errorReasons, error);
}
int elementIndex = smemReserved + lineIndex;
// Store input force contributions
sm_forceReduction[elementIndex] = (dimIndex == XX) ? fx : (dimIndex == YY) ? fy : fz;
- /* This barrier was not needed in CUDA. Different OpenCL compilers might have different ideas
+
+#if !defined(_AMD_SOURCE_)
+ /* This barrier was not needed in CUDA, nor is it needed on AMD GPUs.
+ * Different OpenCL compilers might have different ideas
* about #pragma unroll, though. OpenCL 2 has _attribute__((opencl_unroll_hint)).
* #2519
*/
barrier(CLK_LOCAL_MEM_FENCE);
+#endif
// Reduce to fit into smemPerDim (warp size)
#pragma unroll
case XDR_ENCODE:
ip = reinterpret_cast<int *>(dp);
- tmp[0] = ip[!bool(LSW)];
+ tmp[0] = ip[bool(LSW == 0)];
tmp[1] = ip[LSW];
return static_cast<bool_t>(bool(xdr_putint32(xdrs, tmp)) &&
bool(xdr_putint32(xdrs, tmp+1)));
res_seq_number = res_seq_number % 10000;
int n = fprintf(fp,
- "%s %d %s %s %c %d %8.3f %8.3f %8.3f %6.2f %6.2f\n",
+ "%-6s%5d %-4.4s%4.4s%c%4d %8.3f %8.3f %8.3f %6.2f %6.2f\n",
pdbtp[record],
atom_seq_number,
atom_name,
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
{
gmx_fatal(FARGS, "Need a second eigenvector file to do this analysis.");
}
- read_eigenvectors(Vec2File, &neig2, &bFit2,
+ int natoms2;
+ read_eigenvectors(Vec2File, &natoms2, &bFit2,
&xref2, &bDMR2, &xav2, &bDMA2, &nvec2, &eignr2, &eigvec2, &eigval2);
- neig2 = DIM*neig2;
+ neig2 = std::min(nvec2, DIM*natoms2);
if (neig2 != neig1)
{
gmx_fatal(FARGS, "Dimensions in the eigenvector files don't match");
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
}
static rvec **read_whole_trj(const char *fn, int isize, const int index[], int skip,
- int *nframe, real **time, matrix **boxes, int **frameindexes, const gmx_output_env_t *oenv, gmx_bool bPBC, gmx_rmpbc_t gpbc)
+ int *nframe, real **time, matrix **boxes, int **frameindices, const gmx_output_env_t *oenv, gmx_bool bPBC, gmx_rmpbc_t gpbc)
{
rvec **xx, *x;
matrix box;
srenew(xx, max_nf);
srenew(*time, max_nf);
srenew(*boxes, max_nf);
- srenew(*frameindexes, max_nf);
+ srenew(*frameindices, max_nf);
}
if ((i % skip) == 0)
{
}
(*time)[clusterIndex] = t;
copy_mat(box, (*boxes)[clusterIndex]);
- (*frameindexes)[clusterIndex] = nframes_read(status);
+ (*frameindices)[clusterIndex] = nframes_read(status);
clusterIndex++;
}
i++;
static void analyze_clusters(int nf, t_clusters *clust, real **rmsd,
int natom, t_atoms *atoms, rvec *xtps,
real *mass, rvec **xx, real *time,
- matrix *boxes, int *frameindexes,
+ matrix *boxes, int *frameindices,
int ifsize, int *fitidx,
int iosize, int *outidx,
const char *trxfn, const char *sizefn,
fprintf(size_fp, "@g%d type %s\n", 0, "bar");
}
}
- if (clustndxfn && frameindexes)
+ if (clustndxfn && frameindices)
{
ndxfn = gmx_ffopen(clustndxfn, "w");
}
fprintf(log, "%s %6g", buf, time[i1]);
if (ndxfn)
{
- fprintf(ndxfn, " %6d", frameindexes[i1]);
+ fprintf(ndxfn, " %6d", frameindices[i1] + 1);
}
}
fprintf(log, "\n");
real *eigenvectors;
int isize = 0, ifsize = 0, iosize = 0;
- int *index = nullptr, *fitidx = nullptr, *outidx = nullptr, *frameindexes = nullptr;
+ int *index = nullptr, *fitidx = nullptr, *outidx = nullptr, *frameindices = nullptr;
char *grpname;
real rmsd, **d1, **d2, *time = nullptr, time_invfac, *mass = nullptr;
char buf[STRLEN], buf1[80];
/* Loop over first coordinate file */
fn = opt2fn("-f", NFILE, fnm);
- xx = read_whole_trj(fn, isize, index, skip, &nf, &time, &boxes, &frameindexes, oenv, bPBC, gpbc);
+ xx = read_whole_trj(fn, isize, index, skip, &nf, &time, &boxes, &frameindices, oenv, bPBC, gpbc);
output_env_conv_times(oenv, nf, time);
if (!bRMSdist || bAnalyze)
{
copy_rvec(xtps[index[i]], usextps[i]);
}
useatoms.nr = isize;
- analyze_clusters(nf, &clust, rms->mat, isize, &useatoms, usextps, mass, xx, time, boxes, frameindexes,
+ analyze_clusters(nf, &clust, rms->mat, isize, &useatoms, usextps, mass, xx, time, boxes, frameindices,
ifsize, fitidx, iosize, outidx,
bReadTraj ? trx_out_fn : nullptr,
opt2fn_null("-sz", NFILE, fnm),
bAverage, write_ncl, write_nst, rmsmin, bFit, log,
rlo_bot, rhi_bot, oenv);
sfree(boxes);
- sfree(frameindexes);
+ sfree(frameindices);
}
gmx_ffclose(log);
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2008, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
}
else
{
- printf("\nWill do grid-seach on %dx%dx%d grid, rcut=%g\n",
+ printf("\nWill do grid-search on %dx%dx%d grid, rcut=%3.8f\n",
ngrid[XX], ngrid[YY], ngrid[ZZ], rcut);
}
+ if (((ngrid[XX]*ngrid[YY]*ngrid[ZZ]) * sizeof(grid)) > INT_MAX)
+ {
+ gmx_fatal(FARGS, "Failed to allocate memory for %d x %d x %d grid points, which is larger than the maximum of %zu. "
+ "You are likely either using a box that is too large (box dimensions are %3.8f nm x%3.8f nm x%3.8f nm) or a cutoff (%3.8f nm) that is too small.",
+ ngrid[XX], ngrid[YY], ngrid[ZZ], INT_MAX/sizeof(grid), box[XX][XX], box[YY][YY], box[ZZ][ZZ], rcut);
+ }
snew(grid, ngrid[ZZ]);
for (z = 0; z < ngrid[ZZ]; z++)
{
header->pcrd[i].pull_type = ir->pull->coord[i].eType;
header->pcrd[i].geometry = ir->pull->coord[i].eGeom;
header->pcrd[i].ngroup = ir->pull->coord[i].ngroup;
- header->pcrd[i].k = ir->pull->coord[i].k;
+ /* Angle type coordinates are handled fully in degrees in gmx wham */
+ header->pcrd[i].k = ir->pull->coord[i].k*pull_conversion_factor_internal2userinput(&ir->pull->coord[i]);
header->pcrd[i].init_dist = ir->pull->coord[i].init;
copy_ivec(ir->pull->coord[i].dim, header->pcrd[i].dim);
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
wi = init_warning(FALSE, 0);
- std::string libmpin = gmx::findLibraryFile(mpin);
- if (!libmpin.empty())
+ if (mpin != nullptr)
{
+ std::string libmpin = gmx::findLibraryFile(mpin);
gmx::TextInputFile stream(libmpin);
inp = read_inpfile(&stream, libmpin.c_str(), wi);
}
{
inp.clear();
}
+
psr->bw = get_eenum(&inp, "black&white", gmx_bools);
psr->linewidth = get_ereal(&inp, "linewidth", 1.0, wi);
setStringEntry(&inp, "titlefont", psr->titfont, "Helvetica");
printf("Incompatible options -mead and -grasp. Turning off -grasp\n");
bGrasp = FALSE;
}
- if ((bGrasp || bCONECT) && (outftp != efPDB))
+ if (bGrasp && (outftp != efPDB))
{
gmx_fatal(FARGS, "Output file should be a .pdb file"
- " when using the -grasp or -connect options\n");
+ " when using the -grasp option\n");
}
- if ((bMead || bGrasp || bCONECT) && (fn2ftp(infile) != efTPR))
+ if ((bMead || bGrasp) && (fn2ftp(infile) != efTPR))
{
gmx_fatal(FARGS, "Input file should be a .tpr file"
- " when using the -mead or -connect options\n");
+ " when using the -mead option\n");
}
t_symtab symtab;
{
return eCPP_SYNTAX;
}
+    // An include needs to be followed by either a '"' or a '<' as the first character.
+ if ((dval[0] != '"') && (dval[0] != '<'))
+ {
+ return eCPP_INVALID_INCLUDE_DELIMITER;
+ }
for (size_t i1 = 0; i1 < dval.size(); i1++)
{
if ((dval[i1] == '"') || (dval[i1] == '<') || (dval[i1] == '>'))
if (!bEOF)
{
/* Something strange happened, fgets returned NULL,
- * but we are not at EOF.
+ * but we are not at EOF. Maybe wrong line endings?
*/
return eCPP_UNKNOWN;
}
char buf[256];
const char *ecpp[] = {
"OK", "File not found", "End of file", "Syntax error", "Interrupted",
- "Invalid file handle",
- "File not open", "Unknown error", "Error status out of range"
+ "Invalid file handle", "Invalid delimiter for filename in #include statement",
+ "File not open", "Unknown error, perhaps your text file uses wrong line endings?", "Error status out of range"
};
gmx_cpp_t handle = *handlep;
/* The possible return codes for these functions */
enum {
eCPP_OK, eCPP_FILE_NOT_FOUND, eCPP_EOF, eCPP_SYNTAX, eCPP_INTERRUPT,
- eCPP_INVALID_HANDLE,
+ eCPP_INVALID_HANDLE, eCPP_INVALID_INCLUDE_DELIMITER,
eCPP_FILE_NOT_OPEN, eCPP_UNKNOWN, eCPP_NR
};
if (ir->bDoAwh)
{
+ tensor compressibility = { { 0 } };
+ if (ir->epc != epcNO)
+ {
+ copy_mat(ir->compress, compressibility);
+ }
setStateDependentAwhParams(ir->awhParams, ir->pull, pull,
- state.box, ir->ePBC, &ir->opts, wi);
+ state.box, ir->ePBC, compressibility,
+ &ir->opts, wi);
}
if (ir->bPull)
if ((nr = at->atomTypeFromName(type)) != NOTSET)
{
- auto message = gmx::formatString("Overriding atomtype %s", type);
+ auto message = gmx::formatString
+ ("Atomtype %s was defined previously (e.g. in the forcefield files), "
+ "and has now been defined again. This could happen e.g. if you would "
+ "use a self-contained molecule .itp file that duplicates or replaces "
+ "the contents of the standard force-field files. You should check "
+ "the contents of your files and remove such repetition. If you know "
+ "you should override the previous definition, then you could choose "
+ "to suppress this warning with -maxwarn.", type);
warning(wi, message);
if ((nr = at->setType(nr, symtab, *atom, type, interactionType, batype_nr,
atomnr)) == NOTSET)
else if (!haveWarned)
{
auto message = gmx::formatString
- ("Overriding %s parameters.%s",
+ ("Bondtype %s was defined previously (e.g. in the forcefield files), "
+ "and has now been defined again. This could happen e.g. if you would "
+ "use a self-contained molecule .itp file that duplicates or replaces "
+ "the contents of the standard force-field files. You should check "
+ "the contents of your files and remove such repetition. If you know "
+ "you should override the previous definition, then you could choose "
+ "to suppress this warning with -maxwarn.%s",
interaction_function[ftype].longname,
(ftype == F_PDIHS) ?
"\nUse dihedraltype 9 to allow several multiplicity terms. Only consecutive "
}
if (!bId)
{
- auto message = gmx::formatString("Overriding non-bonded parameters,");
+ auto message = gmx::formatString
+ ("Non-bonded parameters were defined previously (e.g. in the forcefield files), "
+ "and have now been defined again. This could happen e.g. if you would "
+ "use a self-contained molecule .itp file that duplicates or replaces "
+ "the contents of the standard force-field files. You should check "
+ "the contents of your files and remove such repetition. If you know "
+ "you should override the previous definitions, then you could choose "
+ "to suppress this warning with -maxwarn.");
warning(wi, message);
fprintf(stderr, " old:");
for (i = 0; i < nrfp; i++)
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2015,2016,2018, by the GROMACS development team, led by
+ * Copyright (c) 2015,2016,2018,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include <gtest/gtest.h>
+#include "gromacs/utility/stringutil.h"
+
namespace
{
if (hwTop.supportLevel() >= gmx::HardwareTopology::SupportLevel::Basic)
{
- int socketsInMachine = hwTop.machine().sockets.size();
- int coresPerSocket = hwTop.machine().sockets[0].cores.size();
- int hwThreadsPerCore = hwTop.machine().sockets[0].cores[0].hwThreads.size();
+ SCOPED_TRACE(gmx::formatString("Logical Processor count %d", hwTop.machine().logicalProcessorCount));
- // Check that logical processor information is reasonable
- for (auto &l : hwTop.machine().logicalProcessors)
- {
- EXPECT_TRUE(l.socketRankInMachine >= 0 && l.socketRankInMachine < socketsInMachine);
- EXPECT_TRUE(l.coreRankInSocket >= 0 && l.coreRankInSocket < coresPerSocket);
- EXPECT_TRUE(l.hwThreadRankInCore >= 0 && l.hwThreadRankInCore < hwThreadsPerCore);
- }
+ int socketsInMachine = hwTop.machine().sockets.size();
+ int coresPerSocket = hwTop.machine().sockets[0].cores.size();
+ int hwThreadsPerCore = hwTop.machine().sockets[0].cores[0].hwThreads.size();
- // Double-check that the tree is self-consistent with logical processor info
- for (int s = 0; s < socketsInMachine; s++)
+ auto logicalProcessors = hwTop.machine().logicalProcessors;
+ for (auto logicalProcessorIt = logicalProcessors.begin();
+ logicalProcessorIt != logicalProcessors.end();
+ ++logicalProcessorIt)
{
- for (int c = 0; c < coresPerSocket; c++)
+ // Check that logical processor information contains
+ // reasonable values.
+ SCOPED_TRACE(gmx::formatString("Socket rank in machine: %d", logicalProcessorIt->socketRankInMachine));
+ SCOPED_TRACE(gmx::formatString("Core rank in socket: %d", logicalProcessorIt->coreRankInSocket));
+ SCOPED_TRACE(gmx::formatString("Hw thread rank in core: %d", logicalProcessorIt->hwThreadRankInCore));
+ EXPECT_TRUE(logicalProcessorIt->socketRankInMachine >= 0 && logicalProcessorIt->socketRankInMachine < socketsInMachine);
+ EXPECT_TRUE(logicalProcessorIt->coreRankInSocket >= 0 && logicalProcessorIt->coreRankInSocket < coresPerSocket);
+ EXPECT_TRUE(logicalProcessorIt->hwThreadRankInCore >= 0 && logicalProcessorIt->hwThreadRankInCore < hwThreadsPerCore);
+ // Check that logical processor information is distinct
+ // for each logical processor.
+
+ for (auto remainingLogicalProcessorIt = logicalProcessorIt + 1;
+ remainingLogicalProcessorIt != logicalProcessors.end();
+ ++remainingLogicalProcessorIt)
{
- for (int t = 0; t < hwThreadsPerCore; t++)
- {
- int idx = hwTop.machine().sockets[s].cores[c].hwThreads[t].logicalProcessorId;
- EXPECT_LT(idx, hwTop.machine().logicalProcessorCount);
- EXPECT_EQ(hwTop.machine().logicalProcessors[idx].socketRankInMachine, s);
- EXPECT_EQ(hwTop.machine().logicalProcessors[idx].coreRankInSocket, c) << "logical:" << idx;
- EXPECT_EQ(hwTop.machine().logicalProcessors[idx].hwThreadRankInCore, t);
- }
+ SCOPED_TRACE(gmx::formatString("Other socket rank in machine: %d", remainingLogicalProcessorIt->socketRankInMachine));
+ SCOPED_TRACE(gmx::formatString("Other core rank in socket: %d", remainingLogicalProcessorIt->coreRankInSocket));
+ SCOPED_TRACE(gmx::formatString("Other hw thread rank in core: %d", remainingLogicalProcessorIt->hwThreadRankInCore));
+ EXPECT_TRUE((logicalProcessorIt->socketRankInMachine != remainingLogicalProcessorIt->socketRankInMachine) ||
+ (logicalProcessorIt->coreRankInSocket != remainingLogicalProcessorIt->coreRankInSocket) ||
+ (logicalProcessorIt->hwThreadRankInCore != remainingLogicalProcessorIt->hwThreadRankInCore)) <<
+ "This pair of logical processors have the same descriptive information, which is an error";
}
}
}
if (bondedThreading.nthreads > BITMASK_SIZE)
{
#pragma omp master
- gmx_fatal(FARGS, "You are using %d OpenMP threads, which is larger than GMX_OPENMP_MAX_THREADS (%d). Decrease the number of OpenMP threads or rebuild GROMACS with a larger value for GMX_OPENMP_MAX_THREADS.",
+ gmx_fatal(FARGS, "You are using %d OpenMP threads, which is larger than GMX_OPENMP_MAX_THREADS (%d). Decrease the number of OpenMP threads or rebuild GROMACS with a larger value for GMX_OPENMP_MAX_THREADS passed to CMake.",
bondedThreading.nthreads, GMX_OPENMP_MAX_THREADS);
#pragma omp barrier
}
block_bc(cr, mtop->bIntermolecularInteractions);
if (mtop->bIntermolecularInteractions)
{
- mtop->intermolecular_ilist = std::make_unique<InteractionLists>();
+ if (!MASTER(cr))
+ {
+ mtop->intermolecular_ilist = std::make_unique<InteractionLists>();
+ }
bc_ilists(cr, mtop->intermolecular_ilist.get());
}
r_min_rad = probe_rad*probe_rad;
gmx::RangePartitioning molecules = gmx_mtop_molecules(*mtop);
snew(rm_p->block, molecules.numBlocks());
+ snew(rm_p->mol, molecules.numBlocks());
nrm = nupper = 0;
nlower = low_up_rm;
for (i = 0; i < ins_at->nr; i++)
for (int i = 0; i < rm_p->nr; i++)
{
mol_id = rm_p->mol[i];
- at = molecules.block(mol_id).size();
+ at = molecules.block(mol_id).begin();
block = rm_p->block[i];
mtop->molblock[block].nmol--;
for (j = 0; j < mtop->moltype[mtop->molblock[block].type].atoms.nr; j++)
}
mtop->natoms -= n;
- state_change_natoms(state, state->natoms - n);
- snew(x_tmp, state->natoms);
- snew(v_tmp, state->natoms);
+ /* We cannot change the size of the state datastructures here
+ * because we still access the coordinate arrays for all positions
+ * before removing the molecules we want to remove.
+ */
+ const int newStateAtomNumber = state->natoms - n;
+ snew(x_tmp, newStateAtomNumber);
+ snew(v_tmp, newStateAtomNumber);
for (auto group : keysOf(groups->groupNumbers))
{
if (!groups->groupNumbers[group].empty())
{
- groups->groupNumbers[group].resize(state->natoms);
- new_egrp[group].resize(state->natoms);
+ groups->groupNumbers[group].resize(newStateAtomNumber);
+ new_egrp[group].resize(newStateAtomNumber);
}
}
auto x = makeArrayRef(state->x);
auto v = makeArrayRef(state->v);
rm = 0;
- for (int i = 0; i < state->natoms+n; i++)
+ for (int i = 0; i < state->natoms; i++)
{
bRM = FALSE;
for (j = 0; j < n; j++)
}
}
}
+ state_change_natoms(state, newStateAtomNumber);
for (int i = 0; i < state->natoms; i++)
{
copy_rvec(x_tmp[i], x[i]);
fprintf(debug, "Going to sort constraints\n");
}
- qsort(sb, ncons, sizeof(*sb), pcomp);
+ std::qsort(sb, ncons, sizeof(*sb), pcomp);
if (debug)
{
{ "-append", FALSE, etBOOL, {&appendOption},
"Append to previous output files when continuing from checkpoint instead of adding the simulation part number to all file names" },
{ "-nsteps", FALSE, etINT64, {&mdrunOptions.numStepsCommandline},
- "Run this number of steps, overrides .mdp file option (-1 means infinite, -2 means use mdp option, smaller is invalid)" },
+ "Run this number of steps (-1 means infinite, -2 means use mdp option, smaller is invalid)" },
{ "-maxh", FALSE, etREAL, {&mdrunOptions.maximumHoursToRun},
"Terminate after 0.99 times this time (hours)" },
{ "-replex", FALSE, etINT, {&replExParams.exchangeInterval},
{
/* Replace c endpoint with b */
c = b;
- /* swap states b and c */
- swap_em_state(&sb, &sc);
+ /* copy state b to c */
+ *sc = *sb;
}
else
{
/* Replace a endpoint with b */
a = b;
- /* swap states a and b */
- swap_em_state(&sa, &sb);
+ /* copy state b to a */
+ *sa = *sb;
}
/*
}
// Reset stepsize in we are doing more iterations
- stepsize = 1.0/ems.fnorm;
+ stepsize = 1.0;
/* Stop when the maximum force lies below tolerance.
* If we have reached machine precision, converged is already set to true.
}
}
- /* Determine new step */
- stepsize = ustep/s_min->fmax;
+ // If the force is very small after finishing minimization,
+ // we risk dividing by zero when calculating the step size.
+ // So we check first if the minimization has stopped before
+ // trying to obtain a new step size.
+ if (!bDone)
+ {
+ /* Determine new step */
+ stepsize = ustep/s_min->fmax;
+ }
/* Check if stepsize is too small, with 1 nm as a characteristic length */
#if GMX_DOUBLE
if (startingBehavior != StartingBehavior::NewSimulation)
{
+ /* Check if checkpoint file exists before doing continuation.
+ * This way we can use identical input options for the first and subsequent runs...
+ */
+ if (mdrunOptions.numStepsCommandline > -2)
+ {
+            /* Temporarily set the number of steps to unlimited to avoid
+ * triggering the nsteps check in load_checkpoint().
+ * This hack will go away soon when the -nsteps option is removed.
+ */
+ inputrec->nsteps = -1;
+ }
+
load_checkpoint(opt2fn_master("-cpi", filenames.size(), filenames.data(), cr),
logFileHandle,
cr, domdecOptions.numCells,
}
}
+#if defined(__GNUC__) && !defined(__clang__) && !defined(__ICC) && __GNUC__ == 7
+/* Avoid gcc 7 avx512 loop vectorization bug (actually only needed with -mavx512f) */
+#pragma GCC push_options
+#pragma GCC optimize ("no-tree-vectorize")
+#endif
+
+/* Returns the number of cluster pairs that are in use summed over all lists */
+static int countClusterpairs(gmx::ArrayRef<const NbnxnPairlistCpu> pairlists)
+{
+ /* gcc 7 with -mavx512f can miss the contributions of 16 consecutive
+ * elements to the sum calculated in this loop. Above we have disabled
+ * loop vectorization to avoid this bug.
+ */
+ int ncjTotal = 0;
+ for (const auto &pairlist : pairlists)
+ {
+ ncjTotal += pairlist.ncjInUse;
+ }
+ return ncjTotal;
+}
+
+#if defined(__GNUC__) && !defined(__clang__) && !defined(__ICC) && __GNUC__ == 7
+#pragma GCC pop_options
+#endif
+
/* This routine re-balances the pairlists such that all are nearly equally
* sized. Only whole i-entries are moved between lists. These are moved
* between the ends of the lists, such that the buffer reduction cost should
gmx::ArrayRef<NbnxnPairlistCpu> destSet,
gmx::ArrayRef<PairsearchWork> searchWork)
{
- int ncjTotal = 0;
- for (auto &src : srcSet)
- {
- ncjTotal += src.ncjInUse;
- }
+ const int ncjTotal = countClusterpairs(srcSet);
const int numLists = srcSet.ssize();
const int ncjTarget = (ncjTotal + numLists - 1)/numLists;
}
#ifndef NDEBUG
- int ncjTotalNew = 0;
- for (auto &dest : destSet)
- {
- ncjTotalNew += dest.ncjInUse;
- }
+ const int ncjTotalNew = countClusterpairs(destSet);
GMX_RELEASE_ASSERT(ncjTotalNew == ncjTotal, "The total size of the lists before and after rebalancing should match");
#endif
}
for (pull_group_work_t &group : pull->group)
{
- if (group.epgrppbc == epgrppbcCOS || !group.globalWeights.empty())
+ if (!group.globalWeights.empty())
{
group.localWeights.resize(group.atomSet.numAtomsLocal());
for (size_t i = 0; i < group.atomSet.numAtomsLocal(); ++i)
{
pull_group_work_t *pgrp = &pull->group[g];
+        /* Cosine-weighted COMs behave differently from all other weighted COMs
+ * in the sense that the weights depend on instantaneous coordinates,
+ * not on pre-set weights. Thus we resize the local weight buffer here.
+ */
+ if (pgrp->epgrppbc == epgrppbcCOS)
+ {
+ pgrp->localWeights.resize(pgrp->atomSet.localIndex().size());
+ }
+
auto comBuffer =
gmx::arrayRefFromArray(comm->comBuffer.data() + g*c_comBufferStride, c_comBufferStride);
{
int molnr, atnr_mol;
mtopGetMolblockIndex(top, ai, &molb, &molnr, &atnr_mol);
- const t_atoms &mol_atoms = top->moltype[top->molblock[molb].type].atoms;
- int last_atom = atnr_mol + 1;
+ const t_atoms &mol_atoms = top->moltype[top->molblock[molb].type].atoms;
+ int last_atom = atnr_mol + 1;
+ const int currentResid = mol_atoms.atom[atnr_mol].resind;
while (last_atom < mol_atoms.nr
- && mol_atoms.atom[last_atom].resind == id)
+ && mol_atoms.atom[last_atom].resind == currentResid)
{
++last_atom;
}
int first_atom = atnr_mol - 1;
while (first_atom >= 0
- && mol_atoms.atom[first_atom].resind == id)
+ && mol_atoms.atom[first_atom].resind == currentResid)
{
--first_atom;
}
- int first_mol_atom = top->moleculeBlockIndices[molb].globalAtomStart;
- first_mol_atom += molnr*top->moleculeBlockIndices[molb].numAtomsPerMolecule;
+ const MoleculeBlockIndices &molBlock = top->moleculeBlockIndices[molb];
+ int first_mol_atom = molBlock.globalAtomStart;
+ first_mol_atom += molnr*molBlock.numAtomsPerMolecule;
first_atom = first_mol_atom + first_atom + 1;
last_atom = first_mol_atom + last_atom - 1;
for (int j = first_atom; j <= last_atom; ++j)
}
case INDEX_MOL:
{
- size_t molb = 0;
- while (molb + 1 < top->molblock.size() && id >= top->moleculeBlockIndices[molb].moleculeIndexStart)
- {
- ++molb;
- }
+ int molnr, atnr_mol;
+ mtopGetMolblockIndex(top, ai, &molb, &molnr, &atnr_mol);
const MoleculeBlockIndices &blockIndices = top->moleculeBlockIndices[molb];
const int atomStart = blockIndices.globalAtomStart + (id - blockIndices.moleculeIndexStart)*blockIndices.numAtomsPerMolecule;
for (int j = 0; j < blockIndices.numAtomsPerMolecule; ++j)
"[TT]resnr[tt] selects atoms using the residue numbering in the input",
"file. [TT]resid[tt] is synonym for this keyword for VMD compatibility.",
"",
- "[TT]resindex N[tt] selects the [TT]N[tt]th residue starting from the",
+ "[TT]resindex N[tt] selects the [TT]N[tt] th residue starting from the",
"beginning of the input file. This is useful for uniquely identifying",
"residues if there are duplicate numbers in the input file (e.g., in",
"multiple chains).",
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018,2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
//! \brief The SIMD4 type is always four units wide, but this makes code more explicit
#define GMX_SIMD4_WIDTH 4
-//! \brief Required alignment in bytes for aligned load/store (always defined, even without SIMD)
-#define GMX_SIMD_ALIGNMENT 8 // 8 (1*double)
+/*! \brief Maximum required alignment in bytes for aligned load/store of multiple
+ * values (maximum required for either float or double). */
+#if GMX_SIMD_DOUBLE_WIDTH >= 2*GMX_SIMD_FLOAT_WIDTH
+#define GMX_SIMD_ALIGNMENT (GMX_SIMD_DOUBLE_WIDTH*8)
+#else
+#define GMX_SIMD_ALIGNMENT (GMX_SIMD_FLOAT_WIDTH*4)
+#endif
//! \brief Accuracy of SIMD 1/sqrt(x) lookup. Used to determine number of iterations.
#define GMX_SIMD_RSQRT_BITS 23
Append to previous output files when continuing from checkpoint
instead of adding the simulation part number to all file names
-nsteps <int> (-2)
- Run this number of steps, overrides .mdp file option (-1 means
- infinite, -2 means use mdp option, smaller is invalid)
+ Run this number of steps (-1 means infinite, -2 means use mdp
+ option, smaller is invalid)
-maxh <real> (-1)
Terminate after 0.99 times this time (hours)
-replex <int> (0)
<Energy Name="Potential">
<Real Name="Time 0.000000 Step 0 in frame 0">2195.7786482024485</Real>
<Real Name="Time 0.000000 Step 0 in frame 1">1848.1873657020258</Real>
- <Real Name="Time 4.000000 Step 4 in frame 2">1847.227343785434</Real>
+ <Real Name="Time 4.000000 Step 4 in frame 2">561.1160975330763</Real>
</Energy>
</Minimizer>
</Simulation>
--- /dev/null
+water and methane
+ 8
+ 1SOL OW 1 0.005 0.600 0.244 0.1823 -0.4158 0.4875
+ 1SOL HW1 2 -0.017 0.690 0.270 -1.7457 -0.5883 -0.4604
+ 1SOL HW2 3 0.051 0.610 0.161 2.5085 -0.1501 1.7627
+ 1CH4 C 649 -0.024 -0.222 -0.640 0.0000 0.0000 0.0000
+ 1CH4 H1 650 -0.083 -0.303 -0.646 0.0000 0.0000 0.0000
+ 1CH4 H2 651 -0.080 -0.140 -0.642 0.0000 0.0000 0.0000
+ 1CH4 H3 652 0.040 -0.221 -0.716 0.0000 0.0000 0.0000
+ 1CH4 H4 653 0.027 -0.225 -0.553 0.0000 0.0000 0.0000
+ 3.72412 3.72412 3.72412
\ No newline at end of file
--- /dev/null
+[ System ]
+ 1 2 3 4 5 6 7 8
+[ SOL ]
+ 1 2 3
+[ CH4 ]
+ 4 5 6 7 8
--- /dev/null
+#include "oplsaa.ff/forcefield.itp"
+
+[ atomtypes ]
+;name at.num mass charge ptype sigma epsilon
+ CH4 0 0.0 0.000 A 0.371 1.26
+
+#include "oplsaa.ff/spce.itp"
+
+[ moleculetype ]
+; Name nrexcl
+ methane 3
+
+[ atoms ]
+; nr type resnr residue atom cgnr charge mass
+ 1 opls_138 1 METH C 1 -0.24 12.011
+ 2 opls_140 1 METH H1 1 0.06 1.008
+ 3 opls_140 1 METH H2 1 0.06 1.008
+ 4 opls_140 1 METH H3 1 0.06 1.008
+ 5 opls_140 1 METH H4 1 0.06 1.008
+
+[ bonds ]
+; ai aj funct c0 c1 c2 c3
+ 1 2 1
+ 1 3 1
+ 1 4 1
+ 1 5 1
+
+[ system ]
+Water and methane
+
+[ molecules ]
+SOL 1
+methane 1