#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2017, by the GROMACS development team, led by
+# Copyright (c) 2017,2018, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.0" AND
NOT CUDA_VERSION VERSION_LESS "8.0")
- message(FATAL_ERROR "clang ${CMAKE_CXX_COMPILER_VERSION} for CUDA is only compatible with CUDA version <8.0")
+ message(FATAL_ERROR "clang ${CMAKE_CXX_COMPILER_VERSION} for CUDA is only compatible with CUDA version <=8.0")
endif()
if (GMX_CUDA_TARGET_COMPUTE)
list(APPEND _CUDA_CLANG_GENCODE_FLAGS "--cuda-gpu-arch=sm_${_target}")
endforeach()
else()
- list(APPEND _CUDA_CLANG_GENCODE_FLAGS "--cuda-gpu-arch=sm_20")
+ if(CUDA_VERSION VERSION_LESS "9.00") # < 9.0
+ list(APPEND _CUDA_CLANG_GENCODE_FLAGS "--cuda-gpu-arch=sm_20")
+ endif()
list(APPEND _CUDA_CLANG_GENCODE_FLAGS "--cuda-gpu-arch=sm_30")
list(APPEND _CUDA_CLANG_GENCODE_FLAGS "--cuda-gpu-arch=sm_35")
- list(APPEND _CUDA_CLANG_GENCODE_FLAGS "--cuda-gpu-arch=sm_37")
+ # clang 6.0 + CUDA 9.0 seems to have issues generating code for sm_37
+ if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 6.0.999)
+ list(APPEND _CUDA_CLANG_GENCODE_FLAGS "--cuda-gpu-arch=sm_37")
+ endif()
list(APPEND _CUDA_CLANG_GENCODE_FLAGS "--cuda-gpu-arch=sm_50")
list(APPEND _CUDA_CLANG_GENCODE_FLAGS "--cuda-gpu-arch=sm_52")
if (NOT CUDA_VERSION VERSION_LESS 8.0)
list(APPEND _CUDA_CLANG_GENCODE_FLAGS "--cuda-gpu-arch=sm_60")
list(APPEND _CUDA_CLANG_GENCODE_FLAGS "--cuda-gpu-arch=sm_61")
endif()
- # TODO: test CUDA 9.0 and figure out which clang releases support it
- # and the sm_70 arch.
+ if (NOT CUDA_VERSION VERSION_LESS 9.0)
+ list(APPEND _CUDA_CLANG_GENCODE_FLAGS "--cuda-gpu-arch=sm_70")
+ endif()
endif()
if (GMX_CUDA_TARGET_SM)
set_property(CACHE GMX_CUDA_TARGET_SM PROPERTY HELPSTRING "List of CUDA GPU architecture codes to compile for (without the sm_ prefix)")
set(REGRESSIONTEST_VERSION "${GMX_VERSION_STRING}")
set(REGRESSIONTEST_BRANCH "refs/heads/release-2018")
-set(REGRESSIONTEST_MD5SUM "5a609bea6b6777072063be9e665121ef" CACHE INTERNAL "MD5 sum of the regressiontests tarball")
+set(REGRESSIONTEST_MD5SUM "1a94916e2cf90e34fddb3514a65e0154" CACHE INTERNAL "MD5 sum of the regressiontests tarball")
math(EXPR GMX_VERSION_NUMERIC
"${GMX_VERSION_MAJOR}*10000 + ${GMX_VERSION_PATCH}")
fragments/doxygen-links.rst
install-guide/index.rst
release-notes/index.rst
+ release-notes/2018/2018.2.rst
release-notes/2018/2018.1.rst
release-notes/2018/major/highlights.rst
release-notes/2018/major/features.rst
GROMACS 2018.1 release notes
----------------------------
-This version was released on February 23, 2018. These release notes
+This version was released on March 21, 2018. These release notes
document the changes that have taken place in GROMACS since the
initial version 2018, to fix known issues. It also incorporates all
fixes made in version 2016.5 and earlier, which you can find described
Fixes where mdrun could behave incorrectly
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Fixed leap-frog integrator with Nose-Hoover T coupling and Parrinello-Rahman P coupling
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+With Parrinello-Rahman P coupling active, when applying Nose-Hoover T
+coupling at an MD step where no P coupling occurred, the update phase
+could use outdated or garbage coupling data. Such simulations with
+:mdp:`nsttcouple` equal to :mdp:`nstpcouple` are unaffected
+by this issue, so few users will be impacted by this. Simulations
+using other coupling algorithms are unaffected.
+
+:issue:`2418`
+
Used SIMD bondeds without perturbed interactions
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
In free-energy calculations that lacked bonded interactions between
:issue:`2381`
+Fixed multi-simulations with multiple ranks per simulation
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+These used to crash or hang mysteriously before the simulation would
+start.
+
+:issue:`2403`
+
+Improved inter-simulation signalling implementation
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+Reduced communication overhead with either many simulations or many
+ranks per simulation.
+
Fixed FEP calculations with SHAKE
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
All SHAKE + FEP calculations accumulated wrong values to dH/dl output,
:issue:`2377`
+Improved mdrun handling when GPUs are present but unavailable
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+:issue:`2415`
+
+Fixed crash with AWH and awh1-equilibrate-histogram=yes
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+When running AWH with awh1-equilibrate-histogram=yes and multiple MPI
+ranks, the simulation would fail with a segmentation fault.
+
+:issue:`2436`
+
+Fixed issues with AWH and bias sharing
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+When sharing AWH biases between multiple simulations, there were four
+issues. An MPI error would occur when an individual simulation would
+use more than one rank. The deconvoluted PMF would be garbage (but
+the sampling was correct). With more than 32 MPI ranks for an individual
+simulation, an error about a coordinate being 0 could occur.
+And continuation from checkpoints could be refused.
+
+:issue:`2433`
+:issue:`2439`
+:issue:`2441`
+:issue:`2444`
+
+Fixed virial with AWH and domain decomposition
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+When running AWH with domain decomposition, the AWH/pull virial
+contribution would be multiplied with the number of MPI ranks.
+
+
+Fixed restart bug with pull geometry direction-periodic
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+With COM pulling with geometry direction-periodic, (only) at the step
+of continuing from checkpoint the closest PBC image would be used
+instead of the one closest to the reference value. This could
+lead to a sharp spike in the pull force at the continuation step.
+
+:issue:`2446`
+
Fixes for ``gmx`` tools
^^^^^^^^^^^^^^^^^^^^^^^
:issue:`2386`
+Fixed grompp net charge check
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+Use of multiple non-consecutive blocks of a moleculetype now works
+correctly.
+
+:issue:`2407`
+
+Fixed issue with adding selection groups for TNG output
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+When there were more molecule blocks than molecule types in the topology,
+the output was wrong.
+
+Fixed help text and functionality of ``pdb2gmx -missing``
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+This now permits dangling bonds at termini, which is occasionally useful.
+
Fixes to improve portability
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+PME on Fermi-era GPUs on large systems now works
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+On older GPUs, it was possible to run into a hardware size limitation
+that has now been fixed.
+
+:issue:`2409`
+
GoogleTest death tests are now used in a more portable way
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+Tests for GPU utility functionality are now more robust
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+Non-GPU builds, and GPU builds that find incompatible or otherwise
+unavailable devices will pass the tests in the manner intended.
+
+:issue:`2405`
+
Used more portable python shebangs
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Per https://www.python.org/dev/peps/pep-0394/#recommendation, we
:issue:`2400`
-Work around gcc-6 bug in tabulated group non-bonded kernels
+Worked around gcc-6 bug in tabulated group non-bonded kernels
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
With the gcc-6 compiler, AVX and -O3, which is the default,
the tabulated non-bonded kernels of the (deprecated) group
:issue:`2424`
-Detect correct AMD Zen SMT topology
+Detected correct AMD Zen SMT topology
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
On recent AMD Zen processors, hardware thread detection and pinning
handling have been fixed, improving performance.
:issue:`2388`
-Fix sharing of the AWH bias over multiple simulations
+Fixed POWER VSX SIMD usage for upcoming gcc version 8
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-Sharing the AWH bias over multiple simulations only worked when
-each simulation was running on a single MPI rank. When a simulation
-itself used multiple MPI ranks, the run would stop with an MPI error.
+:issue:`2421`
-:issue:`2433`
+Fixed clang 6 with CUDA 9
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+Permits builds for sm_70 and may work around an issue with sm_37.
+
+:issue:`2443`
Miscellaneous
^^^^^^^^^^^^^
Made multi-atom TPI reproducible with different compilers
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+Documentation enhancements
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+In particular, for handling options to mdrun relating to GPUs and
+running mdrun with good performance.
--- /dev/null
+GROMACS 2018.2 release notes
+----------------------------
+
+This version was released on TODO, 2018. These release notes document
+the changes that have taken place in GROMACS since version 2018.1, to fix known
+issues. It also incorporates all fixes made in version TODO and
+earlier, which you can find described in the :ref:`release-notes`.
+
+Fixes where mdrun could behave incorrectly
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Fixes for ``gmx`` tools
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Fixes to improve portability
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Miscellaneous
+^^^^^^^^^^^^^
.. toctree::
:maxdepth: 1
+ 2018/2018.2
2018/2018.1
Major release
.. _gmx-performance:
-Getting good performance from mdrun
-===================================
+Getting good performance from :ref:`mdrun <gmx mdrun>`
+======================================================
The |Gromacs| build system and the :ref:`gmx mdrun` tool has a lot of built-in
and configurable intelligence to detect your hardware and make pretty
effective use of that hardware. For a lot of casual and serious use of
members of its domain. A GPU may perform work for more than
one PP rank, but it is normally most efficient to use a single
PP rank per GPU and for that rank to have thousands of
- particles. When the work of a PP rank is done on the CPU, mdrun
+ particles. When the work of a PP rank is done on the CPU, :ref:`mdrun <gmx mdrun>`
will make extensive use of the SIMD capabilities of the
core. There are various `command-line options
<controlling-the-domain-decomposition-algorithm` to control
there are separate PME ranks, then the remaining ranks handle
the PP work. Otherwise, all ranks do both PP and PME work.
-Running mdrun within a single node
-----------------------------------
+Running :ref:`mdrun <gmx mdrun>` within a single node
+-----------------------------------------------------
:ref:`gmx mdrun` can be configured and compiled in several different ways that
are efficient to use within a single :term:`node`. The default configuration
``-ntomp``
The total number of OpenMP threads per rank to start. The
default, 0, will start one thread on each available core.
- Alternatively, mdrun will honor the appropriate system
+ Alternatively, :ref:`mdrun <gmx mdrun>` will honor the appropriate system
environment variable (e.g. ``OMP_NUM_THREADS``) if set.
``-npme``
``-pin``
Can be set to "auto," "on" or "off" to control whether
- mdrun will attempt to set the affinity of threads to cores.
- Defaults to "auto," which means that if mdrun detects that all the
- cores on the node are being used for mdrun, then it should behave
+ :ref:`mdrun <gmx mdrun>` will attempt to set the affinity of threads to cores.
+ Defaults to "auto," which means that if :ref:`mdrun <gmx mdrun>` detects that all the
+ cores on the node are being used for :ref:`mdrun <gmx mdrun>`, then it should behave
like "on," and attempt to set the affinities (unless they are
already set by something else).
``-pinoffset``
If ``-pin on``, specifies the logical core number to
- which mdrun should pin the first thread. When running more than
- one instance of mdrun on a node, use this option to to avoid
- pinning threads from different mdrun instances to the same core.
+ which :ref:`mdrun <gmx mdrun>` should pin the first thread. When running more than
+ one instance of :ref:`mdrun <gmx mdrun>` on a node, use this option to avoid
+ pinning threads from different :ref:`mdrun <gmx mdrun>` instances to the same core.
``-pinstride``
If ``-pin on``, specifies the stride in logical core
- numbers for the cores to which mdrun should pin its threads. When
- running more than one instance of mdrun on a node, use this option
- to to avoid pinning threads from different mdrun instances to the
+ numbers for the cores to which :ref:`mdrun <gmx mdrun>` should pin its threads. When
+ running more than one instance of :ref:`mdrun <gmx mdrun>` on a node, use this option
+ to avoid pinning threads from different :ref:`mdrun <gmx mdrun>` instances to the
same core. Use the default, 0, to minimize the number of threads
- per physical core - this lets mdrun manage the hardware-, OS- and
+ per physical core - this lets :ref:`mdrun <gmx mdrun>` manage the hardware-, OS- and
configuration-specific details of how to map logical cores to
physical cores.
A string that specifies the ID numbers of the GPUs that
are available to be used by ranks on this node. For example,
"12" specifies that the GPUs with IDs 1 and 2 (as reported
- by the GPU runtime) can be used by mdrun. This is useful
+ by the GPU runtime) can be used by :ref:`mdrun <gmx mdrun>`. This is useful
when sharing a node with other computations, or if a GPU
- is best used to support a display. If many GPUs are
+ is best used to support a display. Without specifying this
+ parameter, :ref:`mdrun <gmx mdrun>` will utilize all GPUs. When many GPUs are
present, a comma may be used to separate the IDs, so
- "12,13" would make GPUs 12 and 13 available to mdrun.
+ "12,13" would make GPUs 12 and 13 available to :ref:`mdrun <gmx mdrun>`.
It could be necessary to use different GPUs on different
nodes of a simulation, in which case the environment
variable ``GMX_GPU_ID`` can be set differently for the ranks
on different nodes to achieve that result.
+ In |Gromacs| versions preceding 2018 this parameter used to
+ specify both GPU availability and GPU task assignment.
+ The latter is now done with the ``-gputasks`` parameter.
``-gputasks``
A string that specifies the ID numbers of the GPUs to be
used by corresponding GPU tasks on this node. For example,
"0011" specifies that the first two GPU tasks will use GPU 0,
and the other two use GPU 1. When using this option, the
- number of ranks must be known to mdrun, as well as where
+ number of ranks must be known to :ref:`mdrun <gmx mdrun>`, as well as where
tasks of different types should be run, such as by using
- ``-nb gpu``.
+ ``-nb gpu`` - only the tasks which are set to run on GPUs
+ count for parsing the mapping.
+ In |Gromacs| versions preceding 2018 only a single type
+ of GPU task could be run on any rank. Now that there is some
+ support for running PME on GPUs, the number of GPU tasks
+ (and the number of GPU IDs expected in the ``-gputasks`` string)
+ can actually be 2 for a single-rank simulation. The IDs
+ still have to be the same in this case, as using multiple GPUs
+ per single rank is not yet implemented.
+ The order of GPU tasks per rank in the string is short-range first,
+ PME second. The order of ranks with different kinds of GPU tasks
+ is the same by default, but can be influenced with the ``-ddorder``
+ option and gets quite complex when using multiple nodes.
+ The GPU task assignment (whether manually set, or automated),
+ will be reported in the :ref:`mdrun <gmx mdrun>` output on
+ the first physical node of the simulation. For example:
-Examples for mdrun on one node
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ ::
+
+ gmx mdrun -gputasks 0001 -nb gpu -pme gpu -npme 1 -ntmpi 4
+
+ will produce the following output in the log file/terminal:
+
+ ::
+
+ On host tcbl14 2 GPUs user-selected for this run.
+ Mapping of GPU IDs to the 4 GPU tasks in the 4 ranks on this node:
+ PP:0,PP:0,PP:0,PME:1
+
+ In this case, 3 ranks are set by the user to compute short-range work
+ on GPU 0, and 1 rank to compute PME on GPU 1.
+ The detailed indexing of the GPUs is also reported in the log file.
+
+ For more information about GPU tasks, please refer to
+ :ref:`Types of GPU tasks<gmx-gpu-tasks>`.
+
+Examples for :ref:`mdrun <gmx mdrun>` on one node
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
gmx mdrun
-Starts mdrun using all the available resources. mdrun
+Starts :ref:`mdrun <gmx mdrun>` using all the available resources. :ref:`mdrun <gmx mdrun>`
will automatically choose a fairly efficient division
into thread-MPI ranks, OpenMP threads and assign work
to compatible GPUs. Details will vary with hardware
gmx mdrun -nt 8
-Starts mdrun using 8 threads, which might be thread-MPI
+Starts :ref:`mdrun <gmx mdrun>` using 8 threads, which might be thread-MPI
or OpenMP threads depending on hardware and the kind
of simulation being run.
gmx mdrun -ntmpi 2 -ntomp 4
-Starts mdrun using eight total threads, with four thread-MPI
+Starts :ref:`mdrun <gmx mdrun>` using eight total threads, with four thread-MPI
ranks and two OpenMP threads per core. You should only use
these options when seeking optimal performance, and
must take care that the ranks you create can have
gmx mdrun -gpu_id 12
-Starts mdrun using GPUs with IDs 1 and 2 (e.g. because
+Starts :ref:`mdrun <gmx mdrun>` using GPUs with IDs 1 and 2 (e.g. because
GPU 0 is dedicated to running a display). This requires
two thread-MPI ranks, and will split the available
CPU cores between them using OpenMP threads.
gmx mdrun -ntmpi 4 -nb gpu -gputasks 1122
-Starts mdrun using four thread-MPI ranks, and maps them
+Starts :ref:`mdrun <gmx mdrun>` using four thread-MPI ranks, and maps them
to GPUs with IDs 1 and 2. The CPU cores available will
be split evenly between the ranks using OpenMP threads.
gmx mdrun -nt 6 -pin on -pinoffset 0
gmx mdrun -nt 6 -pin on -pinoffset 3
-Starts two mdrun processes, each with six total threads.
+Starts two :ref:`mdrun <gmx mdrun>` processes, each with six total threads.
Threads will have their affinities set to particular
logical cores, beginning from the logical core
with rank 0 or 3, respectively. The above would work
well on an Intel CPU with six physical cores and
hyper-threading enabled. Use this kind of setup only
-if restricting mdrun to a subset of cores to share a
+if restricting :ref:`mdrun <gmx mdrun>` to a subset of cores to share a
node with other processes.
::
MPI setup is restricted to one node, then the resulting
:ref:`gmx mdrun` will be local to that node.
-Running mdrun on more than one node
------------------------------------
+Running :ref:`mdrun <gmx mdrun>` on more than one node
+------------------------------------------------------
This requires configuring |Gromacs| to build with an external MPI
-library. By default, this mdrun executable is run with
+library. By default, this :ref:`mdrun <gmx mdrun>` executable is run with
:ref:`mdrun_mpi`. All of the considerations for running single-node
-mdrun still apply, except that ``-ntmpi`` and ``-nt`` cause a fatal
+:ref:`mdrun <gmx mdrun>` still apply, except that ``-ntmpi`` and ``-nt`` cause a fatal
error, and instead the number of ranks is controlled by the
MPI environment.
Settings such as ``-npme`` are much more important when
Defaults to "on." If "on," a Verlet-scheme simulation will
optimize various aspects of the PME and DD algorithms, shifting
load between ranks and/or GPUs to maximize throughput. Some
- mdrun features are not compatible with this, and these ignore
+ :ref:`mdrun <gmx mdrun>` features are not compatible with this, and these ignore
this option.
``-dlb``
the MPI environment, and does not explicitly manage
any aspect of OpenMP during the optimization.
-Examples for mdrun on more than one node
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The examples and explanations for for single-node mdrun are
+Examples for :ref:`mdrun <gmx mdrun>` on more than one node
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The examples and explanations for for single-node :ref:`mdrun <gmx mdrun>` are
still relevant, but ``-nt`` is no longer the way
to choose the number of MPI ranks.
suitable when there are ten nodes, each with two GPUs, but another
job on each node is using GPU 0. The job scheduler should set the
affinity of threads of both jobs to their allocated cores, or the
-performance of mdrun will suffer greatly.
+performance of :ref:`mdrun <gmx mdrun>` will suffer greatly.
::
of ``-dds`` might need to be adjusted to account for high or low
spatial inhomogeneity of the system.
-Finding out how to run mdrun better
------------------------------------
+Finding out how to run :ref:`mdrun <gmx mdrun>` better
+------------------------------------------------------
The Wallcycle module is used for runtime performance measurement of :ref:`gmx mdrun`.
At the end of the log file of each run, the "Real cycle and time accounting" section
* Waiting + Communication of force
* Particle mesh Ewald
* PME redist. X/F
-* PME spread/gather
+* PME spread
+* PME gather
* PME 3D-FFT
* PME 3D-FFT Communication
* PME solve Lennard-Jones
+* PME solve LJ
* PME solve Elec
* PME wait for particle-particle
* Wait + Receive PME force
* Wait GPU nonlocal
* Wait GPU local
+* Wait PME GPU spread
+* Wait PME GPU gather
+* Reduce PME GPU Force
* Non-bonded position/force buffer operations
* Virtual site spread
* COM pull force
+* AWH (accelerated weight histogram method)
* Write trajectory
* Update
* Constraints
* Bonded-FEP force
* Restraints force
* Listed buffer operations
+* Nonbonded pruning
* Nonbonded force
+* Launch non-bonded GPU tasks
+* Launch PME GPU tasks
* Ewald force correction
* Non-bonded position buffer operations
* Non-bonded force buffer operations
.. _gmx-mdrun-on-gpu:
-Running mdrun with GPUs
------------------------
+Running :ref:`mdrun <gmx mdrun>` with GPUs
+------------------------------------------
NVIDIA GPUs from the professional line (Tesla or Quadro) starting with
the Kepler generation (compute capability 3.5 and later) support changing the
Known limitations
.................
-**Please note again the limitations outlined above!**
+**Please note again the limitations outlined below!**
- Only compilation with CUDA is supported.
-- Only a PME order of 4 is supported in GPU.
+- Only a PME order of 4 is supported on GPUs.
- PME will run on a GPU only when exactly one rank has a
PME task, ie. decompositions with multiple ranks doing PME are not supported.
- Only single precision is supported.
-- Free energy calculations are not supported, because only single PME grids can be calculated.
+- Free energy calculations where charges are perturbed are not supported,
+ because only single PME grids can be calculated.
-- LJ PME is not supported on GPU.
+- LJ PME is not supported on GPUs.
Assigning tasks to GPUs
.......................
/* Need to temporarily exponentiate the log weights to sum over simulations */
for (size_t i = 0; i < buffer.size(); i++)
{
- buffer[i] = pointState[i].inTargetRegion() ? std::exp(static_cast<float>(pointState[i].logPmfSum())) : 0;
+ buffer[i] = pointState[i].inTargetRegion() ? std::exp(-pointState[i].logPmfSum()) : 0;
}
sumOverSimulations(gmx::ArrayRef<double>(buffer), commRecord, multiSimComm);
"awh_coordpoint", "awh_umbrellaGridpoint",
"awh_updatelist",
"awh_logScaledSampleWeight",
- "awh_numupdates"
+ "awh_numupdates",
"awh_forceCorrelationGrid"
};
{ "-ignh", FALSE, etBOOL, {&bRemoveH},
"Ignore hydrogen atoms that are in the coordinate file" },
{ "-missing", FALSE, etBOOL, {&bAllowMissing},
- "Continue when atoms are missing, dangerous" },
+ "Continue when atoms are missing and bonds cannot be made, dangerous" },
{ "-v", FALSE, etBOOL, {&bVerbose},
"Be slightly more verbose in messages" },
{ "-posrefc", FALSE, etREAL, {&posre_fc},
/* lookup hackblocks and rtp for all residues */
get_hackblocks_rtp(&hb_chain, &restp_chain,
nrtp, restp, pdba->nres, pdba->resinfo,
- cc->nterpairs, cc->ntdb, cc->ctdb, cc->r_start, cc->r_end);
+ cc->nterpairs, cc->ntdb, cc->ctdb, cc->r_start, cc->r_end,
+ bAllowMissing);
/* ideally, now we would not need the rtp itself anymore, but do
everything using the hb and restp arrays. Unfortunately, that
requires some re-thinking of code in gen_vsite.c, which I won't
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
int nres, t_resinfo *resinfo,
int nterpairs,
t_hackblock **ntdb, t_hackblock **ctdb,
- int *rn, int *rc)
+ int *rn, int *rc,
+ gmx_bool bAllowMissing)
{
int i, j, k, l;
char *key;
if (bRM && ((tern >= 0 && ntdb[tern] == nullptr) ||
(terc >= 0 && ctdb[terc] == nullptr)))
{
- gmx_fatal(FARGS, "There is a dangling bond at at least one of the terminal ends and the force field does not provide terminal entries or files. Fix your terminal residues so that they match the residue database (.rtp) entries, or provide terminal database entries (.tdb).");
+ const char *errString = "There is a dangling bond at at least one of the terminal ends and the force field does not provide terminal entries or files. Fix your terminal residues so that they match the residue database (.rtp) entries, or provide terminal database entries (.tdb).";
+ if (bAllowMissing)
+ {
+ fprintf(stderr, "%s\n", errString);
+ }
+ else
+ {
+ gmx_fatal(FARGS, errString);
+ }
}
- if (bRM && ((tern >= 0 && ntdb[tern]->nhack == 0) ||
- (terc >= 0 && ctdb[terc]->nhack == 0)))
+ else if (bRM && ((tern >= 0 && ntdb[tern]->nhack == 0) ||
+ (terc >= 0 && ctdb[terc]->nhack == 0)))
{
- gmx_fatal(FARGS, "There is a dangling bond at at least one of the terminal ends. Fix your coordinate file, add a new terminal database entry (.tdb), or select the proper existing terminal entry.");
+ const char *errString = "There is a dangling bond at at least one of the terminal ends. Fix your coordinate file, add a new terminal database entry (.tdb), or select the proper existing terminal entry.";
+ if (bAllowMissing)
+ {
+ fprintf(stderr, "%s\n", errString);
+ }
+ else
+ {
+ gmx_fatal(FARGS, errString);
+ }
}
}
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
int nres, t_resinfo *resinfo,
int nterpairs,
t_hackblock **ntdb, t_hackblock **ctdb,
- int *rn, int *rc);
+ int *rn, int *rc,
+ gmx_bool bAllowMissing);
/* Get the database entries for the nres residues in resinfo
* and store them in restp and hb.
*/
int *rank;
#endif
- if (multidirs.size() <= 1)
+ if (multidirs.empty())
{
return nullptr;
}
- if (!GMX_LIB_MPI && multidirs.size() > 1)
+
+ if (!GMX_LIB_MPI && multidirs.size() >= 1)
{
gmx_fatal(FARGS, "mdrun -multidir is only supported when GROMACS has been "
"configured with a proper external MPI library.");
}
+ if (multidirs.size() == 1)
+ {
+ /* NOTE: It would be nice if this special case worked, but this requires checks/tests. */
+ gmx_fatal(FARGS, "To run mdrun in multiple simulation mode, more then one "
+ "actual simulation is required. The single simulation case is not supported.");
+ }
+
#if GMX_MPI
int numRanks;
MPI_Comm_size(comm, &numRanks);
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2012,2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2012,2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*/
const char *check_box(int ePBC, const matrix box);
-/*! \brief Creates box matrix from edge lengths and angles. */
+/*! \brief Creates box matrix from edge lengths and angles.
+ *
+ * \param[inout] box The box matrix
+ * \param[in] vec The edge lengths
+ * \param[in] angleInDegrees The angles
+ */
void matrix_convert(matrix box, const rvec vec, const rvec angleInDegrees);
/*! \brief Compute the maximum cutoff for the box
*/
int pbc_dx_aiuc(const t_pbc *pbc, const rvec x1, const rvec x2, rvec dx);
-/*\brief Compute distance with PBC
+/*! \brief Compute distance with PBC
*
* As pbc_dx, but for double precision vectors.
* set_pbc must be called before ever calling this routine.
pcrd = &pull->coord[coord_ind];
- get_pull_coord_distance(pull, coord_ind, pbc);
-
+ /* Update the reference value before computing the distance,
+ * since it is used in the distance computation with periodic pulling.
+ */
update_pull_coord_reference_value(pcrd, coord_ind, t);
+ get_pull_coord_distance(pull, coord_ind, pbc);
+
/* Determine the deviation */
dev = pcrd->value - pcrd->value_ref;
calc_pull_coord_vector_force(pcrd);
/* Add the forces for this coordinate to the total virial and force */
- if (forceWithVirial->computeVirial_)
+ if (forceWithVirial->computeVirial_ && pull->comm.isMasterRank)
{
matrix virial = { { 0 } };
add_virial_coord(virial, pcrd);
ga2la = nullptr;
}
- /* We always make the master node participate, such that it can do i/o
- * and to simplify MC type extensions people might have.
+ /* We always make the master node participate, such that it can do i/o,
+ * add the virial and to simplify MC type extensions people might have.
*/
- bMustParticipate = (comm->bParticipateAll || dd == nullptr || DDMASTER(dd));
+ bMustParticipate = (comm->bParticipateAll || comm->isMasterRank);
for (g = 0; g < pull->ngroup; g++)
{
make_local_pull_group(ga2la, &pull->group[g],
0, md->homenr);
+ GMX_ASSERT(bMustParticipate || dd != nullptr, "Either all ranks (including this rank) participate, or we use DD and need to have access to dd here");
+
/* We should participate if we have pull or pbc atoms */
if (!bMustParticipate &&
(pull->group[g].nat_loc > 0 ||
comm = &pull->comm;
#if GMX_MPI
- /* Use a sub-communicator when we have more than 32 ranks */
+ /* Use a sub-communicator when we have more than 32 ranks, but not
+ * when we have an external pull potential, since then the external
+ * potential provider expects each rank to have the coordinate.
+ */
comm->bParticipateAll = (cr == nullptr || !DOMAINDECOMP(cr) ||
cr->dd->nnodes <= 32 ||
+ pull->numCoordinatesWithExternalPotential > 0 ||
getenv("GMX_PULL_PARTICIPATE_ALL") != nullptr);
/* This sub-commicator is not used with comm->bParticipateAll,
* so we can always initialize it to NULL.
*/
comm->mpi_comm_com = MPI_COMM_NULL;
comm->nparticipate = 0;
+ comm->isMasterRank = (cr == nullptr || MASTER(cr));
#else
/* No MPI: 1 rank: all ranks pull */
comm->bParticipateAll = TRUE;
+ comm->isMasterRank = true;
#endif
comm->bParticipate = comm->bParticipateAll;
comm->setup_count = 0;
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
MPI_Comm mpi_comm_com; /* Communicator for pulling */
#endif
int nparticipate; /* The number of ranks participating */
+ bool isMasterRank; /* Tells whether our rank is the master rank and thus should add the pull virial */
gmx_int64_t setup_count; /* The number of decomposition calls */
gmx_int64_t must_count; /* The last count our rank needed to be part */
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
load4U(const float *m)
{
return {
+#if __GNUC__ < 7
*reinterpret_cast<const __vector float *>(m)
+#else
+ vec_xl(0, m)
+#endif
};
}
static inline void gmx_simdcall
store4U(float *m, Simd4Float a)
{
+// Unaligned store of 4 packed floats. The GCC-version split presumably works
+// around vec_xst being unavailable/unreliable before GCC 7 -- TODO confirm.
+#if __GNUC__ < 7
+// Older GCC: store through a cast vector pointer.
*reinterpret_cast<__vector float *>(m) = a.simdInternal_;
+#else
+    // Newer GCC: use the dedicated VSX unaligned-store intrinsic.
+    vec_xst(a.simdInternal_, 0, m);
+#endif
}
static inline Simd4Float gmx_simdcall
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
simdLoadU(const double *m, SimdDoubleTag = {})
{
return {
+#if __GNUC__ < 7
*reinterpret_cast<const __vector double *>(m)
+#else
+ vec_xl(0, m)
+#endif
};
}
static inline void gmx_simdcall
storeU(double *m, SimdDouble a)
{
+// Unaligned store of packed doubles. The GCC-version split presumably works
+// around vec_xst being unavailable/unreliable before GCC 7 -- TODO confirm.
+#if __GNUC__ < 7
+// Older GCC: store through a cast vector pointer.
*reinterpret_cast<__vector double *>(m) = a.simdInternal_;
+#else
+    // Newer GCC: use the dedicated VSX unaligned-store intrinsic.
+    vec_xst(a.simdInternal_, 0, m);
+#endif
}
static inline SimdDouble gmx_simdcall
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
+ * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
simdLoadU(const float *m, SimdFloatTag = {})
{
return {
+#if __GNUC__ < 7
*reinterpret_cast<const __vector float *>(m)
+#else
+ vec_xl(0, m)
+#endif
};
}
static inline void gmx_simdcall
storeU(float *m, SimdFloat a)
{
+// Unaligned store of packed floats. The GCC-version split presumably works
+// around vec_xst being unavailable/unreliable before GCC 7 -- TODO confirm.
+#if __GNUC__ < 7
+// Older GCC: store through a cast vector pointer.
*reinterpret_cast<__vector float *>(m) = a.simdInternal_;
+#else
+    // Newer GCC: use the dedicated VSX unaligned-store intrinsic.
+    vec_xst(a.simdInternal_, 0, m);
+#endif
}
static inline SimdFloat gmx_simdcall
simdLoadU(const std::int32_t *m, SimdFInt32Tag)
{
return {
+#if __GNUC__ < 7
*reinterpret_cast<const __vector int *>(m)
+#else
+ vec_xl(0, m)
+#endif
};
}
static inline void gmx_simdcall
storeU(std::int32_t * m, SimdFInt32 a)
{
+// Unaligned store of packed 32-bit integers. The GCC-version split presumably
+// works around vec_xst being unavailable/unreliable before GCC 7 -- TODO confirm.
+#if __GNUC__ < 7
+// Older GCC: store through a cast vector pointer.
*reinterpret_cast<__vector int *>(m) = a.simdInternal_;
+#else
+    // Newer GCC: use the dedicated VSX unaligned-store intrinsic.
+    vec_xst(a.simdInternal_, 0, m);
+#endif
}
static inline SimdFInt32 gmx_simdcall
#include "gromacs/mdlib/vcm.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdtypes/awh-history.h"
+#include "gromacs/mdtypes/awh-params.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/df_history.h"
#include "gromacs/mdtypes/energyhistory.h"
gmx_membed_t *membed,
gmx_walltime_accounting_t walltime_accounting)
{
- gmx_mdoutf_t outf = nullptr;
- gmx_int64_t step, step_rel;
- double elapsed_time;
- double t, t0, lam0[efptNR];
- gmx_bool bGStatEveryStep, bGStat, bCalcVir, bCalcEnerStep, bCalcEner;
- gmx_bool bNS, bNStList, bSimAnn, bStopCM,
- bFirstStep, bInitStep, bLastStep = FALSE,
- bUsingEnsembleRestraints;
+ gmx_mdoutf_t outf = nullptr;
+ gmx_int64_t step, step_rel;
+ double elapsed_time;
+ double t, t0, lam0[efptNR];
+ gmx_bool bGStatEveryStep, bGStat, bCalcVir, bCalcEnerStep, bCalcEner;
+ gmx_bool bNS, bNStList, bSimAnn, bStopCM,
+ bFirstStep, bInitStep, bLastStep = FALSE;
gmx_bool bDoDHDL = FALSE, bDoFEP = FALSE, bDoExpanded = FALSE;
gmx_bool do_ene, do_log, do_verbose, bRerunWarnNoV = TRUE,
bForceUpdate = FALSE, bCPT;
repl_ex = init_replica_exchange(fplog, ms, top_global->natoms, ir,
replExParams);
}
-
/* PME tuning is only supported in the Verlet scheme, with PME for
* Coulomb. It is not supported with only LJ PME, or for
* reruns. */
bSumEkinhOld = FALSE;
bExchanged = FALSE;
bNeedRepartition = FALSE;
- // TODO This implementation of ensemble orientation restraints is nasty because
- // a user can't just do multi-sim with single-sim orientation restraints.
- bUsingEnsembleRestraints = (fcd->disres.nsystems > 1) || (ms && fcd->orires.nr);
+ bool simulationsShareState = false;
+ int nstSignalComm = nstglobalcomm;
{
- // Replica exchange and ensemble restraints need all
+ // TODO This implementation of ensemble orientation restraints is nasty because
+ // a user can't just do multi-sim with single-sim orientation restraints.
+ bool usingEnsembleRestraints = (fcd->disres.nsystems > 1) || (ms && fcd->orires.nr);
+ bool awhUsesMultiSim = (ir->bDoAwh && ir->awhParams->shareBiasMultisim && ms);
+
+ // Replica exchange, ensemble restraints and AWH need all
// simulations to remain synchronized, so they need
// checkpoints and stop conditions to act on the same step, so
// the propagation of such signals must take place between
// simulations, not just within simulations.
- bool checkpointIsLocal = !useReplicaExchange && !bUsingEnsembleRestraints;
- bool stopConditionIsLocal = !useReplicaExchange && !bUsingEnsembleRestraints;
+ // TODO: Make algorithm initializers set these flags.
+ simulationsShareState = useReplicaExchange || usingEnsembleRestraints || awhUsesMultiSim;
bool resetCountersIsLocal = true;
- signals[eglsCHKPT] = SimulationSignal(checkpointIsLocal);
- signals[eglsSTOPCOND] = SimulationSignal(stopConditionIsLocal);
+ signals[eglsCHKPT] = SimulationSignal(!simulationsShareState);
+ signals[eglsSTOPCOND] = SimulationSignal(!simulationsShareState);
signals[eglsRESETCOUNTERS] = SimulationSignal(resetCountersIsLocal);
+
+ if (simulationsShareState)
+ {
+ // Inter-simulation signal communication does not need to happen
+ // often, so we use a minimum of 200 steps to reduce overhead.
+ const int c_minimumInterSimulationSignallingInterval = 200;
+ nstSignalComm = ((c_minimumInterSimulationSignallingInterval + nstglobalcomm - 1)/nstglobalcomm)*nstglobalcomm;
+ }
}
DdOpenBalanceRegionBeforeForceComputation ddOpenBalanceRegion = (DOMAINDECOMP(cr) ? DdOpenBalanceRegionBeforeForceComputation::yes : DdOpenBalanceRegionBeforeForceComputation::no);
* to allow for exact continuation, when possible.
*/
signals[eglsSTOPCOND].sig = 1;
- nsteps_stop = std::max(ir->nstlist, 2*nstglobalcomm);
+ nsteps_stop = std::max(ir->nstlist, 2*nstSignalComm);
}
else if (gmx_get_stop_condition() == gmx_stop_cond_next)
{
* This breaks exact continuation.
*/
signals[eglsSTOPCOND].sig = -1;
- nsteps_stop = nstglobalcomm + 1;
+ nsteps_stop = nstSignalComm + 1;
}
if (fplog)
{
{
// Organize to do inter-simulation signalling on steps if
// and when algorithms require it.
- bool doInterSimSignal = (!bFirstStep && bDoReplEx) || bUsingEnsembleRestraints;
+ bool doInterSimSignal = (simulationsShareState && do_per_step(step, nstSignalComm));
if (bGStat || (!EI_VV(ir->eI) && do_per_step(step+1, nstglobalcomm)) || doInterSimSignal)
{