include(gmxManageSimd)
gmx_manage_simd()
-include(gmxManageCycleCounters)
-gmx_manage_cycle_counters()
-
# Process QM/MM Settings
if(${GMX_QMMM_PROGRAM} STREQUAL "GAUSSIAN")
set(GMX_QMMM_GAUSSIAN 1)
# configuration syntax, so that the configurations are stable even
# if the defaults change in future.
-# Test on ARM v7
-# Test ARM_NEON SIMD
-gcc-5 simd=ARM_NEON no-hwloc release-with-assert
-
# Test on ARM v8
# Test ARM_NEON_ASIMD SIMD
gcc-5 simd=ARM_NEON_ASIMD release-with-assert
except:
variables:
- $GROMACS_RELEASE
-# Always clone the default version for this branch, release-2020 in this case
+# Always clone the default version for this branch, master in this case
script:
- - export REGTESTBRANCH=release-2020
+ - export REGTESTBRANCH=master
- if [ ! -z $CI_MERGE_REQUEST_TARGET_BRANCH_NAME ] ; then
if [[ ! -d regressiontests ]] ; then
mkdir regressiontests ;
- if [[ $GROMACS_RELEASE != "true" ]] ; then
REGTESTNAME=$REGTESTNAME-dev ;
fi
- - export REGTESTBRANCH=release-2020
+ - export REGTESTBRANCH=master
- if [[ $CI_COMMIT_REF_NAME == "master" || $CI_COMMIT_REF_NAME == "release-20"[1-2][0-9] ]] ; then
export REGTESTBRANCH=$CI_COMMIT_REF_NAME ;
fi
if [ -z "$CLANG_FORMAT" ]
then
echo "Please set the path to clang-format using the git hook"
- echo "git config hooks.clang_formatpath /path/to/clang-format"
+ echo "git config hooks.clangformatpath /path/to/clang-format"
echo "or by setting an environment variable, e.g."
echo "CLANG_FORMAT=/path/to/clang-format"
echo "See docs/dev-manual/code-formatting.rst for how to get clang-format."
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2013,2014,2015,2016,2018,2019, by the GROMACS development team, led by
+# Copyright (c) 2013,2014,2015,2016,2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
+++ /dev/null
-int main()
-{
-#ifdef __ARM_ARCH_7A__
- return 0;
-#else
-#error This compiler is not targetting 32-bit ARMv7
-#endif
-}
+++ /dev/null
-int main()
-{
-#if defined(__ARM_ARCH_7A__) && defined(__GNUC__)
- unsigned int cycles_lo, cycles_hi;
- asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cycles_lo), "=r" (cycles_hi));
-
- // Return 0 (success) if low or high 32 bits contained anything non-trivial
- return !(cycles_lo > 0 || cycles_hi > 0);
-#else
-#error This architecture/compiler does not support ARMv7 32-bit cycle counters
-#endif
-}
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+# Copyright (c) 2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
try_compile(GMX_TARGET_MIC ${CMAKE_BINARY_DIR}
"${CMAKE_SOURCE_DIR}/cmake/TestMIC.cpp")
endif()
- if (NOT DEFINED GMX_TARGET_ARMV7)
- try_compile(GMX_TARGET_ARMV7 ${CMAKE_BINARY_DIR}
- "${CMAKE_SOURCE_DIR}/cmake/TestARMv7.cpp")
- endif()
if (NOT DEFINED GMX_TARGET_FUJITSU_SPARC64)
try_compile(GMX_TARGET_FUJITSU_SPARC64 ${CMAKE_BINARY_DIR}
"${CMAKE_SOURCE_DIR}/cmake/TestFujitsuSparc64.cpp")
+++ /dev/null
-#
-# This file is part of the GROMACS molecular simulation package.
-#
-# Copyright (c) 2012,2013,2014,2015,2016, by the GROMACS development team, led by
-# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
-# and including many others, as listed in the AUTHORS file in the
-# top-level source directory and at http://www.gromacs.org.
-#
-# GROMACS is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public License
-# as published by the Free Software Foundation; either version 2.1
-# of the License, or (at your option) any later version.
-#
-# GROMACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with GROMACS; if not, see
-# http://www.gnu.org/licenses, or write to the Free Software Foundation,
-# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# If you want to redistribute modifications to GROMACS, please
-# consider that scientific software is very special. Version
-# control is crucial - bugs must be traceable. We will be happy to
-# consider code for inclusion in the official distribution, but
-# derived work must not be called official GROMACS. Details are found
-# in the README & COPYING files - if they are missing, get the
-# official version at http://www.gromacs.org.
-#
-# To help us fund GROMACS development, we humbly ask that you cite
-# the research papers on the package. Check out http://www.gromacs.org.
-
-# - Decide whether to use CPU cycle counters
-#
-# gmx_manage_cycle_counters()
-#
-# By default, we enable GMX_CYCLECOUNTERS for all architectures except ARMv7.
-# On ARMv7, we enable it if we are not cross-compiling and can run a small
-# test to confirm that the support is present in the kernel, otherwise we
-# disable it.
-#
-macro(gmx_manage_cycle_counters)
-
- if(NOT DEFINED GMX_CYCLECOUNTERS)
-
- if(GMX_TARGET_ARMV7)
-
- if(NOT CMAKE_CROSSCOMPILING)
-
- try_run(ARMV7_COUNTER_RUN_VAR ARMV7_COUNTER_COMPILE_VAR
- ${CMAKE_BINARY_DIR} "${CMAKE_SOURCE_DIR}/cmake/TestARMv7CycleCounters.cpp")
-
- # Enable cycle counter usage if the test ran fine and exited with 0 return code
- if(${ARMV7_COUNTER_COMPILE_VAR} AND ("${ARMV7_COUNTER_RUN_VAR}" EQUAL "0"))
- set(GMX_CYCLECOUNTERS ON CACHE BOOL "Use CPU cycle counters timing")
- else()
- set(GMX_CYCLECOUNTERS OFF CACHE BOOL "Use CPU cycle counters for timing")
- endif()
-
- else()
-
- # Disable cycle counters when cross-compiling for ARMv7
- set(GMX_CYCLECOUNTERS OFF CACHE BOOL "Use CPU cycle counters for timing")
-
- endif()
-
- else()
-
- # For now we (try to) enable cycle counters on all other platforms
- set(GMX_CYCLECOUNTERS ON CACHE BOOL "Use CPU cycle counters timing")
-
- endif()
-
- mark_as_advanced(GMX_CYCLECOUNTERS)
-
- endif()
-
-endmacro()
-
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+# Copyright (c) 2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
# GROMACS 2018 3
# GROMACS 2019 4
# GROMACS 2020 5
+# GROMACS 2021 6
# LIBRARY_SOVERSION_MINOR so minor version for the built libraries.
# Should be increased for each release that changes only the implementation.
# In GROMACS, the typical policy is to increase it for each patch version
# The GROMACS convention is that these are the version number of the next
# release that is going to be made from this branch.
-set(GMX_VERSION_MAJOR 2020)
+set(GMX_VERSION_MAJOR 2021)
set(GMX_VERSION_PATCH 0)
# The suffix, on the other hand, is used mainly for betas and release
# candidates, where it signifies the most recent such release from
# this branch; it will be empty before the first such release, as well
# as after the final release is out.
-set(GMX_VERSION_SUFFIX "-rc1")
+set(GMX_VERSION_SUFFIX "")
# Conventionally with libtool, any ABI change must change the major
# version number, the minor version number should change if it's just
# here. The important thing is to minimize the chance of third-party
# code being able to dynamically link with a version of libgromacs
# that might not work.
-set(LIBRARY_SOVERSION_MAJOR 5)
+set(LIBRARY_SOVERSION_MAJOR 6)
set(LIBRARY_SOVERSION_MINOR 0)
set(LIBRARY_VERSION ${LIBRARY_SOVERSION_MAJOR}.${LIBRARY_SOVERSION_MINOR}.0)
endif()
set(REGRESSIONTEST_VERSION "${GMX_VERSION_STRING}")
-set(REGRESSIONTEST_BRANCH "refs/heads/release-2020")
+set(REGRESSIONTEST_BRANCH "refs/heads/master")
# Run the regressiontests packaging job with the correct package
# version string, and the release box checked, in order to have it
# build the regressiontests tarball with all the right naming. The
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+# Copyright (c) 2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
how-to/visualize.rst
install-guide/index.rst
release-notes/index.rst
+ release-notes/2021/major/highlights.rst
+ release-notes/2021/major/features.rst
+ release-notes/2021/major/performance.rst
+ release-notes/2021/major/tools.rst
+ release-notes/2021/major/bugs-fixed.rst
+ release-notes/2021/major/removed-functionality.rst
+ release-notes/2021/major/deprecated-functionality.rst
+ release-notes/2021/major/portability.rst
+ release-notes/2021/major/miscellaneous.rst
release-notes/2020/major/highlights.rst
release-notes/2020/major/features.rst
release-notes/2020/major/performance.rst
# Sphinx cache with pickled ReST documents
set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees")
+ set(SPHINX_CONFIG_OVERRIDES "")
+ if (GMX_DEVELOPER_BUILD)
+ set(SPHINX_CONFIG_OVERRIDES "-D todo_include_todos=1")
+ endif()
add_custom_target(webpage-sphinx
DEPENDS sphinx-programs
DEPENDS sphinx-input
-q -b html
-w sphinx-html.log
-d "${SPHINX_CACHE_DIR}"
+ ${SPHINX_CONFIG_OVERRIDES}
"${SPHINX_INPUT_DIR}"
"${HTML_OUTPUT_DIR}"
WORKING_DIRECTORY
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2015,2016,2017,2018,2019, by the GROMACS development team, led by
+# Copyright (c) 2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
developers or advanced users can set to affect what CMake generates and/or what
will get built.
-.. TODO: Figure out where to document basic variables intended for user
+.. todo::
+
+ Figure out where to document basic variables intended for user
consumption, and how does it relate to documentation here.
-.. TODO: Document the remaining variables below, and identify any variables
+.. todo::
+
+ Document the remaining variables below, and identify any variables
missing from the list.
Compiler flags
Defaults to ``OFF``, and there should not be any need to change this in a
manual build.
- .. TODO: This could likely be replaced by a (yet another) build type.
+ .. todo:: This could likely be replaced by a (yet another) build type.
.. cmake:: GMX_BUILD_MDRUN_ONLY
version 8.0.* with libstdc++<7 or libc++ is supported. Others might miss tests or give false positives.
It is run automatically on Jenkins for each commit. Many checks have fixes which can automatically be
applied. To run it, the build has to be configured with
- ``cmake -DGMX_CLANG_TIDY=ON -DGMX_OPENMP=no -DCMAKE_BUILD_TYPE=Debug -DCMAKE_EXPORT_COMPILE_COMMANDS=on``.
+ ``cmake -DGMX_CLANG_TIDY=ON -DCMAKE_BUILD_TYPE=Debug -DCMAKE_EXPORT_COMPILE_COMMANDS=on``.
Any ``CMAKE_BUILD_TYPE`` which enables asserts (e.g. ASAN) works. Such a configured build will
run both the compiler as well as clang-tidy when building. The name of the clang-tidy executable is set with
``-DCLANG_TIDY=...``, and the full path to it can be set with ``-DCLANG_TIDY_EXE=...``.
If ``OFF`` (the default), all detection is skipped and the manual cannot be
built.
- .. TODO: Consider if this is really necessary, or if we could just use
+ .. todo:: Consider if this is really necessary, or if we could just use
GMX_DEVELOPER_BUILD.
.. cmake:: GMX_BUILD_TARBALL
External libraries
------------------
-.. TODO: List external libraries used (either from src/external/, or from the
+.. todo::
+
+ List external libraries used (either from src/external/, or from the
system), whether they are required or optional, what functionality they
provide for Gromacs, and how to control their use.
:doc:`releng/jenkins-howto` has information on how to do common things with
Jenkins builds.
-.. TODO: Add a link to a wiki page about general Jenkins documentation, once
+.. todo:: Add a link to a wiki page about general Jenkins documentation, once
there is more of that.
Pre-submit verification
See :doc:`doxygen` for details of how the Doxygen documentation is built and
organized.
-.. TODO: Create a separate page (at the front of the developer guide, and/or at
+.. todo:: Create a separate page (at the front of the developer guide, and/or at
the main index.rst) that describes the documentation from readers'
perspective, and move relevant content there. This should contain just an
overview of how the documentation is organized in the source tree.
:doc:`commitstyle`
Guidelines for formatting git commits when sending in proposed fixes for code review.
-.. TODO: Add more guidelines
+.. todo:: Add more guidelines
testutils
physical_validation
-.. TODO: Consider what is the most reasonable structure; currently, this list
+.. todo:: :issue:`3032`
+
+ Consider what is the most reasonable structure; currently, this list
here does not make much sense in the overall organization and creates a
confusing TOC for the developer guide.
-.. TODO: Add details for most of the tools, either in the form of links to wiki,
+.. todo:: :issue:`3267`
+
+ Add details for most of the tools, either in the form of links to wiki,
or to a separate page that explains more details.
Change management
Build system
------------
-.. TODO: details, ASAN, others?
+.. todo:: details, ASAN, others?
CMake
Main tool used in the build system.
Welcome to the |Gromacs| documentation!
=======================================
-.. TODO : consolidate at least some of the material in the
+.. todo::
+
+ Consolidate at least some of the material in the
Documentation links below into the new user guide, along with all
of http://www.gromacs.org/Documentation/Cut-off_schemes,
http://www.gromacs.org/Documentation/Acceleration_and_parallelization
|Gromacs| can be compiled for many operating systems and
architectures. These include any distribution of Linux, Mac OS X or
Windows, and architectures including x86, AMD64/x86-64, several
-PowerPC including POWER8, ARM v7, ARM v8, and SPARC VIII.
+PowerPC including POWER8, ARM v8, and SPARC VIII.
Compiler
^^^^^^^^
512-wide AVX, including KNL, add ``--enable-avx512`` also.
FFTW will create a fat library with codelets for all different instruction sets,
and pick the fastest supported one at runtime.
-On ARM architectures with NEON SIMD support and IBM Power8 and later, you
+On ARM architectures with SIMD support and IBM Power8 and later, you
definitely want version 3.3.5 or later,
and to compile it with ``--enable-neon`` and ``--enable-vsx``, respectively, for
SIMD support. If you are using a Cray, there is a special modified
The GPU acceleration has been tested on AMD64/x86-64 platforms with
Linux, Mac OS X and Windows operating systems, but Linux is the
-best-tested and supported of these. Linux running on POWER 8, ARM v7 and v8
+best-tested and supported of these. Linux running on POWER 8 and ARM v8
CPUs also works well.
Experimental support is available for compiling CUDA code, both for host and
have a look at the `continuous integration server used by GROMACS`_,
which runs Jenkins_.
-We test irregularly on ARM v7, ARM v8, Cray, Fujitsu
-PRIMEHPC, Power8, Power9,
+We test irregularly on ARM v8, Cray, Power8, Power9,
Google Native Client and other environments, and
with other compilers and compiler versions, too.
File formats
============
-.. TODO in future patch: update for accuracy, organize better, improve formatting
+.. todo:: in future patch: update for accuracy, organize better, improve formatting
Summary of file formats
^^^^^^^^^^^^^^^^^^^^^^^
.. highlight:: bash
-.. TODO this needs to be carefully checked that I didn't mess anything up too bad
+.. todo:: this needs to be carefully checked that I didn't mess anything up too bad
.. ifconfig:: gmx_image_convert == 'possible'
Citation information
--------------------
-.. TODO needs link to ref list
+.. todo:: needs link to ref list
|GMX_MANUAL_DOI_STRING|
--- /dev/null
+Bugs fixed
+^^^^^^^^^^
+
+.. Note to developers!
+ Please use """"""" to underline the individual entries for fixed issues in the subfolders,
+ otherwise the formatting on the webpage is messed up.
+   Also, please use the syntax :issue:`number` to reference issues on redmine, without
+   a space between the colon and number!
+
--- /dev/null
+.. _anticipated-changes:
+
+.. Note to developers!
+ Please use """"""" to underline the individual entries for fixed issues in the subfolders,
+ otherwise the formatting on the webpage is messed up.
+   Also, please use the syntax :issue:`number` to reference issues on redmine, without
+   a space between the colon and number!
+
+Changes anticipated to |Gromacs| 2021 functionality
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Functionality deprecated in |Gromacs| 2021
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
--- /dev/null
+New and improved features
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. Note to developers!
+ Please use """"""" to underline the individual entries for fixed issues in the subfolders,
+ otherwise the formatting on the webpage is messed up.
+   Also, please use the syntax :issue:`number` to reference issues on redmine, without
+   a space between the colon and number!
+
--- /dev/null
+Highlights
+^^^^^^^^^^
+
+|Gromacs| 2021 was released on INSERT DATE HERE. Patch releases may
+have been made since then, please use the updated versions! Here are
+some highlights of what you can expect, along with more detail in the
+links below!
+
+As always, we've got several useful performance improvements, with or
+without GPUs, all enabled and automated by default. In addition,
+several new features are available for running simulations. We are extremely
+interested in your feedback on how well the new release works on your
+simulations and hardware. The new features are:
+
+* Cool quote autogenerator
+
+
+.. Note to developers!
+ Please use """"""" to underline the individual entries for fixed issues in the subfolders,
+ otherwise the formatting on the webpage is messed up.
+   Also, please use the syntax :issue:`number` to reference issues on redmine, without
+   a space between the colon and number!
--- /dev/null
+Miscellaneous
+^^^^^^^^^^^^^
+
+.. Note to developers!
+ Please use """"""" to underline the individual entries for fixed issues in the subfolders,
+ otherwise the formatting on the webpage is messed up.
+   Also, please use the syntax :issue:`number` to reference issues on redmine, without
+   a space between the colon and number!
+
--- /dev/null
+Performance improvements
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. Note to developers!
+ Please use """"""" to underline the individual entries for fixed issues in the subfolders,
+ otherwise the formatting on the webpage is messed up.
+   Also, please use the syntax :issue:`number` to reference issues on redmine, without
+   a space between the colon and number!
+
--- /dev/null
+Portability
+^^^^^^^^^^^
+
+.. Note to developers!
+ Please use """"""" to underline the individual entries for fixed issues in the subfolders,
+ otherwise the formatting on the webpage is messed up.
+   Also, please use the syntax :issue:`number` to reference issues on redmine, without
+   a space between the colon and number!
+
--- /dev/null
+Removed functionality
+^^^^^^^^^^^^^^^^^^^^^
+
+.. Note to developers!
+ Please use """"""" to underline the individual entries for fixed issues in the subfolders,
+ otherwise the formatting on the webpage is messed up.
+   Also, please use the syntax :issue:`number` to reference issues on redmine, without
+   a space between the colon and number!
+
--- /dev/null
+Improvements to |Gromacs| tools
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. Note to developers!
+ Please use """"""" to underline the individual entries for fixed issues in the subfolders,
+ otherwise the formatting on the webpage is messed up.
+   Also, please use the syntax :issue:`number` to reference issues on redmine, without
+   a space between the colon and number!
+
functionality supported, whereas patch releases contain only fixes for
issues identified in the corresponding major releases.
-Two versions of |Gromacs| are under active maintenance, the 2020
-series and the 2019 series. In the latter, only highly conservative
+Two versions of |Gromacs| are under active maintenance, the 2021
+series and the 2020 series. In the latter, only highly conservative
fixes will be made, and only to address issues that affect scientific
correctness. Naturally, some of those releases will be made after the
-year 2019 ends, but we keep 2018 in the name so users understand how
+year 2020 ends, but we keep 2020 in the name so users understand how
up to date their version is. Such fixes will also be incorporated into
-the 2020 release series, as appropriate. Around the time the 2021
-release is made, the 2019 series will no longer be maintained.
+the 2021 release series, as appropriate. Around the time the 2022
+release is made, the 2020 series will no longer be maintained.
Where issue numbers are reported in these release notes, more details
can be found at https://redmine.gromacs.org at that issue number.
+|Gromacs| 2021 series
+---------------------
+
+.. todolist::
+
+Major release
+^^^^^^^^^^^^^
+
+.. toctree::
+ :maxdepth: 1
+
+ 2021/major/highlights
+ 2021/major/features
+ 2021/major/performance
+ 2021/major/tools
+ 2021/major/bugs-fixed
+ 2021/major/deprecated-functionality
+ 2021/major/removed-functionality
+ 2021/major/portability
+ 2021/major/miscellaneous
+
+
|Gromacs| 2020 series
---------------------
.. Another useful one-liner to find undocumented variables:
.. ( export INPUT_FILE=docs/user-guide/environment-variables.rst; GIT_PAGER="cat "; for ss in `for s in $(git grep getenv | sed 's/.*getenv("\(.*\)".*/\1/' | sort -u | grep '^[A-Z]'); do [ $(grep $s $INPUT_FILE -c) -eq 0 ] && echo $s; done `; do git grep $ss ; done )
-.. TODO: still undocumented GMX_QM_GAUSSIAN_NCPUS
+.. todo:: still undocumented GMX_QM_GAUSSIAN_NCPUS
Environment Variables
=====================
You can choose different values for :mdp:`tinit` and :mdp:`init-step`.
- .. TODO make links work :ref:`Continuing simulations <gmx-cont-simulation>`.
+ .. todo:: Add "Continuing simulations" content (label: gmx-cont-simulation) and link.
+
+ e.g. ``:ref:`Continuing simulations <gmx-cont-simulation>`.``
#. Why can't I do conjugate gradient minimization with constraints?
You can either prepare a new :ref:`mdp` file, or extend the simulation time
in the original :ref:`tpr` file using :ref:`convert-tpr <gmx convert-tpr>`.
- .. TODO #. How do I complete a crashed simulation?
+ .. todo:: #. How do I complete a crashed simulation?
+
+ Need gmx-cont-crash doc target.
+
+ .. code-block:: none
+
+ This can be easily achieved using the checkpoint reading
+ :ref:`available <gmx-cont-crash>` in |Gromacs| versions newer than 4.
- .. This can be easily achieved using the checkpoint reading
- :ref:`available <gmx-cont-crash>` in |Gromacs| versions newer than 4.
+ .. todo:: #. How can I do a simulation at constant pH?
- .. TODO #. How can I do a simulation at constant pH?
+ Need gmx-howto-cph doc target.
- .. This is a rather large topic, and you should at least read the short
- :ref:`Constant pH How-To <gmx-howto-cph>` and all of the literature
- included there to get an overview over the topic.
+ .. code-block:: none
+
+ This is a rather large topic, and you should at least read the short
+ :ref:`Constant pH How-To <gmx-howto-cph>` and all of the literature
+ included there to get an overview over the topic.
#. How should I compute a single-point energy?
Analysis and Visualization
--------------------------
- .. TODO #. How do I visualize a trajectory?
+.. todo:: #. How do I visualize a trajectory?
+
+ gmx-howto-visualize doc target:
+
+ .. code-block:: none
- .. Use one of the number of different programs that can visualize
+ Use one of the number of different programs that can visualize
coordinate :ref:`files and trajectories <gmx-howto-visualize>`.
#. Why am I seeing bonds being created when I watch the trajectory?
* GROMOS 43a1p - 43a1 modified to contain SEP (phosphoserine), TPO (phosphothreonine),
and PTR (phosphotyrosine) (all PO42- forms), and SEPH, TPOH, PTRH (PO4H- forms).
-.. TODO Add new force fields to the list
+.. todo:: Add new force fields to the list
.. _GROMOS: http://www.igc.ethz.ch/gromos/
.. _reference manual: gmx-manual-parent-dir_
|GMX_SOURCE_DOI_STRING|
-.. TODO This is going to require more organization now that
+.. todo::
+
+ This is going to require more organization now that
we are getting more content available.
.. toctree::
See the "run control" section for a working example of the
syntax to use when making .mdp entries, with and without detailed
documentation for values those entries might take. Everything can
- be cross-referenced, see the examples there. TODO Make more
- cross-references.
+ be cross-referenced, see the examples there.
+
+.. todo:: Make more cross-references.
Molecular dynamics parameters (.mdp options)
============================================
Subcounters are geared toward developers and have to be enabled during compilation. See
:doc:`/dev-manual/build-system` for more information.
-.. TODO In future patch:
- - red flags in log files, how to interpret wallcycle output
- - hints to devs how to extend wallcycles
+.. todo::
+
+ In future patch:
+ - red flags in log files, how to interpret wallcycle output
+ - hints to devs how to extend wallcycles
.. _gmx-mdrun-on-gpu:
GPU computation of short range nonbonded interactions
.....................................................
-.. TODO make this more elaborate and include figures
+.. todo:: make this more elaborate and include figures
Using the GPU for the short-ranged nonbonded interactions provides
the majority of the available speed-up compared to run using only the CPU.
GPU accelerated calculation of PME
..................................
-.. TODO again, extend this and add some actual useful information concerning performance etc...
+.. todo:: again, extend this and add some actual useful information concerning performance etc...
|Gromacs| now allows the offloading of the PME calculation
to the GPU, to further reduce the load on the CPU and improve usage overlap between
GPU accelerated calculation of bonded interactions (CUDA only)
..............................................................
-.. TODO again, extend this and add some actual useful information concerning performance etc...
+.. todo:: again, extend this and add some actual useful information concerning performance etc...
|Gromacs| now allows the offloading of the bonded part of the PP
workload to a CUDA-compatible GPU. This is treated as part of the PP
#) The only way to know for sure what alternative is best for
your machine is to test and check performance.
-.. TODO: we need to be more concrete here, i.e. what machine/software aspects to take into consideration, when will default run mode be using PME-GPU and when will it not, when/how should the user reason about testing different settings than the default.
+.. todo:: we need to be more concrete here, i.e. what machine/software aspects to take into consideration, when will default run mode be using PME-GPU and when will it not, when/how should the user reason about testing different settings than the default.
-.. TODO someone who knows about the mixed mode should comment further.
+.. todo:: someone who knows about the mixed mode should comment further.
Reducing overheads in GPU accelerated runs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
involves a tradeoff which may outweigh the benefits of reduced GPU driver overhead,
in particular without HyperThreading and with few CPU cores.
-.. TODO In future patch: any tips not covered above
+.. todo:: In future patch: any tips not covered above
Running the OpenCL version of mdrun
-----------------------------------
provide the OpenCL support.
For Intel integrated GPUs, the `Neo driver <https://github.com/intel/compute-runtime/releases>`_ is
recommended.
-TODO: add more Intel driver recommendations
+.. seealso:: :issue:`3268` add more Intel driver recommendations
+
The minimum OpenCL version required is |REQUIRED_OPENCL_MIN_VERSION|. See
also the :ref:`known limitations <opencl-known-limitations>`.
* search the primary literature for publications for parameters for the
residue that are consistent with the force field that is being used.
-.. TODO Once you have determined the parameters and topology for your residue, see
- :ref:`adding a residue to a force field <gmx-add-new-residue>` for instructions on how to proceed.
+.. todo:: gmx-add-new-residue doc target
+
+ Need gmx-add-new-residue doc target.
+
+ .. code-block:: none
+
+ Once you have determined the parameters and topology for your residue, see
+ :ref:`adding a residue to a force field <gmx-add-new-residue>` for instructions on how to proceed.
Long bonds and/or missing atoms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2015,2016,2017,2018,2019, by the GROMACS development team, led by
+# Copyright (c) 2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
# Python package requirements for complete build, installation, and testing of
# gmxapi functionality.
cmake>=3.9.6
+flake8>=3.7.7
networkx>=2.0
pip>=10.1
setuptools>=28.0.0
import pytest
+pytest_plugins = ('gmxapi.testsupport',)
def pytest_addoption(parser):
"""Add a command-line user option for the pytest invocation."""
from gmxapi.simulation.workflow import WorkElement, from_tpr
from gmxapi import version as gmx_version
import pytest
-from gmxapi.testsupport import withmpi_only
logging.getLogger().setLevel(logging.DEBUG)
# create console handler
session.run()
-@withmpi_only
+@pytest.mark.withmpi_only
@pytest.mark.usefixtures("cleandir")
def test_ensemble_potential_withmpi(spc_water_box):
tpr_filename = spc_water_box
--- /dev/null
+#!/bin/bash -x
+
+# Note: E501 specifies a line length limit of 79 characters by default, but GROMACS allows more.
+# Ref: http://manual.gromacs.org/current/dev-manual/formatting.html
+# Note: --max-complexity can be used to check McCabe complexity. Compliance
+# could be a future goal, but is not yet a GROMACS priority.
+# W503 is ignored in accordance with current PEP 8 conventions (which prefer
+# breaking before binary operators), but must be explicitly disabled in at
+# least some versions.
+# http://pycodestyle.pycqa.org/en/latest/intro.html#error-codes
+# https://www.python.org/dev/peps/pep-0008/#should-a-line-break-before-or-after-a-binary-operator
+
+# Check gmxapi package sources.
+python -m flake8 --ignore W503 --max-line-length 119 ${HOME}/gmxapi/src/gmxapi
+
+# Check gmxapi unit tests.
+python -m flake8 --ignore W503 --max-line-length 119 ${HOME}/gmxapi/src/test
+
+# Check gmxapi acceptance tests.
+python -m flake8 --ignore W503 --max-line-length 119 ${HOME}/gmxapi/test
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2019, by the GROMACS development team, led by
+# Copyright (c) 2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2019, by the GROMACS development team, led by
+# Copyright (c) 2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.
+
"""
Provide command line operation.
"""
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2019, by the GROMACS development team, led by
+# Copyright (c) 2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2019, by the GROMACS development team, led by
+# Copyright (c) 2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
"""Reusable definitions for test modules.
-Define the ``withmpi_only`` test decorator.
+Provides utilities and pytest fixtures for gmxapi and GROMACS tests.
+
+To load these facilities in a pytest environment, set a `pytest_plugins`
+variable in a conftest.py
+(Reference https://docs.pytest.org/en/latest/writing_plugins.html#requiring-loading-plugins-in-a-test-module-or-conftest-file)
+
+ pytest_plugins = "gmxapi.testsupport"
+
+.. seealso:: https://docs.pytest.org/en/latest/plugins.html#findpluginname
.. todo:: Consider moving this to a separate optional package.
"""
import pytest
-mpi_requirement = 'Test requires mpi4py managing 2 MPI ranks.'
+mpi_status = 'Test requires mpi4py managing 2 MPI ranks.'
+skip_mpi = False
try:
from mpi4py import MPI
if not MPI.Is_initialized():
skip_mpi = True
- reason = mpi_requirement + ' MPI is not initialized'
+ mpi_status += ' MPI is not initialized'
elif MPI.COMM_WORLD.Get_size() < 2:
skip_mpi = True
- reason = mpi_requirement + ' MPI context is too small.'
- else:
- skip_mpi = False
- reason = ''
- withmpi_only = pytest.mark.skipif(skip_mpi, reason=reason)
+ mpi_status += ' MPI context is too small.'
except ImportError:
- withmpi_only = pytest.mark.skip(
- reason=mpi_requirement + ' mpi4py is not available.')
+ skip_mpi = True
+ mpi_status += ' mpi4py is not available.'
+
+
+def pytest_configure(config):
+ config.addinivalue_line("markers", "withmpi_only: test requires mpi4py managing 2 MPI ranks.")
+
+
+def pytest_runtest_setup(item):
+ # Handle the withmpi_only marker.
+ for _ in item.iter_markers(name='withmpi_only'):
+ if skip_mpi:
+ pytest.skip(mpi_status)
+ # The API uses iteration because markers may be duplicated, but we only
+ # care about whether 'withmpi_only' occurs at all.
+ break
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2019, by the GROMACS development team, led by
+# Copyright (c) 2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2019, by the GROMACS development team, led by
+# Copyright (c) 2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
import pytest
+pytest_plugins = ('gmxapi.testsupport',)
def pytest_addoption(parser):
"""Add a command-line user option for the pytest invocation."""
import pytest
import gmxapi as gmx
-from gmxapi.testsupport import withmpi_only
# Configure the `logging` module before proceeding any further.
gmx.logger.setLevel(logging.WARNING)
# TODO: better handling of output on unused MPI ranks.
-@withmpi_only
+@pytest.mark.withmpi_only
@pytest.mark.usefixtures('cleandir')
def test_run_trivial_ensemble(spc_water_box, caplog):
from mpi4py import MPI
import pytest
+pytest_plugins = ('gmxapi.testsupport',)
def pytest_addoption(parser):
"""Add a command-line user option for the pytest invocation."""
"-Wno-double-promotion")
string(REPLACE " " ";" IGNORED_CLANG_ALL_WARNINGS "${IGNORED_CLANG_ALL_WARNINGS}")
+option(GMX_CLANG_TIDY "Use clang-tidy" OFF)
if (GMX_CLANG_TIDY)
+ if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
+ elseif("${CMAKE_BUILD_TYPE}" STREQUAL "RelWithAssert")
+ elseif("${CMAKE_BUILD_TYPE}" STREQUAL "RelWithDebInfo")
+ elseif("${CMAKE_BUILD_TYPE}" STREQUAL "ASAN")
+ else()
+ message(FATAL_ERROR "Can only use clang-tidy with build type containing asserts: Debug, RelWithAssert, RelWithDebInfo, ASAN.")
+ endif()
+ set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
+ mark_as_advanced(CMAKE_EXPORT_COMPILE_COMMANDS)
set(CLANG_TIDY "clang-tidy" CACHE STRING "Name of clang-tidy executable")
find_program(CLANG_TIDY_EXE NAMES "${CLANG_TIDY}"
DOC "Path to clang-tidy executable")
if(NOT CLANG_TIDY_EXE)
message(FATAL_ERROR "clang-tidy not found.")
endif()
+ mark_as_advanced(CLANG_TIDY)
+ mark_as_advanced(CLANG_TIDY_EXE)
endif()
-#####
add_subdirectory(external)
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2018,2019, by the GROMACS development team, led by
+# Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2018,2019, by the GROMACS development team, led by
+# Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
/* Use ORCA for QM-MM calculations */
#cmakedefine01 GMX_QMMM_ORCA
-/* Use cycle counters */
-#cmakedefine01 GMX_CYCLECOUNTERS
-
/* Use sub-counters */
#cmakedefine01 GMX_CYCLE_SUBCOUNTERS
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
gmx::ArrayRef<const gmx::RVec> xRef = constArrayRefFromArray(
x.data(), masterRankHasTheSystemState ? x.size() : dd.comm->atomRanges.numHomeAtoms());
- low_set_ddbox(dd.unitCellInfo.npbcdim, dd.unitCellInfo.numBoundedDimensions, &dd.nc, box,
- calculateUnboundedSize, xRef,
+ low_set_ddbox(dd.unitCellInfo.npbcdim, dd.unitCellInfo.numBoundedDimensions, &dd.numCells,
+ box, calculateUnboundedSize, xRef,
needToReduceCoordinateData ? &dd.mpi_comm_all : nullptr, ddbox);
}
int sh;
comm = dd->comm;
- nc = dd->nc[ddpme->dim];
+ nc = dd->numCells[ddpme->dim];
ns = ddpme->nslab;
if (!ddpme->dim_match)
dim = dd->dim[d];
if (dim < ddbox->nboundeddim
&& ddbox->box_size[dim] * ddbox->skew_fac[dim]
- < dd->nc[dim] * dd->comm->cellsize_limit * DD_CELL_MARGIN)
+ < dd->numCells[dim] * dd->comm->cellsize_limit * DD_CELL_MARGIN)
{
gmx_fatal(
FARGS,
"The %c-size of the box (%f) times the triclinic skew factor (%f) is smaller "
"than the number of DD cells (%d) times the smallest allowed cell size (%f)\n",
- dim2char(dim), ddbox->box_size[dim], ddbox->skew_fac[dim], dd->nc[dim],
+ dim2char(dim), ddbox->box_size[dim], ddbox->skew_fac[dim], dd->numCells[dim],
dd->comm->cellsize_limit);
}
}
{
cellsize_min[d] = ddbox->box_size[d] * ddbox->skew_fac[d];
npulse[d] = 1;
- if (dd->nc[d] == 1 || comm->slb_frac[d] == nullptr)
+ if (dd->numCells[d] == 1 || comm->slb_frac[d] == nullptr)
{
/* Uniform grid */
- real cell_dx = ddbox->box_size[d] / dd->nc[d];
+ real cell_dx = ddbox->box_size[d] / dd->numCells[d];
switch (setmode)
{
case setcellsizeslbMASTER:
- for (int j = 0; j < dd->nc[d] + 1; j++)
+ for (int j = 0; j < dd->numCells[d] + 1; j++)
{
cell_x_master[d][j] = ddbox->box0[d] + j * cell_dx;
}
}
else
{
- cell_x_buffer.resize(dd->nc[d] + 1);
+ cell_x_buffer.resize(dd->numCells[d] + 1);
cell_x = cell_x_buffer;
}
cell_x[0] = ddbox->box0[d];
- for (int j = 0; j < dd->nc[d]; j++)
+ for (int j = 0; j < dd->numCells[d]; j++)
{
real cell_dx = ddbox->box_size[d] * comm->slb_frac[d][j];
cell_x[j + 1] = cell_x[j] + cell_dx;
real cellsize = cell_dx * ddbox->skew_fac[d];
- while (cellsize * npulse[d] < comm->systemInfo.cutoff && npulse[d] < dd->nc[d] - 1)
+ while (cellsize * npulse[d] < comm->systemInfo.cutoff && npulse[d] < dd->numCells[d] - 1)
{
npulse[d]++;
}
* some of its own home charge groups back over the periodic boundary.
* Double charge groups cause trouble with the global indices.
*/
- if (d < ddbox->npbcdim && dd->nc[d] > 1 && npulse[d] >= dd->nc[d])
+ if (d < ddbox->npbcdim && dd->numCells[d] > 1 && npulse[d] >= dd->numCells[d])
{
char error_string[STRLEN];
"small for a cut-off of %f with %d domain decomposition cells, use 1 or more "
"than %d %s or increase the box size in this direction",
dim2char(d), ddbox->box_size[d], ddbox->skew_fac[d], comm->systemInfo.cutoff,
- dd->nc[d], dd->nc[d], dd->nnodes > dd->nc[d] ? "cells" : "ranks");
+ dd->numCells[d], dd->numCells[d], dd->nnodes > dd->numCells[d] ? "cells" : "ranks");
if (setmode == setcellsizeslbLOCAL)
{
comm = dd->comm;
- const int ncd = dd->nc[dim];
+ const int ncd = dd->numCells[dim];
const bool dimHasPbc = (dim < ddbox->npbcdim);
if (!rowMaster->isCellMin[i])
{
cell_size[i] *= fac;
- if (!dimHasPbc && (i == 0 || i == dd->nc[dim] - 1))
+ if (!dimHasPbc && (i == 0 || i == dd->numCells[dim] - 1))
{
cellsize_limit_f_i = 0;
}
/* Convert the maximum change from the input percentage to a fraction */
const real change_limit = comm->ddSettings.dlb_scale_lim * 0.01;
- const int ncd = dd->nc[dim];
+ const int ncd = dd->numCells[dim];
const bool bPBC = (dim < ddbox->npbcdim);
rowMaster->cellFrac[i], rowMaster->cellFrac[i + 1]);
}
- if ((bPBC || (i != 0 && i != dd->nc[dim] - 1))
+ if ((bPBC || (i != 0 && i != dd->numCells[dim] - 1))
&& rowMaster->cellFrac[i + 1] - rowMaster->cellFrac[i] < cellsize_limit_f / DD_CELL_MARGIN)
{
char buf[22];
comm.cellsizesWithDlb[d].fracLower = cellFracRow[dd->ci[dim]];
comm.cellsizesWithDlb[d].fracUpper = cellFracRow[dd->ci[dim] + 1];
/* The whole array was communicated, so set the buffer position */
- int pos = dd->nc[dim] + 1;
+ int pos = dd->numCells[dim] + 1;
for (int d1 = 0; d1 <= d; d1++)
{
if (d1 < d)
/* Set the dimensions for which no DD is used */
for (dim = 0; dim < DIM; dim++)
{
- if (dd->nc[dim] == 1)
+ if (dd->numCells[dim] == 1)
{
comm->cell_x0[dim] = 0;
comm->cell_x1[dim] = ddbox->box_size[dim];
if (d < dd.unitCellInfo.npbcdim)
{
bool bScrew = (dd.unitCellInfo.haveScrewPBC && d == XX);
- if (ddbox.tric_dir[d] && dd.nc[d] > 1)
+ if (ddbox.tric_dir[d] && dd.numCells[d] > 1)
{
/* Use triclinic coordinates for this dimension */
for (int j = d + 1; j < DIM; j++)
}
/* This could be done more efficiently */
ind[d] = 0;
- while (ind[d] + 1 < dd.nc[d] && pos_d >= cellBoundaries[d][ind[d] + 1])
+ while (ind[d] + 1 < dd.numCells[d] && pos_d >= cellBoundaries[d][ind[d] + 1])
{
ind[d]++;
}
}
- return dd_index(dd.nc, ind);
+ return dd_index(dd.numCells, ind);
}
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019, by the.
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
{ 2, 5, 6 },
{ 3, 5, 7 } };
-
-/*
- #define dd_index(n,i) ((((i)[ZZ]*(n)[YY] + (i)[YY])*(n)[XX]) + (i)[XX])
-
- static void index2xyz(ivec nc,int ind,ivec xyz)
- {
- xyz[XX] = ind % nc[XX];
- xyz[YY] = (ind / nc[XX]) % nc[YY];
- xyz[ZZ] = ind / (nc[YY]*nc[XX]);
- }
- */
-
static void ddindex2xyz(const ivec nc, int ind, ivec xyz)
{
xyz[XX] = ind / (nc[YY] * nc[ZZ]);
int ddnodeid = -1;
const CartesianRankSetup& cartSetup = dd->comm->cartesianRankSetup;
- const int ddindex = dd_index(dd->nc, c);
+ const int ddindex = dd_index(dd->numCells, c);
if (cartSetup.bCartesianPP_PME)
{
ddnodeid = cartSetup.ddindex2ddnodeid[ddindex];
ivec coords;
int slab;
- dd = cr->dd;
- /*
- if (dd->comm->bCartesian) {
- gmx_ddindex2xyz(dd->nc,ddindex,coords);
- dd_coords2pmecoords(dd,coords,coords_pme);
- copy_ivec(dd->ntot,nc);
- nc[dd->cartpmedim] -= dd->nc[dd->cartpmedim];
- coords_pme[dd->cartpmedim] -= dd->nc[dd->cartpmedim];
-
- slab = (coords_pme[XX]*nc[YY] + coords_pme[YY])*nc[ZZ] + coords_pme[ZZ];
- } else {
- slab = (ddindex*cr->npmenodes + cr->npmenodes/2)/dd->nnodes;
- }
- */
+ dd = cr->dd;
coords[XX] = x;
coords[YY] = y;
coords[ZZ] = z;
- slab = ddindex2pmeindex(dd->comm->ddRankSetup, dd_index(dd->nc, coords));
+ slab = ddindex2pmeindex(dd->comm->ddRankSetup, dd_index(dd->numCells, coords));
return slab;
}
}
else
{
- int ddindex = dd_index(cr->dd->nc, coords);
+ int ddindex = dd_index(cr->dd->numCells, coords);
if (cartSetup.bCartesianPP)
{
nodeid = cartSetup.ddindex2simnodeid[ddindex];
ivec coords;
MPI_Cart_coords(cr->mpi_comm_mysim, cr->sim_nodeid, DIM, coords);
coords[cartSetup.cartpmedim]++;
- if (coords[cartSetup.cartpmedim] < dd->nc[cartSetup.cartpmedim])
+ if (coords[cartSetup.cartpmedim] < dd->numCells[cartSetup.cartpmedim])
{
int rank;
MPI_Cart_rank(cr->mpi_comm_mysim, coords, &rank);
comm = dd->comm;
- snew(*dim_f, dd->nc[dim] + 1);
+ snew(*dim_f, dd->numCells[dim] + 1);
(*dim_f)[0] = 0;
- for (i = 1; i < dd->nc[dim]; i++)
+ for (i = 1; i < dd->numCells[dim]; i++)
{
if (comm->slb_frac[dim])
{
}
else
{
- (*dim_f)[i] = static_cast<real>(i) / static_cast<real>(dd->nc[dim]);
+ (*dim_f)[i] = static_cast<real>(i) / static_cast<real>(dd->numCells[dim]);
}
}
- (*dim_f)[dd->nc[dim]] = 1;
+ (*dim_f)[dd->numCells[dim]] = 1;
}
static void init_ddpme(gmx_domdec_t* dd, gmx_ddpme_t* ddpme, int dimind)
snew(ddpme->pp_max, ddpme->nslab);
for (int slab = 0; slab < ddpme->nslab; slab++)
{
- ddpme->pp_min[slab] = dd->nc[dd->dim[dimind]] - 1;
+ ddpme->pp_min[slab] = dd->numCells[dd->dim[dimind]] - 1;
ddpme->pp_max[slab] = 0;
}
for (int i = 0; i < dd->nnodes; i++)
{
ivec xyz;
- ddindex2xyz(dd->nc, i, xyz);
+ ddindex2xyz(dd->numCells, i, xyz);
/* For y only use our y/z slab.
* This assumes that the PME x grid size matches the DD grid size.
*/
dim = dd->dim[dim_ind];
copy_ivec(loc, loc_c);
- for (i = 0; i < dd->nc[dim]; i++)
+ for (i = 0; i < dd->numCells[dim]; i++)
{
loc_c[dim] = i;
- rank = dd_index(dd->nc, loc_c);
+ rank = dd_index(dd->numCells, loc_c);
if (rank == dd->rank)
{
/* This process is part of the group */
RowMaster& rowMaster = *cellsizes.rowMaster;
rowMaster.cellFrac.resize(ddCellFractionBufferSize(dd, dim_ind));
- rowMaster.oldCellFrac.resize(dd->nc[dim] + 1);
- rowMaster.isCellMin.resize(dd->nc[dim]);
+ rowMaster.oldCellFrac.resize(dd->numCells[dim] + 1);
+ rowMaster.isCellMin.resize(dd->numCells[dim]);
if (dim_ind > 0)
{
- rowMaster.bounds.resize(dd->nc[dim]);
+ rowMaster.bounds.resize(dd->numCells[dim]);
}
- rowMaster.buf_ncd.resize(dd->nc[dim]);
+ rowMaster.buf_ncd.resize(dd->numCells[dim]);
}
else
{
}
if (dd->ci[dim] == dd->master_ci[dim])
{
- snew(dd->comm->load[dim_ind].load, dd->nc[dim] * DD_NLOAD_MAX);
+ snew(dd->comm->load[dim_ind].load, dd->numCells[dim] * DD_NLOAD_MAX);
}
}
}
if (dd->ndim > 1)
{
dim0 = dd->dim[0];
- for (i = 0; i < dd->nc[dim0]; i++)
+ for (i = 0; i < dd->numCells[dim0]; i++)
{
loc[dim0] = i;
make_load_communicator(dd, 1, loc);
if (dd->ndim > 2)
{
dim0 = dd->dim[0];
- for (i = 0; i < dd->nc[dim0]; i++)
+ for (i = 0; i < dd->numCells[dim0]; i++)
{
loc[dim0] = i;
dim1 = dd->dim[1];
- for (j = 0; j < dd->nc[dim1]; j++)
+ for (j = 0; j < dd->numCells[dim1]; j++)
{
loc[dim1] = j;
make_load_communicator(dd, 2, loc);
{
dim = dd->dim[d];
copy_ivec(dd->ci, tmp);
- tmp[dim] = (tmp[dim] + 1) % dd->nc[dim];
+ tmp[dim] = (tmp[dim] + 1) % dd->numCells[dim];
dd->neighbor[d][0] = ddcoord2ddnodeid(dd, tmp);
copy_ivec(dd->ci, tmp);
- tmp[dim] = (tmp[dim] - 1 + dd->nc[dim]) % dd->nc[dim];
+ tmp[dim] = (tmp[dim] - 1 + dd->numCells[dim]) % dd->numCells[dim];
dd->neighbor[d][1] = ddcoord2ddnodeid(dd, tmp);
if (debug)
{
s[d] = dd->ci[d] - zones->shift[i][d];
if (s[d] < 0)
{
- s[d] += dd->nc[d];
+ s[d] += dd->numCells[d];
}
- else if (s[d] >= dd->nc[d])
+ else if (s[d] >= dd->numCells[d])
{
- s[d] -= dd->nc[d];
+ s[d] -= dd->numCells[d];
}
}
}
std::min(ddNonbondedZonePairRanges[iZoneIndex][2], nzone));
for (dim = 0; dim < DIM; dim++)
{
- if (dd->nc[dim] == 1)
+ if (dd->numCells[dim] == 1)
{
/* All shifts should be allowed */
iZone.shift0[dim] = -1;
{
/* Set up cartesian communication for the particle-particle part */
GMX_LOG(mdlog.info)
- .appendTextFormatted("Will use a Cartesian communicator: %d x %d x %d", dd->nc[XX],
- dd->nc[YY], dd->nc[ZZ]);
+ .appendTextFormatted("Will use a Cartesian communicator: %d x %d x %d",
+ dd->numCells[XX], dd->numCells[YY], dd->numCells[ZZ]);
ivec periods;
for (int i = 0; i < DIM; i++)
periods[i] = TRUE;
}
MPI_Comm comm_cart;
- MPI_Cart_create(cr->mpi_comm_mygroup, DIM, dd->nc, periods, static_cast<int>(reorder), &comm_cart);
+ MPI_Cart_create(cr->mpi_comm_mygroup, DIM, dd->numCells, periods, static_cast<int>(reorder),
+ &comm_cart);
/* We overwrite the old communicator with the new cartesian one */
cr->mpi_comm_mygroup = comm_cart;
}
* and not the one after split, we need to make an index.
*/
cartSetup.ddindex2ddnodeid.resize(dd->nnodes);
- cartSetup.ddindex2ddnodeid[dd_index(dd->nc, dd->ci)] = dd->rank;
+ cartSetup.ddindex2ddnodeid[dd_index(dd->numCells, dd->ci)] = dd->rank;
gmx_sumi(dd->nnodes, cartSetup.ddindex2ddnodeid.data(), cr);
/* Get the rank of the DD master,
* above we made sure that the master node is a PP node.
std::vector<int> buf(dd->nnodes);
if (thisRankHasDuty(cr, DUTY_PP))
{
- buf[dd_index(dd->nc, dd->ci)] = cr->sim_nodeid;
+ buf[dd_index(dd->numCells, dd->ci)] = cr->sim_nodeid;
}
/* Communicate the ddindex to simulation nodeid index */
MPI_Allreduce(buf.data(), cartSetup.ddindex2simnodeid.data(), dd->nnodes, MPI_INT, MPI_SUM,
{
if (cartSetup.ddindex2simnodeid[i] == 0)
{
- ddindex2xyz(dd->nc, i, dd->master_ci);
+ ddindex2xyz(dd->numCells, i, dd->master_ci);
MPI_Cart_rank(dd->mpi_comm_all, dd->master_ci, &dd->masterrank);
}
}
{
/* No Cartesian communicators */
/* We use the rank in dd->comm->all as DD index */
- ddindex2xyz(dd->nc, dd->rank, dd->ci);
+ ddindex2xyz(dd->numCells, dd->rank, dd->ci);
/* The simulation master nodeid is 0, so the DD master rank is also 0 */
dd->masterrank = 0;
clear_ivec(dd->master_ci);
std::vector<int> buf(dd->nnodes);
if (thisRankHasDuty(cr, DUTY_PP))
{
- buf[dd_index(dd->nc, dd->ci)] = cr->sim_nodeid;
+ buf[dd_index(dd->numCells, dd->ci)] = cr->sim_nodeid;
}
/* Communicate the ddindex to simulation nodeid index */
MPI_Allreduce(buf.data(), cartSetup.ddindex2simnodeid.data(), dd->nnodes, MPI_INT, MPI_SUM,
/* We can not use DDMASTER(dd), because dd->masterrank is set later */
if (MASTER(cr))
{
- dd->ma = std::make_unique<AtomDistribution>(dd->nc, numAtomsInSystem, numAtomsInSystem);
+ dd->ma = std::make_unique<AtomDistribution>(dd->numCells, numAtomsInSystem, numAtomsInSystem);
}
}
static void check_dd_restrictions(const gmx_domdec_t* dd, const t_inputrec* ir, const gmx::MDLogger& mdlog)
{
- if (ir->ePBC == epbcSCREW && (dd->nc[XX] == 1 || dd->nc[YY] > 1 || dd->nc[ZZ] > 1))
+ if (ir->ePBC == epbcSCREW && (dd->numCells[XX] == 1 || dd->numCells[YY] > 1 || dd->numCells[ZZ] > 1))
{
gmx_fatal(FARGS, "With pbc=%s can only do domain decomposition in the x-direction",
epbc_names[ir->ePBC]);
}
/* Set the DD setup given by ddGridSetup */
- copy_ivec(ddGridSetup.numDomains, dd->nc);
+ copy_ivec(ddGridSetup.numDomains, dd->numCells);
dd->ndim = ddGridSetup.numDDDimensions;
copy_ivec(ddGridSetup.ddDimensions, dd->dim);
- dd->nnodes = dd->nc[XX] * dd->nc[YY] * dd->nc[ZZ];
+ dd->nnodes = dd->numCells[XX] * dd->numCells[YY] * dd->numCells[ZZ];
snew(comm->slb_frac, DIM);
if (isDlbDisabled(comm))
{
- comm->slb_frac[XX] = get_slb_frac(mdlog, "x", dd->nc[XX], options.cellSizeX);
- comm->slb_frac[YY] = get_slb_frac(mdlog, "y", dd->nc[YY], options.cellSizeY);
- comm->slb_frac[ZZ] = get_slb_frac(mdlog, "z", dd->nc[ZZ], options.cellSizeZ);
+ comm->slb_frac[XX] = get_slb_frac(mdlog, "x", dd->numCells[XX], options.cellSizeX);
+ comm->slb_frac[YY] = get_slb_frac(mdlog, "y", dd->numCells[YY], options.cellSizeY);
+ comm->slb_frac[ZZ] = get_slb_frac(mdlog, "z", dd->numCells[ZZ], options.cellSizeZ);
}
/* Set the multi-body cut-off and cellsize limit for DLB */
* the minimum and the maximum,
* since the extra communication cost is nearly zero.
*/
- real acs = average_cellsize_min(ddbox, dd->nc);
+ real acs = average_cellsize_min(ddbox, dd->numCells);
comm->cutoff_mbody = 0.5 * (systemInfo.minCutoffForMultiBody + acs);
if (!isDlbDisabled(comm))
{
}
}
-void dd_init_bondeds(FILE* fplog,
- gmx_domdec_t* dd,
- const gmx_mtop_t* mtop,
- const gmx_vsite_t* vsite,
- const t_inputrec* ir,
- gmx_bool bBCheck,
- cginfo_mb_t* cginfo_mb)
+void dd_init_bondeds(FILE* fplog,
+ gmx_domdec_t* dd,
+ const gmx_mtop_t& mtop,
+ const gmx_vsite_t* vsite,
+ const t_inputrec* ir,
+ gmx_bool bBCheck,
+ gmx::ArrayRef<cginfo_mb_t> cginfo_mb)
{
gmx_domdec_comm_t* comm;
- dd_make_reverse_top(fplog, dd, mtop, vsite, ir, bBCheck);
+ dd_make_reverse_top(fplog, dd, &mtop, vsite, ir, bBCheck);
comm = dd->comm;
log->writeString("The allowed shrink of domain decomposition cells is:");
for (d = 0; d < DIM; d++)
{
- if (dd->nc[d] > 1)
+ if (dd->numCells[d] > 1)
{
- if (d >= ddbox->npbcdim && dd->nc[d] == 2)
+ if (d >= ddbox->npbcdim && dd->numCells[d] == 2)
{
shrink = 0;
}
else
{
shrink = comm->cellsize_min_dlb[d]
- / (ddbox->box_size[d] * ddbox->skew_fac[d] / dd->nc[d]);
+ / (ddbox->box_size[d] * ddbox->skew_fac[d] / dd->numCells[d]);
}
log->writeStringFormatted(" %c %.2f", dim2char(d), shrink);
}
log->writeString("The initial domain decomposition cell size is:");
for (d = 0; d < DIM; d++)
{
- if (dd->nc[d] > 1)
+ if (dd->numCells[d] > 1)
{
log->writeStringFormatted(" %c %.2f nm", dim2char(d), dd->comm->cellsize_min[d]);
}
else
{
/* There is no cell size limit */
- npulse = std::max(dd->nc[XX] - 1, std::max(dd->nc[YY] - 1, dd->nc[ZZ] - 1));
+ npulse = std::max(dd->numCells[XX] - 1, std::max(dd->numCells[YY] - 1, dd->numCells[ZZ] - 1));
}
if (!bNoCutOff && npulse > 1)
dim = dd->dim[d];
npulse_d = static_cast<int>(
1
- + dd->nc[dim] * comm->systemInfo.cutoff
+ + dd->numCells[dim] * comm->systemInfo.cutoff
/ (ddbox->box_size[dim] * ddbox->skew_fac[dim] * dlb_scale));
npulse_d_max = std::max(npulse_d_max, npulse_d);
}
}
else
{
- comm->cd[d].np_dlb = std::min(npulse, dd->nc[dd->dim[d]] - 1);
+ comm->cd[d].np_dlb = std::min(npulse, dd->numCells[dd->dim[d]] - 1);
comm->maxpulse = std::max(comm->maxpulse, comm->cd[d].np_dlb);
}
- if (comm->cd[d].np_dlb < dd->nc[dd->dim[d]] - 1)
+ if (comm->cd[d].np_dlb < dd->numCells[dd->dim[d]] - 1)
{
comm->bVacDLBNoLimit = FALSE;
}
* we do not need to take pbc into account for the bonded interactions.
*/
return (ePBC != epbcNONE && dd->comm->systemInfo.haveInterDomainBondeds
- && !(dd->nc[XX] > 1 && dd->nc[YY] > 1 && (dd->nc[ZZ] > 1 || ePBC == epbcXY)));
+ && !(dd->numCells[XX] > 1 && dd->numCells[YY] > 1 && (dd->numCells[ZZ] > 1 || ePBC == epbcXY)));
}
/*! \brief Sets grid size limits and PP-PME setup, prints settings to log */
}
else
{
- vol_frac = (1 + comm_box_frac(dd->nc, comm->systemInfo.cutoff, *ddbox))
+ vol_frac = (1 + comm_box_frac(dd->numCells, comm->systemInfo.cutoff, *ddbox))
/ static_cast<double>(dd->nnodes);
}
if (debug)
bool is1DAnd1PulseDD(const gmx_domdec_t& dd)
{
- const int maxDimensionSize = std::max(dd.nc[XX], std::max(dd.nc[YY], dd.nc[ZZ]));
- const int productOfDimensionSizes = dd.nc[XX] * dd.nc[YY] * dd.nc[ZZ];
+ const int maxDimensionSize = std::max(dd.numCells[XX], std::max(dd.numCells[YY], dd.numCells[ZZ]));
+ const int productOfDimensionSizes = dd.numCells[XX] * dd.numCells[YY] * dd.numCells[ZZ];
const bool decompositionHasOneDimension = (maxDimensionSize == productOfDimensionSizes);
const bool hasMax1Pulse =
{
dim = dd->dim[d];
- inv_cell_size = DD_CELL_MARGIN * dd->nc[dim] / ddbox.box_size[dim];
+ inv_cell_size = DD_CELL_MARGIN * dd->numCells[dim] / ddbox.box_size[dim];
if (dd->unitCellInfo.ddBoxIsDynamic)
{
inv_cell_size *= DD_PRES_SCALE_MARGIN;
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 2005 - 2014, The GROMACS development team.
- * Copyright (c) 2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
bool is1DAnd1PulseDD(const gmx_domdec_t& dd);
/*! \brief Initialize data structures for bonded interactions */
-void dd_init_bondeds(FILE* fplog,
- gmx_domdec_t* dd,
- const gmx_mtop_t* mtop,
- const gmx_vsite_t* vsite,
- const t_inputrec* ir,
- gmx_bool bBCheck,
- cginfo_mb_t* cginfo_mb);
+void dd_init_bondeds(FILE* fplog,
+ gmx_domdec_t* dd,
+ const gmx_mtop_t& mtop,
+ const gmx_vsite_t* vsite,
+ const t_inputrec* ir,
+ gmx_bool bBCheck,
+ gmx::ArrayRef<cginfo_mb_t> cginfo_mb);
/*! \brief Returns whether molecules are always whole, i.e. not broken by PBC */
bool dd_moleculesAreAlwaysWhole(const gmx_domdec_t& dd);
*
* Also stores whether atoms are linked in \p cginfo_mb.
*/
-t_blocka* makeBondedLinks(const gmx_mtop_t* mtop, cginfo_mb_t* cginfo_mb);
+t_blocka* makeBondedLinks(const gmx_mtop_t& mtop, gmx::ArrayRef<cginfo_mb_t> cginfo_mb);
/*! \brief Calculate the maximum distance involved in 2-body and multi-body bonded interactions */
void dd_bonded_cg_distance(const gmx::MDLogger& mdlog,
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
-#include "gromacs/utility/smalloc.h"
+#include "gromacs/utility/listoflists.h"
#include "domdec_internal.h"
#include "domdec_specatomcomm.h"
+using gmx::ListOfLists;
+
/*! \brief Struct used during constraint setup with domain decomposition */
struct gmx_domdec_constraints_t
{
int nrec,
gmx::ArrayRef<const int> ia1,
gmx::ArrayRef<const int> ia2,
- const t_blocka* at2con,
+ const ListOfLists<int>& at2con,
const gmx_ga2la_t& ga2la,
gmx_bool bHomeConnect,
gmx_domdec_constraints_t* dc,
t_ilist* il_local,
std::vector<int>* ireq)
{
- int a1_gl, a2_gl, i, coni, b;
- const t_iatom* iap;
-
if (!dc->gc_req[con_offset + con])
{
/* Add this non-home constraint to the list */
il_local->nalloc = over_alloc_dd(il_local->nr + 3);
srenew(il_local->iatoms, il_local->nalloc);
}
- iap = constr_iatomptr(ia1, ia2, con);
+ const int* iap = constr_iatomptr(ia1, ia2, con);
il_local->iatoms[il_local->nr++] = iap[0];
- a1_gl = offset + iap[1];
- a2_gl = offset + iap[2];
+ const int a1_gl = offset + iap[1];
+ const int a2_gl = offset + iap[2];
/* The following indexing code can probably be optizimed */
if (const int* a_loc = ga2la.findHome(a1_gl))
{
if (nrec > 0)
{
- for (i = at2con->index[a]; i < at2con->index[a + 1]; i++)
+ /* Loop over the constraint connected to atom a */
+ for (const int coni : at2con[a])
{
- coni = at2con->a[i];
if (coni != con)
{
/* Walk further */
- iap = constr_iatomptr(ia1, ia2, coni);
+ const int* iap = constr_iatomptr(ia1, ia2, coni);
+ int b;
if (a == iap[1])
{
b = iap[2];
}
/*! \brief Looks up constraint for the local atoms */
-static void atoms_to_constraints(gmx_domdec_t* dd,
- const gmx_mtop_t* mtop,
- const int* cginfo,
- gmx::ArrayRef<const t_blocka> at2con_mt,
- int nrec,
- t_ilist* ilc_local,
- std::vector<int>* ireq)
+static void atoms_to_constraints(gmx_domdec_t* dd,
+ const gmx_mtop_t* mtop,
+ const int* cginfo,
+ gmx::ArrayRef<const ListOfLists<int>> at2con_mt,
+ int nrec,
+ t_ilist* ilc_local,
+ std::vector<int>* ireq)
{
- const t_blocka* at2con;
- int b_lo, offset, b_mol, i, con, con_offset;
-
gmx_domdec_constraints_t* dc = dd->constraints;
gmx_domdec_specat_comm_t* dcc = dd->constraint_comm;
* This is only required for the global index to make sure
* that we use each constraint only once.
*/
- con_offset = dc->molb_con_offset[mb] + molnr * dc->molb_ncon_mol[mb];
+ const int con_offset = dc->molb_con_offset[mb] + molnr * dc->molb_ncon_mol[mb];
/* The global atom number offset for this molecule */
- offset = a_gl - a_mol;
- at2con = &at2con_mt[molb.type];
- for (i = at2con->index[a_mol]; i < at2con->index[a_mol + 1]; i++)
+ const int offset = a_gl - a_mol;
+ /* Loop over the constraints connected to atom a_mol in the molecule */
+ const auto& at2con = at2con_mt[molb.type];
+ for (const int con : at2con[a_mol])
{
- con = at2con->a[i];
const int* iap = constr_iatomptr(ia1, ia2, con);
+ int b_mol;
if (a_mol == iap[1])
{
b_mol = iap[2];
ilc_local->nalloc = over_alloc_dd(ilc_local->nr + 3);
srenew(ilc_local->iatoms, ilc_local->nalloc);
}
- b_lo = *a_loc;
+ const int b_lo = *a_loc;
ilc_local->iatoms[ilc_local->nr++] = iap[0];
ilc_local->iatoms[ilc_local->nr++] = (a_gl == iap[1] ? a : b_lo);
ilc_local->iatoms[ilc_local->nr++] = (a_gl == iap[1] ? b_lo : a);
int nrec,
t_ilist* il_local)
{
- gmx_domdec_constraints_t* dc;
- t_ilist * ilc_local, *ils_local;
- std::vector<int>* ireq;
- gmx::ArrayRef<const t_blocka> at2con_mt;
- gmx::HashedMap<int>* ga2la_specat;
- int at_end, i, j;
- t_iatom* iap;
+ gmx_domdec_constraints_t* dc;
+ t_ilist * ilc_local, *ils_local;
+ gmx::HashedMap<int>* ga2la_specat;
+ int at_end, i, j;
+ t_iatom* iap;
// This code should not be called unless this condition is true,
// because that's the only time init_domdec_constraints is
dc->ncon = 0;
ilc_local->nr = 0;
+ gmx::ArrayRef<const ListOfLists<int>> at2con_mt;
+ std::vector<int>* ireq = nullptr;
if (dd->constraint_comm)
{
// TODO Perhaps gmx_domdec_constraints_t should keep a valid constr?
ireq = &dc->requestedGlobalAtomIndices[0];
ireq->clear();
}
- else
- {
- // Currently unreachable
- at2con_mt = {};
- ireq = nullptr;
- }
gmx::ArrayRef<const std::vector<int>> at2settle_mt;
/* When settle works inside charge groups, we assigned them already */
/*! Returns the size of the buffer to hold fractional cell boundaries for DD dimension index dimIndex */
static inline int ddCellFractionBufferSize(const gmx_domdec_t* dd, int dimIndex)
{
- return dd->nc[dd->dim[dimIndex]] + 1 + dimIndex * 2 + 1 + dimIndex;
+ return dd->numCells[dd->dim[dimIndex]] + 1 + dimIndex * 2 + 1 + dimIndex;
}
/*! \brief Maximum number of ranks for using send/recv for state scattering and gathering
for (int d = dd->ndim - 1; d >= 0; d--)
{
dim = dd->dim[d];
- if (dd->nc[dim] > 2)
+ if (dd->numCells[dim] > 2)
{
/* Pulse the grid forward and backward */
spas = spac->spas[d];
vbuf + spas[0].a.size(), spas[1].a.size());
for (dir = 0; dir < 2; dir++)
{
- bPBC = ((dir == 0 && dd->ci[dim] == 0) || (dir == 1 && dd->ci[dim] == dd->nc[dim] - 1));
+ bPBC = ((dir == 0 && dd->ci[dim] == 0)
+ || (dir == 1 && dd->ci[dim] == dd->numCells[dim] - 1));
bScrew = (bPBC && dd->unitCellInfo.haveScrewPBC && dim == XX);
spas = &spac->spas[d][dir];
spas->a.size());
/* Sum the buffer into the required forces */
if (dd->unitCellInfo.haveScrewPBC && dim == XX
- && (dd->ci[dim] == 0 || dd->ci[dim] == dd->nc[dim] - 1))
+ && (dd->ci[dim] == 0 || dd->ci[dim] == dd->numCells[dim] - 1))
{
int i = 0;
for (int a : spas->a)
for (d = 0; d < dd->ndim; d++)
{
dim = dd->dim[d];
- if (dd->nc[dim] > 2)
+ if (dd->numCells[dim] > 2)
{
/* Pulse the grid forward and backward */
rvec* vbuf = as_rvec_array(spac->vbuf.data());
bScrew = (dd->unitCellInfo.haveScrewPBC && dim == XX);
copy_rvec(box[dim], shift);
}
- else if (dir == 1 && dd->ci[dim] == dd->nc[dim] - 1)
+ else if (dir == 1 && dd->ci[dim] == dd->numCells[dim] - 1)
{
bPBC = TRUE;
bScrew = (dd->unitCellInfo.haveScrewPBC && dim == XX);
{
rvec* x = (v == 0 ? x0 : x1);
if (dd->unitCellInfo.haveScrewPBC && dim == XX
- && (dd->ci[XX] == 0 || dd->ci[XX] == dd->nc[XX] - 1))
+ && (dd->ci[XX] == 0 || dd->ci[XX] == dd->numCells[XX] - 1))
{
/* Here we only perform the rotation, the rest of the pbc
* is handled in the constraint or viste routines.
/* Pulse the grid forward and backward */
dim = dd->dim[d];
bPBC = (dim < dd->unitCellInfo.npbcdim);
- if (dd->nc[dim] == 2)
+ if (dd->numCells[dim] == 2)
{
/* Only 2 cells, so we only need to communicate once */
ndir = 1;
}
for (int dir = 0; dir < ndir; dir++)
{
- if (!bPBC && dd->nc[dim] > 2
- && ((dir == 0 && dd->ci[dim] == dd->nc[dim] - 1) || (dir == 1 && dd->ci[dim] == 0)))
+ if (!bPBC && dd->numCells[dim] > 2
+ && ((dir == 0 && dd->ci[dim] == dd->numCells[dim] - 1) || (dir == 1 && dd->ci[dim] == 0)))
{
/* No pbc: the fist/last cell should not request atoms */
nsend_ptr = nsend_zero;
for (int d = 0; d < dd->ndim; d++)
{
/* Pulse the grid forward and backward */
- if (dd->dim[d] >= dd->unitCellInfo.npbcdim || dd->nc[dd->dim[d]] > 2)
+ if (dd->dim[d] >= dd->unitCellInfo.npbcdim || dd->numCells[dd->dim[d]] > 2)
{
ndir = 2;
}
//! The j-atom range
gmx::Range<int> jAtomRange;
//! Minimum shifts to consider
- ivec shift0 = {};
+ gmx::IVec shift0 = { 0, 0, 0 };
//! Maximum shifts to consider
- ivec shift1 = {};
+ gmx::IVec shift1 = { 0, 0, 0 };
};
typedef struct
{
/* Zone lower corner in triclinic coordinates */
- rvec x0 = {};
+ gmx::RVec x0 = { 0, 0, 0 };
/* Zone upper corner in triclinic coordinates */
- rvec x1 = {};
+ gmx::RVec x1 = { 0, 0, 0 };
/* Zone bounding box lower corner in Cartesian coords */
- rvec bb_x0 = {};
+ gmx::RVec bb_x0 = { 0, 0, 0 };
/* Zone bounding box upper corner in Cartesian coords */
- rvec bb_x1 = {};
+ gmx::RVec bb_x1 = { 0, 0, 0 };
} gmx_domdec_zone_size_t;
struct gmx_domdec_zones_t
struct gmx_ddbox_t
{
- int npbcdim;
- int nboundeddim;
- rvec box0;
- rvec box_size;
+ int npbcdim;
+ int nboundeddim;
+ gmx::RVec box0 = { 0, 0, 0 };
+ gmx::RVec box_size = { 0, 0, 0 };
/* Tells if the box is skewed for each of the three cartesian directions */
- ivec tric_dir;
- rvec skew_fac;
+ gmx::IVec tric_dir = { 0, 0, 0 };
+ gmx::RVec skew_fac = { 0, 0, 0 };
/* Orthogonal vectors for triclinic cells, Cartesian index */
rvec v[DIM][DIM];
/* Normal vectors for the cells walls */
int nnodes = 0;
MPI_Comm mpi_comm_all = MPI_COMM_NULL;
/* The local DD cell index and rank */
- ivec ci = { 0, 0, 0 };
- int rank = 0;
- ivec master_ci = { 0, 0, 0 };
- int masterrank = 0;
+ gmx::IVec ci = { 0, 0, 0 };
+ int rank = 0;
+ gmx::IVec master_ci = { 0, 0, 0 };
+ int masterrank = 0;
/* Communication with the PME only nodes */
int pme_nodeid = 0;
gmx_bool pme_receive_vir_ener = false;
UnitCellInfo unitCellInfo;
/* The communication setup, identical for each cell, cartesian index */
- ivec nc = { 0, 0, 0 };
- int ndim = 0;
- ivec dim = { 0, 0, 0 }; /* indexed by 0 to ndim */
+    //! \todo Refactor nbnxm to not rely on this sometimes being a nullptr so this can be gmx::IVec
+ ivec numCells = { 0, 0, 0 };
+ int ndim = 0;
+ gmx::IVec dim = { 0, 0, 0 }; /* indexed by 0 to ndim */
/* Forward and backward neighboring cells, indexed by 0 to ndim */
int neighbor[DIM][2] = { { 0, 0 }, { 0, 0 }, { 0, 0 } };
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "domdec_vsite.h"
#include "dump.h"
+using gmx::ListOfLists;
+
/*! \brief The number of integer item in the local state, used for broadcasting of the state */
#define NITEM_DD_INIT_LOCAL_STATE 5
t_idef idef; /**< Partial local topology */
std::unique_ptr<VsitePbc> vsitePbc; /**< vsite PBC structure */
int nbonded; /**< The number of bondeds in this struct */
- t_blocka excl; /**< List of exclusions */
+ ListOfLists<int> excl; /**< List of exclusions */
int excl_count; /**< The total exclusion count for \p excl */
};
struct gmx_reverse_top_t
{
//! @cond Doxygen_Suppress
- //! \brief The maximum number of exclusions one atom can have
- int n_excl_at_max = 0;
//! \brief Are there constraints in this revserse top?
bool bConstr = false;
//! \brief Are there settles in this revserse top?
}
/*! \brief Returns the maximum number of exclusions per atom */
-static int getMaxNumExclusionsPerAtom(const t_blocka& excls)
+static int getMaxNumExclusionsPerAtom(const ListOfLists<int>& excls)
{
int maxNumExcls = 0;
- for (int at = 0; at < excls.nr; at++)
+ for (gmx::index at = 0; at < excls.ssize(); at++)
{
- const int numExcls = excls.index[at + 1] - excls.index[at];
+ const auto list = excls[at];
+ const int numExcls = list.ssize();
- GMX_RELEASE_ASSERT(numExcls != 1 || excls.a[excls.index[at]] == at,
+ GMX_RELEASE_ASSERT(numExcls != 1 || list[0] == at,
"With 1 exclusion we expect a self-exclusion");
maxNumExcls = std::max(maxNumExcls, numExcls);
make_reverse_top(mtop, ir->efep != efepNO, !dd->comm->systemInfo.haveSplitConstraints,
!dd->comm->systemInfo.haveSplitSettles, bBCheck, &dd->nbonded_global);
- gmx_reverse_top_t* rt = dd->reverse_top;
-
dd->haveExclusions = false;
- rt->n_excl_at_max = 0;
for (const gmx_molblock_t& molb : mtop->molblock)
{
const int maxNumExclusionsPerAtom = getMaxNumExclusionsPerAtom(mtop->moltype[molb.type].excls);
{
dd->haveExclusions = true;
}
- rt->n_excl_at_max = std::max(rt->n_excl_at_max, maxNumExclusionsPerAtom);
}
if (vsite && vsite->numInterUpdategroupVsites > 0)
return norm2(dx);
}
-/*! \brief Append t_blocka block structures 1 to nsrc in src to *dest */
-static void combine_blocka(t_blocka* dest, gmx::ArrayRef<const thread_work_t> src)
-{
- int ni = src.back().excl.nr;
- int na = 0;
- for (const thread_work_t& th_work : src)
- {
- na += th_work.excl.nra;
- }
- if (ni + 1 > dest->nalloc_index)
- {
- dest->nalloc_index = over_alloc_large(ni + 1);
- srenew(dest->index, dest->nalloc_index);
- }
- if (dest->nra + na > dest->nalloc_a)
- {
- dest->nalloc_a = over_alloc_large(dest->nra + na);
- srenew(dest->a, dest->nalloc_a);
- }
- for (gmx::index s = 1; s < src.ssize(); s++)
- {
- for (int i = dest->nr + 1; i < src[s].excl.nr + 1; i++)
- {
- dest->index[i] = dest->nra + src[s].excl.index[i];
- }
- for (int i = 0; i < src[s].excl.nra; i++)
- {
- dest->a[dest->nra + i] = src[s].excl.a[i];
- }
- dest->nr = src[s].excl.nr;
- dest->nra += src[s].excl.nra;
- }
-}
-
/*! \brief Append t_idef structures 1 to nsrc in src to *dest */
static void combine_idef(t_idef* dest, gmx::ArrayRef<const thread_work_t> src)
{
}
/*! \brief Set the exclusion data for i-zone \p iz for the case of no exclusions */
-static void set_no_exclusions_zone(const gmx_domdec_zones_t* zones, int iz, t_blocka* lexcls)
+static void set_no_exclusions_zone(const gmx_domdec_zones_t* zones, int iz, ListOfLists<int>* lexcls)
{
for (int a = zones->cg_range[iz]; a < zones->cg_range[iz + 1]; a++)
{
- lexcls->index[a + 1] = lexcls->nra;
+ lexcls->pushBack({});
}
}
gmx_domdec_zones_t* zones,
const std::vector<gmx_moltype_t>& moltype,
const int* cginfo,
- t_blocka* lexcls,
+ ListOfLists<int>* lexcls,
int iz,
int at_start,
int at_end,
const gmx::ArrayRef<const int> intermolecularExclusionGroup)
{
- int n_excl_at_max, n, at;
-
const gmx_ga2la_t& ga2la = *dd->ga2la;
const auto& jAtomRange = zones->iZones[iz].jAtomRange;
- n_excl_at_max = dd->reverse_top->n_excl_at_max;
+ const gmx::index oldNumLists = lexcls->ssize();
- /* We set the end index, but note that we might not start at zero here */
- lexcls->nr = at_end;
-
- n = lexcls->nra;
- for (at = at_start; at < at_end; at++)
+ std::vector<int> exclusionsForAtom;
+ for (int at = at_start; at < at_end; at++)
{
- if (n + 1000 > lexcls->nalloc_a)
- {
- lexcls->nalloc_a = over_alloc_large(n + 1000);
- srenew(lexcls->a, lexcls->nalloc_a);
- }
+ exclusionsForAtom.clear();
if (GET_CGINFO_EXCL_INTER(cginfo[at]))
{
- int a_gl, mb, mt, mol, a_mol, j;
- const t_blocka* excls;
-
- if (n + n_excl_at_max > lexcls->nalloc_a)
- {
- lexcls->nalloc_a = over_alloc_large(n + n_excl_at_max);
- srenew(lexcls->a, lexcls->nalloc_a);
- }
+ int a_gl, mb, mt, mol, a_mol;
/* Copy the exclusions from the global top */
- lexcls->index[at] = n;
- a_gl = dd->globalAtomIndices[at];
+ a_gl = dd->globalAtomIndices[at];
global_atomnr_to_moltype_ind(dd->reverse_top, a_gl, &mb, &mt, &mol, &a_mol);
- excls = &moltype[mt].excls;
- for (j = excls->index[a_mol]; j < excls->index[a_mol + 1]; j++)
+ const auto excls = moltype[mt].excls[a_mol];
+ for (const int aj_mol : excls)
{
- const int aj_mol = excls->a[j];
-
if (const auto* jEntry = ga2la.find(a_gl + aj_mol - a_mol))
{
/* This check is not necessary, but it can reduce
*/
if (jAtomRange.isInRange(jEntry->la))
{
- lexcls->a[n++] = jEntry->la;
+ exclusionsForAtom.push_back(jEntry->la);
}
}
}
}
- else
- {
- /* We don't need exclusions for this atom */
- lexcls->index[at] = n;
- }
bool isExcludedAtom = !intermolecularExclusionGroup.empty()
&& std::find(intermolecularExclusionGroup.begin(),
if (isExcludedAtom)
{
- if (n + intermolecularExclusionGroup.ssize() > lexcls->nalloc_a)
- {
- lexcls->nalloc_a = over_alloc_large(n + intermolecularExclusionGroup.size());
- srenew(lexcls->a, lexcls->nalloc_a);
- }
for (int qmAtomGlobalIndex : intermolecularExclusionGroup)
{
if (const auto* entry = dd->ga2la->find(qmAtomGlobalIndex))
{
- lexcls->a[n++] = entry->la;
+ exclusionsForAtom.push_back(entry->la);
}
}
}
- }
-
- lexcls->index[lexcls->nr] = n;
- lexcls->nra = n;
-}
-
-/*! \brief Ensure we have enough space in \p ba for \p nindex_max indices */
-static void check_alloc_index(t_blocka* ba, int nindex_max)
-{
- if (nindex_max + 1 > ba->nalloc_index)
- {
- ba->nalloc_index = over_alloc_dd(nindex_max + 1);
- srenew(ba->index, ba->nalloc_index);
+ /* Append the exclusions for this atom to the topology */
+ lexcls->pushBack(exclusionsForAtom);
}
-}
-
-/*! \brief Ensure that we have enough space for exclusion storate in \p lexcls */
-static void check_exclusions_alloc(const gmx_domdec_t* dd, const gmx_domdec_zones_t* zones, t_blocka* lexcls)
-{
- const int nr = zones->iZones.back().iAtomRange.end();
-
- check_alloc_index(lexcls, nr);
- for (size_t thread = 1; thread < dd->reverse_top->th_work.size(); thread++)
- {
- check_alloc_index(&dd->reverse_top->th_work[thread].excl, nr);
- }
+ GMX_RELEASE_ASSERT(
+ lexcls->ssize() - oldNumLists == at_end - at_start,
+            "The number of exclusion lists should match the number of atoms in the range");
}
/*! \brief Set the total count indexes for the local exclusions, needed by several functions */
-static void finish_local_exclusions(gmx_domdec_t* dd, gmx_domdec_zones_t* zones, t_blocka* lexcls)
+static void finish_local_exclusions(gmx_domdec_t* dd, gmx_domdec_zones_t* zones, ListOfLists<int>* lexcls)
{
const gmx::Range<int> nonhomeIzonesAtomRange(zones->iZones[0].iAtomRange.end(),
zones->iZones.back().iAtomRange.end());
/* There are no exclusions involving non-home charge groups,
* but we need to set the indices for neighborsearching.
*/
- for (int la : nonhomeIzonesAtomRange)
+ for (int gmx_unused la : nonhomeIzonesAtomRange)
{
- lexcls->index[la] = lexcls->nra;
+ lexcls->pushBack({});
}
-
- /* nr is only used to loop over the exclusions for Ewald and RF,
- * so we can set it to the number of home atoms for efficiency.
- */
- lexcls->nr = nonhomeIzonesAtomRange.begin();
- }
- else
- {
- lexcls->nr = nonhomeIzonesAtomRange.end();
}
}
t_pbc* pbc_null,
rvec* cg_cm,
t_idef* idef,
- t_blocka* lexcls,
+ ListOfLists<int>* lexcls,
int* excl_count)
{
int nzone_bondeds, nzone_excl;
nzone_excl = 1;
}
- check_exclusions_alloc(dd, zones, lexcls);
-
rt = dd->reverse_top;
rc2 = rc * rc;
clear_idef(idef);
nbonded_local = 0;
- lexcls->nr = 0;
- lexcls->nra = 0;
+ lexcls->clear();
*excl_count = 0;
for (int izone = 0; izone < nzone_bondeds; izone++)
{
try
{
- int cg0t, cg1t;
- t_idef* idef_t;
- t_blocka* excl_t;
+ int cg0t, cg1t;
+ t_idef* idef_t;
cg0t = cg0 + ((cg1 - cg0) * thread) / numThreads;
cg1t = cg0 + ((cg1 - cg0) * (thread + 1)) / numThreads;
if (izone < nzone_excl)
{
+ ListOfLists<int>* excl_t;
if (thread == 0)
{
+ // Thread 0 stores exclusions directly in the final storage
excl_t = lexcls;
}
else
{
- excl_t = &rt->th_work[thread].excl;
- excl_t->nr = 0;
- excl_t->nra = 0;
+ // Threads > 0 store in temporary storage, starting at list index 0
+ excl_t = &rt->th_work[thread].excl;
+ excl_t->clear();
}
/* No charge groups and no distance check required */
if (izone < nzone_excl)
{
- if (rt->th_work.size() > 1)
+ for (std::size_t th = 1; th < rt->th_work.size(); th++)
{
- combine_blocka(lexcls, rt->th_work);
+ lexcls->appendListOfLists(rt->th_work[th].excl);
}
-
for (const thread_work_t& th_work : rt->th_work)
{
*excl_count += th_work.excl_count;
finish_local_exclusions(dd, zones, lexcls);
if (debug)
{
- fprintf(debug, "We have %d exclusions, check count %d\n", lexcls->nra, *excl_count);
+ fprintf(debug, "We have %d exclusions, check count %d\n", lexcls->numElements(), *excl_count);
}
return nbonded_local;
/* Only need to check for dimensions where the part of the box
* that is not communicated is smaller than the cut-off.
*/
- if (d < npbcdim && dd->nc[d] > 1 && (dd->nc[d] - npulse[d]) * cellsize_min[d] < 2 * rc)
+ if (d < npbcdim && dd->numCells[d] > 1
+ && (dd->numCells[d] - npulse[d]) * cellsize_min[d] < 2 * rc)
{
- if (dd->nc[d] == 2)
+ if (dd->numCells[d] == 2)
{
rcheck[d] = TRUE;
bRCheckMB = TRUE;
{
if (fr->bMolPBC)
{
- pbc_null = set_pbc_dd(&pbc, fr->ePBC, dd->nc, TRUE, box);
+ pbc_null = set_pbc_dd(&pbc, fr->ePBC, dd->numCells, TRUE, box);
}
else
{
}
}
-t_blocka* makeBondedLinks(const gmx_mtop_t* mtop, cginfo_mb_t* cginfo_mb)
+t_blocka* makeBondedLinks(const gmx_mtop_t& mtop, gmx::ArrayRef<cginfo_mb_t> cginfo_mb)
{
t_blocka* link;
cginfo_mb_t* cgi_mb;
*/
reverse_ilist_t ril_intermol;
- if (mtop->bIntermolecularInteractions)
+ if (mtop.bIntermolecularInteractions)
{
t_atoms atoms;
- atoms.nr = mtop->natoms;
+ atoms.nr = mtop.natoms;
atoms.atom = nullptr;
- GMX_RELEASE_ASSERT(mtop->intermolecular_ilist,
+ GMX_RELEASE_ASSERT(mtop.intermolecular_ilist,
"We should have an ilist when intermolecular interactions are on");
- make_reverse_ilist(*mtop->intermolecular_ilist, &atoms, FALSE, FALSE, FALSE, TRUE, &ril_intermol);
+ make_reverse_ilist(*mtop.intermolecular_ilist, &atoms, FALSE, FALSE, FALSE, TRUE, &ril_intermol);
}
snew(link, 1);
- snew(link->index, mtop->natoms + 1);
+ snew(link->index, mtop.natoms + 1);
link->nalloc_a = 0;
link->a = nullptr;
link->index[0] = 0;
int cg_offset = 0;
int ncgi = 0;
- for (size_t mb = 0; mb < mtop->molblock.size(); mb++)
+ for (size_t mb = 0; mb < mtop.molblock.size(); mb++)
{
- const gmx_molblock_t& molb = mtop->molblock[mb];
+ const gmx_molblock_t& molb = mtop.molblock[mb];
if (molb.nmol == 0)
{
continue;
}
- const gmx_moltype_t& molt = mtop->moltype[molb.type];
+ const gmx_moltype_t& molt = mtop.moltype[molb.type];
/* Make a reverse ilist in which the interactions are linked
* to all atoms, not only the first atom as in gmx_reverse_top.
* The constraints are discarded here.
cgi_mb = &cginfo_mb[mb];
int mol;
- for (mol = 0; mol < (mtop->bIntermolecularInteractions ? molb.nmol : 1); mol++)
+ for (mol = 0; mol < (mtop.bIntermolecularInteractions ? molb.nmol : 1); mol++)
{
for (int a = 0; a < molt.atoms.nr; a++)
{
i += nral_rt(ftype);
}
- if (mtop->bIntermolecularInteractions)
+ if (mtop.bIntermolecularInteractions)
{
int i = ril_intermol.index[cg_gl];
while (i < ril_intermol.index[cg_gl + 1])
if (debug)
{
- fprintf(debug, "Of the %d atoms %d are linked via bonded interactions\n", mtop->natoms, ncgi);
+ fprintf(debug, "Of the %d atoms %d are linked via bonded interactions\n", mtop.natoms, ncgi);
}
return link;
}
if (bExcl)
{
- const t_blocka* excls = &molt->excls;
- for (int ai = 0; ai < excls->nr; ai++)
+ const auto& excls = molt->excls;
+ for (gmx::index ai = 0; ai < excls.ssize(); ai++)
{
- for (int j = excls->index[ai]; j < excls->index[ai + 1]; j++)
+ for (const int aj : excls[ai])
{
- int aj = excls->a[j];
if (ai != aj)
{
real rij2 = distance2(cg_cm[ai], cg_cm[aj]);
}
else
{
- if (d < ddbox->npbcdim && dd->nc[d] > 1)
+ if (d < ddbox->npbcdim && dd->numCells[d] > 1)
{
tric[d][i] = box[i][d] / box[i][i];
}
if (applyPbc)
{
/* Take the minimum to avoid double communication */
- numPulsesMin = std::min(numPulses, dd->nc[dim] - 1 - numPulses);
+ numPulsesMin = std::min(numPulses, dd->numCells[dim] - 1 - numPulses);
}
else
{
for (int pulse = 0; pulse < numPulses; pulse++)
{
/* Communicate all the zone information backward */
- bool receiveValidData = (applyPbc || dd->ci[dim] < dd->nc[dim] - 1);
+ bool receiveValidData = (applyPbc || dd->ci[dim] < dd->numCells[dim] - 1);
static_assert(
sizeof(gmx_ddzone_t) == c_ddzoneNumReals * sizeof(real),
*/
buf_s[i] = buf_r[i];
}
- if (((applyPbc || dd->ci[dim] + numPulses < dd->nc[dim]) && pulse == numPulses - 1)
- || (!applyPbc && dd->ci[dim] + 1 + pulse == dd->nc[dim] - 1))
+ if (((applyPbc || dd->ci[dim] + numPulses < dd->numCells[dim]) && pulse == numPulses - 1)
+ || (!applyPbc && dd->ci[dim] + 1 + pulse == dd->numCells[dim] - 1))
{
/* Store the extremes */
int pos = 0;
{
if (fr != nullptr)
{
- const cginfo_mb_t* cginfo_mb = fr->cginfo_mb;
- gmx::ArrayRef<int> cginfo = fr->cginfo;
+ gmx::ArrayRef<cginfo_mb_t> cginfo_mb = fr->cginfo_mb;
+ gmx::ArrayRef<int> cginfo = fr->cginfo;
for (int cg = cg0; cg < cg1; cg++)
{
dim = dd->dim[dim_ind];
/* Without PBC we don't have restrictions on the outer cells */
- if (!(dim >= ddbox->npbcdim && (dd->ci[dim] == 0 || dd->ci[dim] == dd->nc[dim] - 1))
+ if (!(dim >= ddbox->npbcdim && (dd->ci[dim] == 0 || dd->ci[dim] == dd->numCells[dim] - 1))
&& isDlbOn(comm)
&& (comm->cell_x1[dim] - comm->cell_x0[dim]) * ddbox->skew_fac[dim] < comm->cellsize_min[dim])
{
load->mdf = 0;
load->pme = 0;
int pos = 0;
- for (int i = 0; i < dd->nc[dim]; i++)
+ for (int i = 0; i < dd->numCells[dim]; i++)
{
load->sum += load->load[pos++];
load->max = std::max(load->max, load->load[pos]);
}
if (isDlbOn(comm) && rowMaster->dlbIsLimited)
{
- load->sum_m *= dd->nc[dim];
+ load->sum_m *= dd->numCells[dim];
load->flags |= (1 << d);
}
}
{
comm->load[d].sum_m = comm->load[d].sum;
- int nc = dd->nc[dd->dim[d]];
+ int nc = dd->numCells[dd->dim[d]];
for (int i = 0; i < nc; i++)
{
rowMaster->cellFrac[i] = i / static_cast<real>(nc);
const int* recv_i,
gmx::ArrayRef<gmx::RVec> x,
gmx::ArrayRef<const gmx::RVec> recv_vr,
- cginfo_mb_t* cginfo_mb,
+ gmx::ArrayRef<cginfo_mb_t> cginfo_mb,
gmx::ArrayRef<int> cginfo)
{
gmx_domdec_ind_t *ind, *ind_p;
gmx_domdec_comm_t* comm;
gmx_domdec_zones_t* zones;
gmx_domdec_comm_dim_t* cd;
- cginfo_mb_t* cginfo_mb;
gmx_bool bBondComm, bDist2B, bDistMB, bDistBonded;
dd_corners_t corners;
rvec * normal, *v_d, *v_0 = nullptr, *v_1 = nullptr;
v_1 = ddbox->v[dim1];
}
- zone_cg_range = zones->cg_range;
- cginfo_mb = fr->cginfo_mb;
+ zone_cg_range = zones->cg_range;
+ gmx::ArrayRef<cginfo_mb_t> cginfo_mb = fr->cginfo_mb;
zone_cg_range[0] = 0;
zone_cg_range[1] = dd->ncg_home;
flag |= DD_FLAG_BW(d);
if (firstMoveDimValue == -1)
{
- if (dd.nc[dim] > 2)
+ if (dd.numCells[dim] > 2)
{
firstMoveDimValue = d * 2 + 1;
}
/* Do pbc and check DD cell boundary crossings */
for (int d = DIM - 1; d >= 0; d--)
{
- if (dd->nc[d] > 1)
+ if (dd->numCells[d] > 1)
{
bool bScrew = (dd->unitCellInfo.haveScrewPBC && d == XX);
/* Determine the location of this cg in lattice coordinates */
cm_new, cm_new, pos_d);
}
dev[d] = 1;
- if (dd->ci[d] == dd->nc[d] - 1)
+ if (dd->ci[d] == dd->numCells[d] - 1)
{
rvec_dec(cm_new, state->box[d]);
if (bScrew)
/* Do pbc and check DD cell boundary crossings */
for (int d = DIM - 1; d >= 0; d--)
{
- if (dd->nc[d] > 1)
+ if (dd->numCells[d] > 1)
{
/* Determine the location of this COG in lattice coordinates */
real pos_d = cog[d];
cogOld, cog, pos_d);
}
dev[d] = 1;
- if (dd->ci[d] == dd->nc[d] - 1)
+ if (dd->ci[d] == dd->numCells[d] - 1)
{
rvec_dec(cog, state->box[d]);
}
{
cell_x0[d] = comm->cell_x0[d];
}
- if (d >= npbcdim && dd->ci[d] == dd->nc[d] - 1)
+ if (d >= npbcdim && dd->ci[d] == dd->numCells[d] - 1)
{
cell_x1[d] = GMX_FLOAT_MAX;
}
/* We reuse the intBuffer without reacquiring since we are in the same scope */
DDBufferAccess<int>& flagBuffer = moveBuffer;
- const cginfo_mb_t* cginfo_mb = fr->cginfo_mb;
+ gmx::ArrayRef<const cginfo_mb_t> cginfo_mb = fr->cginfo_mb;
/* Temporarily store atoms passed to our rank at the end of the range */
int home_pos_cg = dd->ncg_home;
const int dim = dd->dim[d];
int ncg_recv = 0;
int nvr = 0;
- for (int dir = 0; dir < (dd->nc[dim] == 2 ? 1 : 2); dir++)
+ for (int dir = 0; dir < (dd->numCells[dim] == 2 ? 1 : 2); dir++)
{
const int cdd = d * 2 + dir;
/* Communicate the cg and atom counts */
int flag = flagBuffer.buffer[cg * DD_CGIBS + 1];
const gmx::RVec& cog = rvecBuffer.buffer[buf_pos];
- if (dim >= npbcdim && dd->nc[dim] > 2)
+ if (dim >= npbcdim && dd->numCells[dim] > 2)
{
/* No pbc in this dim and more than one domain boundary.
* We do a separate check if a charge group didn't move too far.
* so we do not need to handle boundary crossings.
* This also means we do not have to handle PBC here.
*/
- if (!((dd->ci[dim2] == dd->nc[dim2] - 1 && (flag & DD_FLAG_FW(d2)))
+ if (!((dd->ci[dim2] == dd->numCells[dim2] - 1 && (flag & DD_FLAG_FW(d2)))
|| (dd->ci[dim2] == 0 && (flag & DD_FLAG_BW(d2)))))
{
/* Clear the two flags for this dimension */
* to an adjacent cell because of the
* staggering.
*/
- if (pos_d >= cell_x1[dim2] && dd->ci[dim2] != dd->nc[dim2] - 1)
+ if (pos_d >= cell_x1[dim2] && dd->ci[dim2] != dd->numCells[dim2] - 1)
{
flag |= DD_FLAG_FW(d2);
}
}
else if (flag & DD_FLAG_BW(d2))
{
- if (dd->nc[dd->dim[d2]] > 2)
+ if (dd->numCells[dd->dim[d2]] > 2)
{
mc = d2 * 2 + 1;
}
#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/mdtypes/forcerec.h"
+#include "gromacs/utility/arrayref.h"
#include "domdec_internal.h"
void check_screw_box(const matrix box);
/*! \brief Return the charge group information flags for charge group cg */
-static inline int ddcginfo(const cginfo_mb_t* cginfo_mb, int cg)
+static inline int ddcginfo(gmx::ArrayRef<const cginfo_mb_t> cginfo_mb, int cg)
{
- while (cg >= cginfo_mb->cg_end)
+ size_t index = 0;
+ while (cg >= cginfo_mb[index].cg_end)
{
- cginfo_mb++;
+ index++;
}
+ const cginfo_mb_t& cgimb = cginfo_mb[index];
- return cginfo_mb->cginfo[(cg - cginfo_mb->cg_start) % cginfo_mb->cg_mod];
+ return cgimb.cginfo[(cg - cgimb.cg_start) % cgimb.cg_mod];
};
/*! \brief Returns the number of MD steps for which load has been recorded */
* send coordinates buffer address to PP rank
* \param[in] d_x coordinates buffer in GPU memory
*/
- void sendCoordinateBufferAddressToPpRanks(rvec* d_x);
+ void sendCoordinateBufferAddressToPpRanks(DeviceBuffer<float> d_x);
/*! \brief
};
/*!\brief Constructor stub. */
-PmeCoordinateReceiverGpu::PmeCoordinateReceiverGpu(void gmx_unused* pmeStream,
- MPI_Comm gmx_unused comm,
- gmx::ArrayRef<PpRanks> gmx_unused ppRanks) :
+PmeCoordinateReceiverGpu::PmeCoordinateReceiverGpu(void* /* pmeStream */,
+ MPI_Comm /* comm */,
+ gmx::ArrayRef<PpRanks> /* ppRanks */) :
impl_(nullptr)
{
GMX_ASSERT(false,
PmeCoordinateReceiverGpu::~PmeCoordinateReceiverGpu() = default;
/*!\brief init PME-PP GPU communication stub */
-void PmeCoordinateReceiverGpu::sendCoordinateBufferAddressToPpRanks(rvec gmx_unused* d_x)
+void PmeCoordinateReceiverGpu::sendCoordinateBufferAddressToPpRanks(const DeviceBuffer<float> /* d_x */)
{
GMX_ASSERT(false,
"A CPU stub for PME-PP GPU communication initialization was called instead of the "
"correct implementation.");
}
-void PmeCoordinateReceiverGpu::launchReceiveCoordinatesFromPpCudaDirect(int gmx_unused ppRank)
+void PmeCoordinateReceiverGpu::launchReceiveCoordinatesFromPpCudaDirect(int /* ppRank */)
{
GMX_ASSERT(false,
"A CPU stub for PME-PP GPU communication was called instead of the correct "
PmeCoordinateReceiverGpu::Impl::~Impl() = default;
-void PmeCoordinateReceiverGpu::Impl::sendCoordinateBufferAddressToPpRanks(rvec* d_x)
+void PmeCoordinateReceiverGpu::Impl::sendCoordinateBufferAddressToPpRanks(const DeviceBuffer<float> d_x)
{
int ind_start = 0;
PmeCoordinateReceiverGpu::~PmeCoordinateReceiverGpu() = default;
-void PmeCoordinateReceiverGpu::sendCoordinateBufferAddressToPpRanks(rvec* d_x)
+void PmeCoordinateReceiverGpu::sendCoordinateBufferAddressToPpRanks(const DeviceBuffer<float> d_x)
{
impl_->sendCoordinateBufferAddressToPpRanks(d_x);
}
* send coordinates buffer address to PP rank
* \param[in] d_x coordinates buffer in GPU memory
*/
- void sendCoordinateBufferAddressToPpRanks(rvec* d_x);
+ void sendCoordinateBufferAddressToPpRanks(const DeviceBuffer<float> d_x);
/*! \brief
* launch receive of coordinate data from PP rank
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2012-2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2012-2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
"but PME run mode is not PmeRunMode::GPU\n");
// This rank will have its data accessed directly by PP rank, so needs to send the remote addresses.
- rvec* d_x = nullptr;
- rvec* d_f = nullptr;
-# if (GMX_GPU == GMX_GPU_CUDA) // avoid invalid cast for OpenCL
- d_x = reinterpret_cast<rvec*>(pme_gpu_get_device_x(pme));
- d_f = reinterpret_cast<rvec*>(pme_gpu_get_device_f(pme));
-# endif
- pme_pp->pmeCoordinateReceiverGpu->sendCoordinateBufferAddressToPpRanks(d_x);
- pme_pp->pmeForceSenderGpu->sendForceBufferAddressToPpRanks(d_f);
+ pme_pp->pmeCoordinateReceiverGpu->sendCoordinateBufferAddressToPpRanks(
+ pme_gpu_get_device_x(pme));
+ pme_pp->pmeForceSenderGpu->sendForceBufferAddressToPpRanks(
+ reinterpret_cast<rvec*>(pme_gpu_get_device_f(pme)));
}
}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
serializer->doIntArray(block->index, block->nr + 1);
}
-static void do_blocka(gmx::ISerializer* serializer, t_blocka* block)
+static void doListOfLists(gmx::ISerializer* serializer, gmx::ListOfLists<int>* listOfLists)
{
- serializer->doInt(&block->nr);
- serializer->doInt(&block->nra);
+ int numLists = listOfLists->ssize();
+ serializer->doInt(&numLists);
+ int numElements = listOfLists->elementsView().ssize();
+ serializer->doInt(&numElements);
if (serializer->reading())
{
- block->nalloc_index = block->nr + 1;
- snew(block->index, block->nalloc_index);
- block->nalloc_a = block->nra;
- snew(block->a, block->nalloc_a);
+ std::vector<int> listRanges(numLists + 1);
+ serializer->doIntArray(listRanges.data(), numLists + 1);
+ std::vector<int> elements(numElements);
+ serializer->doIntArray(elements.data(), numElements);
+ *listOfLists = gmx::ListOfLists<int>(std::move(listRanges), std::move(elements));
+ }
+ else
+ {
+ serializer->doIntArray(const_cast<int*>(listOfLists->listRangesView().data()), numLists + 1);
+ serializer->doIntArray(const_cast<int*>(listOfLists->elementsView().data()), numElements);
}
- serializer->doIntArray(block->index, block->nr + 1);
- serializer->doIntArray(block->a, block->nra);
}
/* This is a primitive routine to make it possible to translate atomic numbers
sfree(cgs.index);
/* This used to be in the atoms struct */
- do_blocka(serializer, &molt->excls);
+ doListOfLists(serializer, &molt->excls);
}
static void do_molblock(gmx::ISerializer* serializer, gmx_molblock_t* molb, int numAtomsPerMolecule)
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
const int* typeA = mdatoms->typeA;
const int* typeB = mdatoms->typeB;
const int ntype = fr->ntype;
- const real* nbfp = fr->nbfp;
+ const real* nbfp = fr->nbfp.data();
const real* nbfp_grid = fr->ljpme_c6grid;
real* Vv = kernel_data->energygrp_vdw;
const real lambda_coul = kernel_data->lambda[efptCOUL];
}
#endif
-static void nnb2excl(t_nextnb* nnb, t_blocka* excl)
+static void nnb2excl(t_nextnb* nnb, gmx::ListOfLists<int>* excls)
{
int i, j, j_index;
int nre, nrx, nrs, nr_of_sortables;
sortable* s;
- srenew(excl->index, nnb->nr + 1);
- excl->index[0] = 0;
+ excls->clear();
+
for (i = 0; (i < nnb->nr); i++)
{
/* calculate the total number of exclusions for atom i */
nr_of_sortables = j_index;
prints("after rm-double", j_index, s);
- /* make space for arrays */
- srenew(excl->a, excl->nra + nr_of_sortables);
-
/* put the sorted exclusions in the target list */
+ excls->pushBackListOfSize(nr_of_sortables);
+ gmx::ArrayRef<int> exclusionsForAtom = excls->back();
for (nrs = 0; (nrs < nr_of_sortables); nrs++)
{
- excl->a[excl->nra + nrs] = s[nrs].aj;
+ exclusionsForAtom[nrs] = s[nrs].aj;
}
- excl->nra += nr_of_sortables;
- excl->index[i + 1] = excl->nra;
/* cleanup temporary space */
sfree(s);
}
-void generate_excl(int nrexcl, int nratoms, gmx::ArrayRef<InteractionsOfType> plist, t_blocka* excl)
+void generate_excl(int nrexcl, int nratoms, gmx::ArrayRef<InteractionsOfType> plist, gmx::ListOfLists<int>* excls)
{
t_nextnb nnb;
if (nrexcl < 0)
}
init_nnb(&nnb, nratoms, nrexcl);
gen_nnb(&nnb, plist);
- excl->nr = nratoms;
sort_and_purge_nnb(&nnb);
- nnb2excl(&nnb, excl);
+ nnb2excl(&nnb, excls);
done_nnb(&nnb);
}
#include "gromacs/utility/arrayref.h"
-struct t_blocka;
struct InteractionsOfType;
+namespace gmx
+{
+template<typename>
+class ListOfLists;
+}
+
struct t_nextnb
{
int nr; /* nr atoms (0 <= i < nr) (atoms->nr) */
* initiated using init_nnb.
*/
-void generate_excl(int nrexcl, int nratoms, gmx::ArrayRef<InteractionsOfType> plist, t_blocka* excl);
+void generate_excl(int nrexcl, int nratoms, gmx::ArrayRef<InteractionsOfType> plist, gmx::ListOfLists<int>* excls);
/* Generate an exclusion block from bonds and constraints in
* plist.
*/
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/keyvaluetreebuilder.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/mdmodulenotification.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/snprintf.h"
void MoleculeInformation::initMolInfo()
{
init_block(&mols);
- init_blocka(&excls);
+ excls.clear();
init_t_atoms(&atoms, 0, FALSE);
}
const t_atom* atom = molt.atoms.atom;
- t_blocka atomToConstraints =
+ const auto atomToConstraints =
gmx::make_at2con(molt, iparams, gmx::FlexibleConstraintTreatment::Exclude);
bool haveDecoupledMode = false;
int a1 = il.iatoms[1 + i + 1];
int a2 = il.iatoms[1 + i + 2];
if ((atom[a0].m > atom[a2].m * massFactorThreshold || atom[a2].m > atom[a0].m * massFactorThreshold)
- && atomToConstraints.index[a0 + 1] - atomToConstraints.index[a0] == 1
- && atomToConstraints.index[a2 + 1] - atomToConstraints.index[a2] == 1
- && atomToConstraints.index[a1 + 1] - atomToConstraints.index[a1] >= 3)
+ && atomToConstraints[a0].ssize() == 1 && atomToConstraints[a2].ssize() == 1
+ && atomToConstraints[a1].ssize() >= 3)
{
- int constraint0 = atomToConstraints.a[atomToConstraints.index[a0]];
- int constraint2 = atomToConstraints.a[atomToConstraints.index[a2]];
+ int constraint0 = atomToConstraints[a0][0];
+ int constraint2 = atomToConstraints[a2][0];
bool foundAtom0 = false;
bool foundAtom2 = false;
- for (int conIndex = atomToConstraints.index[a1];
- conIndex < atomToConstraints.index[a1 + 1]; conIndex++)
+ for (const int constraint : atomToConstraints[a1])
{
- if (atomToConstraints.a[conIndex] == constraint0)
+ if (constraint == constraint0)
{
foundAtom0 = true;
}
- if (atomToConstraints.a[conIndex] == constraint2)
+ if (constraint == constraint2)
{
foundAtom2 = true;
}
}
}
- done_blocka(&atomToConstraints);
-
return haveDecoupledMode;
}
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/exceptions.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/real.h"
/*! \libinternal \brief
//! Molecules separated in datastructure.
t_block mols;
//! Exclusions in the molecule.
- t_blocka excls;
+ gmx::ListOfLists<int> excls;
//! Interactions of a defined type.
std::array<InteractionsOfType, F_NRE> interactions;
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013-2019, by the GROMACS development team, led by
+ * Copyright (c) 2013-2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/* Copy the exclusions to a new array, since this is the only
* thing that needs to be modified for QMMM.
*/
- copy_blocka(&sys->moltype[molb->type].excls, &sys->moltype.back().excls);
+ sys->moltype.back().excls = sys->moltype[molb->type].excls;
/* Set the molecule type for the QMMM molblock */
molb->type = sys->moltype.size() - 1;
}
static void generate_LJCpairsNB(MoleculeInformation* mol, int nb_funct, InteractionsOfType* nbp, warninp* wi)
{
- int n, ntype;
- t_atom* atom;
- t_blocka* excl;
- bool bExcl;
+ int n, ntype;
+ t_atom* atom;
n = mol->atoms.nr;
atom = mol->atoms.atom;
"Number of pairs of generated non-bonded parameters should be a perfect square");
/* Add a pair interaction for all non-excluded atom pairs */
- excl = &mol->excls;
+ const auto& excls = mol->excls;
for (int i = 0; i < n; i++)
{
for (int j = i + 1; j < n; j++)
{
- bExcl = FALSE;
- for (int k = excl->index[i]; k < excl->index[i + 1]; k++)
+ bool pairIsExcluded = false;
+ for (const int atomK : excls[i])
{
- if (excl->a[k] == j)
+ if (atomK == j)
{
- bExcl = TRUE;
+ pairIsExcluded = true;
}
}
- if (!bExcl)
+ if (!pairIsExcluded)
{
if (nb_funct != F_LJ)
{
}
}
-static void set_excl_all(t_blocka* excl)
+static void set_excl_all(gmx::ListOfLists<int>* excl)
{
- int nat, i, j, k;
-
/* Get rid of the current exclusions and exclude all atom pairs */
- nat = excl->nr;
- excl->nra = nat * nat;
- srenew(excl->a, excl->nra);
- k = 0;
- for (i = 0; i < nat; i++)
+ const int numAtoms = excl->ssize();
+ std::vector<int> exclusionsForAtom(numAtoms);
+ for (int i = 0; i < numAtoms; i++)
{
- excl->index[i] = k;
- for (j = 0; j < nat; j++)
- {
- excl->a[k++] = j;
- }
+ exclusionsForAtom[i] = i;
+ }
+ excl->clear();
+ for (int i = 0; i < numAtoms; i++)
+ {
+ excl->pushBack(exclusionsForAtom);
}
- excl->index[nat] = k;
}
static void decouple_atoms(t_atoms* atoms,
const t_ilist& ilist = idef.il[fType];
- return (idef.ilsort != ilsortNO_FE && ilist.nr_nonperturbed != ilist.nr);
+ return (idef.ilsort != ilsortNO_FE && idef.numNonperturbedInteractions[fType] != ilist.nr);
}
//! Converts \p src with atom indices in state order to \p dest in nbnxn order
namespace
{
+using gmx::ArrayRef;
+
/*! \brief Return true if ftype is an explicit pair-listed LJ or
* COULOMB interaction type: bonded LJ (usually 1-4), or special
* listed non-bonded for FEP. */
real calc_one_bond(int thread,
int ftype,
const t_idef* idef,
+ ArrayRef<const int> iatoms,
+ const int numNonperturbedInteractions,
const WorkDivision& workDivision,
const rvec x[],
rvec4 f[],
"The topology should be marked either as no FE or sorted on FE");
const bool havePerturbedInteractions =
- (idef->ilsort == ilsortFE_SORTED && idef->il[ftype].nr_nonperturbed < idef->il[ftype].nr);
+ (idef->ilsort == ilsortFE_SORTED && numNonperturbedInteractions < iatoms.ssize());
BondedKernelFlavor flavor =
selectBondedKernelFlavor(stepWork, fr->use_simd_kernels, havePerturbedInteractions);
int efptFTYPE;
efptFTYPE = efptBONDED;
}
- const int nat1 = interaction_function[ftype].nratoms + 1;
- const int nbonds = idef->il[ftype].nr / nat1;
- const t_iatom* iatoms = idef->il[ftype].iatoms;
+ const int nat1 = interaction_function[ftype].nratoms + 1;
+ const int nbonds = iatoms.ssize() / nat1;
- GMX_ASSERT(fr->gpuBonded != nullptr || workDivision.end(ftype) == idef->il[ftype].nr,
+ GMX_ASSERT(fr->gpuBonded != nullptr || workDivision.end(ftype) == iatoms.ssize(),
"The thread division should match the topology");
const int nb0 = workDivision.bound(ftype, thread);
nice to account to its own subtimer, but first
wallcycle needs to be extended to support calling from
multiple threads. */
- v = cmap_dihs(nbn, iatoms + nb0, idef->iparams, idef->cmap_grid, x, f, fshift, pbc, g,
- lambda[efptFTYPE], &(dvdl[efptFTYPE]), md, fcd, global_atom_index);
+ v = cmap_dihs(nbn, iatoms.data() + nb0, idef->iparams, idef->cmap_grid, x, f, fshift,
+ pbc, g, lambda[efptFTYPE], &(dvdl[efptFTYPE]), md, fcd, global_atom_index);
}
else
{
- v = calculateSimpleBond(ftype, nbn, iatoms + nb0, idef->iparams, x, f, fshift, pbc, g,
- lambda[efptFTYPE], &(dvdl[efptFTYPE]), md, fcd,
+ v = calculateSimpleBond(ftype, nbn, iatoms.data() + nb0, idef->iparams, x, f, fshift,
+ pbc, g, lambda[efptFTYPE], &(dvdl[efptFTYPE]), md, fcd,
global_atom_index, flavor);
}
}
/* TODO The execution time for pairs might be nice to account
to its own subtimer, but first wallcycle needs to be
extended to support calling from multiple threads. */
- do_pairs(ftype, nbn, iatoms + nb0, idef->iparams, x, f, fshift, pbc, g, lambda, dvdl, md,
- fr, havePerturbedInteractions, stepWork, grpp, global_atom_index);
+ do_pairs(ftype, nbn, iatoms.data() + nb0, idef->iparams, x, f, fshift, pbc, g, lambda, dvdl,
+ md, fr, havePerturbedInteractions, stepWork, grpp, global_atom_index);
}
if (thread == 0)
/* Loop over all bonded force types to calculate the bonded forces */
for (ftype = 0; (ftype < F_NRE); ftype++)
{
- if (idef->il[ftype].nr > 0 && ftype_is_bonded_potential(ftype))
+ const t_ilist& ilist = idef->il[ftype];
+ if (ilist.nr > 0 && ftype_is_bonded_potential(ftype))
{
- v = calc_one_bond(thread, ftype, idef, fr->bondedThreading->workDivision, x, ft,
- fshift, fr, pbc_null, g, grpp, nrnb, lambda, dvdlt, md, fcd,
- stepWork, global_atom_index);
+ ArrayRef<const int> iatoms = gmx::constArrayRefFromArray(ilist.iatoms, ilist.nr);
+ v = calc_one_bond(
+ thread, ftype, idef, iatoms, idef->numNonperturbedInteractions[ftype],
+ fr->bondedThreading->workDivision, x, ft, fshift, fr, pbc_null, g, grpp,
+ nrnb, lambda, dvdlt, md, fcd, stepWork, global_atom_index);
epot[ftype] += v;
}
}
if (ftype_is_bonded_potential(ftype))
{
const t_ilist& ilist = idef->il[ftype];
- /* Create a temporary t_ilist with only perturbed interactions */
- t_ilist& ilist_fe = idef_fe.il[ftype];
- ilist_fe.iatoms = ilist.iatoms + ilist.nr_nonperturbed;
- ilist_fe.nr_nonperturbed = 0;
- ilist_fe.nr = ilist.nr - ilist.nr_nonperturbed;
+ /* Create a temporary iatom list with only perturbed interactions */
+ const int numNonperturbed = idef->numNonperturbedInteractions[ftype];
+ ArrayRef<const int> iatoms = gmx::constArrayRefFromArray(ilist.iatoms + numNonperturbed,
+ ilist.nr - numNonperturbed);
+ t_ilist& ilist_fe = idef_fe.il[ftype];
/* Set the work range of thread 0 to the perturbed bondeds */
workDivision.setBound(ftype, 0, 0);
- workDivision.setBound(ftype, 1, ilist_fe.nr);
+ workDivision.setBound(ftype, 1, iatoms.ssize());
if (ilist_fe.nr > 0)
{
gmx::StepWorkload tempFlags;
tempFlags.computeEnergy = true;
- v = calc_one_bond(0, ftype, &idef_fe, workDivision, x, f, fshift, fr, pbc_null, g,
- grpp, nrnb, lambda, dvdl_dum, md, fcd, tempFlags, global_atom_index);
+ v = calc_one_bond(0, ftype, idef, iatoms, iatoms.ssize(), workDivision, x, f,
+ fshift, fr, pbc_null, g, grpp, nrnb, lambda, dvdl_dum, md, fcd,
+ tempFlags, global_atom_index);
epot[ftype] += v;
}
}
const t_ilist& ilist = idef.il[ftype];
- return (idef.ilsort != ilsortNO_FE && ilist.nr_nonperturbed != ilist.nr);
+ return (idef.ilsort != ilsortNO_FE && idef.numNonperturbedInteractions[ftype] != ilist.nr);
}
//! Divides bonded interactions over threads and GPU
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/timing/wallcycle.h"
-#include "gromacs/topology/block.h"
#include "gromacs/topology/ifunc.h"
#include "gromacs/topology/mtop_lookup.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/pleasecite.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/txtdump.h"
//! The number of flexible constraints.
int nflexcon = 0;
//! A list of atoms to constraints for each moleculetype.
- std::vector<t_blocka> at2con_mt;
+ std::vector<ListOfLists<int>> at2con_mt;
//! A list of atoms to settles for each moleculetype
std::vector<std::vector<int>> at2settle_mt;
//! LINCS data.
* by the constraint coordinate communication routine,
* so that here we can use normal pbc.
*/
- pbc_null = set_pbc_dd(&pbc, ir.ePBC, DOMAINDECOMP(cr) ? cr->dd->nc : nullptr, FALSE, box);
+ pbc_null = set_pbc_dd(&pbc, ir.ePBC, DOMAINDECOMP(cr) ? cr->dd->numCells : nullptr, FALSE, box);
}
else
{
* \param[in] numAtoms The number of atoms to construct the list for
* \param[in] ilists The interaction lists, size F_NRE
* \param[in] iparams Interaction parameters, can be null when
- * flexibleConstraintTreatment=Include \param[in] flexibleConstraintTreatment The flexible
- * constraint treatment, see enum above \returns a block struct with all constraints for each atom
+ * \p flexibleConstraintTreatment==Include
+ * \param[in] flexibleConstraintTreatment The flexible constraint treatment,
+ * see enum above
+ *
+ * \returns a block struct with all constraints for each atom
*/
template<typename T>
-static t_blocka makeAtomsToConstraintsList(int numAtoms,
- const T* ilists,
- const t_iparams* iparams,
- FlexibleConstraintTreatment flexibleConstraintTreatment)
+static ListOfLists<int> makeAtomsToConstraintsList(int numAtoms,
+ const T* ilists,
+ const t_iparams* iparams,
+ FlexibleConstraintTreatment flexibleConstraintTreatment)
{
GMX_ASSERT(flexibleConstraintTreatment == FlexibleConstraintTreatment::Include || iparams != nullptr,
"With flexible constraint detection we need valid iparams");
}
}
- t_blocka at2con;
- at2con.nr = numAtoms;
- at2con.nalloc_index = at2con.nr + 1;
- snew(at2con.index, at2con.nalloc_index);
- at2con.index[0] = 0;
+ std::vector<int> listRanges(numAtoms + 1);
for (int a = 0; a < numAtoms; a++)
{
- at2con.index[a + 1] = at2con.index[a] + count[a];
- count[a] = 0;
+ listRanges[a + 1] = listRanges[a] + count[a];
+ count[a] = 0;
}
- at2con.nra = at2con.index[at2con.nr];
- at2con.nalloc_a = at2con.nra;
- snew(at2con.a, at2con.nalloc_a);
+ std::vector<int> elements(listRanges[numAtoms]);
/* The F_CONSTRNC constraints have constraint numbers
* that continue after the last F_CONSTR constraint.
{
for (int j = 1; j < 3; j++)
{
- int a = ilist.iatoms[i + j];
- at2con.a[at2con.index[a] + count[a]++] = numConstraints;
+ const int a = ilist.iatoms[i + j];
+ elements[listRanges[a] + count[a]++] = numConstraints;
}
}
numConstraints++;
}
}
- return at2con;
+ return ListOfLists<int>(std::move(listRanges), std::move(elements));
}
-t_blocka make_at2con(int numAtoms,
- const t_ilist* ilist,
- const t_iparams* iparams,
- FlexibleConstraintTreatment flexibleConstraintTreatment)
+ListOfLists<int> make_at2con(int numAtoms,
+ const t_ilist* ilist,
+ const t_iparams* iparams,
+ FlexibleConstraintTreatment flexibleConstraintTreatment)
{
return makeAtomsToConstraintsList(numAtoms, ilist, iparams, flexibleConstraintTreatment);
}
-t_blocka make_at2con(const gmx_moltype_t& moltype,
- gmx::ArrayRef<const t_iparams> iparams,
- FlexibleConstraintTreatment flexibleConstraintTreatment)
+ListOfLists<int> make_at2con(const gmx_moltype_t& moltype,
+ gmx::ArrayRef<const t_iparams> iparams,
+ FlexibleConstraintTreatment flexibleConstraintTreatment)
{
return makeAtomsToConstraintsList(moltype.atoms.nr, moltype.ilist.data(), iparams.data(),
flexibleConstraintTreatment);
* indices to constraint indices.
*
* Note that flexible constraints are only enabled with a dynamical integrator. */
-static std::vector<t_blocka> makeAtomToConstraintMappings(const gmx_mtop_t& mtop,
- FlexibleConstraintTreatment flexibleConstraintTreatment)
+static std::vector<ListOfLists<int>> makeAtomToConstraintMappings(const gmx_mtop_t& mtop,
+ FlexibleConstraintTreatment flexibleConstraintTreatment)
{
- std::vector<t_blocka> mapping;
+ std::vector<ListOfLists<int>> mapping;
mapping.reserve(mtop.moltype.size());
for (const gmx_moltype_t& moltype : mtop.moltype)
{
Constraints::Impl::~Impl()
{
- for (auto blocka : at2con_mt)
- {
- done_blocka(&blocka);
- }
if (bSettleErrorHasOccurred != nullptr)
{
sfree(bSettleErrorHasOccurred);
impl_->ed = ed;
}
-ArrayRef<const t_blocka> Constraints::atom2constraints_moltype() const
+ArrayRef<const ListOfLists<int>> Constraints::atom2constraints_moltype() const
{
return impl_->at2con_mt;
}
struct gmx_multisim_t;
struct gmx_wallcycle;
struct pull_t;
-struct t_blocka;
struct t_commrec;
struct t_ilist;
struct t_inputrec;
{
template<typename T>
class ArrayRefWithPadding;
+template<typename>
+class ListOfLists;
//! Describes supported flavours of constrained updates.
enum class ConstraintVariable : int
//! Links the essentialdynamics and constraint code.
void saveEdsamPointer(gmx_edsam* ed);
//! Getter for use by domain decomposition.
- ArrayRef<const t_blocka> atom2constraints_moltype() const;
+ ArrayRef<const ListOfLists<int>> atom2constraints_moltype() const;
//! Getter for use by domain decomposition.
ArrayRef<const std::vector<int>> atom2settle_moltype() const;
/*! \brief Returns the flexible constraint treatment depending on whether the integrator is dynamic */
FlexibleConstraintTreatment flexibleConstraintTreatment(bool haveDynamicsIntegrator);
-/*! \brief Returns a block struct to go from atoms to constraints
+/*! \brief Returns a ListOfLists object to go from atoms to constraints
*
- * The block struct will contain constraint indices with lower indices
+ * The object will contain constraint indices with lower indices
* directly matching the order in F_CONSTR and higher indices matching
* the order in F_CONSTRNC offset by the number of constraints in F_CONSTR.
*
* \param[in] moltype The molecule data
* \param[in] iparams Interaction parameters, can be null when
- * flexibleConstraintTreatment=Include \param[in] flexibleConstraintTreatment The flexible
- * constraint treatment, see enum above \returns a block struct with all constraints for each atom
+ * \p flexibleConstraintTreatment==Include
+ * \param[in] flexibleConstraintTreatment The flexible constraint treatment,
+ * see enum above
+ *
+ * \returns a ListOfLists object with all constraints for each atom
*/
-t_blocka make_at2con(const gmx_moltype_t& moltype,
- gmx::ArrayRef<const t_iparams> iparams,
- FlexibleConstraintTreatment flexibleConstraintTreatment);
+ListOfLists<int> make_at2con(const gmx_moltype_t& moltype,
+ gmx::ArrayRef<const t_iparams> iparams,
+ FlexibleConstraintTreatment flexibleConstraintTreatment);
-/*! \brief Returns a block struct to go from atoms to constraints
+/*! \brief Returns a ListOfLists object to go from atoms to constraints
*
- * The block struct will contain constraint indices with lower indices
+ * The object will contain constraint indices with lower indices
* directly matching the order in F_CONSTR and higher indices matching
* the order in F_CONSTRNC offset by the number of constraints in F_CONSTR.
*
* \param[in] numAtoms The number of atoms to construct the list for
* \param[in] ilist Interaction list, size F_NRE
* \param[in] iparams Interaction parameters, can be null when
- * flexibleConstraintTreatment=Include \param[in] flexibleConstraintTreatment The flexible
- * constraint treatment, see enum above \returns a block struct with all constraints for each atom
+ * \p flexibleConstraintTreatment==Include
+ * \param[in] flexibleConstraintTreatment The flexible constraint treatment,
+ * see enum above
+ *
+ * \returns a ListOfLists object with all constraints for each atom
*/
-t_blocka make_at2con(int numAtoms,
- const t_ilist* ilist,
- const t_iparams* iparams,
- FlexibleConstraintTreatment flexibleConstraintTreatment);
-
-/*! \brief Returns an array of atom to constraints lists for the moltypes */
-const t_blocka* atom2constraints_moltype(const Constraints* constr);
+ListOfLists<int> make_at2con(int numAtoms,
+ const t_ilist* ilist,
+ const t_iparams* iparams,
+ FlexibleConstraintTreatment flexibleConstraintTreatment);
//! Return the number of flexible constraints in the \c ilist and \c iparams.
int countFlexibleConstraints(const t_ilist* ilist, const t_iparams* iparams);
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdtypes/inputrec.h"
-#include "gromacs/topology/block.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/real.h"
-#include "gromacs/utility/smalloc.h"
namespace gmx
{
//! Recursing function to help find all adjacent constraints.
-static void constr_recur(const t_blocka* at2con,
+static void constr_recur(const ListOfLists<int>& at2con,
const InteractionLists& ilist,
gmx::ArrayRef<const t_iparams> iparams,
gmx_bool bTopB,
int at,
int depth,
int nc,
- int* path,
+ ArrayRef<int> path,
real r0,
real r1,
real* r2max,
int* count)
{
- int c, con, a1;
gmx_bool bUse;
real len, rn0, rn1;
gmx::ArrayRef<const int> ia2 = ilist[F_CONSTRNC].iatoms;
/* Loop over all constraints connected to this atom */
- for (c = at2con->index[at]; c < at2con->index[at + 1]; c++)
+ for (const int con : at2con[at])
{
- con = at2con->a[c];
/* Do not walk over already used constraints */
bUse = TRUE;
- for (a1 = 0; a1 < depth; a1++)
+ for (int a1 = 0; a1 < depth; a1++)
{
if (con == path[a1])
{
fprintf(debug,
"Found longer constraint distance: r0 %5.3f r1 %5.3f rmax %5.3f\n", rn0,
rn1, sqrt(*r2max));
- for (a1 = 0; a1 < depth; a1++)
+ for (int a1 = 0; a1 < depth; a1++)
{
fprintf(debug, " %d %5.3f", path[a1],
iparams[constr_iatomptr(ia1, ia2, con)[0]].constr.dA);
*/
if (depth + 1 < nc && *count < 1000 * nc)
{
+ int a1;
if (ia[1] == at)
{
a1 = ia[2];
gmx::ArrayRef<const t_iparams> iparams,
const t_inputrec* ir)
{
- int natoms, *path, at, count;
+ int natoms, at, count;
- t_blocka at2con;
- real r0, r1, r2maxA, r2maxB, rmax, lam0, lam1;
+ real r0, r1, r2maxA, r2maxB, rmax, lam0, lam1;
if (molt->ilist[F_CONSTR].size() == 0 && molt->ilist[F_CONSTRNC].size() == 0)
{
natoms = molt->atoms.nr;
- at2con = make_at2con(*molt, iparams, flexibleConstraintTreatment(EI_DYNAMICS(ir->eI)));
- snew(path, 1 + ir->nProjOrder);
+ const ListOfLists<int> at2con =
+ make_at2con(*molt, iparams, flexibleConstraintTreatment(EI_DYNAMICS(ir->eI)));
+ std::vector<int> path(1 + ir->nProjOrder);
for (at = 0; at < 1 + ir->nProjOrder; at++)
{
path[at] = -1;
r1 = 0;
count = 0;
- constr_recur(&at2con, molt->ilist, iparams, FALSE, at, 0, 1 + ir->nProjOrder, path, r0, r1,
+ constr_recur(at2con, molt->ilist, iparams, FALSE, at, 0, 1 + ir->nProjOrder, path, r0, r1,
&r2maxA, &count);
}
if (ir->efep == efepNO)
r0 = 0;
r1 = 0;
count = 0;
- constr_recur(&at2con, molt->ilist, iparams, TRUE, at, 0, 1 + ir->nProjOrder, path, r0,
+ constr_recur(at2con, molt->ilist, iparams, TRUE, at, 0, 1 + ir->nProjOrder, path, r0,
r1, &r2maxB, &count);
}
lam0 = ir->fepvals->init_lambda;
}
}
- done_blocka(&at2con);
- sfree(path);
-
return rmax;
}
*/
for (const gmx_molblock_t& molb : mtop.molblock)
{
- const int nmol = molb.nmol;
- const t_atoms* atoms = &mtop.moltype[molb.type].atoms;
- const t_blocka* excl = &mtop.moltype[molb.type].excls;
+ const int nmol = molb.nmol;
+ const t_atoms* atoms = &mtop.moltype[molb.type].atoms;
+ const auto& excl = mtop.moltype[molb.type].excls;
for (int i = 0; (i < atoms->nr); i++)
{
const int tpi = atomtypeAOrB(atoms->atom[i], q);
- const int j1 = excl->index[i];
- const int j2 = excl->index[i + 1];
- for (int j = j1; j < j2; j++)
+ for (const int k : excl[i])
{
- const int k = excl->a[j];
if (k > i)
{
const int tpj = atomtypeAOrB(atoms->atom[k], q);
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/* do QMMM first if requested */
if (fr->bQMMM)
{
- enerd->term[F_EQM] = calculate_QMMM(cr, &forceOutputs->forceWithShiftForces(), fr);
+ enerd->term[F_EQM] = calculate_QMMM(cr, &forceOutputs->forceWithShiftForces(), fr->qr);
}
/* Call the short range functions all in one go. */
/* Since all atoms are in the rectangular or triclinic unit-cell,
* only single box vector shifts (2 in x) are required.
*/
- set_pbc_dd(&pbc, fr->ePBC, DOMAINDECOMP(cr) ? cr->dd->nc : nullptr, TRUE, box);
+ set_pbc_dd(&pbc, fr->ePBC, DOMAINDECOMP(cr) ? cr->dd->numCells : nullptr, TRUE, box);
}
do_force_listed(wcycle, box, ir->fepvals, cr, ms, idef, x, hist, forceOutputs, fr, &pbc,
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013-2019, by the GROMACS development team, led by
+ * Copyright (c) 2013-2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
static const bool c_enableGpuPmePpComms =
(getenv("GMX_GPU_PME_PP_COMMS") != nullptr) && GMX_THREAD_MPI && (GMX_GPU == GMX_GPU_CUDA);
-static real* mk_nbfp(const gmx_ffparams_t* idef, gmx_bool bBHAM)
+static std::vector<real> mk_nbfp(const gmx_ffparams_t* idef, gmx_bool bBHAM)
{
- real* nbfp;
- int i, j, k, atnr;
+ std::vector<real> nbfp;
+ int atnr;
atnr = idef->atnr;
if (bBHAM)
{
- snew(nbfp, 3 * atnr * atnr);
- for (i = k = 0; (i < atnr); i++)
+ nbfp.resize(3 * atnr * atnr);
+ int k = 0;
+ for (int i = 0; (i < atnr); i++)
{
- for (j = 0; (j < atnr); j++, k++)
+ for (int j = 0; (j < atnr); j++, k++)
{
BHAMA(nbfp, atnr, i, j) = idef->iparams[k].bham.a;
BHAMB(nbfp, atnr, i, j) = idef->iparams[k].bham.b;
}
else
{
- snew(nbfp, 2 * atnr * atnr);
- for (i = k = 0; (i < atnr); i++)
+ nbfp.resize(2 * atnr * atnr);
+ int k = 0;
+ for (int i = 0; (i < atnr); i++)
{
- for (j = 0; (j < atnr); j++, k++)
+ for (int j = 0; (j < atnr); j++, k++)
{
/* nbfp now includes the 6.0/12.0 derivative prefactors */
C6(nbfp, atnr, i, j) = idef->iparams[k].lj.c6 * 6.0;
acSETTLE
};
-static cginfo_mb_t* init_cginfo_mb(const gmx_mtop_t* mtop, const t_forcerec* fr, gmx_bool* bFEP_NonBonded)
+static std::vector<cginfo_mb_t> init_cginfo_mb(const gmx_mtop_t* mtop, const t_forcerec* fr, gmx_bool* bFEP_NonBonded)
{
- cginfo_mb_t* cginfo_mb;
- gmx_bool* type_VDW;
- int* cginfo;
- int* a_con;
-
- snew(cginfo_mb, mtop->molblock.size());
+ gmx_bool* type_VDW;
+ int* a_con;
snew(type_VDW, fr->ntype);
for (int ai = 0; ai < fr->ntype; ai++)
*bFEP_NonBonded = FALSE;
- int a_offset = 0;
+ std::vector<cginfo_mb_t> cginfoPerMolblock;
+ int a_offset = 0;
for (size_t mb = 0; mb < mtop->molblock.size(); mb++)
{
const gmx_molblock_t& molb = mtop->molblock[mb];
const gmx_moltype_t& molt = mtop->moltype[molb.type];
- const t_blocka& excl = molt.excls;
+ const auto& excl = molt.excls;
/* Check if the cginfo is identical for all molecules in this block.
* If so, we only need an array of the size of one molecule.
}
}
- cginfo_mb[mb].cg_start = a_offset;
- cginfo_mb[mb].cg_end = a_offset + molb.nmol * molt.atoms.nr;
- cginfo_mb[mb].cg_mod = (bId ? 1 : molb.nmol) * molt.atoms.nr;
- snew(cginfo_mb[mb].cginfo, cginfo_mb[mb].cg_mod);
- cginfo = cginfo_mb[mb].cginfo;
+ cginfo_mb_t cginfo_mb;
+ cginfo_mb.cg_start = a_offset;
+ cginfo_mb.cg_end = a_offset + molb.nmol * molt.atoms.nr;
+ cginfo_mb.cg_mod = (bId ? 1 : molb.nmol) * molt.atoms.nr;
+ cginfo_mb.cginfo.resize(cginfo_mb.cg_mod);
+ gmx::ArrayRef<int> cginfo = cginfo_mb.cginfo;
/* Set constraints flags for constrained atoms */
snew(a_con, molt.atoms.nr);
bool haveExclusions = false;
/* Loop over all the exclusions of atom ai */
- for (int j = excl.index[a]; j < excl.index[a + 1]; j++)
+ for (const int j : excl[a])
{
- if (excl.a[j] != a)
+ if (j != a)
{
haveExclusions = true;
break;
sfree(a_con);
+ cginfoPerMolblock.push_back(cginfo_mb);
+
a_offset += molb.nmol * molt.atoms.nr;
}
sfree(type_VDW);
- return cginfo_mb;
+ return cginfoPerMolblock;
}
-static std::vector<int> cginfo_expand(const int nmb, const cginfo_mb_t* cgi_mb)
+static std::vector<int> cginfo_expand(const int nmb, gmx::ArrayRef<const cginfo_mb_t> cgi_mb)
{
const int ncg = cgi_mb[nmb - 1].cg_end;
return cginfo;
}
-static void done_cginfo_mb(cginfo_mb_t* cginfo_mb, int numMolBlocks)
-{
- if (cginfo_mb == nullptr)
- {
- return;
- }
- for (int mb = 0; mb < numMolBlocks; ++mb)
- {
- sfree(cginfo_mb[mb].cginfo);
- }
- sfree(cginfo_mb);
-}
-
/* Sets the sum of charges (squared) and C6 in the system in fr.
* Returns whether the system has a net charge.
*/
fr->shiftForces.resize(SHIFTS);
- if (fr->nbfp == nullptr)
+ if (fr->nbfp.empty())
{
fr->ntype = mtop->ffparams.atnr;
fr->nbfp = mk_nbfp(&mtop->ffparams, fr->bBHAM);
if (ir->eDispCorr != edispcNO)
{
fr->dispersionCorrection = std::make_unique<DispersionCorrection>(
- *mtop, *ir, fr->bBHAM, fr->ntype,
- gmx::arrayRefFromArray(fr->nbfp, fr->ntype * fr->ntype * 2), *fr->ic, tabfn);
+ *mtop, *ir, fr->bBHAM, fr->ntype, fr->nbfp, *fr->ic, tabfn);
fr->dispersionCorrection->print(mdlog);
}
t_forcerec::t_forcerec() = default;
-t_forcerec::~t_forcerec() = default;
-
-/* Frees GPU memory and sets a tMPI node barrier.
- *
- * Note that this function needs to be called even if GPUs are not used
- * in this run because the PME ranks have no knowledge of whether GPUs
- * are used or not, but all ranks need to enter the barrier below.
- * \todo Remove physical node barrier from this function after making sure
- * that it's not needed anymore (with a shared GPU run).
- */
-void free_gpu_resources(t_forcerec* fr,
- const gmx::PhysicalNodeCommunicator& physicalNodeCommunicator,
- const gmx_gpu_info_t& gpu_info)
+t_forcerec::~t_forcerec()
{
- bool isPPrankUsingGPU = (fr != nullptr) && (fr->nbv != nullptr) && fr->nbv->useGpu();
-
- /* stop the GPU profiler (only CUDA) */
- if (gpu_info.n_dev > 0)
- {
- stopGpuProfiler();
- }
-
- if (isPPrankUsingGPU)
- {
- /* Free data in GPU memory and pinned memory before destroying the GPU context */
- fr->nbv.reset();
-
- delete fr->gpuBonded;
- fr->gpuBonded = nullptr;
- }
-
- /* With tMPI we need to wait for all ranks to finish deallocation before
- * destroying the CUDA context in free_gpu() as some tMPI ranks may be sharing
- * GPU and context.
- *
- * This is not a concern in OpenCL where we use one context per rank which
- * is freed in nbnxn_gpu_free().
- *
- * Note: it is safe to not call the barrier on the ranks which do not use GPU,
- * but it is easier and more futureproof to call it on the whole node.
- */
- if (GMX_THREAD_MPI)
- {
- physicalNodeCommunicator.barrier();
- }
-}
-
-void done_forcerec(t_forcerec* fr, int numMolBlocks)
-{
- if (fr == nullptr)
- {
- // PME-only ranks don't have a forcerec
- return;
- }
- done_cginfo_mb(fr->cginfo_mb, numMolBlocks);
- sfree(fr->nbfp);
- delete fr->ic;
- sfree(fr->shift_vec);
- sfree(fr->ewc_t);
- tear_down_bonded_threading(fr->bondedThreading);
- GMX_RELEASE_ASSERT(fr->gpuBonded == nullptr, "Should have been deleted earlier, when used");
- fr->bondedThreading = nullptr;
- delete fr;
+ /* Note: This code will disappear when types are converted to C++ */
+ sfree(shift_vec);
+ sfree(ewc_t);
+ tear_down_bonded_threading(bondedThreading);
}
class PhysicalNodeCommunicator;
} // namespace gmx
-//! Destroy a forcerec.
-void done_forcerec(t_forcerec* fr, int numMolBlocks);
-
/*! \brief Print the contents of the forcerec to a file
*
* \param[in] fplog The log file to print to
*/
void forcerec_set_excl_load(t_forcerec* fr, const gmx_localtop_t* top);
-void free_gpu_resources(t_forcerec* fr,
- const gmx::PhysicalNodeCommunicator& physicalNodeCommunicator,
- const gmx_gpu_info_t& gpu_info);
-
#endif
freeDeviceBuffer(&d_inverseMasses_);
}
-void LeapFrogCuda::setPbc(const t_pbc* pbc)
-{
- setPbcAiuc(pbc->ndim_ePBC, pbc->box, &pbcAiuc_);
-}
-
void LeapFrogCuda::set(const t_mdatoms& md, const int numTempScaleValues, const unsigned short* tempScaleGroups)
{
numAtoms_ = md.nr;
LeapFrogCuda(CommandStream commandStream);
~LeapFrogCuda();
- /*! \brief
- * Update PBC data.
- *
- * Converts PBC data from t_pbc into the PbcAiuc format and stores the latter.
- *
- * \param[in] pbc The PBC data in t_pbc format.
- */
- void setPbc(const t_pbc* pbc);
-
/*! \brief Integrate
*
* Integrates the equation of motion using Leap-Frog algorithm.
CommandStream commandStream_;
//! CUDA kernel launch config
KernelLaunchConfig kernelLaunchConfig_;
- //! Periodic boundary data
- PbcAiuc pbcAiuc_;
//! Number of atoms
int numAtoms_;
#include "gromacs/simd/simd.h"
#include "gromacs/simd/simd_math.h"
#include "gromacs/simd/vector_operations.h"
-#include "gromacs/topology/block.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/utility/alignedallocator.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/pleasecite.h"
using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
}
//! Finds all triangles of atoms that share constraints to a central atom.
-static int count_triangle_constraints(const InteractionLists& ilist, const t_blocka& at2con)
+static int count_triangle_constraints(const InteractionLists& ilist, const ListOfLists<int>& at2con)
{
- int ncon1, ncon_tot;
- int c0, n1, c1, ac1, n2, c2;
- int ncon_triangle;
-
- ncon1 = ilist[F_CONSTR].size() / 3;
- ncon_tot = ncon1 + ilist[F_CONSTRNC].size() / 3;
+ const int ncon1 = ilist[F_CONSTR].size() / 3;
+ const int ncon_tot = ncon1 + ilist[F_CONSTRNC].size() / 3;
gmx::ArrayRef<const int> ia1 = ilist[F_CONSTR].iatoms;
gmx::ArrayRef<const int> ia2 = ilist[F_CONSTRNC].iatoms;
- ncon_triangle = 0;
- for (c0 = 0; c0 < ncon_tot; c0++)
+ int ncon_triangle = 0;
+ for (int c0 = 0; c0 < ncon_tot; c0++)
{
bool bTriangle = FALSE;
const int* iap = constr_iatomptr(ia1, ia2, c0);
const int a00 = iap[1];
const int a01 = iap[2];
- for (n1 = at2con.index[a01]; n1 < at2con.index[a01 + 1]; n1++)
+ for (const int c1 : at2con[a01])
{
- c1 = at2con.a[n1];
if (c1 != c0)
{
const int* iap = constr_iatomptr(ia1, ia2, c1);
const int a10 = iap[1];
const int a11 = iap[2];
+ int ac1;
if (a10 == a01)
{
ac1 = a11;
{
ac1 = a10;
}
- for (n2 = at2con.index[ac1]; n2 < at2con.index[ac1 + 1]; n2++)
+ for (const int c2 : at2con[ac1])
{
- c2 = at2con.a[n2];
if (c2 != c0 && c2 != c1)
{
const int* iap = constr_iatomptr(ia1, ia2, c2);
}
//! Finds sequences of sequential constraints.
-static bool more_than_two_sequential_constraints(const InteractionLists& ilist, const t_blocka& at2con)
+static bool more_than_two_sequential_constraints(const InteractionLists& ilist, const ListOfLists<int>& at2con)
{
- int ncon1, ncon_tot, c;
- bool bMoreThanTwoSequentialConstraints;
-
- ncon1 = ilist[F_CONSTR].size() / 3;
- ncon_tot = ncon1 + ilist[F_CONSTRNC].size() / 3;
+ const int ncon1 = ilist[F_CONSTR].size() / 3;
+ const int ncon_tot = ncon1 + ilist[F_CONSTRNC].size() / 3;
gmx::ArrayRef<const int> ia1 = ilist[F_CONSTR].iatoms;
gmx::ArrayRef<const int> ia2 = ilist[F_CONSTRNC].iatoms;
- bMoreThanTwoSequentialConstraints = FALSE;
- for (c = 0; c < ncon_tot && !bMoreThanTwoSequentialConstraints; c++)
+ for (int c = 0; c < ncon_tot; c++)
{
const int* iap = constr_iatomptr(ia1, ia2, c);
const int a1 = iap[1];
const int a2 = iap[2];
/* Check if this constraint has constraints connected at both atoms */
- if (at2con.index[a1 + 1] - at2con.index[a1] > 1 && at2con.index[a2 + 1] - at2con.index[a2] > 1)
+ if (at2con[a1].ssize() > 1 && at2con[a2].ssize() > 1)
{
- bMoreThanTwoSequentialConstraints = TRUE;
+ return true;
}
}
- return bMoreThanTwoSequentialConstraints;
+ return false;
}
-Lincs* init_lincs(FILE* fplog,
- const gmx_mtop_t& mtop,
- int nflexcon_global,
- ArrayRef<const t_blocka> at2con,
- bool bPLINCS,
- int nIter,
- int nProjOrder)
+Lincs* init_lincs(FILE* fplog,
+ const gmx_mtop_t& mtop,
+ int nflexcon_global,
+ ArrayRef<const ListOfLists<int>> atomToConstraintsPerMolType,
+ bool bPLINCS,
+ int nIter,
+ int nProjOrder)
{
// TODO this should become a unique_ptr
Lincs* li;
li->max_connect = 0;
for (size_t mt = 0; mt < mtop.moltype.size(); mt++)
{
+ const auto& at2con = atomToConstraintsPerMolType[mt];
for (int a = 0; a < mtop.moltype[mt].atoms.nr; a++)
{
- li->max_connect = std::max(li->max_connect, at2con[mt].index[a + 1] - at2con[mt].index[a]);
+ li->max_connect = std::max(li->max_connect, int(at2con[a].ssize()));
}
}
bMoreThanTwoSeq = FALSE;
for (const gmx_molblock_t& molb : mtop.molblock)
{
- const gmx_moltype_t& molt = mtop.moltype[molb.type];
+ const gmx_moltype_t& molt = mtop.moltype[molb.type];
+ const auto& at2con = atomToConstraintsPerMolType[molb.type];
- li->ncg_triangle += molb.nmol * count_triangle_constraints(molt.ilist, at2con[molb.type]);
+ li->ncg_triangle += molb.nmol * count_triangle_constraints(molt.ilist, at2con);
- if (!bMoreThanTwoSeq && more_than_two_sequential_constraints(molt.ilist, at2con[molb.type]))
+ if (!bMoreThanTwoSeq && more_than_two_sequential_constraints(molt.ilist, at2con))
{
bMoreThanTwoSeq = TRUE;
}
}
//! Assign a constraint.
-static void assign_constraint(Lincs* li, int constraint_index, int a1, int a2, real lenA, real lenB, const t_blocka* at2con)
+static void assign_constraint(Lincs* li,
+ int constraint_index,
+ int a1,
+ int a2,
+ real lenA,
+ real lenB,
+ const ListOfLists<int>& at2con)
{
int con;
/* Make space in the constraint connection matrix for constraints
* connected to both end of the current constraint.
*/
- li->ncc += at2con->index[a1 + 1] - at2con->index[a1] - 1 + at2con->index[a2 + 1]
- - at2con->index[a2] - 1;
+ li->ncc += at2con[a1].ssize() - 1 + at2con[a2].ssize() - 1;
li->blnr[con + 1] = li->ncc;
/*! \brief Check if constraint with topology index constraint_index is connected
* to other constraints, and if so add those connected constraints to our task. */
-static void check_assign_connected(Lincs* li,
- const t_iatom* iatom,
- const t_idef& idef,
- bool bDynamics,
- int a1,
- int a2,
- const t_blocka* at2con)
+static void check_assign_connected(Lincs* li,
+ const t_iatom* iatom,
+ const t_idef& idef,
+ bool bDynamics,
+ int a1,
+ int a2,
+ const ListOfLists<int>& at2con)
{
/* Currently this function only supports constraint groups
* in which all constraints share at least one atom
* connected constraints. We need to assign those
* to the same task.
*/
- int end;
-
- for (end = 0; end < 2; end++)
+ for (int end = 0; end < 2; end++)
{
- int a, k;
+ const int a = (end == 0 ? a1 : a2);
- a = (end == 0 ? a1 : a2);
-
- for (k = at2con->index[a]; k < at2con->index[a + 1]; k++)
+ for (const int cc : at2con[a])
{
- int cc;
-
- cc = at2con->a[k];
/* Check if constraint cc has not yet been assigned */
if (li->con_index[cc] == -1)
{
- int type;
- real lenA, lenB;
-
- type = iatom[cc * 3];
- lenA = idef.iparams[type].constr.dA;
- lenB = idef.iparams[type].constr.dB;
+ const int type = iatom[cc * 3];
+ const real lenA = idef.iparams[type].constr.dA;
+ const real lenB = idef.iparams[type].constr.dB;
if (bDynamics || lenA != 0 || lenB != 0)
{
/*! \brief Check if constraint with topology index constraint_index is involved
* in a constraint triangle, and if so add the other two constraints
* in the triangle to our task. */
-static void check_assign_triangle(Lincs* li,
- const t_iatom* iatom,
- const t_idef& idef,
- bool bDynamics,
- int constraint_index,
- int a1,
- int a2,
- const t_blocka* at2con)
+static void check_assign_triangle(Lincs* li,
+ const t_iatom* iatom,
+ const t_idef& idef,
+ bool bDynamics,
+ int constraint_index,
+ int a1,
+ int a2,
+ const ListOfLists<int>& at2con)
{
- int nca, cc[32], ca[32], k;
+ int nca, cc[32], ca[32];
int c_triangle[2] = { -1, -1 };
nca = 0;
- for (k = at2con->index[a1]; k < at2con->index[a1 + 1]; k++)
+ for (const int c : at2con[a1])
{
- int c;
-
- c = at2con->a[k];
if (c != constraint_index)
{
int aa1, aa2;
}
}
- for (k = at2con->index[a2]; k < at2con->index[a2 + 1]; k++)
+ for (const int c : at2con[a2])
{
- int c;
-
- c = at2con->a[k];
if (c != constraint_index)
{
int aa1, aa2, i;
}
//! Sets matrix indices.
-static void set_matrix_indices(Lincs* li, const Task& li_task, const t_blocka* at2con, bool bSortMatrix)
+static void set_matrix_indices(Lincs* li, const Task& li_task, const ListOfLists<int>& at2con, bool bSortMatrix)
{
for (int b = li_task.b0; b < li_task.b1; b++)
{
const int a2 = li->atoms[b].index2;
int i = li->blnr[b];
- for (int k = at2con->index[a1]; k < at2con->index[a1 + 1]; k++)
+ for (const int constraint : at2con[a1])
{
- int concon = li->con_index[at2con->a[k]];
+ const int concon = li->con_index[constraint];
if (concon != b)
{
li->blbnb[i++] = concon;
}
}
- for (int k = at2con->index[a2]; k < at2con->index[a2 + 1]; k++)
+ for (const int constraint : at2con[a2])
{
- int concon = li->con_index[at2con->a[k]];
+ const int concon = li->con_index[constraint];
if (concon != b)
{
li->blbnb[i++] = concon;
void set_lincs(const t_idef& idef, const t_mdatoms& md, bool bDynamics, const t_commrec* cr, Lincs* li)
{
int natoms;
- t_blocka at2con;
t_iatom* iatom;
li->nc_real = 0;
natoms = md.homenr;
}
- at2con = make_at2con(natoms, idef.il, idef.iparams, flexibleConstraintTreatment(bDynamics));
+ const ListOfLists<int> at2con =
+ make_at2con(natoms, idef.il, idef.iparams, flexibleConstraintTreatment(bDynamics));
const int ncon_tot = idef.il[F_CONSTR].nr / 3;
/* Skip the flexible constraints when not doing dynamics */
if (bDynamics || lenA != 0 || lenB != 0)
{
- assign_constraint(li, con, a1, a2, lenA, lenB, &at2con);
+ assign_constraint(li, con, a1, a2, lenA, lenB, at2con);
if (li->ntask > 1 && !li->bTaskDep)
{
/* We can generate independent tasks. Check if we
* need to assign connected constraints to our task.
*/
- check_assign_connected(li, iatom, idef, bDynamics, a1, a2, &at2con);
+ check_assign_connected(li, iatom, idef, bDynamics, a1, a2, at2con);
}
if (li->ntask > 1 && li->ncg_triangle > 0)
{
/* Ensure constraints in one triangle are assigned
* to the same task.
*/
- check_assign_triangle(li, iatom, idef, bDynamics, con, a1, a2, &at2con);
+ check_assign_triangle(li, iatom, idef, bDynamics, con, a1, a2, at2con);
}
}
}
li_task.tri_bits.resize(li_task.b1 - li_task.b0);
}
- set_matrix_indices(li, li_task, &at2con, bSortMatrix);
+ set_matrix_indices(li, li_task, at2con, bSortMatrix);
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
- done_blocka(&at2con);
-
if (cr->dd == nullptr)
{
/* Since the matrix is static, we should free some memory */
struct gmx_mtop_t;
struct gmx_multisim_t;
-struct t_blocka;
struct t_commrec;
struct t_idef;
struct t_inputrec;
{
enum class ConstraintVariable : int;
-
-/* Abstract type for LINCS that is defined only in the file that uses it */
class Lincs;
+template<typename>
+class ListOfLists;
+
/*! \brief Return the data for determining constraint RMS relative deviations. */
ArrayRef<real> lincs_rmsdData(Lincs* lincsd);
real lincs_rmsd(const Lincs* lincsd);
/*! \brief Initializes and returns the lincs data struct. */
-Lincs* init_lincs(FILE* fplog,
- const gmx_mtop_t& mtop,
- int nflexcon_global,
- ArrayRef<const t_blocka> at2con,
- bool bPLINCS,
- int nIter,
- int nProjOrder);
+Lincs* init_lincs(FILE* fplog,
+ const gmx_mtop_t& mtop,
+ int nflexcon_global,
+ ArrayRef<const ListOfLists<int>> atomsToConstraintsPerMolType,
+ bool bPLINCS,
+ int nIter,
+ int nProjOrder);
/*! \brief Destructs the lincs object when it is not nullptr. */
void done_lincs(Lincs* li);
* using CUDA, including class initialization, data-structures management
* and GPU kernel.
*
- * \note Management of periodic boundary should be unified with SETTLE and
- * removed from here.
* \todo Reconsider naming, i.e. "cuda" suffics should be changed to "gpu".
*
* \author Artem Zhmurov <zhmurov@gmail.com>
float3* d_v,
const real invdt,
const bool computeVirial,
- tensor virialScaled)
+ tensor virialScaled,
+ const PbcAiuc pbcAiuc)
{
ensureNoPendingCudaError("In CUDA version of LINCS");
}
config.stream = commandStream_;
+ kernelParams_.pbcAiuc = pbcAiuc;
+
const auto kernelArgs =
prepareGpuKernelArguments(kernelPtr, config, &kernelParams_, &d_x, &d_xp, &d_v, &invdt);
GpuApiCallBehavior::Sync, nullptr);
}
-void LincsCuda::setPbc(const t_pbc* pbc)
-{
- setPbcAiuc(pbc->ndim_ePBC, pbc->box, &kernelParams_.pbcAiuc);
-}
-
} // namespace gmx
* Applies LINCS to coordinates and velocities, stored on GPU.
* The results are not automatically copied back to the CPU memory.
* Method uses this class data structures which should be updated
- * when needed using set() and setPbc() method.
+ * when needed using set() method.
*
* \param[in] d_x Coordinates before timestep (in GPU memory)
* \param[in,out] d_xp Coordinates after timestep (in GPU memory). The
* multipliers when velocities are updated)
* \param[in] computeVirial If virial should be updated.
* \param[in,out] virialScaled Scaled virial tensor to be updated.
+ * \param[in] pbcAiuc PBC data.
*/
void apply(const float3* d_x,
float3* d_xp,
float3* d_v,
const real invdt,
const bool computeVirial,
- tensor virialScaled);
+ tensor virialScaled,
+ const PbcAiuc pbcAiuc);
/*! \brief
* Update data-structures (e.g. after NB search step).
*/
void set(const t_idef& idef, const t_mdatoms& md);
- /*! \brief
- * Update PBC data.
- *
- * Converts pbc data from t_pbc into the PbcAiuc format and stores the latter.
- *
- * \todo Remove this method. LINCS should not manage PBC.
- *
- * \param[in] pbc The PBC data in t_pbc format.
- */
- void setPbc(const t_pbc* pbc);
-
/*! \brief
* Returns whether the maximum number of coupled constraints is supported
* by the CUDA LINCS code.
fflush_tng(of->tng_low_prec);
ivec one_ivec = { 1, 1, 1 };
write_checkpoint(of->fn_cpt, of->bKeepAndNumCPT, fplog, cr,
- DOMAINDECOMP(cr) ? cr->dd->nc : one_ivec,
+ DOMAINDECOMP(cr) ? cr->dd->numCells : one_ivec,
DOMAINDECOMP(cr) ? cr->dd->nnodes : cr->nnodes, of->eIntegrator,
of->simulation_part, of->bExpanded, of->elamstats, step, t,
state_global, observablesHistory, *(of->mdModulesNotifier));
bdens0 = (*gr0)[d];
}
/* Check for a DD cell not at a higher edge */
- if (dd != nullptr && gr1 != nullptr && dd->ci[d] < dd->nc[d] - 1)
+ if (dd != nullptr && gr1 != nullptr && dd->ci[d] < dd->numCells[d] - 1)
{
grid_x1[d] = (*gr1)[d];
bdens1 = (*gr1)[d];
grid->cell_offset[i] = izones_x0[i];
size = izones_size[i];
- bDD = (dd != nullptr) && (dd->nc[i] > 1);
+ bDD = (dd != nullptr) && (dd->numCells[i] > 1);
if (!bDD)
{
bDDRect = FALSE;
}
if (bExcl)
{
- ndtot_c += molb.nmol * (molt->excls.nra - molt->atoms.nr) / 2.;
+ ndtot_c += molb.nmol * (molt->excls.numElements() - molt->atoms.nr) / 2.;
}
}
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/force.h"
-#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/utility/cstringutil.h"
}
-static void write_gaussian_SH_input(int step, gmx_bool swap, const t_forcerec* fr, t_QMrec* qm, t_MMrec* mm)
+static void write_gaussian_SH_input(int step, gmx_bool swap, const t_QMMMrec* QMMMrec, t_QMrec* qm, t_MMrec* mm)
{
- int i;
- gmx_bool bSA;
- FILE* out;
- t_QMMMrec* QMMMrec;
- QMMMrec = fr->qr;
- bSA = (qm->SAstep > 0);
-
- out = fopen("input.com", "w");
+ int i;
+ bool bSA = (qm->SAstep > 0);
+ FILE* out = fopen("input.com", "w");
/* write the route */
fprintf(out, "%s", "%scr=input\n");
fprintf(out, "%s", "%rwf=input\n");
fclose(out);
} /* write_gaussian_SH_input */
-static void write_gaussian_input(int step, const t_forcerec* fr, t_QMrec* qm, t_MMrec* mm)
+static void write_gaussian_input(int step, const t_QMMMrec* QMMMrec, t_QMrec* qm, t_MMrec* mm)
{
- int i;
- t_QMMMrec* QMMMrec;
- FILE* out;
+ int i;
- QMMMrec = fr->qr;
- out = fopen("input.com", "w");
+ FILE* out = fopen("input.com", "w");
/* write the route */
if (qm->QMmethod >= eQMmethodRHF)
}
}
-real call_gaussian(const t_forcerec* fr, t_QMrec* qm, t_MMrec* mm, rvec f[], rvec fshift[])
+real call_gaussian(const t_QMMMrec* qmmm, t_QMrec* qm, t_MMrec* mm, rvec f[], rvec fshift[])
{
/* normal gaussian jobs */
static int step = 0;
snew(QMgrad, qm->nrQMatoms);
snew(MMgrad, mm->nrMMatoms);
- write_gaussian_input(step, fr, qm, mm);
+ write_gaussian_input(step, qmmm, qm, mm);
do_gaussian(step, exe);
QMener = read_gaussian_output(QMgrad, MMgrad, qm, mm);
/* put the QMMM forces in the force array and to the fshift
} /* call_gaussian */
-real call_gaussian_SH(const t_forcerec* fr, t_QMrec* qm, t_MMrec* mm, rvec f[], rvec fshift[])
+real call_gaussian_SH(const t_QMMMrec* qmmm, t_QMrec* qm, t_MMrec* mm, rvec f[], rvec fshift[])
{
/* a gaussian call routine intended for doing diabatic surface
* "sliding". See the manual for the theoretical background of this
/* if(!step)
* qr->bSA=FALSE;*/
/* temporray set to step + 1, since there is a chk start */
- write_gaussian_SH_input(step, swapped, fr, qm, mm);
+ write_gaussian_SH_input(step, swapped, qmmm, qm, mm);
do_gaussian(step, exe);
QMener = read_gaussian_SH_output(QMgrad, MMgrad, step, qm, mm);
}
if (swap) /* change surface, so do another call */
{
- write_gaussian_SH_input(step, swapped, fr, qm, mm);
+ write_gaussian_SH_input(step, swapped, qmmm, qm, mm);
do_gaussian(step, exe);
QMener = read_gaussian_SH_output(QMgrad, MMgrad, step, qm, mm);
}
/*! \brief
* Call gaussian to do qm calculation.
*
- * \param[in] fr Global forcerec.
- * \param[in] qm QM part of forcerec.
- * \param[in] mm mm part of forcerec.
- * \param[in] f force vector.
+ * \param[in]  qmmm     QMMM part of forcerec.
+ * \param[in] qm QM part of forcerec.
+ * \param[in] mm mm part of forcerec.
+ * \param[in] f force vector.
* \param[in] fshift shift of force vector.
*/
-real call_gaussian(const t_forcerec* fr, t_QMrec* qm, t_MMrec* mm, rvec f[], rvec fshift[]);
+real call_gaussian(const t_QMMMrec* qmmm, t_QMrec* qm, t_MMrec* mm, rvec f[], rvec fshift[]);
/*! \brief
* Call gaussian SH(?) to do qm calculation.
*
- * \param[in] fr Global forcerec.
- * \param[in] qm QM part of forcerec.
- * \param[in] mm mm part of forcerec.
- * \param[in] f force vector.
+ * \param[in]  qmmm     QMMM part of forcerec.
+ * \param[in] qm QM part of forcerec.
+ * \param[in] mm mm part of forcerec.
+ * \param[in] f force vector.
* \param[in] fshift shift of force vector.
*/
-real call_gaussian_SH(const t_forcerec* fr, t_QMrec* qm, t_MMrec* mm, rvec f[], rvec fshift[]);
+real call_gaussian_SH(const t_QMMMrec* qmmm, t_QMrec* qm, t_MMrec* mm, rvec f[], rvec fshift[]);
#endif
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/qmmm.h"
-#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"
}
-static void write_orca_input(const t_forcerec* fr, t_QMrec* qm, t_MMrec* mm)
+static void write_orca_input(const t_QMMMrec* QMMMrec, t_QMrec* qm, t_MMrec* mm)
{
- int i;
- t_QMMMrec* QMMMrec;
- FILE * out, *pcFile, *addInputFile;
- char * buf, *orcaInput, *addInputFilename, *pcFilename;
-
- QMMMrec = fr->qr;
+ int i;
+ FILE *pcFile, *addInputFile;
+ char *buf, *orcaInput, *addInputFilename, *pcFilename;
/* write the first part of the input-file */
snew(orcaInput, 200);
sprintf(orcaInput, "%s.inp", qm->orca_basename);
- out = fopen(orcaInput, "w");
+ FILE* out = fopen(orcaInput, "w");
snew(addInputFilename, 200);
sprintf(addInputFilename, "%s.ORCAINFO", qm->orca_basename);
fclose(out);
} /* write_orca_input */
-static real read_orca_output(rvec QMgrad[], rvec MMgrad[], const t_forcerec* fr, t_QMrec* qm, t_MMrec* mm)
+static real read_orca_output(rvec QMgrad[], rvec MMgrad[], const t_QMMMrec* QMMMrec, t_QMrec* qm, t_MMrec* mm)
{
- int i, j;
- char buf[300], orca_pcgradFilename[300], orca_engradFilename[300];
- real QMener;
- FILE * pcgrad, *engrad;
- int k;
- t_QMMMrec* QMMMrec;
- QMMMrec = fr->qr;
+ int i, j;
+ char buf[300], orca_pcgradFilename[300], orca_engradFilename[300];
+ real QMener;
+ FILE *pcgrad, *engrad;
+ int k;
/* the energy and gradients for the QM part are stored in the engrad file
* and the gradients for the point charges are stored in the pc file.
}
}
-real call_orca(const t_forcerec* fr, t_QMrec* qm, t_MMrec* mm, rvec f[], rvec fshift[])
+real call_orca(const t_QMMMrec* qmmm, t_QMrec* qm, t_MMrec* mm, rvec f[], rvec fshift[])
{
/* normal orca jobs */
static int step = 0;
snew(QMgrad, qm->nrQMatoms);
snew(MMgrad, mm->nrMMatoms);
- write_orca_input(fr, qm, mm);
+ write_orca_input(qmmm, qm, mm);
do_orca(qm->orca_dir, qm->orca_basename);
- QMener = read_orca_output(QMgrad, MMgrad, fr, qm, mm);
+ QMener = read_orca_output(QMgrad, MMgrad, qmmm, qm, mm);
/* put the QMMM forces in the force array and to the fshift
*/
for (i = 0; i < qm->nrQMatoms; i++)
void init_orca(t_QMrec* qm);
-real call_orca(const t_forcerec* fr, t_QMrec* qm, t_MMrec* mm, rvec f[], rvec fshift[]);
+real call_orca(const t_QMMMrec* qmmm, t_QMrec* qm, t_MMrec* mm, rvec f[], rvec fshift[]);
#endif
}
static real call_QMroutine(const t_commrec gmx_unused* cr,
- const t_forcerec gmx_unused* fr,
+ const t_QMMMrec gmx_unused* qmmm,
t_QMrec gmx_unused* qm,
t_MMrec gmx_unused* mm,
rvec gmx_unused f[],
{
if (GMX_QMMM_GAUSSIAN)
{
- return call_gaussian_SH(fr, qm, mm, f, fshift);
+ return call_gaussian_SH(qmmm, qm, mm, f, fshift);
}
else
{
}
else if (GMX_QMMM_GAUSSIAN)
{
- return call_gaussian(fr, qm, mm, f, fshift);
+ return call_gaussian(qmmm, qm, mm, f, fshift);
}
else if (GMX_QMMM_ORCA)
{
- return call_orca(fr, qm, mm, f, fshift);
+ return call_orca(qmmm, qm, mm, f, fshift);
}
else
{
/* init_pbc(box); needs to be called first, see pbc.h */
ivec null_ivec;
clear_ivec(null_ivec);
- set_pbc_dd(&pbc, fr->ePBC, DOMAINDECOMP(cr) ? cr->dd->nc : null_ivec, FALSE, box);
+ set_pbc_dd(&pbc, fr->ePBC, DOMAINDECOMP(cr) ? cr->dd->numCells : null_ivec, FALSE, box);
/* only in standard (normal) QMMM we need the neighbouring MM
* particles to provide a electric field of point charges for the QM
* atoms.
}
} /* update_QMMM_rec */
-real calculate_QMMM(const t_commrec* cr, gmx::ForceWithShiftForces* forceWithShiftForces, const t_forcerec* fr)
+real calculate_QMMM(const t_commrec* cr, gmx::ForceWithShiftForces* forceWithShiftForces, const t_QMMMrec* qr)
{
real QMener = 0.0;
/* a selection for the QM package depending on which is requested
* (Gaussian, GAMESS-UK, MOPAC or ORCA) needs to be implemented here. Now
* it works through defines.... Not so nice yet
*/
- t_QMMMrec* qr;
- t_QMrec * qm, *qm2;
- t_MMrec* mm = nullptr;
- rvec * forces = nullptr, *fshift = nullptr, *forces2 = nullptr,
+ t_QMrec *qm, *qm2;
+ t_MMrec* mm = nullptr;
+ rvec * forces = nullptr, *fshift = nullptr, *forces2 = nullptr,
*fshift2 = nullptr; /* needed for multilayer ONIOM */
int i, j, k;
/* make a local copy the QMMMrec pointer
*/
- qr = fr->qr;
mm = qr->mm;
/* now different procedures are carried out for one layer ONION and
qm = qr->qm[0];
snew(forces, (qm->nrQMatoms + mm->nrMMatoms));
snew(fshift, (qm->nrQMatoms + mm->nrMMatoms));
- QMener = call_QMroutine(cr, fr, qm, mm, forces, fshift);
+ QMener = call_QMroutine(cr, qr, qm, mm, forces, fshift);
for (i = 0; i < qm->nrQMatoms; i++)
{
for (j = 0; j < DIM; j++)
srenew(fshift, qm->nrQMatoms);
/* we need to re-initialize the QMroutine every step... */
init_QMroutine(cr, qm, mm);
- QMener += call_QMroutine(cr, fr, qm, mm, forces, fshift);
+ QMener += call_QMroutine(cr, qr, qm, mm, forces, fshift);
/* this layer at the lower level of theory */
srenew(forces2, qm->nrQMatoms);
srenew(fshift2, qm->nrQMatoms);
init_QMroutine(cr, qm2, mm);
- QMener -= call_QMroutine(cr, fr, qm2, mm, forces2, fshift2);
+ QMener -= call_QMroutine(cr, qr, qm2, mm, forces2, fshift2);
/* E = E1high-E1low The next layer includes the current layer at
* the lower level of theory, which provides + E2low
* this is similar for gradients
init_QMroutine(cr, qm, mm);
srenew(forces, qm->nrQMatoms);
srenew(fshift, qm->nrQMatoms);
- QMener += call_QMroutine(cr, fr, qm, mm, forces, fshift);
+ QMener += call_QMroutine(cr, qr, qm, mm, forces, fshift);
for (i = 0; i < qm->nrQMatoms; i++)
{
for (j = 0; j < DIM; j++)
* routine should be called at every step, since it updates the MM
* elements of the t_QMMMrec struct.
*/
-real calculate_QMMM(const t_commrec* cr, gmx::ForceWithShiftForces* forceWithShiftForces, const t_forcerec* fr);
+real calculate_QMMM(const t_commrec* cr, gmx::ForceWithShiftForces* forceWithShiftForces, const t_QMMMrec* qmmm);
/* QMMM computes the QM forces. This routine makes either function
* calls to gmx QM routines (derived from MOPAC7 (semi-emp.) and MPQC
* using CUDA, including class initialization, data-structures management
* and GPU kernel.
*
- * \note Management of CUDA stream and periodic boundary should be unified with LINCS
- * and removed from here once constraints are fully integrated with update module.
* \todo Reconsider naming to use "gpu" suffix instead of "cuda".
*
* \author Artem Zhmurov <zhmurov@gmail.com>
* \param [in] gm_x Coordinates of atoms before the timestep.
* \param [in,out] gm_x Coordinates of atoms after the timestep (constrained coordinates will be
* saved here).
- * \param [in] pbcAiuc Periodic boundary conditions data.
* \param [in] invdt Reciprocal timestep.
* \param [in] gm_v Velocities of the particles.
* \param [in] gm_virialScaled Virial tensor.
+ * \param [in] pbcAiuc Periodic boundary conditions data.
*/
template<bool updateVelocities, bool computeVirial>
__launch_bounds__(c_maxThreadsPerBlock) __global__
const SettleParameters pars,
const float3* __restrict__ gm_x,
float3* __restrict__ gm_xprime,
- const PbcAiuc pbcAiuc,
- float invdt,
+ float invdt,
float3* __restrict__ gm_v,
- float* __restrict__ gm_virialScaled)
+ float* __restrict__ gm_virialScaled,
+ const PbcAiuc pbcAiuc)
{
/* ******************************************************************* */
/* ** */
float3* d_v,
const real invdt,
const bool computeVirial,
- tensor virialScaled)
+ tensor virialScaled,
+ const PbcAiuc pbcAiuc)
{
ensureNoPendingCudaError("In CUDA version SETTLE");
config.stream = commandStream_;
const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, &numSettles_, &d_atomIds_,
- &settleParameters_, &d_x, &d_xp, &pbcAiuc_,
- &invdt, &d_v, &d_virialScaled_);
+ &settleParameters_, &d_x, &d_xp, &invdt, &d_v,
+ &d_virialScaled_, &pbcAiuc);
launchGpuKernel(kernelPtr, config, nullptr, "settle_kernel<updateVelocities, computeVirial>", kernelArgs);
GpuApiCallBehavior::Sync, nullptr);
}
-void SettleCuda::setPbc(const t_pbc* pbc)
-{
- setPbcAiuc(pbc->ndim_ePBC, pbc->box, &pbcAiuc_);
-}
-
} // namespace gmx
* multipliers when velocities are updated)
* \param[in] computeVirial If virial should be updated.
* \param[in,out] virialScaled Scaled virial tensor to be updated.
+ * \param[in] pbcAiuc PBC data.
*/
void apply(const float3* d_x,
float3* d_xp,
float3* d_v,
const real invdt,
const bool computeVirial,
- tensor virialScaled);
+ tensor virialScaled,
+ const PbcAiuc pbcAiuc);
/*! \brief
* Update data-structures (e.g. after NB search step).
*/
void set(const t_idef& idef, const t_mdatoms& md);
- /*! \brief
- * Update PBC data.
- *
- * Converts pbc data from t_pbc into the PbcAiuc format and stores the latter.
- *
- * \todo PBC should not be handled by constraints.
- *
- * \param[in] pbc The PBC data in t_pbc format.
- */
- void setPbc(const t_pbc* pbc);
-
-
private:
//! CUDA stream
CommandStream commandStream_;
- //! Periodic boundary data
- PbcAiuc pbcAiuc_;
//! Scaled virial tensor (9 reals, GPU)
std::vector<float> h_virialScaled_;
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013-2019, by the GROMACS development team, led by
+ * Copyright (c) 2013-2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/* \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
*
- * TODO: eliminate the \p useGpuNonbonded and \p useGpuNonbonded when these are
+ * TODO: eliminate \p useGpuPmeOnThisRank when this is
* incorporated in DomainLifetimeWorkload.
*/
static void launchGpuEndOfStepTasks(nonbonded_verlet_t* nbv,
gmx_pme_t* pmedata,
gmx_enerdata_t* enerd,
const gmx::MdrunScheduleWorkload& runScheduleWork,
- bool useGpuNonbonded,
- bool useGpuPme,
+ bool useGpuPmeOnThisRank,
int64_t step,
gmx_wallcycle_t wcycle)
{
- if (useGpuNonbonded)
+ if (runScheduleWork.simulationWork.useGpuNonbonded)
{
/* Launch pruning before buffer clearing because the API overhead of the
* clear kernel launches can leave the GPU idle while it could be running
wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}
- if (useGpuPme)
+ if (useGpuPmeOnThisRank)
{
pme_gpu_reinit_computation(pmedata, wcycle);
}
}
wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}
- }
- if (stepWork.doNeighborSearch)
- {
// Need to run after the GPU-offload bonded interaction lists
// are set up to be able to determine whether there is bonded work.
runScheduleWork->domainWork = setupDomainLifetimeWorkload(
wallcycle_start_nocount(wcycle, ewcNS);
wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
/* Note that with a GPU the launch overhead of the list transfer is not timed separately */
- nbv->constructPairlist(InteractionLocality::Local, &top->excls, step, nrnb);
+ nbv->constructPairlist(InteractionLocality::Local, top->excls, step, nrnb);
nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::Local);
wallcycle_start_nocount(wcycle, ewcNS);
wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
/* Note that with a GPU the launch overhead of the list transfer is not timed separately */
- nbv->constructPairlist(InteractionLocality::NonLocal, &top->excls, step, nrnb);
+ nbv->constructPairlist(InteractionLocality::NonLocal, top->excls, step, nrnb);
nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::NonLocal);
wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
}
launchGpuEndOfStepTasks(nbv, fr->gpuBonded, fr->pmedata, enerd, *runScheduleWork,
- simulationWork.useGpuNonbonded, useGpuPmeOnThisRank, step, wcycle);
+ useGpuPmeOnThisRank, step, wcycle);
if (DOMAINDECOMP(cr))
{
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/pbcutil/pbc.h"
-#include "gromacs/topology/block.h"
#include "gromacs/topology/idef.h"
#include "gromacs/topology/ifunc.h"
#include "gromacs/topology/topology.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/unique_cptr.h"
#include "testutils/testasserts.h"
gmx_omp_nthreads_set(emntLINCS, 1);
// Make blocka structure for faster LINCS setup
- std::vector<t_blocka> at2con_mt;
+ std::vector<ListOfLists<int>> at2con_mt;
at2con_mt.reserve(testData->mtop_.moltype.size());
for (const gmx_moltype_t& moltype : testData->mtop_.moltype)
{
&testData->nrnb_, maxwarn, &warncount_lincs);
EXPECT_TRUE(success) << "Test failed with a false return value in LINCS.";
EXPECT_EQ(warncount_lincs, 0) << "There were warnings in LINCS.";
- for (auto& moltype : at2con_mt)
- {
- sfree(moltype.index);
- sfree(moltype.a);
- }
done_lincs(lincsd);
}
float3 *d_x, *d_xp, *d_v;
lincsCuda->set(testData->idef_, testData->md_);
- lincsCuda->setPbc(&pbc);
+ PbcAiuc pbcAiuc;
+ setPbcAiuc(pbc.ndim_ePBC, pbc.box, &pbcAiuc);
allocateDeviceBuffer(&d_x, numAtoms, nullptr);
allocateDeviceBuffer(&d_xp, numAtoms, nullptr);
GpuApiCallBehavior::Sync, nullptr);
}
lincsCuda->apply(d_x, d_xp, updateVelocities, d_v, testData->invdt_, testData->computeVirial_,
- testData->virialScaled_);
+ testData->virialScaled_, pbcAiuc);
copyFromDeviceBuffer((float3*)(testData->xPrime_.data()), &d_xp, 0, numAtoms, nullptr,
GpuApiCallBehavior::Sync, nullptr);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
}
// Reshape some data so it can be directly used by the SETTLE constraints
- ilist_ = { mtop_.moltype[0].ilist[F_SETTLE].size(), 0,
- mtop_.moltype[0].ilist[F_SETTLE].iatoms.data(), 0 };
+ ilist_ = { mtop_.moltype[0].ilist[F_SETTLE].size(), mtop_.moltype[0].ilist[F_SETTLE].iatoms.data(), 0 };
idef_.il[F_SETTLE] = ilist_;
}
GMX_RELEASE_ASSERT(canPerformGpuDetection(), "Can't detect CUDA-capable GPUs.");
auto settleCuda = std::make_unique<SettleCuda>(testData->mtop_, nullptr);
- settleCuda->setPbc(&pbc);
+
settleCuda->set(testData->idef_, testData->mdatoms_);
+ PbcAiuc pbcAiuc;
+ setPbcAiuc(pbc.ndim_ePBC, pbc.box, &pbcAiuc);
int numAtoms = testData->mdatoms_.homenr;
copyToDeviceBuffer(&d_v, (float3*)h_v, 0, numAtoms, nullptr, GpuApiCallBehavior::Sync, nullptr);
}
settleCuda->apply(d_x, d_xp, updateVelocities, d_v, testData->reciprocalTimeStep_, calcVirial,
- testData->virial_);
+ testData->virial_, pbcAiuc);
copyFromDeviceBuffer((float3*)h_xp, &d_xp, 0, numAtoms, nullptr, GpuApiCallBehavior::Sync, nullptr);
if (updateVelocities)
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
// Constraints need both coordinates before (d_x_) and after (d_xp_) update. However, after constraints
// are applied, the d_x_ can be discarded. So we intentionally swap the d_x_ and d_xp_ here to avoid the
// d_xp_ -> d_x_ copy after constraints. Note that the integrate saves them in the wrong order as well.
- lincsCuda_->apply(d_xp_, d_x_, updateVelocities, d_v_, 1.0 / dt, computeVirial, virial);
- settleCuda_->apply(d_xp_, d_x_, updateVelocities, d_v_, 1.0 / dt, computeVirial, virial);
+ lincsCuda_->apply(d_xp_, d_x_, updateVelocities, d_v_, 1.0 / dt, computeVirial, virial, pbcAiuc_);
+ settleCuda_->apply(d_xp_, d_x_, updateVelocities, d_v_, 1.0 / dt, computeVirial, virial, pbcAiuc_);
// scaledVirial -> virial (methods above returns scaled values)
float scaleFactor = 0.5f / (dt * dt);
void UpdateConstrainCuda::Impl::setPbc(const t_pbc* pbc)
{
setPbcAiuc(pbc->ndim_ePBC, pbc->box, &pbcAiuc_);
- integrator_->setPbc(pbc);
- lincsCuda_->setPbc(pbc);
- settleCuda_->setPbc(pbc);
}
GpuEventSynchronizer* UpdateConstrainCuda::Impl::getCoordinatesReadySync()
#include "gromacs/math/units.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdtypes/inputrec.h"
-#include "gromacs/topology/block.h"
#include "gromacs/topology/idef.h"
#include "gromacs/topology/ifunc.h"
#include "gromacs/topology/topology.h"
+#include "gromacs/utility/listoflists.h"
namespace gmx
{
}
/*! \brief Returns the range of atoms constrained to atom \p a (including \p a itself) */
-static AtomIndexExtremes constraintAtomRange(int a, const t_blocka& at2con, const InteractionList& ilistConstraints)
+static AtomIndexExtremes constraintAtomRange(int a,
+ const ListOfLists<int>& at2con,
+ const InteractionList& ilistConstraints)
{
AtomIndexExtremes extremes = { a, a };
- for (int i = at2con.index[a]; i < at2con.index[a + 1]; i++)
+ for (const int constraint : at2con[a])
{
for (int j = 0; j < 2; j++)
{
- int atomJ = ilistConstraints.iatoms[at2con.a[i] * 3 + 1 + j];
+ int atomJ = ilistConstraints.iatoms[constraint * 3 + 1 + j];
extremes.minAtom = std::min(extremes.minAtom, atomJ);
extremes.maxAtom = std::max(extremes.maxAtom, atomJ);
}
}
/*! \brief Returns the size of the update group starting at \p firstAtom or 0 when criteria (see updategroups.h) are not met */
-static int detectGroup(int firstAtom,
- const gmx_moltype_t& moltype,
- const t_blocka& at2con,
- const InteractionList& ilistConstraints)
+static int detectGroup(int firstAtom,
+ const gmx_moltype_t& moltype,
+ const ListOfLists<int>& at2con,
+ const InteractionList& ilistConstraints)
{
/* We should be using moltype.atoms.atom[].ptype for checking whether
* a particle is a vsite. But the test code can't fill t_atoms,
std::vector<bool> isParticleVsite = buildIsParticleVsite(moltype);
/* A non-vsite atom without constraints is an update group by itself */
- if (!isParticleVsite[firstAtom] && at2con.index[firstAtom + 1] - at2con.index[firstAtom] == 0)
+ if (!isParticleVsite[firstAtom] && at2con[firstAtom].empty())
{
return 1;
}
}
else
{
- int numConstraints = at2con.index[a + 1] - at2con.index[a];
+ const int numConstraints = at2con[a].ssize();
if (numConstraints == 0)
{
/* We can not have unconstrained atoms in an update group */
ilistsCombined[F_CONSTR].iatoms = constraintsCombined.iatoms.data();
ilistsCombined[F_CONSTRNC].nr = 0;
/* We "include" flexible constraints, but none are present (checked above) */
- t_blocka at2con = make_at2con(moltype.atoms.nr, ilistsCombined, iparams.data(),
- FlexibleConstraintTreatment::Include);
+ const ListOfLists<int> at2con = make_at2con(moltype.atoms.nr, ilistsCombined, iparams.data(),
+ FlexibleConstraintTreatment::Include);
bool satisfiesCriteria = true;
groups.clear();
}
- done_blocka(&at2con);
-
return groups;
}
static real constraintGroupRadius(const gmx_moltype_t& moltype,
gmx::ArrayRef<const t_iparams> iparams,
const int centralAtom,
- const t_blocka& at2con,
+ const ListOfLists<int>& at2con,
const std::unordered_multimap<int, int>& angleIndices,
const real constraintLength,
const real temperature)
{
- const int numConstraints = at2con.index[centralAtom + 1] - at2con.index[centralAtom];
+ const int numConstraints = at2con[centralAtom].ssize();
GMX_RELEASE_ASSERT(numConstraints == numPartnerAtoms,
"We expect as many constraints as partner atoms here");
std::array<int, numPartnerAtoms> partnerAtoms;
for (int i = 0; i < numPartnerAtoms; i++)
{
- const int ind = at2con.a[at2con.index[centralAtom] + i] * 3;
+ const int ind = at2con[centralAtom][i] * 3;
if (ind >= moltype.ilist[F_CONSTR].size())
{
/* This is a flexible constraint, we don't optimize for that */
const InteractionList& settles = moltype.ilist[F_SETTLE];
- t_blocka at2con = make_at2con(moltype, iparams, FlexibleConstraintTreatment::Include);
+ const ListOfLists<int> at2con = make_at2con(moltype, iparams, FlexibleConstraintTreatment::Include);
const auto angleIndices = getAngleIndices(moltype);
int maxAtom = -1;
for (int a : updateGroups.block(group))
{
- int numConstraints = at2con.index[a + 1] - at2con.index[a];
+ const int numConstraints = at2con[a].ssize();
if (numConstraints > maxNumConstraints)
{
maxNumConstraints = numConstraints;
int constraintType = -1;
real maxConstraintLength = 0;
real sumConstraintLengths = 0;
- for (int i = at2con.index[maxAtom]; i < at2con.index[maxAtom + 1]; i++)
+ bool isFirstConstraint = true;
+ for (const int constraint : at2con[maxAtom])
{
- int conIndex = at2con.a[i] * (1 + NRAL(F_CONSTR));
+ int conIndex = constraint * (1 + NRAL(F_CONSTR));
int iparamsIndex;
if (conIndex < moltype.ilist[F_CONSTR].size())
{
iparamsIndex =
moltype.ilist[F_CONSTRNC].iatoms[conIndex - moltype.ilist[F_CONSTR].size()];
}
- if (i == at2con.index[maxAtom])
+ if (isFirstConstraint)
{
- constraintType = iparamsIndex;
+ constraintType = iparamsIndex;
+ isFirstConstraint = false;
}
else if (iparamsIndex != constraintType)
{
sumConstraintLengths += constraintLength;
}
- int numConstraints = at2con.index[maxAtom + 1] - at2con.index[maxAtom];
+ int numConstraints = at2con[maxAtom].ssize();
real radius;
if (numConstraints == 1)
{
maxRadius = std::max(maxRadius, dCAny);
}
- done_blocka(&at2con);
-
return maxRadius;
}
*/
ivec null_ivec;
clear_ivec(null_ivec);
- pbc_null = set_pbc_dd(&pbc, ePBC, useDomdec ? cr->dd->nc : null_ivec, FALSE, box);
+ pbc_null = set_pbc_dd(&pbc, ePBC, useDomdec ? cr->dd->numCells : null_ivec, FALSE, box);
}
else
{
/* This is wasting some CPU time as we now do this multiple times
* per MD step.
*/
- pbc_null = set_pbc_dd(&pbc, ePBC, useDomdec ? cr->dd->nc : nullptr, FALSE, box);
+ pbc_null = set_pbc_dd(&pbc, ePBC, useDomdec ? cr->dd->numCells : nullptr, FALSE, box);
}
else
{
const int nwall = ir.nwall;
const int ngid = ir.opts.ngener;
const int ntype = fr.ntype;
- const real* nbfp = fr.nbfp;
+ const real* nbfp = fr.nbfp.data();
const int* egp_flags = fr.egp_flags;
for (int w = 0; w < nwall; w++)
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2011-2019, by the GROMACS development team, led by
+ * Copyright (c) 2011-2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2011-2019, by the GROMACS development team, led by
+ * Copyright (c) 2011-2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/* This call is not included in init_domain_decomposition mainly
* because fr->cginfo_mb is set later.
*/
- dd_init_bondeds(fplog, cr->dd, &mtop, vsite.get(), inputrec,
+ dd_init_bondeds(fplog, cr->dd, mtop, vsite.get(), inputrec,
domdecOptions.checkBondedInteractions, fr->cginfo_mb);
}
}
// FIXME: this is only here to manually unpin mdAtoms->chargeA_ and state->x,
- // before we destroy the GPU context(s) in free_gpu_resources().
+ // before we destroy the GPU context(s) in free_gpu().
// Pinned buffers are associated with contexts in CUDA.
// As soon as we destroy GPU contexts after mdrunner() exits, these lines should go.
mdAtoms.reset(nullptr);
globalState.reset(nullptr);
mdModules_.reset(nullptr); // destruct force providers here as they might also use the GPU
+ /* Free pinned buffers in *fr */
+ delete fr;
+ fr = nullptr;
+
+ if (hwinfo->gpu_info.n_dev > 0)
+ {
+ /* stop the GPU profiler (only CUDA) */
+ stopGpuProfiler();
+ }
+
+ /* With tMPI we need to wait for all ranks to finish deallocation before
+ * destroying the CUDA context in free_gpu() as some tMPI ranks may be sharing
+ * GPU and context.
+ *
+ * This is not a concern in OpenCL where we use one context per rank which
+ * is freed in nbnxn_gpu_free().
+ *
+ * Note: it is safe to not call the barrier on the ranks which do not use GPU,
+ * but it is easier and more futureproof to call it on the whole node.
+ *
+ * Note that this function needs to be called even if GPUs are not used
+ * in this run because the PME ranks have no knowledge of whether GPUs
+ * are used or not, but all ranks need to enter the barrier below.
+ * \todo Remove this physical node barrier after making sure
+ * that it's not needed anymore (with a shared GPU run).
+ */
+ if (GMX_THREAD_MPI)
+ {
+ physicalNodeComm.barrier();
+ }
- /* Free GPU memory and set a physical node tMPI barrier (which should eventually go away) */
- free_gpu_resources(fr, physicalNodeComm, hwinfo->gpu_info);
free_gpu(nonbondedDeviceInfo);
free_gpu(pmeDeviceInfo);
- done_forcerec(fr, mtop.molblock.size());
sfree(fcd);
if (doMembed)
/* TODO: Avoid updating all atoms at every bNS step */
fr->nbv->setAtomProperties(*mdatoms, fr->cginfo);
- fr->nbv->constructPairlist(InteractionLocality::Local, &top.excls, step, nrnb);
+ fr->nbv->constructPairlist(InteractionLocality::Local, top.excls, step, nrnb);
bNS = FALSE;
}
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
struct cginfo_mb_t
{
- int cg_start;
- int cg_end;
- int cg_mod;
- int* cginfo;
+ int cg_start = 0;
+ int cg_end = 0;
+ int cg_mod = 0;
+ std::vector<int> cginfo;
};
real sc_sigma6_min = 0;
/* Information about atom properties for the molecule blocks in the system */
- struct cginfo_mb_t* cginfo_mb = nullptr;
+ std::vector<cginfo_mb_t> cginfo_mb;
/* Information about atom properties for local and non-local atoms */
std::vector<int> cginfo;
std::vector<gmx::RVec> shiftForces;
/* Non bonded Parameter lists */
- int ntype = 0; /* Number of atom types */
- gmx_bool bBHAM = FALSE;
- real* nbfp = nullptr;
- real* ljpme_c6grid = nullptr; /* C6-values used on grid in LJPME */
+ int ntype = 0; /* Number of atom types */
+ gmx_bool bBHAM = FALSE;
+ std::vector<real> nbfp;
+ real* ljpme_c6grid = nullptr; /* C6-values used on grid in LJPME */
/* Energy group pair flags */
int* egp_flags = nullptr;
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
{
GMX_UNUSED_VALUE(dataSize);
+ GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
+
GMX_ASSERT(dataSize >= 0, "Trying to copy to device buffer before it was allocated.");
+ GMX_ASSERT(commandStream != nullptr,
+ "No stream is valid for copying with given atom locality.");
wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
{
GMX_UNUSED_VALUE(dataSize);
+ GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
+
GMX_ASSERT(dataSize >= 0, "Trying to copy from device buffer before it was allocated.");
+ GMX_ASSERT(commandStream != nullptr,
+ "No stream is valid for copying with given atom locality.");
wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
// TODO: remove this by adding an event-mark free flavor of this function
if (GMX_GPU == GMX_GPU_CUDA)
{
- xReadyOnDevice_[atomLocality].markEvent(commandStream);
+ xReadyOnDevice_[atomLocality].markEvent(xCopyStreams_[atomLocality]);
}
wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
const Nbnxm::KernelType kernelType,
int enbnxninitcombrule,
int ntype,
- const real* nbfp,
+ ArrayRef<const real> nbfp,
int n_energygroups)
{
real c6, c12, tol;
const Nbnxm::KernelType kernelType,
int enbnxninitcombrule,
int ntype,
- const real* nbfp,
+ ArrayRef<const real> nbfp,
int n_energygroups,
int nout)
{
* to the atom data structure.
* enbnxninitcombrule sets what combination rule data gets stored in nbat.
*/
-void nbnxn_atomdata_init(const gmx::MDLogger& mdlog,
- nbnxn_atomdata_t* nbat,
- Nbnxm::KernelType kernelType,
- int enbnxninitcombrule,
- int ntype,
- const real* nbfp,
- int n_energygroups,
- int nout);
+void nbnxn_atomdata_init(const gmx::MDLogger& mdlog,
+ nbnxn_atomdata_t* nbat,
+ Nbnxm::KernelType kernelType,
+ int enbnxninitcombrule,
+ int ntype,
+ gmx::ArrayRef<const real> nbfp,
+ int n_energygroups,
+ int nout);
void nbnxn_atomdata_set(nbnxn_atomdata_t* nbat,
const Nbnxm::GridSet& gridSet,
std::move(atomData), kernelSetup, nullptr, nullptr);
nbnxn_atomdata_init(gmx::MDLogger(), nbv->nbat.get(), kernelSetup.kernelType, combinationRule,
- system.numAtomTypes, system.nonbondedParameters.data(), 1, numThreads);
+ system.numAtomTypes, system.nonbondedParameters, 1, numThreads);
t_nrnb nrnb;
{ 0, int(system.coordinates.size()) }, atomDensity, atomInfo,
system.coordinates, 0, nullptr);
- nbv->constructPairlist(gmx::InteractionLocality::Local, &system.excls, 0, &nrnb);
+ nbv->constructPairlist(gmx::InteractionLocality::Local, system.excls, 0, &nrnb);
t_mdatoms mdatoms;
// We only use (read) the atom type and charge from mdatoms
#include "bench_system.h"
+#include <numeric>
#include <vector>
#include "gromacs/math/vec.h"
charges.resize(numAtoms);
atomInfoAllVdw.resize(numAtoms);
atomInfoOxygenVdw.resize(numAtoms);
- snew(excls.index, numAtoms + 1);
- snew(excls.a, numAtoms * numAtomsInMolecule);
- excls.index[0] = 0;
for (int a = 0; a < numAtoms; a++)
{
SET_CGINFO_HAS_Q(atomInfoAllVdw[a]);
SET_CGINFO_HAS_Q(atomInfoOxygenVdw[a]);
- const int firstAtomInMolecule = a - (a % numAtomsInMolecule);
- for (int aj = 0; aj < numAtomsInMolecule; aj++)
- {
- excls.a[a * numAtomsInMolecule + aj] = firstAtomInMolecule + aj;
- }
- excls.index[a + 1] = (a + 1) * numAtomsInMolecule;
+ excls.pushBackListOfSize(numAtomsInMolecule);
+ gmx::ArrayRef<int> exclusionsForAtom = excls.back();
+ const int firstAtomInMolecule = a - (a % numAtomsInMolecule);
+ std::iota(exclusionsForAtom.begin(), exclusionsForAtom.end(), firstAtomInMolecule);
}
forceRec.ntype = numAtomTypes;
- forceRec.nbfp = nonbondedParameters.data();
+ forceRec.nbfp = nonbondedParameters;
snew(forceRec.shift_vec, SHIFTS);
calc_shifts(box, forceRec.shift_vec);
}
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/forcerec.h"
-#include "gromacs/topology/block.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/smalloc.h"
namespace gmx
//! Atom info where only oxygen atoms are marked to have Van der Waals interactions
std::vector<int> atomInfoOxygenVdw;
//! Information about exclusions.
- t_blocka excls;
+ ListOfLists<int> excls;
//! Storage for atom positions.
std::vector<gmx::RVec> coordinates;
//! System simulation box.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2012-2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
struct nonbonded_verlet_t;
class PairSearch;
class PairlistSets;
-struct t_blocka;
struct t_commrec;
struct t_lambda;
struct t_mdatoms;
namespace gmx
{
class ForceWithShiftForces;
+template<typename>
+class ListOfLists;
class MDLogger;
class UpdateGroupsCog;
} // namespace gmx
gmx::ArrayRef<const int> getGridIndices() const;
//! Constructs the pairlist for the given locality
- void constructPairlist(gmx::InteractionLocality iLocality, const t_blocka* excl, int64_t step, t_nrnb* nrnb);
+ void constructPairlist(gmx::InteractionLocality iLocality,
+ const gmx::ListOfLists<int>& exclusions,
+ int64_t step,
+ t_nrnb* nrnb);
//! Updates all the atom properties in Nbnxm
void setAtomProperties(const t_mdatoms& mdatoms, gmx::ArrayRef<const int> atomInfo);
minimumIlistCountForGpuBalancing);
auto pairSearch = std::make_unique<PairSearch>(
- ir->ePBC, EI_TPI(ir->eI), DOMAINDECOMP(cr) ? &cr->dd->nc : nullptr,
+ ir->ePBC, EI_TPI(ir->eI), DOMAINDECOMP(cr) ? &cr->dd->numCells : nullptr,
DOMAINDECOMP(cr) ? domdec_zones(cr->dd) : nullptr, pairlistParams.pairlistType,
bFEP_NonBonded, gmx_omp_nthreads_get(emntPairsearch), pinPolicy);
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"
-#include "gromacs/topology/block.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/smalloc.h"
#include "atomdata.h"
* Set all atom-pair exclusions from the topology stored in exclusions
* as masks in the pair-list for simple list entry iEntry.
*/
-static void setExclusionsForIEntry(const Nbnxm::GridSet& gridSet,
- NbnxnPairlistCpu* nbl,
- gmx_bool diagRemoved,
- int na_cj_2log,
- const nbnxn_ci_t& iEntry,
- const t_blocka& exclusions)
+static void setExclusionsForIEntry(const Nbnxm::GridSet& gridSet,
+ NbnxnPairlistCpu* nbl,
+ gmx_bool diagRemoved,
+ int na_cj_2log,
+ const nbnxn_ci_t& iEntry,
+ const ListOfLists<int>& exclusions)
{
if (iEntry.cj_ind_end == iEntry.cj_ind_start)
{
if (iAtom >= 0)
{
/* Loop over the topology-based exclusions for this i-atom */
- for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1];
- exclIndex++)
+ for (const int jAtom : exclusions[iAtom])
{
- const int jAtom = exclusions.a[exclIndex];
-
if (jAtom == iAtom)
{
/* The self exclusion are already set, save some time */
static void setExclusionsForIEntry(const Nbnxm::GridSet& gridSet,
NbnxnPairlistGpu* nbl,
gmx_bool diagRemoved,
- int gmx_unused na_cj_2log,
- const nbnxn_sci_t& iEntry,
- const t_blocka& exclusions)
+ int gmx_unused na_cj_2log,
+ const nbnxn_sci_t& iEntry,
+ const ListOfLists<int>& exclusions)
{
if (iEntry.numJClusterGroups() == 0)
{
const int iCluster = i / c_clusterSize;
/* Loop over the topology-based exclusions for this i-atom */
- for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1];
- exclIndex++)
+ for (const int jAtom : exclusions[iAtom])
{
- const int jAtom = exclusions.a[exclIndex];
-
if (jAtom == iAtom)
{
/* The self exclusions are already set, save some time */
const Grid& jGrid,
PairsearchWork* work,
const nbnxn_atomdata_t* nbat,
- const t_blocka& exclusions,
+ const ListOfLists<int>& exclusions,
real rlist,
const PairlistType pairlistType,
int ci_block,
void PairlistSet::constructPairlists(const Nbnxm::GridSet& gridSet,
gmx::ArrayRef<PairsearchWork> searchWork,
nbnxn_atomdata_t* nbat,
- const t_blocka* excl,
+ const ListOfLists<int>& exclusions,
const int minimumIlistCountForGpuBalancing,
t_nrnb* nrnb,
SearchCycleCounting* searchCycleCounting)
/* Divide the i cells equally over the pairlists */
if (isCpuType_)
{
- nbnxn_make_pairlist_part(gridSet, iGrid, jGrid, &work, nbat, *excl, rlist,
+ nbnxn_make_pairlist_part(gridSet, iGrid, jGrid, &work, nbat, exclusions, rlist,
params_.pairlistType, ci_block, nbat->bUseBufferFlags,
nsubpair_target, progBal, nsubpair_tot_est, th,
numLists, &cpuLists_[th], fepListPtr);
}
else
{
- nbnxn_make_pairlist_part(gridSet, iGrid, jGrid, &work, nbat, *excl, rlist,
+ nbnxn_make_pairlist_part(gridSet, iGrid, jGrid, &work, nbat, exclusions, rlist,
params_.pairlistType, ci_block, nbat->bUseBufferFlags,
nsubpair_target, progBal, nsubpair_tot_est, th,
numLists, &gpuLists_[th], fepListPtr);
void PairlistSets::construct(const InteractionLocality iLocality,
PairSearch* pairSearch,
nbnxn_atomdata_t* nbat,
- const t_blocka* excl,
+ const ListOfLists<int>& exclusions,
const int64_t step,
t_nrnb* nrnb)
{
- pairlistSet(iLocality).constructPairlists(pairSearch->gridSet(), pairSearch->work(), nbat, excl,
- minimumIlistCountForGpuBalancing_, nrnb,
+ pairlistSet(iLocality).constructPairlists(pairSearch->gridSet(), pairSearch->work(), nbat,
+ exclusions, minimumIlistCountForGpuBalancing_, nrnb,
&pairSearch->cycleCounting_);
if (iLocality == InteractionLocality::Local)
}
void nonbonded_verlet_t::constructPairlist(const InteractionLocality iLocality,
- const t_blocka* excl,
+ const ListOfLists<int>& exclusions,
int64_t step,
t_nrnb* nrnb)
{
- pairlistSets_->construct(iLocality, pairSearch_.get(), nbat.get(), excl, step, nrnb);
+ pairlistSets_->construct(iLocality, pairSearch_.get(), nbat.get(), exclusions, step, nrnb);
if (useGpu())
{
struct PairlistParams;
struct PairsearchWork;
struct SearchCycleCounting;
-struct t_blocka;
struct t_nrnb;
+namespace gmx
+{
+template<typename>
+class ListOfLists;
+}
+
namespace Nbnxm
{
class GridSet;
void constructPairlists(const Nbnxm::GridSet& gridSet,
gmx::ArrayRef<PairsearchWork> searchWork,
nbnxn_atomdata_t* nbat,
- const t_blocka* excl,
+ const gmx::ListOfLists<int>& exclusions,
int minimumIlistCountForGpuBalancing,
t_nrnb* nrnb,
SearchCycleCounting* searchCycleCounting);
class PairlistSet;
enum class PairlistType;
class PairSearch;
-struct t_blocka;
struct t_nrnb;
+namespace gmx
+{
+template<typename>
+class ListOfLists;
+}
class PairlistSets
{
int minimumIlistCountForGpuBalancing);
//! Construct the pairlist set for the given locality
- void construct(gmx::InteractionLocality iLocality,
- PairSearch* pairSearch,
- nbnxn_atomdata_t* nbat,
- const t_blocka* excl,
- int64_t step,
- t_nrnb* nrnb);
+ void construct(gmx::InteractionLocality iLocality,
+ PairSearch* pairSearch,
+ nbnxn_atomdata_t* nbat,
+ const gmx::ListOfLists<int>& exclusions,
+ int64_t step,
+ t_nrnb* nrnb);
//! Dispatches the dynamic pruning kernel for the given locality
void dispatchPruneKernel(gmx::InteractionLocality iLocality,
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "gromacs/math/functions.h"
#include "gromacs/math/vec.h"
#include "gromacs/pbcutil/pbc.h"
-#include "gromacs/topology/block.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/gmxassert.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/mutex.h"
#include "gromacs/utility/stringutil.h"
*/
void init(AnalysisNeighborhood::SearchMode mode,
bool bXY,
- const t_blocka* excls,
+ const ListOfLists<int>* excls,
const t_pbc* pbc,
const AnalysisNeighborhoodPositions& positions);
PairSearchImplPointer getPairSearch();
//! Reference position indices (NULL if no indices).
const int* refIndices_;
//! Exclusions.
- const t_blocka* excls_;
+ const ListOfLists<int>* excls_;
//! PBC data.
t_pbc pbc_;
testPositions_ = nullptr;
testExclusionIds_ = nullptr;
testIndices_ = nullptr;
- nexcl_ = 0;
- excl_ = nullptr;
clear_rvec(xtest_);
clear_rvec(testcell_);
clear_ivec(currCell_);
const int* testExclusionIds_;
//! Reference to the test position indices.
const int* testIndices_;
- //! Number of excluded reference positions for current test particle.
- int nexcl_;
//! Exclusions for current test particle.
- const int* excl_;
+ ArrayRef<const int> excl_;
//! Index of the currently active test position in \p testPositions_.
int testIndex_;
//! Stores test position during a pair loop.
void AnalysisNeighborhoodSearchImpl::init(AnalysisNeighborhood::SearchMode mode,
bool bXY,
- const t_blocka* excls,
+ const ListOfLists<int>* excls,
const t_pbc* pbc,
const AnalysisNeighborhoodPositions& positions)
{
if (search_.excls_ != nullptr)
{
const int exclIndex = testExclusionIds_[index];
- if (exclIndex < search_.excls_->nr)
+ if (exclIndex < search_.excls_->ssize())
{
- const int startIndex = search_.excls_->index[exclIndex];
- nexcl_ = search_.excls_->index[exclIndex + 1] - startIndex;
- excl_ = &search_.excls_->a[startIndex];
+ excl_ = (*search_.excls_)[exclIndex];
}
else
{
- nexcl_ = 0;
- excl_ = nullptr;
+ excl_ = ArrayRef<const int>();
}
}
}
bool AnalysisNeighborhoodPairSearchImpl::isExcluded(int j)
{
- if (exclind_ < nexcl_)
+ const int nexcl = excl_.ssize();
+ if (exclind_ < nexcl)
{
const int index = (search_.refIndices_ != nullptr ? search_.refIndices_[j] : j);
const int refId = search_.refExclusionIds_[index];
- while (exclind_ < nexcl_ && excl_[exclind_] < refId)
+ while (exclind_ < nexcl && excl_[exclind_] < refId)
{
++exclind_;
}
- if (exclind_ < nexcl_ && refId == excl_[exclind_])
+ if (exclind_ < nexcl && refId == excl_[exclind_])
{
++exclind_;
return true;
SearchImplPointer getSearch();
- Mutex createSearchMutex_;
- SearchList searchList_;
- real cutoff_;
- const t_blocka* excls_;
- SearchMode mode_;
- bool bXY_;
+ Mutex createSearchMutex_;
+ SearchList searchList_;
+ real cutoff_;
+ const ListOfLists<int>* excls_;
+ SearchMode mode_;
+ bool bXY_;
};
AnalysisNeighborhood::Impl::SearchImplPointer AnalysisNeighborhood::Impl::getSearch()
impl_->bXY_ = bXY;
}
-void AnalysisNeighborhood::setTopologyExclusions(const t_blocka* excls)
+void AnalysisNeighborhood::setTopologyExclusions(const ListOfLists<int>* excls)
{
GMX_RELEASE_ASSERT(impl_->searchList_.empty(),
"Changing the exclusions after initSearch() not currently supported");
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/real.h"
-struct t_blocka;
struct t_pbc;
namespace gmx
{
+template<typename>
+class ListOfLists;
namespace internal
{
*
* \see AnalysisNeighborhoodPositions::exclusionIds()
*/
- void setTopologyExclusions(const t_blocka* excls);
+ void setTopologyExclusions(const ListOfLists<int>* excls);
/*! \brief
* Sets the algorithm to use for searching.
*
#include "gromacs/random/threefry.h"
#include "gromacs/random/uniformrealdistribution.h"
#include "gromacs/topology/block.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"
class ExclusionsHelper
{
public:
- static void markExcludedPairs(RefPairList* refPairs, int testIndex, const t_blocka* excls);
+ static void markExcludedPairs(RefPairList* refPairs, int testIndex, const gmx::ListOfLists<int>* excls);
ExclusionsHelper(int refPosCount, int testPosCount);
void generateExclusions();
- const t_blocka* exclusions() const { return &excls_; }
+ const gmx::ListOfLists<int>* exclusions() const { return &excls_; }
gmx::ArrayRef<const int> refPosIds() const
{
}
private:
- int refPosCount_;
- int testPosCount_;
- std::vector<int> exclusionIds_;
- std::vector<int> exclsIndex_;
- std::vector<int> exclsAtoms_;
- t_blocka excls_;
+ int refPosCount_;
+ int testPosCount_;
+ std::vector<int> exclusionIds_;
+ gmx::ListOfLists<int> excls_;
};
// static
-void ExclusionsHelper::markExcludedPairs(RefPairList* refPairs, int testIndex, const t_blocka* excls)
+void ExclusionsHelper::markExcludedPairs(RefPairList* refPairs, int testIndex, const gmx::ListOfLists<int>* excls)
{
int count = 0;
- for (int i = excls->index[testIndex]; i < excls->index[testIndex + 1]; ++i)
+ for (const int excludedIndex : (*excls)[testIndex])
{
- const int excludedIndex = excls->a[i];
NeighborhoodSearchTestData::RefPair searchPair(excludedIndex, 0.0);
RefPairList::iterator excludedRefPair =
std::lower_bound(refPairs->begin(), refPairs->end(), searchPair);
exclusionIds_.resize(std::max(refPosCount, testPosCount), 1);
exclusionIds_[0] = 0;
std::partial_sum(exclusionIds_.begin(), exclusionIds_.end(), exclusionIds_.begin());
-
- excls_.nr = 0;
- excls_.index = nullptr;
- excls_.nra = 0;
- excls_.a = nullptr;
- excls_.nalloc_index = 0;
- excls_.nalloc_a = 0;
}
void ExclusionsHelper::generateExclusions()
// particles would be higher, or where the exclusions would not be random,
// to make a higher percentage of the exclusions to actually be within the
// cutoff.
- exclsIndex_.reserve(testPosCount_ + 1);
- exclsAtoms_.reserve(testPosCount_ * 20);
- exclsIndex_.push_back(0);
for (int i = 0; i < testPosCount_; ++i)
{
+ excls_.pushBackListOfSize(20);
+ gmx::ArrayRef<int> exclusionsForAtom = excls_.back();
for (int j = 0; j < 20; ++j)
{
- exclsAtoms_.push_back(i + j * 3);
+ exclusionsForAtom[j] = i + j * 3;
}
- exclsIndex_.push_back(exclsAtoms_.size());
}
- excls_.nr = exclsIndex_.size();
- excls_.index = exclsIndex_.data();
- excls_.nra = exclsAtoms_.size();
- excls_.a = exclsAtoms_.data();
}
/********************************************************************
void testPairSearchFull(gmx::AnalysisNeighborhoodSearch* search,
const NeighborhoodSearchTestData& data,
const gmx::AnalysisNeighborhoodPositions& pos,
- const t_blocka* excls,
+ const gmx::ListOfLists<int>* excls,
const gmx::ArrayRef<const int>& refIndices,
const gmx::ArrayRef<const int>& testIndices,
bool selfPairs);
void NeighborhoodSearchTest::testPairSearchFull(gmx::AnalysisNeighborhoodSearch* search,
const NeighborhoodSearchTestData& data,
const gmx::AnalysisNeighborhoodPositions& pos,
- const t_blocka* excls,
+ const gmx::ListOfLists<int>* excls,
const gmx::ArrayRef<const int>& refIndices,
const gmx::ArrayRef<const int>& testIndices,
bool selfPairs)
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#elif defined(_MSC_VER)
# include <windows.h>
-typedef __int64 gmx_cycles_t;
+typedef __int64 gmx_cycles_t;
#elif (defined(__hpux) || defined(__HP_cc)) && defined(__ia64)
/* HP compiler on ia64 */
# include <machine/sys/inline.h>
-typedef unsigned long gmx_cycles_t;
+typedef unsigned long gmx_cycles_t;
#elif (defined(__INTEL_COMPILER) || defined(__ECC)) && defined(__ia64__)
/* Intel compiler on ia64 */
# include <ia64intrin.h>
-typedef unsigned long gmx_cycles_t;
+typedef unsigned long gmx_cycles_t;
#elif defined(__GNUC__) && defined(__ia64__)
/* ia64 with GCC inline assembly */
#elif ((defined(__hppa__) || defined(__hppa)) && defined(__hpux))
/* HP PA-RISC, instruction when using HP compiler */
# include <machine/inline.h>
-typedef unsigned long gmx_cycles_t;
+typedef unsigned long gmx_cycles_t;
#elif defined(__GNUC__) && defined(__s390__)
/* S390, taken from FFTW who got it from James Treacy */
-typedef unsigned long long gmx_cycles_t;
+typedef unsigned long long gmx_cycles_t;
#elif defined(__GNUC__) && defined(__alpha__)
/* gcc inline assembly on alpha CPUs */
#elif defined(__DECC) && defined(__alpha)
/* Digital GEM C compiler on alpha */
# include <c_asm.h>
-typedef unsigned long gmx_cycles_t;
+typedef unsigned long gmx_cycles_t;
#elif (defined(__sgi) && defined(CLOCK_SGI_CYCLE))
/* Irix compilers on SGI hardware. Get nanoseconds from struct timespec */
#elif (defined(__SVR4) && defined(__SUNPRO_CC))
/* Solaris high-resolution timers */
-typedef hrtime_t gmx_cycles_t;
+typedef hrtime_t gmx_cycles_t;
#elif defined(__xlC__) && defined(_AIX)
/* AIX compilers */
#elif ((defined(__GNUC__) || defined(__IBM_GCC_ASM) || defined(__IBM_STDCPP_ASM)) \
&& (defined(__powerpc__) || defined(__ppc__)))
/* PowerPC using gcc inline assembly (also works on xlc>=7.0 with -qasm=gcc) */
-typedef unsigned long long gmx_cycles_t;
+typedef unsigned long long gmx_cycles_t;
#elif (defined(__MWERKS__) && (defined(MAC) || defined(macintosh)))
/* Metrowerks on macintosh */
#elif defined(__sun) && defined(__sparcv9)
-typedef unsigned long gmx_cycles_t;
+typedef unsigned long gmx_cycles_t;
#else
/*! \brief Integer-like datatype for cycle counter values
* the difference between two gmx_cycles_t values returned from this
* routine.
*/
-#if (GMX_CYCLECOUNTERS == 0)
-static __inline__ gmx_cycles_t gmx_cycles_read(void)
-{
- return 0;
-}
-#elif ((defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__PATHSCALE__) || defined(__PGIC__)) \
- && (defined(__i386__) || defined(__x86_64__)) && !defined(_CRAYC))
+#if ((defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__PATHSCALE__) || defined(__PGIC__)) \
+ && (defined(__i386__) || defined(__x86_64__)) && !defined(_CRAYC))
static __inline__ gmx_cycles_t gmx_cycles_read()
{
/* x86 with GCC inline assembly - pentium TSC register */
* one when later linking to the library it might happen that the
* library supports cyclecounters but not the headers, or vice versa.
*/
-#if (GMX_CYCLECOUNTERS == 0)
-static __inline__ bool gmx_cycles_have_counter(void)
-{
- return 0;
-}
-#elif ((defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__PATHSCALE__) \
- || defined(__PGIC__) || defined(_CRAYC)) \
- && (defined(__i386__) || defined(__x86_64__)))
+#if ((defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__PATHSCALE__) \
+ || defined(__PGIC__) || defined(_CRAYC)) \
+ && (defined(__i386__) || defined(__x86_64__)))
static __inline__ bool gmx_cycles_have_counter()
{
/* x86 or x86-64 with GCC inline assembly - pentium TSC register */
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2008, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2008, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2017,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2017,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
"of atoms in the tpr file (%d)", \
(i), (n))
-static gmx_bool* bKeepIt(int gnx, int natoms, int index[])
+static std::vector<bool> bKeepIt(int gnx, int natoms, int index[])
{
- gmx_bool* b;
- int i;
+ std::vector<bool> b(natoms);
- snew(b, natoms);
- for (i = 0; (i < gnx); i++)
+ for (int i = 0; (i < gnx); i++)
{
RANGECHK(index[i], natoms);
- b[index[i]] = TRUE;
+ b[index[i]] = true;
}
return b;
}
-static int* invind(int gnx, int natoms, int index[])
+static std::vector<int> invind(int gnx, int natoms, int index[])
{
- int* inv;
- int i;
+ std::vector<int> inv(natoms);
- snew(inv, natoms);
- for (i = 0; (i < gnx); i++)
+ for (int i = 0; (i < gnx); i++)
{
RANGECHK(index[i], natoms);
inv[index[i]] = i;
return inv;
}
-static void reduce_block(const gmx_bool bKeep[], t_block* block, const char* name)
+static gmx::ListOfLists<int> reduce_listoflists(gmx::ArrayRef<const int> invindex,
+ const std::vector<bool>& bKeep,
+ const gmx::ListOfLists<int>& src,
+ const char* name)
{
- int* index;
- int i, j, newi, newj;
-
- snew(index, block->nr);
+ gmx::ListOfLists<int> lists;
- newi = newj = 0;
- for (i = 0; (i < block->nr); i++)
+ std::vector<int> exclusionsForAtom;
+ for (gmx::index i = 0; i < src.ssize(); i++)
{
- for (j = block->index[i]; (j < block->index[i + 1]); j++)
+ if (bKeep[i])
{
- if (bKeep[j])
+ exclusionsForAtom.clear();
+ for (const int j : src[i])
{
- newj++;
+ if (bKeep[j])
+ {
+ exclusionsForAtom.push_back(invindex[j]);
+ }
}
- }
- if (newj > index[newi])
- {
- newi++;
- index[newi] = newj;
+ lists.pushBack(exclusionsForAtom);
}
}
- fprintf(stderr, "Reduced block %8s from %6d to %6d index-, %6d to %6d a-entries\n", name,
- block->nr, newi, block->index[block->nr], newj);
- block->index = index;
- block->nr = newi;
-}
-
-static void reduce_blocka(const int invindex[], const gmx_bool bKeep[], t_blocka* block, const char* name)
-{
- int *index, *a;
- int i, j, k, newi, newj;
-
- snew(index, block->nr);
- snew(a, block->nra);
-
- newi = newj = 0;
- for (i = 0; (i < block->nr); i++)
- {
- for (j = block->index[i]; (j < block->index[i + 1]); j++)
- {
- k = block->a[j];
- if (bKeep[k])
- {
- a[newj] = invindex[k];
- newj++;
- }
- }
- if (newj > index[newi])
- {
- newi++;
- index[newi] = newj;
- }
- }
+ fprintf(stderr, "Reduced block %8s from %6zu to %6zu index-, %6d to %6d a-entries\n", name,
+ src.size(), lists.size(), src.numElements(), lists.numElements());
- fprintf(stderr, "Reduced block %8s from %6d to %6d index-, %6d to %6d a-entries\n", name,
- block->nr, newi, block->nra, newj);
- block->index = index;
- block->a = a;
- block->nr = newi;
- block->nra = newj;
+ return lists;
}
static void reduce_rvec(int gnx, const int index[], rvec vv[])
sfree(rinfo);
}
-static void reduce_ilist(const int invindex[], const gmx_bool bKeep[], t_ilist* il, int nratoms, const char* name)
+static void reduce_ilist(gmx::ArrayRef<const int> invindex,
+ const std::vector<bool>& bKeep,
+ t_ilist* il,
+ int nratoms,
+ const char* name)
{
t_iatom* ia;
int i, j, newnr;
static void reduce_topology_x(int gnx, int index[], gmx_mtop_t* mtop, rvec x[], rvec v[])
{
- t_topology top;
- gmx_bool* bKeep;
- int* invindex;
- int i;
+ gmx_localtop_t top;
+ gmx_mtop_generate_local_top(*mtop, &top, false);
+ t_atoms atoms = gmx_mtop_global_atoms(mtop);
- top = gmx_mtop_t_to_t_topology(mtop, false);
- bKeep = bKeepIt(gnx, top.atoms.nr, index);
- invindex = invind(gnx, top.atoms.nr, index);
+ const std::vector<bool> bKeep = bKeepIt(gnx, atoms.nr, index);
+ const std::vector<int> invindex = invind(gnx, atoms.nr, index);
- reduce_block(bKeep, &(top.mols), "mols");
- reduce_blocka(invindex, bKeep, &(top.excls), "excls");
reduce_rvec(gnx, index, x);
reduce_rvec(gnx, index, v);
- reduce_atom(gnx, index, top.atoms.atom, top.atoms.atomname, &(top.atoms.nres), top.atoms.resinfo);
+ reduce_atom(gnx, index, atoms.atom, atoms.atomname, &(atoms.nres), atoms.resinfo);
- for (i = 0; (i < F_NRE); i++)
+ for (int i = 0; (i < F_NRE); i++)
{
reduce_ilist(invindex, bKeep, &(top.idef.il[i]), interaction_function[i].nratoms,
interaction_function[i].name);
}
- top.atoms.nr = gnx;
+ atoms.nr = gnx;
mtop->moltype.resize(1);
mtop->moltype[0].name = mtop->name;
- mtop->moltype[0].atoms = top.atoms;
- for (i = 0; i < F_NRE; i++)
+ mtop->moltype[0].atoms = atoms;
+ mtop->moltype[0].excls = reduce_listoflists(invindex, bKeep, top.excls, "excls");
+ for (int i = 0; i < F_NRE; i++)
{
InteractionList& ilist = mtop->moltype[0].ilist[i];
ilist.iatoms.resize(top.idef.il[i].nr);
ilist.iatoms[j] = top.idef.il[i].iatoms[j];
}
}
- mtop->moltype[0].atoms = top.atoms;
- mtop->moltype[0].excls = top.excls;
mtop->molblock.resize(1);
mtop->molblock[0].type = 0;
mtop->molblock[0].nmol = 1;
- mtop->natoms = top.atoms.nr;
+ mtop->natoms = atoms.nr;
}
static void zeroq(const int index[], gmx_mtop_t* mtop)
#include <algorithm>
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/txtdump.h"
return indent;
}
+static int pr_listoflists_title(FILE* fp, int indent, const char* title, const gmx::ListOfLists<int>* lists)
+{
+ if (available(fp, lists, indent, title))
+ {
+ indent = pr_title(fp, indent, title);
+ pr_indent(fp, indent);
+ fprintf(fp, "numLists=%zu\n", lists->size());
+ pr_indent(fp, indent);
+ fprintf(fp, "numElements=%d\n", lists->numElements());
+ }
+ return indent;
+}
+
static void low_pr_blocka(FILE* fp, int indent, const char* title, const t_blocka* block, gmx_bool bShowNumbers)
{
int i;
}
}
+void pr_listoflists(FILE* fp, int indent, const char* title, const gmx::ListOfLists<int>* lists, gmx_bool bShowNumbers)
+{
+ if (available(fp, lists, indent, title))
+ {
+ indent = pr_listoflists_title(fp, indent, title, lists);
+ for (gmx::index i = 0; i < lists->ssize(); i++)
+ {
+ int size = pr_indent(fp, indent);
+ gmx::ArrayRef<const int> list = (*lists)[i];
+ if (list.empty())
+ {
+ size += fprintf(fp, "%s[%d]={", title, int(i));
+ }
+ else
+ {
+ size += fprintf(fp, "%s[%d][num=%zu]={", title, bShowNumbers ? int(i) : -1, list.size());
+ }
+ bool isFirst = true;
+ for (const int j : list)
+ {
+ if (!isFirst)
+ {
+ size += fprintf(fp, ", ");
+ }
+ if ((size) > (USE_WIDTH))
+ {
+ fprintf(fp, "\n");
+ size = pr_indent(fp, indent + INDENT);
+ }
+ size += fprintf(fp, "%d", j);
+ isFirst = false;
+ }
+ fprintf(fp, "}\n");
+ }
+ }
+}
+
void copy_block(const t_block* src, t_block* dst)
{
dst->nr = src->nr;
namespace gmx
{
+template<typename>
+class ListOfLists;
+
/*! \brief Division of a range of indices into consecutive blocks
*
* A range of consecutive indices 0 to full.range.end() is divided
void pr_block(FILE* fp, int indent, const char* title, const t_block* block, gmx_bool bShowNumbers);
void pr_blocka(FILE* fp, int indent, const char* title, const t_blocka* block, gmx_bool bShowNumbers);
+void pr_listoflists(FILE* fp, int indent, const char* title, const gmx::ListOfLists<int>* block, gmx_bool bShowNumbers);
#endif
#include "gromacs/topology/block.h"
#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"
namespace gmx
{
+namespace
+{
+
+//! Converts ListOfLists to a list of ExclusionBlocks
+void listOfListsToExclusionBlocks(const ListOfLists<int>& b, gmx::ArrayRef<ExclusionBlock> b2)
+{
+ for (gmx::index i = 0; i < b.ssize(); i++)
+ {
+ for (int jAtom : b[i])
+ {
+ b2[i].atomNumber.push_back(jAtom);
+ }
+ }
+}
+
+//! Converts a list of ExclusionBlocks to ListOfLists
+void exclusionBlocksToListOfLists(gmx::ArrayRef<const ExclusionBlock> b2, ListOfLists<int>* b)
+{
+ b->clear();
+
+ for (const auto& block : b2)
+ {
+ b->pushBack(block.atomNumber);
+ }
+}
+
+} // namespace
+
void blockaToExclusionBlocks(const t_blocka* b, gmx::ArrayRef<ExclusionBlock> b2)
{
for (int i = 0; (i < b->nr); i++)
b->index[i] = nra;
}
-void mergeExclusions(t_blocka* excl, gmx::ArrayRef<ExclusionBlock> b2)
+namespace
{
- if (b2.empty())
- {
- return;
- }
- GMX_RELEASE_ASSERT(b2.ssize() == excl->nr,
- "Cannot merge exclusions for "
- "blocks that do not describe the same number "
- "of particles");
- /* Convert the t_blocka entries to ExclusionBlock form */
- blockaToExclusionBlocks(excl, b2);
-
- /* Count and sort the exclusions */
+//! Counts and sorts the exclusions
+int countAndSortExclusions(gmx::ArrayRef<ExclusionBlock> b2)
+{
int nra = 0;
for (auto& block : b2)
{
nra += block.nra();
}
}
- excl->nra = nra;
- srenew(excl->a, excl->nra);
- exclusionBlocksToBlocka(b2, excl);
+ return nra;
+}
+
+} // namespace
+
+void mergeExclusions(ListOfLists<int>* excl, gmx::ArrayRef<ExclusionBlock> b2)
+{
+ if (b2.empty())
+ {
+ return;
+ }
+ GMX_RELEASE_ASSERT(b2.ssize() == excl->ssize(),
+ "Cannot merge exclusions for "
+ "blocks that do not describe the same number "
+ "of particles");
+
+    /* Convert the ListOfLists entries to ExclusionBlock form */
+ listOfListsToExclusionBlocks(*excl, b2);
+
+ countAndSortExclusions(b2);
+
+ exclusionBlocksToListOfLists(b2, excl);
}
} // namespace gmx
namespace gmx
{
+template<typename>
+class ListOfLists;
/*! \libinternal \brief
* Describes exclusions for a single atom.
* Requires that \c b2 and \c excl describe the same number of
* particles, if \c b2 describes a non-zero number.
*/
-void mergeExclusions(t_blocka* excl, gmx::ArrayRef<ExclusionBlock> b2);
+void mergeExclusions(ListOfLists<int>* excl, gmx::ArrayRef<ExclusionBlock> b2);
/*! \brief
* Convert the exclusions.
idef->iparams_fbposres = nullptr;
for (int f = 0; f < F_NRE; ++f)
{
- idef->il[f].iatoms = nullptr;
- idef->il[f].nalloc = 0;
- idef->il[f].nr = 0;
- idef->il[f].nr_nonperturbed = 0;
+ idef->il[f].iatoms = nullptr;
+ idef->il[f].nalloc = 0;
+ idef->il[f].nr = 0;
+ idef->numNonperturbedInteractions[f] = 0;
}
idef->cmap_grid = nullptr;
idef->iparams_posres_nalloc = 0;
idef->iparams_fbposres_nalloc = 0;
- idef->ilsort = 0;
+ idef->ilsort = ilsortUNKNOWN;
}
void done_idef(t_idef* idef)
void copy_ilist(const t_ilist* src, t_ilist* dst)
{
- dst->nr = src->nr;
- dst->nr_nonperturbed = src->nr_nonperturbed;
- dst->nalloc = src->nalloc;
+ dst->nr = src->nr;
+ dst->nalloc = src->nr;
- snew(dst->iatoms, dst->nr);
+ snew(dst->iatoms, dst->nalloc);
for (int i = 0; i < dst->nr; ++i)
{
dst->iatoms[i] = src->iatoms[i];
int size() const { return nr; }
int nr;
- int nr_nonperturbed;
t_iatom* iatoms;
int nalloc;
};
int iparams_posres_nalloc, iparams_fbposres_nalloc;
t_ilist il[F_NRE];
- int ilsort;
+ /* The number of non-perturbed interactions at the start of each entry in il */
+ int numNonperturbedInteractions[F_NRE];
+ int ilsort;
} t_idef;
/*
* The cat routines below are old code from src/kernel/topcat.c
*/
-static void blockacat(t_blocka* dest, const t_blocka* src, int copies, int dnum, int snum)
-{
- int i, j, l, size;
- int destnr = dest->nr;
- int destnra = dest->nra;
-
- if (src->nr)
- {
- size = (dest->nr + copies * src->nr + 1);
- srenew(dest->index, size);
- }
- if (src->nra)
- {
- size = (dest->nra + copies * src->nra);
- srenew(dest->a, size);
- }
-
- for (l = destnr, j = 0; (j < copies); j++)
- {
- for (i = 0; (i < src->nr); i++)
- {
- dest->index[l++] = dest->nra + src->index[i];
- }
- dest->nra += src->nra;
- }
- for (l = destnra, j = 0; (j < copies); j++)
- {
- for (i = 0; (i < src->nra); i++)
- {
- dest->a[l++] = dnum + src->a[i];
- }
- dnum += snum;
- dest->nr += src->nr;
- }
- dest->index[dest->nr] = dest->nra;
- dest->nalloc_index = dest->nr;
- dest->nalloc_a = dest->nra;
-}
-
static void ilistcat(int ftype, t_ilist* dest, const InteractionList& src, int copies, int dnum, int snum)
{
int nral, c, i, a;
}
}
-/*! \brief Copy excls from mtop.
- *
- * Makes a deep copy of excls(t_blocka) from gmx_mtop_t.
- * Used to initialize legacy topology types.
+/*! \brief Generate a single list of lists of exclusions for the whole system
*
* \param[in] mtop Reference to input mtop.
- * \param[in] excls    Pointer to final excls data structure.
+ * \return The list of lists of exclusions for all atoms in the system.
  */
-static void copyExclsFromMtop(const gmx_mtop_t& mtop, t_blocka* excls)
+static gmx::ListOfLists<int> globalExclusionLists(const gmx_mtop_t& mtop)
{
- init_blocka(excls);
- int natoms = 0;
+ gmx::ListOfLists<int> excls;
+
+ int atomIndex = 0;
for (const gmx_molblock_t& molb : mtop.molblock)
{
const gmx_moltype_t& molt = mtop.moltype[molb.type];
- int srcnr = molt.atoms.nr;
- int destnr = natoms;
-
- blockacat(excls, &molt.excls, molb.nmol, destnr, srcnr);
+ for (int mol = 0; mol < molb.nmol; mol++)
+ {
+ excls.appendListOfLists(molt.excls, atomIndex);
- natoms += molb.nmol * srcnr;
+ atomIndex += molt.atoms.nr;
+ }
}
+
+ return excls;
}
/*! \brief Updates inter-molecular exclusion lists
* \param[inout] excls existing exclusions in local topology
* \param[in] ids list of global IDs of atoms
*/
-static void addMimicExclusions(t_blocka* excls, const gmx::ArrayRef<const int> ids)
+static void addMimicExclusions(gmx::ListOfLists<int>* excls, const gmx::ArrayRef<const int> ids)
{
t_blocka inter_excl{};
init_blocka(&inter_excl);
size_t n_q = ids.size();
- inter_excl.nr = excls->nr;
+ inter_excl.nr = excls->ssize();
inter_excl.nra = n_q * n_q;
size_t total_nra = n_q * n_q;
- snew(inter_excl.index, excls->nr + 1);
+ snew(inter_excl.index, excls->ssize() + 1);
snew(inter_excl.a, total_nra);
- for (int i = 0; i < excls->nr; ++i)
+ for (int i = 0; i < inter_excl.nr; ++i)
{
inter_excl.index[i] = 0;
}
inter_excl.index[inter_excl.nr] = n_q * n_q;
- std::vector<gmx::ExclusionBlock> qmexcl2(excls->nr);
+ std::vector<gmx::ExclusionBlock> qmexcl2(excls->size());
gmx::blockaToExclusionBlocks(&inter_excl, qmexcl2);
// Merge the created exclusion list with the existing one
{
copyAtomtypesFromMtop(mtop, &top->atomtypes);
copyIdefFromMtop(mtop, &top->idef, freeEnergyInteractionsAtEnd, bMergeConstr);
- copyExclsFromMtop(mtop, &top->excls);
+ top->excls = globalExclusionLists(mtop);
if (!mtop.intermolecularExclusionGroup.empty())
{
addMimicExclusions(&top->excls, mtop.intermolecularExclusionGroup);
{
copyAtomtypesFromMtop(mtop, &top->atomtypes);
copyIdefFromMtop(mtop, &top->idef, freeEnergyInteractionsAtEnd, bMergeConstr);
- copyExclsFromMtop(mtop, &top->excls);
top->name = mtop.name;
top->atoms = gmx_mtop_global_atoms(&mtop);
#include "gromacs/topology/exclusionblocks.h"
+#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "gromacs/topology/block.h"
#include "gromacs/utility/arrayref.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/smalloc.h"
#include "testutils/cmdlinetest.h"
addGroupToBlocka(ba, indices);
}
+//! Return ListOfLists filled with some data structures
+ListOfLists<int> makeTestListOfLists()
+{
+ ListOfLists<int> list;
+
+ std::vector<int> indices = { 12, 11, 9, 6, 2 };
+ list.pushBack(indices);
+ indices = { 10, 8, 5, 1 };
+ list.pushBack(indices);
+ indices = { 7, 4, 0 };
+ list.pushBack(indices);
+
+ return list;
+}
+
class ExclusionBlockTest : public ::testing::Test
{
public:
{
const int natom = 3;
makeTestBlockAData(&ba_);
+ list_ = makeTestListOfLists();
b_.resize(natom);
}
~ExclusionBlockTest() override { done_blocka(&ba_); }
}
}
+ void compareBlocksAndList()
+ {
+ GMX_RELEASE_ASSERT(ssize(b_) == list_.ssize(), "The list counts should match");
+ for (index i = 0; i < ssize(b_); i++)
+ {
+ gmx::ArrayRef<const int> jList = list_[i];
+ ASSERT_EQ(b_[i].nra(), jList.ssize()) << "Block size mismatch at " << i << ".";
+ EXPECT_THAT(b_[i].atomNumber, ::testing::Pointwise(::testing::Eq(), jList));
+ }
+ }
+
protected:
t_blocka ba_;
+ ListOfLists<int> list_;
std::vector<ExclusionBlock> b_;
};
TEST_F(ExclusionBlockTest, MergeExclusions)
{
- mergeExclusions(&ba_, b_);
- compareBlocks();
+ mergeExclusions(&list_, b_);
+ compareBlocksAndList();
}
} // namespace
init_atom(&(top->atoms));
init_atomtypes(&(top->atomtypes));
init_block(&top->mols);
- init_blocka(&top->excls);
open_symtab(&top->symtab);
}
-gmx_moltype_t::gmx_moltype_t() : name(nullptr), excls()
+gmx_moltype_t::gmx_moltype_t() : name(nullptr)
{
init_t_atoms(&atoms, 0, FALSE);
}
gmx_moltype_t::~gmx_moltype_t()
{
done_atom(&atoms);
- done_blocka(&excls);
}
gmx_mtop_t::gmx_mtop_t()
done_symtab(&(top->symtab));
done_block(&(top->mols));
- done_blocka(&(top->excls));
}
void done_top_mtop(t_topology* top, gmx_mtop_t* mtop)
{
done_idef(&top->idef);
done_atom(&top->atoms);
- done_blocka(&top->excls);
done_block(&top->mols);
done_symtab(&top->symtab);
open_symtab(&mtop->symtab);
gmx_localtop_t::gmx_localtop_t()
{
- init_blocka_null(&excls);
init_idef(&idef);
init_atomtypes(&atomtypes);
}
if (!useInDomainDecomp_)
{
done_idef(&idef);
- done_blocka(&excls);
done_atomtypes(&atomtypes);
}
}
pr_indent(fp, indent);
fprintf(fp, "name=\"%s\"\n", *(molt->name));
pr_atoms(fp, indent, "atoms", &(molt->atoms), bShowNumbers);
- pr_blocka(fp, indent, "excls", &molt->excls, bShowNumbers);
+ pr_listoflists(fp, indent, "excls", &molt->excls, bShowNumbers);
for (j = 0; (j < F_NRE); j++)
{
pr_ilist(fp, indent, interaction_function[j].longname, ffparams->functype.data(),
pr_block(fp, indent, "mols", &top->mols, bShowNumbers);
pr_str(fp, indent, "bIntermolecularInteractions",
gmx::boolToString(top->bIntermolecularInteractions));
- pr_blocka(fp, indent, "excls", &top->excls, bShowNumbers);
pr_idef(fp, indent, "idef", &top->idef, bShowNumbers, bShowParameters);
}
}
}
}
-static void cmp_blocka(FILE* fp, const t_blocka* b1, const t_blocka* b2, const char* s)
+static void cmp_listoflists(FILE* fp,
+ const gmx::ListOfLists<int>& list1,
+ const gmx::ListOfLists<int>& list2,
+ const char* s)
{
char buf[32];
fprintf(fp, "comparing blocka %s\n", s);
- sprintf(buf, "%s.nr", s);
- cmp_int(fp, buf, -1, b1->nr, b2->nr);
- sprintf(buf, "%s.nra", s);
- cmp_int(fp, buf, -1, b1->nra, b2->nra);
+ sprintf(buf, "%s.numLists", s);
+ cmp_int(fp, buf, -1, list1.ssize(), list2.ssize());
+ sprintf(buf, "%s.numElements", s);
+ cmp_int(fp, buf, -1, list1.numElements(), list2.numElements());
}
static void compareFfparams(FILE* fp,
compareAtoms(fp, &mt1[i].atoms, &mt2[i].atoms, relativeTolerance, absoluteTolerance);
compareInteractionLists(fp, &mt1[i].ilist, &mt2[i].ilist);
std::string buf = gmx::formatString("excls[%d]", i);
- cmp_blocka(fp, &mt1[i].excls, &mt2[i].excls, buf.c_str());
+ cmp_listoflists(fp, mt1[i].excls, mt2[i].excls, buf.c_str());
}
}
void copy_moltype(const gmx_moltype_t* src, gmx_moltype_t* dst)
{
- dst->name = src->name;
- copy_blocka(&src->excls, &dst->excls);
+ dst->name = src->name;
+ dst->excls = src->excls;
t_atoms* atomsCopy = copy_t_atoms(&src->atoms);
dst->atoms = *atomsCopy;
sfree(atomsCopy);
#include "gromacs/topology/idef.h"
#include "gromacs/topology/symtab.h"
#include "gromacs/utility/enumerationhelpers.h"
+#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/unique_cptr.h"
enum class SimulationAtomGroupType : int
/*! \brief Default copy constructor */
gmx_moltype_t(const gmx_moltype_t&) = default;
- char** name; /**< Name of the molecule type */
- t_atoms atoms; /**< The atoms in this molecule */
- InteractionLists ilist; /**< Interaction list with local indices */
- t_blocka excls; /**< The exclusions */
+ char** name; /**< Name of the molecule type */
+ t_atoms atoms; /**< The atoms in this molecule */
+ InteractionLists ilist; /**< Interaction list with local indices */
+ gmx::ListOfLists<int> excls; /**< The exclusions */
};
/*! \brief Block of molecules of the same type, used in gmx_mtop_t */
//! Atomtype properties
t_atomtypes atomtypes;
//! The exclusions
- t_blocka excls;
+ gmx::ListOfLists<int> excls;
//! Flag for domain decomposition so we don't free already freed memory.
bool useInDomainDecomp_ = false;
};
t_atomtypes atomtypes; /* Atomtype properties */
t_block mols; /* The molecules */
gmx_bool bIntermolecularInteractions; /* Inter.mol. int. ? */
- t_blocka excls; /* The exclusions */
- t_symtab symtab; /* The symbol table */
+ /* Note that the exclusions are not stored in t_topology */
+ t_symtab symtab; /* The symbol table */
} t_topology;
void init_top(t_topology* top);
}
}
/* Now we now the number of non-perturbed interactions */
- ilist->nr_nonperturbed = ic;
+ idef->numNonperturbedInteractions[ftype] = ic;
/* Copy the buffer with perturbed interactions to the ilist */
for (a = 0; a < ib; a++)
if (debug)
{
+ const int numNonperturbed = idef->numNonperturbedInteractions[ftype];
fprintf(debug, "%s non-pert %d pert %d\n", interaction_function[ftype].longname,
- ilist->nr_nonperturbed, ilist->nr - ilist->nr_nonperturbed);
+ numNonperturbed, ilist->nr - numNonperturbed);
}
}
}
}
}
localTop_ = top.expandedTopology();
- if (localTop_->excls.nr == 0)
+ if (localTop_->excls.empty())
{
GMX_THROW(InconsistentInputError(
"-excl is set, but the file provided to -s does not define exclusions"));
install(FILES
arrayref.h
baseversion.h
+ classhelpers.h
enumerationhelpers.h
+ exceptions.h
+ listoflists.h
programcontext.h
range.h
smalloc.h
--- /dev/null
+/*
+ * This file is part of the GROMACS molecular simulation package.
+ *
+ * Copyright (c) 2019, by the GROMACS development team, led by
+ * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
+ * and including many others, as listed in the AUTHORS file in the
+ * top-level source directory and at http://www.gromacs.org.
+ *
+ * GROMACS is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * GROMACS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with GROMACS; if not, see
+ * http://www.gnu.org/licenses, or write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * If you want to redistribute modifications to GROMACS, please
+ * consider that scientific software is very special. Version
+ * control is crucial - bugs must be traceable. We will be happy to
+ * consider code for inclusion in the official distribution, but
+ * derived work must not be called official GROMACS. Details are found
+ * in the README & COPYING files - if they are missing, get the
+ * official version at http://www.gromacs.org.
+ *
+ * To help us fund GROMACS development, we humbly ask that you cite
+ * the research papers on the package. Check out http://www.gromacs.org.
+ */
+/*! \file
+ * \brief
+ * Declares gmx::ListOfLists
+ *
+ * \author Berk Hess <hess@kth.se>
+ * \inpublicapi
+ * \ingroup module_utility
+ */
+#ifndef GMX_UTILITY_LISTOFLISTS_H
+#define GMX_UTILITY_LISTOFLISTS_H
+
+#include <type_traits>
+#include <vector>
+
+#include "gromacs/utility/arrayref.h"
+#include "gromacs/utility/basedefinitions.h"
+#include "gromacs/utility/exceptions.h"
+
+namespace gmx
+{
+
+/*! \brief A list of lists, optimized for performance
+ *
+ * This class holds a list of \p size() lists of elements of type \p T.
+ * To optimize performance, the only modification operation supported
+ * is adding a new list at the end of the list of lists.
+ *
+ * This implementation stores all data internally in two std::vector objects
+ * and thereby avoids the overhead of managing \p size() separate objects
+ * in memory.
+ *
+ * Internal storage consists of one std::vector<int> listRanges_ of size number
+ * of lists plus one and a std::vector<T> elements_ with the elements of all
+ * lists concatenated. List i is stored in entries listRanges_[i] to
+ * listRanges_[i+1] in elements_.
+ *
+ * \note This class is currently limited to arithmetic types, mainly because
+ * this should only be used for performance critical applications.
+ * When performance is not critical, a std::vector of std::vector can be used.
+ *
+ * \tparam T value type
+ */
+
+template<typename T>
+class ListOfLists
+{
+    static_assert(std::is_arithmetic<T>::value, "This class is limited to arithmetic types");
+
+public:
+    //! Constructs an empty list of lists
+    ListOfLists() = default;
+
+    /*! \brief Constructs a list of lists from raw data in internal layout
+     *
+     * Does basic consistency checks and throws when one of those fails.
+     *
+     * \param[in] listRanges  Ranges of the lists concatenated (see above), is consumed
+     * \param[in] elements    Elements for all lists concatenated, is consumed
+     */
+    ListOfLists(std::vector<int>&& listRanges, std::vector<T>&& elements) :
+        listRanges_(std::move(listRanges)),
+        elements_(std::move(elements))
+    {
+        if (listRanges_.empty() || listRanges_.at(0) != 0)
+        {
+            GMX_THROW(InconsistentInputError(
+                    "listRanges does not have a first element with value 0"));
+        }
+        if (int(elements_.size()) != listRanges_.back())
+        {
+            GMX_THROW(InconsistentInputError(
+                    "The size of elements does not match the last value in listRanges"));
+        }
+    }
+
+    //! Returns the number of lists
+    std::size_t size() const { return listRanges_.size() - 1; }
+
+    /*! \brief Returns the number of lists
+     *
+     * \note Use ssize for any expression involving arithmetic operations
+     * (including loop indices).
+     */
+    index ssize() const { return index(listRanges_.size()) - 1; }
+
+    //! Returns whether the list holds no lists
+    bool empty() const { return listRanges_.size() == 1; }
+
+    //! Returns the sum of the number of elements over all lists
+    int numElements() const { return listRanges_.back(); }
+
+    //! Appends a new list with elements \p values, pass {} to add an empty list
+    void pushBack(ArrayRef<const T> values)
+    {
+        elements_.insert(elements_.end(), values.begin(), values.end());
+        listRanges_.push_back(int(elements_.size()));
+    }
+
+    //! Appends a new list with \p numElements value-initialized elements
+    void pushBackListOfSize(int numElements)
+    {
+        // With arithmetic types enforced, this assertion is always true
+        static_assert(std::is_default_constructible<T>::value,
+                      "pushBackListOfSize should only be called with default constructible types");
+        elements_.resize(elements_.size() + numElements);
+        listRanges_.push_back(int(elements_.size()));
+    }
+
+    //! Returns an ArrayRef to the elements of the list with the given index
+    ArrayRef<const T> operator[](std::size_t listIndex) const
+    {
+        return ArrayRef<const T>(elements_.data() + listRanges_[listIndex],
+                                 elements_.data() + listRanges_[listIndex + 1]);
+    }
+
+    //! Returns the list of elements for the list with index \p listIndex, throws an \p out_of_range exception when out of range
+    ArrayRef<const T> at(std::size_t listIndex) const
+    {
+        return ArrayRef<const T>(elements_.data() + listRanges_.at(listIndex),
+                                 elements_.data() + listRanges_.at(listIndex + 1));
+    }
+
+    /*! \brief Returns a reference to the first list
+     *
+     * \returns a reference to the first list
+     */
+    ArrayRef<T> front()
+    {
+        GMX_ASSERT(size() > 0, "Must contain a list if front() is called");
+        auto beginPtr = elements_.data();
+        auto endPtr   = beginPtr + listRanges_[1];
+        return { beginPtr, endPtr };
+    }
+
+    /*! \brief Returns a reference to the final list
+     *
+     * \returns a reference to the final list
+     */
+    ArrayRef<T> back()
+    {
+        GMX_ASSERT(size() > 0, "Must contain a list if back() is called");
+        auto endIndex   = *(listRanges_.end() - 1);
+        auto beginIndex = *(listRanges_.end() - 2);
+        return { elements_.data() + beginIndex, elements_.data() + endIndex };
+    }
+
+    //! Clears the list
+    void clear()
+    {
+        listRanges_.resize(1);
+        elements_.clear();
+    }
+
+    //! Appends a ListOfLists at the end and increments the appended elements by \p offset
+    void appendListOfLists(const ListOfLists& listOfLists, const T offset = 0)
+    {
+        listRanges_.insert(listRanges_.end(), listOfLists.listRanges_.begin() + 1,
+                           listOfLists.listRanges_.end());
+        const int oldNumElements = elements_.size();
+        // Shift the newly appended ranges so they index into our own elements_ buffer
+        for (std::size_t i = listRanges_.size() - listOfLists.size(); i < listRanges_.size(); i++)
+        {
+            listRanges_[i] += oldNumElements;
+        }
+        elements_.insert(elements_.end(), listOfLists.elements_.begin(), listOfLists.elements_.end());
+
+        if (offset != 0)
+        {
+            for (std::size_t i = elements_.size() - listOfLists.elements_.size(); i < elements_.size(); i++)
+            {
+                elements_[i] += offset;
+            }
+        }
+    }
+
+    //! Returns concatenated ranges of the lists (see above for details)
+    ArrayRef<const int> listRangesView() const { return listRanges_; }
+
+    //! Returns a view of the elements of all lists concatenated
+    ArrayRef<const T> elementsView() const { return elements_; }
+
+private:
+    //! The ranges of the lists, list i uses range \p listRanges_[i], \p listRanges_[i+1].
+    std::vector<int> listRanges_ = { 0 };
+    //! The elements in all lists concatenated
+    std::vector<T> elements_;
+};
+
+} // namespace gmx
+
+#endif
inmemoryserializer.cpp
keyvaluetreeserializer.cpp
keyvaluetreetransform.cpp
+ listoflists.cpp
logger.cpp
mutex.cpp
path.cpp
--- /dev/null
+/*
+ * This file is part of the GROMACS molecular simulation package.
+ *
+ * Copyright (c) 2018,2019, by the GROMACS development team, led by
+ * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
+ * and including many others, as listed in the AUTHORS file in the
+ * top-level source directory and at http://www.gromacs.org.
+ *
+ * GROMACS is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * GROMACS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with GROMACS; if not, see
+ * http://www.gnu.org/licenses, or write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * If you want to redistribute modifications to GROMACS, please
+ * consider that scientific software is very special. Version
+ * control is crucial - bugs must be traceable. We will be happy to
+ * consider code for inclusion in the official distribution, but
+ * derived work must not be called official GROMACS. Details are found
+ * in the README & COPYING files - if they are missing, get the
+ * official version at http://www.gromacs.org.
+ *
+ * To help us fund GROMACS development, we humbly ask that you cite
+ * the research papers on the package. Check out http://www.gromacs.org.
+ */
+/*! \internal \file
+ * \brief
+ * Tests for the ListOfLists class.
+ *
+ * \author Berk Hess <hess@kth.se>
+ * \ingroup module_utility
+ */
+#include "gmxpre.h"
+
+#include "gromacs/utility/listoflists.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "testutils/testasserts.h"
+
+namespace gmx
+{
+
+namespace
+{
+
+using ::testing::Eq;
+using ::testing::Pointwise;
+
+//! Compares all elements between a ListOfLists and a reference vector of vectors
+template<typename T>
+void compareLists(const ListOfLists<T>& list, const std::vector<std::vector<T>>& v)
+{
+    ASSERT_EQ(list.size(), v.size());
+    for (std::size_t i = 0; i < list.size(); i++)
+    {
+        // Element-wise comparison of list i against the reference
+        ASSERT_EQ(list[i].size(), v[i].size());
+        EXPECT_THAT(list[i], Pointwise(Eq(), v[i]));
+    }
+}
+
+// A default-constructed ListOfLists holds zero lists and zero elements
+TEST(ListOfLists, EmptyListOfListsWorks)
+{
+    ListOfLists<char> list;
+
+    EXPECT_EQ(list.size(), 0);
+    EXPECT_EQ(list.empty(), true);
+    EXPECT_EQ(list.numElements(), 0);
+}
+
+//! Checks that pushBack stores all input lists and their elements correctly
+template<typename T>
+void checkAppend(const std::vector<std::vector<T>>& inputLists)
+{
+    ListOfLists<T> list;
+
+    for (const auto& inputList : inputLists)
+    {
+        list.pushBack(inputList);
+    }
+    // Compare against the input size instead of a hard-coded count, so the
+    // helper works for any number of input lists
+    EXPECT_EQ(list.size(), inputLists.size());
+    compareLists(list, inputLists);
+}
+
+// pushBack() stores multiple lists, including negative element values
+TEST(ListOfLists, AppendWorks)
+{
+    const std::vector<std::vector<char>> v = { { 5, 3 }, { char(-1), 7, 4 } };
+
+    checkAppend(v);
+}
+
+// An empty list can be appended after a non-empty one and is indexable
+TEST(ListOfLists, EmptyListWorks)
+{
+    ListOfLists<char> list;
+
+    std::vector<char> v = { 5, 3 };
+    list.pushBack(v);
+    list.pushBack({});
+    EXPECT_EQ(list.size(), 2);
+    auto a = list[1];
+    EXPECT_EQ(a.empty(), true);
+}
+
+// pushBackListOfSize() reserves a writable list that back() can fill in place
+TEST(ListOfLists, AppendAccessWorks)
+{
+    const std::vector<std::vector<char>> v = { { 5, 3 }, { char(-1), 4 } };
+
+    ListOfLists<char> list;
+    list.pushBack(v[0]);
+    list.pushBackListOfSize(v[1].size());
+    std::copy(v[1].begin(), v[1].end(), list.back().begin());
+    compareLists(list, v);
+}
+
+// clear() returns the container to the empty state
+TEST(ListOfLists, ClearWorks)
+{
+    ListOfLists<char> list;
+
+    std::vector<char> v = { 5, 3 };
+    list.pushBack(v);
+    list.pushBack({});
+    list.clear();
+    EXPECT_EQ(list.empty(), true);
+    EXPECT_EQ(list.numElements(), 0);
+}
+
+// at() on an index past the last list throws; the empty container has no list 1
+TEST(ListOfLists, OutOfRangeAccessThrows)
+{
+    ListOfLists<char> list;
+
+    EXPECT_THROW(list.at(1), std::out_of_range);
+}
+
+// front()/back() track the first and last list through pushBack, empty-list
+// appends, appendListOfLists and pushBackListOfSize
+TEST(ListOfLists, FrontAndBackWork)
+{
+    ListOfLists<char> list1;
+    std::vector<char> v1{ { 3, 4 } };
+    list1.pushBack(v1);
+    EXPECT_THAT(list1.front(), Pointwise(Eq(), v1));
+    EXPECT_THAT(list1.back(), Pointwise(Eq(), v1));
+
+    std::vector<char> v2{ { 12, 63, 1 } };
+    list1.pushBack(v2);
+    EXPECT_THAT(list1.front(), Pointwise(Eq(), v1));
+    EXPECT_THAT(list1.back(), Pointwise(Eq(), v2));
+
+    // An appended empty list becomes the new back
+    list1.pushBack({});
+    EXPECT_THAT(list1.front(), Pointwise(Eq(), v1));
+    EXPECT_THAT(list1.back(), Pointwise(Eq(), std::vector<char>{}));
+
+    std::vector<char> v3{ { 99, 0, char(-1) } };
+    list1.pushBack(v3);
+    EXPECT_THAT(list1.front(), Pointwise(Eq(), v1));
+    EXPECT_THAT(list1.back(), Pointwise(Eq(), v3));
+
+    ListOfLists<char> list2;
+    list2.pushBack(v2);
+    EXPECT_THAT(list2.front(), Pointwise(Eq(), v2));
+    EXPECT_THAT(list2.back(), Pointwise(Eq(), v2));
+
+    // After appending list1, list2's back is list1's last list
+    list2.appendListOfLists(list1);
+    EXPECT_THAT(list2.front(), Pointwise(Eq(), v2));
+    EXPECT_THAT(list2.back(), Pointwise(Eq(), v3));
+    EXPECT_EQ(list2.back().size(), v3.size());
+
+    list2.pushBackListOfSize(1);
+    EXPECT_EQ(list2.back().size(), 1);
+}
+
+// The raw views (listRangesView/elementsView) round-trip through the
+// raw-data constructor and reproduce the original contents
+TEST(ListOfLists, ExtractsAndRestores)
+{
+    const std::vector<std::vector<char>> v({ { 5, 3 }, {}, { char(-1), 4 } });
+
+    ListOfLists<char> list1;
+    for (const auto& vlist : v)
+    {
+        list1.pushBack(vlist);
+    }
+
+    // Copy the internal layout out and rebuild a new container from it
+    auto listRanges = list1.listRangesView();
+    auto elements = list1.elementsView();
+    std::vector<int> listRangesVector;
+    listRangesVector.insert(listRangesVector.begin(), listRanges.begin(), listRanges.end());
+    std::vector<char> elementsVector;
+    elementsVector.insert(elementsVector.begin(), elements.begin(), elements.end());
+    ListOfLists<char> list2(std::move(listRangesVector), std::move(elementsVector));
+    compareLists(list2, v);
+}
+
+// appendListOfLists with a non-zero offset shifts every appended element
+TEST(ListOfLists, AppendsListOfListsWithOffset)
+{
+    std::vector<std::vector<char>> v = { { 5, 3 }, { 2, char(-1) }, { 4 } };
+
+    ListOfLists<char> list1;
+    ListOfLists<char> list2;
+
+    list1.pushBack(v[0]);
+    list2.pushBack(v[1]);
+    list2.pushBack(v[2]);
+    const char offset = 2;
+    list1.appendListOfLists(list2, offset);
+    // Apply the same offset to the reference data before comparing
+    for (std::size_t i = 1; i < v.size(); i++)
+    {
+        for (auto& elem : v[i])
+        {
+            elem += offset;
+        }
+    }
+    compareLists(list1, v);
+}
+
+} // namespace
+
+} // namespace gmx
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+# Copyright (c) 2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#
# This file is part of the GROMACS molecular simulation package.
#
-# Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
+# Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.