# the research papers on the package. Check out http://www.gromacs.org.
cmake_minimum_required(VERSION 2.8.8)
+ # When we require cmake >= 2.8.12, it will provide
+ # CMAKE_MINIMUM_REQUIRED_VERSION automatically, but in the meantime we
+ # need to set a variable, and it must have a different name.
+ set(GMX_CMAKE_MINIMUM_REQUIRED_VERSION "2.8.8")
# CMake modules/macros are in a subdirectory to keep this file cleaner
# This needs to be set before project() in order to pick up toolchain files
# NOTE: when releasing the "-dev" suffix needs to be stripped off!
# REGRESSIONTEST_VERSION and REGRESSIONTEST_BRANCH should always be
# defined.
-set(PROJECT_VERSION "5.0.1-dev")
+set(PROJECT_VERSION "5.1-dev")
# If this is a released tarball, "-dev" will not be present in
# PROJECT_VERSION, and REGRESSIONTEST_VERSION specifies the version
# number of the regressiontest tarball against which the code tarball
# can be tested. This will be the version of the last patch release.
- set(REGRESSIONTEST_VERSION "5.0-rc1")
+ set(REGRESSIONTEST_VERSION "5.0.1-dev")
# If this is not a released tarball, "-dev" will be present in
# PROJECT_VERSION, and REGRESSIONTEST_BRANCH specifies the name of the
# gerrit.gromacs.org branch whose HEAD can test this code, *if* this
# code contains all recent fixes from the corresponding code branch.
-set(REGRESSIONTEST_BRANCH "refs/heads/release-5-0")
+set(REGRESSIONTEST_BRANCH "refs/heads/master")
set(CUSTOM_VERSION_STRING ""
CACHE STRING "Custom version string (if empty, use hard-coded default)")
set(LIBRARY_VERSION ${LIBRARY_SOVERSION}.0.0)
# It is a bit irritating, but this has to be set separately for now!
SET(CPACK_PACKAGE_VERSION_MAJOR "5")
-SET(CPACK_PACKAGE_VERSION_MINOR "0")
+SET(CPACK_PACKAGE_VERSION_MINOR "1")
#SET(CPACK_PACKAGE_VERSION_PATCH "0")
# The numerical gromacs version. It is 40600 for 4.6.0.
mark_as_advanced(GMX_INSTALL_PREFIX)
include(gmxBuildTypeReference)
+include(gmxBuildTypeProfile)
include(gmxBuildTypeTSAN)
include(gmxBuildTypeASAN)
include(gmxBuildTypeReleaseWithAssert)
if(NOT CMAKE_BUILD_TYPE)
- set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel Reference RelWithAssert." FORCE)
+ set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel Reference RelWithAssert Profile." FORCE)
# There's no need to offer a user the choice of ThreadSanitizer
# Set the possible values of build type for cmake-gui
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release"
- "MinSizeRel" "RelWithDebInfo" "Reference" "RelWithAssert")
+ "MinSizeRel" "RelWithDebInfo" "Reference" "RelWithAssert" "Profile")
endif()
if(CMAKE_CONFIGURATION_TYPES)
# Add appropriate GROMACS-specific build types for the Visual
"List of configuration types"
FORCE)
endif()
-set(build_types_with_explicit_flags RELEASE DEBUG RELWITHDEBUGINFO RELWITHASSERT MINSIZEREL)
+set(build_types_with_explicit_flags RELEASE DEBUG RELWITHDEBUGINFO RELWITHASSERT MINSIZEREL PROFILE)
enable_language(C)
enable_language(CXX)
mark_as_advanced(GMX_COOL_QUOTES)
gmx_add_cache_dependency(GMX_COOL_QUOTES BOOL "NOT GMX_FAHCORE" OFF)
- # decide on GPU settings based on user-settings and GPU/CUDA detection
+ # Decide on GPU settings based on user-settings and GPU/CUDA detection.
+ # We support CUDA >=v4.0 on *nix, but <= v4.1 doesn't work with MSVC
+ if(MSVC)
+ set(REQUIRED_CUDA_VERSION 4.1)
+ else()
+ set(REQUIRED_CUDA_VERSION 4.0)
+ endif()
+ set(REQUIRED_CUDA_COMPUTE_CAPABILITY 2.0)
include(gmxManageGPU)
# Detect the architecture the compiler is targeting, detect
################################################################
# Shared library settings
################################################################
- if((NOT CMAKE_SYSTEM_NAME STREQUAL "Darwin") OR ((CMAKE_SYSTEM_VERSION VERSION_GREATER 8.0) AND (CMAKE_VERSION VERSION_GREATER 2.8.11)))
+ if((NOT CMAKE_SYSTEM_NAME STREQUAL "Darwin") OR ((CMAKE_SYSTEM_VERSION VERSION_GREATER 8.0) AND (NOT CMAKE_VERSION VERSION_LESS 2.8.12)))
if(GMX_LIB_INSTALL_DIR STREQUAL "lib")
set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
endif()
endif()
endif()
- if(GMX_USE_TNG)
- find_package(ZLIB QUIET)
- include(gmxTestZLib)
- gmx_test_zlib(HAVE_ZLIB)
- set(TNG_BUILD_WITH_ZLIB ${HAVE_ZLIB} CACHE BOOL "Build TNG with zlib compression")
- set(TNG_BUILD_FORTRAN OFF CACHE BOOL "Build Fortran compatible TNG library and examples for testing")
- set(TNG_BUILD_EXAMPLES OFF CACHE BOOL "Build examples showing usage of the TNG API")
- set(TNG_BUILD_COMPRESSION_TESTS OFF CACHE BOOL "Build tests of the TNG compression library")
- set(TNG_BUILD_DOCUMENTATION OFF CACHE BOOL "Use Doxygen to create the HTML based TNG API documentation")
- set(TNG_BUILD_TEST OFF CACHE BOOL "Build TNG testing binary.")
- add_subdirectory(${CMAKE_SOURCE_DIR}/src/external/tng_io)
- set(GMX_TNG_LIBRARIES tng_io)
- endif()
- mark_as_advanced(TNG_BUILD_FORTRAN)
- mark_as_advanced(TNG_BUILD_EXAMPLES)
- mark_as_advanced(TNG_BUILD_COMPRESSION_TESTS)
- mark_as_advanced(TNG_BUILD_DOCUMENTATION)
- mark_as_advanced(TNG_BUILD_TEST)
- mark_as_advanced(TNG_EXAMPLE_FILES_DIR)
-
if (GMX_BUILD_FOR_COVERAGE)
# Code heavy with asserts makes conditional coverage a close-to-useless metric,
# as by design most of the false branches are impossible to trigger in
if (NOT GMX_BUILD_MDRUN_ONLY)
add_subdirectory(doxygen)
+ add_subdirectory(install-guide)
add_subdirectory(share)
add_subdirectory(scripts)
endif()
1999-05-03 lpd Original version.
*/
- #include "../../include/md5.h"
+ #include "tng/md5.h"
#include <string.h>
#undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */
#else
/* Define storage for little-endian or both types of CPUs. */
md5_word_t xbuf[16];
- /* cppcheck-suppress unassignedVariable */
const md5_word_t *X;
#endif
endif()
endfunction ()
+ if(GMX_USE_TNG)
+ option(GMX_EXTERNAL_TNG "Use external TNG instead of compiling the version shipped with GROMACS."
+ OFF)
+ # Detect TNG if GMX_EXTERNAL_TNG is explicitly ON
+ if(GMX_EXTERNAL_TNG)
+ find_package(TNG_IO 1.6.0)
+ if(NOT TNG_IO_FOUND)
+ message(FATAL_ERROR
+ "TNG >= 1.6.0 not found. "
+ "You can set GMX_EXTERNAL_TNG=OFF to compile TNG.")
+ endif()
+ include_directories(${TNG_IO_INCLUDE_DIRS})
+ endif()
+ if(NOT GMX_EXTERNAL_TNG)
+ # TNG wants zlib if it is available
+ find_package(ZLIB QUIET)
+ include(gmxTestZLib)
+ gmx_test_zlib(HAVE_ZLIB)
+
+ include(${CMAKE_SOURCE_DIR}/src/external/tng_io/BuildTNG.cmake)
+ tng_get_source_list(TNG_SOURCES TNG_IO_DEFINITIONS)
+ list(APPEND LIBGROMACS_SOURCES ${TNG_SOURCES})
+ tng_set_source_properties(WITH_ZLIB ${HAVE_ZLIB})
+
+ if (HAVE_ZLIB)
+ list(APPEND GMX_EXTRA_LIBRARIES ${ZLIB_LIBRARIES})
+ endif()
+ endif()
+ else()
+ # We still need to get tng/tng_io_fwd.h from somewhere!
+ include_directories(${CMAKE_SOURCE_DIR}/src/external/tng_io/include)
+ endif()
+
add_subdirectory(gmxlib)
add_subdirectory(mdlib)
add_subdirectory(gmxpreprocess)
add_subdirectory(random)
add_subdirectory(onlinehelp)
add_subdirectory(options)
+add_subdirectory(pbcutil)
add_subdirectory(timing)
+add_subdirectory(topology)
add_subdirectory(utility)
add_subdirectory(fileio)
add_subdirectory(swap)
${EXTRAE_LIBRARIES}
${GMX_GPU_LIBRARIES}
${GMX_EXTRA_LIBRARIES}
- ${GMX_TNG_LIBRARIES}
+ ${TNG_IO_LIBRARIES}
${FFT_LIBRARIES} ${LINEAR_ALGEBRA_LIBRARIES}
${XML_LIBRARIES}
${THREAD_LIB} ${GMX_SHARED_LINKER_FLAGS})
#include "names.h"
#include "gromacs/fileio/confio.h"
#include "txtdump.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "nrnb.h"
-#include "mshift.h"
#include "mdrun.h"
#include "update.h"
-#include "physics.h"
-#include "mtop_util.h"
+#include "gromacs/topology/mtop_util.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/fileio/gmxfio.h"
-#include "xvgr.h"
+#include "gromacs/fileio/xvgr.h"
#include "gromacs/mdlib/groupcoord.h"
#include "gromacs/linearalgebra/nrjac.h"
-#include "gromacs/legacyheaders/gmx_fatal.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/utility/fatalerror.h"
/* We use the same defines as in mvdata.c here */
#define block_bc(cr, d) gmx_bcast( sizeof(d), &(d), (cr))
char fn[STRLEN];
- sprintf(fn, "EDdump_node%d_edi%d", cr->nodeid, nr_edi);
+ sprintf(fn, "EDdump_rank%d_edi%d", cr->nodeid, nr_edi);
out = gmx_ffopen(fn, "w");
fprintf(out, "#NINI\n %d\n#FITMAS\n %d\n#ANALYSIS_MAS\n %d\n",
/* Keep the curr_edi pointer for the case that the next group is empty: */
last_edi = curr_edi;
/* Let's prepare to read in the next edi data set: */
+ /* cppcheck-suppress uninitvar Fixed in cppcheck 1.65 */
curr_edi = edi_read;
}
if (edi_nr == 0)
FILE* debug = 0;
#endif
-#include "gmx_fatal.h"
+#include "gromacs/utility/fatalerror.h"
#ifdef GMX_FFT_FFTW3
if (debug)
{
- fprintf(debug, "FFT5D: Using %dx%d processor grid, rank %d,%d\n",
+ fprintf(debug, "FFT5D: Using %dx%d rank grid, rank %d,%d\n",
P[0], P[1], prank[0], prank[1]);
}
{
if (prank == 0)
{
- printf("FFT5D: WARNING: Number of processors %d not evenly dividable by %d\n", size, P0);
+ printf("FFT5D: WARNING: Number of ranks %d not evenly divisible by %d\n", size, P0);
}
P0 = lfactor(size);
}
confio.h
enxio.h
filenm.h
- futil.h
gmxfio.h
matio.h
mdoutf.h
+ mtxio.h
pdbio.h
tpxio.h
trajectory_writing.h
trxio.h
xdr_datatype.h
xtcio.h
+ xvgr.h
)
gmx_install_headers(fileio ${FILEIO_PUBLIC_HEADERS})
- # These files include tng_io.h from the TNG library. That header needs to know
- # whether inttypes.h exists or not.
- include(CheckIncludeFiles)
- check_include_file(inttypes.h HAVE_INTTYPES_H)
- if(HAVE_INTTYPES_H)
- set_property(SOURCE tngio.cpp APPEND PROPERTY COMPILE_DEFINITIONS USE_STD_INTTYPES_H)
- set_property(SOURCE tngio_for_tools.cpp APPEND PROPERTY COMPILE_DEFINITIONS USE_STD_INTTYPES_H)
+ if (GMX_USE_TNG AND TNG_IO_DEFINITIONS)
+ set_property(SOURCE tngio.cpp
+ APPEND PROPERTY COMPILE_DEFINITIONS ${TNG_IO_DEFINITIONS})
+ set_property(SOURCE tngio_for_tools.cpp
+ APPEND PROPERTY COMPILE_DEFINITIONS ${TNG_IO_DEFINITIONS})
endif()
if (BUILD_TESTING)
#include "thread_mpi/threads.h"
-#include "gmx_fatal.h"
+#include "gromacs/utility/fatalerror.h"
#include "macros.h"
#include "gromacs/utility/smalloc.h"
-#include "futil.h"
+#include "gromacs/utility/futil.h"
#include "filenm.h"
#include "gromacs/utility/cstringutil.h"
#include "gmxfio.h"
if (!ret)
{
- md5_init(&state);
- md5_append(&state, buf, read_len);
- md5_finish(&state, digest);
+ gmx_md5_init(&state);
+ gmx_md5_append(&state, buf, read_len);
+ gmx_md5_finish(&state, digest);
ret = read_len;
}
sfree(buf);
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
+ /* This software has been altered by GROMACS for its use, including
+ * the use of GMX_INTEGER_BIG_ENDIAN, and the renaming of the
+ * functions md5_init, md5_append and md5_finish to have a gmx_ prefix
+ * (to avoid name clashes). */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#else
/* Define storage for little-endian or both types of CPUs. */
md5_word_t xbuf[16];
- /* cppcheck-suppress unassignedVariable */
const md5_word_t *X;
#endif
}
void
- md5_init(md5_state_t *pms)
+ gmx_md5_init(md5_state_t *pms)
{
pms->count[0] = pms->count[1] = 0;
pms->abcd[0] = 0x67452301;
}
void
- md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes)
+ gmx_md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes)
{
const md5_byte_t *p = data;
int left = nbytes;
}
void
- md5_finish(md5_state_t *pms, md5_byte_t digest[16])
+ gmx_md5_finish(md5_state_t *pms, md5_byte_t digest[16])
{
static const md5_byte_t pad[64] = {
0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
data[i] = (md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3));
}
/* Pad to 56 bytes mod 64. */
- md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1);
+ gmx_md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1);
/* Append the length. */
- md5_append(pms, data, 8);
+ gmx_md5_append(pms, data, 8);
for (i = 0; i < 16; ++i)
{
digest[i] = (md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3));
#endif
#ifdef GMX_USE_TNG
- #include "../../external/tng_io/include/tng_io.h"
+ #include "tng/tng_io.h"
#endif
#include "gromacs/legacyheaders/copyrite.h"
-#include "gromacs/legacyheaders/gmx_fatal.h"
-#include "gromacs/legacyheaders/main.h"
-#include "gromacs/legacyheaders/physics.h"
+#include "gromacs/legacyheaders/types/ifunc.h"
+
+#include "gromacs/fileio/gmxfio.h"
+#include "gromacs/math/units.h"
#include "gromacs/math/utilities.h"
+#include "gromacs/topology/topology.h"
+#include "gromacs/utility/basenetwork.h"
#include "gromacs/utility/common.h"
+#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/programcontext.h"
-#include "gmxfio.h"
static const char *modeToVerb(char mode)
{
#ifndef GMX_FILEIO_TNGIO_H
#define GMX_FILEIO_TNGIO_H
- #include "external/tng_io/include/tng_io_fwd.h"
-#include "gromacs/legacyheaders/typedefs.h"
+ #include "tng/tng_io_fwd.h"
+#include "gromacs/legacyheaders/types/inputrec.h"
+#include "gromacs/math/vectypes.h"
+#include "gromacs/utility/real.h"
+
#ifdef __cplusplus
extern "C" {
#endif
}
#endif
+struct gmx_mtop_t;
+
/*! \brief Open a TNG trajectory file
*
* \param filename Name of file to open
* \param tng Valid handle to a TNG trajectory
* \param mtop Pointer to a topology (can be NULL)
*/
-void gmx_tng_add_mtop(tng_trajectory_t tng,
- const gmx_mtop_t *mtop);
+void gmx_tng_add_mtop(tng_trajectory_t tng,
+ const struct gmx_mtop_t *mtop);
/*! \brief Do all TNG preparation for full-precision whole-system
* trajectory writing during MD simulations.
* \param mtop Global topology
* \param ir Input settings (for writing frequencies)
*/
-void gmx_tng_prepare_md_writing(tng_trajectory_t tng,
- const gmx_mtop_t *mtop,
- const t_inputrec *ir);
+void gmx_tng_prepare_md_writing(tng_trajectory_t tng,
+ const struct gmx_mtop_t *mtop,
+ const t_inputrec *ir);
/*! \brief Set the default compression precision for TNG writing
*
* \param mtop Global topology
* \param ir Input settings (for writing frequencies)
*/
-void gmx_tng_prepare_low_prec_writing(tng_trajectory_t tng,
- const gmx_mtop_t *mtop,
- const t_inputrec *ir);
+void gmx_tng_prepare_low_prec_writing(tng_trajectory_t tng,
+ const struct gmx_mtop_t *mtop,
+ const t_inputrec *ir);
/*! \brief Write a frame to a TNG file
*
#include "trx.h"
#ifdef GMX_USE_TNG
- #include "../../external/tng_io/include/tng_io.h"
+ #include "tng/tng_io.h"
#endif
-#include "gromacs/legacyheaders/types/atoms.h"
-#include "gromacs/legacyheaders/physics.h"
-#include "gromacs/legacyheaders/gmx_fatal.h"
-
+#include "gromacs/math/units.h"
#include "gromacs/utility/common.h"
+#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"
void gmx_prepare_tng_writing(const char *filename,
GMX_UNUSED_VALUE(input);
GMX_UNUSED_VALUE(output);
GMX_UNUSED_VALUE(nAtoms);
+ GMX_UNUSED_VALUE(mtop);
+ GMX_UNUSED_VALUE(index);
+ GMX_UNUSED_VALUE(indexGroupName);
#endif
}
#else
GMX_UNUSED_VALUE(output);
GMX_UNUSED_VALUE(frame);
+ GMX_UNUSED_VALUE(natoms);
#endif
}
void *values = NULL;
double frameTime = -1.0;
int size, blockDependency;
- float prec;
+ double prec;
const int defaultNumIds = 5;
static gmx_int64_t fallbackRequestedIds[defaultNumIds] =
{
size = sizeof(double);
break;
default:
- size = 0; /* Just to make the compiler happy. */
gmx_incons("Illegal datatype of box shape values!");
}
for (int i = 0; i < DIM; i++)
GMX_UNUSED_VALUE(input);
GMX_UNUSED_VALUE(fr);
GMX_UNUSED_VALUE(requestedIds);
+ GMX_UNUSED_VALUE(numRequestedIds);
return FALSE;
#endif
}
gmx_int64_t codecId;
int blockDependency;
void *data = 0;
- float localPrec;
+ double localPrec;
stat = tng_data_block_name_get(input, blockId, name, maxLen);
if (stat != TNG_SUCCESS)
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
-
#ifndef GMX_FILEIO_TNGIO_FOR_TOOLS_H
#define GMX_FILEIO_TNGIO_FOR_TOOLS_H
-#include "gromacs/legacyheaders/typedefs.h"
+#include <stdio.h>
+
- #include "external/tng_io/include/tng_io_fwd.h"
+ #include "tng/tng_io_fwd.h"
+#include "gromacs/legacyheaders/types/simple.h"
+#include "gromacs/utility/basedefinitions.h"
+#include "gromacs/utility/real.h"
+
#ifdef __cplusplus
extern "C" {
#endif
}
#endif
+struct gmx_mtop_t;
+struct t_trxframe;
+
/*! \brief Prepare to write TNG output from trajectory conversion tools */
void gmx_prepare_tng_writing(const char *filename,
char mode,
tng_trajectory_t *in,
tng_trajectory_t *out,
int nAtoms,
- const gmx_mtop_t *mtop,
+ const struct gmx_mtop_t *mtop,
const atom_id *index,
const char *indexGroupName);
* atoms.
*/
void gmx_write_tng_from_trxframe(tng_trajectory_t output,
- t_trxframe *frame,
+ struct t_trxframe *frame,
int natoms);
/*! \brief Creates a molecule containing only the indexed atoms and sets
/*! \brief Read the first/next TNG frame. */
gmx_bool gmx_read_next_tng_frame(tng_trajectory_t input,
- t_trxframe *fr,
+ struct t_trxframe *fr,
gmx_int64_t *requestedIds,
int numRequestedIds);
/* This file is completely threadsafe - keep it that way! */
+#include <stdlib.h>
#include <string.h>
-#include "sysstuff.h"
-#include "gromacs/utility/smalloc.h"
-#include "gromacs/utility/cstringutil.h"
-#include "gmx_fatal.h"
#include "macros.h"
#include "names.h"
-#include "symtab.h"
-#include "futil.h"
+#include "gromacs/utility/futil.h"
#include "filenm.h"
#include "gmxfio.h"
#include "tpxio.h"
#include "txtdump.h"
#include "confio.h"
-#include "atomprop.h"
#include "copyrite.h"
-#include "vec.h"
-#include "mtop_util.h"
+
+#include "gromacs/math/vec.h"
+#include "gromacs/topology/atomprop.h"
+#include "gromacs/topology/block.h"
+#include "gromacs/topology/mtop_util.h"
+#include "gromacs/topology/symtab.h"
+#include "gromacs/topology/topology.h"
+#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/smalloc.h"
#define TPX_TAG_RELEASE "release"
}
if (file_version >= 79)
{
- gmx_fio_do_int(fio, fepvals->bPrintEnergy);
+ gmx_fio_do_int(fio, fepvals->edHdLPrintEnergy);
}
else
{
- fepvals->bPrintEnergy = FALSE;
+ fepvals->edHdLPrintEnergy = edHdLPrintEnergyNO;
}
/* handle lambda_neighbors */
#include <config.h>
#endif
+#include <assert.h>
+#include <stdlib.h>
+
#include "gromacs/commandline/pargs.h"
#include "typedefs.h"
#include "gromacs/utility/smalloc.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "gromacs/fileio/tpxio.h"
#include "gromacs/fileio/trxio.h"
-#include "xvgr.h"
-#include "rmpbc.h"
-#include "pbc.h"
-#include "physics.h"
-#include "index.h"
+#include "gromacs/fileio/xvgr.h"
+#include "gromacs/pbcutil/rmpbc.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/math/units.h"
+#include "gromacs/topology/index.h"
#include "gromacs/statistics/statistics.h"
#include "gmx_ana.h"
#include "macros.h"
-#include "gromacs/legacyheaders/gmx_fatal.h"
+#include "gromacs/utility/fatalerror.h"
#define SQR(x) (pow(x, 2.0))
#define EPSI0 (EPSILON0*E_CHARGE*E_CHARGE*AVOGADRO/(KILO*NANO)) /* EPSILON0 in SI units */
xshfr[i] = 0.0;
}
}
-
+ assert(time != NULL);
if (nfr == 0)
- if (bACF)
+ if (bACF && (ii < nvfr))
{
fprintf(stderr, "Integral and integrated fit to the current acf yields at t=%f:\n", time[vfr[ii]]);
fprintf(stderr, "sigma=%8.3f (pure integral: %.3f)\n", sgk-malt*pow(time[vfr[ii]], sigma), sgk);
}
else
{
- fprintf(stderr, "Too less points for a fit.\n");
+ fprintf(stderr, "Too few points for a fit.\n");
}
#include "gromacs/commandline/pargs.h"
#include "copyrite.h"
-#include "sysstuff.h"
#include "txtdump.h"
-#include "physics.h"
+#include "gromacs/math/units.h"
#include "macros.h"
-#include "gmx_fatal.h"
-#include "index.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/topology/index.h"
#include "gromacs/utility/smalloc.h"
-#include "vec.h"
-#include "xvgr.h"
+#include "gromacs/math/vec.h"
+#include "gromacs/fileio/xvgr.h"
+#include "viewit.h"
#include "gstat.h"
#include "gromacs/utility/cstringutil.h"
-#include "pbc.h"
+#include "gromacs/pbcutil/pbc.h"
#include "correl.h"
#include "gmx_ana.h"
#include "geminate.h"
-#include "gromacs/fileio/futil.h"
+#include "gromacs/utility/futil.h"
#include "gromacs/fileio/matio.h"
#include "gromacs/fileio/tpxio.h"
#include "gromacs/fileio/trxio.h"
{
int i;
- if (ISDON(datable[id]) || !datable)
+ if (!datable || ISDON(datable[id]))
{
if (ddd->dptr[id] == NOTSET) /* New donor */
{
static void normalizeACF(real *ct, real *gt, int nhb, int len)
{
- real ct_fac, gt_fac;
+ real ct_fac, gt_fac = 0;
int i;
/* Xu and Berne use the same normalization constant */
ct_fac = 1.0/ct[0];
- gt_fac = (nhb == 0) ? 0 : 1.0/(real)nhb;
+ if (nhb != 0)
+ {
+ gt_fac = 1.0/(real)nhb;
+ }
printf("Normalization for c(t) = %g for gh(t) = %g\n", ct_fac, gt_fac);
for (i = 0; i < len; i++)
"HIDDENDffusion coefficient to use in the reversible geminate recombination kinetic model. If negative, then it will be fitted to the ACF along with ka and kd."},
#ifdef GMX_OPENMP
{ "-nthreads", FALSE, etINT, {&nThreads},
- "Number of threads used for the parallel loop over autocorrelations. nThreads <= 0 means maximum number of threads. Requires linking with OpenMP. The number of threads is limited by the number of processors (before OpenMP v.3 ) or environment variable OMP_THREAD_LIMIT (OpenMP v.3)"},
+ "Number of threads used for the parallel loop over autocorrelations. nThreads <= 0 means maximum number of threads. Requires linking with OpenMP. The number of threads is limited by the number of cores (before OpenMP v.3 ) or environment variable OMP_THREAD_LIMIT (OpenMP v.3)"},
#endif
};
const char *bugs[] = {
{
t_matrix mat;
int id, ia, hh, x, y;
+ mat.flags = mat.y0 = 0;
if ((nframes > 0) && (hb->nrhb > 0))
{
#include <string.h>
#include "readinp.h"
#include "gromacs/commandline/pargs.h"
-#include "sysstuff.h"
#include "typedefs.h"
#include "gromacs/utility/smalloc.h"
#include "macros.h"
-#include "gmx_fatal.h"
-#include "vec.h"
-#include "pbc.h"
-#include "gromacs/fileio/futil.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/math/vec.h"
+#include "gromacs/utility/futil.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/fileio/confio.h"
#include "gromacs/fileio/tpxio.h"
-#include "gromacs/fileio/matio.h"
-#include "mshift.h"
-#include "xvgr.h"
-#include "rmpbc.h"
+#include "gromacs/fileio/xvgr.h"
#include "txtdump.h"
#include "eigio.h"
-#include "index.h"
+#include "gromacs/topology/index.h"
#include "gromacs/utility/cstringutil.h"
typedef struct
"(collective coordinates etc.), at least on the 'protein' side, ED sampling",
"is not very parallel-friendly from an implementation point of view. Because",
"parallel ED requires some extra communication, expect the performance to be",
- "lower as in a free MD simulation, especially on a large number of nodes and/or",
+ "lower as in a free MD simulation, especially on a large number of ranks and/or",
"when the ED group contains a lot of atoms. [PAR]",
"Please also note that if your ED group contains more than a single protein,",
"then the [TT].tpr[tt] file must contain the correct PBC representation of the ED group.",
printf("\n");
}
- EigvecFile = NULL;
EigvecFile = opt2fn("-f", NFILE, fnm);
/*read eigenvectors from eigvec.trr*/
#include "typedefs.h"
#include "types/commrec.h"
#include "gromacs/utility/smalloc.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "copyrite.h"
#include "gromacs/fileio/tpxio.h"
#include "readinp.h"
#include "checkpoint.h"
#include "gmx_ana.h"
#include "gromacs/random/random.h"
-#include "physics.h"
+#include "gromacs/math/units.h"
#include "mdatoms.h"
#include "coulomb.h"
-#include "mtop_util.h"
+#include "gromacs/topology/mtop_util.h"
#include "network.h"
#include "main.h"
#include "macros.h"
-#include "gromacs/legacyheaders/gmx_fatal.h"
+#include "gromacs/utility/fatalerror.h"
/* We use the same defines as in mvdata.c here */
#define block_bc(cr, d) gmx_bcast( sizeof(d), &(d), (cr))
xtot, xtot == 1 ? "" : "s");
if (PAR(cr))
{
- fprintf(stdout, " (%d sample%s per node)", x_per_core, x_per_core == 1 ? "" : "s");
+ fprintf(stdout, " (%d sample%s per rank)", x_per_core, x_per_core == 1 ? "" : "s");
}
fprintf(stdout, ".\n");
}
#ifdef DEBUG
if (PAR(cr))
{
- fprintf(stderr, "Node %3d: nx=[%3d...%3d] e_rec3=%e\n",
+ fprintf(stderr, "Rank %3d: nx=[%3d...%3d] e_rec3=%e\n",
cr->nodeid, startlocal, stoplocal, e_rec3);
}
#endif
#include <config.h>
#endif
+#include <stdlib.h>
#include <time.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#include "typedefs.h"
#include "types/commrec.h"
#include "gromacs/utility/smalloc.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "copyrite.h"
#include "gromacs/fileio/tpxio.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/math/utilities.h"
-#include "gmx_fatal.h"
+#include "gromacs/utility/fatalerror.h"
/* Enum for situations that can occur during log file parsing, the
* corresponding string entries can be found in do_the_tests() in
}
-static gmx_bool is_equal(real a, real b)
-{
- real diff, eps = 1.0e-7;
-
-
- diff = a - b;
-
- if (diff < 0.0)
- {
- diff = -diff;
- }
-
- if (diff < eps)
- {
- return TRUE;
- }
- else
- {
- return FALSE;
- }
-}
-
-
static void remove_if_exists(const char *fn)
{
if (gmx_fexist(fn))
/* Look for domain decomp grid and separate PME nodes: */
if (str_starts(line, matchstrdd))
{
- sscanf(line, "Domain decomposition grid %d x %d x %d, separate PME nodes %d",
+ sscanf(line, "Domain decomposition grid %d x %d x %d, separate PME ranks %d",
&(perfdata->nx), &(perfdata->ny), &(perfdata->nz), &npme);
if (perfdata->nPMEnodes == -1)
{
}
else if (perfdata->nPMEnodes != npme)
{
- gmx_fatal(FARGS, "PME nodes from command line and output file are not identical");
+ gmx_fatal(FARGS, "PME ranks from command line and output file are not identical");
}
iFound = eFoundDDStr;
}
fclose(fp);
return eParselogNoDDGrid;
}
- else if (str_starts(line, "The number of nodes you selected"))
+ else if (str_starts(line, "The number of ranks you selected"))
{
fclose(fp);
return eParselogLargePrimeFactor;
{
sep_line(fp);
fprintf(fp, "Summary of successful runs:\n");
- fprintf(fp, "Line tpr PME nodes Gcycles Av. Std.dev. ns/day PME/f");
+ fprintf(fp, "Line tpr PME ranks Gcycles Av. Std.dev. ns/day PME/f");
if (nnodes > 1)
{
fprintf(fp, " DD grid");
/* We have optimized the number of PME-only nodes */
if (winPME == -1)
{
- sprintf(strbuf, "%s", "the automatic number of PME nodes");
+ sprintf(strbuf, "%s", "the automatic number of PME ranks");
}
else
{
- sprintf(strbuf, "%d PME nodes", winPME);
+ sprintf(strbuf, "%d PME ranks", winPME);
}
}
fprintf(fp, "Best performance was achieved with %s", strbuf);
fprintf(fp, "\n");
/* Only mention settings if they were modified: */
- bRefinedCoul = !is_equal(info->rcoulomb[k_win], info->rcoulomb[0]);
- bRefinedVdW = !is_equal(info->rvdw[k_win], info->rvdw[0] );
+ bRefinedCoul = !gmx_within_tol(info->rcoulomb[k_win], info->rcoulomb[0], GMX_REAL_EPS);
+ bRefinedVdW = !gmx_within_tol(info->rvdw[k_win], info->rvdw[0], GMX_REAL_EPS);
bRefinedGrid = !(info->nkx[k_win] == info->nkx[0] &&
info->nky[k_win] == info->nky[0] &&
info->nkz[k_win] == info->nkz[0]);
{
/* Determine which Coulomb radii rc to use in the benchmarks */
add = (rmax-rmin)/(*ntprs-1);
- if (is_equal(rmin, info->rcoulomb[0]))
+ if (gmx_within_tol(rmin, info->rcoulomb[0], GMX_REAL_EPS))
{
ir->rcoulomb = rmin + j*add;
}
- else if (is_equal(rmax, info->rcoulomb[0]))
+ else if (gmx_within_tol(rmax, info->rcoulomb[0], GMX_REAL_EPS))
{
ir->rcoulomb = rmin + (j-1)*add;
}
fprintf(fp, " %-14s\n", fn_bench_tprs[j]);
/* Make it clear to the user that some additional settings were modified */
- if (!is_equal(ir->rvdw, info->rvdw[0])
- || !is_equal(ir->rlistlong, info->rlistlong[0]) )
+ if (!gmx_within_tol(ir->rvdw, info->rvdw[0], GMX_REAL_EPS)
+ || !gmx_within_tol(ir->rlistlong, info->rlistlong[0], GMX_REAL_EPS) )
{
bNote = TRUE;
}
"No DD grid found for these settings.",
"TPX version conflict!",
"mdrun was not started in parallel!",
- "Number of PP nodes has a prime factor that is too large.",
+ "Number of PP ranks has a prime factor that is too large.",
"An error occured."
};
char str_PME_f_load[13];
*pmeentries = 1;
snew(nPMEnodes, 1);
nPMEnodes[0] = npme_fixed;
- fprintf(stderr, "Will use a fixed number of %d PME-only nodes.\n", nPMEnodes[0]);
+ fprintf(stderr, "Will use a fixed number of %d PME-only ranks.\n", nPMEnodes[0]);
}
if (0 == repeats)
for (k = 0; k < nr_tprs; k++)
{
fprintf(fp, "\nIndividual timings for input file %d (%s):\n", k, tpr_names[k]);
- fprintf(fp, "PME nodes Gcycles ns/day PME/f Remark\n");
+ fprintf(fp, "PME ranks Gcycles ns/day PME/f Remark\n");
/* Loop over various numbers of PME nodes: */
for (i = 0; i < *pmeentries; i++)
{
/* Check number of nodes */
if (nnodes < 1)
{
- gmx_fatal(FARGS, "Number of nodes/threads must be a positive integer.");
+ gmx_fatal(FARGS, "Number of ranks/threads must be a positive integer.");
}
/* Automatically choose -ntpr if not set */
/* Add test scenarios if rmin or rmax were set */
if (*ntprs <= 2)
{
- if (!is_equal(*rmin, rcoulomb) && (*ntprs == 1) )
+ if (!gmx_within_tol(*rmin, rcoulomb, GMX_REAL_EPS) && (*ntprs == 1) )
{
(*ntprs)++;
fprintf(stderr, "NOTE: Setting -rmin to %g changed -ntpr to %d\n",
*rmin, *ntprs);
}
- if (!is_equal(*rmax, rcoulomb) && (*ntprs == 1) )
+ if (!gmx_within_tol(*rmax, rcoulomb, GMX_REAL_EPS) && (*ntprs == 1) )
{
(*ntprs)++;
fprintf(stderr, "NOTE: Setting -rmax to %g changed -ntpr to %d\n",
}
old = *ntprs;
/* If one of rmin, rmax is set, we need 2 tpr files at minimum */
- if (!is_equal(*rmax, rcoulomb) || !is_equal(*rmin, rcoulomb) )
+ if (!gmx_within_tol(*rmax, rcoulomb, GMX_REAL_EPS) || !gmx_within_tol(*rmin, rcoulomb, GMX_REAL_EPS) )
{
*ntprs = max(*ntprs, 2);
}
/* If both rmin, rmax are set, we need 3 tpr files at minimum */
- if (!is_equal(*rmax, rcoulomb) && !is_equal(*rmin, rcoulomb) )
+ if (!gmx_within_tol(*rmax, rcoulomb, GMX_REAL_EPS) && !gmx_within_tol(*rmin, rcoulomb, GMX_REAL_EPS) )
{
*ntprs = max(*ntprs, 3);
}
if (*ntprs > 1)
{
- if (is_equal(*rmin, rcoulomb) && is_equal(rcoulomb, *rmax)) /* We have just a single rc */
+ if (gmx_within_tol(*rmin, rcoulomb, GMX_REAL_EPS) && gmx_within_tol(rcoulomb, *rmax, GMX_REAL_EPS)) /* We have just a single rc */
{
fprintf(stderr, "WARNING: Resetting -ntpr to 1 since no Coulomb radius scaling is requested.\n"
"Please set rmin < rmax to test Coulomb radii in the [rmin, rmax] interval\n"
/* No more than 50% of all nodes can be assigned as PME-only nodes. */
if (2*npme_fixed > nnodes)
{
- gmx_fatal(FARGS, "Cannot have more than %d PME-only nodes for a total of %d nodes (you chose %d).\n",
+ gmx_fatal(FARGS, "Cannot have more than %d PME-only ranks for a total of %d ranks (you chose %d).\n",
nnodes/2, nnodes, npme_fixed);
}
if ((npme_fixed > 0) && (5*npme_fixed < nnodes))
{
- fprintf(stderr, "WARNING: Only %g percent of the nodes are assigned as PME-only nodes.\n",
+ fprintf(stderr, "WARNING: Only %g percent of the ranks are assigned as PME-only ranks.\n",
100.0*((real)npme_fixed / (real)nnodes));
}
if (opt2parg_bSet("-min", npargs, pa) || opt2parg_bSet("-max", npargs, pa))
{
fprintf(stderr, "NOTE: The -min, -max, and -npme options have no effect when a\n"
- " fixed number of PME-only nodes is requested with -fix.\n");
+ " fixed number of PME-only ranks is requested with -fix.\n");
}
}
}
int gmx_tune_pme(int argc, char *argv[])
{
const char *desc[] = {
- "For a given number [TT]-np[tt] or [TT]-ntmpi[tt] of processors/threads, [THISMODULE] systematically",
- "times [gmx-mdrun] with various numbers of PME-only nodes and determines",
+ "For a given number [TT]-np[tt] or [TT]-ntmpi[tt] of ranks, [THISMODULE] systematically",
+ "times [gmx-mdrun] with various numbers of PME-only ranks and determines",
"which setting is fastest. It will also test whether performance can",
"be enhanced by shifting load from the reciprocal to the real space",
"part of the Ewald sum. ",
"via the MPIRUN variable, e.g.[PAR]",
"[TT]export MPIRUN=\"/usr/local/mpirun -machinefile hosts\"[tt][PAR]",
"Please call [THISMODULE] with the normal options you would pass to",
- "[gmx-mdrun] and add [TT]-np[tt] for the number of processors to perform the",
+ "[gmx-mdrun] and add [TT]-np[tt] for the number of ranks to perform the",
"tests on, or [TT]-ntmpi[tt] for the number of threads. You can also add [TT]-r[tt]",
"to repeat each test several times to get better statistics. [PAR]",
"[THISMODULE] can test various real space / reciprocal space workloads",
"In this last test, the Fourier spacing is multiplied with [TT]rmax[tt]/rcoulomb. ",
"The remaining [TT].tpr[tt] files will have equally-spaced Coulomb radii (and Fourier "
"spacings) between these extremes. [BB]Note[bb] that you can set [TT]-ntpr[tt] to 1",
- "if you just seek the optimal number of PME-only nodes; in that case",
+ "if you just seek the optimal number of PME-only ranks; in that case",
"your input [TT].tpr[tt] file will remain unchanged.[PAR]",
"For the benchmark runs, the default of 1000 time steps should suffice for most",
"MD systems. The dynamic load balancing needs about 100 time steps",
/* g_tune_pme options: */
/***********************/
{ "-np", FALSE, etINT, {&nnodes},
- "Number of nodes to run the tests on (must be > 2 for separate PME nodes)" },
+ "Number of ranks to run the tests on (must be > 2 for separate PME ranks)" },
{ "-npstring", FALSE, etENUM, {procstring},
- "Specify the number of processors to [TT]$MPIRUN[tt] using this string"},
+ "Specify the number of ranks to [TT]$MPIRUN[tt] using this string"},
{ "-ntmpi", FALSE, etINT, {&nthreads},
"Number of MPI-threads to run the tests on (turns MPI & mpirun off)"},
{ "-r", FALSE, etINT, {&repeats},
"Repeat each test this often" },
{ "-max", FALSE, etREAL, {&maxPMEfraction},
- "Max fraction of PME nodes to test with" },
+ "Max fraction of PME ranks to test with" },
{ "-min", FALSE, etREAL, {&minPMEfraction},
- "Min fraction of PME nodes to test with" },
+ "Min fraction of PME ranks to test with" },
{ "-npme", FALSE, etENUM, {npmevalues_opt},
"Within -min and -max, benchmark all possible values for [TT]-npme[tt], or just a reasonable subset. "
"Auto neglects -min and -max and chooses reasonable values around a guess for npme derived from the .tpr"},
{ "-fix", FALSE, etINT, {&npme_fixed},
- "If >= -1, do not vary the number of PME-only nodes, instead use this fixed value and only vary rcoulomb and the PME grid spacing."},
+ "If >= -1, do not vary the number of PME-only ranks, instead use this fixed value and only vary rcoulomb and the PME grid spacing."},
{ "-rmax", FALSE, etREAL, {&rmax},
"If >0, maximal rcoulomb for -ntpr>1 (rcoulomb upscaling results in fourier grid downscaling)" },
{ "-rmin", FALSE, etREAL, {&rmin},
{
fprintf(stdout, "- %d ", maxPMEnodes);
}
- fprintf(stdout, "PME-only nodes.\n Note that the automatic number of PME-only nodes and no separate PME nodes are always tested.\n");
+ fprintf(stdout, "PME-only ranks.\n Note that the automatic number of PME-only ranks and no separate PME ranks are always tested.\n");
}
}
else
fprintf(fp, "%s for Gromacs %s\n", ShortProgram(), GromacsVersion());
if (!bThreads)
{
- fprintf(fp, "Number of nodes : %d\n", nnodes);
+ fprintf(fp, "Number of ranks : %d\n", nnodes);
fprintf(fp, "The mpirun command is : %s\n", cmd_mpirun);
if (strcmp(procstring[0], "none") != 0)
{
- fprintf(fp, "Passing # of nodes via : %s\n", procstring[0]);
+ fprintf(fp, "Passing # of ranks via : %s\n", procstring[0]);
}
else
{
- fprintf(fp, "Not setting number of nodes in system call\n");
+ fprintf(fp, "Not setting number of ranks in system call\n");
}
}
else
#include <config.h>
#endif
-#include <math.h>
#include <assert.h>
-#include "physics.h"
-#include "vec.h"
+#include <math.h>
+
+#include "gromacs/math/units.h"
+#include "gromacs/math/vec.h"
#include "gromacs/math/utilities.h"
#include "txtdump.h"
#include "bondf.h"
-#include "gromacs/utility/smalloc.h"
-#include "pbc.h"
#include "ns.h"
#include "macros.h"
#include "names.h"
-#include "gmx_fatal.h"
-#include "mshift.h"
-#include "main.h"
#include "disre.h"
#include "orires.h"
#include "force.h"
#include "nonbonded.h"
#include "restcbt.h"
+#include "gromacs/pbcutil/ishift.h"
+#include "gromacs/pbcutil/mshift.h"
+#include "gromacs/pbcutil/pbc.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/simd_math.h"
#include "gromacs/simd/vector_operations.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/smalloc.h"
/* Find a better place for this? */
const int cmap_coeff_matrix[] = {
}
if (bPrintSepPot)
{
- fprintf(fplog, "Step %s: bonded V and dVdl for this node\n",
+ fprintf(fplog, "Step %s: bonded V and dVdl for this rank\n",
gmx_step_str(step, buf));
}
/* The source code in this file should be thread-safe.
Please keep it that way. */
-
+#include "checkpoint.h"
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
+#include <errno.h>
+#include <stdlib.h>
#include <string.h>
#include <time.h>
+#include <fcntl.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include "names.h"
#include "typedefs.h"
#include "types/commrec.h"
-#include "gromacs/utility/smalloc.h"
#include "txtdump.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "network.h"
-#include "checkpoint.h"
-#include "main.h"
-#include "gromacs/utility/cstringutil.h"
-#include <fcntl.h>
#include "gromacs/fileio/filenm.h"
-#include "gromacs/fileio/futil.h"
+#include "gromacs/utility/futil.h"
#include "gromacs/fileio/gmxfio.h"
#include "gromacs/fileio/xdrf.h"
#include "gromacs/fileio/xdr_datatype.h"
+#include "gromacs/utility/basenetwork.h"
#include "gromacs/utility/baseversion.h"
-#include "gmx_fatal.h"
+#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/smalloc.h"
#include "buildinfo.h"
return 0;
#else
FILE *fp;
- int rc;
fp = fopen(filename, "rb+");
{
if (dtc == xdr_datatype_double)
{
+ /* cppcheck-suppress invalidPointerCast
+ * Only executed when real is double anyway */
vd = (double *)vp;
}
else
static int do_cpte_real(XDR *xd, int cptp, int ecpt, int sflags,
real *r, FILE *list)
{
- int n;
-
return do_cpte_reals_low(xd, cptp, ecpt, sflags, 1, NULL, &r, list, ecprREAL);
}
bool_t res = 0;
int dtc = xdr_datatype_int;
int *vp, *va = NULL;
- int nf, dt, i;
+ int nf, dt;
nf = n;
res = xdr_int(xd, &nf);
bool_t res = 0;
int dtc = xdr_datatype_double;
double *vp, *va = NULL;
- int nf, dt, i;
+ int nf, dt;
nf = n;
res = xdr_int(xd, &nf);
static int do_cpte_rvecs(XDR *xd, int cptp, int ecpt, int sflags,
int n, rvec **v, FILE *list)
{
- int n3;
-
return do_cpte_reals_low(xd, cptp, ecpt, sflags,
n*DIM, NULL, (real **)v, list, ecprRVEC);
}
matrix v, FILE *list)
{
real *vr;
- real ret;
+ int ret;
vr = (real *)&(v[0][0]);
ret = do_cpte_reals_low(xd, cptp, ecpt, sflags,
int n, real **v, FILE *list)
{
int i;
- real *vr;
- real ret, reti;
+ int ret, reti;
char name[CPTSTRLEN];
ret = 0;
}
for (i = 0; i < n; i++)
{
- reti = 0;
- vr = v[i];
reti = do_cpte_reals_low(xd, cptp, ecpt, sflags, n, NULL, &(v[i]), NULL, ecprREAL);
if (list && reti == 0)
{
sprintf(name, "%s[%d]", st_names(cptp, ecpt), i);
pr_reals(list, 0, name, v[i], n);
}
- if (reti == 0)
+ if (reti != 0)
{
- ret = 0;
+ ret = reti;
}
}
return ret;
bool_t res = 0;
int magic;
int idum = 0;
- int i;
char *fhost;
if (bRead)
*step = idum;
}
do_cpt_double_err(xd, "t", t, list);
- do_cpt_int_err(xd, "#PP-nodes", nnodes, list);
+ do_cpt_int_err(xd, "#PP-ranks", nnodes, list);
idum = 1;
do_cpt_int_err(xd, "dd_nc[x]", dd_nc ? &(dd_nc[0]) : &idum, list);
do_cpt_int_err(xd, "dd_nc[y]", dd_nc ? &(dd_nc[1]) : &idum, list);
do_cpt_int_err(xd, "dd_nc[z]", dd_nc ? &(dd_nc[2]) : &idum, list);
- do_cpt_int_err(xd, "#PME-only nodes", npme, list);
+ do_cpt_int_err(xd, "#PME-only ranks", npme, list);
do_cpt_int_err(xd, "state flags", flags_state, list);
if (*file_version >= 4)
{
{
enerhist->ener_sum_sim[i] = enerhist->ener_sum[i];
}
- fflags |= (1<<eenhENERGY_SUM_SIM);
}
if ( (fflags & (1<<eenhENERGY_NSUM)) &&
{
/* Assume we have an old file format and copy nsum to nsteps */
enerhist->nsteps = enerhist->nsum;
- fflags |= (1<<eenhENERGY_NSTEPS);
}
if ( (fflags & (1<<eenhENERGY_NSUM_SIM)) &&
!(fflags & (1<<eenhENERGY_NSTEPS_SIM)))
{
/* Assume we have an old file format and copy nsum to nsteps */
enerhist->nsteps_sim = enerhist->nsum_sim;
- fflags |= (1<<eenhENERGY_NSTEPS_SIM);
}
return ret;
static int do_cpt_EDstate(XDR *xd, gmx_bool bRead,
edsamstate_t *EDstate, FILE *list)
{
- int i, j;
+ int i;
int ret = 0;
char buf[STRLEN];
gmx_file_position_t **p_outputfiles, int *nfiles,
FILE *list, int file_version)
{
- int i, j;
+ int i;
gmx_off_t offset;
gmx_off_t mask = 0xFFFFFFFFL;
int offset_high, offset_low;
char *fntemp; /* the temporary checkpoint file name */
time_t now;
char timebuf[STRLEN];
- int nppnodes, npmenodes, flag_64bit;
+ int nppnodes, npmenodes;
char buf[1024], suffix[5+STEPSTRSIZE], sbuf[STEPSTRSIZE];
gmx_file_position_t *outputfiles;
int noutputfiles;
char *ftime;
- int flags_eks, flags_enh, flags_dfh, i;
+ int flags_eks, flags_enh, flags_dfh;
t_fileio *ret;
if (DOMAINDECOMP(cr))
*/
int gmx_major, gmx_minor;
int cpt_major, cpt_minor;
- sscanf(gmx_version(), "VERSION %d.%d", &gmx_major, &gmx_minor);
- sscanf(version, "VERSION %d.%d", &cpt_major, &cpt_minor);
+ sscanf(gmx_version(), "VERSION %5d.%5d", &gmx_major, &gmx_minor);
+ sscanf(version, "VERSION %5d.%5d", &cpt_major, &cpt_minor);
version_differs = (gmx_major != cpt_major || gmx_minor != cpt_minor);
}
check_int (fplog, "Double prec.", GMX_CPT_BUILD_DP, double_prec, &mm);
check_string(fplog, "Program name", Program(), fprog, &mm);
- check_int (fplog, "#nodes", cr->nnodes, npp_f+npme_f, &mm);
+ check_int (fplog, "#ranks", cr->nnodes, npp_f+npme_f, &mm);
if (cr->nnodes > 1)
{
- check_int (fplog, "#PME-nodes", cr->npmenodes, npme_f, &mm);
+ check_int (fplog, "#PME-ranks", cr->npmenodes, npme_f, &mm);
npp = cr->nnodes;
if (cr->npmenodes >= 0)
int file_version;
char *version, *btime, *buser, *bhost, *fprog, *ftime;
int double_prec;
- char filename[STRLEN], buf[STEPSTRSIZE];
- int nppnodes, eIntegrator_f, nppnodes_f, npmenodes_f;
+ char buf[STEPSTRSIZE];
+ int eIntegrator_f, nppnodes_f, npmenodes_f;
ivec dd_nc_f;
int natoms, ngtc, nnhpres, nhchainlength, nlambda, fflags, flags_eks, flags_enh, flags_dfh;
int d;
if (!PAR(cr))
{
- nppnodes = 1;
cr->npmenodes = 0;
}
else if (cr->nnodes == nppnodes_f + npmenodes_f)
{
cr->npmenodes = npmenodes_f;
}
- nppnodes = cr->nnodes - cr->npmenodes;
+ int nppnodes = cr->nnodes - cr->npmenodes;
if (nppnodes == nppnodes_f)
{
for (d = 0; d < DIM; d++)
}
}
}
- else
- {
- /* The number of PP nodes has not been set yet */
- nppnodes = -1;
- }
if (fflags != state->flags)
{
ivec dd_nc;
t_state state;
int flags_eks, flags_enh, flags_dfh;
- int indent;
- int i, j;
int ret;
gmx_file_position_t *outputfiles;
int nfiles;
/* This file is completely threadsafe - keep it that way! */
#include "gromacs/legacyheaders/macros.h"
-#include "gromacs/legacyheaders/vec.h"
#include "gromacs/fft/fft.h"
-#include "gromacs/fileio/futil.h"
#include "gromacs/fileio/strdb.h"
+#include "gromacs/math/vec.h"
#include "gromacs/random/random.h"
#include "gromacs/utility/baseversion.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
+#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/programcontext.h"
#include "gromacs/utility/smalloc.h"
"M. Lundborg, R. Apostolov, D. Spangberg, A. Gardenas, D. van der Spoel and E. Lindahl",
"An efficient and extensible format, library, and API for binary trajectory data from molecular simulations",
"J. Comput. Chem.",
- 35, 2014, "260-269"}
+ 35, 2014, "260-269"},
+ { "Goga2012",
+ "N. Goga and A. J. Rzepiela and A. H. de Vries and S. J. Marrink and H. J. C. Berendsen",
+ "Efficient Algorithms for Langevin and DPD Dynamics",
+ "J. Chem. Theory Comput.",
+ 8, 2012, "3637--3649"}
};
#define NSTR (int)asize(citedb)
#include "gromacs/math/utilities.h"
#include "typedefs.h"
#include "types/commrec.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "coulomb.h"
#include "gromacs/utility/smalloc.h"
-#include "physics.h"
+#include "gromacs/math/units.h"
#include "txtdump.h"
-#include "gromacs/fileio/futil.h"
+#include "gromacs/utility/futil.h"
#include "names.h"
#include "macros.h"
{
clear_mat(dxdf_lj);
}
- if ((calc_excl_corr || dipole_coeff != 0 || EVDW_PME(fr->vdwtype)) && !bFreeEnergy)
+ if ((calc_excl_corr || dipole_coeff != 0) && !bFreeEnergy)
{
for (i = start; (i < end); i++)
{
}
}
}
- else if (calc_excl_corr || dipole_coeff != 0 || EVDW_PME(fr->vdwtype))
+ else if (calc_excl_corr || dipole_coeff != 0)
{
for (i = start; (i < end); i++)
{
#include <config.h>
#endif
-#include <stdlib.h>
+#include <string>
+#include <vector>
+
#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
#include <string.h>
#ifdef HAVE_UNISTD_H
#include "types/enums.h"
#include "types/hw_info.h"
#include "types/commrec.h"
-#include "gmx_fatal.h"
-#include "gmx_fatal_collective.h"
+#include "network.h"
#include "md_logging.h"
#include "gmx_cpuid.h"
-#include "gromacs/utility/smalloc.h"
#include "gpu_utils.h"
#include "copyrite.h"
#include "gmx_detect_hardware.h"
-#include "main.h"
#include "md_logging.h"
-#include "gromacs/utility/gmxomp.h"
+
+#include "gromacs/utility/basenetwork.h"
#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/exceptions.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/gmxomp.h"
+#include "gromacs/utility/smalloc.h"
+#include "gromacs/utility/stringutil.h"
#include "thread_mpi/threads.h"
}
}
-static void print_gpu_use_stats(FILE *fplog,
- const gmx_gpu_info_t *gpu_info,
- const gmx_gpu_opt_t *gpu_opt,
- const t_commrec *cr)
+/*! \brief Helper function for writing comma-separated GPU IDs.
+ *
+ * \param[in] ids A container of integer GPU IDs
+ * \return A comma-separated string of GPU IDs */
+template <typename Container>
+static std::string makeGpuIdsString(const Container &ids)
{
- char sbuf[STRLEN], stmp[STRLEN];
- int i, ngpu_comp, ngpu_use;
+ std::string output;
- ngpu_comp = gpu_info->ncuda_dev_compatible;
- ngpu_use = gpu_opt->ncuda_dev_use;
+ if (0 != ids.size())
+ {
+ typename Container::const_iterator it = ids.begin();
+ output += gmx::formatString("%d", *it);
+ for (++it; it != ids.end(); ++it)
+ {
+ output += gmx::formatString(",%d", *it);
+ }
+ }
+ return output;
+}
+
+/*! \brief Helper function for reporting GPU usage information
+ * in the mdrun log file
+ *
+ * \param[in] gpu_info Pointer to per-node GPU info struct
+ * \param[in] gpu_opt Pointer to per-node GPU options struct
+ * \param[in] numPpRanks Number of PP ranks per node
+ * \return String to write to the log file
+ * \throws std::bad_alloc if out of memory */
+static std::string
+makeGpuUsageReport(const gmx_gpu_info_t *gpu_info,
+ const gmx_gpu_opt_t *gpu_opt,
+ size_t numPpRanks)
+{
+ int ngpu_use = gpu_opt->ncuda_dev_use;
+ int ngpu_comp = gpu_info->ncuda_dev_compatible;
/* Issue a note if GPUs are available but not used */
if (ngpu_comp > 0 && ngpu_use < 1)
{
- sprintf(sbuf,
- "%d compatible GPU%s detected in the system, but none will be used.\n"
- "Consider trying GPU acceleration with the Verlet scheme!",
- ngpu_comp, (ngpu_comp > 1) ? "s" : "");
+ return gmx::formatString("%d compatible GPU%s detected in the system, but none will be used.\n"
+ "Consider trying GPU acceleration with the Verlet scheme!\n",
+ ngpu_comp, (ngpu_comp > 1) ? "s" : "");
}
- else
- {
- int ngpu_use_uniq;
-
- ngpu_use_uniq = gmx_count_gpu_dev_unique(gpu_info, gpu_opt);
- sprintf(sbuf, "%d GPU%s %sselected for this run.\n"
- "Mapping of GPU%s to the %d PP rank%s in this node: ",
- ngpu_use_uniq, (ngpu_use_uniq > 1) ? "s" : "",
- gpu_opt->bUserSet ? "user-" : "auto-",
- (ngpu_use > 1) ? "s" : "",
- cr->nrank_pp_intranode,
- (cr->nrank_pp_intranode > 1) ? "s" : "");
+ std::string output;
- for (i = 0; i < ngpu_use; i++)
+ {
+ std::vector<int> gpuIdsInUse;
+ for (int i = 0; i < ngpu_use; i++)
{
- sprintf(stmp, "#%d", get_gpu_device_id(gpu_info, gpu_opt, i));
- if (i < ngpu_use - 1)
- {
- strcat(stmp, ", ");
- }
- strcat(sbuf, stmp);
+ gpuIdsInUse.push_back(get_gpu_device_id(gpu_info, gpu_opt, i));
}
+ std::string gpuIdsString = makeGpuIdsString(gpuIdsInUse);
+ int numGpusInUse = gmx_count_gpu_dev_unique(gpu_info, gpu_opt);
+ bool bPluralGpus = numGpusInUse > 1;
+
+ output += gmx::formatString("%d GPU%s %sselected for this run.\n"
+ "Mapping of GPU ID%s to the %d PP rank%s in this node: %s\n",
+ numGpusInUse, bPluralGpus ? "s" : "",
+ gpu_opt->bUserSet ? "user-" : "auto-",
+ bPluralGpus ? "s" : "",
+ static_cast&lt;int&gt;(numPpRanks),
+ (numPpRanks > 1) ? "s" : "",
+ gpuIdsString.c_str());
}
- md_print_info(cr, fplog, "%s\n\n", sbuf);
+
+ return output;
}
/* Give a suitable fatal error or warning if the build configuration
const gmx_hw_opt_t *hw_opt,
gmx_bool bUseGPU)
{
- int npppn, ntmpi_pp;
- char sbuf[STRLEN], th_or_proc[STRLEN], th_or_proc_plural[STRLEN], pernode[STRLEN];
+ int npppn;
+ char th_or_proc[STRLEN], th_or_proc_plural[STRLEN], pernode[STRLEN];
gmx_bool btMPI, bMPI, bMaxMpiThreadsSet, bNthreadsAuto, bEmulateGPU;
assert(hwinfo);
return;
}
- btMPI = bMPI = FALSE;
- bNthreadsAuto = FALSE;
#if defined(GMX_THREAD_MPI)
+ bMPI = FALSE;
btMPI = TRUE;
bNthreadsAuto = (hw_opt->nthreads_tmpi < 1);
#elif defined(GMX_LIB_MPI)
- bMPI = TRUE;
+ bMPI = TRUE;
+ btMPI = FALSE;
+ bNthreadsAuto = FALSE;
+#else
+ bMPI = FALSE;
+ btMPI = FALSE;
+ bNthreadsAuto = FALSE;
#endif
/* GPU emulation detection is done later, but we need here as well
if (hwinfo->gpu_info.ncuda_dev_compatible > 0)
{
+ std::string gpuUsageReport;
+ try
+ {
+ gpuUsageReport = makeGpuUsageReport(&hwinfo->gpu_info,
+ &hw_opt->gpu_opt,
+ cr->nrank_pp_intranode);
+ }
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+
/* NOTE: this print is only for and on one physical node */
- print_gpu_use_stats(fplog, &hwinfo->gpu_info, &hw_opt->gpu_opt, cr);
+ md_print_info(cr, fplog, "%s", gpuUsageReport.c_str());
}
/* Need to ensure that we have enough GPUs:
/* Return the number of hardware threads supported by the current CPU.
- * We assume that this is equal with the number of CPUs reported to be
- * online by the OS at the time of the call.
- */
+ * We assume that this is equal with the number of "processors"
+ * reported to be online by the OS at the time of the call. The
+ * definition of "processor" is according to an old POSIX standard.
+ *
+ * Note that the number of hardware threads is generally greater than
+ * the number of cores (e.g. x86 hyper-threading, Power). The
+ * mapping of software threads to hardware threads is managed
+ * elsewhere. */
static int get_nthreads_hw_avail(FILE gmx_unused *fplog, const t_commrec gmx_unused *cr)
{
int ret = 0;
#endif /* End of check for sysconf argument values */
#else
- /* Neither windows nor Unix. No fscking idea how many CPUs we have! */
+ /* Neither windows nor Unix. No fscking idea how many hardware threads we have! */
ret = -1;
#endif
if (debug)
{
- fprintf(debug, "Detected %d processors, will use this as the number "
- "of supported hardware threads.\n", ret);
+ fprintf(debug, "Detected %d hardware threads to use.\n", ret);
}
#ifdef GMX_OPENMP
if (ret != gmx_omp_get_num_procs())
{
md_print_warn(cr, fplog,
- "Number of CPUs detected (%d) does not match the number reported by OpenMP (%d).\n"
+ "Number of hardware threads detected (%d) does not match the number reported by OpenMP (%d).\n"
"Consider setting the launch configuration manually!",
ret, gmx_omp_get_num_procs());
}
gmx_hw_info_t *gmx_detect_hardware(FILE *fplog, const t_commrec *cr,
gmx_bool bDetectGPUs)
{
- gmx_hw_info_t *hw;
int ret;
/* make sure no one else is doing the same thing */
gmx_gpu_opt_t *gpu_opt)
{
int i;
- const char *env;
char sbuf[STRLEN], stmp[STRLEN];
/* Bail if binary is not compiled with GPU acceleration, but this is either
#include <config.h>
#endif
-#include "gmx_fatal.h"
#include "typedefs.h"
+#include "types/commrec.h"
#include "macros.h"
#include "network.h"
#include "copyrite.h"
#include "gmx_omp_nthreads.h"
#include "md_logging.h"
+#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
/** Structure with the number of threads for each OpenMP multi-threaded
sprintf(sbuf, "thread-MPI threads");
#else
sprintf(sbuf, "MPI processes");
- sprintf(sbuf1, " per node");
- sprintf(sbuf2, "On node %d: o", cr->sim_nodeid);
+ sprintf(sbuf1, " per rank");
+ sprintf(sbuf2, "On rank %d: o", cr->sim_nodeid);
#endif
}
#endif
#include "md_logging.h"
#include "gmx_thread_affinity.h"
-#include "gmx_fatal.h"
+#include "gromacs/utility/basenetwork.h"
+#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
+#include "gromacs/utility/smalloc.h"
static int
get_thread_affinity_layout(FILE *fplog,
* Note that this will only work on Linux as we use a GNU feature.
*/
void
-gmx_check_thread_affinity_set(FILE gmx_unused *fplog,
- const t_commrec gmx_unused *cr,
- gmx_hw_opt_t gmx_unused *hw_opt,
- int gmx_unused nthreads_hw_avail,
- gmx_bool gmx_unused bAfterOpenmpInit)
+gmx_check_thread_affinity_set(FILE *fplog,
+ const t_commrec *cr,
+ gmx_hw_opt_t *hw_opt,
- int gmx_unused ncpus,
++ int gmx_unused nthreads_hw_avail,
+ gmx_bool bAfterOpenmpInit)
{
#ifdef HAVE_SCHED_GETAFFINITY
cpu_set_t mask_current;
int i, ret, cpu_count, cpu_set;
gmx_bool bAllSet;
+#endif
assert(hw_opt);
+ if (!bAfterOpenmpInit)
+ {
+ /* Check for externally set OpenMP affinity and turn off internal
+ * pinning if any is found. We need to do this check early to tell
+ * thread-MPI whether it should do pinning when spawning threads.
+ * TODO: the above no longer holds, we should move these checks later
+ */
+ if (hw_opt->thread_affinity != threadaffOFF)
+ {
+ char *message;
+ if (!gmx_omp_check_thread_affinity(&message))
+ {
+ /* TODO: with -pin auto we should only warn when using all cores */
+ md_print_warn(cr, fplog, "%s", message);
+ sfree(message);
+ hw_opt->thread_affinity = threadaffOFF;
+ }
+ }
+
+ /* With thread-MPI this is needed as pinning might get turned off,
+ * which needs to be known before starting thread-MPI.
+ * With thread-MPI hw_opt is processed here on the master rank
+ * and passed to the other ranks later, so we only do this on master.
+ */
+ if (!SIMMASTER(cr))
+ {
+ return;
+ }
+#ifndef GMX_THREAD_MPI
+ return;
+#endif
+ }
+
+#ifdef HAVE_SCHED_GETAFFINITY
if (hw_opt->thread_affinity == threadaffOFF)
{
/* internal affinity setting is off, don't bother checking process affinity */
* detected CPUs is >= the CPUs in the current set.
* We need to check for CPU_COUNT as it was added only in glibc 2.6. */
#ifdef CPU_COUNT
- if (ncpus < CPU_COUNT(&mask_current))
+ if (nthreads_hw_avail < CPU_COUNT(&mask_current))
{
if (debug)
{
- fprintf(debug, "%d CPUs detected, but %d was returned by CPU_COUNT",
- ncpus, CPU_COUNT(&mask_current));
+ fprintf(debug, "%d hardware threads detected, but %d was returned by CPU_COUNT",
+ nthreads_hw_avail, CPU_COUNT(&mask_current));
}
return;
}
#endif /* CPU_COUNT */
bAllSet = TRUE;
- for (i = 0; (i < ncpus && i < CPU_SETSIZE); i++)
+ for (i = 0; (i < nthreads_hw_avail && i < CPU_SETSIZE); i++)
{
bAllSet = bAllSet && (CPU_ISSET(i, &mask_current) != 0);
}
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef GMX_NATIVE_WINDOWS
+#include <process.h>
+#endif
-#include "gromacs/utility/smalloc.h"
#include "types/commrec.h"
-#include "gmx_fatal.h"
#include "network.h"
#include "main.h"
#include "macros.h"
-#include "gromacs/fileio/futil.h"
+#include "gromacs/utility/futil.h"
#include "gromacs/fileio/filenm.h"
#include "gromacs/fileio/gmxfio.h"
-#include "gromacs/utility/cstringutil.h"
#include "copyrite.h"
+#include "gromacs/utility/basenetwork.h"
+#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
+#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/programcontext.h"
+#include "gromacs/utility/smalloc.h"
/* The source code in this file should be thread-safe.
Please keep it that way. */
-
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-
-#ifdef GMX_NATIVE_WINDOWS
-#include <process.h>
-#endif
-
#define BUFSIZE 1024
-
static void par_fn(char *base, int ftp, const t_commrec *cr,
gmx_bool bAppendSimId, gmx_bool bAppendNodeId,
char buf[], int bufsize)
}
if (bAppendNodeId)
{
- strcat(buf, "_node");
+ strcat(buf, "_rank");
sprintf(buf+strlen(buf), "%d", cr->nodeid);
}
strcat(buf, ".");
strcat(buf, (ftp == efTPX) ? "tpr" : (ftp == efEDR) ? "edr" : ftp2ext(ftp));
if (debug)
{
- fprintf(debug, "node %d par_fn '%s'\n", cr->nodeid, buf);
+ fprintf(debug, "rank %d par_fn '%s'\n", cr->nodeid, buf);
if (fn2ftp(buf) == efLOG)
{
fprintf(debug, "log\n");
}
-int gmx_gethostname(char *name, size_t len)
-{
- if (len < 8)
- {
- gmx_incons("gmx_gethostname called with len<8");
- }
-#if defined(HAVE_UNISTD_H) && !defined(__native_client__)
- if (gethostname(name, len-1) != 0)
- {
- strncpy(name, "unknown", 8);
- return -1;
- }
- return 0;
-#else
- strncpy(name, "unknown", 8);
- return -1;
-#endif
-}
-
-
void gmx_log_open(const char *lognm, const t_commrec *cr, gmx_bool bMasterOnly,
gmx_bool bAppendFiles, FILE** fplog)
{
fprintf(fp,
"Log file opened on %s"
- "Host: %s pid: %d nodeid: %d nnodes: %d\n",
+ "Host: %s pid: %d rank ID: %d number of ranks: %d\n",
timebuf, host, pid, cr->nodeid, cr->nnodes);
try
{
nnodes = cr->nnodes;
if (nnodes % nsim != 0)
{
- gmx_fatal(FARGS, "The number of nodes (%d) is not a multiple of the number of simulations (%d)", nnodes, nsim);
+ gmx_fatal(FARGS, "The number of ranks (%d) is not a multiple of the number of simulations (%d)", nnodes, nsim);
}
nnodpersim = nnodes/nsim;
if (debug)
{
- fprintf(debug, "We have %d simulations, %d nodes per simulation, local simulation is %d\n", nsim, nnodpersim, sim);
+ fprintf(debug, "We have %d simulations, %d ranks per simulation, local simulation is %d\n", nsim, nnodpersim, sim);
}
snew(ms, 1);
fprintf(debug, "This is simulation %d", cr->ms->sim);
if (PAR(cr))
{
- fprintf(debug, ", local number of nodes %d, local nodeid %d",
+ fprintf(debug, ", local number of ranks %d, local rank ID %d",
cr->nnodes, cr->sim_nodeid);
}
fprintf(debug, "\n\n");
#include "mvdata.h"
#include "types/commrec.h"
#include "network.h"
-#include "gromacs/utility/smalloc.h"
-#include "gmx_fatal.h"
-#include "symtab.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "tgroup.h"
+#include "gromacs/topology/symtab.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/smalloc.h"
+
#define block_bc(cr, d) gmx_bcast( sizeof(d), &(d), (cr))
/* Probably the test for (nr) > 0 in the next macro is only needed
* on BlueGene(/L), where IBM's MPI_Bcast will segfault after
block_bc(cr, fep->init_lambda);
block_bc(cr, fep->init_fep_state);
block_bc(cr, fep->delta_lambda);
- block_bc(cr, fep->bPrintEnergy);
+ block_bc(cr, fep->edHdLPrintEnergy);
block_bc(cr, fep->n_lambda);
if (fep->n_lambda > 0)
{
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
+#include "network.h"
+
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdlib.h>
#include <string.h>
-#include "gmx_fatal.h"
-#include "main.h"
-#include "gromacs/utility/smalloc.h"
+
#include "types/commrec.h"
-#include "network.h"
#include "copyrite.h"
-#include <ctype.h>
#include "macros.h"
-#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/basenetwork.h"
+#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxmpi.h"
-
+#include "gromacs/utility/smalloc.h"
/* The source code in this file should be thread-safe.
Please keep it that way. */
-gmx_bool gmx_mpi_initialized(void)
-{
- int n;
-#ifndef GMX_MPI
- return 0;
-#else
- MPI_Initialized(&n);
-
- return n;
-#endif
-}
-
void gmx_fill_commrec_from_mpi(t_commrec gmx_unused *cr)
{
#ifndef GMX_MPI
#endif
}
-int gmx_node_num(void)
-{
-#ifndef GMX_MPI
- return 1;
-#else
- int i;
- (void) MPI_Comm_size(MPI_COMM_WORLD, &i);
- return i;
-#endif
-}
-
-int gmx_node_rank(void)
-{
-#ifndef GMX_MPI
- return 0;
-#else
- int i;
- (void) MPI_Comm_rank(MPI_COMM_WORLD, &i);
- return i;
-#endif
-}
-
-static int mpi_hostname_hash(void)
-{
- int hash_int;
-
-#ifndef GMX_LIB_MPI
- /* We have a single physical node */
- hash_int = 0;
-#else
- int resultlen;
- char mpi_hostname[MPI_MAX_PROCESSOR_NAME];
-
- /* This procedure can only differentiate nodes with different names.
- * Architectures where different physical nodes have identical names,
- * such as IBM Blue Gene, should use an architecture specific solution.
- */
- MPI_Get_processor_name(mpi_hostname, &resultlen);
-
- /* The string hash function returns an unsigned int. We cast to an int.
- * Negative numbers are converted to positive by setting the sign bit to 0.
- * This makes the hash one bit smaller.
- * A 63-bit hash (with 64-bit int) should be enough for unique node hashes,
- * even on a million node machine. 31 bits might not be enough though!
- */
- hash_int =
- (int)gmx_string_fullhash_func(mpi_hostname, gmx_string_hash_init);
- if (hash_int < 0)
- {
- hash_int -= INT_MIN;
- }
-#endif
-
- return hash_int;
-}
-
-#if defined GMX_LIB_MPI && defined GMX_TARGET_BGQ
-#include <spi/include/kernel/location.h>
-
-static int bgq_nodenum(void)
-{
- int hostnum;
- Personality_t personality;
- Kernel_GetPersonality(&personality, sizeof(personality));
- /* Each MPI rank has a unique coordinate in a 6-dimensional space
- (A,B,C,D,E,T), with dimensions A-E corresponding to different
- physical nodes, and T within each node. Each node has sixteen
- physical cores, each of which can have up to four hardware
- threads, so 0 <= T <= 63 (but the maximum value of T depends on
- the confituration of ranks and OpenMP threads per
- node). However, T is irrelevant for computing a suitable return
- value for gmx_hostname_num().
- */
- hostnum = personality.Network_Config.Acoord;
- hostnum *= personality.Network_Config.Bnodes;
- hostnum += personality.Network_Config.Bcoord;
- hostnum *= personality.Network_Config.Cnodes;
- hostnum += personality.Network_Config.Ccoord;
- hostnum *= personality.Network_Config.Dnodes;
- hostnum += personality.Network_Config.Dcoord;
- hostnum *= personality.Network_Config.Enodes;
- hostnum += personality.Network_Config.Ecoord;
-
- if (debug)
- {
- fprintf(debug,
- "Torus ID A: %d / %d B: %d / %d C: %d / %d D: %d / %d E: %d / %d\nNode ID T: %d / %d core: %d / %d hardware thread: %d / %d\n",
- personality.Network_Config.Acoord,
- personality.Network_Config.Anodes,
- personality.Network_Config.Bcoord,
- personality.Network_Config.Bnodes,
- personality.Network_Config.Ccoord,
- personality.Network_Config.Cnodes,
- personality.Network_Config.Dcoord,
- personality.Network_Config.Dnodes,
- personality.Network_Config.Ecoord,
- personality.Network_Config.Enodes,
- Kernel_ProcessorCoreID(),
- 16,
- Kernel_ProcessorID(),
- 64,
- Kernel_ProcessorThreadID(),
- 4);
- }
- return hostnum;
-}
-#endif
-
-int gmx_physicalnode_id_hash(void)
-{
- int hash;
-
-#ifndef GMX_MPI
- hash = 0;
-#else
-#ifdef GMX_THREAD_MPI
- /* thread-MPI currently puts the thread number in the process name,
- * we might want to change this, as this is inconsistent with what
- * most MPI implementations would do when running on a single node.
- */
- hash = 0;
-#else
-#ifdef GMX_TARGET_BGQ
- hash = bgq_nodenum();
-#else
- hash = mpi_hostname_hash();
-#endif
-#endif
-#endif
-
- if (debug)
- {
- fprintf(debug, "In gmx_physicalnode_id_hash: hash %d\n", hash);
- }
-
- return hash;
-}
-
void gmx_setup_nodecomm(FILE gmx_unused *fplog, t_commrec *cr)
{
gmx_nodecomm_t *nc;
MPI_Comm_rank(nc->comm_intra, &nc->rank_intra);
if (debug)
{
- fprintf(debug, "In gmx_setup_nodecomm: node rank %d rank_intra %d\n",
+ fprintf(debug, "In gmx_setup_nodecomm: node ID %d rank within node %d\n",
rank, nc->rank_intra);
}
/* The inter-node communicator, split on rank_intra.
nc->bUse = TRUE;
if (fplog)
{
- fprintf(fplog, "Using two step summing over %d groups of on average %.1f processes\n\n",
+ fprintf(fplog, "Using two step summing over %d groups of on average %.1f ranks\n\n",
ng, (real)n/(real)ng);
}
if (nc->rank_intra > 0)
{
sprintf(sbuf, "%s", cr->duty & DUTY_PP ? "PP" : "PME");
}
- fprintf(debug, "On %3s node %d: nrank_intranode=%d, rank_intranode=%d, "
+ fprintf(debug, "On %3s rank %d: nrank_intranode=%d, rank_intranode=%d, "
"nrank_pp_intranode=%d, rank_pp_intranode=%d\n",
sbuf, cr->sim_nodeid,
nrank_intranode, rank_intranode,
#endif
}
-void gmx_abort(int gmx_unused noderank, int gmx_unused nnodes, int gmx_unused errorno)
-{
-#ifndef GMX_MPI
- gmx_call("gmx_abort");
-#else
-#ifdef GMX_THREAD_MPI
- fprintf(stderr, "Halting program %s\n", ShortProgram());
- gmx_thanx(stderr);
- exit(1);
-#else
- if (nnodes > 1)
- {
- fprintf(stderr, "Halting parallel program %s on CPU %d out of %d\n",
- ShortProgram(), noderank, nnodes);
- }
- else
- {
- fprintf(stderr, "Halting program %s\n", ShortProgram());
- }
-
- gmx_thanx(stderr);
- MPI_Abort(MPI_COMM_WORLD, errorno);
- exit(1);
-#endif
-#endif
-}
-
void gmx_bcast(int gmx_unused nbytes, void gmx_unused *b, const t_commrec gmx_unused *cr)
{
#ifndef GMX_MPI
#endif
#endif
}
+
+gmx_bool gmx_fexist_master(const char *fname, t_commrec *cr)
+{
+ gmx_bool bExist;
+
+ if (SIMMASTER(cr))
+ {
+ bExist = gmx_fexist(fname);
+ }
+ if (PAR(cr))
+ {
+ gmx_bcast(sizeof(bExist), &bExist, cr);
+ }
+ return bExist;
+}
+
+void gmx_fatal_collective(int f_errno, const char *file, int line,
+ const t_commrec *cr, gmx_domdec_t *dd,
+ const char *fmt, ...)
+{
+ va_list ap;
+ gmx_bool bMaster, bFinalize;
+#ifdef GMX_MPI
+ int result;
+ /* Check if we are calling on all processes in MPI_COMM_WORLD */
+ if (cr != NULL)
+ {
+ MPI_Comm_compare(cr->mpi_comm_mysim, MPI_COMM_WORLD, &result);
+ }
+ else
+ {
+ MPI_Comm_compare(dd->mpi_comm_all, MPI_COMM_WORLD, &result);
+ }
+ /* Any result except MPI_UNEQUAL allows us to call MPI_Finalize */
+ bFinalize = (result != MPI_UNEQUAL);
+#else
+ bFinalize = TRUE;
+#endif
+ bMaster = (cr != NULL && MASTER(cr)) || (dd != NULL && DDMASTER(dd));
+
+ va_start(ap, fmt);
+ gmx_fatal_mpi_va(f_errno, file, line, bMaster, bFinalize, fmt, ap);
+ va_end(ap);
+}
#include <math.h>
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "typedefs.h"
#include "nonbonded.h"
#include "nb_kernel.h"
#include "macros.h"
#include "nb_free_energy.h"
-#include "gmx_fatal.h"
+#include "gromacs/utility/fatalerror.h"
void
gmx_nb_free_energy_kernel(const t_nblist * gmx_restrict nlist,
const real * chargeB;
real sigma6_min, sigma6_def, lam_power, sc_power, sc_r_power;
real alpha_coul, alpha_vdw, lambda_coul, lambda_vdw, ewc_lj;
+ real ewcljrsq, ewclj, ewclj2, exponent, poly, vvdw_disp, vvdw_rep, sh_lj_ewald;
+ real ewclj6;
const real * nbfp, *nbfp_grid;
real * dvdl;
real * Vv;
bDoPotential = kernel_data->flags & GMX_NONBONDED_DO_POTENTIAL;
rcoulomb = fr->rcoulomb;
- sh_ewald = fr->ic->sh_ewald;
rvdw = fr->rvdw;
sh_invrc6 = fr->ic->sh_invrc6;
+ sh_lj_ewald = fr->ic->sh_lj_ewald;
+ ewclj = fr->ewaldcoeff_lj;
+ ewclj2 = ewclj*ewclj;
+ ewclj6 = ewclj2*ewclj2*ewclj2;
if (fr->coulomb_modifier == eintmodPOTSWITCH)
{
*/
bConvertLJEwaldToLJ6 = (bEwaldLJ && (fr->vdw_modifier != eintmodPOTSWITCH));
+ /* We currently don't implement exclusion correction, needed with the Verlet cut-off scheme, without conversion */
+ if (fr->cutoff_scheme == ecutsVERLET &&
+ ((bEwald && !bConvertEwaldToCoulomb) ||
+ (bEwaldLJ && !bConvertLJEwaldToLJ6)))
+ {
+ gmx_incons("Unimplemented non-bonded setup");
+ }
+
/* fix compiler warnings */
nj1 = 0;
n1C = n1V = 0;
Vcoul[i] = qq[i]*rinvC;
FscalC[i] = Vcoul[i];
/* The shift for the Coulomb potential is stored in
- * the RF parameter c_rf, which is 0 without shift
+ * the RF parameter c_rf, which is 0 without shift.
*/
Vcoul[i] -= qq[i]*fr->ic->c_rf;
break;
switch (ivdw)
{
case GMX_NBKERNEL_VDW_LENNARDJONES:
- case GMX_NBKERNEL_VDW_LJEWALD:
/* cutoff LJ */
if (sc_r_power == 6.0)
{
}
else
{
- rinv6 = pow(rinvV, 6.0);
+ rinv6 = rinvV*rinvV;
+ rinv6 = rinv6*rinv6*rinv6;
}
Vvdw6 = c6[i]*rinv6;
Vvdw12 = c12[i]*rinv6*rinv6;
- if (fr->vdw_modifier == eintmodPOTSHIFT)
- {
- Vvdw[i] = ( (Vvdw12-c12[i]*sh_invrc6*sh_invrc6)*(1.0/12.0)
- -(Vvdw6-c6[i]*sh_invrc6)*(1.0/6.0));
- }
- else
- {
- Vvdw[i] = Vvdw12*(1.0/12.0) - Vvdw6*(1.0/6.0);
- }
+
+ Vvdw[i] = ( (Vvdw12 - c12[i]*sh_invrc6*sh_invrc6)*(1.0/12.0)
+ - (Vvdw6 - c6[i]*sh_invrc6)*(1.0/6.0));
FscalV[i] = Vvdw12 - Vvdw6;
break;
FscalV[i] -= c12[i]*tabscale*FF*rV;
break;
+ case GMX_NBKERNEL_VDW_LJEWALD:
+ if (sc_r_power == 6.0)
+ {
+ rinv6 = rpinvV;
+ }
+ else
+ {
+ rinv6 = rinvV*rinvV;
+ rinv6 = rinv6*rinv6*rinv6;
+ }
+ c6grid = nbfp_grid[tj[i]];
+
+ if (bConvertLJEwaldToLJ6)
+ {
+ /* cutoff LJ */
+ Vvdw6 = c6[i]*rinv6;
+ Vvdw12 = c12[i]*rinv6*rinv6;
+
+ Vvdw[i] = ( (Vvdw12 - c12[i]*sh_invrc6*sh_invrc6)*(1.0/12.0)
+ - (Vvdw6 - c6[i]*sh_invrc6 - c6grid*sh_lj_ewald)*(1.0/6.0));
+ FscalV[i] = Vvdw12 - Vvdw6;
+ }
+ else
+ {
+ /* Normal LJ-PME */
+ ewcljrsq = ewclj2*rV*rV;
+ exponent = exp(-ewcljrsq);
+ poly = exponent*(1.0 + ewcljrsq + ewcljrsq*ewcljrsq*0.5);
+ vvdw_disp = (c6[i]-c6grid*(1.0-poly))*rinv6;
+ vvdw_rep = c12[i]*rinv6*rinv6;
+ FscalV[i] = vvdw_rep - vvdw_disp - c6grid*(1.0/6.0)*exponent*ewclj6;
+ Vvdw[i] = (vvdw_rep - c12[i]*sh_invrc6*sh_invrc6)/12.0 - (vvdw_disp - c6[i]*sh_invrc6 - c6grid*sh_lj_ewald)/6.0;
+ }
+ break;
+
case GMX_NBKERNEL_VDW_NONE:
Vvdw[i] = 0.0;
FscalV[i] = 0.0;
v_lr = (ewtab[ewitab+2]-ewtabhalfspace*eweps*(ewtab[ewitab]+f_lr));
f_lr *= rinv;
+ /* Note that any possible Ewald shift has already been applied in
+ * the normal interaction part above.
+ */
+
if (ii == jnr)
{
/* If we get here, the i particle (ii) has itself (jnr)
* the softcore to the entire VdW interaction,
* including the reciprocal-space component.
*/
+ /* We could also use the analytical form here
+ * iso a table, but that can cause issues for
+ * r close to 0 for non-interacting pairs.
+ */
real rs, frac, f_lr;
int ri;
ri = (int)rs;
frac = rs - ri;
f_lr = (1 - frac)*tab_ewald_F_lj[ri] + frac*tab_ewald_F_lj[ri+1];
- FF = f_lr*rinv;
- VV = tab_ewald_V_lj[ri] - ewtabhalfspace*frac*(tab_ewald_F_lj[ri] + f_lr);
+ /* TODO: Currently the Ewald LJ table does not contain
+ * the factor 1/6; we should add this.
+ */
+ FF = f_lr*rinv/6.0;
+ VV = (tab_ewald_V_lj[ri] - ewtabhalfspace*frac*(tab_ewald_F_lj[ri] + f_lr))/6.0;
if (ii == jnr)
{
for (i = 0; i < NSTATES; i++)
{
c6grid = nbfp_grid[tj[i]];
- vvtot += LFV[i]*c6grid*VV*(1.0/6.0);
- Fscal += LFV[i]*c6grid*FF*(1.0/6.0);
- dvdl_vdw += (DLF[i]*c6grid)*VV*(1.0/6.0);
+ vvtot += LFV[i]*c6grid*VV;
+ Fscal += LFV[i]*c6grid*FF;
+ dvdl_vdw += (DLF[i]*c6grid)*VV;
}
-
}
if (bDoForces)
#include <config.h>
#endif
+#include <stdlib.h>
#include <string.h>
+
#include "types/commrec.h"
-#include "sysstuff.h"
#include "names.h"
#include "macros.h"
#include "nrnb.h"
-#include "main.h"
#include "gromacs/utility/smalloc.h"
typedef struct {
fprintf(log, "\nDetailed load balancing info in percentage of average\n");
- fprintf(log, " Type NODE:");
+ fprintf(log, " Type RANK:");
for (i = 0; (i < cr->nnodes); i++)
{
fprintf(log, "%3d ", i);
#endif
#include "typedefs.h"
-#include "gromacs/utility/smalloc.h"
-#include "vec.h"
#include "types/commrec.h"
#include "network.h"
#include "orires.h"
#include "main.h"
#include "copyrite.h"
-#include "pbc.h"
-#include "mtop_util.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/topology/mtop_util.h"
#include "gromacs/linearalgebra/nrjac.h"
#include "gromacs/math/do_fit.h"
-#include "gmx_fatal.h"
+#include "gromacs/math/vec.h"
+#include "gromacs/pbcutil/ishift.h"
+#include "gromacs/pbcutil/mshift.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/smalloc.h"
void init_orires(FILE *fplog, const gmx_mtop_t *mtop,
rvec xref[],
if (od->nr == 0)
{
/* This means that this is not the master node */
- gmx_fatal(FARGS, "Orientation restraints are only supported on the master node, use less processors");
+ gmx_fatal(FARGS, "Orientation restraints are only supported on the master rank, use fewer ranks");
}
bTAV = (od->edt != 0);
/* This file is completely threadsafe - please keep it that way! */
#include <stdio.h>
+#include <stdlib.h>
+
#include "typedefs.h"
#include "types/commrec.h"
#include "names.h"
#include "txtdump.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "macros.h"
-#include "gmx_fatal.h"
+#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"
int pr_indent(FILE *fp, int n)
}
}
PI("calc-lambda-neighbors", fep->lambda_neighbors);
- PS("dhdl-print-energy", EBOOL(fep->bPrintEnergy));
+ PS("dhdl-print-energy", edHdLPrintEnergy_names[fep->edHdLPrintEnergy]);
PR("sc-alpha", fep->sc_alpha);
PI("sc-power", fep->sc_power);
PR("sc-r-power", fep->sc_r_power);
for (i = 0; i <= block->nr; i++)
{
(void) pr_indent(fp, indent+INDENT);
- (void) fprintf(fp, "%s->index[%d]=%u\n",
+ (void) fprintf(fp, "%s->index[%d]=%d\n",
title, bShowNumbers ? i : -1, block->index[i]);
}
for (i = 0; i < block->nra; i++)
{
(void) pr_indent(fp, indent+INDENT);
- (void) fprintf(fp, "%s->a[%d]=%u\n",
+ (void) fprintf(fp, "%s->a[%d]=%d\n",
title, bShowNumbers ? i : -1, block->a[i]);
}
}
(void) fprintf(fp, "\n");
size = pr_indent(fp, indent+INDENT);
}
- size += fprintf(fp, "%u", block->a[j]);
+ size += fprintf(fp, "%d", block->a[j]);
}
(void) fprintf(fp, "}\n");
start = end;
fprintf(fp, "commrec:\n");
indent += 2;
pr_indent(fp, indent);
- fprintf(fp, "nodeid = %d\n", cr->nodeid);
+ fprintf(fp, "rank = %d\n", cr->nodeid);
pr_indent(fp, indent);
- fprintf(fp, "nnodes = %d\n", cr->nnodes);
+ fprintf(fp, "number of ranks = %d\n", cr->nnodes);
pr_indent(fp, indent);
- fprintf(fp, "npmenodes = %d\n", cr->npmenodes);
+ fprintf(fp, "PME-only ranks = %d\n", cr->npmenodes);
/*
pr_indent(fp,indent);
fprintf(fp,"threadid = %d\n",cr->threadid);
#include <ctype.h>
#include <math.h>
+#include <stdlib.h>
#include <string.h>
-#include "sysstuff.h"
#include "macros.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/fileio/confio.h"
-#include "vec.h"
-#include "pbc.h"
+#include "gromacs/math/vec.h"
#include "toputil.h"
#include "topio.h"
#include "gpp_nextnb.h"
-#include "symtab.h"
#include "macros.h"
-#include "gmx_fatal.h"
+#include "gromacs/utility/fatalerror.h"
#include "pgutil.h"
#include "resall.h"
#include "gen_ad.h"
{
fprintf(debug,
"Something VERY strange is going on in rm2par (gen_ad.c)\n"
- "a[0] %u a[1] %u a[2] %u a[3] %u\n",
+ "a[0] %d a[1] %d a[2] %d a[3] %d\n",
p[i].a[0], p[i].a[1], p[i].a[2], p[i].a[3]);
}
strcpy(p[i].s, "");
t_param *ang, *dih, *pai, *improper;
t_rbondeds *hbang, *hbdih;
char **anm;
+ const char *p;
int res, minres, maxres;
int i, j, j1, k, k1, l, l1, m, n, i1, i2;
int ninc, maxang, maxdih, maxpai;
int nFound;
gmx_bool bFound, bExcl;
-
/* These are the angles, dihedrals and pairs that we generate
* from the bonds. The ones that are already there from the rtp file
* will be retained.
if (hb)
{
gen_excls(atoms, excls, hb, bAllowMissing);
+ /* mark all entries as not matched yet */
+ for (i = 0; i < atoms->nres; i++)
+ {
+ for (j = 0; j < ebtsNR; j++)
+ {
+ for (k = 0; k < hb[i].rb[j].nb; k++)
+ {
+ hb[i].rb[j].b[k].match = FALSE;
+ }
+ }
+ }
}
/* Extract all i-j-k-l neighbours from nnb struct to generate all
if (bFound)
{
set_p_string(&(ang[nang]), hbang->b[l].s);
+ /* Mark that we found a match for this entry */
+ hbang->b[l].match = TRUE;
}
}
}
if (bFound)
{
set_p_string(&dih[ndih], hbdih->b[n].s);
+ /* Mark that we found a match for this entry */
+ hbdih->b[n].match = TRUE;
/* Set the last parameter to be able to see
if the dihedral was in the rtp list.
}
}
+ /* The above approach is great in that we double-check that e.g. an angle
+ * really corresponds to three atoms connected by bonds, but this is not
+ * generally true. Go through the angle and dihedral hackblocks to add
+ * entries that we have not yet marked as matched when going through bonds.
+ */
+ for (i = 0; i < atoms->nres; i++)
+ {
+ /* Add remaining angles from hackblock */
+ hbang = &hb[i].rb[ebtsANGLES];
+ for (j = 0; j < hbang->nb; j++)
+ {
+ if (hbang->b[j].match == TRUE)
+ {
+ /* We already used this entry, continue to the next */
+ continue;
+ }
+ /* Hm - entry not used, let's see if we can find all atoms */
+ if (nang == maxang)
+ {
+ maxang += ninc;
+ srenew(ang, maxang);
+ }
+ bFound = TRUE;
+ for (k = 0; k < 3 && bFound; k++)
+ {
+ p = hbang->b[j].a[k];
+ res = i;
+ if (p[0] == '-')
+ {
+ p++;
+ res--;
+ }
+ else if (p[0] == '+')
+ {
+ p++;
+ res++;
+ }
+ ang[nang].a[k] = search_res_atom(p, res, atoms, "angle", TRUE);
+ bFound = (ang[nang].a[k] != NO_ATID);
+ }
+ ang[nang].C0 = NOTSET;
+ ang[nang].C1 = NOTSET;
+
+ if (bFound)
+ {
+ set_p_string(&(ang[nang]), hbang->b[j].s);
+ hbang->b[j].match = TRUE;
+ /* Incrementing nang means we save this angle */
+ nang++;
+ }
+ }
+
+ /* Add remaining dihedrals from hackblock */
+ hbdih = &hb[i].rb[ebtsPDIHS];
+ for (j = 0; j < hbdih->nb; j++)
+ {
+ if (hbdih->b[j].match == TRUE)
+ {
+ /* We already used this entry, continue to the next */
+ continue;
+ }
+ /* Hm - entry not used, let's see if we can find all atoms */
+ if (ndih == maxdih)
+ {
+ maxdih += ninc;
+ srenew(dih, maxdih);
+ }
+ bFound = TRUE;
+ for (k = 0; k < 4 && bFound; k++)
+ {
+ p = hbdih->b[j].a[k];
+ res = i;
+ if (p[0] == '-')
+ {
+ p++;
+ res--;
+ }
+ else if (p[0] == '+')
+ {
+ p++;
+ res++;
+ }
+ dih[ndih].a[k] = search_res_atom(p, res, atoms, "dihedral", TRUE);
+ bFound = (dih[ndih].a[k] != NO_ATID);
+ }
+ for (m = 0; m < MAXFORCEPARAM; m++)
+ {
+ dih[ndih].c[m] = NOTSET;
+ }
+
+ if (bFound)
+ {
+ set_p_string(&(dih[ndih]), hbdih->b[j].s);
+ hbdih->b[j].match = TRUE;
+ /* Incrementing ndih means we save this dihedral */
+ ndih++;
+ }
+ }
+ }
+
/* Sort angles with respect to j-i-k (middle atom first) */
if (nang > 1)
{
#include <string.h>
#include "hackblock.h"
#include "gromacs/utility/smalloc.h"
-#include "vec.h"
-#include "macros.h"
+#include "gromacs/math/vec.h"
+ #include "names.h"
/* these MUST correspond to the enum in hackblock.h */
const char *btsNames[ebtsNR] = { "bonds", "angles", "dihedrals", "impropers", "exclusions", "cmap" };
free_t_bondeds(&(*rtp)[i].rb[j]);
}
}
- free(*rtp);
+ sfree(*rtp);
}
void free_t_hack(int nh, t_hack **h)
{
d->a[i] = safe_strdup(s->a[i]);
}
- d->s = safe_strdup(s->s);
+ d->s = safe_strdup(s->s);
+ d->match = s->match;
}
static gmx_bool contains_char(t_rbonded *s, char c)
}
fprintf(out, " %s]", SS(hb[i].rb[j].b[k].s));
}
- fprintf(out, "\n");
+ fprintf(out, " Entry matched: %s\n", yesno_names[hb[i].rb[j].b[k].match]);
}
}
fprintf(out, "\n");
#define GMX_GMXPREPROCESS_HACKBLOCK_H
#include "typedefs.h"
-#include "../fileio/pdbio.h"
+#include "gromacs/fileio/pdbio.h"
#include "grompp-impl.h"
#include "gpp_atomtype.h"
+#include "gromacs/topology/symtab.h"
#ifdef __cplusplus
extern "C" {
char *s; /* optional define string which gets copied from
.rtp/.tdb to .top and will be parsed by cpp
during grompp */
+ gmx_bool match; /* boolean to mark that the entry has been found */
} t_rbonded;
typedef struct {
#include <math.h>
#include <ctype.h>
-#include "vec.h"
-#include "gromacs/utility/smalloc.h"
+#include "gromacs/math/vec.h"
#include "macros.h"
-#include "symtab.h"
-#include "gromacs/fileio/futil.h"
-#include "gmx_fatal.h"
+#include "gromacs/utility/futil.h"
#include "pdb2top.h"
#include "gpp_nextnb.h"
#include "topdirs.h"
#include "resall.h"
#include "topio.h"
#include "gromacs/utility/cstringutil.h"
-#include "physics.h"
#include "gromacs/fileio/pdbio.h"
#include "gen_ad.h"
#include "gromacs/fileio/filenm.h"
-#include "index.h"
#include "gen_vsite.h"
#include "add_par.h"
#include "toputil.h"
#include "copyrite.h"
#include "gromacs/fileio/strdb.h"
+#include "gromacs/topology/residuetypes.h"
+#include "gromacs/topology/symtab.h"
#include "gromacs/utility/exceptions.h"
+#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/programcontext.h"
+#include "gromacs/utility/smalloc.h"
/* this must correspond to enum in pdb2top.h */
const char *hh[ehisNR] = { "HISD", "HISE", "HISH", "HIS1" };
}
static int name2type(t_atoms *at, int **cgnr,
- t_restp restp[], gmx_residuetype_t rt)
+ t_restp restp[], gmx_residuetype_t *rt)
{
int i, j, prevresind, resind, i0, prevcg, cg, curcg;
char *name;
}
}
- static atom_id search_res_atom(const char *type, int resind,
- t_atoms *atoms,
- const char *bondtype, gmx_bool bAllowMissing)
- {
- int i;
-
- for (i = 0; (i < atoms->nr); i++)
- {
- if (atoms->atom[i].resind == resind)
- {
- return search_atom(type, i, atoms, bondtype, bAllowMissing);
- }
- }
- return NO_ATID;
- }
static void do_ssbonds(t_params *ps, t_atoms *atoms,
int nssbonds, t_ssbond *ssbonds, gmx_bool bAllowMissing)
{
dist2 = distance2(x[ai], x[aj]);
if (dist2 > long_bond_dist2)
+
{
fprintf(stderr, "Warning: Long Bond (%d-%d = %g nm)\n",
ai+1, aj+1, sqrt(dist2));
}
#define NUM_CMAP_ATOMS 5
- static void gen_cmap(t_params *psb, t_restp *restp, t_atoms *atoms, gmx_residuetype_t *rt)
+ static void gen_cmap(t_params *psb, t_restp *restp, t_atoms *atoms)
{
int residx, i, j, k;
const char *ptr;
+ const char *pname;
t_resinfo *resinfo = atoms->resinfo;
int nres = atoms->nres;
gmx_bool bAddCMAP;
ptr = "check";
}
- fprintf(stderr, "Making cmap torsions...");
+ fprintf(stderr, "Making cmap torsions...\n");
i = 0;
- /* End loop at nres-1, since the very last residue does not have a +N atom, and
- * therefore we get a valgrind invalid 4 byte read error with atom am */
- for (residx = 0; residx < nres-1; residx++)
+ /* Most cmap entries use the N atom from the next residue, so the last
+ * residue should not have its CMAP entry in that case, but for things like
+ * dipeptides we sometimes define a complete CMAP entry inside a residue,
+ * and in this case we need to process everything through the last residue.
+ */
+ for (residx = 0; residx < nres; residx++)
{
/* Add CMAP terms from the list of CMAP interactions */
for (j = 0; j < restp[residx].rb[ebtsCMAP].nb; j++)
* from residues labelled as protein. */
for (k = 0; k < NUM_CMAP_ATOMS && bAddCMAP; k++)
{
- cmap_atomid[k] = search_atom(restp[residx].rb[ebtsCMAP].b[j].a[k],
+ /* Assign the pointer to the name of the next reference atom.
+ * This can use -/+ labels to refer to previous/next residue.
+ */
+ pname = restp[residx].rb[ebtsCMAP].b[j].a[k];
+ /* Skip this CMAP entry if it refers to residues before the
+ * first or after the last residue.
+ */
+ if (((strchr(pname, '-') != NULL) && (residx == 0)) ||
+ ((strchr(pname, '+') != NULL) && (residx == nres-1)))
+ {
+ bAddCMAP = FALSE;
+ break;
+ }
+
+ cmap_atomid[k] = search_atom(pname,
i, atoms, ptr, TRUE);
bAddCMAP = bAddCMAP && (cmap_atomid[k] != NO_ATID);
if (!bAddCMAP)
bAddCMAP = bAddCMAP &&
cmap_chainnum == resinfo[this_residue_index].chainnum;
}
- bAddCMAP = bAddCMAP && gmx_residuetype_is_protein(rt, *(resinfo[this_residue_index].name));
+ /* Here we used to check that the residuetype was protein and
+ * disable bAddCMAP if that was not the case. However, some
+ * special residues (say, alanine dipeptides) might not adhere
+ * to standard naming, and if we start calling them normal
+ * protein residues the user will be bugged to select termini.
+ *
+ * Instead, I believe that the right course of action is to
+ * keep the CMAP interaction if it is present in the RTP file
+ * and we correctly identified all atoms (which is the case
+ * if we got here).
+ */
}
if (bAddCMAP)
}
}
}
-
/* Start the next residue */
}
int *vsite_type;
int i, nmissat;
int bts[ebtsNR];
- gmx_residuetype_t rt;
+ gmx_residuetype_t*rt;
init_plist(plist);
gmx_residuetype_init(&rt);
/* Make CMAP */
if (TRUE == bCmap)
{
- gen_cmap(&(plist[F_CMAP]), restp, atoms, rt);
+ gen_cmap(&(plist[F_CMAP]), restp, atoms);
if (plist[F_CMAP].nr > 0)
{
fprintf(stderr, "There are %4d cmap torsion pairs\n",
#include "gromacs/utility/cstringutil.h"
#include "pgutil.h"
#include <string.h>
-#include "gmx_fatal.h"
+#include "gromacs/utility/fatalerror.h"
#define BUFSIZE 1024
static void atom_not_found(int fatal_errno, const char *file, int line,
return NO_ATID;
}
+ atom_id
+ search_res_atom(const char *type, int resind,
+ t_atoms *atoms,
+ const char *bondtype, gmx_bool bAllowMissing)
+ {
+ int i;
+
+ for (i = 0; (i < atoms->nr); i++)
+ {
+ if (atoms->atom[i].resind == resind)
+ {
+ return search_atom(type, i, atoms, bondtype, bAllowMissing);
+ }
+ }
+
+ return NO_ATID;
+ }
+
+
void set_at(t_atom *at, real m, real q, int type, int resind)
{
at->m = m;
#endif
#include <ctype.h>
-#include <stdlib.h>
#include <limits.h>
-#include "sysstuff.h"
-#include "gromacs/utility/smalloc.h"
+#include <stdlib.h>
+
#include "typedefs.h"
-#include "physics.h"
+#include "gromacs/math/units.h"
#include "names.h"
-#include "gmx_fatal.h"
#include "macros.h"
-#include "index.h"
-#include "symtab.h"
+#include "gromacs/topology/index.h"
#include "gromacs/utility/cstringutil.h"
#include "readinp.h"
#include "warninp.h"
#include "readir.h"
#include "toputil.h"
-#include "index.h"
#include "network.h"
-#include "vec.h"
-#include "pbc.h"
-#include "mtop_util.h"
+#include "gromacs/math/vec.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/topology/mtop_util.h"
#include "chargegroup.h"
#include "inputrec.h"
#include "calc_verletbuf.h"
+#include "gromacs/topology/block.h"
+#include "gromacs/topology/symtab.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/smalloc.h"
+
#define MAXPTR 254
#define NOGID 255
STYPE ("temperature-lambdas", is->fep_lambda[efptTEMPERATURE], NULL);
ITYPE ("calc-lambda-neighbors", fep->lambda_neighbors, 1);
STYPE ("init-lambda-weights", is->lambda_weights, NULL);
- EETYPE("dhdl-print-energy", fep->bPrintEnergy, yesno_names);
+ EETYPE("dhdl-print-energy", fep->edHdLPrintEnergy, edHdLPrintEnergy_names);
RTYPE ("sc-alpha", fep->sc_alpha, 0.0);
ITYPE ("sc-power", fep->sc_power, 1);
RTYPE ("sc-r-power", fep->sc_r_power, 6.0);
}
}
- if (ir->bSimTemp)
+ if (fep->edHdLPrintEnergy == edHdLPrintEnergyYES)
{
- fep->bPrintEnergy = TRUE;
- /* always print out the energy to dhdl if we are doing expanded ensemble, since we need the total energy
- if the temperature is changing. */
+ fep->edHdLPrintEnergy = edHdLPrintEnergyTOTAL;
+ warning_note(wi, "Old option for dhdl-print-energy given: "
+ "changing \"yes\" to \"total\"\n");
+ }
+
+ if (ir->bSimTemp && (fep->edHdLPrintEnergy == edHdLPrintEnergyNO))
+ {
+ /* always print out the energy to dhdl if we are doing
+ expanded ensemble, since we need the total energy for
+ analysis if the temperature is changing. In some
+ conditions one may only want the potential energy, so
+ we will allow that if the appropriate mdp setting has
+ been enabled. Otherwise, total it is:
+ */
+ fep->edHdLPrintEnergy = edHdLPrintEnergyTOTAL;
}
if ((ir->efep != efepNO) || ir->bSimTemp)
void make_IMD_group(t_IMD *IMDgroup, char *IMDgname, t_blocka *grps, char **gnames)
{
- int ig = -1, i;
+ int ig, i;
ig = search_string(IMDgname, grps->nr, gnames);
CHECK((ir->coulombtype == eelGRF) && (ir->opts.ref_t[0] <= 0));
}
- if (ir->eI == eiSD1 &&
- (gmx_mtop_ftype_count(sys, F_CONSTR) > 0 ||
- gmx_mtop_ftype_count(sys, F_SETTLE) > 0))
+ if (ir->eI == eiSD2)
{
- sprintf(warn_buf, "With constraints integrator %s is less accurate, consider using %s instead", ei_names[ir->eI], ei_names[eiSD2]);
+ sprintf(warn_buf, "The stochastic dynamics integrator %s is deprecated, since\n"
+ "it is slower than integrator %s and is slightly less accurate\n"
+ "with constraints. Use the %s integrator.",
+ ei_names[ir->eI], ei_names[eiSD1], ei_names[eiSD1]);
warning_note(wi, warn_buf);
}
#ifndef _constr_h
#define _constr_h
+
#include "typedefs.h"
#ifdef __cplusplus
extern "C" {
#endif
+struct t_pbc;
+
enum
{
econqCoord, /* Constrain coordinates (mass weighted) */
real dOH, real dHH);
/* Initializes and returns a structure with SETTLE parameters */
-void csettle(gmx_settledata_t settled,
- int nsettle, /* Number of settles */
- t_iatom iatoms[], /* The settle iatom list */
- const t_pbc *pbc, /* PBC data pointer, can be NULL */
- real b4[], /* Old coordinates */
- real after[], /* New coords, to be settled */
- real invdt, /* 1/delta_t */
- real *v, /* Also constrain v if v!=NULL */
- int calcvir_atom_end, /* Calculate r x m delta_r up to this atom */
- tensor vir_r_m_dr, /* sum r x m delta_r */
- int *xerror,
- t_vetavars *vetavar /* variables for pressure control */
+void csettle(gmx_settledata_t settled,
+ int nsettle, /* Number of settles */
+ t_iatom iatoms[], /* The settle iatom list */
+ const struct t_pbc *pbc, /* PBC data pointer, can be NULL */
+ real b4[], /* Old coordinates */
+ real after[], /* New coords, to be settled */
+ real invdt, /* 1/delta_t */
+ real *v, /* Also constrain v if v!=NULL */
+ int calcvir_atom_end, /* Calculate r x m delta_r up to this atom */
+ tensor vir_r_m_dr, /* sum r x m delta_r */
+ int *xerror,
+ t_vetavars *vetavar /* variables for pressure control */
);
void settle_proj(gmx_settledata_t settled, int econq,
int nsettle, t_iatom iatoms[],
- const t_pbc *pbc, /* PBC data pointer, can be NULL */
+ const struct t_pbc *pbc, /* PBC data pointer, can be NULL */
rvec x[],
rvec *der, rvec *derp,
int CalcVirAtomEnd, tensor vir_r_m_dder,
gmx_ekindata_t *ekind,
t_commrec *cr,
gmx_int64_t step, int delta_step,
+ real step_scaling,
t_mdatoms *md,
rvec *x, rvec *xprime, rvec *min_proj,
gmx_bool bMolPBC, matrix box,
* step + delta_step is the step at which the final configuration
* is meant to be; for update delta_step = 1.
*
+ * step_scaling can be used to update coordinates based on the time
+ * step multiplied by this factor. Thus, normally 1.0 is passed. The
+ * SD1 integrator uses 0.5 in one of its calls, to correct positions
+ * for half a step of changed velocities.
+ *
* If v!=NULL also constrain v by adding the constraint corrections / dt.
*
* If vir!=NULL calculate the constraint virial.
gmx_lincsdata_t lincsd, t_mdatoms *md,
t_commrec *cr,
rvec *x, rvec *xprime, rvec *min_proj,
- matrix box, t_pbc *pbc,
+ matrix box, struct t_pbc *pbc,
real lambda, real *dvdlambda,
real invdt, rvec *v,
gmx_bool bCalcVir, tensor vir_r_m_dr,
#include <stdio.h>
-#include "types/simple.h"
-#include "typedefs.h"
-#include "main.h"
-#include "gmx_fatal.h"
+#include "../utility/basedefinitions.h"
+#include "../utility/fatalerror.h"
#ifdef __cplusplus
extern "C" {
#endif
-t_commrec *init_commrec(void);
+struct gmx_domdec_t;
+struct gmx_multisim_t;
+struct t_commrec;
+
+struct t_commrec *init_commrec(void);
/* Allocate, initialize and return the commrec. */
-t_commrec *reinitialize_commrec_for_this_thread(const t_commrec *cro);
+struct t_commrec *reinitialize_commrec_for_this_thread(const struct t_commrec *cro);
/* Initialize communication records for thread-parallel simulations.
Must be called on all threads before any communication takes place by
the individual threads. Copies the original commrec to
thread-local versions (a small memory leak results because we don't
deallocate the old shared version). */
-void gmx_fill_commrec_from_mpi(t_commrec *cr);
+void gmx_fill_commrec_from_mpi(struct t_commrec *cr);
/* Continues t_commrec construction */
-int gmx_node_num(void);
-/* return the number of nodes in the ring */
-
-int gmx_node_rank(void);
-/* return the rank of the node */
-
-int gmx_physicalnode_id_hash(void);
-/* Return a non-negative hash that is, hopefully, unique for each physical node.
- * This hash is useful for determining hardware locality.
- */
-
-void gmx_setup_nodecomm(FILE *fplog, t_commrec *cr);
+void gmx_setup_nodecomm(FILE *fplog, struct t_commrec *cr);
/* Sets up fast global communication for clusters with multi-core nodes */
-void gmx_init_intranode_counters(t_commrec *cr);
+void gmx_init_intranode_counters(struct t_commrec *cr);
/* Initializes intra-physical-node MPI process/thread counts and ID. */
-gmx_bool gmx_mpi_initialized(void);
-/* return TRUE when MPI_Init has been called.
- * return FALSE when MPI_Init has not been called OR
- * when GROMACS was compiled without MPI support.
- */
-
-void gmx_barrier(const t_commrec *cr);
+void gmx_barrier(const struct t_commrec *cr);
/* Wait till all processes in cr->mpi_comm_mygroup have reached the barrier */
-void gmx_bcast(int nbytes, void *b, const t_commrec *cr);
+void gmx_bcast(int nbytes, void *b, const struct t_commrec *cr);
/* Broadcast nbytes bytes from the master to cr->mpi_comm_mygroup */
-void gmx_bcast_sim(int nbytes, void *b, const t_commrec *cr);
+void gmx_bcast_sim(int nbytes, void *b, const struct t_commrec *cr);
/* Broadcast nbytes bytes from the sim master to cr->mpi_comm_mysim */
-void gmx_sumi(int nr, int r[], const t_commrec *cr);
+void gmx_sumi(int nr, int r[], const struct t_commrec *cr);
/* Calculate the global sum of an array of ints */
-void gmx_sumli(int nr, gmx_int64_t r[], const t_commrec *cr);
+void gmx_sumli(int nr, gmx_int64_t r[], const struct t_commrec *cr);
/* Calculate the global sum of an array of large ints */
-void gmx_sumf(int nr, float r[], const t_commrec *cr);
+void gmx_sumf(int nr, float r[], const struct t_commrec *cr);
/* Calculate the global sum of an array of floats */
-void gmx_sumd(int nr, double r[], const t_commrec *cr);
+void gmx_sumd(int nr, double r[], const struct t_commrec *cr);
/* Calculate the global sum of an array of doubles */
-void gmx_sumi_sim(int nr, int r[], const gmx_multisim_t *ms);
+void gmx_sumi_sim(int nr, int r[], const struct gmx_multisim_t *ms);
/* Calculate the sum over the simulations of an array of ints */
-void gmx_sumli_sim(int nr, gmx_int64_t r[], const gmx_multisim_t *ms);
+void gmx_sumli_sim(int nr, gmx_int64_t r[], const struct gmx_multisim_t *ms);
/* Calculate the sum over the simulations of an array of large ints */
-void gmx_sumf_sim(int nr, float r[], const gmx_multisim_t *ms);
+void gmx_sumf_sim(int nr, float r[], const struct gmx_multisim_t *ms);
/* Calculate the sum over the simulations of an array of floats */
-void gmx_sumd_sim(int nr, double r[], const gmx_multisim_t *ms);
+void gmx_sumd_sim(int nr, double r[], const struct gmx_multisim_t *ms);
/* Calculate the sum over the simulations of an array of doubles */
-void gmx_abort(int nodeid, int nnodes, int errorno);
-/* Abort the parallel run */
-
#ifdef GMX_DOUBLE
#define gmx_sum gmx_sumd
#define gmx_sum_sim gmx_sumd_sim
#define gmx_sum_sim gmx_sumf_sim
#endif
+gmx_bool gmx_fexist_master(const char *fname, struct t_commrec *cr);
+/* Return TRUE when fname exists, FALSE otherwise, bcast from master to others */
+
+void
+gmx_fatal_collective(int f_errno, const char *file, int line,
+ const struct t_commrec *cr, struct gmx_domdec_t *dd,
+ const char *fmt, ...);
+/* As gmx_fatal declared in utility/fatalerror.h,
+ * but only the master process prints the error message.
+ * This should only be called in one of the following two situations:
+ * 1) On all nodes in cr->mpi_comm_mysim, with cr!=NULL,dd==NULL.
+ * 2) On all nodes in dd->mpi_comm_all, with cr==NULL,dd!=NULL.
+ * This will call MPI_Finalize instead of MPI_Abort when possible.
+ * This is useful for handling errors in code that is executed identically
+ * for all processes.
+ */
+
+/* This doesn't currently work if enabled (needs some header cleanup). */
#ifdef DEBUG_GMX
#define debug_gmx() do { FILE *fp = debug ? debug : stderr; \
- if (bDebugMode()) { fprintf(fp, "NODEID=%d, %s %d\n", gmx_mpi_initialized() ? gmx_node_rank() : -1, __FILE__, __LINE__); } fflush(fp); } while (0)
+ if (bDebugMode()) { fprintf(fp, "rank=%d, %s %d\n", gmx_mpi_initialized() ? gmx_node_rank() : -1, __FILE__, __LINE__); } fflush(fp); } while (0)
#else
#define debug_gmx()
#endif
#ifndef _inputrec_h_
#define _inputrec_h_
+#include <stdio.h>
#include "simple.h"
#include "enums.h"
-#include "../sysstuff.h"
#include "../../swap/enums.h"
#ifdef __cplusplus
valid value if positive) */
int init_fep_state; /* the initial number of the state */
double delta_lambda; /* change of lambda per time step (fraction of (0.1) */
- gmx_bool bPrintEnergy; /* Whether to print the energy in the dhdl */
+ int edHdLPrintEnergy; /* print no, total or potential energies in dhdl */
int n_lambda; /* The number of foreign lambda points */
double **all_lambda; /* The array of all lambda values */
int lambda_neighbors; /* The number of neighboring lambda states to
#include <config.h>
#endif
+ #include <assert.h>
+#include <stdlib.h>
#include "gromacs/fileio/confio.h"
#include "types/commrec.h"
#include "constr.h"
#include "copyrite.h"
-#include "invblock.h"
-#include "main.h"
#include "mdrun.h"
#include "nrnb.h"
-#include "gromacs/utility/smalloc.h"
-#include "vec.h"
-#include "physics.h"
+#include "gromacs/math/vec.h"
#include "names.h"
#include "txtdump.h"
#include "domdec.h"
#include "gromacs/fileio/pdbio.h"
#include "splitter.h"
-#include "mtop_util.h"
+#include "gromacs/topology/mtop_util.h"
#include "gromacs/fileio/gmxfio.h"
#include "macros.h"
#include "gmx_omp_nthreads.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/pulling/pull.h"
-#include "gmx_fatal.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/topology/block.h"
+#include "gromacs/topology/invblock.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/smalloc.h"
typedef struct gmx_constr {
int ncon_tot; /* The total number of constraints */
atom_id blocknr;
} t_sortblock;
+ /* delta_t should be used instead of ir->delta_t, to permit the time
+ step to be scaled by the calling code */
static void *init_vetavars(t_vetavars *vars,
gmx_bool constr_deriv,
- real veta, real vetanew, t_inputrec *ir, gmx_ekindata_t *ekind, gmx_bool bPscal)
+ real veta, real vetanew,
+ t_inputrec *ir, real delta_t,
+ gmx_ekindata_t *ekind, gmx_bool bPscal)
{
double g;
int i;
{
vars->alpha = 1.0;
}
- g = 0.5*veta*ir->delta_t;
+ g = 0.5*veta*delta_t;
vars->rscale = exp(g)*series_sinhx(g);
- g = -0.25*vars->alpha*veta*ir->delta_t;
+ g = -0.25*vars->alpha*veta*delta_t;
vars->vscale = exp(g)*series_sinhx(g);
vars->rvscale = vars->vscale*vars->rscale;
vars->veta = vetanew;
t_idef *idef, t_inputrec *ir, gmx_ekindata_t *ekind,
t_commrec *cr,
gmx_int64_t step, int delta_step,
+ real step_scaling,
t_mdatoms *md,
rvec *x, rvec *xprime, rvec *min_proj,
gmx_bool bMolPBC, matrix box,
int ncons, settle_error;
tensor vir_r_m_dr;
rvec *vstor;
+ real scaled_delta_t;
real invdt, vir_fac, t;
t_ilist *settle;
int nsettle;
homenr = md->homenr;
nrend = start+homenr;
+ scaled_delta_t = step_scaling * ir->delta_t;
+
/* set constants for pressure control integration */
init_vetavars(&vetavar, econq != econqCoord,
- veta, vetanew, ir, ekind, bPscal);
+ veta, vetanew, ir, scaled_delta_t, ekind, bPscal);
+ /* Prepare time step for use in constraint implementations, and
+ avoid generating inf when ir->delta_t = 0. */
if (ir->delta_t == 0)
{
- invdt = 0;
+ invdt = 0.0;
}
else
{
- invdt = 1/ir->delta_t;
+ invdt = 1.0/scaled_delta_t;
}
if (ir->efep != efepNO && EI_DYNAMICS(ir->eI))
if (vir != NULL)
{
+ /* The normal uses of constrain() pass step_scaling = 1.0.
+ * The call to constrain() for SD1 that passes step_scaling =
+ * 0.5 also passes vir = NULL, so cannot reach this
+ * assertion. This assertion should remain until someone knows
+ * that this path works for their intended purpose, and then
+ * they can use scaled_delta_t instead of ir->delta_t
+ * below. */
+ assert(gmx_within_tol(step_scaling, 1.0, GMX_REAL_EPS));
switch (econq)
{
case econqCoord:
j, constr->nblocks, ncons);
for (i = 0; (i < ncons); i++)
{
- fprintf(stderr, "i: %5d sb[i].blocknr: %5u\n", i, sb[i].blocknr);
+ fprintf(stderr, "i: %5d sb[i].blocknr: %5d\n", i, sb[i].blocknr);
}
for (j = 0; (j <= constr->nblocks); j++)
{
#include <assert.h>
#include "typedefs.h"
-#include "gromacs/utility/smalloc.h"
-#include "gmx_fatal.h"
-#include "gmx_fatal_collective.h"
-#include "vec.h"
+#include "network.h"
+#include "gromacs/math/vec.h"
#include "domdec.h"
#include "domdec_network.h"
#include "nrnb.h"
-#include "pbc.h"
#include "chargegroup.h"
#include "constr.h"
#include "mdatoms.h"
#include "mdrun.h"
#include "nsgrid.h"
#include "shellfc.h"
-#include "mtop_util.h"
+#include "gromacs/topology/mtop_util.h"
#include "gmx_ga2la.h"
#include "macros.h"
#include "nbnxn_search.h"
#include "gmx_omp_nthreads.h"
#include "gpu_utils.h"
-#include "gromacs/fileio/futil.h"
+#include "gromacs/utility/futil.h"
#include "gromacs/fileio/gmxfio.h"
#include "gromacs/fileio/pdbio.h"
+#include "gromacs/imd/imd.h"
+#include "gromacs/pbcutil/ishift.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/pulling/pull.h"
+#include "gromacs/pulling/pull_rotation.h"
+#include "gromacs/swap/swapcoords.h"
#include "gromacs/timing/wallcycle.h"
+#include "gromacs/utility/basenetwork.h"
+#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
-#include "gromacs/swap/swapcoords.h"
#include "gromacs/utility/qsort_threadsafe.h"
-#include "gromacs/pulling/pull.h"
-#include "gromacs/pulling/pull_rotation.h"
-#include "gromacs/imd/imd.h"
+#include "gromacs/utility/smalloc.h"
#define DDRANK(dd, rank) (rank)
#define DDMASTERRANK(dd) (dd->masterrank)
snew(grid_r, 2*dd->nnodes);
}
- dd_gather(dd, 2*sizeof(rvec), grid_s[0], DDMASTER(dd) ? grid_r[0] : NULL);
+ dd_gather(dd, 2*sizeof(rvec), grid_s, DDMASTER(dd) ? grid_r : NULL);
if (DDMASTER(dd))
{
if (debug)
{
- fprintf(debug, "Receive coordinates from PP nodes:");
+ fprintf(debug, "Receive coordinates from PP ranks:");
for (x = 0; x < *nmy_ddnodes; x++)
{
fprintf(debug, " %d", (*my_ddnodes)[x]);
if (!bLocalCG[dd->index_gl[i]])
{
fprintf(stderr,
- "DD node %d, %s: cg %d, global cg %d is not marked in bLocalCG (ncg_home %d)\n", dd->rank, where, i+1, dd->index_gl[i]+1, dd->ncg_home);
+ "DD rank %d, %s: cg %d, global cg %d is not marked in bLocalCG (ncg_home %d)\n", dd->rank, where, i+1, dd->index_gl[i]+1, dd->ncg_home);
nerr++;
}
}
}
if (ngl != dd->ncg_tot)
{
- fprintf(stderr, "DD node %d, %s: In bLocalCG %d cgs are marked as local, whereas there are %d\n", dd->rank, where, ngl, dd->ncg_tot);
+ fprintf(stderr, "DD rank %d, %s: In bLocalCG %d cgs are marked as local, whereas there are %d\n", dd->rank, where, ngl, dd->ncg_tot);
nerr++;
}
{
if (have[dd->gatindex[a]] > 0)
{
- fprintf(stderr, "DD node %d: global atom %d occurs twice: index %d and %d\n", dd->rank, dd->gatindex[a]+1, have[dd->gatindex[a]], a+1);
+ fprintf(stderr, "DD rank %d: global atom %d occurs twice: index %d and %d\n", dd->rank, dd->gatindex[a]+1, have[dd->gatindex[a]], a+1);
}
else
{
{
if (a >= dd->nat_tot)
{
- fprintf(stderr, "DD node %d: global atom %d marked as local atom %d, which is larger than nat_tot (%d)\n", dd->rank, i+1, a+1, dd->nat_tot);
+ fprintf(stderr, "DD rank %d: global atom %d marked as local atom %d, which is larger than nat_tot (%d)\n", dd->rank, i+1, a+1, dd->nat_tot);
nerr++;
}
else
have[a] = 1;
if (dd->gatindex[a] != i)
{
- fprintf(stderr, "DD node %d: global atom %d marked as local atom %d, which has global atom index %d\n", dd->rank, i+1, a+1, dd->gatindex[a]+1);
+ fprintf(stderr, "DD rank %d: global atom %d marked as local atom %d, which has global atom index %d\n", dd->rank, i+1, a+1, dd->gatindex[a]+1);
nerr++;
}
}
if (ngl != dd->nat_tot)
{
fprintf(stderr,
- "DD node %d, %s: %d global atom indices, %d local atoms\n",
+ "DD rank %d, %s: %d global atom indices, %d local atoms\n",
dd->rank, where, ngl, dd->nat_tot);
}
for (a = 0; a < dd->nat_tot; a++)
if (have[a] == 0)
{
fprintf(stderr,
- "DD node %d, %s: local atom %d, global %d has no global index\n",
+ "DD rank %d, %s: local atom %d, global %d has no global index\n",
dd->rank, where, a+1, dd->gatindex[a]+1);
}
}
if (nerr > 0)
{
- gmx_fatal(FARGS, "DD node %d, %s: %d atom/cg index inconsistencies",
+ gmx_fatal(FARGS, "DD rank %d, %s: %d atom/cg index inconsistencies",
dd->rank, where, nerr);
}
}
/* This error should never be triggered under normal
* circumstances, but you never know ...
*/
- gmx_fatal(FARGS, "Step %s: The domain decomposition grid has shifted too much in the %c-direction around cell %d %d %d. This should not have happened. Running with less nodes might avoid this issue.",
+ gmx_fatal(FARGS, "Step %s: The domain decomposition grid has shifted too much in the %c-direction around cell %d %d %d. This should not have happened. Running with fewer ranks might avoid this issue.",
gmx_step_str(step, buf),
dim2char(dim), dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
}
dim2char(d), ddbox->box_size[d], ddbox->skew_fac[d],
comm->cutoff,
dd->nc[d], dd->nc[d],
- dd->nnodes > dd->nc[d] ? "cells" : "processors");
+ dd->nnodes > dd->nc[d] ? "cells" : "ranks");
if (setmode == setcellsizeslbLOCAL)
{
if (npme > 0 && fabs(lossp) >= DD_PERF_LOSS_WARN)
{
sprintf(buf,
- "NOTE: %.1f %% performance was lost because the PME nodes\n"
- " had %s work to do than the PP nodes.\n"
- " You might want to %s the number of PME nodes\n"
+ "NOTE: %.1f %% performance was lost because the PME ranks\n"
+ " had %s work to do than the PP ranks.\n"
+ " You might want to %s the number of PME ranks\n"
" or %s the cut-off and the grid spacing.\n",
fabs(lossp*100),
(lossp < 0) ? "less" : "more",
if (fplog)
{
fprintf(fplog,
- "Domain decomposition nodeid %d, coordinates %d %d %d\n\n",
+ "Domain decomposition rank %d, coordinates %d %d %d\n\n",
dd->rank, dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
}
if (debug)
{
fprintf(debug,
- "Domain decomposition nodeid %d, coordinates %d %d %d\n\n",
+ "Domain decomposition rank %d, coordinates %d %d %d\n\n",
dd->rank, dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
}
}
}
else if (fplog)
{
- fprintf(fplog, "#pmenodes (%d) is not a multiple of nx*ny (%d*%d) or nx*nz (%d*%d)\n", cr->npmenodes, dd->nc[XX], dd->nc[YY], dd->nc[XX], dd->nc[ZZ]);
+ fprintf(fplog, "Number of PME-only ranks (%d) is not a multiple of nx*ny (%d*%d) or nx*nz (%d*%d)\n", cr->npmenodes, dd->nc[XX], dd->nc[YY], dd->nc[XX], dd->nc[ZZ]);
fprintf(fplog,
"Will not use a Cartesian communicator for PP <-> PME\n\n");
}
if (fplog)
{
- fprintf(fplog, "Cartesian nodeid %d, coordinates %d %d %d\n\n",
+ fprintf(fplog, "Cartesian rank %d, coordinates %d %d %d\n\n",
cr->sim_nodeid, dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
}
case ddnoPP_PME:
if (fplog)
{
- fprintf(fplog, "Order of the nodes: PP first, PME last\n");
+ fprintf(fplog, "Order of the ranks: PP first, PME last\n");
}
break;
case ddnoINTERLEAVE:
*/
if (fplog)
{
- fprintf(fplog, "Interleaving PP and PME nodes\n");
+ fprintf(fplog, "Interleaving PP and PME ranks\n");
}
comm->pmenodes = dd_pmenodes(cr);
break;
if (fplog)
{
- fprintf(fplog, "This is a %s only node\n\n",
+ fprintf(fplog, "This rank does only %s work.\n\n",
(cr->duty & DUTY_PP) ? "particle-particle" : "PME-mesh");
}
}
if (fplog)
{
fprintf(fplog,
- "\nInitializing Domain Decomposition on %d nodes\n", cr->nnodes);
+ "\nInitializing Domain Decomposition on %d ranks\n", cr->nnodes);
}
snew(dd, 1);
if (dd->nc[XX] == 0)
{
bC = (dd->bInterCGcons && rconstr > r_bonded_limit);
- sprintf(buf, "Change the number of nodes or mdrun option %s%s%s",
+ sprintf(buf, "Change the number of ranks or mdrun option %s%s%s",
!bC ? "-rdd" : "-rcon",
comm->eDLB != edlbNO ? " or -dds" : "",
bC ? " or your LINCS settings" : "");
gmx_fatal_collective(FARGS, cr, NULL,
- "There is no domain decomposition for %d nodes that is compatible with the given box and a minimum cell size of %g nm\n"
+ "There is no domain decomposition for %d ranks that is compatible with the given box and a minimum cell size of %g nm\n"
"%s\n"
"Look in the log file for details on the domain decomposition",
cr->nnodes-cr->npmenodes, limit, buf);
if (fplog)
{
fprintf(fplog,
- "Domain decomposition grid %d x %d x %d, separate PME nodes %d\n",
+ "Domain decomposition grid %d x %d x %d, separate PME ranks %d\n",
dd->nc[XX], dd->nc[YY], dd->nc[ZZ], cr->npmenodes);
}
if (cr->nnodes - dd->nnodes != cr->npmenodes)
{
gmx_fatal_collective(FARGS, cr, NULL,
- "The size of the domain decomposition grid (%d) does not match the number of nodes (%d). The total number of nodes is %d",
+ "The size of the domain decomposition grid (%d) does not match the number of ranks (%d). The total number of ranks is %d",
dd->nnodes, cr->nnodes - cr->npmenodes, cr->nnodes);
}
if (cr->npmenodes > dd->nnodes)
{
gmx_fatal_collective(FARGS, cr, NULL,
- "The number of separate PME nodes (%d) is larger than the number of PP nodes (%d), this is not supported.", cr->npmenodes, dd->nnodes);
+ "The number of separate PME ranks (%d) is larger than the number of PP ranks (%d), this is not supported.", cr->npmenodes, dd->nnodes);
}
if (cr->npmenodes > 0)
{
if (dd->pme_nodeid >= 0)
{
gmx_fatal_collective(FARGS, NULL, dd,
- "Can not have separate PME nodes without PME electrostatics");
+ "Can not have separate PME ranks without PME electrostatics");
}
}
#endif
#include <assert.h>
-#include "gromacs/utility/smalloc.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "constr.h"
#include "types/commrec.h"
#include "domdec.h"
#include "domdec_network.h"
-#include "mtop_util.h"
+#include "gromacs/topology/mtop_util.h"
#include "gmx_ga2la.h"
#include "gmx_hash.h"
#include "gmx_omp_nthreads.h"
#include "macros.h"
-#include "gmx_fatal.h"
+#include "gromacs/pbcutil/ishift.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/smalloc.h"
typedef struct {
int nsend;
nsend, 2, buf, 2);
if (debug)
{
- fprintf(debug, "Send to node %d, %d (%d) indices, "
- "receive from node %d, %d (%d) indices\n",
+ fprintf(debug, "Send to rank %d, %d (%d) indices, "
+ "receive from rank %d, %d (%d) indices\n",
dd->neighbor[d][1-dir], nsend[1], nsend[0],
dd->neighbor[d][dir], buf[1], buf[0]);
if (gmx_debug_at)
#include "types/commrec.h"
#include "network.h"
#include "perf_est.h"
-#include "physics.h"
#include "gromacs/utility/smalloc.h"
#include "typedefs.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "names.h"
/* Margin for setting up the DD grid */
}
if (npme > nnodes/2)
{
- gmx_fatal(FARGS, "Could not find an appropriate number of separate PME nodes. i.e. >= %5f*#nodes (%d) and <= #nodes/2 (%d) and reasonable performance wise (grid_x=%d, grid_y=%d).\n"
- "Use the -npme option of mdrun or change the number of processors or the PME grid dimensions, see the manual for details.",
+ gmx_fatal(FARGS, "Could not find an appropriate number of separate PME ranks. i.e. >= %5f*#ranks (%d) and <= #ranks/2 (%d) and reasonable performance wise (grid_x=%d, grid_y=%d).\n"
+ "Use the -npme option of mdrun or change the number of ranks or the PME grid dimensions, see the manual for details.",
ratio, (int)(0.95*ratio*nnodes+0.5), nnodes/2, ir->nkx, ir->nky);
/* Keep the compiler happy */
npme = 0;
if (fplog)
{
fprintf(fplog,
- "Will use %d particle-particle and %d PME only nodes\n"
+ "Will use %d particle-particle and %d PME only ranks\n"
"This is a guess, check the performance at the end of the log file\n",
nnodes-npme, npme);
}
fprintf(stderr, "\n"
- "Will use %d particle-particle and %d PME only nodes\n"
+ "Will use %d particle-particle and %d PME only ranks\n"
"This is a guess, check the performance at the end of the log file\n",
nnodes-npme, npme);
}
if (cr->nnodes <= 2)
{
gmx_fatal(FARGS,
- "Can not have separate PME nodes with 2 or less nodes");
+ "Cannot have separate PME ranks with 2 or fewer ranks");
}
if (cr->npmenodes >= cr->nnodes)
{
gmx_fatal(FARGS,
- "Can not have %d separate PME nodes with just %d total nodes",
+ "Cannot have %d separate PME ranks with just %d total ranks",
cr->npmenodes, cr->nnodes);
}
/* Check if the largest divisor is more than nnodes^2/3 */
if (ldiv*ldiv*ldiv > nnodes_div*nnodes_div)
{
- gmx_fatal(FARGS, "The number of nodes you selected (%d) contains a large prime factor %d. In most cases this will lead to bad performance. Choose a number with smaller prime factors or set the decomposition (option -dd) manually.",
+ gmx_fatal(FARGS, "The number of ranks you selected (%d) contains a large prime factor %d. In most cases this will lead to bad performance. Choose a number with smaller prime factors or set the decomposition (option -dd) manually.",
nnodes_div, ldiv);
}
}
cr->npmenodes = 0;
if (fplog)
{
- fprintf(fplog, "Using %d separate PME nodes, as there are too few total\n nodes for efficient splitting\n", cr->npmenodes);
+ fprintf(fplog, "Using %d separate PME ranks, as there are too few total\n ranks for efficient splitting\n", cr->npmenodes);
}
}
else
cr->npmenodes = guess_npme(fplog, mtop, ir, box, cr->nnodes);
if (fplog)
{
- fprintf(fplog, "Using %d separate PME nodes, as guessed by mdrun\n", cr->npmenodes);
+ fprintf(fplog, "Using %d separate PME ranks, as guessed by mdrun\n", cr->npmenodes);
}
}
}
{
if (fplog)
{
- fprintf(fplog, "Using %d separate PME nodes, per user request\n", cr->npmenodes);
+ fprintf(fplog, "Using %d separate PME ranks, per user request\n", cr->npmenodes);
}
}
}
#include <config.h>
#endif
+#include <assert.h>
#include <math.h>
#include <string.h>
-#include <assert.h>
-#include "sysstuff.h"
+
#include "typedefs.h"
#include "macros.h"
-#include "gromacs/utility/smalloc.h"
-#include "macros.h"
-#include "physics.h"
#include "force.h"
#include "nonbonded.h"
#include "names.h"
#include "network.h"
-#include "pbc.h"
#include "ns.h"
#include "nrnb.h"
#include "bondf.h"
-#include "mshift.h"
#include "txtdump.h"
#include "coulomb.h"
#include "pme.h"
#include "qmmm.h"
#include "gmx_omp_nthreads.h"
+#include "gromacs/legacyheaders/types/commrec.h"
+#include "gromacs/math/vec.h"
+#include "gromacs/pbcutil/ishift.h"
+#include "gromacs/pbcutil/mshift.h"
+#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
-#include "gmx_fatal.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/smalloc.h"
void ns(FILE *fp,
t_forcerec *fr,
if (bSepDVDL)
{
- fprintf(fplog, "Step %s: non-bonded V and dVdl for node %d:\n",
+ fprintf(fplog, "Step %s: non-bonded V and dVdl for rank %d:\n",
gmx_step_str(step, buf), cr->nodeid);
}
where();
*cycles_pme = 0;
+ clear_mat(fr->vir_el_recip);
+ clear_mat(fr->vir_lj_recip);
+
+ /* Do long-range electrostatics and/or LJ-PME, including related short-range
+ * corrections.
+ */
if (EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype))
{
- real Vlr = 0, Vcorr = 0;
- real dvdl_long_range = 0;
- int status = 0;
+ real Vlr = 0, Vcorr = 0;
+ real dvdl_long_range = 0;
+ int status = 0;
+ real Vlr_q = 0, Vlr_lj = 0, Vcorr_q = 0, Vcorr_lj = 0;
+ real dvdl_long_range_q = 0, dvdl_long_range_lj = 0;
bSB = (ir->nwall == 2);
if (bSB)
svmul(ir->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
box_size[ZZ] *= ir->wall_ewald_zfac;
}
- }
-
- /* Do long-range electrostatics and/or LJ-PME, including related short-range
- * corrections.
- */
-
- clear_mat(fr->vir_el_recip);
- clear_mat(fr->vir_lj_recip);
-
- if (EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype))
- {
- real Vlr_q = 0, Vlr_lj = 0, Vcorr_q = 0, Vcorr_lj = 0;
- real dvdl_long_range_q = 0, dvdl_long_range_lj = 0;
- int status = 0;
if (EEL_PME_EWALD(fr->eeltype) || EVDW_PME(fr->vdwtype))
{
PRINT_SEPDVDL("Ewald excl. corr. LJ", Vcorr_lj, dvdl_long_range_correction_lj);
enerd->dvdl_lin[efptCOUL] += dvdl_long_range_correction_q;
enerd->dvdl_lin[efptVDW] += dvdl_long_range_correction_lj;
- }
- if ((EEL_PME(fr->eeltype) || EVDW_PME(fr->vdwtype)))
- {
- if (cr->duty & DUTY_PME)
+ if ((EEL_PME(fr->eeltype) || EVDW_PME(fr->vdwtype)) && (cr->duty & DUTY_PME))
{
/* Do reciprocal PME for Coulomb and/or LJ. */
assert(fr->n_tpi >= 0);
fr->t_wait += t3-t2;
if (fr->timesteps == 11)
{
- fprintf(stderr, "* PP load balancing info: node %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
+ fprintf(stderr, "* PP load balancing info: rank %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
cr->nodeid, gmx_step_str(fr->timesteps, buf),
100*fr->t_wait/(fr->t_wait+fr->t_fnbf),
(fr->t_fnbf+fr->t_wait)/fr->t_fnbf);
#include <config.h>
#endif
+#include <assert.h>
#include <math.h>
#include <string.h>
-#include <assert.h>
-#include "sysstuff.h"
+
#include "typedefs.h"
#include "types/commrec.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "gromacs/math/utilities.h"
#include "macros.h"
-#include "gromacs/utility/smalloc.h"
#include "macros.h"
-#include "gmx_fatal.h"
-#include "physics.h"
+#include "gromacs/math/units.h"
#include "force.h"
#include "tables.h"
#include "nonbonded.h"
-#include "invblock.h"
#include "names.h"
#include "network.h"
-#include "pbc.h"
#include "ns.h"
-#include "mshift.h"
#include "txtdump.h"
#include "coulomb.h"
#include "md_support.h"
#include "domdec.h"
#include "qmmm.h"
#include "copyrite.h"
-#include "mtop_util.h"
+#include "gromacs/topology/mtop_util.h"
#include "nbnxn_simd.h"
#include "nbnxn_search.h"
#include "nbnxn_atomdata.h"
#include "gmx_detect_hardware.h"
#include "inputrec.h"
+#include "gromacs/pbcutil/ishift.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/smalloc.h"
+
#include "types/nbnxn_cuda_types_ext.h"
#include "gpu_utils.h"
#include "nbnxn_cuda_data_mgmt.h"
{
/* At this point the init should never fail as we made sure that
* we have all the GPUs we need. If it still does, we'll bail. */
- gmx_fatal(FARGS, "On node %d failed to initialize GPU #%d: %s",
+ gmx_fatal(FARGS, "On rank %d failed to initialize GPU #%d: %s",
cr->nodeid,
get_gpu_device_id(&hwinfo->gpu_info, gpu_opt,
cr->rank_pp_intranode),
if (bUsesSimpleTables)
{
- /* With a spacing of 0.0005 we are at the force summation accuracy
- * for the SSE kernels for "normal" atomistic simulations.
+ /* Get the Ewald table spacing based on Coulomb and/or LJ
+ * Ewald coefficients and rtol.
*/
- ic->tabq_scale = ewald_spline3_table_scale(ic->ewaldcoeff_q,
- ic->rcoulomb);
+ ic->tabq_scale = ewald_spline3_table_scale(ic);
maxr = (rtab > ic->rcoulomb) ? rtab : ic->rcoulomb;
ic->tabq_size = (int)(maxr*ic->tabq_scale) + 2;
#include <config.h>
#endif
-#include <string.h>
#include <float.h>
+#include <stdlib.h>
+#include <string.h>
+
#include "typedefs.h"
#include "mdebin.h"
-#include "gromacs/utility/smalloc.h"
-#include "physics.h"
+#include "gromacs/math/units.h"
#include "gromacs/fileio/enxio.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "disre.h"
-#include "main.h"
#include "network.h"
#include "names.h"
#include "orires.h"
#include "constr.h"
-#include "mtop_util.h"
-#include "xvgr.h"
+#include "gromacs/topology/mtop_util.h"
+#include "gromacs/fileio/xvgr.h"
#include "gromacs/fileio/gmxfio.h"
#include "macros.h"
#include "mdrun.h"
#include "mdebin_bar.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/utility/smalloc.h"
static const char *conrmsd_nm[] = { "Constr. rmsd", "Constr.2 rmsd" };
if (Nsep > 1)
{
/* and add the closing parenthesis */
- str += sprintf(str, ")");
+ sprintf(str, ")");
}
}
nsets += 1; /*add fep state for expanded ensemble */
}
- if (fep->bPrintEnergy)
+ if (fep->edHdLPrintEnergy != edHdLPrintEnergyNO)
{
nsets += 1; /* add energy to the dhdl as well */
}
s += 1;
}
- if (fep->bPrintEnergy)
+ if (fep->edHdLPrintEnergy != edHdLPrintEnergyNO)
{
- sprintf(buf, "%s (%s)", "Energy", unit_energy);
+ switch (fep->edHdLPrintEnergy)
+ {
+ case edHdLPrintEnergyPOTENTIAL:
+ sprintf(buf, "%s (%s)", "Potential Energy", unit_energy);
+ break;
+ case edHdLPrintEnergyTOTAL:
+ case edHdLPrintEnergyYES:
+ default:
+ sprintf(buf, "%s (%s)", "Total Energy", unit_energy);
+ }
setname[s] = strdup(buf);
s += 1;
}
nsetsbegin = 0;
}
- if (fep->bPrintEnergy)
+ if (fep->edHdLPrintEnergy != edHdLPrintEnergyNO)
{
nsetsbegin += 1;
}
fprintf(md->fp_dhdl, " %4d", state->fep_state);
}
/* total energy (for if the temperature changes */
- if (fep->bPrintEnergy)
+
+ if (fep->edHdLPrintEnergy != edHdLPrintEnergyNO)
{
- store_energy = enerd->term[F_ETOT];
+ switch (fep->edHdLPrintEnergy)
+ {
+ case edHdLPrintEnergyPOTENTIAL:
+ store_energy = enerd->term[F_EPOT];
+ break;
+ case edHdLPrintEnergyTOTAL:
+ case edHdLPrintEnergyYES:
+ default:
+ store_energy = enerd->term[F_ETOT];
+ }
fprintf(md->fp_dhdl, " %#.8g", store_energy);
}
#include <float.h>
#include <math.h>
#include "typedefs.h"
-#include "gmx_fatal.h"
+#include "gromacs/utility/fatalerror.h"
#include "mdebin.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/fileio/enxio.h"
bExpanded = TRUE;
}
/* whether to print energies */
- if (ir->fepvals->bPrintEnergy)
+ if (ir->fepvals->edHdLPrintEnergy != edHdLPrintEnergyNO)
{
dhc->ndh += 1;
bEnergy = TRUE;
#include <config.h>
#endif
+#include <math.h>
#include <string.h>
#include <time.h>
-#include <math.h>
-#include "sysstuff.h"
-#include "gromacs/utility/cstringutil.h"
+
#include "network.h"
-#include "gromacs/utility/smalloc.h"
#include "nrnb.h"
-#include "main.h"
#include "force.h"
#include "macros.h"
#include "names.h"
-#include "gmx_fatal.h"
#include "txtdump.h"
#include "typedefs.h"
#include "update.h"
#include "constr.h"
-#include "vec.h"
#include "tgroup.h"
#include "mdebin.h"
#include "vsite.h"
#include "domdec.h"
#include "mdatoms.h"
#include "ns.h"
-#include "mtop_util.h"
+#include "gromacs/topology/mtop_util.h"
#include "pme.h"
#include "bondf.h"
#include "gmx_omp_nthreads.h"
#include "md_logging.h"
#include "gromacs/fileio/confio.h"
+#include "gromacs/fileio/mtxio.h"
#include "gromacs/fileio/trajectory_writing.h"
-#include "gromacs/linearalgebra/mtxio.h"
+#include "gromacs/imd/imd.h"
+#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/linearalgebra/sparsematrix.h"
+#include "gromacs/math/vec.h"
+#include "gromacs/pbcutil/mshift.h"
+#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
-#include "gromacs/imd/imd.h"
+#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/smalloc.h"
typedef struct {
t_state s;
/* Constrain the starting coordinates */
dvdl_constr = 0;
constrain(PAR(cr) ? NULL : fplog, TRUE, TRUE, constr, &(*top)->idef,
- ir, NULL, cr, -1, 0, mdatoms,
+ ir, NULL, cr, -1, 0, 1.0, mdatoms,
ems->s.x, ems->s.x, NULL, fr->bMolPBC, ems->s.box,
ems->s.lambda[efptFEP], &dvdl_constr,
NULL, NULL, nrnb, econqCoord, FALSE, 0, 0);
wallcycle_start(wcycle, ewcCONSTR);
dvdl_constr = 0;
constrain(NULL, TRUE, TRUE, constr, &top->idef,
- ir, NULL, cr, count, 0, md,
+ ir, NULL, cr, count, 0, 1.0, md,
s1->x, s2->x, NULL, bMolPBC, s2->box,
s2->lambda[efptBONDED], &dvdl_constr,
NULL, NULL, nrnb, econqCoord, FALSE, 0, 0);
wallcycle_start(wcycle, ewcCONSTR);
dvdl_constr = 0;
constrain(NULL, FALSE, FALSE, constr, &top->idef,
- inputrec, NULL, cr, count, 0, mdatoms,
+ inputrec, NULL, cr, count, 0, 1.0, mdatoms,
ems->s.x, ems->f, ems->f, fr->bMolPBC, ems->s.box,
ems->s.lambda[efptBONDED], &dvdl_constr,
NULL, &shake_vir, nrnb, econqForceDispl, FALSE, 0, 0);
}
stepsize = 1.0/fnorm;
- converged = FALSE;
/* Start the loop over BFGS steps.
* Each successful step is counted, and we continue until
#include <config.h>
#endif
+#include <assert.h>
#include <math.h>
+#include <stdlib.h>
#include <string.h>
-#include <assert.h>
-#include "gromacs/utility/smalloc.h"
+
#include "macros.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "nbnxn_consts.h"
#include "nbnxn_internal.h"
#include "nbnxn_atomdata.h"
#include "nbnxn_search.h"
-#include "gromacs/utility/gmxomp.h"
#include "gmx_omp_nthreads.h"
#include "thread_mpi/atomic.h"
+#include "gromacs/pbcutil/ishift.h"
+#include "gromacs/utility/gmxomp.h"
+#include "gromacs/utility/smalloc.h"
+
/* Default nbnxn allocation routine, allocates NBNXN_MEM_ALIGN byte aligned */
void nbnxn_alloc_aligned(void **ptr, size_t nbytes)
{
}
}
- /* Sets the atom type and LJ data in nbnxn_atomdata_t */
+ /* Sets the atom type in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t *nbat,
int ngrid,
const nbnxn_search_t nbs,
copy_int_to_nbat_int(nbs->a+ash, grid->cxy_na[i], ncz*grid->na_sc,
type, nbat->ntype-1, nbat->type+ash);
+ }
+ }
+ }
- if (nbat->comb_rule != ljcrNONE)
+ /* Sets the LJ combination rule parameters in nbnxn_atomdata_t */
+ static void nbnxn_atomdata_set_ljcombparams(nbnxn_atomdata_t *nbat,
+ int ngrid,
+ const nbnxn_search_t nbs)
+ {
+ int g, i, ncz, ash;
+ const nbnxn_grid_t *grid;
+
+ if (nbat->comb_rule != ljcrNONE)
+ {
+ for (g = 0; g < ngrid; g++)
+ {
+ grid = &nbs->grid[g];
+
+ /* Loop over all columns and copy and fill */
+ for (i = 0; i < grid->ncx*grid->ncy; i++)
{
+ ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
+ ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;
+
if (nbat->XFormat == nbatX4)
{
copy_lj_to_nbat_lj_comb_x4(nbat->nbfp_comb,
nbnxn_atomdata_mask_fep(nbat, ngrid, nbs);
}
+ /* This must be done after masking types for FEP */
+ nbnxn_atomdata_set_ljcombparams(nbat, ngrid, nbs);
+
nbnxn_atomdata_set_energygroups(nbat, ngrid, nbs, atinfo);
}
#include <string.h>
#include <assert.h>
-#include "sysstuff.h"
-#include "gromacs/utility/smalloc.h"
#include "types/commrec.h"
#include "macros.h"
#include "gromacs/math/utilities.h"
-#include "vec.h"
-#include "pbc.h"
+#include "gromacs/math/vec.h"
#include "nbnxn_consts.h"
/* nbnxn_internal.h included gromacs/simd/macros.h */
#include "nbnxn_internal.h"
#include "nrnb.h"
#include "ns.h"
-#include "gromacs/fileio/gmxfio.h"
+#include "gromacs/pbcutil/ishift.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/utility/smalloc.h"
#ifdef NBNXN_SEARCH_BB_SIMD4
/* Always use 4-wide SIMD for bounding box calculations */
int cxy_start, int cxy_end,
int *sort_work)
{
- int cxy;
- int cx, cy, cz = -1, c = -1, ncz;
- int na, ash, na_c, ind, a;
- int subdiv_z, sub_z, na_z, ash_z;
- int subdiv_y, sub_y, na_y, ash_y;
- int subdiv_x, sub_x, na_x, ash_x;
+ int cxy;
+ int cx, cy, cz = -1, c = -1, ncz;
+ int na, ash, na_c, ind, a;
+ int subdiv_z, sub_z, na_z, ash_z;
+ int subdiv_y, sub_y, na_y, ash_y;
+ int subdiv_x, sub_x, na_x, ash_x;
- /* cppcheck-suppress unassignedVariable */
nbnxn_bb_t bb_work_array[2], *bb_work_aligned;
bb_work_aligned = (nbnxn_bb_t *)(((size_t)(bb_work_array+1)) & (~((size_t)15)));
* Note that the charge has been set to zero,
* but we need to avoid 0/0, as perturbed atoms
* can be on top of each other.
- * (and the LJ parameters have not been zeroed)
*/
nbl->cj[cj_ind].excl &= ~(1U << (i*nbl->na_cj + j));
}
#include <config.h>
#endif
+#include <assert.h>
+#include <math.h>
#include <stdio.h>
+#include <stdlib.h>
#include <string.h>
-#include <math.h>
-#include <assert.h>
+
#include "typedefs.h"
#include "txtdump.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "gromacs/utility/smalloc.h"
#include "coulomb.h"
-#include "gmx_fatal.h"
+#include "gromacs/utility/fatalerror.h"
#include "pme.h"
#include "network.h"
-#include "physics.h"
+#include "gromacs/math/units.h"
#include "nrnb.h"
#include "macros.h"
+#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/fft/parallel_3dfft.h"
-#include "gromacs/fileio/futil.h"
+#include "gromacs/utility/futil.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/timing/cyclecounter.h"
{
if (atc->count[atc->nodeid] + nsend != n)
{
- gmx_fatal(FARGS, "%d particles communicated to PME node %d are more than 2/3 times the cut-off out of the domain decomposition cell of their charge group in dimension %c.\n"
+ gmx_fatal(FARGS, "%d particles communicated to PME rank %d are more than 2/3 times the cut-off out of the domain decomposition cell of their charge group in dimension %c.\n"
"This usually means that your system is not well equilibrated.",
n - (atc->count[atc->nodeid] + nsend),
pme->nodeid, 'x'+atc->dimind);
/* Communicate the count */
if (debug)
{
- fprintf(debug, "dimind %d PME node %d send to node %d: %d\n",
+ fprintf(debug, "dimind %d PME rank %d send to rank %d: %d\n",
atc->dimind, atc->nodeid, commnode[i], scount);
}
pme_dd_sendrecv(atc, FALSE, i,
/* Copy data to contiguous send buffer */
if (debug)
{
- fprintf(debug, "PME send node %d %d -> %d grid start %d Communicating %d to %d\n",
+ fprintf(debug, "PME send rank %d %d -> %d grid start %d Communicating %d to %d\n",
pme->nodeid, overlap->nodeid, send_id,
pme->pmegrid_start_iy,
send_index0-pme->pmegrid_start_iy,
/* Get data from contiguous recv buffer */
if (debug)
{
- fprintf(debug, "PME recv node %d %d <- %d grid start %d Communicating %d to %d\n",
+ fprintf(debug, "PME recv rank %d %d <- %d grid start %d Communicating %d to %d\n",
pme->nodeid, overlap->nodeid, recv_id,
pme->pmegrid_start_iy,
recv_index0-pme->pmegrid_start_iy,
if (debug)
{
- fprintf(debug, "PME send node %d %d -> %d grid start %d Communicating %d to %d\n",
+ fprintf(debug, "PME send rank %d %d -> %d grid start %d Communicating %d to %d\n",
pme->nodeid, overlap->nodeid, send_id,
pme->pmegrid_start_ix,
send_index0-pme->pmegrid_start_ix,
send_index0-pme->pmegrid_start_ix+send_nindex);
- fprintf(debug, "PME recv node %d %d <- %d grid start %d Communicating %d to %d\n",
+ fprintf(debug, "PME recv rank %d %d <- %d grid start %d Communicating %d to %d\n",
pme->nodeid, overlap->nodeid, recv_id,
pme->pmegrid_start_ix,
recv_index0-pme->pmegrid_start_ix,
*bValidSettings = FALSE;
return;
}
- gmx_fatal(FARGS, "The number of PME grid lines per node along x is %g. But when using OpenMP threads, the number of grid lines per node along x should be >= pme_order (%d) or = pmeorder-1. To resolve this issue, use less nodes along x (and possibly more along y and/or z) by specifying -dd manually.",
+ gmx_fatal(FARGS, "The number of PME grid lines per rank along x is %g. But when using OpenMP threads, the number of grid lines per rank along x should be >= pme_order (%d) or = pmeorder-1. To resolve this issue, use fewer ranks along x (and possibly more along y and/or z) by specifying -dd manually.",
nkx/(double)nnodes_major, pme_order);
}
MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
if (pme->nnodes != nnodes_major*nnodes_minor)
{
- gmx_incons("PME node count mismatch");
+ gmx_incons("PME rank count mismatch");
}
}
else
{
if (pme->nnodes % nnodes_major != 0)
{
- gmx_incons("For 2D PME decomposition, #PME nodes must be divisible by the number of nodes in the major dimension");
+ gmx_incons("For 2D PME decomposition, #PME ranks must be divisible by the number of ranks in the major dimension");
}
pme->ndecompdim = 2;
"\n"
"NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
" For optimal PME load balancing\n"
- " PME grid_x (%d) and grid_y (%d) should be divisible by #PME_nodes_x (%d)\n"
- " and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_nodes_y (%d)\n"
+ " PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x (%d)\n"
+ " and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y (%d)\n"
"\n",
(int)((imbal-1)*100 + 0.5),
pme->nkx, pme->nky, pme->nnodes_major,
if (debug)
{
- fprintf(debug, "PME: nnodes = %d, nodeid = %d\n",
+ fprintf(debug, "PME: number of ranks = %d, rank = %d\n",
cr->nnodes, cr->nodeid);
fprintf(debug, "Grid = %p\n", (void*)grid);
if (grid == NULL)
if (debug)
{
- fprintf(debug, "Node= %6d, pme local particles=%6d\n",
+ fprintf(debug, "Rank= %6d, pme local particles=%6d\n",
cr->nodeid, atc->n);
}
#include "typedefs.h"
#include "types/commrec.h"
#include "gromacs/utility/smalloc.h"
-#include "gmx_fatal.h"
-#include "vec.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/math/vec.h"
#include "pme.h"
#include "network.h"
#include "domdec.h"
if (debug)
{
- fprintf(debug, "PP node %d sending to PME node %d: %d%s%s\n",
+ fprintf(debug, "PP rank %d sending to PME rank %d: %d%s%s\n",
cr->sim_nodeid, dd->pme_nodeid, n,
flags & PP_PME_CHARGE ? " charges" : "",
flags & PP_PME_COORD ? " coordinates" : "");
if (debug)
{
- fprintf(debug, "PME only node receiving:%s%s%s%s%s\n",
+ fprintf(debug, "PME only rank receiving:%s%s%s%s%s\n",
(cnb.flags & PP_PME_CHARGE) ? " charges" : "",
(cnb.flags & PP_PME_COORD ) ? " coordinates" : "",
(cnb.flags & PP_PME_FINISH) ? " finish" : "",
nat += pme_pp->nat[sender];
if (debug)
{
- fprintf(debug, "Received from PP node %d: %d "
+ fprintf(debug, "Received from PP rank %d: %d "
"charges\n",
pme_pp->node[sender], pme_pp->nat[sender]);
}
{
if (!(pme_pp->flags_charge & (PP_PME_CHARGE | PP_PME_SQRTC6)))
{
- gmx_incons("PME-only node received coordinates before charges and/or C6-values"
+ gmx_incons("PME-only rank received coordinates before charges and/or C6-values"
);
}
if (*bFreeEnergy_q && !(pme_pp->flags_charge & PP_PME_CHARGEB))
{
- gmx_incons("PME-only node received free energy request, but "
+ gmx_incons("PME-only rank received free energy request, but "
"did not receive B-state charges");
}
if (*bFreeEnergy_lj && !(pme_pp->flags_charge & PP_PME_SQRTC6B))
{
- gmx_incons("PME-only node received free energy request, but "
+ gmx_incons("PME-only rank received free energy request, but "
"did not receive B-state C6-values");
}
nat += pme_pp->nat[sender];
if (debug)
{
- fprintf(debug, "Received from PP node %d: %d "
+ fprintf(debug, "Received from PP rank %d: %d "
"coordinates\n",
pme_pp->node[sender], pme_pp->nat[sender]);
}
if (debug)
{
fprintf(debug,
- "PP node %d receiving from PME node %d: virial and energy\n",
+ "PP rank %d receiving from PME rank %d: virial and energy\n",
cr->sim_nodeid, cr->dd->pme_nodeid);
}
#ifdef GMX_MPI
if (debug)
{
- fprintf(debug, "PME node sending to PP node %d: virial and energy\n",
+ fprintf(debug, "PME rank sending to PP rank %d: virial and energy\n",
pme_pp->node_peer);
}
#ifdef GMX_MPI
#endif
#include <math.h>
-#include "sysstuff.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
#include "typedefs.h"
#include "types/commrec.h"
#include "macros.h"
-#include "gromacs/utility/smalloc.h"
-#include "physics.h"
+#include "gromacs/math/units.h"
#include "macros.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "force.h"
-#include "invblock.h"
#include "gromacs/fileio/confio.h"
#include "names.h"
#include "network.h"
-#include "pbc.h"
#include "ns.h"
#include "nrnb.h"
#include "bondf.h"
-#include "mshift.h"
#include "txtdump.h"
#include "qmmm.h"
-#include <stdio.h>
-#include <string.h>
-#include "gmx_fatal.h"
#include "typedefs.h"
-#include <stdlib.h>
-#include "mtop_util.h"
+#include "gromacs/topology/mtop_util.h"
+#include "gromacs/pbcutil/ishift.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/smalloc.h"
/* declarations of the interfaces to the QM packages. The _SH indicate
* the QM interfaces can be used for Surface Hopping simulations
/* issue a fatal if the user wants to run with more than one rank */
if (PAR(cr))
{
- gmx_fatal(FARGS, "QM/MM does not work in parallel, use a single node instead\n");
+ gmx_fatal(FARGS, "QM/MM does not work in parallel, use a single rank instead\n");
}
/* Make a local copy of the QMMMrec */
#include <config.h>
#endif
+#include <stdlib.h>
#include <string.h>
+
#include "typedefs.h"
#include "types/commrec.h"
-#include "gromacs/utility/smalloc.h"
-#include "gmx_fatal.h"
-#include "vec.h"
#include "txtdump.h"
#include "force.h"
#include "mdrun.h"
#include "names.h"
#include "constr.h"
#include "domdec.h"
-#include "physics.h"
+#include "gromacs/math/units.h"
#include "shellfc.h"
-#include "mtop_util.h"
+#include "gromacs/topology/mtop_util.h"
#include "chargegroup.h"
#include "macros.h"
+#include "gromacs/math/vec.h"
+#include "gromacs/pbcutil/mshift.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/smalloc.h"
typedef struct {
int nnucl;
}
}
}
- constrain(log, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, md,
+ constrain(log, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, 1.0, md,
x, xnold-start, NULL, bMolPBC, box,
lambda[efptBONDED], &(dvdlambda[efptBONDED]),
NULL, NULL, nrnb, econqCoord, FALSE, 0, 0);
- constrain(log, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, md,
+ constrain(log, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, 1.0, md,
x, xnew-start, NULL, bMolPBC, box,
lambda[efptBONDED], &(dvdlambda[efptBONDED]),
NULL, NULL, nrnb, econqCoord, FALSE, 0, 0);
}
/* Project the acceleration on the old bond directions */
- constrain(log, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, md,
+ constrain(log, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, 1.0, md,
x_old, xnew-start, acc_dir, bMolPBC, box,
lambda[efptBONDED], &(dvdlambda[efptBONDED]),
NULL, NULL, nrnb, econqDeriv_FlexCon, FALSE, 0, 0);
#include "typedefs.h"
#include "gromacs/utility/cstringutil.h"
-#include "gromacs/utility/smalloc.h"
#include "names.h"
#include "txtdump.h"
-#include "pbc.h"
+#include "gromacs/pbcutil/pbc.h"
#include "chargegroup.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "nrnb.h"
-#include "mshift.h"
#include "mdrun.h"
#include "sim_util.h"
#include "update.h"
-#include "physics.h"
-#include "main.h"
+#include "gromacs/math/units.h"
#include "mdatoms.h"
#include "force.h"
#include "bondf.h"
#include "network.h"
#include "calcmu.h"
#include "constr.h"
-#include "xvgr.h"
#include "copyrite.h"
#include "domdec.h"
#include "genborn.h"
#include "../gmxlib/nonbonded/nb_kernel.h"
#include "../gmxlib/nonbonded/nb_free_energy.h"
+#include "gromacs/legacyheaders/types/commrec.h"
+#include "gromacs/pbcutil/ishift.h"
+#include "gromacs/pbcutil/mshift.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/gmxmpi.h"
+#include "gromacs/utility/smalloc.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
time_string[i] = '\0';
}
- fprintf(fplog, "%s on node %d %s\n", title, nodeid, time_string);
+ fprintf(fplog, "%s on rank %d %s\n", title, nodeid, time_string);
}
void print_start(FILE *fplog, t_commrec *cr,
/* constrain the current position */
constrain(NULL, TRUE, FALSE, constr, &(top->idef),
- ir, NULL, cr, step, 0, md,
+ ir, NULL, cr, step, 0, 1.0, md,
state->x, state->x, NULL,
fr->bMolPBC, state->box,
state->lambda[efptBONDED], &dvdl_dum,
/* also may be useful if we need the ekin from the halfstep for velocity verlet */
/* might not yet treat veta correctly */
constrain(NULL, TRUE, FALSE, constr, &(top->idef),
- ir, NULL, cr, step, 0, md,
+ ir, NULL, cr, step, 0, 1.0, md,
state->x, state->v, state->v,
fr->bMolPBC, state->box,
state->lambda[efptBONDED], &dvdl_dum,
}
dvdl_dum = 0;
constrain(NULL, TRUE, FALSE, constr, &(top->idef),
- ir, NULL, cr, step, -1, md,
+ ir, NULL, cr, step, -1, 1.0, md,
state->x, savex, NULL,
fr->bMolPBC, state->box,
state->lambda[efptBONDED], &dvdl_dum,
{
please_cite(fplog, "Bussi2007a");
}
+ if (ir->eI == eiSD1)
+ {
+ please_cite(fplog, "Goga2012");
+ }
}
init_nrnb(nrnb);
#include "typedefs.h"
#include "names.h"
#include "gromacs/utility/smalloc.h"
-#include "gmx_fatal.h"
-#include "gromacs/fileio/futil.h"
-#include "xvgr.h"
-#include "vec.h"
-#include "main.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/futil.h"
+#include "gromacs/fileio/xvgr.h"
+#include "gromacs/math/vec.h"
#include "network.h"
-#include "physics.h"
+#include "gromacs/math/units.h"
#include "force.h"
#include "gromacs/fileio/gmxfio.h"
#include "macros.h"
}
}
+ /* Returns the table scale (1/spacing, unit 1/length) required for
+ * third-order spline interpolation of a function.
+ *
+ * third_deriv_max: maximum of the third derivative of the function
+ *                  over the tabulated range (in scaled x units)
+ * x_scale:         scale of the function argument (unit 1/length)
+ * func_tol:        absolute tolerance on the function value
+ *
+ * The result is the larger of the scale needed to meet the
+ * derivative (force) tolerance and the scale needed to meet the
+ * function (energy) tolerance.
+ */
+ static double spline3_table_scale(double third_deriv_max,
+ double x_scale,
+ double func_tol)
+ {
+ double deriv_tol;
+ double sc_deriv, sc_func;
+
+ /* Force tolerance: single precision accuracy */
+ deriv_tol = GMX_FLOAT_EPS;
+ /* NOTE(review): the 6*4 factor presumably comes from the cubic
+ * spline derivative error bound -- confirm against the spline
+ * error analysis used for the other tables. */
+ sc_deriv = sqrt(third_deriv_max/(6*4*deriv_tol*x_scale))*x_scale;
+
+ /* Don't try to be more accurate on energy than the precision */
+ func_tol = max(func_tol, GMX_REAL_EPS);
+ /* NOTE(review): 6*12*sqrt(3) is presumably the cubic spline
+ * function-value error bound constant -- TODO confirm. */
+ sc_func = pow(third_deriv_max/(6*12*sqrt(3)*func_tol), 1.0/3.0)*x_scale;
+
+ return max(sc_deriv, sc_func);
+ }
+
/* The scale (1/spacing) for third order spline interpolation
* of the Ewald mesh contribution which needs to be subtracted
* from the non-bonded interactions.
+ * Since there is currently only one spacing for Coulomb and LJ,
+ * the finest spacing is used if both Ewald types are present.
+ *
+ * Note that we could also implement completely separate tables
+ * for Coulomb and LJ Ewald, each with their own spacing.
+ * The current setup with the same spacing can provide slightly
+ * faster kernels with both Coulomb and LJ Ewald, especially
+ * when interleaving both tables (currently not implemented).
*/
- real ewald_spline3_table_scale(real ewaldcoeff, real rc)
+ real ewald_spline3_table_scale(const interaction_const_t *ic)
{
- double erf_x_d3 = 1.0522; /* max of (erf(x)/x)''' */
- double ftol, etol;
- double sc_f, sc_e;
+ real sc;
- /* Force tolerance: single precision accuracy */
- ftol = GMX_FLOAT_EPS;
- sc_f = sqrt(erf_x_d3/(6*4*ftol*ewaldcoeff))*ewaldcoeff;
+ sc = 0;
+
+ if (ic->eeltype == eelEWALD || EEL_PME(ic->eeltype))
+ {
+ double erf_x_d3 = 1.0522; /* max of (erf(x)/x)''' */
+ double etol;
+ real sc_q;
+
+ /* Energy tolerance: 0.1 times the cut-off jump */
+ etol = 0.1*gmx_erfc(ic->ewaldcoeff_q*ic->rcoulomb);
+
+ sc_q = spline3_table_scale(erf_x_d3, ic->ewaldcoeff_q, etol);
- /* Energy tolerance: 10x more accurate than the cut-off jump */
- etol = 0.1*gmx_erfc(ewaldcoeff*rc);
- etol = max(etol, GMX_REAL_EPS);
- sc_e = pow(erf_x_d3/(6*12*sqrt(3)*etol), 1.0/3.0)*ewaldcoeff;
+ if (debug)
+ {
+ fprintf(debug, "Ewald Coulomb quadratic spline table spacing: %f 1/nm\n", 1/sc_q);
+ }
+
+ sc = max(sc, sc_q);
+ }
+
+ if (EVDW_PME(ic->vdwtype))
+ {
+ double func_d3 = 0.42888; /* max of (x^-6 (1 - exp(-x^2)(1+x^2+x^4/2)))''' */
+ double xrc2, etol;
+ real sc_lj;
+
+ /* Energy tolerance: 0.1 times the cut-off jump */
+ xrc2 = sqr(ic->ewaldcoeff_lj*ic->rvdw);
+ etol = 0.1*exp(-xrc2)*(1 + xrc2 + xrc2*xrc2/2.0);
+
+ sc_lj = spline3_table_scale(func_d3, ic->ewaldcoeff_lj, etol);
+
+ if (debug)
+ {
+ fprintf(debug, "Ewald LJ quadratic spline table spacing: %f 1/nm\n", 1/sc_lj);
+ }
+
+ sc = max(sc, sc_lj);
+ }
- return max(sc_f, sc_e);
+ return sc;
}
/* Calculate the potential and force for an r value
#include <math.h>
#include "types/commrec.h"
-#include "sysstuff.h"
-#include "gromacs/utility/smalloc.h"
#include "typedefs.h"
#include "nrnb.h"
-#include "physics.h"
+#include "gromacs/math/units.h"
#include "macros.h"
-#include "vec.h"
-#include "main.h"
+#include "gromacs/math/vec.h"
#include "update.h"
#include "gromacs/random/random.h"
-#include "mshift.h"
#include "tgroup.h"
#include "force.h"
#include "names.h"
#include "gmx_omp_nthreads.h"
#include "gromacs/fileio/confio.h"
-#include "gromacs/fileio/futil.h"
+#include "gromacs/pbcutil/mshift.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/pulling/pull.h"
#include "gromacs/timing/wallcycle.h"
+#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxomp.h"
-#include "gromacs/pulling/pull.h"
+#include "gromacs/utility/smalloc.h"
/* For debugging, start at v(-dt/2) for velocity verlet -- uncomment next line */
/*#define STARTFROMDT2*/
unsigned short cFREEZE[], unsigned short cACC[],
unsigned short cTC[],
rvec x[], rvec xprime[], rvec v[], rvec f[],
- int ngtc, real tau_t[], real ref_t[],
+ int ngtc, real ref_t[],
+ gmx_bool bDoConstr,
+ gmx_bool bFirstHalfConstr,
gmx_int64_t step, int seed, int* gatindex)
{
gmx_sd_const_t *sdc;
gmx_sd_sigma_t *sig;
real kT;
int gf = 0, ga = 0, gt = 0;
- real ism, sd_V;
+ real ism;
int n, d;
sdc = sd->sdc;
sig[n].V = sqrt(kT*(1 - sdc[n].em*sdc[n].em));
}
- for (n = start; n < nrend; n++)
+ if (!bDoConstr)
{
- real rnd[3];
- int ng = gatindex ? gatindex[n] : n;
- ism = sqrt(invmass[n]);
- if (cFREEZE)
- {
- gf = cFREEZE[n];
- }
- if (cACC)
+ for (n = start; n < nrend; n++)
{
- ga = cACC[n];
+ real rnd[3];
+ int ng = gatindex ? gatindex[n] : n;
+
+ ism = sqrt(invmass[n]);
+ if (cFREEZE)
+ {
+ gf = cFREEZE[n];
+ }
+ if (cACC)
+ {
+ ga = cACC[n];
+ }
+ if (cTC)
+ {
+ gt = cTC[n];
+ }
+
+ gmx_rng_cycle_3gaussian_table(step, ng, seed, RND_SEED_UPDATE, rnd);
+
+ for (d = 0; d < DIM; d++)
+ {
+ if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
+ {
+ real sd_V, vn;
+
+ sd_V = ism*sig[gt].V*rnd[d];
+ vn = v[n][d] + (invmass[n]*f[n][d] + accel[ga][d])*dt;
+ v[n][d] = vn*sdc[gt].em + sd_V;
+ /* Here we include half of the friction+noise
+ * update of v into the integration of x.
+ */
+ xprime[n][d] = x[n][d] + 0.5*(vn + v[n][d])*dt;
+ }
+ else
+ {
+ v[n][d] = 0.0;
+ xprime[n][d] = x[n][d];
+ }
+ }
}
- if (cTC)
+ }
+ else
+ {
+ /* We do have constraints */
+ if (bFirstHalfConstr)
{
- gt = cTC[n];
- }
+ /* First update without friction and noise */
+ real im;
- gmx_rng_cycle_3gaussian_table(step, ng, seed, RND_SEED_UPDATE, rnd);
- for (d = 0; d < DIM; d++)
- {
- if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
+ for (n = start; n < nrend; n++)
{
- sd_V = ism*sig[gt].V*rnd[d];
+ im = invmass[n];
- v[n][d] = v[n][d]*sdc[gt].em
- + (invmass[n]*f[n][d] + accel[ga][d])*tau_t[gt]*(1 - sdc[gt].em)
- + sd_V;
+ if (cFREEZE)
+ {
+ gf = cFREEZE[n];
+ }
+ if (cACC)
+ {
+ ga = cACC[n];
+ }
+ if (cTC)
+ {
+ gt = cTC[n];
+ }
- xprime[n][d] = x[n][d] + v[n][d]*dt;
+ for (d = 0; d < DIM; d++)
+ {
+ if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
+ {
+ v[n][d] = v[n][d] + (im*f[n][d] + accel[ga][d])*dt;
+ xprime[n][d] = x[n][d] + v[n][d]*dt;
+ }
+ else
+ {
+ v[n][d] = 0.0;
+ xprime[n][d] = x[n][d];
+ }
+ }
}
- else
+ }
+ else
+ {
+ /* Update friction and noise only */
+ for (n = start; n < nrend; n++)
{
- v[n][d] = 0.0;
- xprime[n][d] = x[n][d];
+ real rnd[3];
+ int ng = gatindex ? gatindex[n] : n;
+
+ ism = sqrt(invmass[n]);
+ if (cFREEZE)
+ {
+ gf = cFREEZE[n];
+ }
+ if (cACC)
+ {
+ ga = cACC[n];
+ }
+ if (cTC)
+ {
+ gt = cTC[n];
+ }
+
+ gmx_rng_cycle_3gaussian_table(step, ng, seed, RND_SEED_UPDATE, rnd);
+
+ for (d = 0; d < DIM; d++)
+ {
+ if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
+ {
+ real sd_V, vn;
+
+ sd_V = ism*sig[gt].V*rnd[d];
+ vn = v[n][d];
+ v[n][d] = vn*sdc[gt].em + sd_V;
+ /* Add the friction and noise contribution only */
+ xprime[n][d] = xprime[n][d] + 0.5*(v[n][d] - vn)*dt;
+ }
+ }
}
}
}
}
}
}
- constrain(NULL, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, md,
+ constrain(NULL, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, 1.0, md,
state->x, xp, xp, bMolPBC, state->box, state->lambda[efptBONDED], NULL,
NULL, vir_lr_constr, nrnb, econqCoord, ir->epc == epcMTTK, state->veta, state->veta);
}
if (EI_VV(inputrec->eI) && bFirstHalf)
{
constrain(NULL, bLog, bEner, constr, idef,
- inputrec, ekind, cr, step, 1, md,
+ inputrec, ekind, cr, step, 1, 1.0, md,
state->x, state->v, state->v,
bMolPBC, state->box,
state->lambda[efptBONDED], dvdlambda,
else
{
constrain(NULL, bLog, bEner, constr, idef,
- inputrec, ekind, cr, step, 1, md,
+ inputrec, ekind, cr, step, 1, 1.0, md,
state->x, xprime, NULL,
bMolPBC, state->box,
state->lambda[efptBONDED], dvdlambda,
}
where();
+
+ if (inputrec->eI == eiSD1 && bDoConstr && !bFirstHalf)
+ {
+ xprime = get_xprime(state, upd);
+
+ nth = gmx_omp_nthreads_get(emntUpdate);
+
+ #pragma omp parallel for num_threads(nth) schedule(static)
+
+ for (th = 0; th < nth; th++)
+ {
+ int start_th, end_th;
+
+ start_th = start + ((nrend-start)* th )/nth;
+ end_th = start + ((nrend-start)*(th+1))/nth;
+
+ /* The second part of the SD integration */
+ do_update_sd1(upd->sd,
+ start_th, end_th, dt,
+ inputrec->opts.acc, inputrec->opts.nFreeze,
+ md->invmass, md->ptype,
+ md->cFREEZE, md->cACC, md->cTC,
+ state->x, xprime, state->v, force,
+ inputrec->opts.ngtc, inputrec->opts.ref_t,
+ bDoConstr, FALSE,
+ step, inputrec->ld_seed,
+ DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL);
+ }
+ inc_nrnb(nrnb, eNR_UPDATE, homenr);
+
+ if (bDoConstr)
+ {
+ /* Constrain the coordinates xprime for half a time step */
+ wallcycle_start(wcycle, ewcCONSTR);
+
+ constrain(NULL, bLog, bEner, constr, idef,
+ inputrec, NULL, cr, step, 1, 0.5, md,
+ state->x, xprime, NULL,
+ bMolPBC, state->box,
+ state->lambda[efptBONDED], dvdlambda,
+ state->v, NULL, nrnb, econqCoord, FALSE, 0, 0);
+
+ wallcycle_stop(wcycle, ewcCONSTR);
+ }
+ }
+
if ((inputrec->eI == eiSD2) && !(bFirstHalf))
{
xprime = get_xprime(state, upd);
/* Constrain the coordinates xprime */
wallcycle_start(wcycle, ewcCONSTR);
constrain(NULL, bLog, bEner, constr, idef,
- inputrec, NULL, cr, step, 1, md,
+ inputrec, NULL, cr, step, 1, 1.0, md,
state->x, xprime, NULL,
bMolPBC, state->box,
state->lambda[efptBONDED], dvdlambda,
}
}
+
/* We must always unshift after updating coordinates; if we did not shake
x was shifted in do_force */
gmx_constr_t constr,
t_idef *idef)
{
- gmx_bool bNH, bPR, bLastStep, bLog = FALSE, bEner = FALSE;
+ gmx_bool bNH, bPR, bLastStep, bLog = FALSE, bEner = FALSE, bDoConstr = FALSE;
double dt, alpha;
real *imass, *imassin;
rvec *force;
rvec *vcom, *xcom, *vall, *xall, *xin, *vin, *forcein, *fall, *xpall, *xprimein, *xprime;
int nth, th;
+ bDoConstr = (NULL != constr);
+
/* Running the velocity half does nothing except for velocity verlet */
if ((UpdatePart == etrtVELOCITY1 || UpdatePart == etrtVELOCITY2) &&
!EI_VV(inputrec->eI))
}
break;
case (eiSD1):
+ /* With constraints, the SD1 update is done in 2 parts */
do_update_sd1(upd->sd,
start_th, end_th, dt,
inputrec->opts.acc, inputrec->opts.nFreeze,
md->invmass, md->ptype,
md->cFREEZE, md->cACC, md->cTC,
state->x, xprime, state->v, force,
- inputrec->opts.ngtc, inputrec->opts.tau_t, inputrec->opts.ref_t,
+ inputrec->opts.ngtc, inputrec->opts.ref_t,
+ bDoConstr, TRUE,
step, inputrec->ld_seed, DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL);
break;
case (eiSD2):
- /* The SD update is done in 2 parts, because an extra constraint step
- * is needed
+ /* The SD2 update is always done in 2 parts,
+ * because an extra constraint step is needed
*/
do_update_sd2(upd->sd,
bInitStep, start_th, end_th,
*/
#include <string.h>
-#include "gromacs/legacyheaders/vec.h"
-
#include "gromacs/math/utilities.h"
+#include "gromacs/math/vec.h"
#include "gromacs/selection/indexutil.h"
#include "gromacs/selection/poscalc.h"
#include "gromacs/selection/selection.h"
const gmx::SelectionTreeElementPointer &sel,
gmx_ana_index_t *g)
{
- gmx_ana_index_intersection(sel->v.u.g, &sel->u.cgrp, g);
+ if (sel->flags & SEL_UNSORTED)
+ {
+ // This only works if g contains all the atoms, but that is currently
+ // the only supported case.
+ gmx_ana_index_copy(sel->v.u.g, &sel->u.cgrp, false);
+ }
+ else
+ {
+ gmx_ana_index_intersection(sel->v.u.g, &sel->u.cgrp, g);
+ }
}
#include <algorithm>
#include <string>
-#include "gromacs/legacyheaders/vec.h"
-
+#include "gromacs/math/vec.h"
#include "gromacs/selection/position.h"
#include "gromacs/selection/selmethod.h"
#include "gromacs/selection/selparam.h"
// FIXME: Use exceptions.
return SelectionTreeElementPointer();
}
- _gmx_selelem_update_flags(child, scanner);
+ _gmx_selelem_update_flags(child);
if ((child->flags & SEL_DYNAMIC) && !(param->flags & SPAR_DYNAMIC))
{
_gmx_selparser_error(scanner, "dynamic values not supported");
#include <boost/exception_ptr.hpp>
#include <boost/shared_ptr.hpp>
-#include "gromacs/fileio/futil.h"
#include "gromacs/selection/poscalc.h"
#include "gromacs/selection/selection.h"
#include "gromacs/selection/selmethod.h"
char buf[1024];
va_list ap;
va_start(ap, fmt);
- vsprintf(buf, fmt, ap);
+ vsnprintf(buf, 1024, fmt, ap);
va_end(ap);
errors->append(buf);
}
/*!
* \param[in,out] sel Root of the selection element tree to initialize.
- * \param[in] scanner Scanner data structure.
- * \returns 0 on success, an error code on error.
*
* Propagates the \ref SEL_DYNAMIC flag from the children of \p sel to \p sel
* (if any child of \p sel is dynamic, \p sel is also marked as such).
* operation does not descend beyond such elements.
*/
void
- _gmx_selelem_update_flags(const gmx::SelectionTreeElementPointer &sel,
- yyscan_t scanner)
+ _gmx_selelem_update_flags(const gmx::SelectionTreeElementPointer &sel)
{
bool bUseChildType = false;
bool bOnlySingleChildren;
while (child)
{
/* Update the child */
- _gmx_selelem_update_flags(child, scanner);
- /* Propagate the dynamic flag */
- sel->flags |= (child->flags & SEL_DYNAMIC);
+ _gmx_selelem_update_flags(child);
+ /* Propagate the dynamic and unsorted flags */
+ sel->flags |= (child->flags & (SEL_DYNAMIC | SEL_UNSORTED));
/* Propagate the type flag if necessary and check for problems */
if (bUseChildType)
{
if ((sel->flags & SEL_VALTYPEMASK)
&& !(sel->flags & child->flags & SEL_VALTYPEMASK))
{
- _gmx_selparser_error(scanner, "invalid combination of selection expressions");
- // FIXME: Use an exception.
- return;
+ // TODO: Work out when this error is triggered, and whether the
+ // exception type is appropriate.
+ GMX_THROW(gmx::InvalidInputError("Invalid combination of selection expressions"));
}
sel->flags |= (child->flags & SEL_VALTYPEMASK);
}
const gmx::SelectionTreeElementPointer &sel,
yyscan_t scanner)
{
- gmx::MessageStringCollector *errors = _gmx_sel_lexer_error_reporter(scanner);
- char buf[1024];
- sprintf(buf, "In selection '%s'", _gmx_sel_lexer_pselstr(scanner));
- gmx::MessageStringContext context(errors, buf);
-
if (sel->v.type != POS_VALUE)
{
/* FIXME: Better handling of this error */
root->setName(name);
}
/* Update the flags */
- _gmx_selelem_update_flags(root, scanner);
+ _gmx_selelem_update_flags(root);
root->fillNameIfMissing(_gmx_sel_lexer_pselstr(scanner));
const char *pselstr = _gmx_sel_lexer_pselstr(scanner);
SelectionTreeElementPointer root;
- gmx::MessageStringCollector *errors = _gmx_sel_lexer_error_reporter(scanner);
- char buf[1024];
- sprintf(buf, "In selection '%s'", pselstr);
- gmx::MessageStringContext context(errors, buf);
-
- _gmx_selelem_update_flags(expr, scanner);
+ _gmx_selelem_update_flags(expr);
/* Check if this is a constant non-group value */
if (expr->type == SEL_CONST && expr->v.type != GROUP_VALUE)
{
_gmx_selelem_set_vtype(root->child, expr->v.type);
root->child->child = expr;
/* Update flags */
- _gmx_selelem_update_flags(root, scanner);
+ _gmx_selelem_update_flags(root);
/* Add the variable to the symbol table */
sc->symtab->addVariable(name, root->child);
finish:
#include <list>
#include <string>
-#include "gromacs/legacyheaders/types/simple.h"
-#include "gromacs/legacyheaders/vec.h"
-
+#include "gromacs/math/vec.h"
+#include "gromacs/math/vectypes.h"
#include "gromacs/utility/gmxassert.h"
+#include "gromacs/utility/real.h"
#include "gromacs/utility/uniqueptr.h"
#include "selelem.h"
/** Propagates the flags for selection elements. */
void
- _gmx_selelem_update_flags(const gmx::SelectionTreeElementPointer &sel,
- void *scanner);
+ _gmx_selelem_update_flags(const gmx::SelectionTreeElementPointer &sel);
/** Initializes the method parameter data of \ref SEL_EXPRESSION and
* \ref SEL_MODIFIER elements. */
#include <boost/shared_ptr.hpp>
#include "gromacs/legacyheaders/oenv.h"
-#include "gromacs/legacyheaders/xvgr.h"
#include "gromacs/onlinehelp/helpmanager.h"
#include "gromacs/onlinehelp/helpwritercontext.h"
#include "gromacs/options/basicoptions.h"
#include "gromacs/options/options.h"
#include "gromacs/selection/selection.h"
+#include "gromacs/topology/topology.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/file.h"
#include "gromacs/utility/gmxassert.h"
while (child)
{
resolveExternalGroups(child, errors);
- child = child->next;
+ root->flags |= (child->flags & SEL_UNSORTED);
+ child = child->next;
}
}
#include <boost/shared_ptr.hpp>
-#include "gromacs/legacyheaders/types/simple.h"
#include "gromacs/utility/common.h"
+#include "gromacs/utility/real.h"
#include "indexutil.h"
#include "selvalue.h"
* The element (or one of its children) is dynamic.
*/
#define SEL_DYNAMIC 16
+ /*! \brief
+ * The element may contain atom indices in an unsorted order.
+ */
+ #define SEL_UNSORTED 32
/*! \brief
* Mask that covers the flags that describe the number of values.
*/
#include "gromacs/selection/indexutil.h"
#include "gromacs/selection/selectioncollection.h"
#include "gromacs/selection/selection.h"
+#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/flags.h"
EXPECT_THROW_GMX(sc_.compile(), gmx::APIError);
}
- TEST_F(SelectionCollectionTest, HandlesUnsortedGroupReference)
+ // TODO: Make the check less eager so that it doesn't break other tests, and
+ // adapt these tests accordingly.
+ TEST_F(SelectionCollectionTest, DISABLED_HandlesUnsortedGroupReference)
{
ASSERT_NO_THROW_GMX(loadIndexGroups("simple.ndx"));
EXPECT_THROW_GMX(sc_.parseFromString("group \"GrpUnsorted\""),
EXPECT_THROW_GMX(sc_.parseFromString("2"), gmx::InconsistentInputError);
}
- TEST_F(SelectionCollectionTest, HandlesUnsortedGroupReferenceDelayed)
+ TEST_F(SelectionCollectionTest, DISABLED_HandlesUnsortedGroupReferenceDelayed)
{
ASSERT_NO_THROW_GMX(sc_.parseFromString("group 2; group \"GrpUnsorted\""));
EXPECT_THROW_GMX(loadIndexGroups("simple.ndx"), gmx::InconsistentInputError);
ASSERT_NO_FATAL_FAILURE(runCompiler());
}
+ TEST_F(SelectionCollectionDataTest, HandlesUnsortedIndexGroupsInSelections)
+ {
+ static const char * const selections[] = {
+ "group \"GrpUnsorted\"",
+ "GrpUnsorted",
+ "2",
+ "res_cog of group \"GrpUnsorted\"",
+ "group \"GrpUnsorted\" permute 2 1"
+ };
+ setFlags(TestFlags() | efTestPositionAtoms | efTestPositionMapping
+ | efTestSelectionNames);
+ ASSERT_NO_THROW_GMX(loadIndexGroups("simple.ndx"));
+ runTest("simple.gro", selections);
+ }
+
TEST_F(SelectionCollectionDataTest, HandlesConstantPositions)
{
static const char * const selections[] = {
#include <stdlib.h>
#include <string.h>
#include <time.h>
+
#include "typedefs.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/mdlib/groupcoord.h"
-#include "mtop_util.h"
+#include "gromacs/topology/mtop_util.h"
#include "macros.h"
-#include "vec.h"
+#include "gromacs/math/vec.h"
#include "names.h"
#include "network.h"
#include "mdrun.h"
-#include "xvgr.h"
+#include "gromacs/fileio/xvgr.h"
#include "copyrite.h"
#include "gromacs/fileio/confio.h"
#include "gromacs/timing/wallcycle.h"
#include "swapcoords.h"
+#include "gromacs/pbcutil/pbc.h"
+
static char *SwS = {"SWAP:"}; /**< For output that comes from the swap module */
static char *SwSEmpty = {" "}; /**< Placeholder for multi-line output */
static char* IonString[eIonNR] = {"anion", "cation" }; /**< Type of ion, used for verbose output */
}
else
{
- fprintf(stderr, "%s node %d: Inconsistency during ion compartmentalization. !inA: %d, !inB: %d, total ions %d\n",
+ fprintf(stderr, "%s rank %d: Inconsistency during ion compartmentalization. !inA: %d, !inB: %d, total ions %d\n",
SwS, cr->nodeid, not_in_comp[eCompA], not_in_comp[eCompB], iong->nat);
}
}
else
{
- fprintf(stderr, "%s node %d: %d atoms are in the ion group, but altogether %d have been assigned to the compartments.\n",
+ fprintf(stderr, "%s rank %d: %d atoms are in the ion group, but altogether %d have been assigned to the compartments.\n",
SwS, cr->nodeid, iong->nat, sum);
}
}
}
else
{
- fprintf(stderr, "%s node %d: Inconsistency during solvent compartmentalization. !inA: %d, !inB: %d, solvent atoms %d\n",
+ fprintf(stderr, "%s rank %d: Inconsistency during solvent compartmentalization. !inA: %d, !inB: %d, solvent atoms %d\n",
SwS, cr->nodeid, not_in_comp[eCompA], not_in_comp[eCompB], solg->nat);
}
}
}
else
{
- fprintf(stderr, "%s node %d: %d atoms in solvent group, but %d have been assigned to the compartments.\n",
+ fprintf(stderr, "%s rank %d: %d atoms in solvent group, but %d have been assigned to the compartments.\n",
SwS, cr->nodeid, solg->nat, sum);
}
}
#include <config.h>
#endif
+#include <stdlib.h>
#include <string.h>
-#include "gromacs/utility/smalloc.h"
-#include "gmx_fatal.h"
-#include "md_logging.h"
+#include "gromacs/legacyheaders/md_logging.h"
+#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/utility/cstringutil.h"
-
-#include "gromacs/timing/cyclecounter.h"
+#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
+#include "gromacs/utility/smalloc.h"
+
+#include "cyclecounter.h"
/* DEBUG_WCYCLE adds consistency checking for the counters.
* It checks if you stop a counter different from the last
fprintf(fplog, "\n\n");
fprintf(fplog, " Computing: Num Num Call Wall time Giga-Cycles\n");
- fprintf(fplog, " Nodes Threads Count (s) total sum %%\n");
+ fprintf(fplog, " Ranks Threads Count (s) total sum %%\n");
}
void wallcycle_print(FILE *fplog, int nnodes, int npme, double realtime,
if (npme > 0)
{
fprintf(fplog,
- "(*) Note that with separate PME nodes, the walltime column actually sums to\n"
+ "(*) Note that with separate PME ranks, the walltime column actually sums to\n"
" twice the total reported, but the cycle count total and %% are correct.\n"
"%s\n", hline);
}
#include <math.h>
#include <stdio.h>
#include <string.h>
-#include "main.h"
#include "macros.h"
#include "gromacs/utility/smalloc.h"
-#include "gromacs/fileio/futil.h"
-#include "sysstuff.h"
+#include "gromacs/utility/futil.h"
#include "txtdump.h"
-#include "gmx_fatal.h"
+#include "gromacs/utility/fatalerror.h"
#include "names.h"
#include "gromacs/fileio/tpxio.h"
#include "gromacs/fileio/trxio.h"
#include "gromacs/fileio/enxio.h"
-#include "mtop_util.h"
+#include "gromacs/topology/mtop_util.h"
#include "gromacs/utility/cstringutil.h"
static void cmp_int(FILE *fp, const char *s, int index, int i1, int i2)
cmp_int(fp, "inputrec->fepvals->sc_power", -1, fep1->sc_power, fep2->sc_power);
cmp_real(fp, "inputrec->fepvals->sc_r_power", -1, fep1->sc_r_power, fep2->sc_r_power, ftol, abstol);
cmp_real(fp, "inputrec->fepvals->sc_sigma", -1, fep1->sc_sigma, fep2->sc_sigma, ftol, abstol);
- cmp_bool(fp, "inputrec->fepvals->bPrintEnergy", -1, fep1->bPrintEnergy, fep1->bPrintEnergy);
+    cmp_int(fp, "inputrec->fepvals->edHdLPrintEnergy", -1, fep1->edHdLPrintEnergy, fep2->edHdLPrintEnergy);
cmp_bool(fp, "inputrec->fepvals->bScCoul", -1, fep1->bScCoul, fep1->bScCoul);
cmp_int(fp, "inputrec->separate_dhdl_file", -1, fep1->separate_dhdl_file, fep2->separate_dhdl_file);
cmp_int(fp, "inputrec->dhdl_derivatives", -1, fep1->dhdl_derivatives, fep2->dhdl_derivatives);
--- /dev/null
- std::fprintf(stderr, "Halting parallel program %s on node %d out of %d\n",
+/*
+ * This file is part of the GROMACS molecular simulation package.
+ *
+ * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
+ * Copyright (c) 2001-2004, The GROMACS development team.
+ * Copyright (c) 2013,2014, by the GROMACS development team, led by
+ * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
+ * and including many others, as listed in the AUTHORS file in the
+ * top-level source directory and at http://www.gromacs.org.
+ *
+ * GROMACS is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * GROMACS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with GROMACS; if not, see
+ * http://www.gnu.org/licenses, or write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * If you want to redistribute modifications to GROMACS, please
+ * consider that scientific software is very special. Version
+ * control is crucial - bugs must be traceable. We will be happy to
+ * consider code for inclusion in the official distribution, but
+ * derived work must not be called official GROMACS. Details are found
+ * in the README & COPYING files - if they are missing, get the
+ * official version at http://www.gromacs.org.
+ *
+ * To help us fund GROMACS development, we humbly ask that you cite
+ * the research papers on the package. Check out http://www.gromacs.org.
+ */
+#include "basenetwork.h"
+
+#include "config.h"
+
+#include <cctype>
+#include <climits>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include <algorithm>
+#include <exception>
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/fatalerror.h"
+#include "gromacs/utility/gmxmpi.h"
+#include "gromacs/utility/programcontext.h"
+
+int gmx_gethostname(char *name, size_t len)
+{
+ if (len < 8)
+ {
+ gmx_incons("gmx_gethostname called with len<8");
+ }
+#if defined(HAVE_UNISTD_H) && !defined(__native_client__)
+ if (gethostname(name, len-1) != 0)
+ {
+ std::strncpy(name, "unknown", 8);
+ return -1;
+ }
+ return 0;
+#else
+ std::strncpy(name, "unknown", 8);
+ return -1;
+#endif
+}
+
+gmx_bool gmx_mpi_initialized(void)
+{
+#ifndef GMX_MPI
+ return 0;
+#else
+ int n;
+ MPI_Initialized(&n);
+
+ return n;
+#endif
+}
+
+int gmx_node_num(void)
+{
+#ifndef GMX_MPI
+ return 1;
+#else
+ int i;
+ (void) MPI_Comm_size(MPI_COMM_WORLD, &i);
+ return i;
+#endif
+}
+
+int gmx_node_rank(void)
+{
+#ifndef GMX_MPI
+ return 0;
+#else
+ int i;
+ (void) MPI_Comm_rank(MPI_COMM_WORLD, &i);
+ return i;
+#endif
+}
+
+static int mpi_hostname_hash(void)
+{
+ int hash_int;
+
+#ifndef GMX_LIB_MPI
+ /* We have a single physical node */
+ hash_int = 0;
+#else
+ int resultlen;
+ char mpi_hostname[MPI_MAX_PROCESSOR_NAME];
+
+ /* This procedure can only differentiate nodes with different names.
+ * Architectures where different physical nodes have identical names,
+ * such as IBM Blue Gene, should use an architecture specific solution.
+ */
+ MPI_Get_processor_name(mpi_hostname, &resultlen);
+
+ /* The string hash function returns an unsigned int. We cast to an int.
+ * Negative numbers are converted to positive by setting the sign bit to 0.
+ * This makes the hash one bit smaller.
+ * A 63-bit hash (with 64-bit int) should be enough for unique node hashes,
+ * even on a million node machine. 31 bits might not be enough though!
+ */
+ hash_int =
+ (int)gmx_string_fullhash_func(mpi_hostname, gmx_string_hash_init);
+ if (hash_int < 0)
+ {
+ hash_int -= INT_MIN;
+ }
+#endif
+
+ return hash_int;
+}
+
+#if defined GMX_LIB_MPI && defined GMX_TARGET_BGQ
+#include <spi/include/kernel/location.h>
+
+static int bgq_nodenum(void)
+{
+ int hostnum;
+ Personality_t personality;
+ Kernel_GetPersonality(&personality, sizeof(personality));
+ /* Each MPI rank has a unique coordinate in a 6-dimensional space
+ (A,B,C,D,E,T), with dimensions A-E corresponding to different
+ physical nodes, and T within each node. Each node has sixteen
+ physical cores, each of which can have up to four hardware
+ threads, so 0 <= T <= 63 (but the maximum value of T depends on
+       the configuration of ranks and OpenMP threads per
+ node). However, T is irrelevant for computing a suitable return
+ value for gmx_hostname_num().
+ */
+ hostnum = personality.Network_Config.Acoord;
+ hostnum *= personality.Network_Config.Bnodes;
+ hostnum += personality.Network_Config.Bcoord;
+ hostnum *= personality.Network_Config.Cnodes;
+ hostnum += personality.Network_Config.Ccoord;
+ hostnum *= personality.Network_Config.Dnodes;
+ hostnum += personality.Network_Config.Dcoord;
+ hostnum *= personality.Network_Config.Enodes;
+ hostnum += personality.Network_Config.Ecoord;
+
+ if (debug)
+ {
+ std::fprintf(debug,
+ "Torus ID A: %d / %d B: %d / %d C: %d / %d D: %d / %d E: %d / %d\n"
+ "Node ID T: %d / %d core: %d / %d hardware thread: %d / %d\n",
+ personality.Network_Config.Acoord,
+ personality.Network_Config.Anodes,
+ personality.Network_Config.Bcoord,
+ personality.Network_Config.Bnodes,
+ personality.Network_Config.Ccoord,
+ personality.Network_Config.Cnodes,
+ personality.Network_Config.Dcoord,
+ personality.Network_Config.Dnodes,
+ personality.Network_Config.Ecoord,
+ personality.Network_Config.Enodes,
+ Kernel_ProcessorCoreID(),
+ 16,
+ Kernel_ProcessorID(),
+ 64,
+ Kernel_ProcessorThreadID(),
+ 4);
+ }
+ return hostnum;
+}
+#endif
+
+int gmx_physicalnode_id_hash(void)
+{
+ int hash;
+
+#ifndef GMX_MPI
+ hash = 0;
+#else
+#ifdef GMX_THREAD_MPI
+ /* thread-MPI currently puts the thread number in the process name,
+ * we might want to change this, as this is inconsistent with what
+ * most MPI implementations would do when running on a single node.
+ */
+ hash = 0;
+#else
+#ifdef GMX_TARGET_BGQ
+ hash = bgq_nodenum();
+#else
+ hash = mpi_hostname_hash();
+#endif
+#endif
+#endif
+
+ if (debug)
+ {
+ fprintf(debug, "In gmx_physicalnode_id_hash: hash %d\n", hash);
+ }
+
+ return hash;
+}
+
+#ifdef GMX_LIB_MPI
+void gmx_abort(int errorno)
+{
+ const char *programName = "GROMACS";
+ try
+ {
+ programName = gmx::getProgramContext().displayName();
+ }
+ catch (const std::exception &)
+ {
+ }
+ const int nnodes = gmx_node_num();
+ const int noderank = gmx_node_rank();
+ if (nnodes > 1)
+ {
+        std::fprintf(stderr, "Halting parallel program %s on rank %d out of %d\n",
+ programName, noderank, nnodes);
+ }
+ else
+ {
+ std::fprintf(stderr, "Halting program %s\n", programName);
+ }
+
+ MPI_Abort(MPI_COMM_WORLD, errorno);
+ std::exit(errorno);
+}
+#endif
#include "gromacs/legacyheaders/checkpoint.h"
#include "gromacs/legacyheaders/copyrite.h"
-#include "gromacs/legacyheaders/gmx_fatal.h"
#include "gromacs/legacyheaders/macros.h"
#include "gromacs/legacyheaders/main.h"
#include "gromacs/legacyheaders/mdrun.h"
#include "gromacs/commandline/pargs.h"
#include "gromacs/fileio/filenm.h"
+#include "gromacs/utility/fatalerror.h"
int gmx_mdrun(int argc, char *argv[])
{
"the structure provided is properly energy-minimized.",
"The generated matrix can be diagonalized by [gmx-nmeig].[PAR]",
"The [TT]mdrun[tt] program reads the run input file ([TT]-s[tt])",
- "and distributes the topology over nodes if needed.",
+ "and distributes the topology over ranks if needed.",
"[TT]mdrun[tt] produces at least four output files.",
"A single log file ([TT]-g[tt]) is written, unless the option",
- "[TT]-seppot[tt] is used, in which case each node writes a log file.",
+ "[TT]-seppot[tt] is used, in which case each rank writes a log file.",
"The trajectory file ([TT]-o[tt]), contains coordinates, velocities and",
"optionally forces.",
"The structure file ([TT]-c[tt]) contains the coordinates and",
"compiled with the GROMACS built-in thread-MPI library. OpenMP threads",
"are supported when [TT]mdrun[tt] is compiled with OpenMP. Full OpenMP support",
"is only available with the Verlet cut-off scheme, with the (older)",
- "group scheme only PME-only processes can use OpenMP parallelization.",
+ "group scheme only PME-only ranks can use OpenMP parallelization.",
"In all cases [TT]mdrun[tt] will by default try to use all the available",
"hardware resources. With a normal MPI library only the options",
"[TT]-ntomp[tt] (with the Verlet cut-off scheme) and [TT]-ntomp_pme[tt],",
- "for PME-only processes, can be used to control the number of threads.",
+ "for PME-only ranks, can be used to control the number of threads.",
"With thread-MPI there are additional options [TT]-nt[tt], which sets",
"the total number of threads, and [TT]-ntmpi[tt], which sets the number",
"of thread-MPI threads.",
"The number of OpenMP threads used by [TT]mdrun[tt] can also be set with",
"the standard environment variable, [TT]OMP_NUM_THREADS[tt].",
"The [TT]GMX_PME_NUM_THREADS[tt] environment variable can be used to specify",
- "the number of threads used by the PME-only processes.[PAR]",
+ "the number of threads used by the PME-only ranks.[PAR]",
"Note that combined MPI+OpenMP parallelization is in many cases",
"slower than either on its own. However, at high parallelization, using the",
"combination is often beneficial as it reduces the number of domains and/or",
"the number of MPI ranks. (Less and larger domains can improve scaling,",
- "with separate PME processes fewer MPI ranks reduces communication cost.)",
+ "with separate PME ranks, using fewer MPI ranks reduces communication costs.)",
"OpenMP-only parallelization is typically faster than MPI-only parallelization",
"on a single CPU(-die). Since we currently don't have proper hardware",
"topology detection, [TT]mdrun[tt] compiled with thread-MPI will only",
"to specify [TT]cutoff-scheme = Verlet[tt] in the [TT].mdp[tt] file.",
"[PAR]",
"With GPUs (only supported with the Verlet cut-off scheme), the number",
- "of GPUs should match the number of MPI processes or MPI threads,",
- "excluding PME-only processes/threads. With thread-MPI, unless set on the command line, the number",
+ "of GPUs should match the number of particle-particle ranks, i.e.",
+ "excluding PME-only ranks. With thread-MPI, unless set on the command line, the number",
"of MPI threads will automatically be set to the number of GPUs detected.",
"To use a subset of the available GPUs, or to manually provide a mapping of",
"GPUs to PP ranks, you can use the [TT]-gpu_id[tt] option. The argument of [TT]-gpu_id[tt] is",
"fast GPUs, a (user-supplied) larger nstlist value can give much",
"better performance.",
"[PAR]",
- "When using PME with separate PME nodes or with a GPU, the two major",
+ "When using PME with separate PME ranks or with a GPU, the two major",
"compute tasks, the non-bonded force calculation and the PME calculation",
"run on different compute resources. If this load is not balanced,",
"some of the resources will be idle part of time. With the Verlet",
"to avoid overloading cores; with [TT]-pinoffset[tt] you can specify",
"the offset in logical cores for pinning.",
"[PAR]",
- "When [TT]mdrun[tt] is started using MPI with more than 1 process",
- "or with thread-MPI with more than 1 thread, MPI parallelization is used.",
- "Domain decomposition is always used with MPI parallelism.",
+ "When [TT]mdrun[tt] is started with more than 1 rank,",
+ "parallelization with domain decomposition is used.",
"[PAR]",
"With domain decomposition, the spatial decomposition can be set",
"with option [TT]-dd[tt]. By default [TT]mdrun[tt] selects a good decomposition.",
"At high parallelization the options in the next two sections",
"could be important for increasing the performace.",
"[PAR]",
- "When PME is used with domain decomposition, separate nodes can",
+ "When PME is used with domain decomposition, separate ranks can",
"be assigned to do only the PME mesh calculation;",
- "this is computationally more efficient starting at about 12 nodes",
+ "this is computationally more efficient starting at about 12 ranks,",
"or even fewer when OpenMP parallelization is used.",
- "The number of PME nodes is set with option [TT]-npme[tt],",
- "this can not be more than half of the nodes.",
+ "The number of PME ranks is set with option [TT]-npme[tt],",
+ "but this cannot be more than half of the ranks.",
"By default [TT]mdrun[tt] makes a guess for the number of PME",
- "nodes when the number of nodes is larger than 16. With GPUs,",
- "PME nodes are not selected automatically, since the optimal setup",
- "depends very much on the details of the hardware.",
- "In all cases you might gain performance by optimizing [TT]-npme[tt].",
- "Performance statistics on this issue",
+ "ranks when the number of ranks is larger than 16. With GPUs,",
+ "using separate PME ranks is not selected automatically,",
+ "since the optimal setup depends very much on the details",
+ "of the hardware. In all cases, you might gain performance",
+ "by optimizing [TT]-npme[tt]. Performance statistics on this issue",
"are written at the end of the log file.",
"For good load balancing at high parallelization, the PME grid x and y",
- "dimensions should be divisible by the number of PME nodes",
+ "dimensions should be divisible by the number of PME ranks",
"(the simulation will run correctly also when this is not the case).",
"[PAR]",
"This section lists all options that affect the domain decomposition.",
"With [TT]-multi[tt], the system number is appended to the run input ",
"and each output filename, for instance [TT]topol.tpr[tt] becomes",
"[TT]topol0.tpr[tt], [TT]topol1.tpr[tt] etc.",
- "The number of nodes per system is the total number of nodes",
+ "The number of ranks per system is the total number of ranks",
"divided by the number of systems.",
"One use of this option is for NMR refinement: when distance",
"or orientation restraints are present these can be ensemble averaged",
"and no old output files are modified and no new output files are opened.",
"The result with appending will be the same as from a single run.",
"The contents will be binary identical, unless you use a different number",
- "of nodes or dynamic load balancing or the FFT library uses optimizations",
+ "of ranks or dynamic load balancing or the FFT library uses optimizations",
"through timing.",
"[PAR]",
"With option [TT]-maxh[tt] a simulation is terminated and a checkpoint",
"pressed), it will stop after the next neighbor search step ",
"(with nstlist=0 at the next step).",
"In both cases all the usual output will be written to file.",
- "When running with MPI, a signal to one of the [TT]mdrun[tt] processes",
+ "When running with MPI, a signal to one of the [TT]mdrun[tt] ranks",
"is sufficient, this signal should not be sent to mpirun or",
"the [TT]mdrun[tt] process that is the parent of the others.",
"[PAR]",
{ "-dd", FALSE, etRVEC, {&realddxyz},
"Domain decomposition grid, 0 is optimize" },
{ "-ddorder", FALSE, etENUM, {ddno_opt},
- "DD node order" },
+ "DD rank order" },
{ "-npme", FALSE, etINT, {&npme},
- "Number of separate nodes to be used for PME, -1 is guess" },
+ "Number of separate ranks to be used for PME, -1 is guess" },
{ "-nt", FALSE, etINT, {&hw_opt.nthreads_tot},
"Total number of threads to start (0 is guess)" },
{ "-ntmpi", FALSE, etINT, {&hw_opt.nthreads_tmpi},
"Number of thread-MPI threads to start (0 is guess)" },
{ "-ntomp", FALSE, etINT, {&hw_opt.nthreads_omp},
- "Number of OpenMP threads per MPI process/thread to start (0 is guess)" },
+ "Number of OpenMP threads per MPI rank to start (0 is guess)" },
{ "-ntomp_pme", FALSE, etINT, {&hw_opt.nthreads_omp_pme},
- "Number of OpenMP threads per MPI process/thread to start (0 is -ntomp)" },
+ "Number of OpenMP threads per MPI rank to start (0 is -ntomp)" },
{ "-pin", FALSE, etENUM, {thread_aff_opt},
- "Fix threads (or processes) to specific cores" },
+ "Set thread affinities" },
{ "-pinoffset", FALSE, etINT, {&hw_opt.core_pinning_offset},
"The starting logical core number for pinning to cores; used to avoid pinning threads from different mdrun instances to the same core" },
{ "-pinstride", FALSE, etINT, {&hw_opt.core_pinning_stride},
{ "-nstlist", FALSE, etINT, {&nstlist},
"Set nstlist when using a Verlet buffer tolerance (0 is guess)" },
{ "-tunepme", FALSE, etBOOL, {&bTunePME},
- "Optimize PME load between PP/PME nodes or GPU/CPU" },
+ "Optimize PME load between PP/PME ranks or GPU/CPU" },
{ "-testverlet", FALSE, etBOOL, {&bTestVerlet},
"Test the Verlet non-bonded scheme" },
{ "-v", FALSE, etBOOL, {&bVerbose},
{ "-compact", FALSE, etBOOL, {&bCompact},
"Write a compact log file" },
{ "-seppot", FALSE, etBOOL, {&bSepPot},
- "Write separate V and dVdl terms for each interaction type and node to the log file(s)" },
+ "Write separate V and dVdl terms for each interaction type and rank to the log file(s)" },
{ "-pforce", FALSE, etREAL, {&pforce},
"Print all forces larger than this (kJ/mol nm)" },
{ "-reprod", FALSE, etBOOL, {&bReproducible},
#include <config.h>
#endif
-#include "gromacs/utility/smalloc.h"
#include "types/commrec.h"
#include "network.h"
#include "calcgrid.h"
#include "pme.h"
-#include "vec.h"
#include "domdec.h"
#include "nbnxn_cuda_data_mgmt.h"
#include "force.h"
#include "md_logging.h"
#include "pme_loadbal.h"
+#include "gromacs/math/vec.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/utility/cstringutil.h"
+#include "gromacs/utility/smalloc.h"
+
/* Parameters and setting for one PP-PME setup */
typedef struct {
real rcut_coulomb; /* Coulomb cut-off */
{
md_print_warn(cr, fplog,
"NOTE: PME load balancing increased the non-bonded workload by more than 50%%.\n"
- " For better performance use (more) PME nodes (mdrun -npme),\n"
- " or in case you are beyond the scaling limit, use less nodes in total.\n");
+ " For better performance, use (more) PME ranks (mdrun -npme),\n"
+ " or if you are beyond the scaling limit, use fewer total ranks (or nodes).\n");
}
else
{
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
+
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
+
+#include <algorithm>
+
+#include <assert.h>
#include <signal.h>
#include <stdlib.h>
+#include <string.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
-#include <string.h>
-#include <assert.h>
#include "typedefs.h"
-#include "gromacs/utility/smalloc.h"
-#include "sysstuff.h"
#include "copyrite.h"
#include "force.h"
#include "mdrun.h"
#include "constr.h"
#include "mvdata.h"
#include "checkpoint.h"
-#include "mtop_util.h"
+#include "gromacs/topology/mtop_util.h"
#include "sighandler.h"
#include "txtdump.h"
#include "gmx_detect_hardware.h"
#include "gmx_omp_nthreads.h"
#include "gromacs/gmxpreprocess/calc_verletbuf.h"
-#include "gmx_fatal_collective.h"
#include "membed.h"
-#include "macros.h"
#include "gmx_thread_affinity.h"
#include "inputrec.h"
+#include "main.h"
+#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/fileio/tpxio.h"
+#include "gromacs/math/vec.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/nbnxn_consts.h"
-#include "gromacs/timing/wallcycle.h"
-#include "gromacs/utility/gmxmpi.h"
-#include "gromacs/utility/gmxomp.h"
-#include "gromacs/swap/swapcoords.h"
-#include "gromacs/essentialdynamics/edsam.h"
+#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
+#include "gromacs/swap/swapcoords.h"
+#include "gromacs/timing/wallcycle.h"
+#include "gromacs/utility/gmxassert.h"
+#include "gromacs/utility/gmxmpi.h"
+#include "gromacs/utility/smalloc.h"
#ifdef GMX_FAHCORE
#include "corewrap.h"
else if (hw_opt->nthreads_omp > 0)
{
/* Here we could oversubscribe, when we do, we issue a warning later */
- nthreads_tmpi = max(1, nthreads_tot/hw_opt->nthreads_omp);
+ nthreads_tmpi = std::max(1, nthreads_tot/hw_opt->nthreads_omp);
}
else
{
const int nthreads_omp_always_faster = 4;
const int nthreads_omp_always_faster_Nehalem = 12;
const int nthreads_omp_always_faster_SandyBridge = 16;
- const int first_model_Nehalem = 0x1A;
- const int first_model_SandyBridge = 0x2A;
gmx_bool bIntel_Family6;
bIntel_Family6 =
{
int nthreads_hw, nthreads_tot_max, nthreads_tmpi, nthreads_new, ngpu;
int min_atoms_per_mpi_thread;
- char *env;
- char sbuf[STRLEN];
gmx_bool bCanUseGPU;
if (hw_opt->nthreads_tmpi > 0)
{
/* the thread number was chosen automatically, but there are too many
threads (too few atoms per thread) */
- nthreads_new = max(1, mtop->natoms/min_atoms_per_mpi_thread);
+ nthreads_new = std::max(1, mtop->natoms/min_atoms_per_mpi_thread);
/* Avoid partial use of Hyper-Threading */
if (gmx_cpuid_x86_smt(hwinfo->cpuid_info) == GMX_CPUID_X86_SMT_ENABLED &&
/* We determine the extra cost of the non-bonded kernels compared to
* a reference nstlist value of 10 (which is the default in grompp).
*/
-static const int nbnxn_reference_nstlist = 10;
+static const int nbnxnReferenceNstlist = 10;
/* The values to try when switching */
const int nstlist_try[] = { 20, 25, 40 };
#define NNSTL sizeof(nstlist_try)/sizeof(nstlist_try[0])
float listfac_ok, listfac_max;
int nstlist_orig, nstlist_prev;
verletbuf_list_setup_t ls;
- real rlist_nstlist10, rlist_inc, rlist_ok, rlist_max;
+ real rlistWithReferenceNstlist, rlist_inc, rlist_ok, rlist_max;
real rlist_new, rlist_prev;
- int nstlist_ind = 0;
+ size_t nstlist_ind = 0;
t_state state_tmp;
gmx_bool bBox, bDD, bCont;
const char *nstl_gpu = "\nFor optimal performance with a GPU nstlist (now %d) should be larger.\nThe optimum depends on your CPU and GPU resources.\nYou might want to try several nstlist values.\n";
const char *box_err = "Can not increase nstlist because the box is too small";
const char *dd_err = "Can not increase nstlist because of domain decomposition limitations";
char buf[STRLEN];
+ const float oneThird = 1.0f / 3.0f;
if (nstlist_cmdline <= 0)
{
verletbuf_get_list_setup(bGPU, &ls);
/* Allow rlist to make the list a given factor larger than the list
- * would be with nstlist=10.
+ * would be with the reference value for nstlist (10).
*/
nstlist_prev = ir->nstlist;
- ir->nstlist = 10;
+ ir->nstlist = nbnxnReferenceNstlist;
calc_verlet_buffer_size(mtop, det(box), ir, -1, &ls, NULL,
- &rlist_nstlist10);
+ &rlistWithReferenceNstlist);
ir->nstlist = nstlist_prev;
/* Determine the pair list size increase due to zero interactions */
rlist_inc = nbnxn_get_rlist_effective_inc(ls.cluster_size_j,
mtop->natoms/det(box));
- rlist_ok = (rlist_nstlist10 + rlist_inc)*pow(listfac_ok, 1.0/3.0) - rlist_inc;
- rlist_max = (rlist_nstlist10 + rlist_inc)*pow(listfac_max, 1.0/3.0) - rlist_inc;
+ rlist_ok = (rlistWithReferenceNstlist + rlist_inc)*pow(listfac_ok, oneThird) - rlist_inc;
+ rlist_max = (rlistWithReferenceNstlist + rlist_inc)*pow(listfac_max, oneThird) - rlist_inc;
if (debug)
{
fprintf(debug, "nstlist tuning: rlist_inc %.3f rlist_ok %.3f rlist_max %.3f\n",
t_inputrec *ir,
gmx_mtop_t *mtop, real box_vol)
{
- char *conv_mesg = "Converting input file with group cut-off scheme to the Verlet cut-off scheme";
+ const char *conv_mesg = "Converting input file with group cut-off scheme to the Verlet cut-off scheme";
md_print_warn(NULL, fplog, "%s\n", conv_mesg);
rlist_fac = 1 + verlet_buffer_ratio_nodynamics;
}
ir->verletbuf_tol = -1;
- ir->rlist = rlist_fac*max(ir->rvdw, ir->rcoulomb);
+ ir->rlist = rlist_fac*std::max(ir->rvdw, ir->rcoulomb);
}
gmx_mtop_remove_chargegroups(mtop);
gmx_bool bIsPPrankUsingGPU;
char gpu_err_str[STRLEN];
- bIsPPrankUsingGPU = (cr->duty & DUTY_PP) && fr->nbv != NULL && fr->nbv->bUseGPU;
+ bIsPPrankUsingGPU = (cr->duty & DUTY_PP) && fr != NULL && fr->nbv != NULL && fr->nbv->bUseGPU;
if (bIsPPrankUsingGPU)
{
/* uninitialize GPU (by destroying the context) */
if (!free_gpu(gpu_err_str))
{
- gmx_warning("On node %d failed to free GPU #%d: %s",
+ gmx_warning("On rank %d failed to free GPU #%d: %s",
cr->nodeid, get_current_gpu_device_id(), gpu_err_str);
}
}
const char *deviceOptions, int imdport, unsigned long Flags)
{
gmx_bool bForceUseGPU, bTryUseGPU;
- double nodetime = 0, realtime;
t_inputrec *inputrec;
t_state *state = NULL;
matrix box;
gmx_ddbox_t ddbox = {0};
int npme_major, npme_minor;
- real tmpr1, tmpr2;
t_nrnb *nrnb;
gmx_mtop_t *mtop = NULL;
t_mdatoms *mdatoms = NULL;
gmx_pme_t *pmedata = NULL;
gmx_vsite_t *vsite = NULL;
gmx_constr_t constr;
- int i, m, nChargePerturbed = -1, nTypePerturbed = 0, status, nalloc;
- char *gro;
+ int nChargePerturbed = -1, nTypePerturbed = 0, status;
gmx_wallcycle_t wcycle;
gmx_bool bReadEkin;
- int list;
gmx_walltime_accounting_t walltime_accounting = NULL;
int rc;
gmx_int64_t reset_counters;
gmx_edsam_t ed = NULL;
- t_commrec *cr_old = cr;
int nthreads_pme = 1;
int nthreads_pp = 1;
gmx_membed_t membed = NULL;
" Verlet cut-off scheme.\n");
#endif
}
+
+ if (inputrec->eI == eiSD2)
+ {
+ md_print_warn(cr, fplog, "The stochastic dynamics integrator %s is deprecated, since\n"
+ "it is slower than integrator %s and is slightly less accurate\n"
+ "with constraints. Use the %s integrator.",
+ ei_names[inputrec->eI], ei_names[eiSD1], ei_names[eiSD1]);
+ }
}
- /* Check for externally set OpenMP affinity and turn off internal
- * pinning if any is found. We need to do this check early to tell
- * thread-MPI whether it should do pinning when spawning threads.
- * TODO: the above no longer holds, we should move these checks down
- */
- gmx_omp_check_thread_affinity(fplog, cr, hw_opt);
-
/* Check and update the hardware options for internal consistency */
check_and_update_hw_opt_1(hw_opt, SIMMASTER(cr));
+ /* Early check for externally set process affinity. */
+ gmx_check_thread_affinity_set(fplog, cr,
+ hw_opt, hwinfo->nthreads_hw_avail, FALSE);
if (SIMMASTER(cr))
{
-#ifdef GMX_THREAD_MPI
- /* Early check for externally set process affinity.
- * With thread-MPI this is needed as pinning might get turned off,
- * which needs to be known before starting thread-MPI.
- * With thread-MPI hw_opt is processed here on the master rank
- * and passed to the other ranks later, so we only do this on master.
- */
- gmx_check_thread_affinity_set(fplog,
- NULL,
- hw_opt, hwinfo->nthreads_hw_avail, FALSE);
-#endif
#ifdef GMX_THREAD_MPI
if (cr->npmenodes > 0 && hw_opt->nthreads_tmpi <= 0)
{
- gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME nodes");
+ gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME ranks");
}
#endif
if (hw_opt->nthreads_omp_pme != hw_opt->nthreads_omp &&
cr->npmenodes <= 0)
{
- gmx_fatal(FARGS, "You need to explicitly specify the number of PME nodes (-npme) when using different number of OpenMP threads for PP and PME nodes");
+ gmx_fatal(FARGS, "You need to explicitly specify the number of PME ranks (-npme) when using different number of OpenMP threads for PP and PME ranks");
}
}
if (hw_opt->nthreads_tmpi > 1)
{
+ t_commrec *cr_old = cr;
/* now start the threads. */
cr = mdrunner_start_threads(hw_opt, fplog, cr_old, nfile, fnm,
oenv, bVerbose, bCompact, nstglobalcomm,
#ifdef GMX_THREAD_MPI
"but the number of threads (option -nt) is 1"
#else
- "but %s was not started through mpirun/mpiexec or only one process was requested through mpirun/mpiexec"
+ "but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec"
#endif
#endif
, ShortProgram()
if (cr->npmenodes > 0)
{
gmx_fatal_collective(FARGS, cr, NULL,
- "PME nodes are requested, but the system does not use PME electrostatics or LJ-PME");
+ "PME-only ranks are requested, but the system does not use PME for electrostatics or LJ");
}
cr->npmenodes = 0;
if (DOMAINDECOMP(cr))
{
+ GMX_RELEASE_ASSERT(fr, "fr was NULL while cr->duty was DUTY_PP");
dd_init_bondeds(fplog, cr->dd, mtop, vsite, inputrec,
Flags & MD_DDBONDCHECK, fr->cginfo_mb);
}
else
{
+ GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");
/* do PME only */
walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
gmx_pmeonly(*pmedata, cr, nrnb, wcycle, walltime_accounting, ewaldcoeff_q, ewaldcoeff_lj, inputrec);
${CMAKE_SOURCE_DIR}/src/external/*.c
${CMAKE_SOURCE_DIR}/src/external/*.cpp
${CMAKE_SOURCE_DIR}/src/external/*.cu
+ ${CMAKE_SOURCE_DIR}/src/gromacs/selection/scanner.cpp
+ ${CMAKE_SOURCE_DIR}/src/gromacs/selection/parser.cpp
)
list(REMOVE_ITEM _inputfiles ${_files_to_ignore})
set(_outputopt --xml)
endif()
# Flags passed to every cppcheck invocation (shared by the C and C++ runs):
# style checks, platform defines, the include paths cppcheck needs to resolve
# project headers, and --inline-suppr so per-line suppression comments in the
# sources are honored. ${_outputopt} selects the report format (set above).
set(_common_flags
    --enable=style -DLINUX -DHAVE_UNISTD_H
    -I src/gromacs/legacyheaders -I src
    -I src/external/thread_mpi/include
    -I src/external/tng_io/include
    -I ${CMAKE_BINARY_DIR}/src -I ${CMAKE_BINARY_DIR}/src/gromacs/utility
    --quiet
    --inline-suppr
    ${_outputopt})
# Extra cppcheck suppressions applied only when checking C (and CUDA) files.
set(_c_flags
    --suppress=variableScope
    --suppress=sizeofCalculation
    --suppress=missingInclude:src/programs/mdrun/gmx_gpu_utils/gmx_gpu_utils.cu
    --suppress=*:src/external/Random123-1.08/include/Random123/features/compilerfeatures.h
    --suppress=assignIfError:src/gromacs/mdlib/nbnxn_atomdata.c # Ticket 5695
    --suppress=invalidPointerCast:src/gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel.cuh
    --suppress=passedByValue:src/gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel.cuh
    --suppress=passedByValue:src/gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_utils.cuh
    )
# Extra cppcheck suppressions applied only when checking C++ files.
# -D__cplusplus forces cppcheck into C++ mode for these sources.
set(_cxx_flags
    -D__cplusplus
    --suppress=variableScope
    --suppress=unnecessaryForwardDeclaration
    --suppress=invalidscanf:src/gromacs/fileio/matio.cpp
    --suppress=invalidscanf:src/gromacs/fileio/xvgr.cpp
    --suppress=invalidscanf:src/gromacs/topology/index.cpp
    --suppress=invalidscanf:src/gromacs/gmxpreprocess/pdb2top.cpp)
# This list will hold all files with cppcheck errors
# (one per input file)