Also, fix one incorrect use of GMX_GPU: it is always defined to one of the
GMX_GPU_* values, so it must be compared against GMX_GPU_NONE rather than
tested with "defined".
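
With plain #cmakedefine, a macro is either defined or undefined, so it
can only be tested with #ifdef, and a missing #include "config.h"
silently selects the wrong branch. With #cmakedefine01 the macro is
always defined, to 0 or 1, so it is tested with #if, a forgotten
include can be diagnosed (e.g. by -Wundef), and the value can be used
in ordinary expressions. A minimal sketch of the two styles follows
(GMX_OPENMP is a real option; doWork() is just a placeholder):

    /* old style: only presence is testable */
    #ifdef GMX_OPENMP
        doWork();
    #endif

    /* new style: always 0 or 1, testable with #if ... */
    #if GMX_OPENMP
        doWork();
    #endif
    /* ... and usable as a plain boolean, as several hunks below do */
    const bool bOMP = GMX_OPENMP;

Note that GMX_MPI itself remains a plain define derived from the 0/1
macros, so it is still tested with #ifdef/defined in the hunks below.
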
Change-Id: I2409dada6c05e00163de3783781a6e85acadbdda
#cmakedefine01 HAVE_EXTRAE
/* Use MPI (with mpicc) for parallelization */
-#cmakedefine GMX_LIB_MPI
+#cmakedefine01 GMX_LIB_MPI
/* Use threads_mpi for parallelization */
-#cmakedefine GMX_THREAD_MPI
+#cmakedefine01 GMX_THREAD_MPI
-#if defined GMX_LIB_MPI || defined GMX_THREAD_MPI
+#if GMX_LIB_MPI || GMX_THREAD_MPI
/* Make a parallel version of GROMACS using message passing
(MPI or thread_mpi) */
#define GMX_MPI
#cmakedefine01 MPI_IN_PLACE_EXISTS
/* Use OpenMP multithreading */
-#cmakedefine GMX_OPENMP
+#cmakedefine01 GMX_OPENMP
/* Can and should use nice(3) to set priority */
#cmakedefine01 GMX_USE_NICE
#cmakedefine GMX_CUDA_VERSION @GMX_CUDA_VERSION@
/* Use a single compilation unit when compiling the CUDA (non-bonded) kernels. */
-#cmakedefine GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
+#cmakedefine01 GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
/* Use NVML */
#cmakedefine01 HAVE_NVML
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
* texture objects are used), but as this is initialization code, there
 * is no point in complicating things.
*/
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
if (PAR(cr) && use_GPU(nbv))
{
gmx_barrier(cr);
#endif
#endif
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
/* TODO: Do we still need this? Are we still planning to use fftw + OpenMP? */
#define FFT5D_THREADS
/* requires fftw compiled with openmp */
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2008, The GROMACS development team.
- * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
static const int NOTSET = -49297;
-#ifdef GMX_OPENMP
-#define MASTER_THREAD_ONLY(threadNr) ((threadNr) == 0)
-#else
-#define MASTER_THREAD_ONLY(threadNr) ((threadNr) == (threadNr))
-#endif
-
/* -----------------------------------------*/
enum {
"Cc\\scontact,hb\\v{}\\z{}(t)",
"-dAc\\sfs\\v{}\\z{}/dt"
};
- gmx_bool bNorm = FALSE, bOMP = FALSE;
+ gmx_bool bNorm = FALSE;
double nhb = 0;
real *rhbex = NULL, *ht, *gt, *ght, *dght, *kt;
real *ct, tail, tail2, dtail, *cct;
AC_NONE, AC_NN, AC_GEM, AC_LUZAR
};
-#ifdef GMX_OPENMP
- bOMP = TRUE;
-#else
- bOMP = FALSE;
-#endif
+ const bool bOMP = GMX_OPENMP;
printf("Doing autocorrelation ");
"Theoretical maximum number of hydrogen bonds used for normalizing HB autocorrelation function. Can be useful in case the program estimates it wrongly" },
{ "-merge", FALSE, etBOOL, {&bMerge},
"H-bonds between the same donor and acceptor, but with different hydrogen are treated as a single H-bond. Mainly important for the ACF." },
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
{ "-nthreads", FALSE, etINT, {&nThreads},
"Number of threads used for the parallel loop over autocorrelations. nThreads <= 0 means maximum number of threads. Requires linking with OpenMP. The number of threads is limited by the number of cores (before OpenMP v.3 ) or environment variable OMP_THREAD_LIMIT (OpenMP v.3)"},
#endif
int ii, hh, actual_nThreads;
int threadNr = 0;
gmx_bool bParallel;
- gmx_bool bEdge_yjj, bEdge_xjj, bOMP;
+ gmx_bool bEdge_yjj, bEdge_xjj;
t_hbdata **p_hb = NULL; /* one per thread, then merge after the frame loop */
int **p_adist = NULL, **p_rdist = NULL; /* a histogram for each thread. */
-#ifdef GMX_OPENMP
- bOMP = TRUE;
-#else
- bOMP = FALSE;
-#endif
+ const bool bOMP = GMX_OPENMP;
npargs = asize(pa);
ppa = add_acf_pargs(&npargs, pa);
snew(adist, nabin+1);
snew(rdist, nrbin+1);
-#ifndef GMX_OPENMP
+#if !GMX_OPENMP
#define __ADIST adist
#define __RDIST rdist
#define __HBDATA hb
k, bTric, \
bEdge_xjj, bEdge_yjj) \
default(shared)
- { /* Start of parallel region */
+ { /* Start of parallel region */
+#if !defined __clang_analyzer__ // clang complains about unused value.
threadNr = gmx_omp_get_thread_num();
+#endif
do
{
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2010,2011,2012,2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2010,2011,2012,2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
stoplocal = stopglobal;
x_per_core = xtot;
}
-/*
- #ifdef GMX_LIB_MPI
- MPI_Barrier(MPI_COMM_WORLD);
- #endif
- */
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
#ifdef TAKETIME
if (MASTER(cr))
{
fprintf(stderr, "\n");
}
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
#ifdef TAKETIME
if (MASTER(cr))
{
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
"Stepping in q (1/nm)"},
{ "-seed", FALSE, etINT, {&seed},
"Random seed for Monte-Carlo"},
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
{ "-nt", FALSE, etINT, {&nthreads},
"Number of threads to start"},
#endif
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
static int nthreads = -1;
t_pargs pa[] = {
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
{ "-nt", FALSE, etINT, {&nthreads},
"Number of threads used by gmx wham (if -1, all threads will be used or what is specified by the environment variable OMP_NUM_THREADS)"},
#endif
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
rvec dist;
double rmax;
int i, j;
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
double **tgr;
int tid;
int nthreads;
mc_max = static_cast<gmx_int64_t>(std::floor(0.5*mcover*isize*(isize-1)));
}
rng = gmx_rng_init(seed);
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
nthreads = gmx_omp_get_max_threads();
snew(tgr, nthreads);
snew(trng, nthreads);
}
else
{
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
nthreads = gmx_omp_get_max_threads();
/* Allocating memory for tgr arrays */
snew(tgr, nthreads);
snew(cr, 1);
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
gmx_fill_commrec_from_mpi(cr);
#else
cr->mpi_comm_mysim = NULL;
t_commrec *reinitialize_commrec_for_this_thread(const t_commrec gmx_unused *cro)
{
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
t_commrec *cr;
/* make a thread-specific commrec */
nc = &cr->nc;
nc->bUse = FALSE;
-#ifndef GMX_THREAD_MPI
+#if !GMX_THREAD_MPI
#ifdef GMX_MPI
int n, rank;
int nrank_intranode, rank_intranode;
int nrank_pp_intranode, rank_pp_intranode;
/* thread-MPI is not initialized when not running in parallel */
-#if defined GMX_MPI && !defined GMX_THREAD_MPI
+#if defined GMX_MPI && !GMX_THREAD_MPI
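+/* Mixed test on purpose: GMX_MPI is still a plain define (see config.h
+ * above), while GMX_THREAD_MPI is now a 0/1 macro tested by value. */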
int nrank_world, rank_world;
int i, myhash, *hash, *hash_s, *hash_pp, *hash_pp_s;
* one real MPI rank on the same node (it segfaults when you try).
*/
static const bool multiGpuSupport[] = {
- false, true,
-#ifdef GMX_THREAD_MPI
- true,
-#else
- false, /* Real MPI and no MPI */
-#endif
+ false, true, GMX_THREAD_MPI
};
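+/* multiGpuSupport is indexed by GMX_GPU (assumed ordering: none, CUDA,
+ * OpenCL); the GMX_THREAD_MPI entry keeps the old behaviour of supporting
+ * multiple GPUs per node only with thread-MPI. */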
static const bool bMultiGpuPerNodeSupported = multiGpuSupport[GMX_GPU];
ngpu = gpu_info->n_dev;
-#if defined GMX_MPI && !defined GMX_THREAD_MPI
+#if defined GMX_MPI && !GMX_THREAD_MPI
/* We only print the detection on one, of possibly multiple, nodes */
std::strncpy(onhost, " on host ", 10);
gmx_gethostname(onhost + 9, HOSTNAMELEN);
return;
}
-#if defined(GMX_THREAD_MPI)
+#if GMX_THREAD_MPI
bMPI = FALSE;
btMPI = TRUE;
bNthreadsAuto = (hw_opt->nthreads_tmpi < 1);
-#elif defined(GMX_LIB_MPI)
+#elif GMX_LIB_MPI
bMPI = TRUE;
btMPI = FALSE;
bNthreadsAuto = FALSE;
fprintf(debug, "Detected %d hardware threads to use.\n", ret);
}
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
if (ret != gmx_omp_get_num_procs())
{
md_print_warn(cr, fplog,
static void gmx_detect_gpus(FILE *fplog, const t_commrec *cr)
{
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
int rank_world;
MPI_Comm physicalnode_comm;
#endif
* TODO: We should also do CPU hardware detection only once on each
* physical node and broadcast it, instead of do it on every MPI rank.
*/
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
/* A split of MPI_COMM_WORLD over physical nodes is only required here,
* so we create and destroy it locally.
*/
}
}
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
/* Broadcast the GPU info to the other ranks within this node */
MPI_Bcast(&hwinfo_g->gpu_info.n_dev, 1, MPI_INT, 0, physicalnode_comm);
static void gmx_collect_hardware_mpi(const gmx::CpuInfo &cpuInfo)
{
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
int rank_id;
int nrank, rank, ncore, nhwthread, ngpu, i;
int gpu_hash;
}
}
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
char host[HOSTNAMELEN];
int rank;
* texture objects are used), but as this is initialization code, there
* is no point in complicating things.
*/
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
if (PAR(cr))
{
gmx_barrier(cr);
 * Note: since only PP ranks need to free GPU resources, it is safe not to
 * call the barrier on PME ranks.
*/
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
if (PAR(cr))
{
gmx_barrier(cr);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
gmx_bool bFullOmpSupport,
gmx_bool bSepPME)
{
- char *env;
- int nth;
- char sbuf[STRLEN];
- gmx_bool bOMP;
+ char *env;
+ int nth;
+ char sbuf[STRLEN];
-#ifdef GMX_OPENMP
- bOMP = TRUE;
-#else
- bOMP = FALSE;
-#endif /* GMX_OPENMP */
+ const bool bOMP = GMX_OPENMP;
/* The default should never be set through a GMX_*_NUM_THREADS env var
 * as it is always equal to gnth. */
do about it. */
static void manage_number_of_openmp_threads(FILE *fplog,
const t_commrec *cr,
- gmx_bool bOMP,
+ bool bOMP,
int nthreads_hw_avail,
int omp_nthreads_req,
int omp_nthreads_pme_req,
int nth;
char *env;
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
/* modth is shared among tMPI threads, so for thread safety, the
* detection is done on the master only. It is not thread-safe
* with multiple simulations, but that's anyway not supported by
/* set the number of threads globally */
if (bOMP)
{
-#ifndef GMX_THREAD_MPI
+#if !GMX_THREAD_MPI
if (bThisNodePMEOnly)
{
gmx_omp_set_num_threads(modth.gnth_pme);
gmx_bool bFullOmpSupport,
gmx_bool bSepPME)
{
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
const char *mpi_str = "per tMPI thread";
#else
const char *mpi_str = "per MPI process";
#ifdef GMX_MPI
if (modth.gnth == 1)
{
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
sprintf(sbuf, "thread-MPI threads");
#else
sprintf(sbuf, "MPI processes");
gmx_bool bThisNodePMEOnly,
gmx_bool bFullOmpSupport)
{
- int nppn;
- gmx_bool bSepPME, bOMP;
+ int nppn;
+ gmx_bool bSepPME;
-#ifdef GMX_OPENMP
- bOMP = TRUE;
-#else
- bOMP = FALSE;
-#endif /* GMX_OPENMP */
+ const bool bOMP = GMX_OPENMP;
/* number of MPI processes/threads per physical node */
nppn = cr->nrank_intranode;
omp_nthreads_req, omp_nthreads_pme_req,
bThisNodePMEOnly, bFullOmpSupport,
nppn, bSepPME);
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
/* Non-master threads have to wait for the OpenMP management to be
* done, so that code elsewhere that uses OpenMP can be certain
* the setup is complete. */
#undef FUNCTION_DECLARATION_ONLY
/* Now generate the function definitions if we are using a single compilation unit. */
-#ifdef GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
+#if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_F_noprune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_F_prune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_VF_noprune.cu"
/* With multiple compilation units this ensures that texture refs are available
   in the kernels' compilation units. */
-#ifndef GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
+#if !GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
/*! Texture reference for LJ C6/C12 parameters; bound to cu_nbparam_t.nbfp */
extern texture<float, 1, cudaReadModeElementType> nbfp_texref;
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
bbcz = grid->bbcz_simple;
bb = grid->bb_simple;
-#if (defined GMX_OPENMP) && !(defined __clang_analyzer__)
+#if GMX_OPENMP && !(defined __clang_analyzer__)
// cppcheck-suppress unreadVariable
int nthreads = gmx_omp_nthreads_get(emntPairsearch);
#endif
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/* Each thread should copy its own data to the combined arrays,
* as otherwise data will go back and forth between different caches.
*/
-#if (defined GMX_OPENMP) && !(defined __clang_analyzer__)
+#if GMX_OPENMP && !(defined __clang_analyzer__)
// cppcheck-suppress unreadVariable
int nthreads = gmx_omp_nthreads_get(emntPairsearch);
#endif
double dt, elapsed_seconds, time_per_step;
char buf[48];
-#ifndef GMX_THREAD_MPI
+#if !GMX_THREAD_MPI
if (!PAR(cr))
#endif
{
ir->delta_t/1000*24*60*60/time_per_step);
}
}
-#ifndef GMX_THREAD_MPI
+#if !GMX_THREAD_MPI
if (PAR(cr))
{
fprintf(out, "\n");
if (cr->nnodes > 1)
{
#ifdef GMX_MPI
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
sprintf(sbuf1, "In tMPI thread #%d: ", cr->nodeid);
#else /* GMX_LIB_MPI */
sprintf(sbuf1, "In MPI process #%d: ", cr->nodeid);
{
return;
}
-#ifndef GMX_THREAD_MPI
+#if !GMX_THREAD_MPI
return;
#endif
}
bAllSet = bAllSet && (CPU_ISSET(i, &mask_current) != 0);
}
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
gmx_bool bAllSet_All;
MPI_Allreduce(&bAllSet, &bAllSet_All, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
#ifndef GMX_MPI
return 1;
#else
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
if (!gmx_mpi_initialized())
{
return 1;
#ifndef GMX_MPI
return 0;
#else
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
if (!gmx_mpi_initialized())
{
return 0;
#endif
}
-#if defined GMX_LIB_MPI && GMX_TARGET_BGQ
+#if GMX_LIB_MPI && GMX_TARGET_BGQ
#ifdef __clang__
/* IBM's declaration of this function in
* /bgsys/drivers/V1R2M2/ppc64/spi/include/kernel/process.h
#if GMX_TARGET_BGQ
hash_int = bgq_nodenum();
-#elif defined GMX_LIB_MPI
+#elif GMX_LIB_MPI
int resultlen;
char mpi_hostname[MPI_MAX_PROCESSOR_NAME];
#endif
}
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
void gmx_abort(int errorno)
{
MPI_Abort(MPI_COMM_WORLD, errorno);
#endif
fprintf(fp, "Memory model: %u bit\n", (unsigned)(8*sizeof(void *)));
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
fprintf(fp, "MPI library: thread_mpi\n");
#elif defined(GMX_MPI)
fprintf(fp, "MPI library: MPI\n");
#else
fprintf(fp, "MPI library: none\n");
#endif
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
fprintf(fp, "OpenMP support: enabled (GMX_OPENMP_MAX_THREADS = %d)\n", GMX_OPENMP_MAX_THREADS);
#else
fprintf(fp, "OpenMP support: disabled\n");
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
MPI_Finalize();
break;
case ExitType_Abort:
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
gmx_abort(returnValue);
#endif
break;
#include "config.h"
/*! \cond */
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
/* MPI C++ binding is deprecated and can cause name conflicts (e.g. stdio/mpi seek) */
#define MPICH_SKIP_MPICXX 1
#define OMPI_SKIP_MPICXX 1
#endif
#endif /*MPI_INT64_T*/
#else
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
#include "thread_mpi/mpi_bindings.h" /* IWYU pragma: export */
#include "thread_mpi/tmpi.h" /* IWYU pragma: export */
#else
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
+ * Copyright (c) 2012,2013,2014,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include <stdio.h>
#include <stdlib.h>
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
#include <omp.h>
#endif
int gmx_omp_get_max_threads(void)
{
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
return omp_get_max_threads();
#else
return 1;
int gmx_omp_get_num_procs(void)
{
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
return omp_get_num_procs();
#else
return 1;
int gmx_omp_get_thread_num(void)
{
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
return omp_get_thread_num();
#else
return 0;
void gmx_omp_set_num_threads(int num_threads)
{
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
omp_set_num_threads(num_threads);
#else
GMX_UNUSED_VALUE(num_threads);
bool shouldSetAffinity = true;
*message = NULL;
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
/* We assume that the affinity setting is available on all platforms
* gcc supports. Even if this is not the case (e.g. Mac OS) the user
* will only get a warning. */
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
#include "gromacs/utility/gmxmpi.h"
#endif
namespace
{
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
//! Maintains global counter of attempts to initialize MPI
int g_initializationCounter = 0;
#endif
void init(int *argc, char ***argv)
{
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
int isInitialized = 0, isFinalized = 0;
MPI_Finalized(&isFinalized);
GMX_RELEASE_ASSERT(!isFinalized, "Invalid attempt to initialize MPI after finalization");
#ifdef GMX_FAHCORE
fah_MPI_Init(argc, argv);
#else
-# ifdef GMX_OPENMP
+# if GMX_OPENMP
/* Formally we need to use MPI_Init_thread and ask for MPI_THREAD_FUNNELED
* level of thread support when using OpenMP. However, in practice we
* have never seen any problems with just using MPI_Init(), and some MPI
void finalize()
{
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
GMX_RELEASE_ASSERT(0 < g_initializationCounter, "Excess attempt to finalize MPI");
// Bump the counter to record this finalization event
g_initializationCounter--;
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2010,2012,2014, by the GROMACS development team, led by
+ * Copyright (c) 2010,2012,2014,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
 * Expands to gmx_qsort() if GROMACS is built with threading, or the system qsort()
* otherwise.
*/
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
#define gmx_qsort_threadsafe gmx_qsort
#else
#define gmx_qsort_threadsafe qsort
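+/* Either expansion has the qsort(3) signature, e.g. (sketch; compareFunc is
+ * a placeholder): gmx_qsort_threadsafe(data, n, sizeof(data[0]), compareFunc); */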
/* Check whether everything is still allright */
if (((int)gmx_get_stop_condition() > handled_stop_condition)
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
&& MASTER(cr)
#endif
)
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2011,2012,2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2011,2012,2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
if (nmultisim >= 1)
{
-#ifndef GMX_THREAD_MPI
+#if !GMX_THREAD_MPI
gmx_bool bParFn = (multidir == NULL);
init_multisystem(cr, nmultisim, multidir, NFILE, fnm, bParFn);
#else
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2015, by the GROMACS development team, led by
+ * Copyright (c) 2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
* and after a switch point doesn't change too much.
*/
-#ifdef GMX_OPENMP
-static const bool bHasOmpSupport = true;
-#else
-static const bool bHasOmpSupport = false;
-#endif
+static const bool bHasOmpSupport = GMX_OPENMP;
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
/* The minimum number of atoms per tMPI thread. With fewer atoms than this,
* the number of threads will get lowered.
*/
 * we first try 6 OpenMP threads and then fewer until the number of MPI ranks
* is divisible by the number of GPUs.
*/
-#if defined GMX_OPENMP && defined GMX_MPI
+#if GMX_OPENMP && defined GMX_MPI
const int nthreads_omp_mpi_ok_max = 8;
const int nthreads_omp_mpi_ok_min_cpu = 1;
#endif
const gmx::CpuInfo &cpuInfo,
gmx_bool bUseGPU)
{
-#if defined GMX_OPENMP && defined GMX_MPI
+#if GMX_OPENMP && defined GMX_MPI
if (nrank > 1)
{
return nthreads_omp_mpi_ok_max;
}
-#ifdef GMX_THREAD_MPI
-
+#if GMX_THREAD_MPI
static bool
gmxSmtIsEnabled(const gmx::HardwareTopology &hwTop)
t_commrec *cr,
FILE *fplog)
{
-#if defined GMX_OPENMP && defined GMX_MPI
+#if GMX_OPENMP && defined GMX_MPI
int nth_omp_min, nth_omp_max, ngpu;
char buf[1000];
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
const char *mpi_option = " (option -ntmpi)";
#else
const char *mpi_option = "";
/* This function should be called after thread-MPI (when configured) and
* OpenMP have been initialized. Check that here.
*/
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
GMX_RELEASE_ASSERT(nthreads_omp_faster_default >= nthreads_omp_mpi_ok_max, "Inconsistent OpenMP thread count default values");
GMX_RELEASE_ASSERT(hw_opt->nthreads_tmpi >= 1, "Must have at least one thread-MPI rank");
#endif
/* Check if we have more than 1 physical core, if detected,
* or more than 1 hardware thread if physical cores were not detected.
*/
-#if !(defined GMX_OPENMP) && !(defined GMX_MPI)
+#if !GMX_OPENMP && !(defined GMX_MPI)
if ((hwinfo->ncore > 1) ||
(hwinfo->ncore == 0 && hwinfo->nthreads_hw_avail > 1))
{
/* Check restrictions on the user supplied options before modifying them.
* TODO: Put the user values in a const struct and preserve them.
*/
-#ifndef GMX_THREAD_MPI
+#if !GMX_THREAD_MPI
if (hw_opt->nthreads_tot > 0)
{
gmx_fatal(FARGS, "Setting the total number of threads is only supported with thread-MPI and GROMACS was compiled without thread-MPI");
*/
gmx_parse_gpu_ids(&hw_opt->gpu_opt);
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
if (hw_opt->gpu_opt.n_dev_use > 0 && hw_opt->nthreads_tmpi == 0)
{
/* Set the number of MPI threads equal to the number of GPUs */
/* Checks we can do when we know the thread-MPI rank count */
void check_and_update_hw_opt_3(gmx_hw_opt_t gmx_unused *hw_opt)
{
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
GMX_RELEASE_ASSERT(hw_opt->nthreads_tmpi >= 1, "Must have at least one thread-MPI rank");
/* If the user set the total number of threads on the command line
//! MPI variable for use in pressure scaling
tMPI_Thread_mutex_t deform_init_box_mutex = TMPI_THREAD_MUTEX_INITIALIZER;
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
/* The minimum number of atoms per tMPI thread. With fewer atoms than this,
* the number of threads will get lowered.
*/
gmx_check_thread_affinity_set(fplog, cr,
hw_opt, hwinfo->nthreads_hw_avail, FALSE);
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
if (SIMMASTER(cr))
{
if (npme > 0 && hw_opt->nthreads_tmpi <= 0)
#ifndef GMX_MPI
"but %s was compiled without threads or MPI enabled"
#else
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
"but the number of threads (option -nt) is 1"
#else
"but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec"
}
md_print_info(cr, fplog, "Using %d MPI %s\n",
cr->nnodes,
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
cr->nnodes == 1 ? "thread" : "threads"
#else
cr->nnodes == 1 ? "process" : "processes"
done_ed(&ed);
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
/* we need to join all threads. The sub-threads join when they
exit this function, but the master thread needs to be told to
wait for that. */
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
namespace
{
-#if defined(GMX_THREAD_MPI) || defined(DOXYGEN)
+#if GMX_THREAD_MPI || defined(DOXYGEN)
//! Number of tMPI threads for child mdrun call.
int g_numThreads = 1;
#endif
-#if defined(GMX_OPENMP) || defined(DOXYGEN)
+#if GMX_OPENMP || defined(DOXYGEN)
//! Number of OpenMP threads for child mdrun call.
int g_numOpenMPThreads = 1;
#endif
GMX_TEST_OPTIONS(MdrunTestOptions, options)
{
GMX_UNUSED_VALUE(options);
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
options->addOption(IntegerOption("nt").store(&g_numThreads)
.description("Number of thread-MPI threads/ranks for child mdrun calls"));
#endif
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
options->addOption(IntegerOption("nt_omp").store(&g_numOpenMPThreads)
.description("Number of OpenMP threads for child mdrun calls"));
#endif
edrFileName_(fixture_->fileManager_.getTemporaryFilePath(".edr")),
nsteps_(-2)
{
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
GMX_RELEASE_ASSERT(gmx_mpi_initialized(), "MPI system not initialized for mdrun tests");
#endif
}
SimulationRunner::callGrompp()
{
int returnValue = 0;
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
// When compiled with external MPI, we're trying to run mdrun with
// MPI, but we need to make sure that we only do grompp on one
// rank
{
returnValue = callGromppOnThisRank();
}
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
// Make sure rank zero has written the .tpr file before other
// ranks try to read it. Thread-MPI and serial do this just fine
// on their own.
#ifdef GMX_MPI
# if GMX_GPU != GMX_GPU_NONE
-# ifdef GMX_THREAD_MPI
+# if GMX_THREAD_MPI
int numGpusNeeded = g_numThreads;
# else /* Must be real MPI */
int numGpusNeeded = gmx_node_num();
# endif
#endif
-#ifdef GMX_THREAD_MPI
+#if GMX_THREAD_MPI
caller.addOption("-ntmpi", g_numThreads);
#endif
-#ifdef GMX_OPENMP
+#if GMX_OPENMP
caller.addOption("-ntomp", g_numOpenMPThreads);
#endif
-#if defined GMX_GPU
+#if GMX_GPU != GMX_GPU_NONE
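+/* GMX_GPU is always defined, to one of the GMX_GPU_* values, so the old
+ * "#if defined GMX_GPU" was always true once config.h was included; it has
+ * to be compared against GMX_GPU_NONE instead. */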
/* TODO Ideally, with real MPI, we could call
* gmx_collect_hardware_mpi() here and find out how many nodes
* mdrun will run on. For now, we assume that we're running on one
* node regardless of the number of ranks, because that's true in
* Jenkins and for most developers running the tests. */
int numberOfNodes = 1;
-#if defined GMX_THREAD_MPI
+#if GMX_THREAD_MPI
    /* Can't use gmx_node_num() because it is only valid after the thread-MPI threads have been spawned */
int numberOfRanks = g_numThreads;
-#elif defined GMX_LIB_MPI
+#elif GMX_LIB_MPI
int numberOfRanks = gmx_node_num();
#else
int numberOfRanks = 1;
MdrunTestFixtureBase::MdrunTestFixtureBase()
{
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
GMX_RELEASE_ASSERT(gmx_mpi_initialized(), "MPI system not initialized for mdrun tests");
#endif
}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/* Note: not all preprocessor implementations nest macro expansions
   the same way (or at all), so trying to duplicate less code would risk breakage. */
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
INSTANTIATE_TEST_CASE_P(InNvt, MultiSimTest,
::testing::Values("pcoupl = no"));
#else
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
/* Note: not all preprocessor implementations nest macro expansions
   the same way (or at all), so trying to duplicate less code would risk breakage. */
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
INSTANTIATE_TEST_CASE_P(WithDifferentControlVariables, ReplicaExchangeTest,
::testing::Values("pcoupl = no", "pcoupl = Berendsen"));
#else
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "gromacs/utility/gmxmpi.h"
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
#include <memory>
#include <sstream>
//! \cond internal
void gmx::test::initMPIOutput()
{
-#ifdef GMX_LIB_MPI
+#if GMX_LIB_MPI
int size, rank;
MPI_Comm_size(MPI_COMM_WORLD, &size);