#include "gromacs/topology/topology.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
+#include "gromacs/utility/logger.h"
+#include "gromacs/utility/stringutil.h"
/* DISCLAIMER: All the atom count and thread numbers below are heuristic. */
int get_nthreads_mpi(const gmx_hw_info_t *hwinfo,
gmx_hw_opt_t *hw_opt,
const t_inputrec *inputrec,
const gmx_mtop_t *mtop,
- const t_commrec *cr,
- FILE *fplog,
+ const gmx::MDLogger &mdlog,
- gmx_bool bUseGpu)
+ gmx_bool bUseGpu,
+ bool doMembed)
{
int nthreads_hw, nthreads_tot_max, nrank, ngpu;
int min_atoms_per_mpi_rank;
const gmx::CpuInfo &cpuInfo = *hwinfo->cpuInfo;
const gmx::HardwareTopology &hwTop = *hwinfo->hardwareTopology;
- /* Check if an algorithm does not support parallel simulation. */
- if (inputrec->eI == eiLBFGS ||
- inputrec->coulombtype == eelEWALD)
{
- md_print_warn(cr, fplog, "The integration or electrostatics algorithm doesn't support parallel runs. Using a single thread-MPI rank.\n");
- if (hw_opt->nthreads_tmpi > 1)
+ /* Check if an algorithm does not support parallel simulation. */
+ // TODO This might work better if e.g. implemented algorithms
+ // had to define a function that returns such requirements,
+ // and a description string.
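+ // (A sketch of the interface SingleRankChecker is assumed to provide,
+ // inferred from the calls below rather than from its definition:
+ // applyConstraint() records a description string whenever its condition
+ // holds, mustUseOneRank() reports whether any constraint fired, and
+ // getMessage() joins the recorded descriptions into one user-facing string.)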
+ SingleRankChecker checker;
+ checker.applyConstraint(inputrec->eI == eiLBFGS, "L-BFGS minimization");
+ checker.applyConstraint(inputrec->coulombtype == eelEWALD, "Plain Ewald electrostatics");
+ checker.applyConstraint(doMembed, "Membrane embedding");
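+ // Any future single-rank-only feature can be registered the same way, e.g.
+ // checker.applyConstraint(someNewFlag, "Some new feature");
+ // where someNewFlag is an illustrative name, not an existing mdrun option.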
+ if (checker.mustUseOneRank())
{
- gmx_fatal(FARGS, "You asked for more than 1 thread-MPI rank, but an algorithm doesn't support that");
+ std::string message = checker.getMessage();
+ if (hw_opt->nthreads_tmpi > 1)
+ {
+ gmx_fatal(FARGS, "%s However, you asked for more than 1 thread-MPI rank, so mdrun cannot continue. Choose a single rank, or a different algorithm.", message.c_str());
+ }
+ GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("%s Choosing to use only a single thread-MPI rank.", message.c_str());
+ return 1;
}
-
- return 1;
}
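+ /* Beyond this point the rank count is chosen heuristically (see the
+  * disclaimer above); a user-specified hw_opt->nthreads_tmpi is presumably
+  * honored directly by the branch below. */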
if (hw_opt->nthreads_tmpi > 0)