*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2011-2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2011-2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "gromacs/fileio/tpxio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
-#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/device_stream_manager.h"
-#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/hardware/cpuinfo.h"
#include "gromacs/hardware/detecthardware.h"
+#include "gromacs/hardware/device_management.h"
+#include "gromacs/hardware/hardwaretopology.h"
#include "gromacs/hardware/printhardware.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
+#include "gromacs/mdlib/gpuforcereduction.h"
#include "gromacs/mdlib/makeconstraints.h"
#include "gromacs/mdlib/md_support.h"
#include "gromacs/mdlib/mdatoms.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdrun/mdmodules.h"
#include "gromacs/mdrun/simulationcontext.h"
+#include "gromacs/mdrun/simulationinput.h"
+#include "gromacs/mdrun/simulationinputhandle.h"
#include "gromacs/mdrunutility/handlerestart.h"
#include "gromacs/mdrunutility/logging.h"
#include "gromacs/mdrunutility/multisim.h"
#include "gromacs/mdrunutility/printtime.h"
#include "gromacs/mdrunutility/threadaffinity.h"
+#include "gromacs/mdtypes/checkpointdata.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/fcdata.h"
#include "replicaexchange.h"
#include "simulatorbuilder.h"
-#if GMX_FAHCORE
-# include "corewrap.h"
-#endif
-
namespace gmx
{
// Suppress false-positive compiler warnings claiming that these
// getenv results are ignored when clearly they are used.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-result"
- devFlags.enableGpuBufferOps = (getenv("GMX_USE_GPU_BUFFER_OPS") != nullptr)
- && (GMX_GPU == GMX_GPU_CUDA) && useGpuForNonbonded;
- devFlags.forceGpuUpdateDefault = (getenv("GMX_FORCE_UPDATE_DEFAULT_GPU") != nullptr);
- devFlags.enableGpuHaloExchange =
- (getenv("GMX_GPU_DD_COMMS") != nullptr && GMX_THREAD_MPI && (GMX_GPU == GMX_GPU_CUDA));
+
+ devFlags.enableGpuBufferOps =
+ GMX_GPU_CUDA && useGpuForNonbonded && (getenv("GMX_USE_GPU_BUFFER_OPS") != nullptr);
+ devFlags.enableGpuHaloExchange = GMX_GPU_CUDA && GMX_THREAD_MPI && getenv("GMX_GPU_DD_COMMS") != nullptr;
+ devFlags.forceGpuUpdateDefault = (getenv("GMX_FORCE_UPDATE_DEFAULT_GPU") != nullptr) || GMX_FAHCORE;
devFlags.enableGpuPmePPComm =
- (getenv("GMX_GPU_PME_PP_COMMS") != nullptr && GMX_THREAD_MPI && (GMX_GPU == GMX_GPU_CUDA));
+ GMX_GPU_CUDA && GMX_THREAD_MPI && getenv("GMX_GPU_PME_PP_COMMS") != nullptr;
+
#pragma GCC diagnostic pop
if (devFlags.enableGpuBufferOps)
{
if (pmeRunMode == PmeRunMode::GPU)
{
+ if (!devFlags.enableGpuBufferOps)
+ {
+ GMX_LOG(mdlog.warning)
+ .asParagraph()
+ .appendTextFormatted(
+ "Enabling GPU buffer operations required by GMX_GPU_PME_PP_COMMS "
+ "(equivalent with GMX_USE_GPU_BUFFER_OPS=1).");
+ devFlags.enableGpuBufferOps = true;
+ }
GMX_LOG(mdlog.warning)
.asParagraph()
.appendTextFormatted(
newRunner.hw_opt = hw_opt;
newRunner.filenames = filenames;
+ newRunner.hwinfo_ = hwinfo_;
newRunner.oenv = oenv;
newRunner.mdrunOptions = mdrunOptions;
newRunner.domdecOptions = domdecOptions;
newRunner.pforce = pforce;
// Give the spawned thread the newly created valid communicator
// for the simulation.
- newRunner.communicator = MPI_COMM_WORLD;
- newRunner.ms = ms;
- newRunner.startingBehavior = startingBehavior;
- newRunner.stopHandlerBuilder_ = std::make_unique<StopHandlerBuilder>(*stopHandlerBuilder_);
+ newRunner.libraryWorldCommunicator = MPI_COMM_WORLD;
+ newRunner.simulationCommunicator = MPI_COMM_WORLD;
+ newRunner.ms = ms;
+ newRunner.startingBehavior = startingBehavior;
+ newRunner.stopHandlerBuilder_ = std::make_unique<StopHandlerBuilder>(*stopHandlerBuilder_);
+ newRunner.inputHolder_ = inputHolder_;
threadMpiMdrunnerAccessBarrier();
// Give the master thread the newly created valid communicator for
// the simulation.
- communicator = MPI_COMM_WORLD;
+ libraryWorldCommunicator = MPI_COMM_WORLD;
+ simulationCommunicator = MPI_COMM_WORLD;
threadMpiMdrunnerAccessBarrier();
#else
GMX_UNUSED_VALUE(numThreadsToLaunch);
gmx_wallcycle_t wcycle;
gmx_walltime_accounting_t walltime_accounting = nullptr;
MembedHolder membedHolder(filenames.size(), filenames.data());
- gmx_hw_info_t* hwinfo = nullptr;
/* CAUTION: threads may be started later on in this function, so
cr doesn't reflect the final parallel state right now */
{
fplog = gmx_fio_getfp(logFileHandle);
}
- const bool isSimulationMasterRank = findIsSimulationMasterRank(ms, communicator);
+ const bool isSimulationMasterRank = findIsSimulationMasterRank(ms, simulationCommunicator);
gmx::LoggerOwner logOwner(buildLogger(fplog, isSimulationMasterRank));
gmx::MDLogger mdlog(logOwner.logger());
- // TODO The thread-MPI master rank makes a working
- // PhysicalNodeCommunicator here, but it gets rebuilt by all ranks
- // after the threads have been launched. This works because no use
- // is made of that communicator until after the execution paths
- // have rejoined. But it is likely that we can improve the way
- // this is expressed, e.g. by expressly running detection only the
- // master rank for thread-MPI, rather than relying on the mutex
- // and reference count.
- PhysicalNodeCommunicator physicalNodeComm(communicator, gmx_physicalnode_id_hash());
- hwinfo = gmx_detect_hardware(mdlog, physicalNodeComm);
+ gmx_print_detected_hardware(fplog, isSimulationMasterRank && isMasterSim(ms), mdlog, hwinfo_);
- gmx_print_detected_hardware(fplog, isSimulationMasterRank && isMasterSim(ms), mdlog, hwinfo);
-
- std::vector<int> gpuIdsToUse = makeGpuIdsToUse(hwinfo->gpu_info, hw_opt.gpuIdsAvailable);
+ std::vector<int> gpuIdsToUse = makeGpuIdsToUse(hwinfo_->deviceInfoList, hw_opt.gpuIdsAvailable);
+ const int numDevicesToUse = gmx::ssize(gpuIdsToUse);
// Print citation requests after all software/hardware printing
pleaseCiteGromacs(fplog);
- // TODO Replace this by unique_ptr once t_inputrec is C++
- t_inputrec inputrecInstance;
- t_inputrec* inputrec = nullptr;
- std::unique_ptr<t_state> globalState;
+ // Note: legacy program logic relies on checking whether these pointers are assigned.
+ // Objects may or may not be allocated later.
+ std::unique_ptr<t_inputrec> inputrec;
+ std::unique_ptr<t_state> globalState;
auto partialDeserializedTpr = std::make_unique<PartialDeserializedTprFile>();
if (isSimulationMasterRank)
{
+ // Allocate objects to be initialized by later function calls.
/* Only the master rank has the global state */
globalState = std::make_unique<t_state>();
+ inputrec = std::make_unique<t_inputrec>();
/* Read (nearly) all data required for the simulation
* and keep the partly serialized tpr contents to send to other ranks later
*/
- *partialDeserializedTpr = read_tpx_state(ftp2fn(efTPR, filenames.size(), filenames.data()),
- &inputrecInstance, globalState.get(), &mtop);
- inputrec = &inputrecInstance;
+ applyGlobalSimulationState(*inputHolder_.get(), partialDeserializedTpr.get(),
+ globalState.get(), inputrec.get(), &mtop);
}
/* Check and update the hardware options for internal consistency */
checkAndUpdateHardwareOptions(mdlog, &hw_opt, isSimulationMasterRank, domdecOptions.numPmeRanks,
- inputrec);
+ inputrec.get());
if (GMX_THREAD_MPI && isSimulationMasterRank)
{
// the number of GPUs to choose the number of ranks.
auto canUseGpuForNonbonded = buildSupportsNonbondedOnGpu(nullptr);
useGpuForNonbonded = decideWhetherToUseGpusForNonbondedWithThreadMpi(
- nonbondedTarget, gpuIdsToUse, userGpuTaskAssignment, emulateGpuNonbonded,
+ nonbondedTarget, numDevicesToUse, userGpuTaskAssignment, emulateGpuNonbonded,
canUseGpuForNonbonded,
gpuAccelerationOfNonbondedIsUseful(mdlog, *inputrec, GMX_THREAD_MPI),
hw_opt.nthreads_tmpi);
useGpuForPme = decideWhetherToUseGpusForPmeWithThreadMpi(
- useGpuForNonbonded, pmeTarget, gpuIdsToUse, userGpuTaskAssignment, *hwinfo,
- *inputrec, mtop, hw_opt.nthreads_tmpi, domdecOptions.numPmeRanks);
+ useGpuForNonbonded, pmeTarget, pmeFftTarget, numDevicesToUse, userGpuTaskAssignment,
+ *hwinfo_, *inputrec, hw_opt.nthreads_tmpi, domdecOptions.numPmeRanks);
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
* prevent any possible subsequent checks from working
* correctly. */
hw_opt.nthreads_tmpi =
- get_nthreads_mpi(hwinfo, &hw_opt, gpuIdsToUse, useGpuForNonbonded, useGpuForPme,
- inputrec, &mtop, mdlog, membedHolder.doMembed());
+ get_nthreads_mpi(hwinfo_, &hw_opt, numDevicesToUse, useGpuForNonbonded, useGpuForPme,
+ inputrec.get(), &mtop, mdlog, membedHolder.doMembed());
// Now start the threads for thread MPI.
spawnThreads(hw_opt.nthreads_tmpi);
// The spawned threads enter mdrunner() and execution of
// master and spawned threads joins at the end of this block.
- physicalNodeComm = PhysicalNodeCommunicator(communicator, gmx_physicalnode_id_hash());
}
- GMX_RELEASE_ASSERT(communicator == MPI_COMM_WORLD, "Must have valid world communicator");
- CommrecHandle crHandle = init_commrec(communicator, ms);
+ GMX_RELEASE_ASSERT(ms || simulationCommunicator != MPI_COMM_NULL,
+ "Must have valid communicator unless running a multi-simulation");
+ CommrecHandle crHandle = init_commrec(simulationCommunicator);
t_commrec* cr = crHandle.get();
GMX_RELEASE_ASSERT(cr != nullptr, "Must have valid commrec");
+ PhysicalNodeCommunicator physicalNodeComm(libraryWorldCommunicator, gmx_physicalnode_id_hash());
+
+ // If we detected the topology on this system, double-check that it makes sense
+ if (hwinfo_->hardwareTopology->isThisSystem())
+ {
+ hardwareTopologyDoubleCheckDetection(mdlog, *hwinfo_->hardwareTopology);
+ }
+
if (PAR(cr))
{
/* now broadcast everything to the non-master nodes/threads: */
if (!isSimulationMasterRank)
{
- inputrec = &inputrecInstance;
+ // Until now, only the master rank has a non-null pointer.
+ // On non-master ranks, allocate the object that will receive data in the following call.
+ inputrec = std::make_unique<t_inputrec>();
}
- init_parallel(cr->mpi_comm_mygroup, MASTER(cr), inputrec, &mtop, partialDeserializedTpr.get());
+ init_parallel(cr->mpiDefaultCommunicator, MASTER(cr), inputrec.get(), &mtop,
+ partialDeserializedTpr.get());
}
GMX_RELEASE_ASSERT(inputrec != nullptr, "All ranks should have a valid inputrec now");
partialDeserializedTpr.reset(nullptr);
bool useGpuForPme = false;
bool useGpuForBonded = false;
bool useGpuForUpdate = false;
- bool gpusWereDetected = hwinfo->ngpu_compatible_tot > 0;
+ bool gpusWereDetected = hwinfo_->ngpu_compatible_tot > 0;
try
{
// It's possible that there are different numbers of GPUs on
nonbondedTarget, userGpuTaskAssignment, emulateGpuNonbonded, canUseGpuForNonbonded,
gpuAccelerationOfNonbondedIsUseful(mdlog, *inputrec, !GMX_THREAD_MPI), gpusWereDetected);
useGpuForPme = decideWhetherToUseGpusForPme(
- useGpuForNonbonded, pmeTarget, userGpuTaskAssignment, *hwinfo, *inputrec, mtop,
- cr->nnodes, domdecOptions.numPmeRanks, gpusWereDetected);
- auto canUseGpuForBonded = buildSupportsGpuBondeds(nullptr)
- && inputSupportsGpuBondeds(*inputrec, mtop, nullptr);
- useGpuForBonded = decideWhetherToUseGpusForBonded(
- useGpuForNonbonded, useGpuForPme, bondedTarget, canUseGpuForBonded,
- EVDW_PME(inputrec->vdwtype), EEL_PME_EWALD(inputrec->coulombtype),
- domdecOptions.numPmeRanks, gpusWereDetected);
+ useGpuForNonbonded, pmeTarget, pmeFftTarget, userGpuTaskAssignment, *hwinfo_,
+ *inputrec, cr->sizeOfDefaultCommunicator, domdecOptions.numPmeRanks, gpusWereDetected);
+ useGpuForBonded = decideWhetherToUseGpusForBonded(useGpuForNonbonded, useGpuForPme,
+ bondedTarget, *inputrec, mtop,
+ domdecOptions.numPmeRanks, gpusWereDetected);
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
manageDevelopmentFeatures(mdlog, useGpuForNonbonded, pmeRunMode);
const bool useModularSimulator =
- checkUseModularSimulator(false, inputrec, doRerun, mtop, ms, replExParams, nullptr,
- doEssentialDynamics, membedHolder.doMembed());
+ checkUseModularSimulator(false, inputrec.get(), doRerun, mtop, ms, replExParams,
+ nullptr, doEssentialDynamics, membedHolder.doMembed());
// Build restraints.
// TODO: hide restraint implementation details from Mdrunner.
if (fplog != nullptr)
{
- pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
+ pr_inputrec(fplog, 0, "Input Parameters", inputrec.get(), FALSE);
fprintf(fplog, "\n");
}
}
/* now make sure the state is initialized and propagated */
- set_state_entries(globalState.get(), inputrec, useModularSimulator);
+ set_state_entries(globalState.get(), inputrec.get(), useModularSimulator);
}
/* NM and TPI parallelize over force/energy calculations, not atoms,
{
globalState = std::make_unique<t_state>();
}
- broadcastStateWithoutDynamics(cr->mpi_comm_mygroup, DOMAINDECOMP(cr), PAR(cr), globalState.get());
+ broadcastStateWithoutDynamics(cr->mpiDefaultCommunicator, DOMAINDECOMP(cr), PAR(cr),
+ globalState.get());
}
/* A parallel command line option consistency check that we can
{
if (domdecOptions.numPmeRanks > 0)
{
- gmx_fatal_collective(FARGS, cr->mpi_comm_mysim, MASTER(cr),
+ gmx_fatal_collective(FARGS, cr->mpiDefaultCommunicator, MASTER(cr),
"PME-only ranks are requested, but the system does not use PME "
"for electrostatics or LJ");
}
}
}
-#if GMX_FAHCORE
- if (MASTER(cr))
- {
- fcRegisterSteps(inputrec->nsteps, inputrec->init_step);
- }
-#endif
-
/* NMR restraints must be initialized before load_checkpoint,
* since with time averaging the history is added to t_state.
* For proper consistency check we therefore need to extend
/* This needs to be called before read_checkpoint to extend the state */
t_disresdata* disresdata;
snew(disresdata, 1);
- init_disres(fplog, &mtop, inputrec, DisResRunMode::MDRun, MASTER(cr) ? DDRole::Master : DDRole::Agent,
+ init_disres(fplog, &mtop, inputrec.get(), DisResRunMode::MDRun,
+ MASTER(cr) ? DDRole::Master : DDRole::Agent,
PAR(cr) ? NumRanks::Multiple : NumRanks::Single, cr->mpi_comm_mysim, ms, disresdata,
globalState.get(), replExParams.exchangeInterval > 0);
t_oriresdata* oriresdata;
snew(oriresdata, 1);
- init_orires(fplog, &mtop, inputrec, cr, ms, globalState.get(), oriresdata);
+ init_orires(fplog, &mtop, inputrec.get(), cr, ms, globalState.get(), oriresdata);
+
+ auto deform = prepareBoxDeformation(
+ globalState != nullptr ? globalState->box : box, MASTER(cr) ? DDRole::Master : DDRole::Agent,
+ PAR(cr) ? NumRanks::Multiple : NumRanks::Single, cr->mpi_comm_mygroup, *inputrec);
- auto deform = prepareBoxDeformation(globalState->box, MASTER(cr) ? DDRole::Master : DDRole::Agent,
- PAR(cr) ? NumRanks::Multiple : NumRanks::Single,
- cr->mpi_comm_mygroup, *inputrec);
+#if GMX_FAHCORE
+ /* We have to remember the generation's first step before reading checkpoint.
+ This way, we can report to the F@H core both the generation's first step
+ and the restored first step, thus making it able to distinguish between
+ an interruption/resume and start of the n-th generation simulation.
+ Having this information, the F@H core can correctly calculate and report
+ the progress.
+ */
+ int gen_first_step = 0;
+ if (MASTER(cr))
+ {
+ gen_first_step = inputrec->init_step;
+ }
+#endif
ObservablesHistory observablesHistory = {};
+ auto modularSimulatorCheckpointData = std::make_unique<ReadCheckpointDataHolder>();
if (startingBehavior != StartingBehavior::NewSimulation)
{
/* Check if checkpoint file exists before doing continuation.
inputrec->nsteps = -1;
}
- load_checkpoint(opt2fn_master("-cpi", filenames.size(), filenames.data(), cr),
- logFileHandle, cr, domdecOptions.numCells, inputrec, globalState.get(),
- &observablesHistory, mdrunOptions.reproducible, mdModules_->notifier());
+ // Finish applying initial simulation state information from external sources on all ranks.
+ // Reconcile checkpoint file data with Mdrunner state established up to this point.
+ applyLocalState(*inputHolder_.get(), logFileHandle, cr, domdecOptions.numCells,
+ inputrec.get(), globalState.get(), &observablesHistory,
+ mdrunOptions.reproducible, mdModules_->notifier(),
+ modularSimulatorCheckpointData.get(), useModularSimulator);
+ // TODO: (#3652) Synchronize filesystem state, SimulationInput contents, and program
+ // invariants
+ // on all code paths.
+ // Write checkpoint or provide hook to update SimulationInput.
+ // If there was a checkpoint file, SimulationInput contains more information
+ // than if there wasn't. At this point, we have synchronized the in-memory
+ // state with the filesystem state only for restarted simulations. We should
+ // be calling applyLocalState unconditionally and expect that the completeness
+ // of SimulationInput is not dependent on its creation method.
if (startingBehavior == StartingBehavior::RestartWithAppending && logFileHandle)
{
}
}
+#if GMX_FAHCORE
+ if (MASTER(cr))
+ {
+ fcRegisterSteps(inputrec->nsteps + inputrec->init_step, gen_first_step);
+ }
+#endif
+
if (mdrunOptions.numStepsCommandline > -2)
{
GMX_LOG(mdlog.info)
"file field.");
}
/* override nsteps with value set on the commandline */
- override_nsteps_cmdline(mdlog, mdrunOptions.numStepsCommandline, inputrec);
+ override_nsteps_cmdline(mdlog, mdrunOptions.numStepsCommandline, inputrec.get());
- if (SIMMASTER(cr))
+ if (isSimulationMasterRank)
{
copy_mat(globalState->box, box);
}
if (PAR(cr))
{
- gmx_bcast(sizeof(box), box, cr->mpi_comm_mygroup);
+ gmx_bcast(sizeof(box), box, cr->mpiDefaultCommunicator);
}
if (inputrec->cutoff_scheme != ecutsVERLET)
"Verlet scheme, or use an earlier version of GROMACS if necessary.");
}
/* Update rlist and nstlist. */
- prepare_verlet_scheme(fplog, cr, inputrec, nstlist_cmdline, &mtop, box,
+ /* Note: prepare_verlet_scheme is calling increaseNstlist(...), which (while attempting to
+ * increase rlist) tries to check if the newly chosen value fits with the DD scheme. As this is
+ * run before any DD scheme is set up, this check is never executed. See #3334 for more details.
+ */
+ prepare_verlet_scheme(fplog, cr, inputrec.get(), nstlist_cmdline, &mtop, box,
useGpuForNonbonded || (emulateGpuNonbonded == EmulateGpuNonbonded::Yes),
- *hwinfo->cpuInfo);
+ *hwinfo_->cpuInfo);
- const bool prefer1DAnd1PulseDD = (devFlags.enableGpuHaloExchange && useGpuForNonbonded);
// This builder is necessary while we have multi-part construction
// of DD. Before DD is constructed, we use the existence of
// the builder object to indicate that further construction of DD
if (useDomainDecomposition)
{
ddBuilder = std::make_unique<DomainDecompositionBuilder>(
- mdlog, cr, domdecOptions, mdrunOptions, prefer1DAnd1PulseDD, mtop, *inputrec, box,
+ mdlog, cr, domdecOptions, mdrunOptions, mtop, *inputrec, box,
positionsFromStatePointer(globalState.get()));
}
else
{
/* PME, if used, is done on all nodes with 1D decomposition */
- cr->npmenodes = 0;
- cr->duty = (DUTY_PP | DUTY_PME);
+ cr->nnodes = cr->sizeOfDefaultCommunicator;
+ cr->sim_nodeid = cr->rankInDefaultCommunicator;
+ cr->nodeid = cr->rankInDefaultCommunicator;
+ cr->npmenodes = 0;
+ cr->duty = (DUTY_PP | DUTY_PME);
if (inputrec->pbcType == PbcType::Screw)
{
// Produce the task assignment for this rank - done after DD is constructed
GpuTaskAssignments gpuTaskAssignments = GpuTaskAssignmentsBuilder::build(
- gpuIdsToUse, userGpuTaskAssignment, *hwinfo, communicator, physicalNodeComm,
+ gpuIdsToUse, userGpuTaskAssignment, *hwinfo_, simulationCommunicator, physicalNodeComm,
nonbondedTarget, pmeTarget, bondedTarget, updateTarget, useGpuForNonbonded,
useGpuForPme, thisRankHasDuty(cr, DUTY_PP),
// TODO cr->duty & DUTY_PME should imply that a PME
// timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?)
bool useTiming = true;
- if (GMX_GPU == GMX_GPU_CUDA)
+
+ if (GMX_GPU_CUDA)
{
/* WARNING: CUDA timings are incorrect with multiple streams.
* This is the main reason why they are disabled by default.
// TODO: Consider turning on by default when we can detect nr of streams.
useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
}
- else if (GMX_GPU == GMX_GPU_OPENCL)
+ else if (GMX_GPU_OPENCL)
{
useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
}
ddBuilder.reset(nullptr);
// Note that local state still does not exist yet.
}
+ // Ensure that all atoms within the same update group are in the
+ // same periodic image. Otherwise, a simulation that did not use
+ // update groups (e.g. a single-rank simulation) cannot always be
+ // correctly restarted in a way that does use update groups
+ // (e.g. a multi-rank simulation).
+ if (isSimulationMasterRank)
+ {
+ const bool useUpdateGroups = cr->dd ? ddUsesUpdateGroups(*cr->dd) : false;
+ if (useUpdateGroups)
+ {
+ putUpdateGroupAtomsInSamePeriodicImage(*cr->dd, mtop, globalState->box, globalState->x);
+ }
+ }
// The GPU update is decided here because we need to know whether the constraints or
// SETTLEs can span accross the domain borders (i.e. whether or not update groups are
try
{
const bool useUpdateGroups = cr->dd ? ddUsesUpdateGroups(*cr->dd) : false;
+ const bool haveFrozenAtoms = inputrecFrozenAtoms(inputrec.get());
useGpuForUpdate = decideWhetherToUseGpuForUpdate(
useDomainDecomposition, useUpdateGroups, pmeRunMode, domdecOptions.numPmeRanks > 0,
useGpuForNonbonded, updateTarget, gpusWereDetected, *inputrec, mtop,
doEssentialDynamics, gmx_mtop_ftype_count(mtop, F_ORIRES) > 0,
- replExParams.exchangeInterval > 0, doRerun, devFlags, mdlog);
+ replExParams.exchangeInterval > 0, haveFrozenAtoms, doRerun, devFlags, mdlog);
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
const bool printHostName = (cr->nnodes > 1);
gpuTaskAssignments.reportGpuUsage(mdlog, printHostName, useGpuForBonded, pmeRunMode, useGpuForUpdate);
+ const bool disableNonbondedCalculation = (getenv("GMX_NO_NONBONDED") != nullptr);
+ if (disableNonbondedCalculation)
+ {
+ /* turn off non-bonded calculations */
+ GMX_LOG(mdlog.warning)
+ .asParagraph()
+ .appendText(
+ "Found environment variable GMX_NO_NONBONDED.\n"
+ "Disabling nonbonded calculations.");
+ }
+
+ MdrunScheduleWorkload runScheduleWork;
+
+ bool useGpuDirectHalo = decideWhetherToUseGpuForHalo(
+ devFlags, havePPDomainDecomposition(cr), useGpuForNonbonded, useModularSimulator,
+ doRerun, EI_ENERGY_MINIMIZATION(inputrec->eI));
+
+ // Also populates the simulation constant workload description.
+ runScheduleWork.simulationWork = createSimulationWorkload(
+ *inputrec, disableNonbondedCalculation, devFlags, useGpuForNonbonded, pmeRunMode,
+ useGpuForBonded, useGpuForUpdate, useGpuDirectHalo);
+
std::unique_ptr<DeviceStreamManager> deviceStreamManager = nullptr;
if (deviceInfo != nullptr)
dd_setup_dlb_resource_sharing(cr, deviceId);
}
deviceStreamManager = std::make_unique<DeviceStreamManager>(
- *deviceInfo, useGpuForPme, useGpuForNonbonded, havePPDomainDecomposition(cr),
- useGpuForUpdate, useTiming);
+ *deviceInfo, havePPDomainDecomposition(cr), runScheduleWork.simulationWork, useTiming);
}
// If the user chose a task assignment, give them some hints
// where appropriate.
if (!userGpuTaskAssignment.empty())
{
- gpuTaskAssignments.logPerformanceHints(mdlog, ssize(gpuIdsToUse));
+ gpuTaskAssignments.logPerformanceHints(mdlog, numDevicesToUse);
}
if (PAR(cr))
.appendTextFormatted(
"This is simulation %d out of %d running as a composite GROMACS\n"
"multi-simulation job. Setup for this simulation:\n",
- ms->sim, ms->nsim);
+ ms->simulationIndex_, ms->numSimulations_);
}
GMX_LOG(mdlog.warning)
.appendTextFormatted("Using %d MPI %s\n", cr->nnodes,
// that existing affinity setting was from OpenMP or something
// else, so we run this code both before and after we initialize
// the OpenMP support.
- gmx_check_thread_affinity_set(mdlog, &hw_opt, hwinfo->nthreads_hw_avail, FALSE);
+ gmx_check_thread_affinity_set(mdlog, &hw_opt, hwinfo_->nthreads_hw_avail, FALSE);
/* Check and update the number of OpenMP threads requested */
- checkAndUpdateRequestedNumOpenmpThreads(&hw_opt, *hwinfo, cr, ms, physicalNodeComm.size_,
+ checkAndUpdateRequestedNumOpenmpThreads(&hw_opt, *hwinfo_, cr, ms, physicalNodeComm.size_,
pmeRunMode, mtop, *inputrec);
- gmx_omp_nthreads_init(mdlog, cr, hwinfo->nthreads_hw_avail, physicalNodeComm.size_,
+ gmx_omp_nthreads_init(mdlog, cr, hwinfo_->nthreads_hw_avail, physicalNodeComm.size_,
hw_opt.nthreads_omp, hw_opt.nthreads_omp_pme, !thisRankHasDuty(cr, DUTY_PP));
// Enable FP exception detection, but not in
}
/* Now that we know the setup is consistent, check for efficiency */
- check_resource_division_efficiency(hwinfo, gpuTaskAssignments.thisRankHasAnyGpuTask(),
+ check_resource_division_efficiency(hwinfo_, gpuTaskAssignments.thisRankHasAnyGpuTask(),
mdrunOptions.ntompOptionIsSet, cr, mdlog);
/* getting number of PP/PME threads on this MPI / tMPI rank.
*/
const int numThreadsOnThisRank = thisRankHasDuty(cr, DUTY_PP) ? gmx_omp_nthreads_get(emntNonbonded)
: gmx_omp_nthreads_get(emntPME);
- checkHardwareOversubscription(numThreadsOnThisRank, cr->nodeid, *hwinfo->hardwareTopology,
+ checkHardwareOversubscription(numThreadsOnThisRank, cr->nodeid, *hwinfo_->hardwareTopology,
physicalNodeComm, mdlog);
// Enable Peer access between GPUs where available
// Only for DD, only master PP rank needs to perform setup, and only if thread MPI plus
// any of the GPU communication features are active.
if (DOMAINDECOMP(cr) && MASTER(cr) && thisRankHasDuty(cr, DUTY_PP) && GMX_THREAD_MPI
- && (devFlags.enableGpuHaloExchange || devFlags.enableGpuPmePPComm))
+ && (runScheduleWork.simulationWork.useGpuHaloExchange
+ || runScheduleWork.simulationWork.useGpuPmePpCommunication))
{
setupGpuDevicePeerAccess(gpuIdsToUse, mdlog);
}
* - which indicates that probably the OpenMP library has changed it
* since we first checked).
*/
- gmx_check_thread_affinity_set(mdlog, &hw_opt, hwinfo->nthreads_hw_avail, TRUE);
+ gmx_check_thread_affinity_set(mdlog, &hw_opt, hwinfo_->nthreads_hw_avail, TRUE);
int numThreadsOnThisNode, intraNodeThreadOffset;
analyzeThreadsOnThisNode(physicalNodeComm, numThreadsOnThisRank, &numThreadsOnThisNode,
&intraNodeThreadOffset);
/* Set the CPU affinity */
- gmx_set_thread_affinity(mdlog, cr, &hw_opt, *hwinfo->hardwareTopology, numThreadsOnThisRank,
+ gmx_set_thread_affinity(mdlog, cr, &hw_opt, *hwinfo_->hardwareTopology, numThreadsOnThisRank,
numThreadsOnThisNode, intraNodeThreadOffset, nullptr);
}
}
// Membrane embedding must be initialized before we call init_forcerec()
- membedHolder.initializeMembed(fplog, filenames.size(), filenames.data(), &mtop, inputrec,
+ membedHolder.initializeMembed(fplog, filenames.size(), filenames.data(), &mtop, inputrec.get(),
globalState.get(), cr, &mdrunOptions.checkpointOptions.period);
const bool thisRankHasPmeGpuTask = gpuTaskAssignments.thisRankHasPmeGpuTask();
/* Initiate forcerecord */
fr = new t_forcerec;
fr->forceProviders = mdModules_->initForceProviders();
- init_forcerec(fplog, mdlog, fr, inputrec, &mtop, cr, box,
+ init_forcerec(fplog, mdlog, fr, inputrec.get(), &mtop, cr, box,
opt2fn("-table", filenames.size(), filenames.data()),
opt2fn("-tablep", filenames.size(), filenames.data()),
opt2fns("-tableb", filenames.size(), filenames.data()), pforce);
// Dirty hack, for fixing disres and orires should be made mdmodules
- fr->listedForces->fcdata().disres = disresdata;
- fr->listedForces->fcdata().orires = oriresdata;
+ fr->fcdata->disres = disresdata;
+ fr->fcdata->orires = oriresdata;
// Save a handle to device stream manager to use elsewhere in the code
// TODO: Forcerec is not a correct place to store it.
fr->deviceStreamManager = deviceStreamManager.get();
- if (devFlags.enableGpuPmePPComm && !thisRankHasDuty(cr, DUTY_PME))
+ if (runScheduleWork.simulationWork.useGpuPmePpCommunication && !thisRankHasDuty(cr, DUTY_PME))
{
GMX_RELEASE_ASSERT(
deviceStreamManager != nullptr,
deviceStreamManager->stream(DeviceStreamType::PmePpTransfer));
}
- fr->nbv = Nbnxm::init_nb_verlet(mdlog, inputrec, fr, cr, *hwinfo, useGpuForNonbonded,
+ fr->nbv = Nbnxm::init_nb_verlet(mdlog, inputrec.get(), fr, cr, *hwinfo_,
+ runScheduleWork.simulationWork.useGpuNonbonded,
deviceStreamManager.get(), &mtop, box, wcycle);
// TODO: Move the logic below to a GPU bonded builder
- if (useGpuForBonded)
+ if (runScheduleWork.simulationWork.useGpuBonded)
{
GMX_RELEASE_ASSERT(deviceStreamManager != nullptr,
"GPU device stream manager should be valid in order to use GPU "
try
{
// TODO: This should be in the builder.
- GMX_RELEASE_ASSERT(!useGpuForPme || (deviceStreamManager != nullptr),
+ GMX_RELEASE_ASSERT(!runScheduleWork.simulationWork.useGpuPme
+ || (deviceStreamManager != nullptr),
"Device stream manager should be valid in order to use GPU "
"version of PME.");
GMX_RELEASE_ASSERT(
- !useGpuForPme || deviceStreamManager->streamIsValid(DeviceStreamType::Pme),
+ !runScheduleWork.simulationWork.useGpuPme
+ || deviceStreamManager->streamIsValid(DeviceStreamType::Pme),
"GPU PME stream should be valid in order to use GPU version of PME.");
- const DeviceContext* deviceContext =
- useGpuForPme ? &deviceStreamManager->context() : nullptr;
+ const DeviceContext* deviceContext = runScheduleWork.simulationWork.useGpuPme
+ ? &deviceStreamManager->context()
+ : nullptr;
const DeviceStream* pmeStream =
- useGpuForPme ? &deviceStreamManager->stream(DeviceStreamType::Pme) : nullptr;
-
- pmedata = gmx_pme_init(cr, getNumPmeDomains(cr->dd), inputrec, nChargePerturbed != 0,
- nTypePerturbed != 0, mdrunOptions.reproducible, ewaldcoeff_q,
- ewaldcoeff_lj, gmx_omp_nthreads_get(emntPME), pmeRunMode,
- nullptr, deviceContext, pmeStream, pmeGpuProgram.get(), mdlog);
+ runScheduleWork.simulationWork.useGpuPme
+ ? &deviceStreamManager->stream(DeviceStreamType::Pme)
+ : nullptr;
+
+ pmedata = gmx_pme_init(cr, getNumPmeDomains(cr->dd), inputrec.get(),
+ nChargePerturbed != 0, nTypePerturbed != 0,
+ mdrunOptions.reproducible, ewaldcoeff_q, ewaldcoeff_lj,
+ gmx_omp_nthreads_get(emntPME), pmeRunMode, nullptr,
+ deviceContext, pmeStream, pmeGpuProgram.get(), mdlog);
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
if (inputrec->bPull)
{
/* Initialize pull code */
- pull_work = init_pull(fplog, inputrec->pull, inputrec, &mtop, cr, &atomSets,
+ pull_work = init_pull(fplog, inputrec->pull.get(), inputrec.get(), &mtop, cr, &atomSets,
inputrec->fepvals->init_lambda);
if (inputrec->pull->bXOutAverage || inputrec->pull->bFOutAverage)
{
if (inputrec->bRot)
{
/* Initialize enforced rotation code */
- enforcedRotation =
- init_rot(fplog, inputrec, filenames.size(), filenames.data(), cr, &atomSets,
- globalState.get(), &mtop, oenv, mdrunOptions, startingBehavior);
+ enforcedRotation = init_rot(fplog, inputrec.get(), filenames.size(), filenames.data(),
+ cr, &atomSets, globalState.get(), &mtop, oenv, mdrunOptions,
+ startingBehavior);
}
t_swap* swap = nullptr;
if (inputrec->eSwapCoords != eswapNO)
{
/* Initialize ion swapping code */
- swap = init_swapcoords(fplog, inputrec,
+ swap = init_swapcoords(fplog, inputrec.get(),
opt2fn_master("-swap", filenames.size(), filenames.data(), cr),
&mtop, globalState.get(), &observablesHistory, cr, &atomSets,
oenv, mdrunOptions, startingBehavior);
gmx_enerdata_t enerd(mtop.groups.groups[SimulationAtomGroupType::EnergyOutput].size(),
inputrec->fepvals->n_lambda);
+ // cos acceleration is only supported by md, but older tpr
+ // files might still combine it with other integrators
+ GMX_RELEASE_ASSERT(inputrec->cos_accel == 0.0 || inputrec->eI == eiMD,
+ "cos_acceleration is only supported by integrator=md");
+
/* Kinetic energy data */
gmx_ekindata_t ekind;
init_ekindata(fplog, &mtop, &(inputrec->opts), &ekind, inputrec->cos_accel);
/* Set up interactive MD (IMD) */
auto imdSession =
- makeImdSession(inputrec, cr, wcycle, &enerd, ms, &mtop, mdlog,
+ makeImdSession(inputrec.get(), cr, wcycle, &enerd, ms, &mtop, mdlog,
MASTER(cr) ? globalState->x.rvec_array() : nullptr, filenames.size(),
filenames.data(), oenv, mdrunOptions.imdOptions, startingBehavior);
/* This call is not included in init_domain_decomposition mainly
* because fr->cginfo_mb is set later.
*/
- dd_init_bondeds(fplog, cr->dd, mtop, vsite.get(), inputrec,
+ dd_init_bondeds(fplog, cr->dd, mtop, vsite.get(), inputrec.get(),
domdecOptions.checkBondedInteractions, fr->cginfo_mb);
}
- // TODO This is not the right place to manage the lifetime of
- // this data structure, but currently it's the easiest way to
- // make it work.
- MdrunScheduleWorkload runScheduleWork;
- // Also populates the simulation constant workload description.
- runScheduleWork.simulationWork =
- createSimulationWorkload(*inputrec, useGpuForNonbonded, pmeRunMode, useGpuForBonded,
- useGpuForUpdate, devFlags.enableGpuBufferOps,
- devFlags.enableGpuHaloExchange, devFlags.enableGpuPmePPComm);
+ if (runScheduleWork.simulationWork.useGpuBufferOps)
+ {
+ fr->gpuForceReduction[gmx::AtomLocality::Local] = std::make_unique<gmx::GpuForceReduction>(
+ deviceStreamManager->context(),
+ deviceStreamManager->stream(gmx::DeviceStreamType::NonBondedLocal), wcycle);
+ fr->gpuForceReduction[gmx::AtomLocality::NonLocal] = std::make_unique<gmx::GpuForceReduction>(
+ deviceStreamManager->context(),
+ deviceStreamManager->stream(gmx::DeviceStreamType::NonBondedNonLocal), wcycle);
+ }
std::unique_ptr<gmx::StatePropagatorDataGpu> stateGpu;
if (gpusWereDetected
- && ((useGpuForPme && thisRankHasDuty(cr, DUTY_PME))
+ && ((runScheduleWork.simulationWork.useGpuPme && thisRankHasDuty(cr, DUTY_PME))
|| runScheduleWork.simulationWork.useGpuBufferOps))
{
GpuApiCallBehavior transferKind = (inputrec->eI == eiMD && !doRerun && !useModularSimulator)
constr.get(), enforcedRotation ? enforcedRotation->getLegacyEnfrot() : nullptr,
vsite.get()));
// TODO: Separate `fr` to a separate add, and make the `build` handle the coupling sensibly.
- simulatorBuilder.add(
- LegacyInput(static_cast<int>(filenames.size()), filenames.data(), inputrec, fr));
+ simulatorBuilder.add(LegacyInput(static_cast<int>(filenames.size()), filenames.data(),
+ inputrec.get(), fr));
simulatorBuilder.add(ReplicaExchangeParameters(replExParams));
simulatorBuilder.add(InteractiveMD(imdSession.get()));
simulatorBuilder.add(SimulatorModules(mdModules_->outputProvider(), mdModules_->notifier()));
// Todo move to an MDModule
simulatorBuilder.add(IonSwapping(swap));
simulatorBuilder.add(TopologyData(&mtop, mdAtoms.get()));
+ simulatorBuilder.add(BoxDeformationHandle(deform.get()));
+ simulatorBuilder.add(std::move(modularSimulatorCheckpointData));
// build and run simulator object based on user-input
- auto simulator =
- simulatorBuilder.build(useModularSimulator, deform.get(), swap, &mtop, mdAtoms.get());
+ auto simulator = simulatorBuilder.build(useModularSimulator);
simulator->run();
if (fr->pmePpCommGpu)
GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");
/* do PME only */
walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
- gmx_pmeonly(pmedata, cr, &nrnb, wcycle, walltime_accounting, inputrec, pmeRunMode,
+ gmx_pmeonly(pmedata, cr, &nrnb, wcycle, walltime_accounting, inputrec.get(), pmeRunMode,
deviceStreamManager.get());
}
/* Finish up, write some stuff
* if rerunMD, don't write last frame again
*/
- finish_run(fplog, mdlog, cr, inputrec, &nrnb, wcycle, walltime_accounting,
+ finish_run(fplog, mdlog, cr, inputrec.get(), &nrnb, wcycle, walltime_accounting,
fr ? fr->nbv.get() : nullptr, pmedata, EI_DYNAMICS(inputrec->eI) && !isMultiSim(ms));
// clean up cycle counter
}
// FIXME: this is only here to manually unpin mdAtoms->chargeA_ and state->x,
- // before we destroy the GPU context(s) in free_gpu().
+ // before we destroy the GPU context(s)
// Pinned buffers are associated with contexts in CUDA.
// As soon as we destroy GPU contexts after mdrunner() exits, these lines should go.
mdAtoms.reset(nullptr);
sfree(disresdata);
sfree(oriresdata);
- if (hwinfo->gpu_info.n_dev > 0)
+ if (!hwinfo_->deviceInfoList.empty())
{
/* stop the GPU profiler (only CUDA) */
stopGpuProfiler();
}
/* With tMPI we need to wait for all ranks to finish deallocation before
- * destroying the CUDA context in free_gpu() as some tMPI ranks may be sharing
+ * destroying the CUDA context as some tMPI ranks may be sharing
* GPU and context.
*
- * This is not a concern in OpenCL where we use one context per rank which
- * is freed in nbnxn_gpu_free().
+ * This is not a concern in OpenCL where we use one context per rank.
*
* Note: it is safe to not call the barrier on the ranks which do not use GPU,
* but it is easier and more futureproof to call it on the whole node.
{
physicalNodeComm.barrier();
}
-
- free_gpu(deviceInfo);
+ releaseDevice(deviceInfo);
/* Does what it says */
print_date_and_time(fplog, cr->nodeid, "Finished mdrun", gmx_gettime());
/* we need to join all threads. The sub-threads join when they
exit this function, but the master thread needs to be told to
wait for that. */
- if (PAR(cr) && MASTER(cr))
+ if (MASTER(cr))
{
tMPI_Finalize();
}
Mdrunner::Mdrunner(Mdrunner&&) noexcept = default;
-Mdrunner& Mdrunner::operator=(Mdrunner&& /*handle*/) noexcept = default;
+// BUGFREE_NOEXCEPT_STRING is presumably a constant expression used in the
+// noexcept specification to work around GCC bug 58265 with the CentOS 7
+// system compiler -- see the NOLINT annotation below. TODO confirm its
+// definition at the point where it is declared.
+//NOLINTNEXTLINE(performance-noexcept-move-constructor) working around GCC bug 58265 in CentOS 7
+Mdrunner& Mdrunner::operator=(Mdrunner&& /*handle*/) noexcept(BUGFREE_NOEXCEPT_STRING) = default;
class Mdrunner::BuilderImplementation
{
real forceWarningThreshold,
StartingBehavior startingBehavior);
+ void addHardwareDetectionResult(const gmx_hw_info_t* hwinfo);
+
void addDomdec(const DomdecOptions& options);
+ void addInput(SimulationInputHandle inputHolder);
+
void addVerletList(int nstlist);
void addReplicaExchange(const ReplicaExchangeParameters& params);
//! Command-line override for the duration of a neighbor list with the Verlet scheme.
int nstlist_ = 0;
+ //! World communicator, used for hardware detection and task assignment
+ MPI_Comm libraryWorldCommunicator_ = MPI_COMM_NULL;
+
//! Multisim communicator handle.
gmx_multisim_t* multiSimulation_;
//! mdrun communicator
- MPI_Comm communicator_ = MPI_COMM_NULL;
+ MPI_Comm simulationCommunicator_ = MPI_COMM_NULL;
//! Print a warning if any force is larger than this (in kJ/mol nm).
real forceWarningThreshold_ = -1;
//! The modules that comprise the functionality of mdrun.
std::unique_ptr<MDModules> mdModules_;
+ //! Detected hardware.
+ const gmx_hw_info_t* hwinfo_ = nullptr;
+
//! \brief Parallelism information.
gmx_hw_opt_t hardwareOptions_;
* \brief Builder for simulation stop signal handler.
*/
std::unique_ptr<StopHandlerBuilder> stopHandlerBuilder_ = nullptr;
+
+ /*!
+ * \brief Sources for initial simulation state.
+ *
+ * See issue #3652 for near-term refinements to the SimulationInput interface.
+ *
+ * See issue #3379 for broader discussion on API aspects of simulation inputs and outputs.
+ */
+ SimulationInputHandle inputHolder_;
};
Mdrunner::BuilderImplementation::BuilderImplementation(std::unique_ptr<MDModules> mdModules,
compat::not_null<SimulationContext*> context) :
mdModules_(std::move(mdModules))
{
- communicator_ = context->communicator_;
- multiSimulation_ = context->multiSimulation_.get();
+ // Copy the library-world and simulation communicator handles, and a
+ // non-owning pointer to the (possibly null) multi-simulation record,
+ // from the SimulationContext into the builder state.
+ libraryWorldCommunicator_ = context->libraryWorldCommunicator_;
+ simulationCommunicator_ = context->simulationCommunicator_;
+ multiSimulation_ = context->multiSimulation_.get();
}
Mdrunner::BuilderImplementation::~BuilderImplementation() = default;
newRunner.filenames = filenames_;
- newRunner.communicator = communicator_;
+ newRunner.libraryWorldCommunicator = libraryWorldCommunicator_;
+
+ newRunner.simulationCommunicator = simulationCommunicator_;
// nullptr is a valid value for the multisim handle
newRunner.ms = multiSimulation_;
+ if (hwinfo_)
+ {
+ newRunner.hwinfo_ = hwinfo_;
+ }
+ else
+ {
+ GMX_THROW(gmx::APIError(
+ "MdrunnerBuilder::addHardwareDetectionResult() is required before build()"));
+ }
+
+ if (inputHolder_)
+ {
+ newRunner.inputHolder_ = std::move(inputHolder_);
+ }
+ else
+ {
+ GMX_THROW(gmx::APIError("MdrunnerBuilder::addInput() is required before build()."));
+ }
+
// \todo Clarify ownership and lifetime management for gmx_output_env_t
// \todo Update sanity checking when output environment has clearly specified invariants.
// Initialization and default values for oenv are not well specified in the current version.
return newRunner;
}
+//! Store the hardware detection result for use when building the Mdrunner.
+// Non-owning: the caller retains ownership of *hwinfo. build() throws
+// APIError if this has not been called first.
+void Mdrunner::BuilderImplementation::addHardwareDetectionResult(const gmx_hw_info_t* hwinfo)
+{
+ hwinfo_ = hwinfo;
+}
+
void Mdrunner::BuilderImplementation::addNonBonded(const char* nbpu_opt)
{
nbpu_opt_ = nbpu_opt;
stopHandlerBuilder_ = std::move(builder);
}
+//! Take ownership of the handle to the source of initial simulation state.
+// build() throws APIError if no input has been registered.
+void Mdrunner::BuilderImplementation::addInput(SimulationInputHandle inputHolder)
+{
+ inputHolder_ = std::move(inputHolder);
+}
+
MdrunnerBuilder::MdrunnerBuilder(std::unique_ptr<MDModules> mdModules,
compat::not_null<SimulationContext*> context) :
impl_{ std::make_unique<Mdrunner::BuilderImplementation>(std::move(mdModules), context) }
MdrunnerBuilder::~MdrunnerBuilder() = default;
+//! Public builder entry point: record the hardware detection result.
+// Delegates to the implementation; returns *this to allow call chaining.
+MdrunnerBuilder& MdrunnerBuilder::addHardwareDetectionResult(const gmx_hw_info_t* hwinfo)
+{
+ impl_->addHardwareDetectionResult(hwinfo);
+ return *this;
+}
+
MdrunnerBuilder& MdrunnerBuilder::addSimulationMethod(const MdrunOptions& options,
real forceWarningThreshold,
const StartingBehavior startingBehavior)
return *this;
}
+//! Public builder entry point: register the simulation input source.
+// Transfers ownership of the handle to the implementation; returns *this
+// to allow call chaining.
+MdrunnerBuilder& MdrunnerBuilder::addInput(SimulationInputHandle input)
+{
+ impl_->addInput(std::move(input));
+ return *this;
+}
+
MdrunnerBuilder::MdrunnerBuilder(MdrunnerBuilder&&) noexcept = default;
MdrunnerBuilder& MdrunnerBuilder::operator=(MdrunnerBuilder&&) noexcept = default;