/* CAUTION: threads may be started later on in this function, so
   cr does not yet reflect the final parallel state at this point */
- gmx::MDModules mdModules;
- t_inputrec inputrecInstance;
- t_inputrec *inputrec = &inputrecInstance;
+ std::unique_ptr<gmx::MDModules> mdModules(new gmx::MDModules);
+ t_inputrec inputrecInstance;
+ t_inputrec *inputrec = &inputrecInstance;
snew(mtop, 1);
if (mdrunOptions.continuationOptions.appendFiles)
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
// TODO: Error handling
- mdModules.assignOptionsToModules(*inputrec->params, nullptr);
+ mdModules->assignOptionsToModules(*inputrec->params, nullptr);
if (fplog != nullptr)
{
{
/* Initiate forcerecord */
fr = mk_forcerec();
- fr->forceProviders = mdModules.initForceProviders();
+ fr->forceProviders = mdModules->initForceProviders();
init_forcerec(fplog, mdlog, fr, fcd,
inputrec, mtop, cr, box,
opt2fn("-table", nfile, fnm),
oenv,
mdrunOptions,
vsite, constr,
- mdModules.outputProvider(),
+ mdModules->outputProvider(),
inputrec, mtop,
fcd,
globalState.get(),
// As soon as we destroy GPU contexts after mdrunner() exits, these lines should go.
mdAtoms.reset(nullptr);
globalState.reset(nullptr);
+ mdModules.reset(nullptr); // destruct force providers here as they might also use the GPU
/* Free GPU memory and set a physical node tMPI barrier (which should eventually go away) */
free_gpu_resources(fr, cr);