2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2011,2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
39 * \brief Implements the MD runner routine calling all integrators.
41 * \author David van der Spoel <david.vanderspoel@icm.uu.se>
42 * \ingroup module_mdrun
58 #include "gromacs/commandline/filenm.h"
59 #include "gromacs/compat/make_unique.h"
60 #include "gromacs/domdec/domdec.h"
61 #include "gromacs/domdec/domdec_struct.h"
62 #include "gromacs/domdec/localatomsetmanager.h"
63 #include "gromacs/ewald/ewald-utils.h"
64 #include "gromacs/ewald/pme.h"
65 #include "gromacs/ewald/pme-gpu-program.h"
66 #include "gromacs/fileio/checkpoint.h"
67 #include "gromacs/fileio/oenv.h"
68 #include "gromacs/fileio/tpxio.h"
69 #include "gromacs/gmxlib/network.h"
70 #include "gromacs/gmxlib/nrnb.h"
71 #include "gromacs/gpu_utils/clfftinitializer.h"
72 #include "gromacs/gpu_utils/gpu_utils.h"
73 #include "gromacs/hardware/cpuinfo.h"
74 #include "gromacs/hardware/detecthardware.h"
75 #include "gromacs/hardware/printhardware.h"
76 #include "gromacs/listed-forces/disre.h"
77 #include "gromacs/listed-forces/orires.h"
78 #include "gromacs/math/functions.h"
79 #include "gromacs/math/utilities.h"
80 #include "gromacs/math/vec.h"
81 #include "gromacs/mdlib/boxdeformation.h"
82 #include "gromacs/mdlib/calc_verletbuf.h"
83 #include "gromacs/mdlib/forcerec.h"
84 #include "gromacs/mdlib/gmx_omp_nthreads.h"
85 #include "gromacs/mdlib/makeconstraints.h"
86 #include "gromacs/mdlib/md_support.h"
87 #include "gromacs/mdlib/mdatoms.h"
88 #include "gromacs/mdlib/mdrun.h"
89 #include "gromacs/mdlib/membed.h"
90 #include "gromacs/mdlib/nb_verlet.h"
91 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
92 #include "gromacs/mdlib/nbnxn_search.h"
93 #include "gromacs/mdlib/nbnxn_tuning.h"
94 #include "gromacs/mdlib/qmmm.h"
95 #include "gromacs/mdlib/sighandler.h"
96 #include "gromacs/mdlib/sim_util.h"
97 #include "gromacs/mdlib/stophandler.h"
98 #include "gromacs/mdrun/logging.h"
99 #include "gromacs/mdrun/multisim.h"
100 #include "gromacs/mdrunutility/mdmodules.h"
101 #include "gromacs/mdrunutility/threadaffinity.h"
102 #include "gromacs/mdtypes/commrec.h"
103 #include "gromacs/mdtypes/fcdata.h"
104 #include "gromacs/mdtypes/inputrec.h"
105 #include "gromacs/mdtypes/md_enums.h"
106 #include "gromacs/mdtypes/observableshistory.h"
107 #include "gromacs/mdtypes/state.h"
108 #include "gromacs/pbcutil/pbc.h"
109 #include "gromacs/pulling/pull.h"
110 #include "gromacs/pulling/pull_rotation.h"
111 #include "gromacs/swap/swapcoords.h"
112 #include "gromacs/taskassignment/decidegpuusage.h"
113 #include "gromacs/taskassignment/resourcedivision.h"
114 #include "gromacs/taskassignment/taskassignment.h"
115 #include "gromacs/taskassignment/usergpuids.h"
116 #include "gromacs/timing/wallcycle.h"
117 #include "gromacs/topology/mtop_util.h"
118 #include "gromacs/trajectory/trajectoryframe.h"
119 #include "gromacs/utility/basenetwork.h"
120 #include "gromacs/utility/cstringutil.h"
121 #include "gromacs/utility/exceptions.h"
122 #include "gromacs/utility/fatalerror.h"
123 #include "gromacs/utility/filestream.h"
124 #include "gromacs/utility/gmxassert.h"
125 #include "gromacs/utility/gmxmpi.h"
126 #include "gromacs/utility/logger.h"
127 #include "gromacs/utility/loggerbuilder.h"
128 #include "gromacs/utility/physicalnodecommunicator.h"
129 #include "gromacs/utility/pleasecite.h"
130 #include "gromacs/utility/programcontext.h"
131 #include "gromacs/utility/smalloc.h"
132 #include "gromacs/utility/stringutil.h"
134 #include "integrator.h"
135 #include "replicaexchange.h"
138 #include "corewrap.h"
144 /*! \brief Barrier for safe simultaneous thread access to mdrunner data
146 * Used to ensure that the master thread does not modify the mdrunner object
147 * while the spawned threads are copying it. */
148 static void threadMpiMdrunnerAccessBarrier()
151 MPI_Barrier(MPI_COMM_WORLD);
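// With thread-MPI, MPI_COMM_WORLD here covers the master and all spawned ranks, so this
// barrier pairs with the matching call in spawnThreads() below: the master waits until
// every spawned thread has taken its copy of the Mdrunner before continuing.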
155 void Mdrunner::reinitializeOnSpawnedThread()
157 threadMpiMdrunnerAccessBarrier();
159 cr = reinitialize_commrec_for_this_thread(cr);
161 GMX_RELEASE_ASSERT(!MASTER(cr), "reinitializeOnSpawnedThread should only be called on spawned threads");
163 // Only the master rank writes to the log file
167 /*! \brief The callback used for running on spawned threads.
169 * Obtains the pointer to the master mdrunner object from the one
170 * argument permitted to the thread-launch API call, copies it to make
171 * a new runner for this thread, reinitializes necessary data, and
172 * proceeds to the simulation. */
173 static void mdrunner_start_fn(const void *arg)
177 auto masterMdrunner = reinterpret_cast<const gmx::Mdrunner *>(arg);
178 /* copy the arg list to make sure that it's thread-local. This
179 doesn't copy pointed-to items, of course; fnm, cr and fplog
180 are reset in the call below, all others should be const. */
181 gmx::Mdrunner mdrunner = *masterMdrunner;
182 mdrunner.reinitializeOnSpawnedThread();
185 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
189 /*! \brief Start thread-MPI threads.
191 * Called by mdrunner() to start a specific number of threads
192 * (including the main thread) for thread-parallel runs. This in turn
193 * calls mdrunner() for each thread. All options are the same as for mdrunner().
195 t_commrec *Mdrunner::spawnThreads(int numThreadsToLaunch) const
198 /* first check whether we even need to start tMPI */
199 if (numThreadsToLaunch < 2)
205 /* now spawn new threads that start mdrunner_start_fn(), while
206 the main thread returns, we set thread affinity later */
207 if (tMPI_Init_fn(TRUE, numThreadsToLaunch, TMPI_AFFINITY_NONE,
208 mdrunner_start_fn, static_cast<const void*>(this)) != TMPI_SUCCESS)
210 GMX_THROW(gmx::InternalError("Failed to spawn thread-MPI threads"));
213 threadMpiMdrunnerAccessBarrier();
215 GMX_UNUSED_VALUE(mdrunner_start_fn);
218 return reinitialize_commrec_for_this_thread(cr);
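// Rough call flow for a thread-MPI run started with -ntmpi N (a sketch of the code above,
// not additional behaviour):
//   master:        mdrunner() -> spawnThreads(N) -> tMPI_Init_fn(..., mdrunner_start_fn, this)
//   spawned ranks: mdrunner_start_fn(arg) -> copy of the master Mdrunner
//                  -> reinitializeOnSpawnedThread() -> mdrunner() on the copy
// After the access barrier, the master continues with a commrec reinitialized for N ranks.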
223 /*! \brief Initialize variables for Verlet scheme simulation */
224 static void prepare_verlet_scheme(FILE *fplog,
228 const gmx_mtop_t *mtop,
230 bool makeGpuPairList,
231 const gmx::CpuInfo &cpuinfo)
233 /* For NVE simulations, we will retain the initial list buffer */
234 if (EI_DYNAMICS(ir->eI) &&
235 ir->verletbuf_tol > 0 &&
236 !(EI_MD(ir->eI) && ir->etc == etcNO))
238 /* Update the Verlet buffer size for the current run setup */
240 /* Here we assume SIMD-enabled kernels are being used. But as currently
241 * calc_verlet_buffer_size gives the same results for 4x8 and 4x4
242 * and 4x2 gives a larger buffer than 4x4, this is ok.
244 ListSetupType listType = (makeGpuPairList ? ListSetupType::Gpu : ListSetupType::CpuSimdWhenSupported);
245 VerletbufListSetup listSetup = verletbufGetSafeListSetup(listType);
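/* det(box) is the box volume. Given the verlet-buffer-tolerance and the nstlist-1 steps
 * between pair-list updates, calc_verlet_buffer_size() returns in rlist_new a buffered
 * cut-off chosen so that the estimated energy drift stays within that tolerance. */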
248 calc_verlet_buffer_size(mtop, det(box), ir, ir->nstlist, ir->nstlist - 1, -1, &listSetup, nullptr, &rlist_new);
250 if (rlist_new != ir->rlist)
252 if (fplog != nullptr)
254 fprintf(fplog, "\nChanging rlist from %g to %g for non-bonded %dx%d atom kernels\n\n",
255 ir->rlist, rlist_new,
256 listSetup.cluster_size_i, listSetup.cluster_size_j);
258 ir->rlist = rlist_new;
262 if (nstlist_cmdline > 0 && (!EI_DYNAMICS(ir->eI) || ir->verletbuf_tol <= 0))
264 gmx_fatal(FARGS, "Can not set nstlist without %s",
265 !EI_DYNAMICS(ir->eI) ? "dynamics" : "verlet-buffer-tolerance");
268 if (EI_DYNAMICS(ir->eI))
270 /* Set or try nstlist values */
271 increaseNstlist(fplog, cr, ir, nstlist_cmdline, mtop, box, makeGpuPairList, cpuinfo);
275 /*! \brief Override the nsteps value in inputrec
277 * with value passed on the command line (if any)
279 static void override_nsteps_cmdline(const gmx::MDLogger &mdlog,
280 int64_t nsteps_cmdline,
285 /* override with anything other than the default -2 */
286 if (nsteps_cmdline > -2)
288 char sbuf_steps[STEPSTRSIZE];
289 char sbuf_msg[STRLEN];
291 ir->nsteps = nsteps_cmdline;
292 if (EI_DYNAMICS(ir->eI) && nsteps_cmdline != -1)
294 sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps, %.3g ps",
295 gmx_step_str(nsteps_cmdline, sbuf_steps),
296 fabs(nsteps_cmdline*ir->delta_t));
300 sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps",
301 gmx_step_str(nsteps_cmdline, sbuf_steps));
304 GMX_LOG(mdlog.warning).asParagraph().appendText(sbuf_msg);
306 else if (nsteps_cmdline < -2)
308 gmx_fatal(FARGS, "Invalid nsteps value passed on the command line: %" PRId64,
311 /* Do nothing if nsteps_cmdline == -2 */
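/* Sentinel values for nsteps_cmdline as handled above: -2 (the default) leaves the .tpr
 * value untouched, -1 removes the step limit for dynamics (hence no ps estimate is
 * printed), and values >= 0 override nsteps directly. */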
317 /*! \brief Return whether GPU acceleration of nonbondeds is supported with the given settings.
319 * If not, and if a warning may be issued, logs a warning about
320 * falling back to CPU code. With thread-MPI, only the first
321 * call to this function should have \c issueWarning true. */
322 static bool gpuAccelerationOfNonbondedIsUseful(const MDLogger &mdlog,
323 const t_inputrec *ir,
326 if (ir->opts.ngener - ir->nwall > 1)
328 /* The GPU code does not support more than one energy group.
329 * If the user requested GPUs explicitly, a fatal error is given later.
333 GMX_LOG(mdlog.warning).asParagraph()
334 .appendText("Multiple energy groups are not implemented for GPUs, falling back to the CPU. "
335 "For better performance, run on the GPU without energy groups and then use the "
336 "gmx mdrun -rerun option on the trajectory with an energy-group .tpr file.");
343 //! Initializes the logger for mdrun.
344 static gmx::LoggerOwner buildLogger(FILE *fplog, const t_commrec *cr)
346 gmx::LoggerBuilder builder;
347 if (fplog != nullptr)
349 builder.addTargetFile(gmx::MDLogger::LogLevel::Info, fplog);
351 if (cr == nullptr || SIMMASTER(cr))
353 builder.addTargetStream(gmx::MDLogger::LogLevel::Warning,
354 &gmx::TextOutputFile::standardError());
356 return builder.build();
359 //! Make a TaskTarget from an mdrun argument string.
360 static TaskTarget findTaskTarget(const char *optionString)
362 TaskTarget returnValue = TaskTarget::Auto;
364 if (strncmp(optionString, "auto", 4) == 0)
366 returnValue = TaskTarget::Auto;
368 else if (strncmp(optionString, "cpu", 3) == 0)
370 returnValue = TaskTarget::Cpu;
372 else if (strncmp(optionString, "gpu", 3) == 0)
374 returnValue = TaskTarget::Gpu;
378 GMX_ASSERT(false, "Option string should have been checked for sanity already");
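// For example, findTaskTarget("gpu") yields TaskTarget::Gpu; it is used below to parse the
// -nb, -pme and -pmefft option strings (nbpu_opt, pme_opt, pme_fft_opt).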
384 int Mdrunner::mdrunner()
388 t_forcerec *fr = nullptr;
389 t_fcdata *fcd = nullptr;
390 real ewaldcoeff_q = 0;
391 real ewaldcoeff_lj = 0;
392 int nChargePerturbed = -1, nTypePerturbed = 0;
393 gmx_wallcycle_t wcycle;
394 gmx_walltime_accounting_t walltime_accounting = nullptr;
396 int64_t reset_counters;
397 int nthreads_pme = 1;
398 gmx_membed_t * membed = nullptr;
399 gmx_hw_info_t *hwinfo = nullptr;
401 /* CAUTION: threads may be started later on in this function, so
402 cr doesn't reflect the final parallel state right now */
403 std::unique_ptr<gmx::MDModules> mdModules(new gmx::MDModules);
404 t_inputrec inputrecInstance;
405 t_inputrec *inputrec = &inputrecInstance;
408 if (mdrunOptions.continuationOptions.appendFiles)
413 bool doMembed = opt2bSet("-membed", nfile, fnm);
414 bool doRerun = mdrunOptions.rerun;
416 // Handle task-assignment related user options.
417 EmulateGpuNonbonded emulateGpuNonbonded = (getenv("GMX_EMULATE_GPU") != nullptr ?
418 EmulateGpuNonbonded::Yes : EmulateGpuNonbonded::No);
419 std::vector<int> gpuIdsAvailable;
422 gpuIdsAvailable = parseUserGpuIds(hw_opt.gpuIdsAvailable);
423 // TODO We could put the GPU IDs into a std::map to find
424 // duplicates, but for the small numbers of IDs involved, this
425 // code is simple and fast.
426 for (size_t i = 0; i != gpuIdsAvailable.size(); ++i)
428 for (size_t j = i+1; j != gpuIdsAvailable.size(); ++j)
430 if (gpuIdsAvailable[i] == gpuIdsAvailable[j])
432 GMX_THROW(InvalidInputError(formatString("The string of available GPU device IDs '%s' may not contain duplicate device IDs", hw_opt.gpuIdsAvailable.c_str())));
437 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
439 std::vector<int> userGpuTaskAssignment;
442 userGpuTaskAssignment = parseUserGpuIds(hw_opt.userGpuTaskAssignment);
444 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
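// As an illustration: "-gpu_id 12" populates hw_opt.gpuIdsAvailable so that only devices 1
// and 2 are considered, while "-gputasks 0011" fills hw_opt.userGpuTaskAssignment to map
// the GPU tasks found on this node to devices 0,0,1,1 (both parsed by parseUserGpuIds()).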
445 auto nonbondedTarget = findTaskTarget(nbpu_opt);
446 auto pmeTarget = findTaskTarget(pme_opt);
447 auto pmeFftTarget = findTaskTarget(pme_fft_opt);
448 PmeRunMode pmeRunMode = PmeRunMode::None;
450 // Here we assume that SIMMASTER(cr) does not change even after the
451 // threads are started.
452 gmx::LoggerOwner logOwner(buildLogger(fplog, cr));
453 gmx::MDLogger mdlog(logOwner.logger());
455 // TODO The thread-MPI master rank makes a working
456 // PhysicalNodeCommunicator here, but it gets rebuilt by all ranks
457 // after the threads have been launched. This works because no use
458 // is made of that communicator until after the execution paths
459 // have rejoined. But it is likely that we can improve the way
460 * this is expressed, e.g. by expressly running detection only on the
461 // master rank for thread-MPI, rather than relying on the mutex
462 // and reference count.
463 PhysicalNodeCommunicator physicalNodeComm(MPI_COMM_WORLD, gmx_physicalnode_id_hash());
464 hwinfo = gmx_detect_hardware(mdlog, physicalNodeComm);
466 gmx_print_detected_hardware(fplog, cr, ms, mdlog, hwinfo);
468 std::vector<int> gpuIdsToUse;
469 auto compatibleGpus = getCompatibleGpus(hwinfo->gpu_info);
470 if (gpuIdsAvailable.empty())
472 gpuIdsToUse = compatibleGpus;
476 for (const auto &availableGpuId : gpuIdsAvailable)
478 bool availableGpuIsCompatible = false;
479 for (const auto &compatibleGpuId : compatibleGpus)
481 if (availableGpuId == compatibleGpuId)
483 availableGpuIsCompatible = true;
487 if (!availableGpuIsCompatible)
489 gmx_fatal(FARGS, "You limited the set of compatible GPUs to a set that included ID #%d, but that ID is not for a compatible GPU. List only compatible GPUs.", availableGpuId);
491 gpuIdsToUse.push_back(availableGpuId);
495 if (fplog != nullptr)
497 /* Print references after all software/hardware printing */
498 please_cite(fplog, "Abraham2015");
499 please_cite(fplog, "Pall2015");
500 please_cite(fplog, "Pronk2013");
501 please_cite(fplog, "Hess2008b");
502 please_cite(fplog, "Spoel2005a");
503 please_cite(fplog, "Lindahl2001a");
504 please_cite(fplog, "Berendsen95a");
507 std::unique_ptr<t_state> globalState;
511 /* Only the master rank has the global state */
512 globalState = compat::make_unique<t_state>();
514 /* Read (nearly) all data required for the simulation */
515 read_tpx_state(ftp2fn(efTPR, nfile, fnm), inputrec, globalState.get(), &mtop);
517 /* In rerun, set velocities to zero if present */
518 if (doRerun && ((globalState->flags & (1 << estV)) != 0))
520 // rerun does not use velocities
521 GMX_LOG(mdlog.info).asParagraph().appendText(
522 "Rerun trajectory contains velocities. Rerun does only evaluate "
523 "potential energy and forces. The velocities will be ignored.");
524 for (int i = 0; i < globalState->natoms; i++)
526 clear_rvec(globalState->v[i]);
528 globalState->flags &= ~(1 << estV);
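// Clearing estV keeps the state flags consistent with the zeroed velocities, so later code
// treats this rerun state as containing no velocity data.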
531 if (inputrec->cutoff_scheme != ecutsVERLET)
533 if (nstlist_cmdline > 0)
535 gmx_fatal(FARGS, "Can not set nstlist with the group cut-off scheme");
538 if (!compatibleGpus.empty())
540 GMX_LOG(mdlog.warning).asParagraph().appendText(
541 "NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
542 " To use a GPU, set the mdp option: cutoff-scheme = Verlet");
547 /* Check and update the hardware options for internal consistency */
548 check_and_update_hw_opt_1(mdlog, &hw_opt, cr, domdecOptions.numPmeRanks);
550 /* Early check for externally set process affinity. */
551 gmx_check_thread_affinity_set(mdlog, cr,
552 &hw_opt, hwinfo->nthreads_hw_avail, FALSE);
554 if (GMX_THREAD_MPI && SIMMASTER(cr))
556 if (domdecOptions.numPmeRanks > 0 && hw_opt.nthreads_tmpi <= 0)
558 gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME ranks");
561 /* Since the master knows the cut-off scheme, update hw_opt for this.
562 * This is done later for normal MPI and also once more with tMPI
563 * for all tMPI ranks.
565 check_and_update_hw_opt_2(&hw_opt, inputrec->cutoff_scheme);
567 bool useGpuForNonbonded = false;
568 bool useGpuForPme = false;
571 // If the user specified the number of ranks, then we must
572 // respect that, but in default mode, we need to allow for
573 // the number of GPUs to choose the number of ranks.
575 useGpuForNonbonded = decideWhetherToUseGpusForNonbondedWithThreadMpi
576 (nonbondedTarget, gpuIdsToUse, userGpuTaskAssignment, emulateGpuNonbonded,
577 inputrec->cutoff_scheme == ecutsVERLET,
578 gpuAccelerationOfNonbondedIsUseful(mdlog, inputrec, GMX_THREAD_MPI),
579 hw_opt.nthreads_tmpi);
580 auto canUseGpuForPme = pme_gpu_supports_build(nullptr) && pme_gpu_supports_input(*inputrec, mtop, nullptr);
581 useGpuForPme = decideWhetherToUseGpusForPmeWithThreadMpi
582 (useGpuForNonbonded, pmeTarget, gpuIdsToUse, userGpuTaskAssignment,
583 canUseGpuForPme, hw_opt.nthreads_tmpi, domdecOptions.numPmeRanks);
586 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
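// These thread-MPI-specific decisions are preliminary: they only need to be good enough to
// choose hw_opt.nthreads_tmpi below. After the threads are spawned, every rank repeats the
// full GPU-use decision (further down) so that all ranks agree without communicating.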
588 /* Determine how many thread-MPI ranks to start.
590 * TODO Over-writing the user-supplied value here does
591 * prevent any possible subsequent checks from working
593 hw_opt.nthreads_tmpi = get_nthreads_mpi(hwinfo,
602 // Now start the threads for thread MPI.
603 cr = spawnThreads(hw_opt.nthreads_tmpi);
604 /* The main thread continues here with a new cr. We don't deallocate
605 the old cr because other threads may still be reading it. */
606 // TODO Both master and spawned threads call dup_tfn and
607 // reinitialize_commrec_for_this_thread. Find a way to express
609 physicalNodeComm = PhysicalNodeCommunicator(MPI_COMM_WORLD, gmx_physicalnode_id_hash());
611 // END OF CAUTION: cr and physicalNodeComm are now reliable
615 /* now broadcast everything to the non-master nodes/threads: */
616 init_parallel(cr, inputrec, &mtop);
619 // Now each rank knows the inputrec that SIMMASTER read and used,
620 // and (if applicable) cr->nnodes has been assigned the number of
621 // thread-MPI ranks that have been chosen. The ranks can now all
622 // run the task-deciding functions and will agree on the result
623 // without needing to communicate.
625 // TODO Should we do the communication in debug mode to support
626 // having an assertion?
628 // Note that these variables describe only their own node.
629 bool useGpuForNonbonded = false;
630 bool useGpuForPme = false;
633 // It's possible that there are different numbers of GPUs on
634 // different nodes, which is the user's responsibility to
635 // handle. If unsuitable, we will notice that during task
637 bool gpusWereDetected = hwinfo->ngpu_compatible_tot > 0;
638 useGpuForNonbonded = decideWhetherToUseGpusForNonbonded(nonbondedTarget, userGpuTaskAssignment,
639 emulateGpuNonbonded, inputrec->cutoff_scheme == ecutsVERLET,
640 gpuAccelerationOfNonbondedIsUseful(mdlog, inputrec, !GMX_THREAD_MPI),
642 auto canUseGpuForPme = pme_gpu_supports_build(nullptr) && pme_gpu_supports_input(*inputrec, mtop, nullptr);
643 useGpuForPme = decideWhetherToUseGpusForPme(useGpuForNonbonded, pmeTarget, userGpuTaskAssignment,
644 canUseGpuForPme, cr->nnodes, domdecOptions.numPmeRanks,
647 pmeRunMode = (useGpuForPme ? PmeRunMode::GPU : PmeRunMode::CPU);
648 if (pmeRunMode == PmeRunMode::GPU)
650 if (pmeFftTarget == TaskTarget::Cpu)
652 pmeRunMode = PmeRunMode::Mixed;
655 else if (pmeFftTarget == TaskTarget::Gpu)
657 gmx_fatal(FARGS, "Assigning FFTs to GPU requires PME to be assigned to GPU as well. With PME on CPU you should not be using -pmefft.");
660 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
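// Net effect of the choices above: PME on CPU -> PmeRunMode::CPU; PME on GPU with the FFTs
// also on the GPU -> PmeRunMode::GPU; PME on GPU but -pmefft cpu -> PmeRunMode::Mixed
// (solve/FFT on the CPU); requesting -pmefft gpu without PME on the GPU is a fatal error.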
662 // TODO: Error handling
663 mdModules->assignOptionsToModules(*inputrec->params, nullptr);
665 if (fplog != nullptr)
667 pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
668 fprintf(fplog, "\n");
673 /* now make sure the state is initialized and propagated */
674 set_state_entries(globalState.get(), inputrec);
677 /* NM and TPI parallelize over force/energy calculations, not atoms,
678 * so we need to initialize and broadcast the global state.
680 if (inputrec->eI == eiNM || inputrec->eI == eiTPI)
684 globalState = compat::make_unique<t_state>();
686 broadcastStateWithoutDynamics(cr, globalState.get());
689 /* A parallel command line option consistency check that we can
690 only do after any threads have started. */
691 if (!PAR(cr) && (domdecOptions.numCells[XX] > 1 ||
692 domdecOptions.numCells[YY] > 1 ||
693 domdecOptions.numCells[ZZ] > 1 ||
694 domdecOptions.numPmeRanks > 0))
697 "The -dd or -npme option request a parallel simulation, "
699 "but %s was compiled without threads or MPI enabled", output_env_get_program_display_name(oenv));
702 "but the number of MPI-threads (option -ntmpi) is not set or is 1");
704 "but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec", output_env_get_program_display_name(oenv));
710 (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
712 gmx_fatal(FARGS, "The .mdp file specified an energy minimization or normal mode algorithm, and these are not compatible with mdrun -rerun");
715 if (can_use_allvsall(inputrec, TRUE, cr, fplog) && DOMAINDECOMP(cr))
717 gmx_fatal(FARGS, "All-vs-all loops do not work with domain decomposition, use a single MPI rank");
720 if (!(EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype)))
722 if (domdecOptions.numPmeRanks > 0)
724 gmx_fatal_collective(FARGS, cr->mpi_comm_mysim, MASTER(cr),
725 "PME-only ranks are requested, but the system does not use PME for electrostatics or LJ");
728 domdecOptions.numPmeRanks = 0;
731 if (useGpuForNonbonded && domdecOptions.numPmeRanks < 0)
733 /* With NB GPUs we don't automatically use PME-only CPU ranks. PME ranks can
734 * improve performance with many threads per GPU, since our OpenMP
735 * scaling is bad, but it's difficult to automate the setup.
737 domdecOptions.numPmeRanks = 0;
741 if (domdecOptions.numPmeRanks < 0)
743 domdecOptions.numPmeRanks = 0;
744 // TODO possibly print a note that one can opt-in for a separate PME GPU rank?
748 GMX_RELEASE_ASSERT(domdecOptions.numPmeRanks <= 1, "PME GPU decomposition is not supported");
755 fcRegisterSteps(inputrec->nsteps, inputrec->init_step);
759 /* NMR restraints must be initialized before load_checkpoint,
760 * since with time averaging the history is added to t_state.
761 * For proper consistency check we therefore need to extend
763 * So the PME-only nodes (if present) will also initialize
764 * the distance restraints.
768 /* This needs to be called before read_checkpoint to extend the state */
769 init_disres(fplog, &mtop, inputrec, cr, ms, fcd, globalState.get(), replExParams.exchangeInterval > 0);
771 init_orires(fplog, &mtop, inputrec, cr, ms, globalState.get(), &(fcd->orires));
773 auto deform = prepareBoxDeformation(globalState->box, cr, *inputrec);
775 ObservablesHistory observablesHistory = {};
777 ContinuationOptions &continuationOptions = mdrunOptions.continuationOptions;
779 if (continuationOptions.startedFromCheckpoint)
781 /* Check if checkpoint file exists before doing continuation.
782 * This way we can use identical input options for the first and subsequent runs...
786 load_checkpoint(opt2fn_master("-cpi", nfile, fnm, cr), &fplog,
787 cr, domdecOptions.numCells,
788 inputrec, globalState.get(),
789 &bReadEkin, &observablesHistory,
790 continuationOptions.appendFiles,
791 continuationOptions.appendFilesOptionSet,
792 mdrunOptions.reproducible);
796 continuationOptions.haveReadEkin = true;
800 if (SIMMASTER(cr) && continuationOptions.appendFiles)
802 gmx_log_append(cr->nodeid, cr->nnodes, fplog);
803 logOwner = buildLogger(fplog, nullptr);
804 mdlog = logOwner.logger();
807 if (mdrunOptions.numStepsCommandline > -2)
809 GMX_LOG(mdlog.info).asParagraph().
810 appendText("The -nsteps functionality is deprecated, and may be removed in a future version. "
811 "Consider using gmx convert-tpr -nsteps or changing the appropriate .mdp file field.");
813 /* override nsteps with the value set on the command line */
814 override_nsteps_cmdline(mdlog, mdrunOptions.numStepsCommandline, inputrec);
818 copy_mat(globalState->box, box);
823 gmx_bcast(sizeof(box), box, cr);
826 /* Update rlist and nstlist. */
827 if (inputrec->cutoff_scheme == ecutsVERLET)
829 prepare_verlet_scheme(fplog, cr, inputrec, nstlist_cmdline, &mtop, box,
830 useGpuForNonbonded || (emulateGpuNonbonded == EmulateGpuNonbonded::Yes), *hwinfo->cpuInfo);
833 LocalAtomSetManager atomSets;
835 if (PAR(cr) && !(EI_TPI(inputrec->eI) ||
836 inputrec->eI == eiNM))
838 cr->dd = init_domain_decomposition(mdlog, cr, domdecOptions, mdrunOptions,
840 box, positionsFromStatePointer(globalState.get()),
842 // Note that local state still does not exist yet.
846 /* PME, if used, is done on all nodes with 1D decomposition */
848 cr->duty = (DUTY_PP | DUTY_PME);
850 if (inputrec->ePBC == epbcSCREW)
853 "pbc=screw is only implemented with domain decomposition");
859 /* After possible communicator splitting in make_dd_communicators.
860 * we can set up the intra/inter node communication.
862 gmx_setup_nodecomm(fplog, cr);
868 GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
869 "This is simulation %d out of %d running as a composite GROMACS\n"
870 "multi-simulation job. Setup for this simulation:\n",
873 GMX_LOG(mdlog.warning).appendTextFormatted(
877 cr->nnodes == 1 ? "thread" : "threads"
879 cr->nnodes == 1 ? "process" : "processes"
885 /* Check and update hw_opt for the cut-off scheme */
886 check_and_update_hw_opt_2(&hw_opt, inputrec->cutoff_scheme);
888 /* Check and update the number of OpenMP threads requested */
889 checkAndUpdateRequestedNumOpenmpThreads(&hw_opt, *hwinfo, cr, ms, physicalNodeComm.size_,
892 gmx_omp_nthreads_init(mdlog, cr,
893 hwinfo->nthreads_hw_avail,
894 physicalNodeComm.size_,
896 hw_opt.nthreads_omp_pme,
897 !thisRankHasDuty(cr, DUTY_PP),
898 inputrec->cutoff_scheme == ecutsVERLET);
900 // Enable FP exception but not in Release mode and not for compilers
901 // with known buggy FP exception support (clang with any optimization)
902 // or suspected buggy FP exception support (gcc 7.* with optimization).
903 #if !defined NDEBUG && \
904 !((defined __clang__ || (defined(__GNUC__) && !defined(__ICC) && __GNUC__ == 7)) \
905 && defined __OPTIMIZE__)
906 const bool bEnableFPE = !EI_TPI(inputrec->eI) &&
907 inputrec->cutoff_scheme == ecutsVERLET;
909 const bool bEnableFPE = false;
911 //FIXME - reconcile with gmx_feenableexcept() call from CommandLineModuleManager::run()
914 gmx_feenableexcept();
917 // Build a data structure that expresses which kinds of non-bonded
918 // task are handled by this rank.
920 // TODO Later, this might become a loop over all registered modules
921 // relevant to the mdp inputs, to find those that have such tasks.
923 // TODO This could move before init_domain_decomposition() as part
924 // of refactoring that separates the responsibility for duty
925 // assignment from setup for communication between tasks, and
926 // setup for tasks handled with a domain (ie including short-ranged
927 // tasks, bonded tasks, etc.).
929 // Note that in general useGpuForNonbonded, etc. can have a value
930 // that is inconsistent with the presence of actual GPUs on any
931 // rank, and that is not known to be a problem until the
932 // duties of the ranks on a node become known.
934 // TODO Later we might need the concept of computeTasksOnThisRank,
935 // from which we construct gpuTasksOnThisRank.
937 // Currently the DD code assigns duty to ranks that can
938 // include PP work that currently can be executed on a single
939 // GPU, if present and compatible. This has to be coordinated
940 // across PP ranks on a node, with possible multiple devices
941 // or sharing devices on a node, either from the user
942 // selection, or automatically.
943 auto haveGpus = !gpuIdsToUse.empty();
944 std::vector<GpuTask> gpuTasksOnThisRank;
945 if (thisRankHasDuty(cr, DUTY_PP))
947 if (useGpuForNonbonded)
951 gpuTasksOnThisRank.push_back(GpuTask::Nonbonded);
953 else if (nonbondedTarget == TaskTarget::Gpu)
955 gmx_fatal(FARGS, "Cannot run short-ranged nonbonded interactions on a GPU because there is none detected.");
959 // TODO cr->duty & DUTY_PME should imply that a PME algorithm is active, but currently does not.
960 if (EEL_PME(inputrec->coulombtype) && (thisRankHasDuty(cr, DUTY_PME)))
966 gpuTasksOnThisRank.push_back(GpuTask::Pme);
968 else if (pmeTarget == TaskTarget::Gpu)
970 gmx_fatal(FARGS, "Cannot run PME on a GPU because there is none detected.");
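// At this point gpuTasksOnThisRank holds at most one Nonbonded and one Pme entry (only one
// task of each type is currently permitted per rank); runTaskAssignment() below maps these
// tasks onto device IDs, coordinating across the ranks of this physical node.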
975 GpuTaskAssignment gpuTaskAssignment;
978 // Produce the task assignment for this rank.
979 gpuTaskAssignment = runTaskAssignment(gpuIdsToUse, userGpuTaskAssignment, *hwinfo,
980 mdlog, cr, ms, physicalNodeComm, gpuTasksOnThisRank);
982 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
984 /* Prevent other ranks from continuing after an issue was found
985 * and reported as a fatal error.
987 * TODO This function implements a barrier so that MPI runtimes
988 * can organize an orderly shutdown if one of the ranks has had to
989 * issue a fatal error in various code already run. When we have
990 * MPI-aware error handling and reporting, this should be
995 MPI_Barrier(cr->mpi_comm_mysim);
1001 MPI_Barrier(ms->mpi_comm_masters);
1003 /* We need another barrier to prevent non-master ranks from continuing
1004 * when an error occurred in a different simulation.
1006 MPI_Barrier(cr->mpi_comm_mysim);
1010 /* Now that we know the setup is consistent, check for efficiency */
1011 check_resource_division_efficiency(hwinfo, !gpuTaskAssignment.empty(), mdrunOptions.ntompOptionIsSet,
1014 gmx_device_info_t *nonbondedDeviceInfo = nullptr;
1016 if (thisRankHasDuty(cr, DUTY_PP))
1018 // This works because only one task of each type is currently permitted.
1019 auto nbGpuTaskMapping = std::find_if(gpuTaskAssignment.begin(), gpuTaskAssignment.end(),
1020 hasTaskType<GpuTask::Nonbonded>);
1021 if (nbGpuTaskMapping != gpuTaskAssignment.end())
1023 int nonbondedDeviceId = nbGpuTaskMapping->deviceId_;
1024 nonbondedDeviceInfo = getDeviceInfo(hwinfo->gpu_info, nonbondedDeviceId);
1025 init_gpu(mdlog, nonbondedDeviceInfo);
1027 if (DOMAINDECOMP(cr))
1029 /* When we share GPUs over ranks, we need to know this for the DLB */
1030 dd_setup_dlb_resource_sharing(cr, nonbondedDeviceId);
1036 std::unique_ptr<ClfftInitializer> initializedClfftLibrary;
1038 gmx_device_info_t *pmeDeviceInfo = nullptr;
1039 // Later, this program could contain kernels that might be
1040 // re-used as auto-tuning progresses, or subsequent simulations
1042 PmeGpuProgramStorage pmeGpuProgram;
1043 // This works because only one task of each type is currently permitted.
1044 auto pmeGpuTaskMapping = std::find_if(gpuTaskAssignment.begin(), gpuTaskAssignment.end(), hasTaskType<GpuTask::Pme>);
1045 const bool thisRankHasPmeGpuTask = (pmeGpuTaskMapping != gpuTaskAssignment.end());
1046 if (thisRankHasPmeGpuTask)
1048 pmeDeviceInfo = getDeviceInfo(hwinfo->gpu_info, pmeGpuTaskMapping->deviceId_);
1049 init_gpu(mdlog, pmeDeviceInfo);
1050 pmeGpuProgram = buildPmeGpuProgram(pmeDeviceInfo);
1051 // TODO It would be nice to move this logic into the factory
1052 // function. See Redmine #2535.
1053 bool isMasterThread = !GMX_THREAD_MPI || MASTER(cr);
1054 if (pmeRunMode == PmeRunMode::GPU && !initializedClfftLibrary && isMasterThread)
1056 initializedClfftLibrary = initializeClfftLibrary();
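// Note: the clFFT library is what the OpenCL PME path uses for its FFTs; for other builds
// initializeClfftLibrary() is presumably cheap, so guarding it only by PME-on-GPU and the
// master-thread check above is sufficient.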
1060 /* getting number of PP/PME threads
1061 PME: env variable should be read only on one node to make sure it is
1062 identical everywhere;
1064 nthreads_pme = gmx_omp_nthreads_get(emntPME);
1066 int numThreadsOnThisRank;
1067 /* threads on this MPI process or TMPI thread */
1068 if (thisRankHasDuty(cr, DUTY_PP))
1070 numThreadsOnThisRank = gmx_omp_nthreads_get(emntNonbonded);
1074 numThreadsOnThisRank = nthreads_pme;
1077 checkHardwareOversubscription(numThreadsOnThisRank, cr->nodeid,
1078 *hwinfo->hardwareTopology,
1079 physicalNodeComm, mdlog);
1081 if (hw_opt.thread_affinity != threadaffOFF)
1083 /* Before setting affinity, check whether the affinity has changed
1084 * - which indicates that probably the OpenMP library has changed it
1085 * since we first checked.
1087 gmx_check_thread_affinity_set(mdlog, cr,
1088 &hw_opt, hwinfo->nthreads_hw_avail, TRUE);
1090 int numThreadsOnThisNode, intraNodeThreadOffset;
1091 analyzeThreadsOnThisNode(physicalNodeComm, numThreadsOnThisRank, &numThreadsOnThisNode,
1092 &intraNodeThreadOffset);
1094 /* Set the CPU affinity */
1095 gmx_set_thread_affinity(mdlog, cr, &hw_opt, *hwinfo->hardwareTopology,
1096 numThreadsOnThisRank, numThreadsOnThisNode,
1097 intraNodeThreadOffset, nullptr);
1100 if (mdrunOptions.timingOptions.resetStep > -1)
1102 GMX_LOG(mdlog.info).asParagraph().
1103 appendText("The -resetstep functionality is deprecated, and may be removed in a future version.");
1105 wcycle = wallcycle_init(fplog, mdrunOptions.timingOptions.resetStep, cr);
1109 /* Master synchronizes its value of reset_counters with all nodes
1110 * including PME only nodes */
1111 reset_counters = wcycle_get_reset_counters(wcycle);
1112 gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr);
1113 wcycle_set_reset_counters(wcycle, reset_counters);
1116 // Membrane embedding must be initialized before we call init_forcerec()
1121 fprintf(stderr, "Initializing membed");
1123 /* Note that membed cannot work in parallel because mtop is
1124 * changed here. Fix this if we ever want to make it run with
1125 * multiple ranks. */
1126 membed = init_membed(fplog, nfile, fnm, &mtop, inputrec, globalState.get(), cr, &mdrunOptions.checkpointOptions.period);
1129 std::unique_ptr<MDAtoms> mdAtoms;
1130 std::unique_ptr<gmx_vsite_t> vsite;
1133 if (thisRankHasDuty(cr, DUTY_PP))
1135 /* Initiate forcerecord */
1137 fr->forceProviders = mdModules->initForceProviders();
1138 init_forcerec(fplog, mdlog, fr, fcd,
1139 inputrec, &mtop, cr, box,
1140 opt2fn("-table", nfile, fnm),
1141 opt2fn("-tablep", nfile, fnm),
1142 opt2fns("-tableb", nfile, fnm),
1143 *hwinfo, nonbondedDeviceInfo,
1147 /* Initialize QM-MM */
1150 GMX_LOG(mdlog.info).asParagraph().
1151 appendText("Large parts of the QM/MM support are deprecated, and may be removed in a future "
1152 "version. Please get in touch with the developers if you find the support useful, "
1153 "as help is needed if the functionality is to continue to be available.");
1154 init_QMMMrec(cr, &mtop, inputrec, fr);
1157 /* Initialize the mdAtoms structure.
1158 * mdAtoms is not filled with atom data,
1159 * as this can not be done now with domain decomposition.
1161 mdAtoms = makeMDAtoms(fplog, mtop, *inputrec, thisRankHasPmeGpuTask);
1162 if (globalState && thisRankHasPmeGpuTask)
1164 // The pinning of coordinates in the global state object works, because we only use
1165 // PME on GPU without DD or on a separate PME rank, and because the local state pointer
1166 // points to the global state object without DD.
1167 // FIXME: MD and EM separately set up the local state - this should happen in the same function,
1168 // which should also perform the pinning.
1169 changePinningPolicy(&globalState->x, pme_get_pinning_policy());
1172 /* Initialize the virtual site communication */
1173 vsite = initVsite(mtop, cr);
1175 calc_shifts(box, fr->shift_vec);
1177 /* With periodic molecules the charge groups should be whole at start up
1178 * and the virtual sites should not be far from their proper positions.
1180 if (!inputrec->bContinuation && MASTER(cr) &&
1181 !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
1183 /* Make molecules whole at start of run */
1184 if (fr->ePBC != epbcNONE)
1186 rvec *xGlobal = as_rvec_array(globalState->x.data());
1187 do_pbc_first_mtop(fplog, inputrec->ePBC, box, &mtop, xGlobal);
1191 /* Correct initial vsite positions are required
1192 * for the initial distribution in the domain decomposition
1193 * and for the initial shell prediction.
1195 constructVsitesGlobal(mtop, globalState->x);
1199 if (EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
1201 ewaldcoeff_q = fr->ic->ewaldcoeff_q;
1202 ewaldcoeff_lj = fr->ic->ewaldcoeff_lj;
1207 /* This is a PME only node */
1209 GMX_ASSERT(globalState == nullptr, "We don't need the state on a PME only rank and expect it to be uninitialized");
1211 ewaldcoeff_q = calc_ewaldcoeff_q(inputrec->rcoulomb, inputrec->ewald_rtol);
1212 ewaldcoeff_lj = calc_ewaldcoeff_lj(inputrec->rvdw, inputrec->ewald_rtol_lj);
1215 gmx_pme_t *sepPmeData = nullptr;
1216 // This reference hides the fact that PME data is owned by runner on PME-only ranks and by forcerec on other ranks
1217 GMX_ASSERT(thisRankHasDuty(cr, DUTY_PP) == (fr != nullptr), "Double-checking that only PME-only ranks have no forcerec");
1218 gmx_pme_t * &pmedata = fr ? fr->pmedata : sepPmeData;
1220 /* Initiate PME if necessary,
1221 * either on all nodes or on dedicated PME nodes only. */
1222 if (EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
1224 if (mdAtoms && mdAtoms->mdatoms())
1226 nChargePerturbed = mdAtoms->mdatoms()->nChargePerturbed;
1227 if (EVDW_PME(inputrec->vdwtype))
1229 nTypePerturbed = mdAtoms->mdatoms()->nTypePerturbed;
1232 if (cr->npmenodes > 0)
1234 /* The PME only nodes need to know nChargePerturbed(FEP on Q) and nTypePerturbed(FEP on LJ)*/
1235 gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
1236 gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);
1239 if (thisRankHasDuty(cr, DUTY_PME))
1243 pmedata = gmx_pme_init(cr,
1244 getNumPmeDomains(cr->dd),
1246 mtop.natoms, nChargePerturbed != 0, nTypePerturbed != 0,
1247 mdrunOptions.reproducible,
1248 ewaldcoeff_q, ewaldcoeff_lj,
1250 pmeRunMode, nullptr,
1251 pmeDeviceInfo, pmeGpuProgram.get(), mdlog);
1253 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1258 if (EI_DYNAMICS(inputrec->eI))
1260 /* Turn on signal handling on all nodes */
1262 * (A user signal from the PME nodes (if any)
1263 * is communicated to the PP nodes.
1265 signal_handler_install();
1268 if (thisRankHasDuty(cr, DUTY_PP))
1270 /* Assumes uniform use of the number of OpenMP threads */
1271 walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));
1273 if (inputrec->bPull)
1275 /* Initialize pull code */
1276 inputrec->pull_work =
1277 init_pull(fplog, inputrec->pull, inputrec,
1278 &mtop, cr, &atomSets, inputrec->fepvals->init_lambda);
1279 if (EI_DYNAMICS(inputrec->eI) && MASTER(cr))
1281 init_pull_output_files(inputrec->pull_work,
1283 continuationOptions);
1287 std::unique_ptr<EnforcedRotation> enforcedRotation;
1290 /* Initialize enforced rotation code */
1291 enforcedRotation = init_rot(fplog, inputrec, nfile, fnm, cr, &atomSets, globalState.get(), &mtop, oenv, mdrunOptions);
1294 if (inputrec->eSwapCoords != eswapNO)
1296 /* Initialize ion swapping code */
1297 init_swapcoords(fplog, inputrec, opt2fn_master("-swap", nfile, fnm, cr),
1298 &mtop, globalState.get(), &observablesHistory,
1299 cr, &atomSets, oenv, mdrunOptions);
1302 /* Let makeConstraints know whether we have essential dynamics constraints.
1303 * TODO: inputrec should tell us whether we use an algorithm, not a file option or the checkpoint
1305 bool doEssentialDynamics = (opt2fn_null("-ei", nfile, fnm) != nullptr || observablesHistory.edsamHistory);
1306 auto constr = makeConstraints(mtop, *inputrec, doEssentialDynamics,
1307 fplog, *mdAtoms->mdatoms(),
1308 cr, *ms, nrnb, wcycle, fr->bMolPBC);
1310 if (DOMAINDECOMP(cr))
1312 GMX_RELEASE_ASSERT(fr, "fr was NULL while cr->duty was DUTY_PP");
1313 /* This call is not included in init_domain_decomposition mainly
1314 * because fr->cginfo_mb is set later.
1316 dd_init_bondeds(fplog, cr->dd, &mtop, vsite.get(), inputrec,
1317 domdecOptions.checkBondedInteractions,
1321 /* Create StopHandlerBuilder (could be moved earlier if needed - currently nobody registers here) */
1322 auto stopHandlerBuilder = compat::make_unique<StopHandlerBuilder>();
1324 /* Now do whatever the user wants us to do (how flexible...) */
1325 Integrator integrator {
1326 fplog, cr, ms, mdlog, nfile, fnm,
1329 vsite.get(), constr.get(),
1330 enforcedRotation ? enforcedRotation->getLegacyEnfrot() : nullptr,
1332 mdModules->outputProvider(),
1336 &observablesHistory,
1337 mdAtoms.get(), nrnb, wcycle, fr,
1340 walltime_accounting,
1341 std::move(stopHandlerBuilder)
1343 integrator.run(inputrec->eI, doRerun);
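// integrator.run() dispatches on inputrec->eI (and doRerun) to the matching integrator or
// minimizer implementation (e.g. MD, energy minimization, normal-mode analysis, TPI).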
1345 if (inputrec->bPull)
1347 finish_pull(inputrec->pull_work);
1353 GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");
1355 walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
1356 gmx_pmeonly(pmedata, cr, nrnb, wcycle, walltime_accounting, inputrec, pmeRunMode);
1359 wallcycle_stop(wcycle, ewcRUN);
1361 /* Finish up, write some stuff
1362 * if rerunMD, don't write last frame again
1364 finish_run(fplog, mdlog, cr,
1365 inputrec, nrnb, wcycle, walltime_accounting,
1366 fr ? fr->nbv : nullptr,
1368 EI_DYNAMICS(inputrec->eI) && !isMultiSim(ms));
1373 gmx_pme_destroy(pmedata);
1377 // FIXME: this is only here to manually unpin mdAtoms->chargeA_ and state->x,
1378 // before we destroy the GPU context(s) in free_gpu_resources().
1379 // Pinned buffers are associated with contexts in CUDA.
1380 // As soon as we destroy GPU contexts after mdrunner() exits, these lines should go.
1381 mdAtoms.reset(nullptr);
1382 globalState.reset(nullptr);
1383 mdModules.reset(nullptr); // destruct force providers here as they might also use the GPU
1385 /* Free GPU memory and set a physical node tMPI barrier (which should eventually go away) */
1386 free_gpu_resources(fr, physicalNodeComm);
1387 free_gpu(nonbondedDeviceInfo);
1388 free_gpu(pmeDeviceInfo);
1389 done_forcerec(fr, mtop.molblock.size(), mtop.groups.grps[egcENER].nr);
1394 free_membed(membed);
1397 gmx_hardware_info_free();
1399 /* Does what it says */
1400 print_date_and_time(fplog, cr->nodeid, "Finished mdrun", gmx_gettime());
1401 walltime_accounting_destroy(walltime_accounting);
1404 /* Close logfile already here if we were appending to it */
1405 if (MASTER(cr) && continuationOptions.appendFiles)
1407 gmx_log_close(fplog);
1411 /* Reset FPEs (important for unit tests) by disabling them. Assumes no
1412 * exceptions were enabled before function was called. */
1415 gmx_fedisableexcept();
1418 rc = static_cast<int>(gmx_get_stop_condition());
1421 /* we need to join all threads. The sub-threads join when they
1422 exit this function, but the master thread needs to be told to wait for them.
1424 if (PAR(cr) && MASTER(cr))