/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "md_logging.h"
#include "md_support.h"
#include "mpelogging.h"
#include "checkpoint.h"
#include "mtop_util.h"
#include "sighandler.h"
#include "gmx_detect_hardware.h"
#include "gmx_omp_nthreads.h"
#include "pull_rotation.h"
#include "calc_verletbuf.h"
#include "../mdlib/nbnxn_search.h"
#include "../mdlib/nbnxn_consts.h"
#include "gmx_fatal_collective.h"
#include "gmx_thread_affinity.h"
#include "gpu_utils.h"
#include "nbnxn_cuda_data_mgmt.h"
typedef struct {
    gmx_integrator_t *func;
} gmx_intp_t;

/* The array should match the eI array in include/types/enums.h */
const gmx_intp_t integrator[eiNR] = { {do_md}, {do_steep}, {do_cg}, {do_md}, {do_md}, {do_nm}, {do_lbfgs}, {do_tpi}, {do_tpi}, {do_md}, {do_md}, {do_md} };
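/* Several eI values share an implementation: with the standard eI ordering,
 * Brownian dynamics, the stochastic dynamics variants and the velocity-Verlet
 * integrators all dispatch to do_md, and both test-particle insertion
 * variants dispatch to do_tpi. */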
gmx_large_int_t     deform_init_init_step_tpx;
matrix              deform_init_box_tpx;
#ifdef GMX_THREAD_MPI
tMPI_Thread_mutex_t deform_init_box_mutex = TMPI_THREAD_MUTEX_INITIALIZER;
#endif
#ifdef GMX_THREAD_MPI
struct mdrunner_arglist
{
    gmx_hw_opt_t    *hw_opt;
    const char      *dddlb_opt;
    const char      *nbpu_opt;
    gmx_large_int_t  nsteps_cmdline;
    const char      *deviceOptions;
    int              ret; /* return value */
};
/* The function used for spawning threads. Extracts the mdrunner()
   arguments from its one argument and calls mdrunner(), after making
   a commrec. */
static void mdrunner_start_fn(void *arg)
{
    struct mdrunner_arglist *mda = (struct mdrunner_arglist*)arg;
    struct mdrunner_arglist  mc  = *mda; /* copy the arg list to make sure
                                            that it's thread-local. This doesn't
                                            copy pointed-to items, of course,
                                            but those are all const. */
    t_commrec *cr;                       /* we need a local version of this */
    FILE      *fplog = NULL;
    t_filenm  *fnm;

    fnm = dup_tfn(mc.nfile, mc.fnm);

    cr = init_par_threads(mc.cr);

    if (MASTER(cr))
    {
        fplog = mc.fplog;
    }

    mda->ret = mdrunner(mc.hw_opt, fplog, cr, mc.nfile, fnm, mc.oenv,
                        mc.bVerbose, mc.bCompact, mc.nstglobalcomm,
                        mc.ddxyz, mc.dd_node_order, mc.rdd,
                        mc.rconstr, mc.dddlb_opt, mc.dlb_scale,
                        mc.ddcsx, mc.ddcsy, mc.ddcsz,
                        mc.nbpu_opt,
                        mc.nsteps_cmdline, mc.nstepout, mc.resetstep,
                        mc.nmultisim, mc.repl_ex_nst, mc.repl_ex_nex, mc.repl_ex_seed, mc.pforce,
                        mc.cpt_period, mc.max_hours, mc.deviceOptions, mc.Flags);
}
/* called by mdrunner() to start a specific number of threads (including
   the main thread) for thread-parallel runs. This in turn calls mdrunner()
   for each thread.
   All options besides nthreads are the same as for mdrunner(). */
static t_commrec *mdrunner_start_threads(gmx_hw_opt_t *hw_opt,
                                         FILE *fplog, t_commrec *cr, int nfile,
                                         const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
                                         gmx_bool bCompact, int nstglobalcomm,
                                         ivec ddxyz, int dd_node_order, real rdd, real rconstr,
                                         const char *dddlb_opt, real dlb_scale,
                                         const char *ddcsx, const char *ddcsy, const char *ddcsz,
                                         const char *nbpu_opt,
                                         gmx_large_int_t nsteps_cmdline,
                                         int nstepout, int resetstep,
                                         int nmultisim, int repl_ex_nst, int repl_ex_nex, int repl_ex_seed,
                                         real pforce, real cpt_period, real max_hours,
                                         const char *deviceOptions, unsigned long Flags)
{
    int                      ret;
    struct mdrunner_arglist *mda;
    t_commrec               *crn; /* the new commrec */
    t_filenm                *fnmn;

    /* first check whether we even need to start tMPI */
    if (hw_opt->nthreads_tmpi < 2)
    {
        return cr;
    }

    /* a few small, one-time, almost unavoidable memory leaks: */
    snew(mda, 1);
    fnmn = dup_tfn(nfile, fnm);

    /* fill the data structure to pass as void pointer to thread start fn */
    mda->hw_opt         = hw_opt;
    mda->bVerbose       = bVerbose;
    mda->bCompact       = bCompact;
    mda->nstglobalcomm  = nstglobalcomm;
    mda->ddxyz[XX]      = ddxyz[XX];
    mda->ddxyz[YY]      = ddxyz[YY];
    mda->ddxyz[ZZ]      = ddxyz[ZZ];
    mda->dd_node_order  = dd_node_order;
    mda->rconstr        = rconstr;
    mda->dddlb_opt      = dddlb_opt;
    mda->dlb_scale      = dlb_scale;
    mda->nbpu_opt       = nbpu_opt;
    mda->nsteps_cmdline = nsteps_cmdline;
    mda->nstepout       = nstepout;
    mda->resetstep      = resetstep;
    mda->nmultisim      = nmultisim;
    mda->repl_ex_nst    = repl_ex_nst;
    mda->repl_ex_nex    = repl_ex_nex;
    mda->repl_ex_seed   = repl_ex_seed;
    mda->pforce         = pforce;
    mda->cpt_period     = cpt_period;
    mda->max_hours      = max_hours;
    mda->deviceOptions  = deviceOptions;

    /* now spawn new threads that start mdrunner_start_fn(), while
       the main thread returns; we set thread affinity later */
    ret = tMPI_Init_fn(TRUE, hw_opt->nthreads_tmpi, TMPI_AFFINITY_NONE,
                       mdrunner_start_fn, (void*)(mda));
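    /* tMPI_Init_fn starts hw_opt->nthreads_tmpi thread-MPI ranks in total;
     * with TRUE as the first argument the calling thread returns here
     * (it becomes one of the ranks via init_par_threads below), while each
     * spawned thread enters mdrunner_start_fn(mda) and records its
     * mdrunner() result in mda->ret. */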
    if (ret != TMPI_SUCCESS)
    {
        return NULL;
    }

    /* make a new comm_rec to reflect the new situation */
    crn = init_par_threads(cr);
    return crn;
}
static int get_tmpi_omp_thread_division(const gmx_hw_info_t *hwinfo,
                                        const gmx_hw_opt_t  *hw_opt,
                                        int                  nthreads_tot,
                                        int                  ngpu)
{
    int nthreads_tmpi;

    /* There are no separate PME nodes here, as we ensured in
     * check_and_update_hw_opt that nthreads_tmpi>0 with PME nodes,
     * and a conditional ensures we would not have ended up here.
     * Note that separate PME nodes might be switched on later.
     */
    if (ngpu > 0)
    {
        nthreads_tmpi = ngpu;
        if (nthreads_tot > 0 && nthreads_tot < nthreads_tmpi)
        {
            nthreads_tmpi = nthreads_tot;
        }
    }
    else if (hw_opt->nthreads_omp > 0)
    {
        /* Here we could oversubscribe; when we do, we issue a warning later */
        nthreads_tmpi = max(1, nthreads_tot/hw_opt->nthreads_omp);
    }
    else
    {
        /* TODO choose nthreads_omp based on hardware topology
           when we have a hardware topology detection library */
        /* In general, when running up to 4 threads, OpenMP should be faster.
         * Note: on AMD Bulldozer we should avoid running OpenMP over two dies.
         * On Intel>=Nehalem running OpenMP on a single CPU is always faster,
         * even on two CPUs it's usually faster (but with many OpenMP threads
         * it could be faster not to use HT; currently we always use HT).
         * On Nehalem/Westmere we want to avoid running 16 threads over
         * two CPUs with HT, so we need a limit<16; thus we use 12.
         * A reasonable limit for Intel Sandy and Ivy Bridge,
         * not knowing the topology, is 16 threads.
         */
        const int nthreads_omp_always_faster             = 4;
        const int nthreads_omp_always_faster_Nehalem     = 12;
        const int nthreads_omp_always_faster_SandyBridge = 16;
        const int first_model_Nehalem                    = 0x1A;
        const int first_model_SandyBridge                = 0x2A;
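        /* For reference: 0x1A is the CPUID model number of the first Nehalem
         * desktop parts (Core i7-900 series) and 0x2A that of the first
         * Sandy Bridge parts, so model >= first_model_* below acts as a
         * rough "this generation or newer" test. */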
        gmx_bool  bIntel_Family6;

        bIntel_Family6 =
            (gmx_cpuid_vendor(hwinfo->cpuid_info) == GMX_CPUID_VENDOR_INTEL &&
             gmx_cpuid_family(hwinfo->cpuid_info) == 6);

        if (nthreads_tot <= nthreads_omp_always_faster ||
            (bIntel_Family6 &&
             ((gmx_cpuid_model(hwinfo->cpuid_info) >= first_model_Nehalem && nthreads_tot <= nthreads_omp_always_faster_Nehalem) ||
              (gmx_cpuid_model(hwinfo->cpuid_info) >= first_model_SandyBridge && nthreads_tot <= nthreads_omp_always_faster_SandyBridge))))
        {
            /* Use pure OpenMP parallelization */
            nthreads_tmpi = 1;
        }
        else
        {
            /* Don't use OpenMP parallelization */
            nthreads_tmpi = nthreads_tot;
        }
    }

    return nthreads_tmpi;
}
/* Get the number of threads to use for thread-MPI based on how many
 * were requested, which algorithms we're using,
 * and how many particles there are.
 * At this point we have already called check_and_update_hw_opt.
 * Thus all options should be internally consistent and consistent
 * with the hardware, except that ntmpi could be larger than #GPU.
 */
static int get_nthreads_mpi(const gmx_hw_info_t *hwinfo,
                            gmx_hw_opt_t        *hw_opt,
                            t_inputrec          *inputrec, gmx_mtop_t *mtop,
                            const t_commrec     *cr,
                            FILE                *fplog)
{
    int      nthreads_hw, nthreads_tot_max, nthreads_tmpi, nthreads_new, ngpu;
    int      min_atoms_per_mpi_thread;
    gmx_bool bCanUseGPU;

    if (hw_opt->nthreads_tmpi > 0)
    {
        /* Trivial, return right away */
        return hw_opt->nthreads_tmpi;
    }

    nthreads_hw = hwinfo->nthreads_hw_avail;

    /* How many total (#tMPI*#OpenMP) threads can we start? */
    if (hw_opt->nthreads_tot > 0)
    {
        nthreads_tot_max = hw_opt->nthreads_tot;
    }
    else
    {
        nthreads_tot_max = nthreads_hw;
    }

    bCanUseGPU = (inputrec->cutoff_scheme == ecutsVERLET && hwinfo->bCanUseGPU);
    if (bCanUseGPU)
    {
        ngpu = hwinfo->gpu_info.ncuda_dev_use;
    }
    else
    {
        ngpu = 0;
    }

    nthreads_tmpi =
        get_tmpi_omp_thread_division(hwinfo, hw_opt, nthreads_tot_max, ngpu);

    if (inputrec->eI == eiNM || EI_TPI(inputrec->eI))
    {
        /* Dims/steps are divided over the nodes instead of splitting the atoms */
        min_atoms_per_mpi_thread = 0;
    }
    else if (bCanUseGPU)
    {
        min_atoms_per_mpi_thread = MIN_ATOMS_PER_GPU;
    }
    else
    {
        min_atoms_per_mpi_thread = MIN_ATOMS_PER_MPI_THREAD;
    }
    /* Check if an algorithm does not support parallel simulation. */
    if (nthreads_tmpi != 1 &&
        ( inputrec->eI == eiLBFGS ||
          inputrec->coulombtype == eelEWALD ) )
    {
        nthreads_tmpi = 1;

        md_print_warn(cr, fplog, "The integration or electrostatics algorithm doesn't support parallel runs. Using a single thread-MPI thread.\n");
        if (hw_opt->nthreads_tmpi > nthreads_tmpi)
        {
            gmx_fatal(FARGS, "You asked for more than 1 thread-MPI thread, but an algorithm doesn't support that");
        }
    }
    else if (mtop->natoms/nthreads_tmpi < min_atoms_per_mpi_thread)
    {
        /* the thread number was chosen automatically, but there are too many
           threads (too few atoms per thread) */
        nthreads_new = max(1, mtop->natoms/min_atoms_per_mpi_thread);

        /* Avoid partial use of Hyper-Threading */
        if (gmx_cpuid_x86_smt(hwinfo->cpuid_info) == GMX_CPUID_X86_SMT_ENABLED &&
            nthreads_new > nthreads_hw/2 && nthreads_new < nthreads_hw)
        {
            nthreads_new = nthreads_hw/2;
        }

        /* Avoid large prime numbers in the thread count */
        if (nthreads_new >= 6)
        {
            /* Use only 6, 8, 10 with additional factors of 2 */
            int fac;

            fac = 2;
            while (3*fac*2 <= nthreads_new)
            {
                fac *= 2;
            }

            nthreads_new = (nthreads_new/fac)*fac;
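            /* This rounding keeps only thread counts of the form
             * {6,8,10}*2^k: e.g. 7 -> 6 and 11 -> 10 (fac = 2),
             * 13 -> 12 and 22 -> 20 (fac = 4). */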
        }
        else
        {
            if (nthreads_new == 5)
            {
                nthreads_new = 4;
            }
        }

        nthreads_tmpi = nthreads_new;

        fprintf(stderr, "\n");
        fprintf(stderr, "NOTE: Parallelization is limited by the small number of atoms,\n");
        fprintf(stderr, "      only starting %d thread-MPI threads.\n", nthreads_tmpi);
        fprintf(stderr, "      You can use the -nt and/or -ntmpi option to optimize the number of threads.\n\n");
    }

    return nthreads_tmpi;
}
#endif /* GMX_THREAD_MPI */
/* Environment variable for setting nstlist */
static const char*  NSTLIST_ENVVAR         = "GMX_NSTLIST";
/* Try to increase nstlist when using a GPU with nstlist less than this */
static const int    NSTLIST_GPU_ENOUGH     = 20;
/* Increase nstlist until the non-bonded cost increases more than this factor */
static const float  NBNXN_GPU_LIST_OK_FAC  = 1.20;
/* Don't increase nstlist beyond a non-bonded cost increase of this factor.
 * A standard (protein+)water system at 300K with PME ewald_rtol=1e-5
 * needs 1.28 at rcoulomb=0.9 and 1.24 at rcoulomb=1.0 to get to nstlist=40.
 */
static const float  NBNXN_GPU_LIST_MAX_FAC = 1.30;
/* Try to increase nstlist when running on a GPU */
static void increase_nstlist(FILE *fp, t_commrec *cr,
                             t_inputrec *ir, const gmx_mtop_t *mtop, matrix box)
{
    char                  *env;
    int                    nstlist_orig, nstlist_prev;
    verletbuf_list_setup_t ls;
    real                   rlist_nstlist10, rlist_inc, rlist_ok, rlist_max;
    real                   rlist_new, rlist_prev;
    int                    i;
    t_state                state_tmp;
    gmx_bool               bBox, bDD, bCont;
    const char            *nstl_fmt = "\nFor optimal performance with a GPU nstlist (now %d) should be larger.\nThe optimum depends on your CPU and GPU resources.\nYou might want to try several nstlist values.\n";
    const char            *vbd_err  = "Cannot increase nstlist for GPU run because verlet-buffer-drift is not set or used";
    const char            *box_err  = "Cannot increase nstlist for GPU run because the box is too small";
    const char            *dd_err   = "Cannot increase nstlist for GPU run because of domain decomposition limitations";
    char                   buf[STRLEN];

    /* Alternative nstlist values to try when increasing nstlist */
    const int nstl[] = { 20, 25, 40 };
#define NNSTL (sizeof(nstl)/sizeof(nstl[0]))

    env = getenv(NSTLIST_ENVVAR);
    if (env == NULL && fp != NULL)
    {
        fprintf(fp, nstl_fmt, ir->nstlist);
    }

    if (ir->verletbuf_drift == 0)
    {
        gmx_fatal(FARGS, "You are using an old tpr file with a GPU, please generate a new tpr file with an up-to-date version of grompp");
    }

    if (ir->verletbuf_drift < 0)
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "%s\n", vbd_err);
        }
        if (fp != NULL)
        {
            fprintf(fp, "%s\n", vbd_err);
        }

        return;
    }
    nstlist_orig = ir->nstlist;
    if (env != NULL)
    {
        sprintf(buf, "Getting nstlist from environment variable GMX_NSTLIST=%s", env);
        if (MASTER(cr))
        {
            fprintf(stderr, "%s\n", buf);
        }
        if (fp != NULL)
        {
            fprintf(fp, "%s\n", buf);
        }
        sscanf(env, "%d", &ir->nstlist);
    }

    verletbuf_get_list_setup(TRUE, &ls);

    /* Allow rlist to make the list a given factor larger than the list
     * would be with nstlist=10.
     */
    nstlist_prev = ir->nstlist;
    ir->nstlist  = 10;
    calc_verlet_buffer_size(mtop, det(box), ir, ir->verletbuf_drift, &ls,
                            NULL, &rlist_nstlist10);
    ir->nstlist  = nstlist_prev;

    /* Determine the pair list size increase due to zero interactions */
    rlist_inc = nbnxn_get_rlist_effective_inc(NBNXN_GPU_CLUSTER_SIZE, mtop->natoms/det(box));
    rlist_ok  = (rlist_nstlist10 + rlist_inc)*pow(NBNXN_GPU_LIST_OK_FAC, 1.0/3.0) - rlist_inc;
    rlist_max = (rlist_nstlist10 + rlist_inc)*pow(NBNXN_GPU_LIST_MAX_FAC, 1.0/3.0) - rlist_inc;
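    /* Rationale: the pair-list cost grows roughly with the volume of a
     * sphere of radius rlist + rlist_inc, so allowing the non-bonded cost
     * to grow by a factor F lets that effective radius grow by F^(1/3):
     * about 6.3% for F = 1.20 and 9.1% for F = 1.30. */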
    if (debug)
    {
        fprintf(debug, "GPU nstlist tuning: rlist_inc %.3f rlist_max %.3f\n",
                rlist_inc, rlist_max);
    }

    i            = 0;
    nstlist_prev = nstlist_orig;
    rlist_prev   = ir->rlist;
    do
    {
        ir->nstlist = nstl[i];

        /* Set the pair-list buffer size in ir */
        calc_verlet_buffer_size(mtop, det(box), ir, ir->verletbuf_drift, &ls,
                                NULL, &rlist_new);

        /* Does rlist fit in the box? */
        bBox = (sqr(rlist_new) < max_cutoff2(ir->ePBC, box));
        bDD  = TRUE;
        if (bBox && DOMAINDECOMP(cr))
        {
            /* Check if rlist fits in the domain decomposition */
            if (inputrec2nboundeddim(ir) < DIM)
            {
                gmx_incons("Changing nstlist with domain decomposition and unbounded dimensions is not implemented yet");
            }
            copy_mat(box, state_tmp.box);
            bDD = change_dd_cutoff(cr, &state_tmp, ir, rlist_new);
        }

        bCont = FALSE;
        if (bBox && bDD && rlist_new <= rlist_max)
        {
            /* Increase nstlist */
            nstlist_prev = ir->nstlist;
            rlist_prev   = rlist_new;
            bCont        = (i+1 < NNSTL && rlist_new < rlist_ok);
        }
        else
        {
            /* Stick with the previous nstlist */
            ir->nstlist = nstlist_prev;
            rlist_new   = rlist_prev;
            bBox        = TRUE;
            bDD         = TRUE;
        }

        i++;
    }
    while (bCont);
    if (!bBox || !bDD)
    {
        gmx_warning(!bBox ? box_err : dd_err);
        if (fp != NULL)
        {
            fprintf(fp, "\n%s\n", !bBox ? box_err : dd_err);
        }
        ir->nstlist = nstlist_orig;
    }
    else if (ir->nstlist != nstlist_orig || rlist_new != ir->rlist)
    {
        sprintf(buf, "Changing nstlist from %d to %d, rlist from %g to %g",
                nstlist_orig, ir->nstlist,
                ir->rlist, rlist_new);
        if (MASTER(cr))
        {
            fprintf(stderr, "%s\n\n", buf);
        }
        if (fp != NULL)
        {
            fprintf(fp, "%s\n\n", buf);
        }
        ir->rlist     = rlist_new;
        ir->rlistlong = rlist_new;
    }
}
static void prepare_verlet_scheme(FILE                *fplog,
                                  const gmx_hw_info_t *hwinfo,
                                  t_commrec           *cr,
                                  const char          *nbpu_opt,
                                  t_inputrec          *ir,
                                  const gmx_mtop_t    *mtop,
                                  matrix               box,
                                  gmx_bool            *bUseGPU)
{
    /* Here we only check for GPU usage on the MPI master process,
     * as we don't know yet how many GPUs we will use.
     * We check for a GPU on all processes later.
     */
    *bUseGPU = hwinfo->bCanUseGPU || (getenv("GMX_EMULATE_GPU") != NULL);

    if (ir->verletbuf_drift > 0)
    {
        /* Update the Verlet buffer size for the current run setup */
        verletbuf_list_setup_t ls;
        real                   rlist_new;

        /* Here we assume CPU acceleration is on. But as currently
         * calc_verlet_buffer_size gives the same results for 4x8 and 4x4,
         * and 4x2 gives a larger buffer than 4x4, this is ok.
         */
        verletbuf_get_list_setup(*bUseGPU, &ls);

        calc_verlet_buffer_size(mtop, det(box), ir,
                                ir->verletbuf_drift, &ls,
                                NULL, &rlist_new);
        if (rlist_new != ir->rlist)
        {
            if (fplog != NULL)
            {
                fprintf(fplog, "\nChanging rlist from %g to %g for non-bonded %dx%d atom kernels\n\n",
                        ir->rlist, rlist_new,
                        ls.cluster_size_i, ls.cluster_size_j);
            }
            ir->rlist     = rlist_new;
            ir->rlistlong = rlist_new;
        }
    }

    /* With GPU or emulation we should check nstlist for performance */
    if ((EI_DYNAMICS(ir->eI) &&
         *bUseGPU &&
         ir->nstlist < NSTLIST_GPU_ENOUGH) ||
        getenv(NSTLIST_ENVVAR) != NULL)
    {
        /* Choose a better nstlist */
        increase_nstlist(fplog, cr, ir, mtop, box);
    }
}
static void convert_to_verlet_scheme(FILE *fplog,
                                     t_inputrec *ir,
                                     gmx_mtop_t *mtop, real box_vol)
{
    const char *conv_mesg = "Converting input file with group cut-off scheme to the Verlet cut-off scheme";

    md_print_warn(NULL, fplog, "%s\n", conv_mesg);

    ir->cutoff_scheme   = ecutsVERLET;
    ir->verletbuf_drift = 0.005;

    if (ir->rcoulomb != ir->rvdw)
    {
        gmx_fatal(FARGS, "The VdW and Coulomb cut-offs are different, whereas the Verlet scheme only supports equal cut-offs");
    }

    if (ir->vdwtype == evdwUSER || EEL_USER(ir->coulombtype))
    {
        gmx_fatal(FARGS, "User non-bonded potentials are not (yet) supported with the Verlet scheme");
    }
    else if (EVDW_SWITCHED(ir->vdwtype) || EEL_SWITCHED(ir->coulombtype))
    {
        md_print_warn(NULL, fplog, "Converting switched or shifted interactions to a shifted potential (without force shift); this will lead to slightly different interaction potentials");

        if (EVDW_SWITCHED(ir->vdwtype))
        {
            ir->vdwtype = evdwCUT;
        }
        if (EEL_SWITCHED(ir->coulombtype))
        {
            if (EEL_FULL(ir->coulombtype))
            {
                /* With full electrostatics only PME can be switched */
                ir->coulombtype = eelPME;
            }
            else
            {
                md_print_warn(NULL, fplog, "NOTE: Replacing %s electrostatics with reaction-field with epsilon-rf=inf\n", eel_names[ir->coulombtype]);
                ir->coulombtype = eelRF;
                ir->epsilon_rf  = 0.0;
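                /* epsilon_rf = 0 is GROMACS's convention for an infinite
                 * reaction-field dielectric, matching the note above. */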
            }
        }
    }

    /* We set the target energy drift to a small number.
     * Note that this is only for testing. For production the user
     * should think about this and set the mdp options.
     */
    ir->verletbuf_drift = 1e-4;

    if (inputrec2nboundeddim(ir) != 3)
    {
        gmx_fatal(FARGS, "Can only convert old tpr files to the Verlet cut-off scheme with 3D pbc");
    }

    if (ir->efep != efepNO || ir->implicit_solvent != eisNO)
    {
        gmx_fatal(FARGS, "Will not convert old tpr files to the Verlet cut-off scheme with free-energy calculations or implicit solvent");
    }

    if (EI_DYNAMICS(ir->eI) && !(EI_MD(ir->eI) && ir->etc == etcNO))
    {
        verletbuf_list_setup_t ls;

        verletbuf_get_list_setup(FALSE, &ls);
        calc_verlet_buffer_size(mtop, box_vol, ir, ir->verletbuf_drift, &ls,
                                NULL, &ir->rlist);
    }
    else
    {
        ir->verletbuf_drift = -1;
        ir->rlist           = 1.05*max(ir->rvdw, ir->rcoulomb);
    }

    gmx_mtop_remove_chargegroups(mtop);
}
static void check_and_update_hw_opt(gmx_hw_opt_t *hw_opt,
                                    int           cutoff_scheme,
                                    gmx_bool      bIsSimMaster)
{
    gmx_omp_nthreads_read_env(&hw_opt->nthreads_omp, bIsSimMaster);

#ifndef GMX_THREAD_MPI
    if (hw_opt->nthreads_tot > 0)
    {
        gmx_fatal(FARGS, "Setting the total number of threads is only supported with thread-MPI, but GROMACS was compiled without thread-MPI");
    }
    if (hw_opt->nthreads_tmpi > 0)
    {
        gmx_fatal(FARGS, "Setting the number of thread-MPI threads is only supported with thread-MPI, but GROMACS was compiled without thread-MPI");
    }
#endif

#ifndef GMX_OPENMP
    if (hw_opt->nthreads_omp > 1)
    {
        gmx_fatal(FARGS, "More than 1 OpenMP thread requested, but GROMACS was compiled without OpenMP support");
    }
    hw_opt->nthreads_omp = 1;
#endif
    if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp_pme <= 0)
    {
        /* We have the same number of OpenMP threads for PP and PME processes,
         * thus we can perform several consistency checks.
         */
        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot != hw_opt->nthreads_tmpi*hw_opt->nthreads_omp)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) does not match the thread-MPI threads (%d) times the OpenMP threads (%d) requested",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi, hw_opt->nthreads_omp);
        }

        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_tmpi != 0)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of thread-MPI threads requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi);
        }

        if (hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_omp != 0)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of OpenMP threads requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_omp);
        }

        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_omp <= 0)
        {
            hw_opt->nthreads_omp = hw_opt->nthreads_tot/hw_opt->nthreads_tmpi;
        }
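        /* Example: "mdrun -nt 16 -ntmpi 4" passes the checks above and
         * derives 4 OpenMP threads per thread-MPI rank here. */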
    }

#ifndef GMX_OPENMP
    if (hw_opt->nthreads_omp > 1)
    {
        gmx_fatal(FARGS, "OpenMP threads are requested, but GROMACS was compiled without OpenMP support");
    }
#endif

    if (cutoff_scheme == ecutsGROUP)
    {
        /* We only have OpenMP support for PME-only nodes */
        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS, "OpenMP threads have been requested with cut-off scheme %s, but these are only supported with cut-off scheme %s",
                      ecutscheme_names[cutoff_scheme],
                      ecutscheme_names[ecutsVERLET]);
        }
        hw_opt->nthreads_omp = 1;
    }

    if (hw_opt->nthreads_omp_pme > 0 && hw_opt->nthreads_omp <= 0)
    {
        gmx_fatal(FARGS, "You need to specify -ntomp in addition to -ntomp_pme");
    }
    if (hw_opt->nthreads_tot == 1)
    {
        hw_opt->nthreads_tmpi = 1;

        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS, "You requested %d OpenMP threads with %d total threads",
                      hw_opt->nthreads_omp, hw_opt->nthreads_tot);
        }
        hw_opt->nthreads_omp = 1;
    }
    if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
    {
        hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;
    }

    if (debug)
    {
        fprintf(debug, "hw_opt: nt %d ntmpi %d ntomp %d ntomp_pme %d gpu_id '%s'\n",
                hw_opt->nthreads_tot,
                hw_opt->nthreads_tmpi,
                hw_opt->nthreads_omp,
                hw_opt->nthreads_omp_pme,
                hw_opt->gpu_id != NULL ? hw_opt->gpu_id : "");
    }
}
/* Override the value in inputrec with the value passed on the command line (if any) */
static void override_nsteps_cmdline(FILE            *fplog,
                                    gmx_large_int_t  nsteps_cmdline,
                                    t_inputrec      *ir,
                                    const t_commrec *cr)
{
    char sbuf[STEPSTRSIZE];
    char stmp[STRLEN];

    /* override with anything other than the default -2 */
    if (nsteps_cmdline > -2)
    {
        ir->nsteps = nsteps_cmdline;
        if (EI_DYNAMICS(ir->eI))
        {
            sprintf(stmp, "Overriding nsteps with value passed on the command line: %s steps, %.3f ps",
                    gmx_step_str(nsteps_cmdline, sbuf),
                    nsteps_cmdline*ir->delta_t);
        }
        else
        {
            sprintf(stmp, "Overriding nsteps with value passed on the command line: %s steps",
                    gmx_step_str(nsteps_cmdline, sbuf));
        }

        md_print_warn(cr, fplog, "%s\n", stmp);
    }
}
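/* Note: -2 is the "not set" sentinel for nsteps_cmdline, which is why the
 * test above is "> -2" rather than ">= 0"; -1, conventionally used to
 * request a run without a predetermined step limit, must still count as
 * an override. */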
/* Data structure set by SIMMASTER which needs to be passed to all nodes
 * before the other nodes have read the tpx file and called gmx_detect_hardware.
 */
typedef struct {
    int      cutoff_scheme; /* The cutoff scheme from inputrec_t */
    gmx_bool bUseGPU;       /* Use GPU or GPU emulation          */
} master_inf_t;
int mdrunner(gmx_hw_opt_t *hw_opt,
             FILE *fplog, t_commrec *cr, int nfile,
             const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
             gmx_bool bCompact, int nstglobalcomm,
             ivec ddxyz, int dd_node_order, real rdd, real rconstr,
             const char *dddlb_opt, real dlb_scale,
             const char *ddcsx, const char *ddcsy, const char *ddcsz,
             const char *nbpu_opt,
             gmx_large_int_t nsteps_cmdline, int nstepout, int resetstep,
             int nmultisim, int repl_ex_nst, int repl_ex_nex,
             int repl_ex_seed, real pforce, real cpt_period, real max_hours,
             const char *deviceOptions, unsigned long Flags)
{
    gmx_bool        bForceUseGPU, bTryUseGPU;
    double          nodetime = 0, realtime;
    t_inputrec     *inputrec;
    t_state        *state = NULL;
    matrix          box;
    gmx_ddbox_t     ddbox = {0};
    int             npme_major, npme_minor;
    real            ewaldcoeff   = 0;
    gmx_mtop_t     *mtop         = NULL;
    t_mdatoms      *mdatoms      = NULL;
    t_forcerec     *fr           = NULL;
    t_fcdata       *fcd          = NULL;
    gmx_pme_t      *pmedata      = NULL;
    gmx_vsite_t    *vsite        = NULL;
    gmx_constr_t    constr       = NULL;
    int             i, m, nChargePerturbed = -1, status, nalloc;
    gmx_wallcycle_t wcycle;
    gmx_bool        bReadRNG, bReadEkin;
    gmx_runtime_t   runtime;
    t_nrnb         *nrnb;
    int             rc;
    gmx_large_int_t reset_counters;
    gmx_edsam_t     ed           = NULL;
    t_commrec      *cr_old       = cr;
    int             nthreads_pp  = 1;
    int             nthreads_pme = 1;
    gmx_membed_t    membed       = NULL;
    gmx_hw_info_t  *hwinfo       = NULL;
    master_inf_t    minf         = {-1, FALSE};
    /* CAUTION: threads may be started later on in this function, so
       cr doesn't reflect the final parallel state right now */
    snew(inputrec, 1);
    snew(mtop, 1);

    if (Flags & MD_APPENDFILES)
    {
        fplog = NULL;
    }

    bForceUseGPU = (strncmp(nbpu_opt, "gpu", 3) == 0);
    bTryUseGPU   = (strncmp(nbpu_opt, "auto", 4) == 0) || bForceUseGPU;

    /* Detect hardware, gather information. This is an operation that is
     * global for this process (MPI rank). */
    hwinfo = gmx_detect_hardware(fplog, cr,
                                 bForceUseGPU, bTryUseGPU, hw_opt->gpu_id);

    snew(state, 1);
    if (SIMMASTER(cr))
    {
        /* Read (nearly) all data required for the simulation */
        read_tpx_state(ftp2fn(efTPX, nfile, fnm), inputrec, state, NULL, mtop);

        if (inputrec->cutoff_scheme != ecutsVERLET &&
            ((Flags & MD_TESTVERLET) || getenv("GMX_VERLET_SCHEME") != NULL))
        {
            convert_to_verlet_scheme(fplog, inputrec, mtop, det(state->box));
        }

        minf.cutoff_scheme = inputrec->cutoff_scheme;
        minf.bUseGPU       = FALSE;

        if (inputrec->cutoff_scheme == ecutsVERLET)
        {
            prepare_verlet_scheme(fplog, hwinfo, cr, nbpu_opt,
                                  inputrec, mtop, state->box,
                                  &minf.bUseGPU);
        }
        else if (hwinfo->bCanUseGPU)
        {
            md_print_warn(cr, fplog,
                          "NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
                          "      To use a GPU, set the mdp option: cutoff-scheme = Verlet\n"
                          "      (for quick performance testing you can use the -testverlet option)\n");

            if (bForceUseGPU)
            {
                gmx_fatal(FARGS, "GPU requested, but can't be used without cutoff-scheme=Verlet");
            }
        }

        md_print_warn(cr, fplog,
                      "NOTE: There is no SIMD implementation of the group scheme kernels on\n"
                      "      BlueGene/Q. You will observe better performance from using the\n"
                      "      Verlet cut-off scheme.\n");
    }
#ifndef GMX_THREAD_MPI
    if (PAR(cr))
    {
        gmx_bcast_sim(sizeof(minf), &minf, cr);
    }
#endif

    if (minf.bUseGPU && cr->npmenodes == -1)
    {
        /* Don't automatically use PME-only nodes with GPUs */
        cr->npmenodes = 0;
    }

    /* Check for externally set OpenMP affinity and turn off internal
     * pinning if any is found. We need to do this check early to tell
     * thread-MPI whether it should do pinning when spawning threads.
     * TODO: the above no longer holds, we should move these checks down
     */
    gmx_omp_check_thread_affinity(fplog, cr, hw_opt);

#ifdef GMX_THREAD_MPI
    /* With thread-MPI inputrec is only set here on the master thread */
    if (SIMMASTER(cr))
#endif
    {
        check_and_update_hw_opt(hw_opt, minf.cutoff_scheme, SIMMASTER(cr));

#ifdef GMX_THREAD_MPI
        /* Early check for externally set process affinity. Can't do over all
         * MPI processes because hwinfo is not available everywhere, but with
         * thread-MPI it's needed as pinning might get turned off which needs
         * to be known before starting thread-MPI. */
        gmx_check_thread_affinity_set(fplog,
                                      NULL,
                                      hw_opt, hwinfo->nthreads_hw_avail, FALSE);
#endif

#ifdef GMX_THREAD_MPI
        if (cr->npmenodes > 0 && hw_opt->nthreads_tmpi <= 0)
        {
            gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME nodes");
        }
#endif

        if (hw_opt->nthreads_omp_pme != hw_opt->nthreads_omp &&
            cr->npmenodes <= 0)
        {
            gmx_fatal(FARGS, "You need to explicitly specify the number of PME nodes (-npme) when using different numbers of OpenMP threads for the PP and PME nodes");
        }
    }
#ifdef GMX_THREAD_MPI
    if (SIMMASTER(cr))
    {
        /* NOW the threads will be started: */
        hw_opt->nthreads_tmpi = get_nthreads_mpi(hwinfo,
                                                 hw_opt,
                                                 inputrec, mtop,
                                                 cr, fplog);
        if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp <= 0)
        {
            hw_opt->nthreads_omp = hw_opt->nthreads_tot/hw_opt->nthreads_tmpi;
        }

        if (hw_opt->nthreads_tmpi > 1)
        {
            /* now start the threads. */
            cr = mdrunner_start_threads(hw_opt, fplog, cr_old, nfile, fnm,
                                        oenv, bVerbose, bCompact, nstglobalcomm,
                                        ddxyz, dd_node_order, rdd, rconstr,
                                        dddlb_opt, dlb_scale, ddcsx, ddcsy, ddcsz,
                                        nbpu_opt,
                                        nsteps_cmdline, nstepout, resetstep, nmultisim,
                                        repl_ex_nst, repl_ex_nex, repl_ex_seed, pforce,
                                        cpt_period, max_hours, deviceOptions,
                                        Flags);
            /* the main thread continues here with a new cr. We don't deallocate
               the old cr because other threads may still be reading it. */
            if (cr == NULL)
            {
                gmx_comm("Failed to spawn threads");
            }
        }
    }
#endif
    /* END OF CAUTION: cr is now reliable */
    /* g_membed initialisation *
     * Because we change the mtop, init_membed is called before the init_parallel *
     * (in case we ever want to make it run in parallel) */
    if (opt2bSet("-membed", nfile, fnm))
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "Initializing membed\n");
        }
        membed = init_membed(fplog, nfile, fnm, mtop, inputrec, state, cr, &cpt_period);
    }
    if (PAR(cr))
    {
        /* now broadcast everything to the non-master nodes/threads: */
        init_parallel(fplog, cr, inputrec, mtop);
    }

    /* This check needs to happen after get_nthreads_mpi() */
    if (inputrec->cutoff_scheme == ecutsVERLET && (Flags & MD_PARTDEC))
    {
        gmx_fatal_collective(FARGS, cr, NULL,
                             "The Verlet cut-off scheme is not supported with particle decomposition.\n"
                             "You can achieve the same effect as particle decomposition by running in parallel using only OpenMP threads.");
    }

    if (fplog != NULL)
    {
        pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
    }

    /* now make sure the state is initialized and propagated */
    set_state_entries(state, inputrec, cr->nnodes);

    /* A parallel command line option consistency check that we can
       only do after any threads have started. */
    if (!PAR(cr) &&
        (ddxyz[XX] > 1 || ddxyz[YY] > 1 || ddxyz[ZZ] > 1 || cr->npmenodes > 0))
    {
        gmx_fatal(FARGS,
                  "The -dd or -npme options request a parallel simulation, "
#ifndef GMX_MPI
                  "but %s was compiled without threads or MPI enabled"
#else
#ifdef GMX_THREAD_MPI
                  "but the number of threads (option -nt) is 1"
#else
                  "but %s was not started through mpirun/mpiexec or only one process was requested through mpirun/mpiexec"
#endif
#endif
                  , ShortProgram());
    }

    if ((Flags & MD_RERUN) &&
        (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
    {
        gmx_fatal(FARGS, "The .mdp file specified an energy minimization or normal mode algorithm, and these are not compatible with mdrun -rerun");
    }
    if (can_use_allvsall(inputrec, mtop, TRUE, cr, fplog) && PAR(cr))
    {
        /* Simple neighbour searching and (also) all-vs-all loops
         * do not work with domain decomposition. */
        Flags |= MD_PARTDEC;
    }

    if (!EEL_PME(inputrec->coulombtype) || (Flags & MD_PARTDEC))
    {
        if (cr->npmenodes > 0)
        {
            if (!EEL_PME(inputrec->coulombtype))
            {
                gmx_fatal_collective(FARGS, cr, NULL,
                                     "PME nodes are requested, but the system does not use PME electrostatics");
            }
            if (Flags & MD_PARTDEC)
            {
                gmx_fatal_collective(FARGS, cr, NULL,
                                     "PME nodes are requested, but particle decomposition does not support separate PME nodes");
            }
        }

        cr->npmenodes = 0;
    }

#ifdef GMX_FAHCORE
    fcRegisterSteps(inputrec->nsteps, inputrec->init_step);
#endif
    /* NMR restraints must be initialized before load_checkpoint,
     * since with time averaging the history is added to t_state.
     * For proper consistency check we therefore need to extend
     * t_state here.
     * So the PME-only nodes (if present) will also initialize
     * the distance restraints.
     */
    snew(fcd, 1);

    /* This needs to be called before read_checkpoint to extend the state */
    init_disres(fplog, mtop, inputrec, cr, Flags & MD_PARTDEC, fcd, state, repl_ex_nst > 0);

    if (gmx_mtop_ftype_count(mtop, F_ORIRES) > 0)
    {
        if (PAR(cr) && !(Flags & MD_PARTDEC))
        {
            gmx_fatal(FARGS, "Orientation restraints do not work (yet) with domain decomposition, use particle decomposition (mdrun option -pd)");
        }
        /* Orientation restraints */
        if (MASTER(cr))
        {
            init_orires(fplog, mtop, state->x, inputrec, cr->ms, &(fcd->orires),
                        state);
        }
    }

    if (DEFORM(*inputrec))
    {
        /* Store the deform reference box before reading the checkpoint */
        if (SIMMASTER(cr))
        {
            copy_mat(state->box, box);
        }
        if (PAR(cr))
        {
            gmx_bcast(sizeof(box), box, cr);
        }
        /* Because we do not have the update struct available yet
         * in which the reference values should be stored,
         * we store them temporarily in static variables.
         * This should be thread safe, since they are only written once
         * and with identical values.
         */
#ifdef GMX_THREAD_MPI
        tMPI_Thread_mutex_lock(&deform_init_box_mutex);
#endif
        deform_init_init_step_tpx = inputrec->init_step;
        copy_mat(box, deform_init_box_tpx);
#ifdef GMX_THREAD_MPI
        tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
#endif
    }
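    /* Any later reader of deform_init_init_step_tpx/deform_init_box_tpx
     * should take deform_init_box_mutex in the same way, so that no thread
     * can observe a partially written reference box. */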
    if (opt2bSet("-cpi", nfile, fnm))
    {
        /* Check if checkpoint file exists before doing continuation.
         * This way we can use identical input options for the first and subsequent runs...
         */
        if (gmx_fexist_master(opt2fn_master("-cpi", nfile, fnm, cr), cr))
        {
            load_checkpoint(opt2fn_master("-cpi", nfile, fnm, cr), &fplog,
                            cr, Flags & MD_PARTDEC, ddxyz,
                            inputrec, state, &bReadRNG, &bReadEkin,
                            (Flags & MD_APPENDFILES),
                            (Flags & MD_APPENDFILESSET));

            if (bReadRNG)
            {
                Flags |= MD_READ_RNG;
            }
            if (bReadEkin)
            {
                Flags |= MD_READ_EKIN;
            }
        }
    }

    if (((MASTER(cr) || (Flags & MD_SEPPOT)) && (Flags & MD_APPENDFILES))
#ifdef GMX_THREAD_MPI
        /* With thread MPI only the master node/thread exists in mdrun.c,
         * therefore non-master nodes need to open the "seppot" log file here.
         */
        || (!MASTER(cr) && (Flags & MD_SEPPOT))
#endif
        )
    {
        gmx_log_open(ftp2fn(efLOG, nfile, fnm), cr, !(Flags & MD_SEPPOT),
                     Flags, &fplog);
    }

    /* override nsteps with value from cmdline */
    override_nsteps_cmdline(fplog, nsteps_cmdline, inputrec, cr);

    if (SIMMASTER(cr))
    {
        copy_mat(state->box, box);
    }

    if (PAR(cr))
    {
        gmx_bcast(sizeof(box), box, cr);
    }

    /* Essential dynamics */
    if (opt2bSet("-ei", nfile, fnm))
    {
        /* Open input and output files, allocate space for ED data structure */
        ed = ed_open(mtop->natoms, &state->edsamstate, nfile, fnm, Flags, oenv, cr);
    }
    if (PAR(cr) && !((Flags & MD_PARTDEC) ||
                     EI_TPI(inputrec->eI) ||
                     inputrec->eI == eiNM))
    {
        cr->dd = init_domain_decomposition(fplog, cr, Flags, ddxyz, rdd, rconstr,
                                           dddlb_opt, dlb_scale,
                                           ddcsx, ddcsy, ddcsz,
                                           mtop, inputrec,
                                           box, state->x,
                                           &ddbox, &npme_major, &npme_minor);

        make_dd_communicators(fplog, cr, dd_node_order);

        /* Set overallocation to avoid frequent reallocation of arrays */
        set_over_alloc_dd(TRUE);
    }
    else
    {
        /* PME, if used, is done on all nodes with 1D decomposition */
        cr->npmenodes = 0;
        cr->duty      = (DUTY_PP | DUTY_PME);

        /* NM and TPI perform single node energy calculations in parallel */
        if (!(inputrec->eI == eiNM || EI_TPI(inputrec->eI)))
        {
            npme_major = cr->nnodes;
        }

        if (inputrec->ePBC == epbcSCREW)
        {
            gmx_fatal(FARGS,
                      "pbc=%s is only implemented with domain decomposition",
                      epbc_names[inputrec->ePBC]);
        }
    }

    if (PAR(cr))
    {
        /* After possible communicator splitting in make_dd_communicators,
         * we can set up the intra/inter node communication.
         */
        gmx_setup_nodecomm(fplog, cr);
    }

    /* Initialize per-physical-node MPI process/thread ID and counters. */
    gmx_init_intranode_counters(cr);
    md_print_info(cr, fplog, "Using %d MPI %s\n",
                  cr->nnodes,
#ifdef GMX_THREAD_MPI
                  cr->nnodes == 1 ? "thread" : "threads"
#else
                  cr->nnodes == 1 ? "process" : "processes"
#endif
                  );

    gmx_omp_nthreads_init(fplog, cr,
                          hwinfo->nthreads_hw_avail,
                          hw_opt->nthreads_omp,
                          hw_opt->nthreads_omp_pme,
                          (cr->duty & DUTY_PP) == 0,
                          inputrec->cutoff_scheme == ecutsVERLET);

    /* check consistency and decide on the number of gpus to use. */
    gmx_check_hw_runconf_consistency(fplog, hwinfo, cr, hw_opt->nthreads_tmpi,
                                     minf.bUseGPU);

    /* Getting the number of PP/PME threads:
       the PME env variable should be read only on one node to make sure it is
       identical everywhere.
     */
    /* TODO nthreads_pp is only used for pinning threads.
     * This is a temporary solution until we have a hw topology library.
     */
    nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
    nthreads_pme = gmx_omp_nthreads_get(emntPME);

    wcycle = wallcycle_init(fplog, resetstep, cr, nthreads_pp, nthreads_pme);

    if (PAR(cr))
    {
        /* Master synchronizes its value of reset_counters with all nodes
         * including PME only nodes */
        reset_counters = wcycle_get_reset_counters(wcycle);
        gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr);
        wcycle_set_reset_counters(wcycle, reset_counters);
    }
    if (cr->duty & DUTY_PP)
    {
        /* For domain decomposition we allocate dynamically
         * in dd_partition_system.
         */
        if (DOMAINDECOMP(cr))
        {
            bcast_state_setup(cr, state);
        }
        else
        {
            if (PAR(cr))
            {
                bcast_state(cr, state, TRUE);
            }
        }

        /* Initiate forcerecord */
        fr         = mk_forcerec();
        fr->hwinfo = hwinfo;
        init_forcerec(fplog, oenv, fr, fcd, inputrec, mtop, cr, box, FALSE,
                      opt2fn("-table", nfile, fnm),
                      opt2fn("-tabletf", nfile, fnm),
                      opt2fn("-tablep", nfile, fnm),
                      opt2fn("-tableb", nfile, fnm),
                      nbpu_opt,
                      FALSE, pforce);

        /* version for PCA_NOT_READ_NODE (see md.c) */
        /* init_forcerec(fplog,fr,fcd,inputrec,mtop,cr,box,FALSE,
           "nofile","nofile","nofile","nofile",FALSE,pforce);
         */
        fr->bSepDVDL = ((Flags & MD_SEPPOT) == MD_SEPPOT);

        /* Initialize QM-MM */
        if (fr->bQMMM)
        {
            init_QMMMrec(cr, box, mtop, inputrec, fr);
        }

        /* Initialize the mdatoms structure.
         * mdatoms is not filled with atom data,
         * as this can not be done now with domain decomposition.
         */
        mdatoms = init_mdatoms(fplog, mtop, inputrec->efep != efepNO);

        if (mdatoms->nPerturbed > 0 && inputrec->cutoff_scheme == ecutsVERLET)
        {
            gmx_fatal(FARGS, "The Verlet cut-off scheme does not (yet) support free-energy calculations with perturbed atoms, only perturbed interactions. This will be implemented soon. Use the group scheme for now.");
        }
        /* Initialize the virtual site communication */
        vsite = init_vsite(mtop, cr, FALSE);

        calc_shifts(box, fr->shift_vec);

        /* With periodic molecules the charge groups should be whole at start up
         * and the virtual sites should not be far from their proper positions.
         */
        if (!inputrec->bContinuation && MASTER(cr) &&
            !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
        {
            /* Make molecules whole at start of run */
            if (fr->ePBC != epbcNONE)
            {
                do_pbc_first_mtop(fplog, inputrec->ePBC, box, mtop, state->x);
            }
            if (vsite != NULL)
            {
                /* Correct initial vsite positions are required
                 * for the initial distribution in the domain decomposition
                 * and for the initial shell prediction.
                 */
                construct_vsites_mtop(fplog, vsite, mtop, state->x);
            }
        }

        if (EEL_PME(fr->eeltype))
        {
            ewaldcoeff = fr->ewaldcoeff;
            pmedata    = &fr->pmedata;
        }
        else
        {
            pmedata = NULL;
        }
    }
    else
    {
        /* This is a PME only node */

        /* We don't need the state */
        done_state(state);

        ewaldcoeff = calc_ewaldcoeff(inputrec->rcoulomb, inputrec->ewald_rtol);
        snew(pmedata, 1);
    }
    if (hw_opt->thread_affinity != threadaffOFF)
    {
        /* Before setting affinity, check whether the affinity has changed
         * - which indicates that probably the OpenMP library has changed it
         * since we first checked.
         */
        gmx_check_thread_affinity_set(fplog, cr,
                                      hw_opt, hwinfo->nthreads_hw_avail, TRUE);

        /* Set the CPU affinity */
        gmx_set_thread_affinity(fplog, cr, hw_opt, nthreads_pme, hwinfo,
                                inputrec);
    }

    /* Initiate PME if necessary,
     * either on all nodes or on dedicated PME nodes only. */
    if (EEL_PME(inputrec->coulombtype))
    {
        if (mdatoms)
        {
            nChargePerturbed = mdatoms->nChargePerturbed;
        }
        if (cr->npmenodes > 0)
        {
            /* The PME only nodes need to know nChargePerturbed */
            gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
        }

        if (cr->duty & DUTY_PME)
        {
            status = gmx_pme_init(pmedata, cr, npme_major, npme_minor, inputrec,
                                  mtop ? mtop->natoms : 0, nChargePerturbed,
                                  (Flags & MD_REPRODUCIBLE), nthreads_pme);
            if (status != 0)
            {
                gmx_fatal(FARGS, "Error %d initializing PME", status);
            }
        }
    }
    if (integrator[inputrec->eI].func == do_md)
    {
        /* Turn on signal handling on all nodes.
         * (A user signal from the PME nodes (if any)
         * is communicated to the PP nodes.)
         */
        signal_handler_install();
    }

    if (cr->duty & DUTY_PP)
    {
        if (inputrec->ePull != epullNO)
        {
            /* Initialize pull code */
            init_pull(fplog, inputrec, nfile, fnm, mtop, cr, oenv, inputrec->fepvals->init_lambda,
                      EI_DYNAMICS(inputrec->eI) && MASTER(cr), Flags);
        }

        if (inputrec->bRot)
        {
            /* Initialize enforced rotation code */
            init_rot(fplog, inputrec, nfile, fnm, cr, state->x, box, mtop, oenv,
                     bVerbose, Flags);
        }

        constr = init_constraints(fplog, mtop, inputrec, ed, state, cr);

        if (DOMAINDECOMP(cr))
        {
            dd_init_bondeds(fplog, cr->dd, mtop, vsite, constr, inputrec,
                            Flags & MD_DDBONDCHECK, fr->cginfo_mb);

            set_dd_parameters(fplog, cr->dd, dlb_scale, inputrec, fr, &ddbox);

            setup_dd_grid(fplog, cr->dd);
        }
        /* Now do whatever the user wants us to do (how flexible...) */
        integrator[inputrec->eI].func(fplog, cr, nfile, fnm,
                                      oenv, bVerbose, bCompact,
                                      nstglobalcomm,
                                      vsite, constr,
                                      nstepout, inputrec, mtop,
                                      fcd, state,
                                      mdatoms, nrnb, wcycle, ed, fr,
                                      repl_ex_nst, repl_ex_nex, repl_ex_seed,
                                      membed,
                                      cpt_period, max_hours,
                                      deviceOptions,
                                      Flags,
                                      &runtime);

        if (inputrec->ePull != epullNO)
        {
            finish_pull(fplog, inputrec->pull);
        }

        if (inputrec->bRot)
        {
            finish_rot(inputrec->rot);
        }
    }
    else
    {
        /* do PME only */
        gmx_pmeonly(*pmedata, cr, nrnb, wcycle, &runtime, ewaldcoeff, FALSE, inputrec);
    }

    if (EI_DYNAMICS(inputrec->eI) || EI_TPI(inputrec->eI))
    {
        /* Some timing stats */
        if (SIMMASTER(cr))
        {
            if (runtime.proc == 0)
            {
                runtime.proc = runtime.real;
            }
        }
    }
    wallcycle_stop(wcycle, ewcRUN);

    /* Finish up, write some stuff;
     * if rerunMD, don't write the last frame again.
     */
    finish_run(fplog, cr, ftp2fn(efSTO, nfile, fnm),
               inputrec, nrnb, wcycle, &runtime,
               fr != NULL && fr->nbv != NULL && fr->nbv->bUseGPU ?
               nbnxn_cuda_get_timings(fr->nbv->cu_nbv) : NULL,
               EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));

    if ((cr->duty & DUTY_PP) && fr->nbv != NULL && fr->nbv->bUseGPU)
    {
        char gpu_err_str[STRLEN];

        /* free GPU memory and uninitialize GPU (by destroying the context) */
        nbnxn_cuda_free(fplog, fr->nbv->cu_nbv);

        if (!free_gpu(gpu_err_str))
        {
            gmx_warning("On node %d failed to free GPU #%d: %s",
                        cr->nodeid, get_current_gpu_device_id(), gpu_err_str);
        }
    }

    if (opt2bSet("-membed", nfile, fnm))
    {
        sfree(membed);
    }
    gmx_hardware_info_free(hwinfo);

    /* Does what it says */
    print_date_and_time(fplog, cr->nodeid, "Finished mdrun", &runtime);

    /* Close logfile already here if we were appending to it */
    if (MASTER(cr) && (Flags & MD_APPENDFILES))
    {
        gmx_log_close(fplog);
    }

    rc = (int)gmx_get_stop_condition();

#ifdef GMX_THREAD_MPI
    /* we need to join all threads. The sub-threads join when they
       exit this function, but the master thread needs to be told to
       wait for that. */
    if (PAR(cr) && MASTER(cr))
    {
        tMPI_Finalize();
    }
#endif

    return rc;
}