/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2011,2012,2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/utility/smalloc.h"

#include "md_logging.h"
#include "md_support.h"

#include "checkpoint.h"
#include "mtop_util.h"
#include "sighandler.h"

#include "gmx_detect_hardware.h"
#include "gmx_omp_nthreads.h"
#include "gromacs/gmxpreprocess/calc_verletbuf.h"
#include "gmx_fatal_collective.h"
#include "gmx_thread_affinity.h"

#include "gromacs/fileio/tpxio.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/swap/swapcoords.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"

#include "gpu_utils.h"
#include "nbnxn_cuda_data_mgmt.h"
typedef struct {
    gmx_integrator_t *func;
} gmx_intp_t;

/* The array should match the eI array in include/types/enums.h */
const gmx_intp_t    integrator[eiNR] = { {do_md}, {do_steep}, {do_cg}, {do_md}, {do_md}, {do_nm}, {do_lbfgs}, {do_tpi}, {do_tpi}, {do_md}, {do_md}, {do_md}};

gmx_int64_t         deform_init_init_step_tpx;
matrix              deform_init_box_tpx;
tMPI_Thread_mutex_t deform_init_box_mutex = TMPI_THREAD_MUTEX_INITIALIZER;
#ifdef GMX_THREAD_MPI
/* The minimum number of atoms per tMPI thread. With fewer atoms than this,
 * the number of threads will get lowered.
 */
#define MIN_ATOMS_PER_MPI_THREAD    90
#define MIN_ATOMS_PER_GPU           900
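/* Illustrative arithmetic for these limits: a 9000-atom system may be split
 * over at most 9000/90 = 100 tMPI threads on CPUs, but over at most
 * 9000/900 = 10 tMPI ranks when each rank drives a GPU; beyond that the
 * automatically chosen thread count is lowered (see get_nthreads_mpi()
 * below).
 */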
struct mdrunner_arglist
{
    const char     *dddlb_opt;
    const char     *nbpu_opt;
    gmx_int64_t     nsteps_cmdline;
    const char     *deviceOptions;
/* The function used for spawning threads. Extracts the mdrunner()
   arguments from its one argument and calls mdrunner(), after making
   a commrec. */
static void mdrunner_start_fn(void *arg)
    struct mdrunner_arglist *mda = (struct mdrunner_arglist*)arg;
    struct mdrunner_arglist  mc  = *mda; /* copy the arg list to make sure
                                            that it's thread-local. This doesn't
                                            copy pointed-to items, of course,
                                            but those are all const. */
    t_commrec *cr;                       /* we need a local version of this */

    fnm = dup_tfn(mc.nfile, mc.fnm);

    cr = reinitialize_commrec_for_this_thread(mc.cr);

    mdrunner(&mc.hw_opt, fplog, cr, mc.nfile, fnm, mc.oenv,
             mc.bVerbose, mc.bCompact, mc.nstglobalcomm,
             mc.ddxyz, mc.dd_node_order, mc.rdd,
             mc.rconstr, mc.dddlb_opt, mc.dlb_scale,
             mc.ddcsx, mc.ddcsy, mc.ddcsz,
             mc.nbpu_opt, mc.nstlist_cmdline,
             mc.nsteps_cmdline, mc.nstepout, mc.resetstep,
             mc.nmultisim, mc.repl_ex_nst, mc.repl_ex_nex, mc.repl_ex_seed, mc.pforce,
             mc.cpt_period, mc.max_hours, mc.deviceOptions, mc.imdport, mc.Flags);
/* called by mdrunner() to start a specific number of threads (including
   the main thread) for thread-parallel runs. This in turn calls mdrunner()
   for each thread.
   All options besides nthreads are the same as for mdrunner(). */
static t_commrec *mdrunner_start_threads(gmx_hw_opt_t *hw_opt,
                                         FILE *fplog, t_commrec *cr, int nfile,
                                         const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
                                         gmx_bool bCompact, int nstglobalcomm,
                                         ivec ddxyz, int dd_node_order, real rdd, real rconstr,
                                         const char *dddlb_opt, real dlb_scale,
                                         const char *ddcsx, const char *ddcsy, const char *ddcsz,
                                         const char *nbpu_opt, int nstlist_cmdline,
                                         gmx_int64_t nsteps_cmdline,
                                         int nstepout, int resetstep,
                                         int nmultisim, int repl_ex_nst, int repl_ex_nex, int repl_ex_seed,
                                         real pforce, real cpt_period, real max_hours,
                                         const char *deviceOptions, unsigned long Flags)
    struct mdrunner_arglist *mda;
    t_commrec               *crn; /* the new commrec */

    /* first check whether we even need to start tMPI */
    if (hw_opt->nthreads_tmpi < 2)

    /* a few small, one-time, almost unavoidable memory leaks: */
    fnmn = dup_tfn(nfile, fnm);

    /* fill the data structure to pass as void pointer to thread start fn */
    /* hw_opt contains pointers, which should all be NULL at this stage */
    mda->hw_opt          = *hw_opt;
    mda->bVerbose        = bVerbose;
    mda->bCompact        = bCompact;
    mda->nstglobalcomm   = nstglobalcomm;
    mda->ddxyz[XX]       = ddxyz[XX];
    mda->ddxyz[YY]       = ddxyz[YY];
    mda->ddxyz[ZZ]       = ddxyz[ZZ];
    mda->dd_node_order   = dd_node_order;
    mda->rconstr         = rconstr;
    mda->dddlb_opt       = dddlb_opt;
    mda->dlb_scale       = dlb_scale;
    mda->nbpu_opt        = nbpu_opt;
    mda->nstlist_cmdline = nstlist_cmdline;
    mda->nsteps_cmdline  = nsteps_cmdline;
    mda->nstepout        = nstepout;
    mda->resetstep       = resetstep;
    mda->nmultisim       = nmultisim;
    mda->repl_ex_nst     = repl_ex_nst;
    mda->repl_ex_nex     = repl_ex_nex;
    mda->repl_ex_seed    = repl_ex_seed;
    mda->pforce          = pforce;
    mda->cpt_period      = cpt_period;
    mda->max_hours       = max_hours;
    mda->deviceOptions   = deviceOptions;

    /* now spawn new threads that start mdrunner_start_fn(), while
       the main thread returns; we set thread affinity later */
    ret = tMPI_Init_fn(TRUE, hw_opt->nthreads_tmpi, TMPI_AFFINITY_NONE,
                       mdrunner_start_fn, (void*)(mda));
    if (ret != TMPI_SUCCESS)

    crn = reinitialize_commrec_for_this_thread(cr);
static int get_tmpi_omp_thread_division(const gmx_hw_info_t *hwinfo,
                                        const gmx_hw_opt_t  *hw_opt,
    /* There are no separate PME nodes here, as we ensured in
     * check_and_update_hw_opt that nthreads_tmpi>0 with PME nodes
     * and a conditional ensures we would not have ended up here.
     * Note that separate PME nodes might be switched on later.
     */
        nthreads_tmpi = ngpu;
        if (nthreads_tot > 0 && nthreads_tot < nthreads_tmpi)
            nthreads_tmpi = nthreads_tot;
    else if (hw_opt->nthreads_omp > 0)
        /* Here we could oversubscribe; if we do, we issue a warning later */
        nthreads_tmpi = max(1, nthreads_tot/hw_opt->nthreads_omp);
        /* TODO choose nthreads_omp based on hardware topology
           when we have a hardware topology detection library */
        /* In general, when running up to 4 threads, OpenMP should be faster.
         * Note: on AMD Bulldozer we should avoid running OpenMP over two dies.
         * On Intel>=Nehalem running OpenMP on a single CPU is always faster,
         * even on two CPUs it's usually faster (but with many OpenMP threads
         * it could be faster not to use HT, currently we always use HT).
         * On Nehalem/Westmere we want to avoid running 16 threads over
         * two CPUs with HT, so we need a limit<16; thus we use 12.
         * A reasonable limit for Intel Sandy and Ivy Bridge,
         * not knowing the topology, is 16 threads.
         * Below we check for Intel and AVX, which for now includes
         * Sandy/Ivy Bridge, Haswell/Broadwell. By checking for AVX instead of
         * model numbers we ensure also future Intel CPUs are covered.
         */
        const int nthreads_omp_always_faster           = 4;
        const int nthreads_omp_always_faster_Nehalem   = 12;
        const int nthreads_omp_always_faster_Intel_AVX = 16;

        bIntelAVX =
            (gmx_cpuid_vendor(hwinfo->cpuid_info) == GMX_CPUID_VENDOR_INTEL &&
             gmx_cpuid_feature(hwinfo->cpuid_info, GMX_CPUID_FEATURE_X86_AVX));

        if (nthreads_tot <= nthreads_omp_always_faster ||
            ((gmx_cpuid_is_intel_nehalem(hwinfo->cpuid_info) && nthreads_tot <= nthreads_omp_always_faster_Nehalem) ||
             (bIntelAVX && nthreads_tot <= nthreads_omp_always_faster_Intel_AVX)))
            /* Use pure OpenMP parallelization */
            /* Don't use OpenMP parallelization */
            nthreads_tmpi = nthreads_tot;

    return nthreads_tmpi;
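/* Worked example for the division above (illustrative numbers): with
 * nthreads_tot = 16 and ngpu = 2, the GPU branch gives nthreads_tmpi = 2,
 * i.e. 8 OpenMP threads per tMPI rank; with ngpu = 0 and -ntomp 4, the
 * second branch gives nthreads_tmpi = max(1, 16/4) = 4.
 */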
/* Get the number of threads to use for thread-MPI based on how many
 * were requested, which algorithms we're using,
 * and how many particles there are.
 * At this point we have already called check_and_update_hw_opt.
 * Thus all options should be internally consistent and consistent
 * with the hardware, except that ntmpi could be larger than the number of GPUs.
 */
static int get_nthreads_mpi(const gmx_hw_info_t *hwinfo,
                            gmx_hw_opt_t        *hw_opt,
                            t_inputrec *inputrec, gmx_mtop_t *mtop,
    int      nthreads_hw, nthreads_tot_max, nthreads_tmpi, nthreads_new, ngpu;
    int      min_atoms_per_mpi_thread;

    if (hw_opt->nthreads_tmpi > 0)
        /* Trivial, return right away */
        return hw_opt->nthreads_tmpi;

    nthreads_hw = hwinfo->nthreads_hw_avail;

    /* How many total (#tMPI*#OpenMP) threads can we start? */
    if (hw_opt->nthreads_tot > 0)
        nthreads_tot_max = hw_opt->nthreads_tot;
        nthreads_tot_max = nthreads_hw;

    bCanUseGPU = (inputrec->cutoff_scheme == ecutsVERLET &&
                  hwinfo->gpu_info.ncuda_dev_compatible > 0);
        ngpu = hwinfo->gpu_info.ncuda_dev_compatible;

    if (inputrec->cutoff_scheme == ecutsGROUP)
        /* We checked this before, but it doesn't hurt to do it once more */
        assert(hw_opt->nthreads_omp == 1);

    nthreads_tmpi =
        get_tmpi_omp_thread_division(hwinfo, hw_opt, nthreads_tot_max, ngpu);

    if (inputrec->eI == eiNM || EI_TPI(inputrec->eI))
        /* Dims/steps are divided over the nodes instead of splitting the atoms */
        min_atoms_per_mpi_thread = 0;
        min_atoms_per_mpi_thread = MIN_ATOMS_PER_GPU;
        min_atoms_per_mpi_thread = MIN_ATOMS_PER_MPI_THREAD;

    /* Check if an algorithm does not support parallel simulation. */
    if (nthreads_tmpi != 1 &&
        ( inputrec->eI == eiLBFGS ||
          inputrec->coulombtype == eelEWALD ) )
        md_print_warn(cr, fplog, "The integration or electrostatics algorithm doesn't support parallel runs. Using a single thread-MPI thread.\n");
        if (hw_opt->nthreads_tmpi > nthreads_tmpi)
            gmx_fatal(FARGS, "You asked for more than 1 thread-MPI thread, but an algorithm used in this run does not support that");
    else if (mtop->natoms/nthreads_tmpi < min_atoms_per_mpi_thread)
        /* the thread number was chosen automatically, but there are too many
           threads (too few atoms per thread) */
        nthreads_new = max(1, mtop->natoms/min_atoms_per_mpi_thread);

        /* Avoid partial use of Hyper-Threading */
        if (gmx_cpuid_x86_smt(hwinfo->cpuid_info) == GMX_CPUID_X86_SMT_ENABLED &&
            nthreads_new > nthreads_hw/2 && nthreads_new < nthreads_hw)
            nthreads_new = nthreads_hw/2;

        /* Avoid large prime numbers in the thread count */
        if (nthreads_new >= 6)
            /* Use only 6,8,10 with additional factors of 2 */
            while (3*fac*2 <= nthreads_new)

            nthreads_new = (nthreads_new/fac)*fac;
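            /* Worked examples of this rounding, assuming fac starts at 2
             * and the (elided) loop body doubles it: 7 -> 6 and 11 -> 10
             * (fac = 2), 13 -> 12 (fac = 4), 25 -> 24 (fac = 8). Only
             * 6, 8 and 10 times powers of two survive, matching the
             * comment above.
             */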
        if (nthreads_new == 5)

        nthreads_tmpi = nthreads_new;

        fprintf(stderr, "\n");
        fprintf(stderr, "NOTE: Parallelization is limited by the small number of atoms,\n");
        fprintf(stderr, "      only starting %d thread-MPI threads.\n", nthreads_tmpi);
        fprintf(stderr, "      You can use the -nt and/or -ntmpi option to optimize the number of threads.\n\n");

    return nthreads_tmpi;
#endif /* GMX_THREAD_MPI */
/* We determine the extra cost of the non-bonded kernels compared to
 * a reference nstlist value of 10 (which is the default in grompp).
 */
static const int    nbnxn_reference_nstlist = 10;
/* The values to try when switching */
const int           nstlist_try[] = { 20, 25, 40 };
#define NNSTL  (sizeof(nstlist_try)/sizeof(nstlist_try[0]))
/* Increase nstlist until the non-bonded cost increases more than listfac_ok,
 * but never more than listfac_max.
 * A standard (protein+)water system at 300K with PME ewald_rtol=1e-5
 * needs 1.28 at rcoulomb=0.9 and 1.24 at rcoulomb=1.0 to get to nstlist=40.
 * Note that both CPU and GPU factors are conservative. Performance should
 * not go down due to this tuning, except with a relatively slow GPU.
 * On the other hand, at medium/high parallelization or with fast GPUs
 * nstlist will not be increased enough to reach optimal performance.
 */
/* CPU: pair-search is about a factor 1.5 slower than the non-bonded kernel */
static const float  nbnxn_cpu_listfac_ok    = 1.05;
static const float  nbnxn_cpu_listfac_max   = 1.09;
/* GPU: pair-search is a factor 1.5-3 slower than the non-bonded kernel */
static const float  nbnxn_gpu_listfac_ok    = 1.20;
static const float  nbnxn_gpu_listfac_max   = 1.30;
/* Try to increase nstlist when using the Verlet cut-off scheme */
static void increase_nstlist(FILE *fp, t_commrec *cr,
                             t_inputrec *ir, int nstlist_cmdline,
                             const gmx_mtop_t *mtop, matrix box,
    float                  listfac_ok, listfac_max;
    int                    nstlist_orig, nstlist_prev;
    verletbuf_list_setup_t ls;
    real                   rlist_nstlist10, rlist_inc, rlist_ok, rlist_max;
    real                   rlist_new, rlist_prev;
    gmx_bool               bBox, bDD, bCont;
    const char            *nstl_gpu = "\nFor optimal performance with a GPU nstlist (now %d) should be larger.\nThe optimum depends on your CPU and GPU resources.\nYou might want to try several nstlist values.\n";
    const char            *nve_err  = "Cannot increase nstlist because an NVE ensemble is used";
    const char            *vbd_err  = "Cannot increase nstlist because verlet-buffer-tolerance is not set or used";
    const char            *box_err  = "Cannot increase nstlist because the box is too small";
    const char            *dd_err   = "Cannot increase nstlist because of domain decomposition limitations";

    if (nstlist_cmdline <= 0)
        if (ir->nstlist == 1)
            /* The user probably set nstlist=1 for a reason,
             * don't mess with the settings.
             */

        if (fp != NULL && bGPU && ir->nstlist < nstlist_try[0])
            fprintf(fp, nstl_gpu, ir->nstlist);

    while (nstlist_ind < NNSTL && ir->nstlist >= nstlist_try[nstlist_ind])
    if (nstlist_ind == NNSTL)
        /* There is no larger nstlist value to try */

    if (EI_MD(ir->eI) && ir->etc == etcNO)
            fprintf(stderr, "%s\n", nve_err);
            fprintf(fp, "%s\n", nve_err);

    if (ir->verletbuf_tol == 0 && bGPU)
        gmx_fatal(FARGS, "You are using an old tpr file with a GPU, please generate a new tpr file with an up-to-date version of grompp");

    if (ir->verletbuf_tol < 0)
            fprintf(stderr, "%s\n", vbd_err);
            fprintf(fp, "%s\n", vbd_err);

        listfac_ok  = nbnxn_gpu_listfac_ok;
        listfac_max = nbnxn_gpu_listfac_max;
        listfac_ok  = nbnxn_cpu_listfac_ok;
        listfac_max = nbnxn_cpu_listfac_max;

    nstlist_orig = ir->nstlist;
    if (nstlist_cmdline > 0)
        sprintf(buf, "Getting nstlist=%d from command line option",
                nstlist_cmdline);
        ir->nstlist = nstlist_cmdline;
    verletbuf_get_list_setup(bGPU, &ls);

    /* Allow rlist to make the list a given factor larger than the list
     * would be with nstlist=10.
     */
    nstlist_prev = ir->nstlist;
    ir->nstlist  = nbnxn_reference_nstlist;
    calc_verlet_buffer_size(mtop, det(box), ir, -1, &ls, NULL,
                            &rlist_nstlist10);
    ir->nstlist  = nstlist_prev;

    /* Determine the pair list size increase due to zero interactions */
    rlist_inc = nbnxn_get_rlist_effective_inc(ls.cluster_size_j,
                                              mtop->natoms/det(box));
    rlist_ok  = (rlist_nstlist10 + rlist_inc)*pow(listfac_ok, 1.0/3.0) - rlist_inc;
    rlist_max = (rlist_nstlist10 + rlist_inc)*pow(listfac_max, 1.0/3.0) - rlist_inc;
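    /* Worked example with illustrative numbers: for rlist_nstlist10 = 1.0 nm,
     * rlist_inc = 0.1 nm and the CPU factors above, rlist_ok =
     * 1.1*1.05^(1/3) - 0.1 ~= 1.018 nm and rlist_max = 1.1*1.09^(1/3) - 0.1
     * ~= 1.032 nm; the cube root converts the allowed cost (volume) factor
     * into a radius factor.
     */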
    fprintf(debug, "nstlist tuning: rlist_inc %.3f rlist_ok %.3f rlist_max %.3f\n",
            rlist_inc, rlist_ok, rlist_max);

    nstlist_prev = nstlist_orig;
    rlist_prev   = ir->rlist;

        if (nstlist_cmdline <= 0)
            ir->nstlist = nstlist_try[nstlist_ind];

        /* Set the pair-list buffer size in ir */
        calc_verlet_buffer_size(mtop, det(box), ir, -1, &ls, NULL, &rlist_new);

        /* Does rlist fit in the box? */
        bBox = (sqr(rlist_new) < max_cutoff2(ir->ePBC, box));

        if (bBox && DOMAINDECOMP(cr))
            /* Check if rlist fits in the domain decomposition */
            if (inputrec2nboundeddim(ir) < DIM)
                gmx_incons("Changing nstlist with domain decomposition and unbounded dimensions is not implemented yet");
            copy_mat(box, state_tmp.box);
            bDD = change_dd_cutoff(cr, &state_tmp, ir, rlist_new);

        fprintf(debug, "nstlist %d rlist %.3f bBox %d bDD %d\n",
                ir->nstlist, rlist_new, bBox, bDD);

        if (nstlist_cmdline <= 0)
            if (bBox && bDD && rlist_new <= rlist_max)
                /* Increase nstlist */
                nstlist_prev = ir->nstlist;
                rlist_prev   = rlist_new;
                bCont        = (nstlist_ind+1 < NNSTL && rlist_new < rlist_ok);
                /* Stick with the previous nstlist */
                ir->nstlist = nstlist_prev;
                rlist_new   = rlist_prev;

        gmx_warning(!bBox ? box_err : dd_err);
            fprintf(fp, "\n%s\n", !bBox ? box_err : dd_err);
        ir->nstlist = nstlist_orig;
    else if (ir->nstlist != nstlist_orig || rlist_new != ir->rlist)
        sprintf(buf, "Changing nstlist from %d to %d, rlist from %g to %g",
                nstlist_orig, ir->nstlist,
                ir->rlist, rlist_new);
            fprintf(stderr, "%s\n\n", buf);
            fprintf(fp, "%s\n\n", buf);
        ir->rlist     = rlist_new;
        ir->rlistlong = rlist_new;
static void prepare_verlet_scheme(FILE                           *fplog,
                                  const gmx_mtop_t               *mtop,
    /* For NVE simulations, we will retain the initial list buffer */
    if (ir->verletbuf_tol > 0 && !(EI_MD(ir->eI) && ir->etc == etcNO))
        /* Update the Verlet buffer size for the current run setup */
        verletbuf_list_setup_t ls;

        /* Here we assume SIMD-enabled kernels are being used. But as
         * calc_verlet_buffer_size currently gives the same results for
         * 4x8 and 4x4, and 4x2 gives a larger buffer than 4x4, this is OK.
         */
        verletbuf_get_list_setup(bUseGPU, &ls);

        calc_verlet_buffer_size(mtop, det(box), ir, -1, &ls, NULL, &rlist_new);

        if (rlist_new != ir->rlist)
            fprintf(fplog, "\nChanging rlist from %g to %g for non-bonded %dx%d atom kernels\n\n",
                    ir->rlist, rlist_new,
                    ls.cluster_size_i, ls.cluster_size_j);
            ir->rlist     = rlist_new;
            ir->rlistlong = rlist_new;

    if (nstlist_cmdline > 0 && (!EI_DYNAMICS(ir->eI) || ir->verletbuf_tol <= 0))
        gmx_fatal(FARGS, "Cannot set nstlist without %s",
                  !EI_DYNAMICS(ir->eI) ? "dynamics" : "verlet-buffer-tolerance");

    if (EI_DYNAMICS(ir->eI))
        /* Set or try nstlist values */
        increase_nstlist(fplog, cr, ir, nstlist_cmdline, mtop, box, bUseGPU);
static void convert_to_verlet_scheme(FILE *fplog,
                                     gmx_mtop_t *mtop, real box_vol)
    const char *conv_mesg = "Converting input file with group cut-off scheme to the Verlet cut-off scheme";

    md_print_warn(NULL, fplog, "%s\n", conv_mesg);

    ir->cutoff_scheme = ecutsVERLET;
    ir->verletbuf_tol = 0.005;

    if (ir->rcoulomb != ir->rvdw)
        gmx_fatal(FARGS, "The VdW and Coulomb cut-offs are different, whereas the Verlet scheme only supports equal cut-offs");

    if (ir->vdwtype == evdwUSER || EEL_USER(ir->coulombtype))
        gmx_fatal(FARGS, "User non-bonded potentials are not (yet) supported with the Verlet scheme");
    else if (ir_vdw_switched(ir) || ir_coulomb_switched(ir))
        if (ir_vdw_switched(ir) && ir->vdw_modifier == eintmodNONE)
            ir->vdwtype = evdwCUT;
                case evdwSHIFT:  ir->vdw_modifier = eintmodFORCESWITCH; break;
                case evdwSWITCH: ir->vdw_modifier = eintmodPOTSWITCH;   break;
                default: gmx_fatal(FARGS, "The Verlet scheme does not support Van der Waals interactions of type '%s'", evdw_names[ir->vdwtype]);

        if (ir_coulomb_switched(ir) && ir->coulomb_modifier == eintmodNONE)
            if (EEL_FULL(ir->coulombtype))
                /* With full electrostatics only PME can be switched */
                ir->coulombtype      = eelPME;
                ir->coulomb_modifier = eintmodPOTSHIFT;
                md_print_warn(NULL, fplog, "NOTE: Replacing %s electrostatics with reaction-field with epsilon-rf=inf\n", eel_names[ir->coulombtype]);
                ir->coulombtype      = eelRF;
                ir->epsilon_rf       = 0.0;
                ir->coulomb_modifier = eintmodPOTSHIFT;

        /* We set the pair energy error tolerance to a small number.
         * Note that this is only for testing. For production the user
         * should think about this and set the mdp options.
         */
        ir->verletbuf_tol = 1e-4;

    if (inputrec2nboundeddim(ir) != 3)
        gmx_fatal(FARGS, "Can only convert old tpr files to the Verlet cut-off scheme with 3D pbc");

    if (ir->efep != efepNO || ir->implicit_solvent != eisNO)
        gmx_fatal(FARGS, "Will not convert old tpr files to the Verlet cut-off scheme with free-energy calculations or implicit solvent");

    if (EI_DYNAMICS(ir->eI) && !(EI_MD(ir->eI) && ir->etc == etcNO))
        verletbuf_list_setup_t ls;

        verletbuf_get_list_setup(FALSE, &ls);
        calc_verlet_buffer_size(mtop, box_vol, ir, -1, &ls, NULL, &ir->rlist);

            rlist_fac = 1 + verlet_buffer_ratio_NVE_T0;
            rlist_fac = 1 + verlet_buffer_ratio_nodynamics;
        ir->verletbuf_tol = -1;
        ir->rlist         = rlist_fac*max(ir->rvdw, ir->rcoulomb);

    gmx_mtop_remove_chargegroups(mtop);
static void print_hw_opt(FILE *fp, const gmx_hw_opt_t *hw_opt)
    fprintf(fp, "hw_opt: nt %d ntmpi %d ntomp %d ntomp_pme %d gpu_id '%s'\n",
            hw_opt->nthreads_tot,
            hw_opt->nthreads_tmpi,
            hw_opt->nthreads_omp,
            hw_opt->nthreads_omp_pme,
            hw_opt->gpu_opt.gpu_id != NULL ? hw_opt->gpu_opt.gpu_id : "");
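/* Example of the resulting line (illustrative values): a run started with
 * "-nt 8 -ntmpi 2 -ntomp 4 -gpu_id 01" would print
 *   hw_opt: nt 8 ntmpi 2 ntomp 4 ntomp_pme 4 gpu_id '01'
 * with ntomp_pme defaulting to ntomp in check_and_update_hw_opt_1() below.
 */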
/* Checks we can do when we don't (yet) know the cut-off scheme */
static void check_and_update_hw_opt_1(gmx_hw_opt_t *hw_opt,
                                      gmx_bool      bIsSimMaster)
    gmx_omp_nthreads_read_env(&hw_opt->nthreads_omp, bIsSimMaster);

#ifndef GMX_THREAD_MPI
    if (hw_opt->nthreads_tot > 0)
        gmx_fatal(FARGS, "Setting the total number of threads is only supported with thread-MPI, but Gromacs was compiled without thread-MPI");
    if (hw_opt->nthreads_tmpi > 0)
        gmx_fatal(FARGS, "Setting the number of thread-MPI threads is only supported with thread-MPI, but Gromacs was compiled without thread-MPI");

    if (hw_opt->nthreads_omp > 1)
        gmx_fatal(FARGS, "More than 1 OpenMP thread requested, but Gromacs was compiled without OpenMP support");
    hw_opt->nthreads_omp = 1;

    if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp_pme <= 0)
        /* We have the same number of OpenMP threads for PP and PME processes,
         * thus we can perform several consistency checks.
         */
        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot != hw_opt->nthreads_tmpi*hw_opt->nthreads_omp)
            gmx_fatal(FARGS, "The total number of threads requested (%d) does not match the thread-MPI threads (%d) times the OpenMP threads (%d) requested",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi, hw_opt->nthreads_omp);

        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_tmpi != 0)
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of thread-MPI threads requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi);

        if (hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_omp != 0)
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of OpenMP threads requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_omp);

        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_omp <= 0)
            hw_opt->nthreads_omp = hw_opt->nthreads_tot/hw_opt->nthreads_tmpi;
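        /* Illustrative example of these checks: "-nt 8 -ntmpi 2" passes and
         * derives nthreads_omp = 8/2 = 4, while "-nt 8 -ntmpi 3" hits the
         * divisibility error above.
         */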
    if (hw_opt->nthreads_omp > 1)
        gmx_fatal(FARGS, "OpenMP threads are requested, but Gromacs was compiled without OpenMP support");

    if (hw_opt->nthreads_omp_pme > 0 && hw_opt->nthreads_omp <= 0)
        gmx_fatal(FARGS, "You need to specify -ntomp in addition to -ntomp_pme");

    if (hw_opt->nthreads_tot == 1)
        hw_opt->nthreads_tmpi = 1;

        if (hw_opt->nthreads_omp > 1)
            gmx_fatal(FARGS, "You requested %d OpenMP threads with %d total threads",
                      hw_opt->nthreads_omp, hw_opt->nthreads_tot);
        hw_opt->nthreads_omp = 1;

    if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
        hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;

    /* Parse GPU IDs, if provided.
     * We check consistency with the tMPI thread count later.
     */
    gmx_parse_gpu_ids(&hw_opt->gpu_opt);

#ifdef GMX_THREAD_MPI
    if (hw_opt->gpu_opt.ncuda_dev_use > 0 && hw_opt->nthreads_tmpi == 0)
        /* Set the number of MPI threads equal to the number of GPUs */
        hw_opt->nthreads_tmpi = hw_opt->gpu_opt.ncuda_dev_use;

        if (hw_opt->nthreads_tot > 0 &&
            hw_opt->nthreads_tmpi > hw_opt->nthreads_tot)
            /* We have more GPUs than total threads requested.
             * We choose to (later) generate a mismatch error,
             * instead of launching more threads than requested.
             */
            hw_opt->nthreads_tmpi = hw_opt->nthreads_tot;

    print_hw_opt(debug, hw_opt);
/* Checks we can do when we know the cut-off scheme */
static void check_and_update_hw_opt_2(gmx_hw_opt_t *hw_opt,
    if (cutoff_scheme == ecutsGROUP)
        /* We only have OpenMP support for PME-only nodes */
        if (hw_opt->nthreads_omp > 1)
            gmx_fatal(FARGS, "OpenMP threads have been requested with cut-off scheme %s, but these are only supported with cut-off scheme %s",
                      ecutscheme_names[cutoff_scheme],
                      ecutscheme_names[ecutsVERLET]);
        hw_opt->nthreads_omp = 1;

    if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
        hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;

    print_hw_opt(debug, hw_opt);
/* Override the value in inputrec with value passed on the command line (if any) */
static void override_nsteps_cmdline(FILE            *fplog,
                                    gmx_int64_t      nsteps_cmdline,
                                    const t_commrec *cr)
    char sbuf[STEPSTRSIZE];

    /* override with anything other than the default -2 */
    if (nsteps_cmdline > -2)
        ir->nsteps = nsteps_cmdline;
        if (EI_DYNAMICS(ir->eI) && nsteps_cmdline != -1)
            sprintf(stmp, "Overriding nsteps with value passed on the command line: %s steps, %.3g ps",
                    gmx_step_str(nsteps_cmdline, sbuf),
                    fabs(nsteps_cmdline*ir->delta_t));
            sprintf(stmp, "Overriding nsteps with value passed on the command line: %s steps",
                    gmx_step_str(nsteps_cmdline, sbuf));

        md_print_warn(cr, fplog, "%s\n", stmp);
/* Frees GPU memory and destroys the CUDA context.
 *
 * Note that this function needs to be called even if GPUs are not used
 * in this run because the PME ranks have no knowledge of whether GPUs
 * are used or not, but all ranks need to enter the barrier below.
 */
static void free_gpu_resources(const t_forcerec *fr,
                               const t_commrec  *cr)
    gmx_bool bIsPPrankUsingGPU;
    char     gpu_err_str[STRLEN];

    bIsPPrankUsingGPU = (cr->duty & DUTY_PP) && fr->nbv != NULL && fr->nbv->bUseGPU;

    if (bIsPPrankUsingGPU)
        /* free nbnxn data in GPU memory */
        nbnxn_cuda_free(fr->nbv->cu_nbv);

        /* With tMPI we need to wait for all ranks to finish deallocation before
         * destroying the context in free_gpu() as some ranks may be sharing
         * a GPU device.
         * Note: only PP ranks need to free GPU resources, so it is safe to
         * not call the barrier on PME ranks.
         */
#ifdef GMX_THREAD_MPI
#endif  /* GMX_THREAD_MPI */

        /* uninitialize GPU (by destroying the context) */
        if (!free_gpu(gpu_err_str))
            gmx_warning("On rank %d failed to free GPU #%d: %s",
                        cr->nodeid, get_current_gpu_device_id(), gpu_err_str);
int mdrunner(gmx_hw_opt_t *hw_opt,
             FILE *fplog, t_commrec *cr, int nfile,
             const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
             gmx_bool bCompact, int nstglobalcomm,
             ivec ddxyz, int dd_node_order, real rdd, real rconstr,
             const char *dddlb_opt, real dlb_scale,
             const char *ddcsx, const char *ddcsy, const char *ddcsz,
             const char *nbpu_opt, int nstlist_cmdline,
             gmx_int64_t nsteps_cmdline, int nstepout, int resetstep,
             int gmx_unused nmultisim, int repl_ex_nst, int repl_ex_nex,
             int repl_ex_seed, real pforce, real cpt_period, real max_hours,
             const char *deviceOptions, int imdport, unsigned long Flags)
    gmx_bool                  bForceUseGPU, bTryUseGPU, bRerunMD, bCantUseGPU;
    double                    nodetime = 0, realtime;
    t_inputrec               *inputrec;
    t_state                  *state = NULL;
    gmx_ddbox_t               ddbox = {0};
    int                       npme_major, npme_minor;
    gmx_mtop_t               *mtop    = NULL;
    t_mdatoms                *mdatoms = NULL;
    t_forcerec               *fr      = NULL;
    t_fcdata                 *fcd     = NULL;
    real                      ewaldcoeff_q  = 0;
    real                      ewaldcoeff_lj = 0;
    gmx_pme_t                *pmedata = NULL;
    gmx_vsite_t              *vsite   = NULL;
    gmx_constr_t              constr;
    int                       i, m, nChargePerturbed = -1, nTypePerturbed = 0, status, nalloc;
    gmx_wallcycle_t           wcycle;
    gmx_walltime_accounting_t walltime_accounting = NULL;
    gmx_int64_t               reset_counters;
    gmx_edsam_t               ed     = NULL;
    t_commrec                *cr_old = cr;
    int                       nthreads_pme = 1;
    int                       nthreads_pp  = 1;
    gmx_membed_t              membed = NULL;
    gmx_hw_info_t            *hwinfo = NULL;
    /* The master rank decides early on bUseGPU and broadcasts this later */
    gmx_bool                  bUseGPU = FALSE;

    /* CAUTION: threads may be started later on in this function, so
       cr doesn't reflect the final parallel state right now */

    if (Flags & MD_APPENDFILES)
    bRerunMD     = (Flags & MD_RERUN);
    bForceUseGPU = (strncmp(nbpu_opt, "gpu", 3) == 0);
    bTryUseGPU   = (strncmp(nbpu_opt, "auto", 4) == 0) || bForceUseGPU;
    /* Rerun execution time is dominated by I/O and pair search, so
     * GPUs are not very useful, plus they do not support more than
     * one energy group. Don't select them when they can't be used,
     * unless the user requested it; then fatal_error is called later.
     *
     * TODO it would be nice to notify the user that if this check
     * causes GPUs not to be used that this is what is happening, and
     * why, but that will be easier to do after some future cleanup.
     */
    bCantUseGPU = bRerunMD && (inputrec->opts.ngener > 1);
    bTryUseGPU  = bTryUseGPU && !(bCantUseGPU && !bForceUseGPU);
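    /* Truth table for the line above (illustrative): with bCantUseGPU set,
     * "-nb auto" yields bTryUseGPU = FALSE, silently skipping the GPUs,
     * while "-nb gpu" keeps bTryUseGPU = TRUE so the explicit request can
     * fail with a fatal error later instead of being ignored.
     */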
    /* Detect hardware, gather information. This is an operation that is
     * global for this process (MPI rank). */
    hwinfo = gmx_detect_hardware(fplog, cr, bTryUseGPU);

    /* Read (nearly) all data required for the simulation */
    read_tpx_state(ftp2fn(efTPX, nfile, fnm), inputrec, state, NULL, mtop);

    if (inputrec->cutoff_scheme != ecutsVERLET &&
        ((Flags & MD_TESTVERLET) || getenv("GMX_VERLET_SCHEME") != NULL))
        convert_to_verlet_scheme(fplog, inputrec, mtop, det(state->box));

    if (inputrec->cutoff_scheme == ecutsVERLET)
        /* Here the master rank decides if all ranks will use GPUs */
        bUseGPU = (hwinfo->gpu_info.ncuda_dev_compatible > 0 ||
                   getenv("GMX_EMULATE_GPU") != NULL);

        /* TODO add GPU kernels for this and replace this check by:
         * (bUseGPU && (ir->vdwtype == evdwPME &&
         *  ir->ljpme_combination_rule == eljpmeLB))
         * update the message text and the content of nbnxn_acceleration_supported.
         */
            !nbnxn_acceleration_supported(fplog, cr, inputrec, bUseGPU))
            /* Fallback message printed by nbnxn_acceleration_supported */
            gmx_fatal(FARGS, "GPU acceleration requested, but not supported with the given input settings");

        prepare_verlet_scheme(fplog, cr,
                              inputrec, nstlist_cmdline, mtop, state->box,
                              bUseGPU);

        if (nstlist_cmdline > 0)
            gmx_fatal(FARGS, "Cannot set nstlist with the group cut-off scheme");

        if (hwinfo->gpu_info.ncuda_dev_compatible > 0)
            md_print_warn(cr, fplog,
                          "NOTE: GPU(s) found, but the current simulation cannot use GPUs\n"
                          "      To use a GPU, set the mdp option: cutoff-scheme = Verlet\n"
                          "      (for quick performance testing you can use the -testverlet option)\n");

            gmx_fatal(FARGS, "GPU requested, but can't be used without cutoff-scheme=Verlet");

#ifdef GMX_TARGET_BGQ
        md_print_warn(cr, fplog,
                      "NOTE: There is no SIMD implementation of the group scheme kernels on\n"
                      "      BlueGene/Q. You will observe better performance from using the\n"
                      "      Verlet cut-off scheme.\n");

    if (inputrec->eI == eiSD2)
        md_print_warn(cr, fplog, "The stochastic dynamics integrator %s is deprecated, since\n"
                      "it is slower than integrator %s and is slightly less accurate\n"
                      "with constraints. Use the %s integrator.",
                      ei_names[inputrec->eI], ei_names[eiSD1], ei_names[eiSD1]);
    /* Check for externally set OpenMP affinity and turn off internal
     * pinning if any is found. We need to do this check early to tell
     * thread-MPI whether it should do pinning when spawning threads.
     * TODO: the above no longer holds; we should move these checks down.
     */
    gmx_omp_check_thread_affinity(fplog, cr, hw_opt);

    /* Check and update the hardware options for internal consistency */
    check_and_update_hw_opt_1(hw_opt, SIMMASTER(cr));

#ifdef GMX_THREAD_MPI
    /* Early check for externally set process affinity.
     * With thread-MPI this is needed as pinning might get turned off,
     * which needs to be known before starting thread-MPI.
     * With thread-MPI hw_opt is processed here on the master rank
     * and passed to the other ranks later, so we only do this on master.
     */
    gmx_check_thread_affinity_set(fplog,
                                  hw_opt, hwinfo->nthreads_hw_avail, FALSE);

#ifdef GMX_THREAD_MPI
    if (cr->npmenodes > 0 && hw_opt->nthreads_tmpi <= 0)
        gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME ranks");

    if (hw_opt->nthreads_omp_pme != hw_opt->nthreads_omp &&
        gmx_fatal(FARGS, "You need to explicitly specify the number of PME ranks (-npme) when using a different number of OpenMP threads for PP and PME ranks");

#ifdef GMX_THREAD_MPI
    /* Since the master knows the cut-off scheme, update hw_opt for this.
     * This is done later for normal MPI and also once more with tMPI
     * for all tMPI ranks.
     */
    check_and_update_hw_opt_2(hw_opt, inputrec->cutoff_scheme);

    /* NOW the threads will be started: */
    hw_opt->nthreads_tmpi = get_nthreads_mpi(hwinfo,

    if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp <= 0)
        hw_opt->nthreads_omp = hw_opt->nthreads_tot/hw_opt->nthreads_tmpi;
    if (hw_opt->nthreads_tmpi > 1)
        /* now start the threads. */
        cr = mdrunner_start_threads(hw_opt, fplog, cr_old, nfile, fnm,
                                    oenv, bVerbose, bCompact, nstglobalcomm,
                                    ddxyz, dd_node_order, rdd, rconstr,
                                    dddlb_opt, dlb_scale, ddcsx, ddcsy, ddcsz,
                                    nbpu_opt, nstlist_cmdline,
                                    nsteps_cmdline, nstepout, resetstep, nmultisim,
                                    repl_ex_nst, repl_ex_nex, repl_ex_seed, pforce,
                                    cpt_period, max_hours, deviceOptions,
        /* the main thread continues here with a new cr. We don't deallocate
           the old cr because other threads may still be reading it. */
            gmx_comm("Failed to spawn threads");

    /* END OF CAUTION: cr is now reliable */
    /* g_membed initialisation *
     * Because we change the mtop, init_membed is called before the init_parallel *
     * (in case we ever want to make it run in parallel) */
    if (opt2bSet("-membed", nfile, fnm))
        fprintf(stderr, "Initializing membed");
        membed = init_membed(fplog, nfile, fnm, mtop, inputrec, state, cr, &cpt_period);

    /* now broadcast everything to the non-master nodes/threads: */
    init_parallel(cr, inputrec, mtop);

    /* The master rank decided on the use of GPUs,
     * broadcast this information to all ranks.
     */
    gmx_bcast_sim(sizeof(bUseGPU), &bUseGPU, cr);

    pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);

    /* now make sure the state is initialized and propagated */
    set_state_entries(state, inputrec);

    /* A parallel command line option consistency check that we can
       only do after any threads have started. */
        (ddxyz[XX] > 1 || ddxyz[YY] > 1 || ddxyz[ZZ] > 1 || cr->npmenodes > 0))
        gmx_fatal(FARGS,
                  "The -dd or -npme options request a parallel simulation, "
                  "but %s was compiled without threads or MPI enabled"
#ifdef GMX_THREAD_MPI
                  "but the number of threads (option -nt) is 1"
                  "but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec"
        (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
        gmx_fatal(FARGS, "The .mdp file specified an energy minimization or normal mode algorithm, and these are not compatible with mdrun -rerun");

    if (can_use_allvsall(inputrec, TRUE, cr, fplog) && DOMAINDECOMP(cr))
        gmx_fatal(FARGS, "All-vs-all loops do not work with domain decomposition, use a single MPI rank");

    if (!(EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype)))
        if (cr->npmenodes > 0)
            gmx_fatal_collective(FARGS, cr, NULL,
                                 "PME-only ranks are requested, but the system does not use PME for electrostatics or LJ");

    if (bUseGPU && cr->npmenodes < 0)
        /* With GPUs we don't automatically use PME-only ranks. PME ranks can
         * improve performance with many threads per GPU, since our OpenMP
         * scaling is bad, but it's difficult to automate the setup.
         */

        fcRegisterSteps(inputrec->nsteps, inputrec->init_step);

    /* NMR restraints must be initialized before load_checkpoint,
     * since with time averaging the history is added to t_state.
     * For proper consistency check we therefore need to extend
     * the state here.
     * So the PME-only nodes (if present) will also initialize
     * the distance restraints.
     */

    /* This needs to be called before read_checkpoint to extend the state */
    init_disres(fplog, mtop, inputrec, cr, fcd, state, repl_ex_nst > 0);

    init_orires(fplog, mtop, state->x, inputrec, cr, &(fcd->orires),

    if (DEFORM(*inputrec))
        /* Store the deform reference box before reading the checkpoint */
        copy_mat(state->box, box);
        gmx_bcast(sizeof(box), box, cr);

        /* Because we do not have the update struct available yet
         * in which the reference values should be stored,
         * we store them temporarily in static variables.
         * This should be thread safe, since they are only written once
         * and with identical values.
         */
        tMPI_Thread_mutex_lock(&deform_init_box_mutex);
        deform_init_init_step_tpx = inputrec->init_step;
        copy_mat(box, deform_init_box_tpx);
        tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
    if (opt2bSet("-cpi", nfile, fnm))
        /* Check if checkpoint file exists before doing continuation.
         * This way we can use identical input options for the first and subsequent runs...
         */
        if (gmx_fexist_master(opt2fn_master("-cpi", nfile, fnm, cr), cr))
            load_checkpoint(opt2fn_master("-cpi", nfile, fnm, cr), &fplog,
                            inputrec, state, &bReadEkin,
                            (Flags & MD_APPENDFILES),
                            (Flags & MD_APPENDFILESSET));
            Flags |= MD_READ_EKIN;

    if (((MASTER(cr) || (Flags & MD_SEPPOT)) && (Flags & MD_APPENDFILES))
#ifdef GMX_THREAD_MPI
        /* With thread MPI only the master node/thread exists in mdrun.c,
         * therefore non-master nodes need to open the "seppot" log file here.
         */
        || (!MASTER(cr) && (Flags & MD_SEPPOT))
        gmx_log_open(ftp2fn(efLOG, nfile, fnm), cr, !(Flags & MD_SEPPOT),

    /* override nsteps with value from cmdline */
    override_nsteps_cmdline(fplog, nsteps_cmdline, inputrec, cr);

        copy_mat(state->box, box);

        gmx_bcast(sizeof(box), box, cr);
    /* Essential dynamics */
    if (opt2bSet("-ei", nfile, fnm))
        /* Open input and output files, allocate space for ED data structure */
        ed = ed_open(mtop->natoms, &state->edsamstate, nfile, fnm, Flags, oenv, cr);

    if (PAR(cr) && !(EI_TPI(inputrec->eI) ||
                     inputrec->eI == eiNM))
        cr->dd = init_domain_decomposition(fplog, cr, Flags, ddxyz, rdd, rconstr,
                                           dddlb_opt, dlb_scale,
                                           ddcsx, ddcsy, ddcsz,
                                           &ddbox, &npme_major, &npme_minor);

        make_dd_communicators(fplog, cr, dd_node_order);

        /* Set overallocation to avoid frequent reallocation of arrays */
        set_over_alloc_dd(TRUE);

        /* PME, if used, is done on all nodes with 1D decomposition */
        cr->duty = (DUTY_PP | DUTY_PME);

        if (inputrec->ePBC == epbcSCREW)
            gmx_fatal(FARGS,
                      "pbc=%s is only implemented with domain decomposition",
                      epbc_names[inputrec->ePBC]);

    /* After possible communicator splitting in make_dd_communicators,
     * we can set up the intra/inter node communication.
     */
    gmx_setup_nodecomm(fplog, cr);

    /* Initialize per-physical-node MPI process/thread ID and counters. */
    gmx_init_intranode_counters(cr);

        md_print_info(cr, fplog,
                      "This is simulation %d out of %d running as a composite Gromacs\n"
                      "multi-simulation job. Setup for this simulation:\n\n",
                      cr->ms->sim, cr->ms->nsim);

    md_print_info(cr, fplog, "Using %d MPI %s\n",
#ifdef GMX_THREAD_MPI
                  cr->nnodes == 1 ? "thread" : "threads"
#else
                  cr->nnodes == 1 ? "process" : "processes"
#endif
    /* Check and update hw_opt for the cut-off scheme */
    check_and_update_hw_opt_2(hw_opt, inputrec->cutoff_scheme);

    gmx_omp_nthreads_init(fplog, cr,
                          hwinfo->nthreads_hw_avail,
                          hw_opt->nthreads_omp,
                          hw_opt->nthreads_omp_pme,
                          (cr->duty & DUTY_PP) == 0,
                          inputrec->cutoff_scheme == ecutsVERLET);

    /* Select GPU IDs to use */
    gmx_select_gpu_ids(fplog, cr, &hwinfo->gpu_info, bForceUseGPU,

    /* Ignore (potentially) manually selected GPUs */
    hw_opt->gpu_opt.ncuda_dev_use = 0;

    /* check consistency across ranks of things like SIMD
     * support and number of GPUs selected */
    gmx_check_hw_runconf_consistency(fplog, hwinfo, cr, hw_opt, bUseGPU);

    if (DOMAINDECOMP(cr))
        /* When we share GPUs over ranks, we need to know this for the DLB */
        dd_setup_dlb_resource_sharing(cr, hwinfo, hw_opt);

    /* Getting the number of PP/PME threads.
     * PME: the env variable should be read only on one node to make sure it is
     * identical everywhere.
     */
    /* TODO nthreads_pp is only used for pinning threads.
     * This is a temporary solution until we have a hw topology library.
     */
    nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
    nthreads_pme = gmx_omp_nthreads_get(emntPME);

    wcycle = wallcycle_init(fplog, resetstep, cr, nthreads_pp, nthreads_pme);

        /* Master synchronizes its value of reset_counters with all nodes
         * including PME only nodes */
        reset_counters = wcycle_get_reset_counters(wcycle);
        gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr);
        wcycle_set_reset_counters(wcycle, reset_counters);
    if (cr->duty & DUTY_PP)
        bcast_state(cr, state);

        /* Initiate forcerecord */
        fr->hwinfo  = hwinfo;
        fr->gpu_opt = &hw_opt->gpu_opt;
        init_forcerec(fplog, oenv, fr, fcd, inputrec, mtop, cr, box,
                      opt2fn("-table", nfile, fnm),
                      opt2fn("-tabletf", nfile, fnm),
                      opt2fn("-tablep", nfile, fnm),
                      opt2fn("-tableb", nfile, fnm),

        /* version for PCA_NOT_READ_NODE (see md.c) */
        /*init_forcerec(fplog,fr,fcd,inputrec,mtop,cr,box,FALSE,
           "nofile","nofile","nofile","nofile",FALSE,pforce);
         */
        fr->bSepDVDL = ((Flags & MD_SEPPOT) == MD_SEPPOT);

        /* Initialize QM-MM */
        init_QMMMrec(cr, mtop, inputrec, fr);

        /* Initialize the mdatoms structure.
         * mdatoms is not filled with atom data,
         * as this cannot be done now with domain decomposition.
         */
        mdatoms = init_mdatoms(fplog, mtop, inputrec->efep != efepNO);

        /* Initialize the virtual site communication */
        vsite = init_vsite(mtop, cr, FALSE);

        calc_shifts(box, fr->shift_vec);

        /* With periodic molecules the charge groups should be whole at start up
         * and the virtual sites should not be far from their proper positions.
         */
        if (!inputrec->bContinuation && MASTER(cr) &&
            !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
            /* Make molecules whole at start of run */
            if (fr->ePBC != epbcNONE)
                do_pbc_first_mtop(fplog, inputrec->ePBC, box, mtop, state->x);

            /* Correct initial vsite positions are required
             * for the initial distribution in the domain decomposition
             * and for the initial shell prediction.
             */
            construct_vsites_mtop(vsite, mtop, state->x);

        if (EEL_PME(fr->eeltype) || EVDW_PME(fr->vdwtype))
            ewaldcoeff_q  = fr->ewaldcoeff_q;
            ewaldcoeff_lj = fr->ewaldcoeff_lj;
            pmedata       = &fr->pmedata;

        /* This is a PME-only node */

        /* We don't need the state */

        ewaldcoeff_q  = calc_ewaldcoeff_q(inputrec->rcoulomb, inputrec->ewald_rtol);
        ewaldcoeff_lj = calc_ewaldcoeff_lj(inputrec->rvdw, inputrec->ewald_rtol_lj);
    if (hw_opt->thread_affinity != threadaffOFF)
        /* Before setting affinity, check whether the affinity has changed
         * (which indicates that probably the OpenMP library has changed it
         * since we first checked).
         */
        gmx_check_thread_affinity_set(fplog, cr,
                                      hw_opt, hwinfo->nthreads_hw_avail, TRUE);

        /* Set the CPU affinity */
        gmx_set_thread_affinity(fplog, cr, hw_opt, hwinfo);
    /* Initiate PME if necessary,
     * either on all nodes or on dedicated PME nodes only. */
    if (EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
            nChargePerturbed = mdatoms->nChargePerturbed;
            if (EVDW_PME(inputrec->vdwtype))
                nTypePerturbed = mdatoms->nTypePerturbed;

        if (cr->npmenodes > 0)
            /* The PME-only nodes need to know nChargePerturbed (FEP on Q) and nTypePerturbed (FEP on LJ) */
            gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
            gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);

        if (cr->duty & DUTY_PME)
            status = gmx_pme_init(pmedata, cr, npme_major, npme_minor, inputrec,
                                  mtop ? mtop->natoms : 0, nChargePerturbed, nTypePerturbed,
                                  (Flags & MD_REPRODUCIBLE), nthreads_pme);
                gmx_fatal(FARGS, "Error %d initializing PME", status);

    if (integrator[inputrec->eI].func == do_md)
        /* Turn on signal handling on all nodes */
        /*
         * (A user signal from the PME nodes (if any)
         * is communicated to the PP nodes.)
         */
        signal_handler_install();
    if (cr->duty & DUTY_PP)
        /* Assumes uniform use of the number of OpenMP threads */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));

        if (inputrec->ePull != epullNO)
            /* Initialize pull code */
            init_pull(fplog, inputrec, nfile, fnm, mtop, cr, oenv, inputrec->fepvals->init_lambda,
                      EI_DYNAMICS(inputrec->eI) && MASTER(cr), Flags);

            /* Initialize enforced rotation code */
            init_rot(fplog, inputrec, nfile, fnm, cr, state->x, box, mtop, oenv,

        if (inputrec->eSwapCoords != eswapNO)
            /* Initialize ion swapping code */
            init_swapcoords(fplog, bVerbose, inputrec, opt2fn_master("-swap", nfile, fnm, cr),
                            mtop, state->x, state->box, &state->swapstate, cr, oenv, Flags);

        constr = init_constraints(fplog, mtop, inputrec, ed, state, cr);

        if (DOMAINDECOMP(cr))
            dd_init_bondeds(fplog, cr->dd, mtop, vsite, inputrec,
                            Flags & MD_DDBONDCHECK, fr->cginfo_mb);

            set_dd_parameters(fplog, cr->dd, dlb_scale, inputrec, &ddbox);

            setup_dd_grid(fplog, cr->dd);

        /* Now do whatever the user wants us to do (how flexible...) */
        integrator[inputrec->eI].func(fplog, cr, nfile, fnm,
                                      oenv, bVerbose, bCompact,
                                      nstepout, inputrec, mtop,
                                      mdatoms, nrnb, wcycle, ed, fr,
                                      repl_ex_nst, repl_ex_nex, repl_ex_seed,
                                      cpt_period, max_hours,
                                      walltime_accounting);

        if (inputrec->ePull != epullNO)
            finish_pull(inputrec->pull);

            finish_rot(inputrec->rot);

        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
        gmx_pmeonly(*pmedata, cr, nrnb, wcycle, walltime_accounting, ewaldcoeff_q, ewaldcoeff_lj, inputrec);

    wallcycle_stop(wcycle, ewcRUN);
    /* Finish up, write some stuff;
     * if rerunMD, don't write last frame again.
     */
    finish_run(fplog, cr,
               inputrec, nrnb, wcycle, walltime_accounting,
               fr != NULL && fr->nbv != NULL && fr->nbv->bUseGPU ?
               nbnxn_cuda_get_timings(fr->nbv->cu_nbv) : NULL,
               EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));

    /* Free GPU memory and context */
    free_gpu_resources(fr, cr);

    if (opt2bSet("-membed", nfile, fnm))

    gmx_hardware_info_free(hwinfo);

    /* Does what it says */
    print_date_and_time(fplog, cr->nodeid, "Finished mdrun", gmx_gettime());

    walltime_accounting_destroy(walltime_accounting);

    /* Close logfile already here if we were appending to it */
    if (MASTER(cr) && (Flags & MD_APPENDFILES))
        gmx_log_close(fplog);

    rc = (int)gmx_get_stop_condition();

#ifdef GMX_THREAD_MPI
    /* we need to join all threads. The sub-threads join when they
       exit this function, but the master thread needs to be told to
       wait for that. */
    if (PAR(cr) && MASTER(cr))