1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
4 * This source code is part of
8 * GROningen MAchine for Chemical Simulations
11 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
12 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
13 * Copyright (c) 2001-2004, The GROMACS development team,
14 * check out http://www.gromacs.org for more information.
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version 2
19 * of the License, or (at your option) any later version.
21 * If you want to redistribute modifications, please consider that
22 * scientific software is very special. Version control is crucial -
23 * bugs must be traceable. We will be happy to consider code for
24 * inclusion in the official distribution, but derived work must not
25 * be called official GROMACS. Details are found in the README & COPYING
26 * files - if they are missing, get the official version at www.gromacs.org.
28 * To help us fund GROMACS development, we humbly ask that you cite
29 * the papers on the package - you can find them in the top README file.
31 * For more info, check our website at http://www.gromacs.org
34 * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
42 #include <sys/syscall.h>
61 #include "mpelogging.h"
67 #include "checkpoint.h"
68 #include "mtop_util.h"
69 #include "sighandler.h"
72 #include "pull_rotation.h"
74 #include "md_openmm.h"
90 #include "md_openmm.h"
/* NOTE(review): sampled listing -- the typedef that wraps 'func' (gmx_intp_t)
 * opens before this excerpt, and the #else/#endif of the GMX_OPENMM
 * conditional are among the missing lines. */
95 gmx_integrator_t *func;
/* Integrator dispatch table, indexed by inputrec->eI; invoked later as
 * integrator[inputrec->eI].func(...) (see line 896 in this listing). */
98 /* The array should match the eI array in include/types/enums.h */
99 #ifdef GMX_OPENMM /* FIXME do_md_openmm needs fixing */
/* OpenMM build: every integrator value is routed to do_md_openmm. */
100 const gmx_intp_t integrator[eiNR] = { {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm},{do_md_openmm}};
/* Native build: energy minimizers (do_steep/do_cg/do_lbfgs), normal modes
 * (do_nm) and test-particle insertion (do_tpi) have dedicated entries; all
 * remaining eI values use do_md. */
102 const gmx_intp_t integrator[eiNR] = { {do_md}, {do_steep}, {do_cg}, {do_md}, {do_md}, {do_nm}, {do_lbfgs}, {do_tpi}, {do_tpi}, {do_md}, {do_md},{do_md}};
/* File-scope staging area for the box-deformation reference values:
 * mdrunner() stores the tpx init_step and box here (see the DEFORM(...)
 * section around lines 535-558 of this listing) because the update struct
 * that should ultimately hold them does not exist yet at that point.
 * Written once, with identical values, by every thread-MPI thread. */
105 gmx_large_int_t deform_init_init_step_tpx;
106 matrix deform_init_box_tpx;
107 #ifdef GMX_THREAD_MPI
/* Serializes the writes above when several thread-MPI ranks share one
 * process; locked/unlocked around the stores in mdrunner(). */
108 tMPI_Thread_mutex_t deform_init_box_mutex=TMPI_THREAD_MUTEX_INITIALIZER;
112 #ifdef GMX_THREAD_MPI
/* Argument bundle handed (as void*) to mdrunner_start_fn(): it carries one
 * member per mdrunner() parameter so spawned thread-MPI threads can
 * re-invoke mdrunner() with the caller's options.
 * NOTE(review): sampled listing -- only a few members are visible here; the
 * full member list mirrors mdrunner()'s parameter list (see the copies made
 * in mdrunner_start_threads() below). */
113 struct mdrunner_arglist
127 const char *dddlb_opt;
141 const char *deviceOptions;
/* Written by mdrunner_start_fn() with mdrunner()'s result so the spawning
 * side can observe the outcome. */
143 int ret; /* return value */
147 /* The function used for spawning threads. Extracts the mdrunner()
148 arguments from its one argument and calls mdrunner(), after making
/* Thread-MPI worker entry point: registered with tMPI_Init_fn() in
 * mdrunner_start_threads() below; each spawned thread lands here and
 * re-enters mdrunner() with thread-local copies of the arguments.
 * NOTE(review): sampled listing -- braces and some declarations (e.g. fnm,
 * fplog) are among the missing lines. */
150 static void mdrunner_start_fn(void *arg)
152 struct mdrunner_arglist *mda=(struct mdrunner_arglist*)arg;
153 struct mdrunner_arglist mc=*mda; /* copy the arg list to make sure
154 that it's thread-local. This doesn't
155 copy pointed-to items, of course,
156 but those are all const. */
157 t_commrec *cr; /* we need a local version of this */
/* Duplicate the filename table so this thread owns its own copy (the
 * struct copy above does not copy pointed-to data). */
161 fnm = dup_tfn(mc.nfile, mc.fnm);
/* Derive this thread's communication record from the master's. */
163 cr = init_par_threads(mc.cr);
/* Re-invoke mdrunner() with the unpacked arguments; the result is stored
 * back into the shared arglist's 'ret' member for the spawner to read. */
170 mda->ret=mdrunner(cr->nnodes, fplog, cr, mc.nfile, fnm, mc.oenv,
171 mc.bVerbose, mc.bCompact, mc.nstglobalcomm,
172 mc.ddxyz, mc.dd_node_order, mc.rdd,
173 mc.rconstr, mc.dddlb_opt, mc.dlb_scale,
174 mc.ddcsx, mc.ddcsy, mc.ddcsz, mc.nstepout, mc.resetstep,
175 mc.nmultisim, mc.repl_ex_nst, mc.repl_ex_nex, mc.repl_ex_seed, mc.pforce,
176 mc.cpt_period, mc.max_hours, mc.deviceOptions, mc.Flags);
179 /* called by mdrunner() to start a specific number of threads (including
180 the main thread) for thread-parallel runs. This in turn calls mdrunner()
182 All options besides nthreads are the same as for mdrunner(). */
/* Returns the master thread's new t_commrec (crn); worker threads enter
 * mdrunner() via mdrunner_start_fn() and do not return through here.
 * NOTE(review): sampled listing -- braces, snew() calls and several field
 * assignments are among the missing lines. */
183 static t_commrec *mdrunner_start_threads(int nthreads,
184 FILE *fplog,t_commrec *cr,int nfile,
185 const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
186 gmx_bool bCompact, int nstglobalcomm,
187 ivec ddxyz,int dd_node_order,real rdd,real rconstr,
188 const char *dddlb_opt,real dlb_scale,
189 const char *ddcsx,const char *ddcsy,const char *ddcsz,
190 int nstepout,int resetstep,int nmultisim,int repl_ex_nst,
191 int repl_ex_nex, int repl_ex_seed, real pforce,real cpt_period, real max_hours,
192 const char *deviceOptions, unsigned long Flags)
195 struct mdrunner_arglist *mda;
196 t_commrec *crn; /* the new commrec */
199 /* first check whether we even need to start tMPI */
203 /* a few small, one-time, almost unavoidable memory leaks: */
/* mda and the duplicated filename table are deliberately never freed:
 * spawned threads keep reading them after this function returns. */
205 fnmn=dup_tfn(nfile, fnm);
207 /* fill the data structure to pass as void pointer to thread start fn */
/* One field per mdrunner() parameter; the ivec ddxyz is copied
 * element-wise below since arrays cannot be assigned. */
213 mda->bVerbose=bVerbose;
214 mda->bCompact=bCompact;
215 mda->nstglobalcomm=nstglobalcomm;
216 mda->ddxyz[XX]=ddxyz[XX];
217 mda->ddxyz[YY]=ddxyz[YY];
218 mda->ddxyz[ZZ]=ddxyz[ZZ];
219 mda->dd_node_order=dd_node_order;
221 mda->rconstr=rconstr;
222 mda->dddlb_opt=dddlb_opt;
223 mda->dlb_scale=dlb_scale;
227 mda->nstepout=nstepout;
228 mda->resetstep=resetstep;
229 mda->nmultisim=nmultisim;
230 mda->repl_ex_nst=repl_ex_nst;
231 mda->repl_ex_nex=repl_ex_nex;
232 mda->repl_ex_seed=repl_ex_seed;
234 mda->cpt_period=cpt_period;
235 mda->max_hours=max_hours;
236 mda->deviceOptions=deviceOptions;
239 fprintf(stderr, "Starting %d threads\n",nthreads);
241 /* now spawn new threads that start mdrunner_start_fn(), while
242 the main thread returns */
/* NOTE(review): the TRUE first argument appears to control whether the
 * main thread returns here instead of running the start fn (consistent
 * with the comment above) -- confirm against the thread_mpi API docs. */
243 ret=tMPI_Init_fn(TRUE, nthreads, mdrunner_start_fn, (void*)(mda) );
244 if (ret!=TMPI_SUCCESS)
247 /* make a new comm_rec to reflect the new situation */
248 crn=init_par_threads(cr);
253 /* Get the number of threads to use for thread-MPI based on how many
254 * were requested, which algorithms we're using,
255 * and how many particles there are.
/* Decision order (as visible below): an explicit request wins; otherwise
 * use GMX_MAX_THREADS or the detected hardware thread count, then clamp
 * the automatic choice so each thread gets at least MIN_ATOMS_PER_THREAD
 * atoms (NM and TPI are exempt).  Serial-only algorithms (L-BFGS, plain
 * Ewald) print a notice and presumably fall back to a single thread --
 * the assignment itself is not visible in this sampled excerpt.
 * NOTE(review): braces and else-lines are among the missing lines. */
257 static int get_nthreads_mpi(int nthreads_requested, t_inputrec *inputrec,
260 int nthreads,nthreads_new;
261 int min_atoms_per_thread;
264 nthreads = nthreads_requested;
266 /* determine # of hardware threads. */
267 if (nthreads_requested < 1)
/* GMX_MAX_THREADS overrides hardware detection when set; it must parse
 * to a value > 0 or we abort with gmx_fatal below. */
269 if ((env = getenv("GMX_MAX_THREADS")) != NULL)
272 sscanf(env,"%d",&nthreads);
275 gmx_fatal(FARGS,"GMX_MAX_THREADS (%d) should be larger than 0",
281 nthreads = tMPI_Thread_get_hw_number();
/* Normal modes and TPI parallelize over steps rather than atoms, so no
 * per-thread atom minimum applies to them. */
285 if (inputrec->eI == eiNM || EI_TPI(inputrec->eI))
287 /* Steps are divided over the nodes iso splitting the atoms */
288 min_atoms_per_thread = 0;
292 min_atoms_per_thread = MIN_ATOMS_PER_THREAD;
295 /* Check if an algorithm does not support parallel simulation. */
297 ( inputrec->eI == eiLBFGS ||
298 inputrec->coulombtype == eelEWALD ) )
300 fprintf(stderr,"\nThe integration or electrostatics algorithm doesn't support parallel runs. Not starting any threads.\n");
303 else if (nthreads_requested < 1 &&
304 mtop->natoms/nthreads < min_atoms_per_thread)
306 /* the thread number was chosen automatically, but there are too many
307 threads (too few atoms per thread) */
308 nthreads_new = max(1,mtop->natoms/min_atoms_per_thread);
/* Round the reduced count to a "nice" value: multiples of 4 above 8
 * threads (or when 8 hardware threads were detected), multiples of 2
 * above 4, to avoid awkward thread counts like 5, 6 or 7. */
310 if (nthreads_new > 8 || (nthreads == 8 && nthreads_new > 4))
312 /* Use only multiples of 4 above 8 threads
313 * or with an 8-core processor
314 * (to avoid 6 threads on 8 core processors with 4 real cores).
316 nthreads_new = (nthreads_new/4)*4;
318 else if (nthreads_new > 4)
320 /* Avoid 5 or 7 threads */
321 nthreads_new = (nthreads_new/2)*2;
324 nthreads = nthreads_new;
/* Tell the user why fewer threads than hardware supports were started. */
326 fprintf(stderr,"\n");
327 fprintf(stderr,"NOTE: Parallelization is limited by the small number of atoms,\n");
328 fprintf(stderr," only starting %d threads.\n",nthreads);
329 fprintf(stderr," You can use the -nt option to optimize the number of threads.\n\n");
336 int mdrunner(int nthreads_requested, FILE *fplog,t_commrec *cr,int nfile,
337 const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
338 gmx_bool bCompact, int nstglobalcomm,
339 ivec ddxyz,int dd_node_order,real rdd,real rconstr,
340 const char *dddlb_opt,real dlb_scale,
341 const char *ddcsx,const char *ddcsy,const char *ddcsz,
342 int nstepout,int resetstep,int nmultisim, int repl_ex_nst, int repl_ex_nex,
343 int repl_ex_seed, real pforce,real cpt_period,real max_hours,
344 const char *deviceOptions, unsigned long Flags)
346 double nodetime=0,realtime;
347 t_inputrec *inputrec;
350 gmx_ddbox_t ddbox={0};
351 int npme_major,npme_minor;
354 gmx_mtop_t *mtop=NULL;
355 t_mdatoms *mdatoms=NULL;
359 gmx_pme_t *pmedata=NULL;
360 gmx_vsite_t *vsite=NULL;
362 int i,m,nChargePerturbed=-1,status,nalloc;
364 gmx_wallcycle_t wcycle;
365 gmx_bool bReadRNG,bReadEkin;
367 gmx_runtime_t runtime;
369 gmx_large_int_t reset_counters;
371 t_commrec *cr_old=cr;
374 gmx_membed_t membed=NULL;
376 /* CAUTION: threads may be started later on in this function, so
377 cr doesn't reflect the final parallel state right now */
381 if (bVerbose && SIMMASTER(cr))
383 fprintf(stderr,"Getting Loaded...\n");
386 if (Flags & MD_APPENDFILES)
394 /* Read (nearly) all data required for the simulation */
395 read_tpx_state(ftp2fn(efTPX,nfile,fnm),inputrec,state,NULL,mtop);
397 /* NOW the threads will be started: */
398 #ifdef GMX_THREAD_MPI
399 nthreads_mpi = get_nthreads_mpi(nthreads_requested, inputrec, mtop);
401 if (nthreads_mpi > 1)
403 /* now start the threads. */
404 cr=mdrunner_start_threads(nthreads_mpi, fplog, cr_old, nfile, fnm,
405 oenv, bVerbose, bCompact, nstglobalcomm,
406 ddxyz, dd_node_order, rdd, rconstr,
407 dddlb_opt, dlb_scale, ddcsx, ddcsy, ddcsz,
408 nstepout, resetstep, nmultisim,
409 repl_ex_nst, repl_ex_nex, repl_ex_seed, pforce,
410 cpt_period, max_hours, deviceOptions,
412 /* the main thread continues here with a new cr. We don't deallocate
413 the old cr because other threads may still be reading it. */
416 gmx_comm("Failed to spawn threads");
421 /* END OF CAUTION: cr is now reliable */
423 /* g_membed initialisation *
424 * Because we change the mtop, init_membed is called before the init_parallel *
425 * (in case we ever want to make it run in parallel) */
426 if (opt2bSet("-membed",nfile,fnm))
430 fprintf(stderr,"Initializing membed");
432 membed = init_membed(fplog,nfile,fnm,mtop,inputrec,state,cr,&cpt_period);
437 /* now broadcast everything to the non-master nodes/threads: */
438 init_parallel(fplog, cr, inputrec, mtop);
442 pr_inputrec(fplog,0,"Input Parameters",inputrec,FALSE);
445 /* now make sure the state is initialized and propagated */
446 set_state_entries(state,inputrec,cr->nnodes);
448 /* remove when vv and rerun works correctly! */
449 if (PAR(cr) && EI_VV(inputrec->eI) && ((Flags & MD_RERUN) || (Flags & MD_RERUN_VSITE)))
452 "Currently can't do velocity verlet with rerun in parallel.");
455 /* A parallel command line option consistency check that we can
456 only do after any threads have started. */
458 (ddxyz[XX] > 1 || ddxyz[YY] > 1 || ddxyz[ZZ] > 1 || cr->npmenodes > 0))
461 "The -dd or -npme option request a parallel simulation, "
463 "but mdrun was compiled without threads or MPI enabled"
465 #ifdef GMX_THREAD_MPI
466 "but the number of threads (option -nt) is 1"
468 "but mdrun was not started through mpirun/mpiexec or only one process was requested through mpirun/mpiexec"
474 if ((Flags & MD_RERUN) &&
475 (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
477 gmx_fatal(FARGS, "The .mdp file specified an energy mininization or normal mode algorithm, and these are not compatible with mdrun -rerun");
480 if (can_use_allvsall(inputrec,mtop,TRUE,cr,fplog))
482 /* All-vs-all loops do not work with domain decomposition */
486 if (!EEL_PME(inputrec->coulombtype) || (Flags & MD_PARTDEC))
488 if (cr->npmenodes > 0)
490 if (!EEL_PME(inputrec->coulombtype))
492 gmx_fatal_collective(FARGS,cr,NULL,
493 "PME nodes are requested, but the system does not use PME electrostatics");
495 if (Flags & MD_PARTDEC)
497 gmx_fatal_collective(FARGS,cr,NULL,
498 "PME nodes are requested, but particle decomposition does not support separate PME nodes");
506 fcRegisterSteps(inputrec->nsteps,inputrec->init_step);
509 /* NMR restraints must be initialized before load_checkpoint,
510 * since with time averaging the history is added to t_state.
511 * For proper consistency check we therefore need to extend
513 * So the PME-only nodes (if present) will also initialize
514 * the distance restraints.
518 /* This needs to be called before read_checkpoint to extend the state */
519 init_disres(fplog,mtop,inputrec,cr,Flags & MD_PARTDEC,fcd,state);
521 if (gmx_mtop_ftype_count(mtop,F_ORIRES) > 0)
523 if (PAR(cr) && !(Flags & MD_PARTDEC))
525 gmx_fatal(FARGS,"Orientation restraints do not work (yet) with domain decomposition, use particle decomposition (mdrun option -pd)");
527 /* Orientation restraints */
530 init_orires(fplog,mtop,state->x,inputrec,cr->ms,&(fcd->orires),
535 if (DEFORM(*inputrec))
537 /* Store the deform reference box before reading the checkpoint */
540 copy_mat(state->box,box);
544 gmx_bcast(sizeof(box),box,cr);
546 /* Because we do not have the update struct available yet
547 * in which the reference values should be stored,
548 * we store them temporarily in static variables.
549 * This should be thread safe, since they are only written once
550 * and with identical values.
552 #ifdef GMX_THREAD_MPI
553 tMPI_Thread_mutex_lock(&deform_init_box_mutex);
555 deform_init_init_step_tpx = inputrec->init_step;
556 copy_mat(box,deform_init_box_tpx);
557 #ifdef GMX_THREAD_MPI
558 tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
562 if (opt2bSet("-cpi",nfile,fnm))
564 /* Check if checkpoint file exists before doing continuation.
565 * This way we can use identical input options for the first and subsequent runs...
567 if( gmx_fexist_master(opt2fn_master("-cpi",nfile,fnm,cr),cr) )
569 load_checkpoint(opt2fn_master("-cpi",nfile,fnm,cr),&fplog,
570 cr,Flags & MD_PARTDEC,ddxyz,
571 inputrec,state,&bReadRNG,&bReadEkin,
572 (Flags & MD_APPENDFILES),
573 (Flags & MD_APPENDFILESSET));
577 Flags |= MD_READ_RNG;
581 Flags |= MD_READ_EKIN;
586 if (((MASTER(cr) || (Flags & MD_SEPPOT)) && (Flags & MD_APPENDFILES))
587 #ifdef GMX_THREAD_MPI
588 /* With thread MPI only the master node/thread exists in mdrun.c,
589 * therefore non-master nodes need to open the "seppot" log file here.
591 || (!MASTER(cr) && (Flags & MD_SEPPOT))
595 gmx_log_open(ftp2fn(efLOG,nfile,fnm),cr,!(Flags & MD_SEPPOT),
601 copy_mat(state->box,box);
606 gmx_bcast(sizeof(box),box,cr);
609 /* Essential dynamics */
610 if (opt2bSet("-ei",nfile,fnm))
612 /* Open input and output files, allocate space for ED data structure */
613 ed = ed_open(nfile,fnm,Flags,cr);
616 if (bVerbose && SIMMASTER(cr))
618 fprintf(stderr,"Loaded with Money\n\n");
621 if (PAR(cr) && !((Flags & MD_PARTDEC) ||
622 EI_TPI(inputrec->eI) ||
623 inputrec->eI == eiNM))
625 cr->dd = init_domain_decomposition(fplog,cr,Flags,ddxyz,rdd,rconstr,
630 &ddbox,&npme_major,&npme_minor);
632 make_dd_communicators(fplog,cr,dd_node_order);
634 /* Set overallocation to avoid frequent reallocation of arrays */
635 set_over_alloc_dd(TRUE);
639 /* PME, if used, is done on all nodes with 1D decomposition */
641 cr->duty = (DUTY_PP | DUTY_PME);
644 if (!EI_TPI(inputrec->eI))
646 npme_major = cr->nnodes;
649 if (inputrec->ePBC == epbcSCREW)
652 "pbc=%s is only implemented with domain decomposition",
653 epbc_names[inputrec->ePBC]);
659 /* After possible communicator splitting in make_dd_communicators.
660 * we can set up the intra/inter node communication.
662 gmx_setup_nodecomm(fplog,cr);
665 /* get number of OpenMP/PME threads
666 * env variable should be read only on one node to make sure it is identical everywhere */
668 if (EEL_PME(inputrec->coulombtype))
673 if ((ptr=getenv("GMX_PME_NTHREADS")) != NULL)
675 sscanf(ptr,"%d",&nthreads_pme);
677 if (fplog != NULL && nthreads_pme > 1)
679 fprintf(fplog,"Using %d threads for PME\n",nthreads_pme);
684 gmx_bcast_sim(sizeof(nthreads_pme),&nthreads_pme,cr);
689 wcycle = wallcycle_init(fplog,resetstep,cr,nthreads_pme);
692 /* Master synchronizes its value of reset_counters with all nodes
693 * including PME only nodes */
694 reset_counters = wcycle_get_reset_counters(wcycle);
695 gmx_bcast_sim(sizeof(reset_counters),&reset_counters,cr);
696 wcycle_set_reset_counters(wcycle, reset_counters);
701 if (cr->duty & DUTY_PP)
703 /* For domain decomposition we allocate dynamically
704 * in dd_partition_system.
706 if (DOMAINDECOMP(cr))
708 bcast_state_setup(cr,state);
714 bcast_state(cr,state,TRUE);
718 /* Initiate forcerecord */
720 init_forcerec(fplog,oenv,fr,fcd,inputrec,mtop,cr,box,FALSE,
721 opt2fn("-table",nfile,fnm),
722 opt2fn("-tabletf",nfile,fnm),
723 opt2fn("-tablep",nfile,fnm),
724 opt2fn("-tableb",nfile,fnm),FALSE,pforce);
726 /* version for PCA_NOT_READ_NODE (see md.c) */
727 /*init_forcerec(fplog,fr,fcd,inputrec,mtop,cr,box,FALSE,
728 "nofile","nofile","nofile","nofile",FALSE,pforce);
730 fr->bSepDVDL = ((Flags & MD_SEPPOT) == MD_SEPPOT);
732 /* Initialize QM-MM */
735 init_QMMMrec(cr,box,mtop,inputrec,fr);
738 /* Initialize the mdatoms structure.
739 * mdatoms is not filled with atom data,
740 * as this can not be done now with domain decomposition.
742 mdatoms = init_mdatoms(fplog,mtop,inputrec->efep!=efepNO);
744 /* Initialize the virtual site communication */
745 vsite = init_vsite(mtop,cr);
747 calc_shifts(box,fr->shift_vec);
749 /* With periodic molecules the charge groups should be whole at start up
750 * and the virtual sites should not be far from their proper positions.
752 if (!inputrec->bContinuation && MASTER(cr) &&
753 !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
755 /* Make molecules whole at start of run */
756 if (fr->ePBC != epbcNONE)
758 do_pbc_first_mtop(fplog,inputrec->ePBC,box,mtop,state->x);
762 /* Correct initial vsite positions are required
763 * for the initial distribution in the domain decomposition
764 * and for the initial shell prediction.
766 construct_vsites_mtop(fplog,vsite,mtop,state->x);
770 if (EEL_PME(fr->eeltype))
772 ewaldcoeff = fr->ewaldcoeff;
773 pmedata = &fr->pmedata;
782 /* This is a PME only node */
784 /* We don't need the state */
787 ewaldcoeff = calc_ewaldcoeff(inputrec->rcoulomb, inputrec->ewald_rtol);
791 /* Initiate PME if necessary,
792 * either on all nodes or on dedicated PME nodes only. */
793 if (EEL_PME(inputrec->coulombtype))
797 nChargePerturbed = mdatoms->nChargePerturbed;
799 if (cr->npmenodes > 0)
801 /* The PME only nodes need to know nChargePerturbed */
802 gmx_bcast_sim(sizeof(nChargePerturbed),&nChargePerturbed,cr);
806 /* Set CPU affinity. Can be important for performance.
807 On some systems (e.g. Cray) CPU Affinity is set by default.
808 But default assigning doesn't work (well) with only some ranks
809 having threads. This causes very low performance.
810 External tools have cumbersome syntax for setting affinity
811 in the case that only some ranks have threads.
812 Thus it is important that GROMACS sets the affinity internally at
813 if only PME is using threads.
821 MPI_Comm comm_intra; /* intra communicator (but different to nc.comm_intra includes PME nodes) */
822 MPI_Comm_split(MPI_COMM_WORLD,gmx_hostname_num(),gmx_node_rank(),&comm_intra);
823 int local_omp_nthreads = (cr->duty & DUTY_PME) ? nthreads_pme : 1; /* threads on this node */
824 MPI_Scan(&local_omp_nthreads,&core, 1, MPI_INT, MPI_SUM, comm_intra);
825 core-=local_omp_nthreads; /* make exclusive scan */
826 #pragma omp parallel firstprivate(core) num_threads(local_omp_nthreads)
830 core+=gmx_omp_get_thread_num();
832 sched_setaffinity((pid_t) syscall (SYS_gettid),sizeof(cpu_set_t),&mask);
837 #endif /*GMX_OPENMP*/
839 if (cr->duty & DUTY_PME)
841 status = gmx_pme_init(pmedata,cr,npme_major,npme_minor,inputrec,
842 mtop ? mtop->natoms : 0,nChargePerturbed,
843 (Flags & MD_REPRODUCIBLE),nthreads_pme);
846 gmx_fatal(FARGS,"Error %d initializing PME",status);
852 if (integrator[inputrec->eI].func == do_md
855 integrator[inputrec->eI].func == do_md_openmm
859 /* Turn on signal handling on all nodes */
861 * (A user signal from the PME nodes (if any)
862 * is communicated to the PP nodes.
864 signal_handler_install();
867 if (cr->duty & DUTY_PP)
869 if (inputrec->ePull != epullNO)
871 /* Initialize pull code */
872 init_pull(fplog,inputrec,nfile,fnm,mtop,cr,oenv, inputrec->fepvals->init_lambda,
873 EI_DYNAMICS(inputrec->eI) && MASTER(cr),Flags);
878 /* Initialize enforced rotation code */
879 init_rot(fplog,inputrec,nfile,fnm,cr,state->x,box,mtop,oenv,
883 constr = init_constraints(fplog,mtop,inputrec,ed,state,cr);
885 if (DOMAINDECOMP(cr))
887 dd_init_bondeds(fplog,cr->dd,mtop,vsite,constr,inputrec,
888 Flags & MD_DDBONDCHECK,fr->cginfo_mb);
890 set_dd_parameters(fplog,cr->dd,dlb_scale,inputrec,fr,&ddbox);
892 setup_dd_grid(fplog,cr->dd);
895 /* Now do whatever the user wants us to do (how flexible...) */
896 integrator[inputrec->eI].func(fplog,cr,nfile,fnm,
897 oenv,bVerbose,bCompact,
900 nstepout,inputrec,mtop,
902 mdatoms,nrnb,wcycle,ed,fr,
903 repl_ex_nst,repl_ex_nex,repl_ex_seed,
905 cpt_period,max_hours,
910 if (inputrec->ePull != epullNO)
912 finish_pull(fplog,inputrec->pull);
917 finish_rot(fplog,inputrec->rot);
924 gmx_pmeonly(*pmedata,cr,nrnb,wcycle,ewaldcoeff,FALSE,inputrec);
927 if (EI_DYNAMICS(inputrec->eI) || EI_TPI(inputrec->eI))
929 /* Some timing stats */
932 if (runtime.proc == 0)
934 runtime.proc = runtime.real;
943 wallcycle_stop(wcycle,ewcRUN);
945 /* Finish up, write some stuff
946 * if rerunMD, don't write last frame again
948 finish_run(fplog,cr,ftp2fn(efSTO,nfile,fnm),
949 inputrec,nrnb,wcycle,&runtime,
950 EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));
952 if (opt2bSet("-membed",nfile,fnm))
957 /* Does what it says */
958 print_date_and_time(fplog,cr->nodeid,"Finished mdrun",&runtime);
960 /* Close logfile already here if we were appending to it */
961 if (MASTER(cr) && (Flags & MD_APPENDFILES))
963 gmx_log_close(fplog);
966 rc=(int)gmx_get_stop_condition();
968 #ifdef GMX_THREAD_MPI
969 /* we need to join all threads. The sub-threads join when they
970 exit this function, but the master thread needs to be told to
972 if (PAR(cr) && MASTER(cr))