1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
4 * This source code is part of
8 * GROningen MAchine for Chemical Simulations
11 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
12 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
13 * Copyright (c) 2001-2004, The GROMACS development team,
14 * check out http://www.gromacs.org for more information.
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version 2
19 * of the License, or (at your option) any later version.
21 * If you want to redistribute modifications, please consider that
22 * scientific software is very special. Version control is crucial -
23 * bugs must be traceable. We will be happy to consider code for
24 * inclusion in the official distribution, but derived work must not
25 * be called official GROMACS. Details are found in the README & COPYING
26 * files - if they are missing, get the official version at www.gromacs.org.
28 * To help us fund GROMACS development, we humbly ask that you cite
29 * the papers on the package - you can find them in the top README file.
31 * For more info, check our website at http://www.gromacs.org
34 * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
42 #include <sys/syscall.h>
61 #include "mpelogging.h"
67 #include "checkpoint.h"
68 #include "mtop_util.h"
69 #include "sighandler.h"
72 #include "pull_rotation.h"
74 #include "md_openmm.h"
88 #include "md_openmm.h"
/* NOTE(review): this is a line-numbered excerpt with many original lines
 * elided; code tokens below are kept byte-identical, only comments added. */
/* Fragment of the gmx_intp_t struct: a single function pointer holding an
 * integrator entry point (the enclosing typedef/braces are not visible in
 * this excerpt). */
97 gmx_integrator_t *func;
100 /* The array should match the eI array in include/types/enums.h */
/* OpenMM build: every integrator slot is routed through the single
 * do_md_openmm entry point (see the FIXME on the next line). */
101 #ifdef GMX_OPENMM /* FIXME do_md_openmm needs fixing */
102 const gmx_intp_t integrator[eiNR] = { {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm},{do_md_openmm}};
/* Native build: dispatch table indexed by inputrec->eI; used at the bottom
 * of mdrunner() as integrator[inputrec->eI].func(...). */
104 const gmx_intp_t integrator[eiNR] = { {do_md}, {do_steep}, {do_cg}, {do_md}, {do_md}, {do_nm}, {do_lbfgs}, {do_tpi}, {do_tpi}, {do_md}, {do_md},{do_md}};
/* One-time storage for the deform reference step and box read from the tpr.
 * They are written once (under deform_init_box_mutex for thread-MPI builds)
 * in mdrunner() because the update struct does not exist yet at that point. */
107 gmx_large_int_t deform_init_init_step_tpx;
108 matrix deform_init_box_tpx;
109 #ifdef GMX_THREAD_MPI
/* Serializes the one-time write of the deform reference values when several
 * thread-MPI ranks share this process (locked/unlocked in mdrunner()). */
110 tMPI_Thread_mutex_t deform_init_box_mutex=TMPI_THREAD_MUTEX_INITIALIZER;
114 #ifdef GMX_THREAD_MPI
/* Argument bundle used to pass all mdrunner() parameters through the single
 * void* accepted by the tMPI thread-start function (mdrunner_start_fn).
 * Only a few members are visible in this excerpt; the full struct mirrors
 * the mdrunner() parameter list, plus `ret` for the per-thread result. */
115 struct mdrunner_arglist
129 const char *dddlb_opt;
143 const char *deviceOptions;
145 int ret; /* return value */
149 /* The function used for spawning threads. Extracts the mdrunner()
150 arguments from its one argument and calls mdrunner(), after making
/* Thread entry point handed to tMPI_Init_fn(): each spawned thread copies
 * the shared argument list, builds thread-local state, then runs mdrunner().
 * NOTE(review): several lines are elided in this excerpt (opening brace,
 * declarations of fplog/fnm, etc.). */
152 static void mdrunner_start_fn(void *arg)
154 struct mdrunner_arglist *mda=(struct mdrunner_arglist*)arg;
155 struct mdrunner_arglist mc=*mda; /* copy the arg list to make sure
156 that it's thread-local. This doesn't
157 copy pointed-to items, of course,
158 but those are all const. */
159 t_commrec *cr; /* we need a local version of this */
/* Duplicate the filename table so this thread owns its own copy. */
163 fnm = dup_tfn(mc.nfile, mc.fnm);
/* Create this thread's communication record from the master's. */
165 cr = init_par_threads(mc.cr);
/* Run the full simulation driver with the unpacked arguments; the return
 * code is stored back through the shared mda so the master can read it. */
172 mda->ret=mdrunner(cr->nnodes, fplog, cr, mc.nfile, fnm, mc.oenv,
173 mc.bVerbose, mc.bCompact, mc.nstglobalcomm,
174 mc.ddxyz, mc.dd_node_order, mc.rdd,
175 mc.rconstr, mc.dddlb_opt, mc.dlb_scale,
176 mc.ddcsx, mc.ddcsy, mc.ddcsz, mc.nstepout, mc.resetstep,
177 mc.nmultisim, mc.repl_ex_nst, mc.repl_ex_nex, mc.repl_ex_seed, mc.pforce,
178 mc.cpt_period, mc.max_hours, mc.deviceOptions, mc.Flags);
181 /* called by mdrunner() to start a specific number of threads (including
182 the main thread) for thread-parallel runs. This in turn calls mdrunner()
184 All options besides nthreads are the same as for mdrunner(). */
/* Packs all mdrunner() arguments into a heap-allocated mdrunner_arglist,
 * spawns `nthreads` tMPI threads running mdrunner_start_fn, and returns a
 * new t_commrec reflecting the parallel state (returned via crn; the
 * `return` itself is elided from this excerpt).
 * NOTE(review): several lines are missing here (opening brace, snew of mda,
 * assignments of fplog/cr/nfile/fnm/oenv/rdd/ddcsx..ddcsz/pforce/Flags,
 * and the error-handling branch after tMPI_Init_fn). */
185 static t_commrec *mdrunner_start_threads(int nthreads,
186 FILE *fplog,t_commrec *cr,int nfile,
187 const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
188 gmx_bool bCompact, int nstglobalcomm,
189 ivec ddxyz,int dd_node_order,real rdd,real rconstr,
190 const char *dddlb_opt,real dlb_scale,
191 const char *ddcsx,const char *ddcsy,const char *ddcsz,
192 int nstepout,int resetstep,int nmultisim,int repl_ex_nst,
193 int repl_ex_nex, int repl_ex_seed, real pforce,real cpt_period, real max_hours,
194 const char *deviceOptions, unsigned long Flags)
197 struct mdrunner_arglist *mda;
198 t_commrec *crn; /* the new commrec */
201 /* first check whether we even need to start tMPI */
205 /* a few small, one-time, almost unavoidable memory leaks: */
/* Duplicate the filename table; the copy outlives this call (leaked once). */
207 fnmn=dup_tfn(nfile, fnm);
209 /* fill the data structure to pass as void pointer to thread start fn */
215 mda->bVerbose=bVerbose;
216 mda->bCompact=bCompact;
217 mda->nstglobalcomm=nstglobalcomm;
/* ivec cannot be assigned as a whole; copy component-wise. */
218 mda->ddxyz[XX]=ddxyz[XX];
219 mda->ddxyz[YY]=ddxyz[YY];
220 mda->ddxyz[ZZ]=ddxyz[ZZ];
221 mda->dd_node_order=dd_node_order;
223 mda->rconstr=rconstr;
224 mda->dddlb_opt=dddlb_opt;
225 mda->dlb_scale=dlb_scale;
229 mda->nstepout=nstepout;
230 mda->resetstep=resetstep;
231 mda->nmultisim=nmultisim;
232 mda->repl_ex_nst=repl_ex_nst;
233 mda->repl_ex_nex=repl_ex_nex;
234 mda->repl_ex_seed=repl_ex_seed;
236 mda->cpt_period=cpt_period;
237 mda->max_hours=max_hours;
238 mda->deviceOptions=deviceOptions;
241 fprintf(stderr, "Starting %d threads\n",nthreads);
243 /* now spawn new threads that start mdrunner_start_fn(), while
244 the main thread returns */
/* TRUE: the calling (main) thread also becomes tMPI rank 0. */
245 ret=tMPI_Init_fn(TRUE, nthreads, mdrunner_start_fn, (void*)(mda) );
246 if (ret!=TMPI_SUCCESS)
249 /* make a new comm_rec to reflect the new situation */
250 crn=init_par_threads(cr);
255 /* Get the number of threads to use for thread-MPI based on how many
256 * were requested, which algorithms we're using,
257 * and how many particles there are.
/* Decision order visible in this excerpt:
 *   1. use nthreads_requested if >= 1;
 *   2. else GMX_MAX_THREADS env var (fatal if <= 0);
 *   3. else the hardware thread count from tMPI;
 *   4. force a single thread for algorithms without parallel support
 *      (L-BFGS, plain Ewald);
 *   5. when chosen automatically, cap by atoms-per-thread and round the
 *      count down to "nice" multiples to avoid odd thread counts.
 * NOTE(review): the final `return nthreads;` and closing brace are elided
 * from this excerpt, as are the mtop parameter declaration and some braces. */
259 static int get_nthreads_mpi(int nthreads_requested, t_inputrec *inputrec,
262 int nthreads,nthreads_new;
263 int min_atoms_per_thread;
266 nthreads = nthreads_requested;
268 /* determine # of hardware threads. */
269 if (nthreads_requested < 1)
271 if ((env = getenv("GMX_MAX_THREADS")) != NULL)
274 sscanf(env,"%d",&nthreads);
/* A non-positive override is a user error, not something to clamp. */
277 gmx_fatal(FARGS,"GMX_MAX_THREADS (%d) should be larger than 0",
283 nthreads = tMPI_Thread_get_hw_number();
/* NM and TPI divide steps over nodes instead of atoms, so the
 * atoms-per-thread floor does not apply to them. */
287 if (inputrec->eI == eiNM || EI_TPI(inputrec->eI))
289 /* Steps are divided over the nodes iso splitting the atoms */
290 min_atoms_per_thread = 0;
294 min_atoms_per_thread = MIN_ATOMS_PER_THREAD;
297 /* Check if an algorithm does not support parallel simulation. */
299 ( inputrec->eI == eiLBFGS ||
300 inputrec->coulombtype == eelEWALD ) )
302 fprintf(stderr,"\nThe integration or electrostatics algorithm doesn't support parallel runs. Not starting any threads.\n");
/* Only second-guess the count when it was chosen automatically. */
305 else if (nthreads_requested < 1 &&
306 mtop->natoms/nthreads < min_atoms_per_thread)
308 /* the thread number was chosen automatically, but there are too many
309 threads (too few atoms per thread) */
310 nthreads_new = max(1,mtop->natoms/min_atoms_per_thread);
312 if (nthreads_new > 8 || (nthreads == 8 && nthreads_new > 4))
314 /* Use only multiples of 4 above 8 threads
315 * or with an 8-core processor
316 * (to avoid 6 threads on 8 core processors with 4 real cores).
318 nthreads_new = (nthreads_new/4)*4;
320 else if (nthreads_new > 4)
322 /* Avoid 5 or 7 threads */
323 nthreads_new = (nthreads_new/2)*2;
326 nthreads = nthreads_new;
/* Tell the user why fewer threads than hardware allows were started. */
328 fprintf(stderr,"\n");
329 fprintf(stderr,"NOTE: Parallelization is limited by the small number of atoms,\n");
330 fprintf(stderr," only starting %d threads.\n",nthreads);
331 fprintf(stderr," You can use the -nt option to optimize the number of threads.\n\n");
338 int mdrunner(int nthreads_requested, FILE *fplog,t_commrec *cr,int nfile,
339 const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
340 gmx_bool bCompact, int nstglobalcomm,
341 ivec ddxyz,int dd_node_order,real rdd,real rconstr,
342 const char *dddlb_opt,real dlb_scale,
343 const char *ddcsx,const char *ddcsy,const char *ddcsz,
344 int nstepout,int resetstep,int nmultisim, int repl_ex_nst, int repl_ex_nex,
345 int repl_ex_seed, real pforce,real cpt_period,real max_hours,
346 const char *deviceOptions, unsigned long Flags)
348 double nodetime=0,realtime;
349 t_inputrec *inputrec;
352 gmx_ddbox_t ddbox={0};
353 int npme_major,npme_minor;
356 gmx_mtop_t *mtop=NULL;
357 t_mdatoms *mdatoms=NULL;
361 gmx_pme_t *pmedata=NULL;
362 gmx_vsite_t *vsite=NULL;
364 int i,m,nChargePerturbed=-1,status,nalloc;
366 gmx_wallcycle_t wcycle;
367 gmx_bool bReadRNG,bReadEkin;
369 gmx_runtime_t runtime;
371 gmx_large_int_t reset_counters;
373 t_commrec *cr_old=cr;
376 gmx_membed_t membed=NULL;
378 /* CAUTION: threads may be started later on in this function, so
379 cr doesn't reflect the final parallel state right now */
383 if (bVerbose && SIMMASTER(cr))
385 fprintf(stderr,"Getting Loaded...\n");
388 if (Flags & MD_APPENDFILES)
396 /* Read (nearly) all data required for the simulation */
397 read_tpx_state(ftp2fn(efTPX,nfile,fnm),inputrec,state,NULL,mtop);
399 /* NOW the threads will be started: */
400 #ifdef GMX_THREAD_MPI
401 nthreads_mpi = get_nthreads_mpi(nthreads_requested, inputrec, mtop);
403 if (nthreads_mpi > 1)
405 /* now start the threads. */
406 cr=mdrunner_start_threads(nthreads_mpi, fplog, cr_old, nfile, fnm,
407 oenv, bVerbose, bCompact, nstglobalcomm,
408 ddxyz, dd_node_order, rdd, rconstr,
409 dddlb_opt, dlb_scale, ddcsx, ddcsy, ddcsz,
410 nstepout, resetstep, nmultisim,
411 repl_ex_nst, repl_ex_nex, repl_ex_seed, pforce,
412 cpt_period, max_hours, deviceOptions,
414 /* the main thread continues here with a new cr. We don't deallocate
415 the old cr because other threads may still be reading it. */
418 gmx_comm("Failed to spawn threads");
423 /* END OF CAUTION: cr is now reliable */
425 /* g_membed initialisation *
426 * Because we change the mtop, init_membed is called before the init_parallel *
427 * (in case we ever want to make it run in parallel) */
428 if (opt2bSet("-membed",nfile,fnm))
432 fprintf(stderr,"Initializing membed");
434 membed = init_membed(fplog,nfile,fnm,mtop,inputrec,state,cr,&cpt_period);
439 /* now broadcast everything to the non-master nodes/threads: */
440 init_parallel(fplog, cr, inputrec, mtop);
444 pr_inputrec(fplog,0,"Input Parameters",inputrec,FALSE);
447 /* now make sure the state is initialized and propagated */
448 set_state_entries(state,inputrec,cr->nnodes);
450 /* remove when vv and rerun works correctly! */
451 if (PAR(cr) && EI_VV(inputrec->eI) && ((Flags & MD_RERUN) || (Flags & MD_RERUN_VSITE)))
454 "Currently can't do velocity verlet with rerun in parallel.");
457 /* A parallel command line option consistency check that we can
458 only do after any threads have started. */
460 (ddxyz[XX] > 1 || ddxyz[YY] > 1 || ddxyz[ZZ] > 1 || cr->npmenodes > 0))
463 "The -dd or -npme option request a parallel simulation, "
465 "but mdrun was compiled without threads or MPI enabled"
467 #ifdef GMX_THREAD_MPI
468 "but the number of threads (option -nt) is 1"
470 "but mdrun was not started through mpirun/mpiexec or only one process was requested through mpirun/mpiexec"
476 if ((Flags & MD_RERUN) &&
477 (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
479 gmx_fatal(FARGS, "The .mdp file specified an energy mininization or normal mode algorithm, and these are not compatible with mdrun -rerun");
482 if (can_use_allvsall(inputrec,mtop,TRUE,cr,fplog))
484 /* All-vs-all loops do not work with domain decomposition */
488 if (!EEL_PME(inputrec->coulombtype) || (Flags & MD_PARTDEC))
490 if (cr->npmenodes > 0)
492 if (!EEL_PME(inputrec->coulombtype))
494 gmx_fatal_collective(FARGS,cr,NULL,
495 "PME nodes are requested, but the system does not use PME electrostatics");
497 if (Flags & MD_PARTDEC)
499 gmx_fatal_collective(FARGS,cr,NULL,
500 "PME nodes are requested, but particle decomposition does not support separate PME nodes");
508 fcRegisterSteps(inputrec->nsteps,inputrec->init_step);
511 /* NMR restraints must be initialized before load_checkpoint,
512 * since with time averaging the history is added to t_state.
513 * For proper consistency check we therefore need to extend
515 * So the PME-only nodes (if present) will also initialize
516 * the distance restraints.
520 /* This needs to be called before read_checkpoint to extend the state */
521 init_disres(fplog,mtop,inputrec,cr,Flags & MD_PARTDEC,fcd,state);
523 if (gmx_mtop_ftype_count(mtop,F_ORIRES) > 0)
525 if (PAR(cr) && !(Flags & MD_PARTDEC))
527 gmx_fatal(FARGS,"Orientation restraints do not work (yet) with domain decomposition, use particle decomposition (mdrun option -pd)");
529 /* Orientation restraints */
532 init_orires(fplog,mtop,state->x,inputrec,cr->ms,&(fcd->orires),
537 if (DEFORM(*inputrec))
539 /* Store the deform reference box before reading the checkpoint */
542 copy_mat(state->box,box);
546 gmx_bcast(sizeof(box),box,cr);
548 /* Because we do not have the update struct available yet
549 * in which the reference values should be stored,
550 * we store them temporarily in static variables.
551 * This should be thread safe, since they are only written once
552 * and with identical values.
554 #ifdef GMX_THREAD_MPI
555 tMPI_Thread_mutex_lock(&deform_init_box_mutex);
557 deform_init_init_step_tpx = inputrec->init_step;
558 copy_mat(box,deform_init_box_tpx);
559 #ifdef GMX_THREAD_MPI
560 tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
564 if (opt2bSet("-cpi",nfile,fnm))
566 /* Check if checkpoint file exists before doing continuation.
567 * This way we can use identical input options for the first and subsequent runs...
569 if( gmx_fexist_master(opt2fn_master("-cpi",nfile,fnm,cr),cr) )
571 load_checkpoint(opt2fn_master("-cpi",nfile,fnm,cr),&fplog,
572 cr,Flags & MD_PARTDEC,ddxyz,
573 inputrec,state,&bReadRNG,&bReadEkin,
574 (Flags & MD_APPENDFILES),
575 (Flags & MD_APPENDFILESSET));
579 Flags |= MD_READ_RNG;
583 Flags |= MD_READ_EKIN;
588 if (((MASTER(cr) || (Flags & MD_SEPPOT)) && (Flags & MD_APPENDFILES))
589 #ifdef GMX_THREAD_MPI
590 /* With thread MPI only the master node/thread exists in mdrun.c,
591 * therefore non-master nodes need to open the "seppot" log file here.
593 || (!MASTER(cr) && (Flags & MD_SEPPOT))
597 gmx_log_open(ftp2fn(efLOG,nfile,fnm),cr,!(Flags & MD_SEPPOT),
603 copy_mat(state->box,box);
608 gmx_bcast(sizeof(box),box,cr);
611 /* Essential dynamics */
612 if (opt2bSet("-ei",nfile,fnm))
614 /* Open input and output files, allocate space for ED data structure */
615 ed = ed_open(nfile,fnm,Flags,cr);
618 if (bVerbose && SIMMASTER(cr))
620 fprintf(stderr,"Loaded with Money\n\n");
623 if (PAR(cr) && !((Flags & MD_PARTDEC) ||
624 EI_TPI(inputrec->eI) ||
625 inputrec->eI == eiNM))
627 cr->dd = init_domain_decomposition(fplog,cr,Flags,ddxyz,rdd,rconstr,
632 &ddbox,&npme_major,&npme_minor);
634 make_dd_communicators(fplog,cr,dd_node_order);
636 /* Set overallocation to avoid frequent reallocation of arrays */
637 set_over_alloc_dd(TRUE);
641 /* PME, if used, is done on all nodes with 1D decomposition */
643 cr->duty = (DUTY_PP | DUTY_PME);
646 if (!EI_TPI(inputrec->eI))
648 npme_major = cr->nnodes;
651 if (inputrec->ePBC == epbcSCREW)
654 "pbc=%s is only implemented with domain decomposition",
655 epbc_names[inputrec->ePBC]);
661 /* After possible communicator splitting in make_dd_communicators.
662 * we can set up the intra/inter node communication.
664 gmx_setup_nodecomm(fplog,cr);
667 /* get number of OpenMP/PME threads
668 * env variable should be read only on one node to make sure it is identical everywhere */
670 if (EEL_PME(inputrec->coulombtype))
675 if ((ptr=getenv("GMX_PME_NTHREADS")) != NULL)
677 sscanf(ptr,"%d",&nthreads_pme);
679 if (fplog != NULL && nthreads_pme > 1)
681 fprintf(fplog,"Using %d threads for PME\n",nthreads_pme);
686 gmx_bcast_sim(sizeof(nthreads_pme),&nthreads_pme,cr);
691 wcycle = wallcycle_init(fplog,resetstep,cr,nthreads_pme);
694 /* Master synchronizes its value of reset_counters with all nodes
695 * including PME only nodes */
696 reset_counters = wcycle_get_reset_counters(wcycle);
697 gmx_bcast_sim(sizeof(reset_counters),&reset_counters,cr);
698 wcycle_set_reset_counters(wcycle, reset_counters);
703 if (cr->duty & DUTY_PP)
705 /* For domain decomposition we allocate dynamically
706 * in dd_partition_system.
708 if (DOMAINDECOMP(cr))
710 bcast_state_setup(cr,state);
716 bcast_state(cr,state,TRUE);
720 /* Initiate forcerecord */
722 init_forcerec(fplog,oenv,fr,fcd,inputrec,mtop,cr,box,FALSE,
723 opt2fn("-table",nfile,fnm),
724 opt2fn("-tabletf",nfile,fnm),
725 opt2fn("-tablep",nfile,fnm),
726 opt2fn("-tableb",nfile,fnm),FALSE,pforce);
728 /* version for PCA_NOT_READ_NODE (see md.c) */
729 /*init_forcerec(fplog,fr,fcd,inputrec,mtop,cr,box,FALSE,
730 "nofile","nofile","nofile","nofile",FALSE,pforce);
732 fr->bSepDVDL = ((Flags & MD_SEPPOT) == MD_SEPPOT);
734 /* Initialize QM-MM */
737 init_QMMMrec(cr,box,mtop,inputrec,fr);
740 /* Initialize the mdatoms structure.
741 * mdatoms is not filled with atom data,
742 * as this can not be done now with domain decomposition.
744 mdatoms = init_mdatoms(fplog,mtop,inputrec->efep!=efepNO);
746 /* Initialize the virtual site communication */
747 vsite = init_vsite(mtop,cr);
749 calc_shifts(box,fr->shift_vec);
751 /* With periodic molecules the charge groups should be whole at start up
752 * and the virtual sites should not be far from their proper positions.
754 if (!inputrec->bContinuation && MASTER(cr) &&
755 !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
757 /* Make molecules whole at start of run */
758 if (fr->ePBC != epbcNONE)
760 do_pbc_first_mtop(fplog,inputrec->ePBC,box,mtop,state->x);
764 /* Correct initial vsite positions are required
765 * for the initial distribution in the domain decomposition
766 * and for the initial shell prediction.
768 construct_vsites_mtop(fplog,vsite,mtop,state->x);
772 if (EEL_PME(fr->eeltype))
774 ewaldcoeff = fr->ewaldcoeff;
775 pmedata = &fr->pmedata;
784 /* This is a PME only node */
786 /* We don't need the state */
789 ewaldcoeff = calc_ewaldcoeff(inputrec->rcoulomb, inputrec->ewald_rtol);
793 /* Initiate PME if necessary,
794 * either on all nodes or on dedicated PME nodes only. */
795 if (EEL_PME(inputrec->coulombtype))
799 nChargePerturbed = mdatoms->nChargePerturbed;
801 if (cr->npmenodes > 0)
803 /* The PME only nodes need to know nChargePerturbed */
804 gmx_bcast_sim(sizeof(nChargePerturbed),&nChargePerturbed,cr);
808 /* Set CPU affinity. Can be important for performance.
809 On some systems (e.g. Cray) CPU Affinity is set by default.
810 But default assigning doesn't work (well) with only some ranks
811 having threads. This causes very low performance.
812 External tools have cumbersome syntax for setting affinity
813 in the case that only some ranks have threads.
814 Thus it is important that GROMACS sets the affinity internally at
815 if only PME is using threads.
823 MPI_Comm comm_intra; /* intra communicator (but different to nc.comm_intra includes PME nodes) */
824 MPI_Comm_split(MPI_COMM_WORLD,gmx_hostname_num(),gmx_node_rank(),&comm_intra);
825 int local_omp_nthreads = (cr->duty & DUTY_PME) ? nthreads_pme : 1; /* threads on this node */
826 MPI_Scan(&local_omp_nthreads,&core, 1, MPI_INT, MPI_SUM, comm_intra);
827 core-=local_omp_nthreads; /* make exclusive scan */
828 #pragma omp parallel firstprivate(core) num_threads(local_omp_nthreads)
832 core+=omp_get_thread_num();
834 sched_setaffinity((pid_t) syscall (SYS_gettid),sizeof(cpu_set_t),&mask);
839 #endif /*GMX_OPENMP*/
841 if (cr->duty & DUTY_PME)
843 status = gmx_pme_init(pmedata,cr,npme_major,npme_minor,inputrec,
844 mtop ? mtop->natoms : 0,nChargePerturbed,
845 (Flags & MD_REPRODUCIBLE),nthreads_pme);
848 gmx_fatal(FARGS,"Error %d initializing PME",status);
854 if (integrator[inputrec->eI].func == do_md
857 integrator[inputrec->eI].func == do_md_openmm
861 /* Turn on signal handling on all nodes */
863 * (A user signal from the PME nodes (if any)
864 * is communicated to the PP nodes.
866 signal_handler_install();
869 if (cr->duty & DUTY_PP)
871 if (inputrec->ePull != epullNO)
873 /* Initialize pull code */
874 init_pull(fplog,inputrec,nfile,fnm,mtop,cr,oenv, inputrec->fepvals->init_lambda,
875 EI_DYNAMICS(inputrec->eI) && MASTER(cr),Flags);
880 /* Initialize enforced rotation code */
881 init_rot(fplog,inputrec,nfile,fnm,cr,state->x,state->box,mtop,oenv,
885 constr = init_constraints(fplog,mtop,inputrec,ed,state,cr);
887 if (DOMAINDECOMP(cr))
889 dd_init_bondeds(fplog,cr->dd,mtop,vsite,constr,inputrec,
890 Flags & MD_DDBONDCHECK,fr->cginfo_mb);
892 set_dd_parameters(fplog,cr->dd,dlb_scale,inputrec,fr,&ddbox);
894 setup_dd_grid(fplog,cr->dd);
897 /* Now do whatever the user wants us to do (how flexible...) */
898 integrator[inputrec->eI].func(fplog,cr,nfile,fnm,
899 oenv,bVerbose,bCompact,
902 nstepout,inputrec,mtop,
904 mdatoms,nrnb,wcycle,ed,fr,
905 repl_ex_nst,repl_ex_nex,repl_ex_seed,
907 cpt_period,max_hours,
912 if (inputrec->ePull != epullNO)
914 finish_pull(fplog,inputrec->pull);
919 finish_rot(fplog,inputrec->rot);
926 gmx_pmeonly(*pmedata,cr,nrnb,wcycle,ewaldcoeff,FALSE,inputrec);
929 if (EI_DYNAMICS(inputrec->eI) || EI_TPI(inputrec->eI))
931 /* Some timing stats */
934 if (runtime.proc == 0)
936 runtime.proc = runtime.real;
945 wallcycle_stop(wcycle,ewcRUN);
947 /* Finish up, write some stuff
948 * if rerunMD, don't write last frame again
950 finish_run(fplog,cr,ftp2fn(efSTO,nfile,fnm),
951 inputrec,nrnb,wcycle,&runtime,
952 EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));
954 if (opt2bSet("-membed",nfile,fnm))
959 /* Does what it says */
960 print_date_and_time(fplog,cr->nodeid,"Finished mdrun",&runtime);
962 /* Close logfile already here if we were appending to it */
963 if (MASTER(cr) && (Flags & MD_APPENDFILES))
965 gmx_log_close(fplog);
968 rc=(int)gmx_get_stop_condition();
970 #ifdef GMX_THREAD_MPI
971 /* we need to join all threads. The sub-threads join when they
972 exit this function, but the master thread needs to be told to
974 if (PAR(cr) && MASTER(cr))