1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
4 * This source code is part of
8 * GROningen MAchine for Chemical Simulations
11 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
12 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
13 * Copyright (c) 2001-2004, The GROMACS development team,
14 * check out http://www.gromacs.org for more information.
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version 2
19 * of the License, or (at your option) any later version.
21 * If you want to redistribute modifications, please consider that
22 * scientific software is very special. Version control is crucial -
23 * bugs must be traceable. We will be happy to consider code for
24 * inclusion in the official distribution, but derived work must not
25 * be called official GROMACS. Details are found in the README & COPYING
26 * files - if they are missing, get the official version at www.gromacs.org.
28 * To help us fund GROMACS development, we humbly ask that you cite
29 * the papers on the package - you can find them in the top README file.
31 * For more info, check our website at http://www.gromacs.org
34 * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
56 #include "md_support.h"
57 #include "md_logging.h"
73 #include "domdec_network.h"
79 #include "compute_io.h"
81 #include "checkpoint.h"
82 #include "mtop_util.h"
83 #include "sighandler.h"
86 #include "pme_loadbal.h"
89 #include "types/nlistheuristics.h"
90 #include "types/iteratedconstraints.h"
91 #include "nbnxn_cuda_data_mgmt.h"
93 #include "gromacs/utility/gmxmpi.h"
/*
 * Reset all performance-related counters partway through a run, so that the
 * timing/cycle statistics reported at the end cover only the steps executed
 * after this point (used when counters are reset at a given step or halfway
 * through the allotted wall time).
 *
 * NOTE(review): this listing is elided (the embedded source line numbers
 * jump), so brace lines and some statements between the visible calls are
 * missing here.  The comments below describe only what the visible lines
 * demonstrably do; do not assume the function body is complete as shown.
 *
 *  fplog    - log file handle passed through to the printing helpers
 *  cr       - communication record; used for md_print_warn, DOMAINDECOMP
 *             and cr->nodeid / cr->dd
 *  step     - current global step, printed in the reset message
 *  step_rel - in/out: steps done relative to ir->init_step; read here to
 *             rebase ir->init_step and ir->nsteps (presumably zeroed in an
 *             elided line -- TODO confirm against full source)
 *  ir       - input record whose init_step/nsteps are shifted so the
 *             remaining-step accounting restarts from "now"
 *  wcycle   - wallcycle counters: the ewcRUN counter is stopped, all
 *             counters reset, then ewcRUN restarted
 *  nrnb     - flop counters (no visible use in this elided listing --
 *             presumably reset in an elided line; verify)
 *  runtime  - run timer, restarted after the reset
 *  cu_nbv   - CUDA nonbonded data handle whose GPU timings are reset
 *             (likely guarded by a NULL check in an elided line -- confirm)
 */
99 static void reset_all_counters(FILE *fplog, t_commrec *cr,
100 gmx_large_int_t step,
101 gmx_large_int_t *step_rel, t_inputrec *ir,
102 gmx_wallcycle_t wcycle, t_nrnb *nrnb,
103 gmx_runtime_t *runtime,
104 nbnxn_cuda_ptr_t cu_nbv)
106 char sbuf[STEPSTRSIZE];
108 /* Reset all the counters related to performance over the run */
/* Announce the reset on stderr/log so users can correlate the reported
 * performance numbers with the step at which counting restarted. */
109 md_print_warn(cr, fplog, "step %s: resetting all time and cycle counters\n",
110 gmx_step_str(step, sbuf));
/* Clear the GPU-side nonbonded kernel timings as well. */
114 nbnxn_cuda_reset_timings(cu_nbv);
/* Stop the whole-run cycle counter before wiping all counters, then
 * (below) restart it so the post-reset interval is timed cleanly. */
117 wallcycle_stop(wcycle, ewcRUN);
118 wallcycle_reset_all(wcycle);
119 if (DOMAINDECOMP(cr))
/* With domain decomposition, also clear the DD load/communication stats. */
121 reset_dd_statistics_counters(cr->dd);
/* Rebase the step accounting: treat the current step as the new start, and
 * shrink the remaining step count accordingly. */
124 ir->init_step += *step_rel;
125 ir->nsteps -= *step_rel;
/* Restart the run-wide cycle counter and the wall-clock timer, and record
 * the restart time in the log. */
127 wallcycle_start(wcycle, ewcRUN);
128 runtime_start(runtime);
129 print_date_and_time(fplog, cr->nodeid, "Restarted time", runtime);
132 double do_md(FILE *fplog, t_commrec *cr, int nfile, const t_filenm fnm[],
133 const output_env_t oenv, gmx_bool bVerbose, gmx_bool bCompact,
135 gmx_vsite_t *vsite, gmx_constr_t constr,
136 int stepout, t_inputrec *ir,
137 gmx_mtop_t *top_global,
139 t_state *state_global,
141 t_nrnb *nrnb, gmx_wallcycle_t wcycle,
142 gmx_edsam_t ed, t_forcerec *fr,
143 int repl_ex_nst, int repl_ex_nex, int repl_ex_seed, gmx_membed_t membed,
144 real cpt_period, real max_hours,
145 const char *deviceOptions,
147 gmx_runtime_t *runtime)
150 gmx_large_int_t step, step_rel;
152 double t, t0, lam0[efptNR];
153 gmx_bool bGStatEveryStep, bGStat, bCalcVir, bCalcEner;
154 gmx_bool bNS, bNStList, bSimAnn, bStopCM, bRerunMD, bNotLastFrame = FALSE,
155 bFirstStep, bStateFromCP, bStateFromTPX, bInitStep, bLastStep,
156 bBornRadii, bStartingFromCpt;
157 gmx_bool bDoDHDL = FALSE, bDoFEP = FALSE, bDoExpanded = FALSE;
158 gmx_bool do_ene, do_log, do_verbose, bRerunWarnNoV = TRUE,
159 bForceUpdate = FALSE, bCPT;
161 gmx_bool bMasterState;
162 int force_flags, cglo_flags;
163 tensor force_vir, shake_vir, total_vir, tmp_vir, pres;
168 t_state *bufstate = NULL;
169 matrix *scale_tot, pcoupl_mu, M, ebox;
172 gmx_repl_ex_t repl_ex = NULL;
175 t_mdebin *mdebin = NULL;
176 df_history_t df_history;
177 t_state *state = NULL;
178 rvec *f_global = NULL;
181 gmx_enerdata_t *enerd;
183 gmx_global_stat_t gstat;
184 gmx_update_t upd = NULL;
185 t_graph *graph = NULL;
187 gmx_rng_t mcrng = NULL;
189 gmx_groups_t *groups;
190 gmx_ekindata_t *ekind, *ekind_save;
191 gmx_shellfc_t shellfc;
192 int count, nconverged = 0;
195 gmx_bool bIonize = FALSE;
196 gmx_bool bTCR = FALSE, bConverged = TRUE, bOK, bSumEkinhOld, bExchanged;
198 gmx_bool bResetCountersHalfMaxH = FALSE;
199 gmx_bool bVV, bIterativeCase, bFirstIterate, bTemp, bPres, bTrotter;
200 gmx_bool bUpdateDoLR;
201 real mu_aver = 0, dvdl;
202 int a0, a1, gnx = 0, ii;
203 atom_id *grpindex = NULL;
205 t_coupl_rec *tcr = NULL;
206 rvec *xcopy = NULL, *vcopy = NULL, *cbuf = NULL;
207 matrix boxcopy = {{0}}, lastbox;
209 real fom, oldfom, veta_save, pcurr, scalevir, tracevir;
216 real saved_conserved_quantity = 0;
221 char sbuf[STEPSTRSIZE], sbuf2[STEPSTRSIZE];
222 int handled_stop_condition = gmx_stop_cond_none; /* compare to get_stop_condition*/
223 gmx_iterate_t iterate;
224 gmx_large_int_t multisim_nsteps = -1; /* number of steps to do before first multisim
225 simulation stops. If equal to zero, don't
226 communicate any more between multisims.*/
227 /* PME load balancing data for GPU kernels */
228 pme_load_balancing_t pme_loadbal = NULL;
230 gmx_bool bPMETuneTry = FALSE, bPMETuneRunning = FALSE;
233 /* Temporary addition for FAHCORE checkpointing */
237 /* Check for special mdrun options */
238 bRerunMD = (Flags & MD_RERUN);
239 bIonize = (Flags & MD_IONIZE);
240 bFFscan = (Flags & MD_FFSCAN);
241 bAppend = (Flags & MD_APPENDFILES);
242 if (Flags & MD_RESETCOUNTERSHALFWAY)
246 /* Signal to reset the counters half the simulation steps. */
247 wcycle_set_reset_counters(wcycle, ir->nsteps/2);
249 /* Signal to reset the counters halfway the simulation time. */
250 bResetCountersHalfMaxH = (max_hours > 0);
253 /* md-vv uses averaged full step velocities for T-control
254 md-vv-avek uses averaged half step velocities for T-control (but full step ekin for P control)
255 md uses averaged half step kinetic energies to determine temperature unless defined otherwise by GMX_EKIN_AVE_VEL; */
257 if (bVV) /* to store the initial velocities while computing virial */
259 snew(cbuf, top_global->natoms);
261 /* all the iteratative cases - only if there are constraints */
262 bIterativeCase = ((IR_NPH_TROTTER(ir) || IR_NPT_TROTTER(ir)) && (constr) && (!bRerunMD));
263 gmx_iterate_init(&iterate, FALSE); /* The default value of iterate->bIterationActive is set to
264 false in this step. The correct value, true or false,
265 is set at each step, as it depends on the frequency of temperature
266 and pressure control.*/
267 bTrotter = (bVV && (IR_NPT_TROTTER(ir) || IR_NPH_TROTTER(ir) || IR_NVT_TROTTER(ir)));
271 /* Since we don't know if the frames read are related in any way,
272 * rebuild the neighborlist at every step.
275 ir->nstcalcenergy = 1;
279 check_ir_old_tpx_versions(cr, fplog, ir, top_global);
281 nstglobalcomm = check_nstglobalcomm(fplog, cr, nstglobalcomm, ir);
282 bGStatEveryStep = (nstglobalcomm == 1);
284 if (!bGStatEveryStep && ir->nstlist == -1 && fplog != NULL)
287 "To reduce the energy communication with nstlist = -1\n"
288 "the neighbor list validity should not be checked at every step,\n"
289 "this means that exact integration is not guaranteed.\n"
290 "The neighbor list validity is checked after:\n"
291 " <n.list life time> - 2*std.dev.(n.list life time) steps.\n"
292 "In most cases this will result in exact integration.\n"
293 "This reduces the energy communication by a factor of 2 to 3.\n"
294 "If you want less energy communication, set nstlist > 3.\n\n");
297 if (bRerunMD || bFFscan)
301 groups = &top_global->groups;
304 init_md(fplog, cr, ir, oenv, &t, &t0, state_global->lambda,
305 &(state_global->fep_state), lam0,
306 nrnb, top_global, &upd,
307 nfile, fnm, &outf, &mdebin,
308 force_vir, shake_vir, mu_tot, &bSimAnn, &vcm, state_global, Flags);
310 clear_mat(total_vir);
312 /* Energy terms and groups */
314 init_enerdata(top_global->groups.grps[egcENER].nr, ir->fepvals->n_lambda,
316 if (DOMAINDECOMP(cr))
322 snew(f, top_global->natoms);
325 /* lambda Monte carlo random number generator */
328 mcrng = gmx_rng_init(ir->expandedvals->lmc_seed);
330 /* copy the state into df_history */
331 copy_df_history(&df_history, &state_global->dfhist);
333 /* Kinetic energy data */
335 init_ekindata(fplog, top_global, &(ir->opts), ekind);
336 /* needed for iteration of constraints */
338 init_ekindata(fplog, top_global, &(ir->opts), ekind_save);
339 /* Copy the cos acceleration to the groups struct */
340 ekind->cosacc.cos_accel = ir->cos_accel;
342 gstat = global_stat_init(ir);
345 /* Check for polarizable models and flexible constraints */
346 shellfc = init_shell_flexcon(fplog,
347 top_global, n_flexible_constraints(constr),
348 (ir->bContinuation ||
349 (DOMAINDECOMP(cr) && !MASTER(cr))) ?
350 NULL : state_global->x);
354 #ifdef GMX_THREAD_MPI
355 tMPI_Thread_mutex_lock(&deform_init_box_mutex);
357 set_deform_reference_box(upd,
358 deform_init_init_step_tpx,
359 deform_init_box_tpx);
360 #ifdef GMX_THREAD_MPI
361 tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
366 double io = compute_io(ir, top_global->natoms, groups, mdebin->ebin->nener, 1);
367 if ((io > 2000) && MASTER(cr))
370 "\nWARNING: This run will generate roughly %.0f Mb of data\n\n",
375 if (DOMAINDECOMP(cr))
377 top = dd_init_local_top(top_global);
380 dd_init_local_state(cr->dd, state_global, state);
382 if (DDMASTER(cr->dd) && ir->nstfout)
384 snew(f_global, state_global->natoms);
391 /* Initialize the particle decomposition and split the topology */
392 top = split_system(fplog, top_global, ir, cr);
394 pd_cg_range(cr, &fr->cg0, &fr->hcg);
395 pd_at_range(cr, &a0, &a1);
399 top = gmx_mtop_generate_local_top(top_global, ir);
402 a1 = top_global->natoms;
405 forcerec_set_excl_load(fr, top, cr);
407 state = partdec_init_local_state(cr, state_global);
410 atoms2md(top_global, ir, 0, NULL, a0, a1-a0, mdatoms);
414 set_vsite_top(vsite, top, mdatoms, cr);
417 if (ir->ePBC != epbcNONE && !fr->bMolPBC)
419 graph = mk_graph(fplog, &(top->idef), 0, top_global->natoms, FALSE, FALSE);
424 make_local_shells(cr, mdatoms, shellfc);
427 init_bonded_thread_force_reduction(fr, &top->idef);
429 if (ir->pull && PAR(cr))
431 dd_make_local_pull_groups(NULL, ir->pull, mdatoms);
435 if (DOMAINDECOMP(cr))
437 /* Distribute the charge groups over the nodes from the master node */
438 dd_partition_system(fplog, ir->init_step, cr, TRUE, 1,
439 state_global, top_global, ir,
440 state, &f, mdatoms, top, fr,
441 vsite, shellfc, constr,
442 nrnb, wcycle, FALSE);
446 update_mdatoms(mdatoms, state->lambda[efptMASS]);
448 if (opt2bSet("-cpi", nfile, fnm))
450 bStateFromCP = gmx_fexist_master(opt2fn_master("-cpi", nfile, fnm, cr), cr);
454 bStateFromCP = FALSE;
461 /* Update mdebin with energy history if appending to output files */
462 if (Flags & MD_APPENDFILES)
464 restore_energyhistory_from_state(mdebin, &state_global->enerhist);
468 /* We might have read an energy history from checkpoint,
469 * free the allocated memory and reset the counts.
471 done_energyhistory(&state_global->enerhist);
472 init_energyhistory(&state_global->enerhist);
475 /* Set the initial energy history in state by updating once */
476 update_energyhistory(&state_global->enerhist, mdebin);
479 if ((state->flags & (1<<estLD_RNG)) && (Flags & MD_READ_RNG))
481 /* Set the random state if we read a checkpoint file */
482 set_stochd_state(upd, state);
485 if (state->flags & (1<<estMC_RNG))
487 set_mc_state(mcrng, state);
490 /* Initialize constraints */
493 if (!DOMAINDECOMP(cr))
495 set_constraints(constr, top, ir, mdatoms, cr);
499 /* Check whether we have to GCT stuff */
500 bTCR = ftp2bSet(efGCT, nfile, fnm);
505 fprintf(stderr, "Will do General Coupling Theory!\n");
507 gnx = top_global->mols.nr;
509 for (i = 0; (i < gnx); i++)
517 /* We need to be sure replica exchange can only occur
518 * when the energies are current */
519 check_nst_param(fplog, cr, "nstcalcenergy", ir->nstcalcenergy,
520 "repl_ex_nst", &repl_ex_nst);
521 /* This check needs to happen before inter-simulation
522 * signals are initialized, too */
524 if (repl_ex_nst > 0 && MASTER(cr))
526 repl_ex = init_replica_exchange(fplog, cr->ms, state_global, ir,
527 repl_ex_nst, repl_ex_nex, repl_ex_seed);
530 /* PME tuning is only supported with GPUs or PME nodes and not with rerun.
531 * With perturbed charges with soft-core we should not change the cut-off.
533 if ((Flags & MD_TUNEPME) &&
534 EEL_PME(fr->eeltype) &&
535 ( (fr->cutoff_scheme == ecutsVERLET && fr->nbv->bUseGPU) || !(cr->duty & DUTY_PME)) &&
536 !(ir->efep != efepNO && mdatoms->nChargePerturbed > 0 && ir->fepvals->bScCoul) &&
539 pme_loadbal_init(&pme_loadbal, ir, state->box, fr->ic, fr->pmedata);
541 if (cr->duty & DUTY_PME)
543 /* Start tuning right away, as we can't measure the load */
544 bPMETuneRunning = TRUE;
548 /* Separate PME nodes, we can measure the PP/PME load balance */
553 if (!ir->bContinuation && !bRerunMD)
555 if (mdatoms->cFREEZE && (state->flags & (1<<estV)))
557 /* Set the velocities of frozen particles to zero */
558 for (i = mdatoms->start; i < mdatoms->start+mdatoms->homenr; i++)
560 for (m = 0; m < DIM; m++)
562 if (ir->opts.nFreeze[mdatoms->cFREEZE[i]][m])
572 /* Constrain the initial coordinates and velocities */
573 do_constrain_first(fplog, constr, ir, mdatoms, state, f,
574 graph, cr, nrnb, fr, top, shake_vir);
578 /* Construct the virtual sites for the initial configuration */
579 construct_vsites(fplog, vsite, state->x, nrnb, ir->delta_t, NULL,
580 top->idef.iparams, top->idef.il,
581 fr->ePBC, fr->bMolPBC, graph, cr, state->box);
587 /* set free energy calculation frequency as the minimum of nstdhdl, nstexpanded, and nstrepl_ex_nst*/
588 nstfep = ir->fepvals->nstdhdl;
589 if (ir->bExpanded && (nstfep > ir->expandedvals->nstexpanded))
591 nstfep = ir->expandedvals->nstexpanded;
593 if (repl_ex_nst > 0 && nstfep > repl_ex_nst)
595 nstfep = repl_ex_nst;
598 /* I'm assuming we need global communication the first time! MRS */
599 cglo_flags = (CGLO_TEMPERATURE | CGLO_GSTAT
600 | ((ir->comm_mode != ecmNO) ? CGLO_STOPCM : 0)
601 | (bVV ? CGLO_PRESSURE : 0)
602 | (bVV ? CGLO_CONSTRAINT : 0)
603 | (bRerunMD ? CGLO_RERUNMD : 0)
604 | ((Flags & MD_READ_EKIN) ? CGLO_READEKIN : 0));
606 bSumEkinhOld = FALSE;
607 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
608 NULL, enerd, force_vir, shake_vir, total_vir, pres, mu_tot,
609 constr, NULL, FALSE, state->box,
610 top_global, &pcurr, top_global->natoms, &bSumEkinhOld, cglo_flags);
611 if (ir->eI == eiVVAK)
613 /* a second call to get the half step temperature initialized as well */
614 /* we do the same call as above, but turn the pressure off -- internally to
615 compute_globals, this is recognized as a velocity verlet half-step
616 kinetic energy calculation. This minimized excess variables, but
617 perhaps loses some logic?*/
619 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
620 NULL, enerd, force_vir, shake_vir, total_vir, pres, mu_tot,
621 constr, NULL, FALSE, state->box,
622 top_global, &pcurr, top_global->natoms, &bSumEkinhOld,
623 cglo_flags &~(CGLO_STOPCM | CGLO_PRESSURE));
626 /* Calculate the initial half step temperature, and save the ekinh_old */
627 if (!(Flags & MD_STARTFROMCPT))
629 for (i = 0; (i < ir->opts.ngtc); i++)
631 copy_mat(ekind->tcstat[i].ekinh, ekind->tcstat[i].ekinh_old);
636 enerd->term[F_TEMP] *= 2; /* result of averages being done over previous and current step,
637 and there is no previous step */
640 /* if using an iterative algorithm, we need to create a working directory for the state. */
643 bufstate = init_bufstate(state);
647 snew(xcopy, state->natoms);
648 snew(vcopy, state->natoms);
649 copy_rvecn(state->x, xcopy, 0, state->natoms);
650 copy_rvecn(state->v, vcopy, 0, state->natoms);
651 copy_mat(state->box, boxcopy);
654 /* need to make an initiation call to get the Trotter variables set, as well as other constants for non-trotter
655 temperature control */
656 trotter_seq = init_npt_vars(ir, state, &MassQ, bTrotter);
660 if (constr && !ir->bContinuation && ir->eConstrAlg == econtLINCS)
663 "RMS relative constraint deviation after constraining: %.2e\n",
664 constr_rmsd(constr, FALSE));
666 if (EI_STATE_VELOCITY(ir->eI))
668 fprintf(fplog, "Initial temperature: %g K\n", enerd->term[F_TEMP]);
672 fprintf(stderr, "starting md rerun '%s', reading coordinates from"
673 " input trajectory '%s'\n\n",
674 *(top_global->name), opt2fn("-rerun", nfile, fnm));
677 fprintf(stderr, "Calculated time to finish depends on nsteps from "
678 "run input file,\nwhich may not correspond to the time "
679 "needed to process input trajectory.\n\n");
685 fprintf(stderr, "starting mdrun '%s'\n",
686 *(top_global->name));
689 sprintf(tbuf, "%8.1f", (ir->init_step+ir->nsteps)*ir->delta_t);
693 sprintf(tbuf, "%s", "infinite");
695 if (ir->init_step > 0)
697 fprintf(stderr, "%s steps, %s ps (continuing from step %s, %8.1f ps).\n",
698 gmx_step_str(ir->init_step+ir->nsteps, sbuf), tbuf,
699 gmx_step_str(ir->init_step, sbuf2),
700 ir->init_step*ir->delta_t);
704 fprintf(stderr, "%s steps, %s ps.\n",
705 gmx_step_str(ir->nsteps, sbuf), tbuf);
708 fprintf(fplog, "\n");
711 /* Set and write start time */
712 runtime_start(runtime);
713 print_date_and_time(fplog, cr->nodeid, "Started mdrun", runtime);
714 wallcycle_start(wcycle, ewcRUN);
717 fprintf(fplog, "\n");
720 /* safest point to do file checkpointing is here. More general point would be immediately before integrator call */
722 chkpt_ret = fcCheckPointParallel( cr->nodeid,
726 gmx_fatal( 3, __FILE__, __LINE__, "Checkpoint error on step %d\n", 0 );
731 /***********************************************************
735 ************************************************************/
737 /* if rerunMD then read coordinates and velocities from input trajectory */
740 if (getenv("GMX_FORCE_UPDATE"))
748 bNotLastFrame = read_first_frame(oenv, &status,
749 opt2fn("-rerun", nfile, fnm),
750 &rerun_fr, TRX_NEED_X | TRX_READ_V);
751 if (rerun_fr.natoms != top_global->natoms)
754 "Number of atoms in trajectory (%d) does not match the "
755 "run input file (%d)\n",
756 rerun_fr.natoms, top_global->natoms);
758 if (ir->ePBC != epbcNONE)
762 gmx_fatal(FARGS, "Rerun trajectory frame step %d time %f does not contain a box, while pbc is used", rerun_fr.step, rerun_fr.time);
764 if (max_cutoff2(ir->ePBC, rerun_fr.box) < sqr(fr->rlistlong))
766 gmx_fatal(FARGS, "Rerun trajectory frame step %d time %f has too small box dimensions", rerun_fr.step, rerun_fr.time);
773 rerun_parallel_comm(cr, &rerun_fr, &bNotLastFrame);
776 if (ir->ePBC != epbcNONE)
778 /* Set the shift vectors.
779 * Necessary here when have a static box different from the tpr box.
781 calc_shifts(rerun_fr.box, fr->shift_vec);
785 /* loop over MD steps or if rerunMD to end of input trajectory */
787 /* Skip the first Nose-Hoover integration when we get the state from tpx */
788 bStateFromTPX = !bStateFromCP;
789 bInitStep = bFirstStep && (bStateFromTPX || bVV);
790 bStartingFromCpt = (Flags & MD_STARTFROMCPT) && bInitStep;
792 bSumEkinhOld = FALSE;
795 init_global_signals(&gs, cr, ir, repl_ex_nst);
797 step = ir->init_step;
800 if (ir->nstlist == -1)
802 init_nlistheuristics(&nlh, bGStatEveryStep, step);
805 if (MULTISIM(cr) && (repl_ex_nst <= 0 ))
807 /* check how many steps are left in other sims */
808 multisim_nsteps = get_multisim_nsteps(cr, ir->nsteps);
812 /* and stop now if we should */
813 bLastStep = (bRerunMD || (ir->nsteps >= 0 && step_rel > ir->nsteps) ||
814 ((multisim_nsteps >= 0) && (step_rel >= multisim_nsteps )));
815 while (!bLastStep || (bRerunMD && bNotLastFrame))
818 wallcycle_start(wcycle, ewcSTEP);
824 step = rerun_fr.step;
825 step_rel = step - ir->init_step;
838 bLastStep = (step_rel == ir->nsteps);
839 t = t0 + step*ir->delta_t;
842 if (ir->efep != efepNO || ir->bSimTemp)
844 /* find and set the current lambdas. If rerunning, we either read in a state, or a lambda value,
845 requiring different logic. */
847 set_current_lambdas(step, ir->fepvals, bRerunMD, &rerun_fr, state_global, state, lam0);
848 bDoDHDL = do_per_step(step, ir->fepvals->nstdhdl);
849 bDoFEP = (do_per_step(step, nstfep) && (ir->efep != efepNO));
850 bDoExpanded = (do_per_step(step, ir->expandedvals->nstexpanded) && (ir->bExpanded) && (step > 0));
855 update_annealing_target_temp(&(ir->opts), t);
860 if (!(DOMAINDECOMP(cr) && !MASTER(cr)))
862 for (i = 0; i < state_global->natoms; i++)
864 copy_rvec(rerun_fr.x[i], state_global->x[i]);
868 for (i = 0; i < state_global->natoms; i++)
870 copy_rvec(rerun_fr.v[i], state_global->v[i]);
875 for (i = 0; i < state_global->natoms; i++)
877 clear_rvec(state_global->v[i]);
881 fprintf(stderr, "\nWARNING: Some frames do not contain velocities.\n"
882 " Ekin, temperature and pressure are incorrect,\n"
883 " the virial will be incorrect when constraints are present.\n"
885 bRerunWarnNoV = FALSE;
889 copy_mat(rerun_fr.box, state_global->box);
890 copy_mat(state_global->box, state->box);
892 if (vsite && (Flags & MD_RERUN_VSITE))
894 if (DOMAINDECOMP(cr))
896 gmx_fatal(FARGS, "Vsite recalculation with -rerun is not implemented for domain decomposition, use particle decomposition");
900 /* Following is necessary because the graph may get out of sync
901 * with the coordinates if we only have every N'th coordinate set
903 mk_mshift(fplog, graph, fr->ePBC, state->box, state->x);
904 shift_self(graph, state->box, state->x);
906 construct_vsites(fplog, vsite, state->x, nrnb, ir->delta_t, state->v,
907 top->idef.iparams, top->idef.il,
908 fr->ePBC, fr->bMolPBC, graph, cr, state->box);
911 unshift_self(graph, state->box, state->x);
916 /* Stop Center of Mass motion */
917 bStopCM = (ir->comm_mode != ecmNO && do_per_step(step, ir->nstcomm));
919 /* Copy back starting coordinates in case we're doing a forcefield scan */
922 for (ii = 0; (ii < state->natoms); ii++)
924 copy_rvec(xcopy[ii], state->x[ii]);
925 copy_rvec(vcopy[ii], state->v[ii]);
927 copy_mat(boxcopy, state->box);
932 /* for rerun MD always do Neighbour Searching */
933 bNS = (bFirstStep || ir->nstlist != 0);
938 /* Determine whether or not to do Neighbour Searching and LR */
939 bNStList = (ir->nstlist > 0 && step % ir->nstlist == 0);
941 bNS = (bFirstStep || bExchanged || bNStList || bDoFEP ||
942 (ir->nstlist == -1 && nlh.nabnsb > 0));
944 if (bNS && ir->nstlist == -1)
946 set_nlistheuristics(&nlh, bFirstStep || bExchanged || bDoFEP, step);
950 /* check whether we should stop because another simulation has
954 if ( (multisim_nsteps >= 0) && (step_rel >= multisim_nsteps) &&
955 (multisim_nsteps != ir->nsteps) )
962 "Stopping simulation %d because another one has finished\n",
966 gs.sig[eglsCHKPT] = 1;
971 /* < 0 means stop at next step, > 0 means stop at next NS step */
972 if ( (gs.set[eglsSTOPCOND] < 0 ) ||
973 ( (gs.set[eglsSTOPCOND] > 0 ) && ( bNS || ir->nstlist == 0)) )
978 /* Determine whether or not to update the Born radii if doing GB */
979 bBornRadii = bFirstStep;
980 if (ir->implicit_solvent && (step % ir->nstgbradii == 0))
985 do_log = do_per_step(step, ir->nstlog) || bFirstStep || bLastStep;
986 do_verbose = bVerbose &&
987 (step % stepout == 0 || bFirstStep || bLastStep);
989 if (bNS && !(bFirstStep && ir->bContinuation && !bRerunMD))
997 bMasterState = FALSE;
998 /* Correct the new box if it is too skewed */
999 if (DYNAMIC_BOX(*ir))
1001 if (correct_box(fplog, step, state->box, graph))
1003 bMasterState = TRUE;
1006 if (DOMAINDECOMP(cr) && bMasterState)
1008 dd_collect_state(cr->dd, state, state_global);
1012 if (DOMAINDECOMP(cr))
1014 /* Repartition the domain decomposition */
1015 wallcycle_start(wcycle, ewcDOMDEC);
1016 dd_partition_system(fplog, step, cr,
1017 bMasterState, nstglobalcomm,
1018 state_global, top_global, ir,
1019 state, &f, mdatoms, top, fr,
1020 vsite, shellfc, constr,
1022 do_verbose && !bPMETuneRunning);
1023 wallcycle_stop(wcycle, ewcDOMDEC);
1024 /* If using an iterative integrator, reallocate space to match the decomposition */
1028 if (MASTER(cr) && do_log && !bFFscan)
1030 print_ebin_header(fplog, step, t, state->lambda[efptFEP]); /* can we improve the information printed here? */
1033 if (ir->efep != efepNO)
1035 update_mdatoms(mdatoms, state->lambda[efptMASS]);
1038 if ((bRerunMD && rerun_fr.bV) || bExchanged)
1041 /* We need the kinetic energy at minus the half step for determining
1042 * the full step kinetic energy and possibly for T-coupling.*/
1043 /* This may not be quite working correctly yet . . . . */
1044 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
1045 wcycle, enerd, NULL, NULL, NULL, NULL, mu_tot,
1046 constr, NULL, FALSE, state->box,
1047 top_global, &pcurr, top_global->natoms, &bSumEkinhOld,
1048 CGLO_RERUNMD | CGLO_GSTAT | CGLO_TEMPERATURE);
1050 clear_mat(force_vir);
1052 /* Ionize the atoms if necessary */
1055 ionize(fplog, oenv, mdatoms, top_global, t, ir, state->x, state->v,
1056 mdatoms->start, mdatoms->start+mdatoms->homenr, state->box, cr);
1059 /* Update force field in ffscan program */
1062 if (update_forcefield(fplog,
1064 mdatoms->nr, state->x, state->box))
1072 /* We write a checkpoint at this MD step when:
1073 * either at an NS step when we signalled through gs,
1074 * or at the last step (but not when we do not want confout),
1075 * but never at the first step or with rerun.
1077 bCPT = (((gs.set[eglsCHKPT] && (bNS || ir->nstlist == 0)) ||
1078 (bLastStep && (Flags & MD_CONFOUT))) &&
1079 step > ir->init_step && !bRerunMD);
1082 gs.set[eglsCHKPT] = 0;
1085 /* Determine the energy and pressure:
1086 * at nstcalcenergy steps and at energy output steps (set below).
1088 if (EI_VV(ir->eI) && (!bInitStep))
1090 /* for vv, the first half of the integration actually corresponds
1091 to the previous step. bCalcEner is only required to be evaluated on the 'next' step,
1092 but the virial needs to be calculated on both the current step and the 'next' step. Future
1093 reorganization may be able to get rid of one of the bCalcVir=TRUE steps. */
1095 bCalcEner = do_per_step(step-1, ir->nstcalcenergy);
1096 bCalcVir = bCalcEner ||
1097 (ir->epc != epcNO && (do_per_step(step, ir->nstpcouple) || do_per_step(step-1, ir->nstpcouple)));
1101 bCalcEner = do_per_step(step, ir->nstcalcenergy);
1102 bCalcVir = bCalcEner ||
1103 (ir->epc != epcNO && do_per_step(step, ir->nstpcouple));
1106 /* Do we need global communication ? */
1107 bGStat = (bCalcVir || bCalcEner || bStopCM ||
1108 do_per_step(step, nstglobalcomm) || (bVV && IR_NVT_TROTTER(ir) && do_per_step(step-1, nstglobalcomm)) ||
1109 (ir->nstlist == -1 && !bRerunMD && step >= nlh.step_nscheck));
1111 do_ene = (do_per_step(step, ir->nstenergy) || bLastStep);
1113 if (do_ene || do_log)
1120 /* these CGLO_ options remain the same throughout the iteration */
1121 cglo_flags = ((bRerunMD ? CGLO_RERUNMD : 0) |
1122 (bGStat ? CGLO_GSTAT : 0)
1125 force_flags = (GMX_FORCE_STATECHANGED |
1126 ((DYNAMIC_BOX(*ir) || bRerunMD) ? GMX_FORCE_DYNAMICBOX : 0) |
1127 GMX_FORCE_ALLFORCES |
1129 (bCalcVir ? GMX_FORCE_VIRIAL : 0) |
1130 (bCalcEner ? GMX_FORCE_ENERGY : 0) |
1131 (bDoFEP ? GMX_FORCE_DHDL : 0)
1136 if (do_per_step(step, ir->nstcalclr))
1138 force_flags |= GMX_FORCE_DO_LR;
1144 /* Now is the time to relax the shells */
1145 count = relax_shell_flexcon(fplog, cr, bVerbose, bFFscan ? step+1 : step,
1146 ir, bNS, force_flags,
1147 bStopCM, top, top_global,
1149 state, f, force_vir, mdatoms,
1150 nrnb, wcycle, graph, groups,
1151 shellfc, fr, bBornRadii, t, mu_tot,
1152 state->natoms, &bConverged, vsite,
1163 /* The coordinates (x) are shifted (to get whole molecules)
1165 * This is parallellized as well, and does communication too.
1166 * Check comments in sim_util.c
1168 do_force(fplog, cr, ir, step, nrnb, wcycle, top, top_global, groups,
1169 state->box, state->x, &state->hist,
1170 f, force_vir, mdatoms, enerd, fcd,
1171 state->lambda, graph,
1172 fr, vsite, mu_tot, t, outf->fp_field, ed, bBornRadii,
1173 (bNS ? GMX_FORCE_NS : 0) | force_flags);
1178 mu_aver = calc_mu_aver(cr, state->x, mdatoms->chargeA,
1179 mu_tot, &top_global->mols, mdatoms, gnx, grpindex);
1182 if (bTCR && bFirstStep)
1184 tcr = init_coupling(fplog, nfile, fnm, cr, fr, mdatoms, &(top->idef));
1185 fprintf(fplog, "Done init_coupling\n");
1189 if (bVV && !bStartingFromCpt && !bRerunMD)
1190 /* ############### START FIRST UPDATE HALF-STEP FOR VV METHODS############### */
1192 if (ir->eI == eiVV && bInitStep)
1194 /* if using velocity verlet with full time step Ekin,
1195 * take the first half step only to compute the
1196 * virial for the first step. From there,
1197 * revert back to the initial coordinates
1198 * so that the input is actually the initial step.
1200 copy_rvecn(state->v, cbuf, 0, state->natoms); /* should make this better for parallelizing? */
1204 /* this is for NHC in the Ekin(t+dt/2) version of vv */
1205 trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ1);
1208 /* If we are using twin-range interactions where the long-range component
1209 * is only evaluated every nstcalclr>1 steps, we should do a special update
1210 * step to combine the long-range forces on these steps.
1211 * For nstcalclr=1 this is not done, since the forces would have been added
1212 * directly to the short-range forces already.
1214 bUpdateDoLR = (fr->bTwinRange && do_per_step(step, ir->nstcalclr));
1216 update_coords(fplog, step, ir, mdatoms, state, fr->bMolPBC,
1217 f, bUpdateDoLR, fr->f_twin, fcd,
1218 ekind, M, wcycle, upd, bInitStep, etrtVELOCITY1,
1219 cr, nrnb, constr, &top->idef);
1221 if (bIterativeCase && do_per_step(step-1, ir->nstpcouple) && !bInitStep)
1223 gmx_iterate_init(&iterate, TRUE);
1225 /* for iterations, we save these vectors, as we will be self-consistently iterating
1228 /*#### UPDATE EXTENDED VARIABLES IN TROTTER FORMULATION */
1230 /* save the state */
1231 if (iterate.bIterationActive)
1233 copy_coupling_state(state, bufstate, ekind, ekind_save, &(ir->opts));
1236 bFirstIterate = TRUE;
1237 while (bFirstIterate || iterate.bIterationActive)
1239 if (iterate.bIterationActive)
1241 copy_coupling_state(bufstate, state, ekind_save, ekind, &(ir->opts));
1242 if (bFirstIterate && bTrotter)
1244 /* The first time through, we need a decent first estimate
1245 of veta(t+dt) to compute the constraints. Do
1246 this by computing the box volume part of the
1247 trotter integration at this time. Nothing else
1248 should be changed by this routine here. If
1249 !(first time), we start with the previous value
1252 veta_save = state->veta;
1253 trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ0);
1254 vetanew = state->veta;
1255 state->veta = veta_save;
1260 if (!bRerunMD || rerun_fr.bV || bForceUpdate) /* Why is rerun_fr.bV here? Unclear. */
1264 update_constraints(fplog, step, &dvdl, ir, ekind, mdatoms,
1265 state, fr->bMolPBC, graph, f,
1266 &top->idef, shake_vir, NULL,
1267 cr, nrnb, wcycle, upd, constr,
1268 bInitStep, TRUE, bCalcVir, vetanew);
1270 if (!bOK && !bFFscan)
1272 gmx_fatal(FARGS, "Constraint error: Shake, Lincs or Settle could not solve the constrains");
1278 /* Need to unshift here if a do_force has been
1279 called in the previous step */
1280 unshift_self(graph, state->box, state->x);
1283 /* if VV, compute the pressure and constraints */
1284 /* For VV2, we strictly only need this if using pressure
1285 * control, but we really would like to have accurate pressures
1287 * Think about ways around this in the future?
1288 * For now, keep this choice in comments.
1290 /*bPres = (ir->eI==eiVV || IR_NPT_TROTTER(ir)); */
1291 /*bTemp = ((ir->eI==eiVV &&(!bInitStep)) || (ir->eI==eiVVAK && IR_NPT_TROTTER(ir)));*/
1293 bTemp = ((ir->eI == eiVV && (!bInitStep)) || (ir->eI == eiVVAK));
1294 if (bCalcEner && ir->eI == eiVVAK) /*MRS: 7/9/2010 -- this still doesn't fix it?*/
1296 bSumEkinhOld = TRUE;
1298 /* for vv, the first half of the integration actually corresponds to the previous step.
1299 So we need information from the last step in the first half of the integration */
1300 if (bGStat || do_per_step(step-1, nstglobalcomm))
1302 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
1303 wcycle, enerd, force_vir, shake_vir, total_vir, pres, mu_tot,
1304 constr, NULL, FALSE, state->box,
1305 top_global, &pcurr, top_global->natoms, &bSumEkinhOld,
1308 | (bTemp ? CGLO_TEMPERATURE : 0)
1309 | (bPres ? CGLO_PRESSURE : 0)
1310 | (bPres ? CGLO_CONSTRAINT : 0)
1311 | ((iterate.bIterationActive) ? CGLO_ITERATE : 0)
1312 | (bFirstIterate ? CGLO_FIRSTITERATE : 0)
1315 /* explanation of above:
1316 a) We compute Ekin at the full time step
1317 if 1) we are using the AveVel Ekin, and it's not the
1318 initial step, or 2) if we are using AveEkin, but need the full
1319 time step kinetic energy for the pressure (always true now, since we want accurate statistics).
1320 b) If we are using EkinAveEkin for the kinetic energy for the temperature control, we still feed in
1321 EkinAveVel because it's needed for the pressure */
1323 /* temperature scaling and pressure scaling to produce the extended variables at t+dt */
1328 m_add(force_vir, shake_vir, total_vir); /* we need the un-dispersion corrected total vir here */
1329 trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ2);
1336 /* We need the kinetic energy at minus the half step for determining
1337 * the full step kinetic energy and possibly for T-coupling.*/
1338 /* This may not be quite working correctly yet . . . . */
1339 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
1340 wcycle, enerd, NULL, NULL, NULL, NULL, mu_tot,
1341 constr, NULL, FALSE, state->box,
1342 top_global, &pcurr, top_global->natoms, &bSumEkinhOld,
1343 CGLO_RERUNMD | CGLO_GSTAT | CGLO_TEMPERATURE);
1348 if (iterate.bIterationActive &&
1349 done_iterating(cr, fplog, step, &iterate, bFirstIterate,
1350 state->veta, &vetanew))
1354 bFirstIterate = FALSE;
1357 if (bTrotter && !bInitStep)
1359 enerd->term[F_DVDL_BONDED] += dvdl; /* only add after iterations */
1360 copy_mat(shake_vir, state->svir_prev);
1361 copy_mat(force_vir, state->fvir_prev);
1362 if (IR_NVT_TROTTER(ir) && ir->eI == eiVV)
1364 /* update temperature and kinetic energy now that step is over - this is the v(t+dt) point */
1365 enerd->term[F_TEMP] = sum_ekin(&(ir->opts), ekind, NULL, (ir->eI == eiVV), FALSE, FALSE);
1366 enerd->term[F_EKIN] = trace(ekind->ekin);
1369 /* if it's the initial step, we performed this first step just to get the constraint virial */
1370 if (bInitStep && ir->eI == eiVV)
1372 copy_rvecn(cbuf, state->v, 0, state->natoms);
1375 if (fr->bSepDVDL && fplog && do_log)
1377 fprintf(fplog, sepdvdlformat, "Constraint", 0.0, dvdl);
1379 enerd->term[F_DVDL_BONDED] += dvdl;
1382 /* MRS -- now done iterating -- compute the conserved quantity */
1385 saved_conserved_quantity = compute_conserved_from_auxiliary(ir, state, &MassQ);
1388 last_ekin = enerd->term[F_EKIN];
1390 if ((ir->eDispCorr != edispcEnerPres) && (ir->eDispCorr != edispcAllEnerPres))
1392 saved_conserved_quantity -= enerd->term[F_DISPCORR];
1394 /* sum up the foreign energy and dhdl terms for vv. currently done every step so that dhdl is correct in the .edr */
1397 sum_dhdl(enerd, state->lambda, ir->fepvals);
1401 /* ######## END FIRST UPDATE STEP ############## */
1402 /* ######## If doing VV, we now have v(dt) ###### */
1405 /* perform extended ensemble sampling in lambda - we don't
1406 actually move to the new state before outputting
1407 statistics, but if performing simulated tempering, we
1408 do update the velocities and the tau_t. */
1410 lamnew = ExpandedEnsembleDynamics(fplog, ir, enerd, state, &MassQ, &df_history, step, mcrng, state->v, mdatoms);
1412 /* ################## START TRAJECTORY OUTPUT ################# */
1414 /* Now we have the energies and forces corresponding to the
1415 * coordinates at time t. We must output all of this before
1417 * for RerunMD t is read from input trajectory
1420 if (do_per_step(step, ir->nstxout))
1422 mdof_flags |= MDOF_X;
1424 if (do_per_step(step, ir->nstvout))
1426 mdof_flags |= MDOF_V;
1428 if (do_per_step(step, ir->nstfout))
1430 mdof_flags |= MDOF_F;
1432 if (do_per_step(step, ir->nstxtcout))
1434 mdof_flags |= MDOF_XTC;
1438 mdof_flags |= MDOF_CPT;
1442 #if defined(GMX_FAHCORE) || defined(GMX_WRITELASTSTEP)
1445 /* Enforce writing positions and velocities at end of run */
1446 mdof_flags |= (MDOF_X | MDOF_V);
1452 fcReportProgress( ir->nsteps, step );
1455 /* sync bCPT and fc record-keeping */
1456 if (bCPT && MASTER(cr))
1458 fcRequestCheckPoint();
1462 if (mdof_flags != 0)
1464 wallcycle_start(wcycle, ewcTRAJ);
1467 if (state->flags & (1<<estLD_RNG))
1469 get_stochd_state(upd, state);
1471 if (state->flags & (1<<estMC_RNG))
1473 get_mc_state(mcrng, state);
1479 state_global->ekinstate.bUpToDate = FALSE;
1483 update_ekinstate(&state_global->ekinstate, ekind);
1484 state_global->ekinstate.bUpToDate = TRUE;
1486 update_energyhistory(&state_global->enerhist, mdebin);
1487 if (ir->efep != efepNO || ir->bSimTemp)
1489 state_global->fep_state = state->fep_state; /* MRS: seems kludgy. The code should be
1490 structured so this isn't necessary.
1491 Note this reassignment is only necessary
1492 for single threads.*/
1493 copy_df_history(&state_global->dfhist, &df_history);
1497 write_traj(fplog, cr, outf, mdof_flags, top_global,
1498 step, t, state, state_global, f, f_global, &n_xtc, &x_xtc);
1505 if (bLastStep && step_rel == ir->nsteps &&
1506 (Flags & MD_CONFOUT) && MASTER(cr) &&
1507 !bRerunMD && !bFFscan)
1509 /* x and v have been collected in write_traj,
1510 * because a checkpoint file will always be written
1513 fprintf(stderr, "\nWriting final coordinates.\n");
1516 /* Make molecules whole only for confout writing */
1517 do_pbc_mtop(fplog, ir->ePBC, state->box, top_global, state_global->x);
1519 write_sto_conf_mtop(ftp2fn(efSTO, nfile, fnm),
1520 *top_global->name, top_global,
1521 state_global->x, state_global->v,
1522 ir->ePBC, state->box);
1525 wallcycle_stop(wcycle, ewcTRAJ);
1528 /* kludge -- virial is lost with restart for NPT control. Must restart */
1529 if (bStartingFromCpt && bVV)
1531 copy_mat(state->svir_prev, shake_vir);
1532 copy_mat(state->fvir_prev, force_vir);
1534 /* ################## END TRAJECTORY OUTPUT ################ */
1536 /* Determine the wallclock run time up till now */
1537 run_time = gmx_gettime() - (double)runtime->real;
1539 /* Check whether everything is still allright */
1540 if (((int)gmx_get_stop_condition() > handled_stop_condition)
1541 #ifdef GMX_THREAD_MPI
1546 /* this is just to make gs.sig compatible with the hack
1547 of sending signals around by MPI_Reduce together with
1549 if (gmx_get_stop_condition() == gmx_stop_cond_next_ns)
1551 gs.sig[eglsSTOPCOND] = 1;
1553 if (gmx_get_stop_condition() == gmx_stop_cond_next)
1555 gs.sig[eglsSTOPCOND] = -1;
1557 /* < 0 means stop at next step, > 0 means stop at next NS step */
1561 "\n\nReceived the %s signal, stopping at the next %sstep\n\n",
1562 gmx_get_signal_name(),
1563 gs.sig[eglsSTOPCOND] == 1 ? "NS " : "");
1567 "\n\nReceived the %s signal, stopping at the next %sstep\n\n",
1568 gmx_get_signal_name(),
1569 gs.sig[eglsSTOPCOND] == 1 ? "NS " : "");
1571 handled_stop_condition = (int)gmx_get_stop_condition();
1573 else if (MASTER(cr) && (bNS || ir->nstlist <= 0) &&
1574 (max_hours > 0 && run_time > max_hours*60.0*60.0*0.99) &&
1575 gs.sig[eglsSTOPCOND] == 0 && gs.set[eglsSTOPCOND] == 0)
1577 /* Signal to terminate the run */
1578 gs.sig[eglsSTOPCOND] = 1;
1581 fprintf(fplog, "\nStep %s: Run time exceeded %.3f hours, will terminate the run\n", gmx_step_str(step, sbuf), max_hours*0.99);
1583 fprintf(stderr, "\nStep %s: Run time exceeded %.3f hours, will terminate the run\n", gmx_step_str(step, sbuf), max_hours*0.99);
1586 if (bResetCountersHalfMaxH && MASTER(cr) &&
1587 run_time > max_hours*60.0*60.0*0.495)
1589 gs.sig[eglsRESETCOUNTERS] = 1;
1592 if (ir->nstlist == -1 && !bRerunMD)
1594 /* When bGStatEveryStep=FALSE, global_stat is only called
1595 * when we check the atom displacements, not at NS steps.
1596 * This means that also the bonded interaction count check is not
1597 * performed immediately after NS. Therefore a few MD steps could
1598 * be performed with missing interactions.
1599 * But wrong energies are never written to file,
1600 * since energies are only written after global_stat
1603 if (step >= nlh.step_nscheck)
1605 nlh.nabnsb = natoms_beyond_ns_buffer(ir, fr, &top->cgs,
1606 nlh.scale_tot, state->x);
1610 /* This is not necessarily true,
1611 * but step_nscheck is determined quite conservatively.
1617 /* In parallel we only have to check for checkpointing in steps
1618 * where we do global communication,
1619 * otherwise the other nodes don't know.
1621 if (MASTER(cr) && ((bGStat || !PAR(cr)) &&
1624 run_time >= nchkpt*cpt_period*60.0)) &&
1625 gs.set[eglsCHKPT] == 0)
1627 gs.sig[eglsCHKPT] = 1;
1630 /* at the start of step, randomize or scale the velocities (trotter done elsewhere) */
1635 update_tcouple(fplog, step, ir, state, ekind, wcycle, upd, &MassQ, mdatoms);
1637 if (ETC_ANDERSEN(ir->etc)) /* keep this outside of update_tcouple because of the extra info required to pass */
1639 gmx_bool bIfRandomize;
1640 bIfRandomize = update_randomize_velocities(ir, step, mdatoms, state, upd, &top->idef, constr);
1641 /* if we have constraints, we have to remove the kinetic energy parallel to the bonds */
1642 if (constr && bIfRandomize)
1644 update_constraints(fplog, step, &dvdl, ir, ekind, mdatoms,
1645 state, fr->bMolPBC, graph, f,
1646 &top->idef, tmp_vir, NULL,
1647 cr, nrnb, wcycle, upd, constr,
1648 bInitStep, TRUE, bCalcVir, vetanew);
1653 if (bIterativeCase && do_per_step(step, ir->nstpcouple))
1655 gmx_iterate_init(&iterate, TRUE);
1656 /* for iterations, we save these vectors, as we will be redoing the calculations */
1657 copy_coupling_state(state, bufstate, ekind, ekind_save, &(ir->opts));
1660 bFirstIterate = TRUE;
1661 while (bFirstIterate || iterate.bIterationActive)
1663 /* We now restore these vectors to redo the calculation with improved extended variables */
1664 if (iterate.bIterationActive)
1666 copy_coupling_state(bufstate, state, ekind_save, ekind, &(ir->opts));
1669 /* We make the decision to break or not -after- the calculation of Ekin and Pressure,
1670 so scroll down for that logic */
1672 /* ######### START SECOND UPDATE STEP ################# */
1673 /* Box is changed in update() when we do pressure coupling,
1674 * but we should still use the old box for energy corrections and when
1675 * writing it to the energy file, so it matches the trajectory files for
1676 * the same timestep above. Make a copy in a separate array.
1678 copy_mat(state->box, lastbox);
1681 if (!(bRerunMD && !rerun_fr.bV && !bForceUpdate))
1683 wallcycle_start(wcycle, ewcUPDATE);
1685 /* UPDATE PRESSURE VARIABLES IN TROTTER FORMULATION WITH CONSTRAINTS */
1688 if (iterate.bIterationActive)
1696 /* we use a new value of scalevir to converge the iterations faster */
1697 scalevir = tracevir/trace(shake_vir);
1699 msmul(shake_vir, scalevir, shake_vir);
1700 m_add(force_vir, shake_vir, total_vir);
1701 clear_mat(shake_vir);
1703 trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ3);
1704 /* We can only do Berendsen coupling after we have summed
1705 * the kinetic energy or virial. Since this happens
1706 * in global_state after update, we should only do it at
1707 * step % nstlist = 1 with bGStatEveryStep=FALSE.
1712 update_tcouple(fplog, step, ir, state, ekind, wcycle, upd, &MassQ, mdatoms);
1713 update_pcouple(fplog, step, ir, state, pcoupl_mu, M, wcycle,
1719 bUpdateDoLR = (fr->bTwinRange && do_per_step(step, ir->nstcalclr));
1721 /* velocity half-step update */
1722 update_coords(fplog, step, ir, mdatoms, state, fr->bMolPBC, f,
1723 bUpdateDoLR, fr->f_twin, fcd,
1724 ekind, M, wcycle, upd, FALSE, etrtVELOCITY2,
1725 cr, nrnb, constr, &top->idef);
1728 /* Above, initialize just copies ekinh into ekin,
1729 * it doesn't copy position (for VV),
1730 * and entire integrator for MD.
1733 if (ir->eI == eiVVAK)
1735 copy_rvecn(state->x, cbuf, 0, state->natoms);
1737 bUpdateDoLR = (fr->bTwinRange && do_per_step(step, ir->nstcalclr));
1739 update_coords(fplog, step, ir, mdatoms, state, fr->bMolPBC, f,
1740 bUpdateDoLR, fr->f_twin, fcd,
1741 ekind, M, wcycle, upd, bInitStep, etrtPOSITION, cr, nrnb, constr, &top->idef);
1742 wallcycle_stop(wcycle, ewcUPDATE);
1744 update_constraints(fplog, step, &dvdl, ir, ekind, mdatoms, state,
1745 fr->bMolPBC, graph, f,
1746 &top->idef, shake_vir, force_vir,
1747 cr, nrnb, wcycle, upd, constr,
1748 bInitStep, FALSE, bCalcVir, state->veta);
1750 if (ir->eI == eiVVAK)
1752 /* erase F_EKIN and F_TEMP here? */
1753 /* just compute the kinetic energy at the half step to perform a trotter step */
1754 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
1755 wcycle, enerd, force_vir, shake_vir, total_vir, pres, mu_tot,
1756 constr, NULL, FALSE, lastbox,
1757 top_global, &pcurr, top_global->natoms, &bSumEkinhOld,
1758 cglo_flags | CGLO_TEMPERATURE
1760 wallcycle_start(wcycle, ewcUPDATE);
1761 trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ4);
1762 /* now we know the scaling, we can compute the positions again */
1763 copy_rvecn(cbuf, state->x, 0, state->natoms);
1765 bUpdateDoLR = (fr->bTwinRange && do_per_step(step, ir->nstcalclr));
1767 update_coords(fplog, step, ir, mdatoms, state, fr->bMolPBC, f,
1768 bUpdateDoLR, fr->f_twin, fcd,
1769 ekind, M, wcycle, upd, bInitStep, etrtPOSITION, cr, nrnb, constr, &top->idef);
1770 wallcycle_stop(wcycle, ewcUPDATE);
1772 /* do we need an extra constraint here? just need to copy out of state->v to upd->xp? */
1773 /* are the small terms in the shake_vir here due
1774 * to numerical errors, or are they important
1775 * physically? I'm thinking they are just errors, but not completely sure.
1776 * For now, will call without actually constraining, constr=NULL*/
1777 update_constraints(fplog, step, &dvdl, ir, ekind, mdatoms,
1778 state, fr->bMolPBC, graph, f,
1779 &top->idef, tmp_vir, force_vir,
1780 cr, nrnb, wcycle, upd, NULL,
1781 bInitStep, FALSE, bCalcVir,
1784 if (!bOK && !bFFscan)
1786 gmx_fatal(FARGS, "Constraint error: Shake, Lincs or Settle could not solve the constrains");
1789 if (fr->bSepDVDL && fplog && do_log)
1791 fprintf(fplog, sepdvdlformat, "Constraint dV/dl", 0.0, dvdl);
1793 enerd->term[F_DVDL_BONDED] += dvdl;
1797 /* Need to unshift here */
1798 unshift_self(graph, state->box, state->x);
1803 wallcycle_start(wcycle, ewcVSITECONSTR);
1806 shift_self(graph, state->box, state->x);
1808 construct_vsites(fplog, vsite, state->x, nrnb, ir->delta_t, state->v,
1809 top->idef.iparams, top->idef.il,
1810 fr->ePBC, fr->bMolPBC, graph, cr, state->box);
1814 unshift_self(graph, state->box, state->x);
1816 wallcycle_stop(wcycle, ewcVSITECONSTR);
1819 /* ############## IF NOT VV, Calculate globals HERE, also iterate constraints ############ */
1820 /* With Leap-Frog we can skip compute_globals at
1821 * non-communication steps, but we need to calculate
1822 * the kinetic energy one step before communication.
1824 if (bGStat || (!EI_VV(ir->eI) && do_per_step(step+1, nstglobalcomm)))
1826 if (ir->nstlist == -1 && bFirstIterate)
1828 gs.sig[eglsNABNSB] = nlh.nabnsb;
1830 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
1831 wcycle, enerd, force_vir, shake_vir, total_vir, pres, mu_tot,
1833 bFirstIterate ? &gs : NULL,
1834 (step_rel % gs.nstms == 0) &&
1835 (multisim_nsteps < 0 || (step_rel < multisim_nsteps)),
1837 top_global, &pcurr, top_global->natoms, &bSumEkinhOld,
1839 | (!EI_VV(ir->eI) || bRerunMD ? CGLO_ENERGY : 0)
1840 | (!EI_VV(ir->eI) && bStopCM ? CGLO_STOPCM : 0)
1841 | (!EI_VV(ir->eI) ? CGLO_TEMPERATURE : 0)
1842 | (!EI_VV(ir->eI) || bRerunMD ? CGLO_PRESSURE : 0)
1843 | (iterate.bIterationActive ? CGLO_ITERATE : 0)
1844 | (bFirstIterate ? CGLO_FIRSTITERATE : 0)
1847 if (ir->nstlist == -1 && bFirstIterate)
1849 nlh.nabnsb = gs.set[eglsNABNSB];
1850 gs.set[eglsNABNSB] = 0;
1853 /* bIterate is set to keep it from eliminating the old ekin kinetic energy terms */
1854 /* ############# END CALC EKIN AND PRESSURE ################# */
1856 /* Note: this is OK, but there are some numerical precision issues with using the convergence of
1857 the virial that should probably be addressed eventually. state->veta has better properties,
1858 but what we actually need entering the new cycle is the new shake_vir value. Ideally, we could
1859 generate the new shake_vir, but test the veta value for convergence. This will take some thought. */
1861 if (iterate.bIterationActive &&
1862 done_iterating(cr, fplog, step, &iterate, bFirstIterate,
1863 trace(shake_vir), &tracevir))
1867 bFirstIterate = FALSE;
1870 /* only add constraint dvdl after constraints */
1871 enerd->term[F_DVDL_BONDED] += dvdl;
1872 if (!bVV || bRerunMD)
1874 /* sum up the foreign energy and dhdl terms for md and sd. currently done every step so that dhdl is correct in the .edr */
1875 sum_dhdl(enerd, state->lambda, ir->fepvals);
1877 update_box(fplog, step, ir, mdatoms, state, graph, f,
1878 ir->nstlist == -1 ? &nlh.scale_tot : NULL, pcoupl_mu, nrnb, wcycle, upd, bInitStep, FALSE);
1880 /* ################# END UPDATE STEP 2 ################# */
1881 /* #### We now have r(t+dt) and v(t+dt/2) ############# */
1883 /* The coordinates (x) were unshifted in update */
1884 if (bFFscan && (shellfc == NULL || bConverged))
1886 if (print_forcefield(fplog, enerd->term, mdatoms->homenr,
1888 &(top_global->mols), mdatoms->massT, pres))
1892 fprintf(stderr, "\n");
1898 /* We will not sum ekinh_old,
1899 * so signal that we still have to do it.
1901 bSumEkinhOld = TRUE;
1906 /* Only do GCT when the relaxation of shells (minimization) has converged,
1907 * otherwise we might be coupling to bogus energies.
1908 * In parallel we must always do this, because the other sims might
1912 /* Since this is called with the new coordinates state->x, I assume
1913 * we want the new box state->box too. / EL 20040121
1915 do_coupling(fplog, oenv, nfile, fnm, tcr, t, step, enerd->term, fr,
1917 mdatoms, &(top->idef), mu_aver,
1918 top_global->mols.nr, cr,
1919 state->box, total_vir, pres,
1920 mu_tot, state->x, f, bConverged);
1924 /* ######### BEGIN PREPARING EDR OUTPUT ########### */
1926 /* use the directly determined last velocity, not actually the averaged half steps */
1927 if (bTrotter && ir->eI == eiVV)
1929 enerd->term[F_EKIN] = last_ekin;
1931 enerd->term[F_ETOT] = enerd->term[F_EPOT] + enerd->term[F_EKIN];
1935 enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + saved_conserved_quantity;
1939 enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + compute_conserved_from_auxiliary(ir, state, &MassQ);
1941 /* Check for excessively large energies */
1945 real etot_max = 1e200;
1947 real etot_max = 1e30;
1949 if (fabs(enerd->term[F_ETOT]) > etot_max)
1951 fprintf(stderr, "Energy too large (%g), giving up\n",
1952 enerd->term[F_ETOT]);
1955 /* ######### END PREPARING EDR OUTPUT ########### */
1957 /* Time for performance */
1958 if (((step % stepout) == 0) || bLastStep)
1960 runtime_upd_proc(runtime);
1966 gmx_bool do_dr, do_or;
1968 if (fplog && do_log && bDoExpanded)
1970 /* only needed if doing expanded ensemble */
1971 PrintFreeEnergyInfoToFile(fplog, ir->fepvals, ir->expandedvals, ir->bSimTemp ? ir->simtempvals : NULL,
1972 &df_history, state->fep_state, ir->nstlog, step);
1974 if (!(bStartingFromCpt && (EI_VV(ir->eI))))
1978 upd_mdebin(mdebin, bDoDHDL, TRUE,
1979 t, mdatoms->tmass, enerd, state,
1980 ir->fepvals, ir->expandedvals, lastbox,
1981 shake_vir, force_vir, total_vir, pres,
1982 ekind, mu_tot, constr);
1986 upd_mdebin_step(mdebin);
1989 do_dr = do_per_step(step, ir->nstdisreout);
1990 do_or = do_per_step(step, ir->nstorireout);
1992 print_ebin(outf->fp_ene, do_ene, do_dr, do_or, do_log ? fplog : NULL,
1994 eprNORMAL, bCompact, mdebin, fcd, groups, &(ir->opts));
1996 if (ir->ePull != epullNO)
1998 pull_print_output(ir->pull, step, t);
2001 if (do_per_step(step, ir->nstlog))
2003 if (fflush(fplog) != 0)
2005 gmx_fatal(FARGS, "Cannot flush logfile - maybe you are out of disk space?");
2011 /* Have to do this part after outputting the logfile and the edr file */
2012 state->fep_state = lamnew;
2013 for (i = 0; i < efptNR; i++)
2015 state_global->lambda[i] = ir->fepvals->all_lambda[i][lamnew];
2018 /* Remaining runtime */
2019 if (MULTIMASTER(cr) && (do_verbose || gmx_got_usr_signal()) && !bPMETuneRunning)
2023 fprintf(stderr, "\n");
2025 print_time(stderr, runtime, step, ir, cr);
2028 /* Replica exchange */
2030 if ((repl_ex_nst > 0) && (step > 0) && !bLastStep &&
2031 do_per_step(step, repl_ex_nst))
2033 bExchanged = replica_exchange(fplog, cr, repl_ex,
2034 state_global, enerd,
2037 if (bExchanged && DOMAINDECOMP(cr))
2039 dd_partition_system(fplog, step, cr, TRUE, 1,
2040 state_global, top_global, ir,
2041 state, &f, mdatoms, top, fr,
2042 vsite, shellfc, constr,
2043 nrnb, wcycle, FALSE);
2049 bStartingFromCpt = FALSE;
2051 /* ####### SET VARIABLES FOR NEXT ITERATION IF THEY STILL NEED IT ###### */
2052 /* With all integrators, except VV, we need to retain the pressure
2053 * at the current step for coupling at the next step.
2055 if ((state->flags & (1<<estPRES_PREV)) &&
2057 (ir->nstpcouple > 0 && step % ir->nstpcouple == 0)))
2059 /* Store the pressure in t_state for pressure coupling
2060 * at the next MD step.
2062 copy_mat(pres, state->pres_prev);
2065 /* ####### END SET VARIABLES FOR NEXT ITERATION ###### */
2067 if ( (membed != NULL) && (!bLastStep) )
2069 rescale_membed(step_rel, membed, state_global->x);
2076 /* read next frame from input trajectory */
2077 bNotLastFrame = read_next_frame(oenv, status, &rerun_fr);
2082 rerun_parallel_comm(cr, &rerun_fr, &bNotLastFrame);
2086 if (!bRerunMD || !rerun_fr.bStep)
2088 /* increase the MD step number */
2093 cycles = wallcycle_stop(wcycle, ewcSTEP);
2094 if (DOMAINDECOMP(cr) && wcycle)
2096 dd_cycles_add(cr->dd, cycles, ddCyclStep);
2099 if (bPMETuneRunning || bPMETuneTry)
2101 /* PME grid + cut-off optimization with GPUs or PME nodes */
2103 /* Count the total cycles over the last steps */
2104 cycles_pmes += cycles;
2106 /* We can only switch cut-off at NS steps */
2107 if (step % ir->nstlist == 0)
2109 /* PME grid + cut-off optimization with GPUs or PME nodes */
2112 if (DDMASTER(cr->dd))
2114 /* PME node load is too high, start tuning */
2115 bPMETuneRunning = (dd_pme_f_ratio(cr->dd) >= 1.05);
2117 dd_bcast(cr->dd, sizeof(gmx_bool), &bPMETuneRunning);
2119 if (bPMETuneRunning || step_rel > ir->nstlist*50)
2121 bPMETuneTry = FALSE;
2124 if (bPMETuneRunning)
2126 /* init_step might not be a multiple of nstlist,
2127 * but the first cycle is always skipped anyhow.
2130 pme_load_balance(pme_loadbal, cr,
2131 (bVerbose && MASTER(cr)) ? stderr : NULL,
2133 ir, state, cycles_pmes,
2134 fr->ic, fr->nbv, &fr->pmedata,
2137 /* Update constants in forcerec/inputrec to keep them in sync with fr->ic */
2138 fr->ewaldcoeff = fr->ic->ewaldcoeff;
2139 fr->rlist = fr->ic->rlist;
2140 fr->rlistlong = fr->ic->rlistlong;
2141 fr->rcoulomb = fr->ic->rcoulomb;
2142 fr->rvdw = fr->ic->rvdw;
2148 if (step_rel == wcycle_get_reset_counters(wcycle) ||
2149 gs.set[eglsRESETCOUNTERS] != 0)
2151 /* Reset all the counters related to performance over the run */
2152 reset_all_counters(fplog, cr, step, &step_rel, ir, wcycle, nrnb, runtime,
2153 fr->nbv != NULL && fr->nbv->bUseGPU ? fr->nbv->cu_nbv : NULL);
2154 wcycle_set_reset_counters(wcycle, -1);
2155 if (!(cr->duty & DUTY_PME))
2157 /* Tell our PME node to reset its counters */
2158 gmx_pme_send_resetcounters(cr, step);
2160 /* Correct max_hours for the elapsed time */
2161 max_hours -= run_time/(60.0*60.0);
2162 bResetCountersHalfMaxH = FALSE;
2163 gs.set[eglsRESETCOUNTERS] = 0;
2167 /* End of main MD loop */
2171 runtime_end(runtime);
2173 if (bRerunMD && MASTER(cr))
2178 if (!(cr->duty & DUTY_PME))
2180 /* Tell the PME only node to finish */
2181 gmx_pme_send_finish(cr);
2186 if (ir->nstcalcenergy > 0 && !bRerunMD)
2188 print_ebin(outf->fp_ene, FALSE, FALSE, FALSE, fplog, step, t,
2189 eprAVER, FALSE, mdebin, fcd, groups, &(ir->opts));
2197 if (ir->nstlist == -1 && nlh.nns > 0 && fplog)
2199 fprintf(fplog, "Average neighborlist lifetime: %.1f steps, std.dev.: %.1f steps\n", nlh.s1/nlh.nns, sqrt(nlh.s2/nlh.nns - sqr(nlh.s1/nlh.nns)));
2200 fprintf(fplog, "Average number of atoms that crossed the half buffer length: %.1f\n\n", nlh.ab/nlh.nns);
2203 if (pme_loadbal != NULL)
2205 pme_loadbal_done(pme_loadbal, cr, fplog,
2206 fr->nbv != NULL && fr->nbv->bUseGPU);
2209 if (shellfc && fplog)
2211 fprintf(fplog, "Fraction of iterations that converged: %.2f %%\n",
2212 (nconverged*100.0)/step_rel);
2213 fprintf(fplog, "Average number of force evaluations per MD step: %.2f\n\n",
2217 if (repl_ex_nst > 0 && MASTER(cr))
2219 print_replica_exchange_statistics(fplog, repl_ex);
2222 runtime->nsteps_done = step_rel;