2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2011,2012,2013,2014, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
45 #include "thread_mpi/threads.h"
47 #include "gromacs/ewald/pme-load-balancing.h"
48 #include "gromacs/ewald/pme.h"
49 #include "gromacs/fileio/filenm.h"
50 #include "gromacs/fileio/mdoutf.h"
51 #include "gromacs/fileio/trajectory_writing.h"
52 #include "gromacs/fileio/trx.h"
53 #include "gromacs/fileio/trxio.h"
54 #include "gromacs/gmxpreprocess/compute_io.h"
55 #include "gromacs/imd/imd.h"
56 #include "gromacs/legacyheaders/bonded-threading.h"
57 #include "gromacs/legacyheaders/constr.h"
58 #include "gromacs/legacyheaders/domdec.h"
59 #include "gromacs/legacyheaders/domdec_network.h"
60 #include "gromacs/legacyheaders/ebin.h"
61 #include "gromacs/legacyheaders/force.h"
62 #include "gromacs/legacyheaders/md_logging.h"
63 #include "gromacs/legacyheaders/md_support.h"
64 #include "gromacs/legacyheaders/mdatoms.h"
65 #include "gromacs/legacyheaders/mdebin.h"
66 #include "gromacs/legacyheaders/mdrun.h"
67 #include "gromacs/legacyheaders/network.h"
68 #include "gromacs/legacyheaders/nrnb.h"
69 #include "gromacs/legacyheaders/ns.h"
70 #include "gromacs/legacyheaders/shellfc.h"
71 #include "gromacs/legacyheaders/sighandler.h"
72 #include "gromacs/legacyheaders/sim_util.h"
73 #include "gromacs/legacyheaders/tgroup.h"
74 #include "gromacs/legacyheaders/typedefs.h"
75 #include "gromacs/legacyheaders/update.h"
76 #include "gromacs/legacyheaders/vcm.h"
77 #include "gromacs/legacyheaders/vsite.h"
78 #include "gromacs/legacyheaders/types/commrec.h"
79 #include "gromacs/legacyheaders/types/constr.h"
80 #include "gromacs/legacyheaders/types/enums.h"
81 #include "gromacs/legacyheaders/types/fcdata.h"
82 #include "gromacs/legacyheaders/types/force_flags.h"
83 #include "gromacs/legacyheaders/types/forcerec.h"
84 #include "gromacs/legacyheaders/types/globsig.h"
85 #include "gromacs/legacyheaders/types/group.h"
86 #include "gromacs/legacyheaders/types/inputrec.h"
87 #include "gromacs/legacyheaders/types/interaction_const.h"
88 #include "gromacs/legacyheaders/types/iteratedconstraints.h"
89 #include "gromacs/legacyheaders/types/mdatom.h"
90 #include "gromacs/legacyheaders/types/membedt.h"
91 #include "gromacs/legacyheaders/types/nlistheuristics.h"
92 #include "gromacs/legacyheaders/types/nrnb.h"
93 #include "gromacs/legacyheaders/types/oenv.h"
94 #include "gromacs/legacyheaders/types/shellfc.h"
95 #include "gromacs/legacyheaders/types/state.h"
96 #include "gromacs/math/utilities.h"
97 #include "gromacs/math/vec.h"
98 #include "gromacs/math/vectypes.h"
99 #include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_data_mgmt.h"
100 #include "gromacs/pbcutil/mshift.h"
101 #include "gromacs/pbcutil/pbc.h"
102 #include "gromacs/pulling/pull.h"
103 #include "gromacs/swap/swapcoords.h"
104 #include "gromacs/timing/wallcycle.h"
105 #include "gromacs/timing/walltime_accounting.h"
106 #include "gromacs/topology/atoms.h"
107 #include "gromacs/topology/idef.h"
108 #include "gromacs/topology/mtop_util.h"
109 #include "gromacs/topology/topology.h"
110 #include "gromacs/utility/basedefinitions.h"
111 #include "gromacs/utility/cstringutil.h"
112 #include "gromacs/utility/fatalerror.h"
113 #include "gromacs/utility/real.h"
114 #include "gromacs/utility/smalloc.h"
121 #include "corewrap.h"
124 static void reset_all_counters(FILE *fplog, t_commrec *cr,
126 gmx_int64_t *step_rel, t_inputrec *ir,
127 gmx_wallcycle_t wcycle, t_nrnb *nrnb,
128 gmx_walltime_accounting_t walltime_accounting,
129 struct nonbonded_verlet_t *nbv)
131 char sbuf[STEPSTRSIZE];
133 /* Reset all the counters related to performance over the run */
134 md_print_warn(cr, fplog, "step %s: resetting all time and cycle counters\n",
135 gmx_step_str(step, sbuf));
137 nbnxn_cuda_reset_timings(nbv);
139 wallcycle_stop(wcycle, ewcRUN);
140 wallcycle_reset_all(wcycle);
141 if (DOMAINDECOMP(cr))
143 reset_dd_statistics_counters(cr->dd);
146 ir->init_step += *step_rel;
147 ir->nsteps -= *step_rel;
149 wallcycle_start(wcycle, ewcRUN);
150 walltime_accounting_start(walltime_accounting);
151 print_date_and_time(fplog, cr->nodeid, "Restarted time", gmx_gettime());
154 double do_md(FILE *fplog, t_commrec *cr, int nfile, const t_filenm fnm[],
155 const output_env_t oenv, gmx_bool bVerbose, gmx_bool bCompact,
157 gmx_vsite_t *vsite, gmx_constr_t constr,
158 int stepout, t_inputrec *ir,
159 gmx_mtop_t *top_global,
161 t_state *state_global,
163 t_nrnb *nrnb, gmx_wallcycle_t wcycle,
164 gmx_edsam_t ed, t_forcerec *fr,
165 int repl_ex_nst, int repl_ex_nex, int repl_ex_seed, gmx_membed_t membed,
166 real cpt_period, real max_hours,
167 const char gmx_unused *deviceOptions,
170 gmx_walltime_accounting_t walltime_accounting)
172 gmx_mdoutf_t outf = NULL;
173 gmx_int64_t step, step_rel;
175 double t, t0, lam0[efptNR];
176 gmx_bool bGStatEveryStep, bGStat, bCalcVir, bCalcEner;
177 gmx_bool bNS, bNStList, bSimAnn, bStopCM, bRerunMD, bNotLastFrame = FALSE,
178 bFirstStep, bStateFromCP, bStateFromTPX, bInitStep, bLastStep,
179 bBornRadii, bStartingFromCpt;
180 gmx_bool bDoDHDL = FALSE, bDoFEP = FALSE, bDoExpanded = FALSE;
181 gmx_bool do_ene, do_log, do_verbose, bRerunWarnNoV = TRUE,
182 bForceUpdate = FALSE, bCPT;
183 gmx_bool bMasterState;
184 int force_flags, cglo_flags;
185 tensor force_vir, shake_vir, total_vir, tmp_vir, pres;
190 t_state *bufstate = NULL;
194 gmx_repl_ex_t repl_ex = NULL;
197 t_mdebin *mdebin = NULL;
198 t_state *state = NULL;
199 rvec *f_global = NULL;
200 gmx_enerdata_t *enerd;
202 gmx_global_stat_t gstat;
203 gmx_update_t upd = NULL;
204 t_graph *graph = NULL;
206 gmx_groups_t *groups;
207 gmx_ekindata_t *ekind, *ekind_save;
208 gmx_shellfc_t shellfc;
209 int count, nconverged = 0;
211 gmx_bool bConverged = TRUE, bSumEkinhOld, bDoReplEx, bExchanged, bNeedRepartition;
212 gmx_bool bResetCountersHalfMaxH = FALSE;
213 gmx_bool bVV, bIterativeCase, bFirstIterate, bTemp, bPres, bTrotter;
214 gmx_bool bUpdateDoLR;
218 real veta_save, scalevir, tracevir;
224 real saved_conserved_quantity = 0;
228 char sbuf[STEPSTRSIZE], sbuf2[STEPSTRSIZE];
229 int handled_stop_condition = gmx_stop_cond_none; /* compare to get_stop_condition*/
230 gmx_iterate_t iterate;
231 gmx_int64_t multisim_nsteps = -1; /* number of steps to do before first multisim
232 simulation stops. If equal to zero, don't
233 communicate any more between multisims.*/
234 /* PME load balancing data for GPU kernels */
235 pme_load_balancing_t pme_loadbal = NULL;
237 gmx_bool bPMETuneTry = FALSE, bPMETuneRunning = FALSE;
240 gmx_bool bIMDstep = FALSE;
243 /* Temporary addition for FAHCORE checkpointing */
247 /* Check for special mdrun options */
248 bRerunMD = (Flags & MD_RERUN);
249 if (Flags & MD_RESETCOUNTERSHALFWAY)
253 /* Signal to reset the counters half the simulation steps. */
254 wcycle_set_reset_counters(wcycle, ir->nsteps/2);
256 /* Signal to reset the counters halfway the simulation time. */
257 bResetCountersHalfMaxH = (max_hours > 0);
260 /* md-vv uses averaged full step velocities for T-control
261 md-vv-avek uses averaged half step velocities for T-control (but full step ekin for P control)
262 md uses averaged half step kinetic energies to determine temperature unless defined otherwise by GMX_EKIN_AVE_VEL; */
264 if (bVV) /* to store the initial velocities while computing virial */
266 snew(cbuf, top_global->natoms);
268 /* all the iteratative cases - only if there are constraints */
269 bIterativeCase = ((IR_NPH_TROTTER(ir) || IR_NPT_TROTTER(ir)) && (constr) && (!bRerunMD));
270 gmx_iterate_init(&iterate, FALSE); /* The default value of iterate->bIterationActive is set to
271 false in this step. The correct value, true or false,
272 is set at each step, as it depends on the frequency of temperature
273 and pressure control.*/
274 bTrotter = (bVV && (IR_NPT_TROTTER(ir) || IR_NPH_TROTTER(ir) || IR_NVT_TROTTER(ir)));
278 /* Since we don't know if the frames read are related in any way,
279 * rebuild the neighborlist at every step.
282 ir->nstcalcenergy = 1;
286 check_ir_old_tpx_versions(cr, fplog, ir, top_global);
288 nstglobalcomm = check_nstglobalcomm(fplog, cr, nstglobalcomm, ir);
289 bGStatEveryStep = (nstglobalcomm == 1);
291 if (!bGStatEveryStep && ir->nstlist == -1 && fplog != NULL)
294 "To reduce the energy communication with nstlist = -1\n"
295 "the neighbor list validity should not be checked at every step,\n"
296 "this means that exact integration is not guaranteed.\n"
297 "The neighbor list validity is checked after:\n"
298 " <n.list life time> - 2*std.dev.(n.list life time) steps.\n"
299 "In most cases this will result in exact integration.\n"
300 "This reduces the energy communication by a factor of 2 to 3.\n"
301 "If you want less energy communication, set nstlist > 3.\n\n");
306 ir->nstxout_compressed = 0;
308 groups = &top_global->groups;
311 init_md(fplog, cr, ir, oenv, &t, &t0, state_global->lambda,
312 &(state_global->fep_state), lam0,
313 nrnb, top_global, &upd,
314 nfile, fnm, &outf, &mdebin,
315 force_vir, shake_vir, mu_tot, &bSimAnn, &vcm, Flags, wcycle);
317 clear_mat(total_vir);
319 /* Energy terms and groups */
321 init_enerdata(top_global->groups.grps[egcENER].nr, ir->fepvals->n_lambda,
323 if (DOMAINDECOMP(cr))
329 snew(f, top_global->natoms);
332 /* Kinetic energy data */
334 init_ekindata(fplog, top_global, &(ir->opts), ekind);
335 /* needed for iteration of constraints */
337 init_ekindata(fplog, top_global, &(ir->opts), ekind_save);
338 /* Copy the cos acceleration to the groups struct */
339 ekind->cosacc.cos_accel = ir->cos_accel;
341 gstat = global_stat_init(ir);
344 /* Check for polarizable models and flexible constraints */
345 shellfc = init_shell_flexcon(fplog,
346 top_global, n_flexible_constraints(constr),
347 (ir->bContinuation ||
348 (DOMAINDECOMP(cr) && !MASTER(cr))) ?
349 NULL : state_global->x);
350 if (shellfc && ir->nstcalcenergy != 1)
352 gmx_fatal(FARGS, "You have nstcalcenergy set to a value (%d) that is different from 1.\nThis is not supported in combinations with shell particles.\nPlease make a new tpr file.", ir->nstcalcenergy);
354 if (shellfc && DOMAINDECOMP(cr))
356 gmx_fatal(FARGS, "Shell particles are not implemented with domain decomposition, use a single rank");
358 if (shellfc && ir->eI == eiNM)
360 /* Currently shells don't work with Normal Modes */
361 gmx_fatal(FARGS, "Normal Mode analysis is not supported with shells.\nIf you'd like to help with adding support, we have an open discussion at http://redmine.gromacs.org/issues/879\n");
364 if (vsite && ir->eI == eiNM)
366 /* Currently virtual sites don't work with Normal Modes */
367 gmx_fatal(FARGS, "Normal Mode analysis is not supported with virtual sites.\nIf you'd like to help with adding support, we have an open discussion at http://redmine.gromacs.org/issues/879\n");
372 tMPI_Thread_mutex_lock(&deform_init_box_mutex);
373 set_deform_reference_box(upd,
374 deform_init_init_step_tpx,
375 deform_init_box_tpx);
376 tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
380 double io = compute_io(ir, top_global->natoms, groups, mdebin->ebin->nener, 1);
381 if ((io > 2000) && MASTER(cr))
384 "\nWARNING: This run will generate roughly %.0f Mb of data\n\n",
389 if (DOMAINDECOMP(cr))
391 top = dd_init_local_top(top_global);
394 dd_init_local_state(cr->dd, state_global, state);
396 if (DDMASTER(cr->dd) && ir->nstfout)
398 snew(f_global, state_global->natoms);
403 top = gmx_mtop_generate_local_top(top_global, ir);
405 forcerec_set_excl_load(fr, top);
407 state = serial_init_local_state(state_global);
410 atoms2md(top_global, ir, 0, NULL, top_global->natoms, mdatoms);
414 set_vsite_top(vsite, top, mdatoms, cr);
417 if (ir->ePBC != epbcNONE && !fr->bMolPBC)
419 graph = mk_graph(fplog, &(top->idef), 0, top_global->natoms, FALSE, FALSE);
424 make_local_shells(cr, mdatoms, shellfc);
427 setup_bonded_threading(fr, &top->idef);
430 /* Set up interactive MD (IMD) */
431 init_IMD(ir, cr, top_global, fplog, ir->nstcalcenergy, state_global->x,
432 nfile, fnm, oenv, imdport, Flags);
434 if (DOMAINDECOMP(cr))
436 /* Distribute the charge groups over the nodes from the master node */
437 dd_partition_system(fplog, ir->init_step, cr, TRUE, 1,
438 state_global, top_global, ir,
439 state, &f, mdatoms, top, fr,
440 vsite, shellfc, constr,
441 nrnb, wcycle, FALSE);
445 update_mdatoms(mdatoms, state->lambda[efptMASS]);
447 if (opt2bSet("-cpi", nfile, fnm))
449 bStateFromCP = gmx_fexist_master(opt2fn_master("-cpi", nfile, fnm, cr), cr);
453 bStateFromCP = FALSE;
458 init_expanded_ensemble(bStateFromCP, ir, &state->dfhist);
465 /* Update mdebin with energy history if appending to output files */
466 if (Flags & MD_APPENDFILES)
468 restore_energyhistory_from_state(mdebin, &state_global->enerhist);
472 /* We might have read an energy history from checkpoint,
473 * free the allocated memory and reset the counts.
475 done_energyhistory(&state_global->enerhist);
476 init_energyhistory(&state_global->enerhist);
479 /* Set the initial energy history in state by updating once */
480 update_energyhistory(&state_global->enerhist, mdebin);
483 /* Initialize constraints */
484 if (constr && !DOMAINDECOMP(cr))
486 set_constraints(constr, top, ir, mdatoms, cr);
489 if (repl_ex_nst > 0 && MASTER(cr))
491 repl_ex = init_replica_exchange(fplog, cr->ms, state_global, ir,
492 repl_ex_nst, repl_ex_nex, repl_ex_seed);
495 /* PME tuning is only supported with GPUs or PME nodes and not with rerun.
496 * PME tuning is not supported with PME only for LJ and not for Coulomb.
498 if ((Flags & MD_TUNEPME) &&
499 EEL_PME(fr->eeltype) &&
500 ( use_GPU(fr->nbv) || !(cr->duty & DUTY_PME)) &&
503 pme_loadbal_init(&pme_loadbal, ir, state->box, fr->ic, fr->pmedata);
505 if (cr->duty & DUTY_PME)
507 /* Start tuning right away, as we can't measure the load */
508 bPMETuneRunning = TRUE;
512 /* Separate PME nodes, we can measure the PP/PME load balance */
517 if (!ir->bContinuation && !bRerunMD)
519 if (mdatoms->cFREEZE && (state->flags & (1<<estV)))
521 /* Set the velocities of frozen particles to zero */
522 for (i = 0; i < mdatoms->homenr; i++)
524 for (m = 0; m < DIM; m++)
526 if (ir->opts.nFreeze[mdatoms->cFREEZE[i]][m])
536 /* Constrain the initial coordinates and velocities */
537 do_constrain_first(fplog, constr, ir, mdatoms, state,
542 /* Construct the virtual sites for the initial configuration */
543 construct_vsites(vsite, state->x, ir->delta_t, NULL,
544 top->idef.iparams, top->idef.il,
545 fr->ePBC, fr->bMolPBC, cr, state->box);
551 /* set free energy calculation frequency as the minimum
552 greatest common denominator of nstdhdl, nstexpanded, and repl_ex_nst*/
553 nstfep = ir->fepvals->nstdhdl;
556 nstfep = gmx_greatest_common_divisor(ir->fepvals->nstdhdl, nstfep);
560 nstfep = gmx_greatest_common_divisor(repl_ex_nst, nstfep);
563 /* I'm assuming we need global communication the first time! MRS */
564 cglo_flags = (CGLO_TEMPERATURE | CGLO_GSTAT
565 | ((ir->comm_mode != ecmNO) ? CGLO_STOPCM : 0)
566 | (bVV ? CGLO_PRESSURE : 0)
567 | (bVV ? CGLO_CONSTRAINT : 0)
568 | (bRerunMD ? CGLO_RERUNMD : 0)
569 | ((Flags & MD_READ_EKIN) ? CGLO_READEKIN : 0));
571 bSumEkinhOld = FALSE;
572 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
573 NULL, enerd, force_vir, shake_vir, total_vir, pres, mu_tot,
574 constr, NULL, FALSE, state->box,
575 top_global, &bSumEkinhOld, cglo_flags);
576 if (ir->eI == eiVVAK)
578 /* a second call to get the half step temperature initialized as well */
579 /* we do the same call as above, but turn the pressure off -- internally to
580 compute_globals, this is recognized as a velocity verlet half-step
581 kinetic energy calculation. This minimized excess variables, but
582 perhaps loses some logic?*/
584 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
585 NULL, enerd, force_vir, shake_vir, total_vir, pres, mu_tot,
586 constr, NULL, FALSE, state->box,
587 top_global, &bSumEkinhOld,
588 cglo_flags &~(CGLO_STOPCM | CGLO_PRESSURE));
591 /* Calculate the initial half step temperature, and save the ekinh_old */
592 if (!(Flags & MD_STARTFROMCPT))
594 for (i = 0; (i < ir->opts.ngtc); i++)
596 copy_mat(ekind->tcstat[i].ekinh, ekind->tcstat[i].ekinh_old);
601 enerd->term[F_TEMP] *= 2; /* result of averages being done over previous and current step,
602 and there is no previous step */
605 /* if using an iterative algorithm, we need to create a working directory for the state. */
608 bufstate = init_bufstate(state);
611 /* need to make an initiation call to get the Trotter variables set, as well as other constants for non-trotter
612 temperature control */
613 trotter_seq = init_npt_vars(ir, state, &MassQ, bTrotter);
617 if (constr && !ir->bContinuation && ir->eConstrAlg == econtLINCS)
620 "RMS relative constraint deviation after constraining: %.2e\n",
621 constr_rmsd(constr, FALSE));
623 if (EI_STATE_VELOCITY(ir->eI))
625 fprintf(fplog, "Initial temperature: %g K\n", enerd->term[F_TEMP]);
629 fprintf(stderr, "starting md rerun '%s', reading coordinates from"
630 " input trajectory '%s'\n\n",
631 *(top_global->name), opt2fn("-rerun", nfile, fnm));
634 fprintf(stderr, "Calculated time to finish depends on nsteps from "
635 "run input file,\nwhich may not correspond to the time "
636 "needed to process input trajectory.\n\n");
642 fprintf(stderr, "starting mdrun '%s'\n",
643 *(top_global->name));
646 sprintf(tbuf, "%8.1f", (ir->init_step+ir->nsteps)*ir->delta_t);
650 sprintf(tbuf, "%s", "infinite");
652 if (ir->init_step > 0)
654 fprintf(stderr, "%s steps, %s ps (continuing from step %s, %8.1f ps).\n",
655 gmx_step_str(ir->init_step+ir->nsteps, sbuf), tbuf,
656 gmx_step_str(ir->init_step, sbuf2),
657 ir->init_step*ir->delta_t);
661 fprintf(stderr, "%s steps, %s ps.\n",
662 gmx_step_str(ir->nsteps, sbuf), tbuf);
665 fprintf(fplog, "\n");
668 walltime_accounting_start(walltime_accounting);
669 wallcycle_start(wcycle, ewcRUN);
670 print_start(fplog, cr, walltime_accounting, "mdrun");
672 /* safest point to do file checkpointing is here. More general point would be immediately before integrator call */
674 chkpt_ret = fcCheckPointParallel( cr->nodeid,
678 gmx_fatal( 3, __FILE__, __LINE__, "Checkpoint error on step %d\n", 0 );
683 /***********************************************************
687 ************************************************************/
689 /* if rerunMD then read coordinates and velocities from input trajectory */
692 if (getenv("GMX_FORCE_UPDATE"))
700 bNotLastFrame = read_first_frame(oenv, &status,
701 opt2fn("-rerun", nfile, fnm),
702 &rerun_fr, TRX_NEED_X | TRX_READ_V);
703 if (rerun_fr.natoms != top_global->natoms)
706 "Number of atoms in trajectory (%d) does not match the "
707 "run input file (%d)\n",
708 rerun_fr.natoms, top_global->natoms);
710 if (ir->ePBC != epbcNONE)
714 gmx_fatal(FARGS, "Rerun trajectory frame step %d time %f does not contain a box, while pbc is used", rerun_fr.step, rerun_fr.time);
716 if (max_cutoff2(ir->ePBC, rerun_fr.box) < sqr(fr->rlistlong))
718 gmx_fatal(FARGS, "Rerun trajectory frame step %d time %f has too small box dimensions", rerun_fr.step, rerun_fr.time);
725 rerun_parallel_comm(cr, &rerun_fr, &bNotLastFrame);
728 if (ir->ePBC != epbcNONE)
730 /* Set the shift vectors.
731 * Necessary here when have a static box different from the tpr box.
733 calc_shifts(rerun_fr.box, fr->shift_vec);
737 /* loop over MD steps or if rerunMD to end of input trajectory */
739 /* Skip the first Nose-Hoover integration when we get the state from tpx */
740 bStateFromTPX = !bStateFromCP;
741 bInitStep = bFirstStep && (bStateFromTPX || bVV);
742 bStartingFromCpt = (Flags & MD_STARTFROMCPT) && bInitStep;
743 bSumEkinhOld = FALSE;
745 bNeedRepartition = FALSE;
747 init_global_signals(&gs, cr, ir, repl_ex_nst);
749 step = ir->init_step;
752 init_nlistheuristics(&nlh, bGStatEveryStep, step);
754 if (MULTISIM(cr) && (repl_ex_nst <= 0 ))
756 /* check how many steps are left in other sims */
757 multisim_nsteps = get_multisim_nsteps(cr, ir->nsteps);
761 /* and stop now if we should */
762 bLastStep = (bRerunMD || (ir->nsteps >= 0 && step_rel > ir->nsteps) ||
763 ((multisim_nsteps >= 0) && (step_rel >= multisim_nsteps )));
764 while (!bLastStep || (bRerunMD && bNotLastFrame))
767 wallcycle_start(wcycle, ewcSTEP);
773 step = rerun_fr.step;
774 step_rel = step - ir->init_step;
787 bLastStep = (step_rel == ir->nsteps);
788 t = t0 + step*ir->delta_t;
791 if (ir->efep != efepNO || ir->bSimTemp)
793 /* find and set the current lambdas. If rerunning, we either read in a state, or a lambda value,
794 requiring different logic. */
796 set_current_lambdas(step, ir->fepvals, bRerunMD, &rerun_fr, state_global, state, lam0);
797 bDoDHDL = do_per_step(step, ir->fepvals->nstdhdl);
798 bDoFEP = (do_per_step(step, nstfep) && (ir->efep != efepNO));
799 bDoExpanded = (do_per_step(step, ir->expandedvals->nstexpanded)
800 && (ir->bExpanded) && (step > 0) && (!bStartingFromCpt));
803 bDoReplEx = ((repl_ex_nst > 0) && (step > 0) && !bLastStep &&
804 do_per_step(step, repl_ex_nst));
808 update_annealing_target_temp(&(ir->opts), t);
813 if (!DOMAINDECOMP(cr) || MASTER(cr))
815 for (i = 0; i < state_global->natoms; i++)
817 copy_rvec(rerun_fr.x[i], state_global->x[i]);
821 for (i = 0; i < state_global->natoms; i++)
823 copy_rvec(rerun_fr.v[i], state_global->v[i]);
828 for (i = 0; i < state_global->natoms; i++)
830 clear_rvec(state_global->v[i]);
834 fprintf(stderr, "\nWARNING: Some frames do not contain velocities.\n"
835 " Ekin, temperature and pressure are incorrect,\n"
836 " the virial will be incorrect when constraints are present.\n"
838 bRerunWarnNoV = FALSE;
842 copy_mat(rerun_fr.box, state_global->box);
843 copy_mat(state_global->box, state->box);
845 if (vsite && (Flags & MD_RERUN_VSITE))
847 if (DOMAINDECOMP(cr))
849 gmx_fatal(FARGS, "Vsite recalculation with -rerun is not implemented with domain decomposition, use a single rank");
853 /* Following is necessary because the graph may get out of sync
854 * with the coordinates if we only have every N'th coordinate set
856 mk_mshift(fplog, graph, fr->ePBC, state->box, state->x);
857 shift_self(graph, state->box, state->x);
859 construct_vsites(vsite, state->x, ir->delta_t, state->v,
860 top->idef.iparams, top->idef.il,
861 fr->ePBC, fr->bMolPBC, cr, state->box);
864 unshift_self(graph, state->box, state->x);
869 /* Stop Center of Mass motion */
870 bStopCM = (ir->comm_mode != ecmNO && do_per_step(step, ir->nstcomm));
874 /* for rerun MD always do Neighbour Searching */
875 bNS = (bFirstStep || ir->nstlist != 0);
880 /* Determine whether or not to do Neighbour Searching and LR */
881 bNStList = (ir->nstlist > 0 && step % ir->nstlist == 0);
883 bNS = (bFirstStep || bExchanged || bNeedRepartition || bNStList || bDoFEP ||
884 (ir->nstlist == -1 && nlh.nabnsb > 0));
886 if (bNS && ir->nstlist == -1)
888 set_nlistheuristics(&nlh, bFirstStep || bExchanged || bNeedRepartition || bDoFEP, step);
892 /* check whether we should stop because another simulation has
896 if ( (multisim_nsteps >= 0) && (step_rel >= multisim_nsteps) &&
897 (multisim_nsteps != ir->nsteps) )
904 "Stopping simulation %d because another one has finished\n",
908 gs.sig[eglsCHKPT] = 1;
913 /* < 0 means stop at next step, > 0 means stop at next NS step */
914 if ( (gs.set[eglsSTOPCOND] < 0) ||
915 ( (gs.set[eglsSTOPCOND] > 0) && (bNStList || ir->nstlist == 0) ) )
920 /* Determine whether or not to update the Born radii if doing GB */
921 bBornRadii = bFirstStep;
922 if (ir->implicit_solvent && (step % ir->nstgbradii == 0))
927 do_log = do_per_step(step, ir->nstlog) || bFirstStep || bLastStep;
928 do_verbose = bVerbose &&
929 (step % stepout == 0 || bFirstStep || bLastStep);
931 if (bNS && !(bFirstStep && ir->bContinuation && !bRerunMD))
939 bMasterState = FALSE;
940 /* Correct the new box if it is too skewed */
941 if (DYNAMIC_BOX(*ir))
943 if (correct_box(fplog, step, state->box, graph))
948 if (DOMAINDECOMP(cr) && bMasterState)
950 dd_collect_state(cr->dd, state, state_global);
954 if (DOMAINDECOMP(cr))
956 /* Repartition the domain decomposition */
957 wallcycle_start(wcycle, ewcDOMDEC);
958 dd_partition_system(fplog, step, cr,
959 bMasterState, nstglobalcomm,
960 state_global, top_global, ir,
961 state, &f, mdatoms, top, fr,
962 vsite, shellfc, constr,
964 do_verbose && !bPMETuneRunning);
965 wallcycle_stop(wcycle, ewcDOMDEC);
966 /* If using an iterative integrator, reallocate space to match the decomposition */
970 if (MASTER(cr) && do_log)
972 print_ebin_header(fplog, step, t, state->lambda[efptFEP]); /* can we improve the information printed here? */
975 if (ir->efep != efepNO)
977 update_mdatoms(mdatoms, state->lambda[efptMASS]);
980 if ((bRerunMD && rerun_fr.bV) || bExchanged)
983 /* We need the kinetic energy at minus the half step for determining
984 * the full step kinetic energy and possibly for T-coupling.*/
985 /* This may not be quite working correctly yet . . . . */
986 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
987 wcycle, enerd, NULL, NULL, NULL, NULL, mu_tot,
988 constr, NULL, FALSE, state->box,
989 top_global, &bSumEkinhOld,
990 CGLO_RERUNMD | CGLO_GSTAT | CGLO_TEMPERATURE);
992 clear_mat(force_vir);
994 /* We write a checkpoint at this MD step when:
995 * either at an NS step when we signalled through gs,
996 * or at the last step (but not when we do not want confout),
997 * but never at the first step or with rerun.
999 bCPT = (((gs.set[eglsCHKPT] && (bNS || ir->nstlist == 0)) ||
1000 (bLastStep && (Flags & MD_CONFOUT))) &&
1001 step > ir->init_step && !bRerunMD);
1004 gs.set[eglsCHKPT] = 0;
1007 /* Determine the energy and pressure:
1008 * at nstcalcenergy steps and at energy output steps (set below).
1010 if (EI_VV(ir->eI) && (!bInitStep))
1012 /* for vv, the first half of the integration actually corresponds
1013 to the previous step. bCalcEner is only required to be evaluated on the 'next' step,
1014 but the virial needs to be calculated on both the current step and the 'next' step. Future
1015 reorganization may be able to get rid of one of the bCalcVir=TRUE steps. */
1017 bCalcEner = do_per_step(step-1, ir->nstcalcenergy);
1018 bCalcVir = bCalcEner ||
1019 (ir->epc != epcNO && (do_per_step(step, ir->nstpcouple) || do_per_step(step-1, ir->nstpcouple)));
1023 bCalcEner = do_per_step(step, ir->nstcalcenergy);
1024 bCalcVir = bCalcEner ||
1025 (ir->epc != epcNO && do_per_step(step, ir->nstpcouple));
1028 /* Do we need global communication ? */
1029 bGStat = (bCalcVir || bCalcEner || bStopCM ||
1030 do_per_step(step, nstglobalcomm) || (bVV && IR_NVT_TROTTER(ir) && do_per_step(step-1, nstglobalcomm)) ||
1031 (ir->nstlist == -1 && !bRerunMD && step >= nlh.step_nscheck));
1033 do_ene = (do_per_step(step, ir->nstenergy) || bLastStep);
1035 if (do_ene || do_log || bDoReplEx)
1042 /* these CGLO_ options remain the same throughout the iteration */
1043 cglo_flags = ((bRerunMD ? CGLO_RERUNMD : 0) |
1044 (bGStat ? CGLO_GSTAT : 0)
1047 force_flags = (GMX_FORCE_STATECHANGED |
1048 ((DYNAMIC_BOX(*ir) || bRerunMD) ? GMX_FORCE_DYNAMICBOX : 0) |
1049 GMX_FORCE_ALLFORCES |
1051 (bCalcVir ? GMX_FORCE_VIRIAL : 0) |
1052 (bCalcEner ? GMX_FORCE_ENERGY : 0) |
1053 (bDoFEP ? GMX_FORCE_DHDL : 0)
1058 if (do_per_step(step, ir->nstcalclr))
1060 force_flags |= GMX_FORCE_DO_LR;
1066 /* Now is the time to relax the shells */
1067 count = relax_shell_flexcon(fplog, cr, bVerbose, step,
1068 ir, bNS, force_flags,
1071 state, f, force_vir, mdatoms,
1072 nrnb, wcycle, graph, groups,
1073 shellfc, fr, bBornRadii, t, mu_tot,
1075 mdoutf_get_fp_field(outf));
1085 /* The coordinates (x) are shifted (to get whole molecules)
1087 * This is parallellized as well, and does communication too.
1088 * Check comments in sim_util.c
1090 do_force(fplog, cr, ir, step, nrnb, wcycle, top, groups,
1091 state->box, state->x, &state->hist,
1092 f, force_vir, mdatoms, enerd, fcd,
1093 state->lambda, graph,
1094 fr, vsite, mu_tot, t, mdoutf_get_fp_field(outf), ed, bBornRadii,
1095 (bNS ? GMX_FORCE_NS : 0) | force_flags);
1098 if (bVV && !bStartingFromCpt && !bRerunMD)
1099 /* ############### START FIRST UPDATE HALF-STEP FOR VV METHODS############### */
1101 wallcycle_start(wcycle, ewcUPDATE);
1102 if (ir->eI == eiVV && bInitStep)
1104 /* if using velocity verlet with full time step Ekin,
1105 * take the first half step only to compute the
1106 * virial for the first step. From there,
1107 * revert back to the initial coordinates
1108 * so that the input is actually the initial step.
1110 copy_rvecn(state->v, cbuf, 0, state->natoms); /* should make this better for parallelizing? */
1114 /* this is for NHC in the Ekin(t+dt/2) version of vv */
1115 trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ1);
1118 /* If we are using twin-range interactions where the long-range component
1119 * is only evaluated every nstcalclr>1 steps, we should do a special update
1120 * step to combine the long-range forces on these steps.
1121 * For nstcalclr=1 this is not done, since the forces would have been added
1122 * directly to the short-range forces already.
1124 * TODO Remove various aspects of VV+twin-range in master
1125 * branch, because VV integrators did not ever support
1126 * twin-range multiple time stepping with constraints.
1128 bUpdateDoLR = (fr->bTwinRange && do_per_step(step, ir->nstcalclr));
1130 update_coords(fplog, step, ir, mdatoms, state, fr->bMolPBC,
1131 f, bUpdateDoLR, fr->f_twin, bCalcVir ? &fr->vir_twin_constr : NULL, fcd,
1132 ekind, M, upd, bInitStep, etrtVELOCITY1,
1133 cr, nrnb, constr, &top->idef);
1135 if (bIterativeCase && do_per_step(step-1, ir->nstpcouple) && !bInitStep)
1137 gmx_iterate_init(&iterate, TRUE);
1139 /* for iterations, we save these vectors, as we will be self-consistently iterating
1142 /*#### UPDATE EXTENDED VARIABLES IN TROTTER FORMULATION */
1144 /* save the state */
1145 if (iterate.bIterationActive)
1147 copy_coupling_state(state, bufstate, ekind, ekind_save, &(ir->opts));
1150 bFirstIterate = TRUE;
1151 while (bFirstIterate || iterate.bIterationActive)
1153 if (iterate.bIterationActive)
1155 copy_coupling_state(bufstate, state, ekind_save, ekind, &(ir->opts));
1156 if (bFirstIterate && bTrotter)
1158 /* The first time through, we need a decent first estimate
1159 of veta(t+dt) to compute the constraints. Do
1160 this by computing the box volume part of the
1161 trotter integration at this time. Nothing else
1162 should be changed by this routine here. If
1163 !(first time), we start with the previous value
1166 veta_save = state->veta;
1167 trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ0);
1168 vetanew = state->veta;
1169 state->veta = veta_save;
1173 if (!bRerunMD || rerun_fr.bV || bForceUpdate) /* Why is rerun_fr.bV here? Unclear. */
1175 wallcycle_stop(wcycle, ewcUPDATE);
1176 update_constraints(fplog, step, NULL, ir, ekind, mdatoms,
1177 state, fr->bMolPBC, graph, f,
1178 &top->idef, shake_vir,
1179 cr, nrnb, wcycle, upd, constr,
1180 TRUE, bCalcVir, vetanew);
1181 wallcycle_start(wcycle, ewcUPDATE);
1183 if (bCalcVir && bUpdateDoLR && ir->nstcalclr > 1)
1185 /* Correct the virial for multiple time stepping */
1186 m_sub(shake_vir, fr->vir_twin_constr, shake_vir);
1191 /* Need to unshift here if a do_force has been
1192 called in the previous step */
1193 unshift_self(graph, state->box, state->x);
1196 /* if VV, compute the pressure and constraints */
1197 /* For VV2, we strictly only need this if using pressure
1198 * control, but we really would like to have accurate pressures
1200 * Think about ways around this in the future?
1201 * For now, keep this choice in comments.
1203 /*bPres = (ir->eI==eiVV || IR_NPT_TROTTER(ir)); */
1204 /*bTemp = ((ir->eI==eiVV &&(!bInitStep)) || (ir->eI==eiVVAK && IR_NPT_TROTTER(ir)));*/
1206 bTemp = ((ir->eI == eiVV && (!bInitStep)) || (ir->eI == eiVVAK));
1207 if (bCalcEner && ir->eI == eiVVAK) /*MRS: 7/9/2010 -- this still doesn't fix it?*/
1209 bSumEkinhOld = TRUE;
1211 /* for vv, the first half of the integration actually corresponds to the previous step.
1212 So we need information from the last step in the first half of the integration */
1213 if (bGStat || do_per_step(step-1, nstglobalcomm))
1215 wallcycle_stop(wcycle, ewcUPDATE);
1216 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
1217 wcycle, enerd, force_vir, shake_vir, total_vir, pres, mu_tot,
1218 constr, NULL, FALSE, state->box,
1219 top_global, &bSumEkinhOld,
1222 | (bTemp ? CGLO_TEMPERATURE : 0)
1223 | (bPres ? CGLO_PRESSURE : 0)
1224 | (bPres ? CGLO_CONSTRAINT : 0)
1225 | ((iterate.bIterationActive) ? CGLO_ITERATE : 0)
1226 | (bFirstIterate ? CGLO_FIRSTITERATE : 0)
1229 /* explanation of above:
1230 a) We compute Ekin at the full time step
1231 if 1) we are using the AveVel Ekin, and it's not the
1232 initial step, or 2) if we are using AveEkin, but need the full
1233 time step kinetic energy for the pressure (always true now, since we want accurate statistics).
1234 b) If we are using EkinAveEkin for the kinetic energy for the temperature control, we still feed in
1235 EkinAveVel because it's needed for the pressure */
1236 wallcycle_start(wcycle, ewcUPDATE);
1238 /* temperature scaling and pressure scaling to produce the extended variables at t+dt */
1243 m_add(force_vir, shake_vir, total_vir); /* we need the un-dispersion corrected total vir here */
1244 trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ2);
1250 wallcycle_stop(wcycle, ewcUPDATE);
1251 /* We need the kinetic energy at minus the half step for determining
1252 * the full step kinetic energy and possibly for T-coupling.*/
1253 /* This may not be quite working correctly yet . . . . */
1254 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
1255 wcycle, enerd, NULL, NULL, NULL, NULL, mu_tot,
1256 constr, NULL, FALSE, state->box,
1257 top_global, &bSumEkinhOld,
1258 CGLO_RERUNMD | CGLO_GSTAT | CGLO_TEMPERATURE);
1259 wallcycle_start(wcycle, ewcUPDATE);
1264 if (iterate.bIterationActive &&
1265 done_iterating(cr, fplog, step, &iterate, bFirstIterate,
1266 state->veta, &vetanew))
1270 bFirstIterate = FALSE;
1273 if (bTrotter && !bInitStep)
1275 copy_mat(shake_vir, state->svir_prev);
1276 copy_mat(force_vir, state->fvir_prev);
1277 if (IR_NVT_TROTTER(ir) && ir->eI == eiVV)
1279 /* update temperature and kinetic energy now that step is over - this is the v(t+dt) point */
1280 enerd->term[F_TEMP] = sum_ekin(&(ir->opts), ekind, NULL, (ir->eI == eiVV), FALSE);
1281 enerd->term[F_EKIN] = trace(ekind->ekin);
1284 /* if it's the initial step, we performed this first step just to get the constraint virial */
1285 if (bInitStep && ir->eI == eiVV)
1287 copy_rvecn(cbuf, state->v, 0, state->natoms);
1289 wallcycle_stop(wcycle, ewcUPDATE);
1292 /* MRS -- now done iterating -- compute the conserved quantity */
1295 saved_conserved_quantity = compute_conserved_from_auxiliary(ir, state, &MassQ);
1298 last_ekin = enerd->term[F_EKIN];
1300 if ((ir->eDispCorr != edispcEnerPres) && (ir->eDispCorr != edispcAllEnerPres))
1302 saved_conserved_quantity -= enerd->term[F_DISPCORR];
1304 /* sum up the foreign energy and dhdl terms for vv. currently done every step so that dhdl is correct in the .edr */
1307 sum_dhdl(enerd, state->lambda, ir->fepvals);
1311 /* ######## END FIRST UPDATE STEP ############## */
1312 /* ######## If doing VV, we now have v(dt) ###### */
1315 /* perform extended ensemble sampling in lambda - we don't
1316 actually move to the new state before outputting
1317 statistics, but if performing simulated tempering, we
1318 do update the velocities and the tau_t. */
1320 lamnew = ExpandedEnsembleDynamics(fplog, ir, enerd, state, &MassQ, state->fep_state, &state->dfhist, step, state->v, mdatoms);
1321 /* history is maintained in state->dfhist, but state_global is what is sent to trajectory and log output */
1322 copy_df_history(&state_global->dfhist, &state->dfhist);
1325 /* Now we have the energies and forces corresponding to the
1326 * coordinates at time t. We must output all of this before
1329 do_md_trajectory_writing(fplog, cr, nfile, fnm, step, step_rel, t,
1330 ir, state, state_global, top_global, fr,
1331 outf, mdebin, ekind, f, f_global,
1333 bCPT, bRerunMD, bLastStep, (Flags & MD_CONFOUT),
1335 /* Check if IMD step and do IMD communication, if bIMD is TRUE. */
1336 bIMDstep = do_IMD(ir->bIMD, step, cr, bNS, state->box, state->x, ir, t, wcycle);
1338 /* kludge -- virial is lost with restart for NPT control. Must restart */
1339 if (bStartingFromCpt && bVV)
1341 copy_mat(state->svir_prev, shake_vir);
1342 copy_mat(state->fvir_prev, force_vir);
1345 elapsed_time = walltime_accounting_get_current_elapsed_time(walltime_accounting);
1347 /* Check whether everything is still all right */
1348 if (((int)gmx_get_stop_condition() > handled_stop_condition)
1349 #ifdef GMX_THREAD_MPI
1354 /* this is just to make gs.sig compatible with the hack
1355 of sending signals around by MPI_Reduce together with
1357 if (gmx_get_stop_condition() == gmx_stop_cond_next_ns)
1359 gs.sig[eglsSTOPCOND] = 1;
1361 if (gmx_get_stop_condition() == gmx_stop_cond_next)
1363 gs.sig[eglsSTOPCOND] = -1;
1365 /* < 0 means stop at next step, > 0 means stop at next NS step */
1369 "\n\nReceived the %s signal, stopping at the next %sstep\n\n",
1370 gmx_get_signal_name(),
1371 gs.sig[eglsSTOPCOND] == 1 ? "NS " : "");
1375 "\n\nReceived the %s signal, stopping at the next %sstep\n\n",
1376 gmx_get_signal_name(),
1377 gs.sig[eglsSTOPCOND] == 1 ? "NS " : "");
1379 handled_stop_condition = (int)gmx_get_stop_condition();
1381 else if (MASTER(cr) && (bNS || ir->nstlist <= 0) &&
1382 (max_hours > 0 && elapsed_time > max_hours*60.0*60.0*0.99) &&
1383 gs.sig[eglsSTOPCOND] == 0 && gs.set[eglsSTOPCOND] == 0)
1385 /* Signal to terminate the run */
1386 gs.sig[eglsSTOPCOND] = 1;
1389 fprintf(fplog, "\nStep %s: Run time exceeded %.3f hours, will terminate the run\n", gmx_step_str(step, sbuf), max_hours*0.99);
1391 fprintf(stderr, "\nStep %s: Run time exceeded %.3f hours, will terminate the run\n", gmx_step_str(step, sbuf), max_hours*0.99);
1394 if (bResetCountersHalfMaxH && MASTER(cr) &&
1395 elapsed_time > max_hours*60.0*60.0*0.495)
1397 gs.sig[eglsRESETCOUNTERS] = 1;
1400 if (ir->nstlist == -1 && !bRerunMD)
1402 /* When bGStatEveryStep=FALSE, global_stat is only called
1403 * when we check the atom displacements, not at NS steps.
1404 * This means that also the bonded interaction count check is not
1405 * performed immediately after NS. Therefore a few MD steps could
1406 * be performed with missing interactions.
1407 * But wrong energies are never written to file,
1408 * since energies are only written after global_stat
1411 if (step >= nlh.step_nscheck)
1413 nlh.nabnsb = natoms_beyond_ns_buffer(ir, fr, &top->cgs,
1414 nlh.scale_tot, state->x);
1418 /* This is not necessarily true,
1419 * but step_nscheck is determined quite conservatively.
1425 /* In parallel we only have to check for checkpointing in steps
1426 * where we do global communication,
1427 * otherwise the other nodes don't know.
1429 if (MASTER(cr) && ((bGStat || !PAR(cr)) &&
1432 elapsed_time >= nchkpt*cpt_period*60.0)) &&
1433 gs.set[eglsCHKPT] == 0)
1435 gs.sig[eglsCHKPT] = 1;
1438 /* at the start of step, randomize or scale the velocities (trotter done elsewhere) */
1443 update_tcouple(step, ir, state, ekind, &MassQ, mdatoms);
1445 if (ETC_ANDERSEN(ir->etc)) /* keep this outside of update_tcouple because of the extra info required to pass */
1447 gmx_bool bIfRandomize;
1448 bIfRandomize = update_randomize_velocities(ir, step, cr, mdatoms, state, upd, constr);
1449 /* if we have constraints, we have to remove the kinetic energy parallel to the bonds */
1450 if (constr && bIfRandomize)
1452 update_constraints(fplog, step, NULL, ir, ekind, mdatoms,
1453 state, fr->bMolPBC, graph, f,
1454 &top->idef, tmp_vir,
1455 cr, nrnb, wcycle, upd, constr,
1456 TRUE, bCalcVir, vetanew);
1461 if (bIterativeCase && do_per_step(step, ir->nstpcouple))
1463 gmx_iterate_init(&iterate, TRUE);
1464 /* for iterations, we save these vectors, as we will be redoing the calculations */
1465 copy_coupling_state(state, bufstate, ekind, ekind_save, &(ir->opts));
1468 bFirstIterate = TRUE;
1469 while (bFirstIterate || iterate.bIterationActive)
1471 /* We now restore these vectors to redo the calculation with improved extended variables */
1472 if (iterate.bIterationActive)
1474 copy_coupling_state(bufstate, state, ekind_save, ekind, &(ir->opts));
1477 /* We make the decision to break or not -after- the calculation of Ekin and Pressure,
1478 so scroll down for that logic */
1480 /* ######### START SECOND UPDATE STEP ################# */
1481 /* Box is changed in update() when we do pressure coupling,
1482 * but we should still use the old box for energy corrections and when
1483 * writing it to the energy file, so it matches the trajectory files for
1484 * the same timestep above. Make a copy in a separate array.
1486 copy_mat(state->box, lastbox);
1490 if (!(bRerunMD && !rerun_fr.bV && !bForceUpdate))
1492 wallcycle_start(wcycle, ewcUPDATE);
1493 /* UPDATE PRESSURE VARIABLES IN TROTTER FORMULATION WITH CONSTRAINTS */
1496 if (iterate.bIterationActive)
1504 /* we use a new value of scalevir to converge the iterations faster */
1505 scalevir = tracevir/trace(shake_vir);
1507 msmul(shake_vir, scalevir, shake_vir);
1508 m_add(force_vir, shake_vir, total_vir);
1509 clear_mat(shake_vir);
1511 trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ3);
1512 /* We can only do Berendsen coupling after we have summed
1513 * the kinetic energy or virial. Since this happens
1514 * in global_state after update, we should only do it at
1515 * step % nstlist = 1 with bGStatEveryStep=FALSE.
1520 update_tcouple(step, ir, state, ekind, &MassQ, mdatoms);
1521 update_pcouple(fplog, step, ir, state, pcoupl_mu, M, bInitStep);
1526 bUpdateDoLR = (fr->bTwinRange && do_per_step(step, ir->nstcalclr));
1528 /* velocity half-step update */
1529 update_coords(fplog, step, ir, mdatoms, state, fr->bMolPBC, f,
1530 bUpdateDoLR, fr->f_twin, bCalcVir ? &fr->vir_twin_constr : NULL, fcd,
1531 ekind, M, upd, FALSE, etrtVELOCITY2,
1532 cr, nrnb, constr, &top->idef);
1535 /* Above, initialize just copies ekinh into ekin,
1536 * it doesn't copy position (for VV),
1537 * and entire integrator for MD.
1540 if (ir->eI == eiVVAK)
1542 copy_rvecn(state->x, cbuf, 0, state->natoms);
1544 bUpdateDoLR = (fr->bTwinRange && do_per_step(step, ir->nstcalclr));
1546 update_coords(fplog, step, ir, mdatoms, state, fr->bMolPBC, f,
1547 bUpdateDoLR, fr->f_twin, bCalcVir ? &fr->vir_twin_constr : NULL, fcd,
1548 ekind, M, upd, bInitStep, etrtPOSITION, cr, nrnb, constr, &top->idef);
1549 wallcycle_stop(wcycle, ewcUPDATE);
1551 update_constraints(fplog, step, &dvdl_constr, ir, ekind, mdatoms, state,
1552 fr->bMolPBC, graph, f,
1553 &top->idef, shake_vir,
1554 cr, nrnb, wcycle, upd, constr,
1555 FALSE, bCalcVir, state->veta);
1557 if (bCalcVir && bUpdateDoLR && ir->nstcalclr > 1)
1559 /* Correct the virial for multiple time stepping */
1560 m_sub(shake_vir, fr->vir_twin_constr, shake_vir);
1563 if (ir->eI == eiVVAK)
1565 /* erase F_EKIN and F_TEMP here? */
1566 /* just compute the kinetic energy at the half step to perform a trotter step */
1567 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
1568 wcycle, enerd, force_vir, shake_vir, total_vir, pres, mu_tot,
1569 constr, NULL, FALSE, lastbox,
1570 top_global, &bSumEkinhOld,
1571 cglo_flags | CGLO_TEMPERATURE
1573 wallcycle_start(wcycle, ewcUPDATE);
1574 trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ4);
1575 /* now we know the scaling, we can compute the positions again */
1576 copy_rvecn(cbuf, state->x, 0, state->natoms);
1578 bUpdateDoLR = (fr->bTwinRange && do_per_step(step, ir->nstcalclr));
1580 update_coords(fplog, step, ir, mdatoms, state, fr->bMolPBC, f,
1581 bUpdateDoLR, fr->f_twin, bCalcVir ? &fr->vir_twin_constr : NULL, fcd,
1582 ekind, M, upd, bInitStep, etrtPOSITION, cr, nrnb, constr, &top->idef);
1583 wallcycle_stop(wcycle, ewcUPDATE);
1585 /* do we need an extra constraint here? just need to copy out of state->v to upd->xp? */
1586 /* are the small terms in the shake_vir here due
1587 * to numerical errors, or are they important
1588 * physically? I'm thinking they are just errors, but not completely sure.
1589 * For now, will call without actually constraining, constr=NULL*/
1590 update_constraints(fplog, step, NULL, ir, ekind, mdatoms,
1591 state, fr->bMolPBC, graph, f,
1592 &top->idef, tmp_vir,
1593 cr, nrnb, wcycle, upd, NULL,
1600 /* this factor of 2 correction
1601 because half of the constraint force is removed
1602 in the vv step, so we have to double it. See
1603 the Redmine issue #1255. It is not yet clear
1604 if the factor of 2 is exact, or just a very
1605 good approximation, and this will be
1606 investigated. The next step is to see if this
1607 can be done adding a dhdl contribution from the
1608 rattle step, but this is somewhat more
1609 complicated with the current code. Will be
1610 investigated, hopefully for 4.6.3. However,
1611 this current solution is much better than
1612 having it completely wrong.
1614 enerd->term[F_DVDL_CONSTR] += 2*dvdl_constr;
1618 enerd->term[F_DVDL_CONSTR] += dvdl_constr;
1623 /* Need to unshift here */
1624 unshift_self(graph, state->box, state->x);
1629 wallcycle_start(wcycle, ewcVSITECONSTR);
1632 shift_self(graph, state->box, state->x);
1634 construct_vsites(vsite, state->x, ir->delta_t, state->v,
1635 top->idef.iparams, top->idef.il,
1636 fr->ePBC, fr->bMolPBC, cr, state->box);
1640 unshift_self(graph, state->box, state->x);
1642 wallcycle_stop(wcycle, ewcVSITECONSTR);
1645 /* ############## IF NOT VV, Calculate globals HERE, also iterate constraints ############ */
1646 /* With Leap-Frog we can skip compute_globals at
1647 * non-communication steps, but we need to calculate
1648 * the kinetic energy one step before communication.
1650 if (bGStat || (!EI_VV(ir->eI) && do_per_step(step+1, nstglobalcomm)))
1652 if (ir->nstlist == -1 && bFirstIterate)
1654 gs.sig[eglsNABNSB] = nlh.nabnsb;
1656 compute_globals(fplog, gstat, cr, ir, fr, ekind, state, state_global, mdatoms, nrnb, vcm,
1657 wcycle, enerd, force_vir, shake_vir, total_vir, pres, mu_tot,
1659 bFirstIterate ? &gs : NULL,
1660 (step_rel % gs.nstms == 0) &&
1661 (multisim_nsteps < 0 || (step_rel < multisim_nsteps)),
1663 top_global, &bSumEkinhOld,
1665 | (!EI_VV(ir->eI) || bRerunMD ? CGLO_ENERGY : 0)
1666 | (!EI_VV(ir->eI) && bStopCM ? CGLO_STOPCM : 0)
1667 | (!EI_VV(ir->eI) ? CGLO_TEMPERATURE : 0)
1668 | (!EI_VV(ir->eI) || bRerunMD ? CGLO_PRESSURE : 0)
1669 | (iterate.bIterationActive ? CGLO_ITERATE : 0)
1670 | (bFirstIterate ? CGLO_FIRSTITERATE : 0)
1673 if (ir->nstlist == -1 && bFirstIterate)
1675 nlh.nabnsb = gs.set[eglsNABNSB];
1676 gs.set[eglsNABNSB] = 0;
1679 /* bIterate is set to keep it from eliminating the old ekin kinetic energy terms */
1680 /* ############# END CALC EKIN AND PRESSURE ################# */
1682 /* Note: this is OK, but there are some numerical precision issues with using the convergence of
1683 the virial that should probably be addressed eventually. state->veta has better properties,
1684 but what we actually need entering the new cycle is the new shake_vir value. Ideally, we could
1685 generate the new shake_vir, but test the veta value for convergence. This will take some thought. */
1687 if (iterate.bIterationActive &&
1688 done_iterating(cr, fplog, step, &iterate, bFirstIterate,
1689 trace(shake_vir), &tracevir))
1693 bFirstIterate = FALSE;
1696 if (!bVV || bRerunMD)
1698 /* sum up the foreign energy and dhdl terms for md and sd. currently done every step so that dhdl is correct in the .edr */
1699 sum_dhdl(enerd, state->lambda, ir->fepvals);
1701 update_box(fplog, step, ir, mdatoms, state, f,
1702 ir->nstlist == -1 ? &nlh.scale_tot : NULL, pcoupl_mu, nrnb, upd);
1704 /* ################# END UPDATE STEP 2 ################# */
1705 /* #### We now have r(t+dt) and v(t+dt/2) ############# */
1707 /* The coordinates (x) were unshifted in update */
1710 /* We will not sum ekinh_old,
1711 * so signal that we still have to do it.
1713 bSumEkinhOld = TRUE;
1716 /* ######### BEGIN PREPARING EDR OUTPUT ########### */
1718 /* use the directly determined last velocity, not actually the averaged half steps */
1719 if (bTrotter && ir->eI == eiVV)
1721 enerd->term[F_EKIN] = last_ekin;
1723 enerd->term[F_ETOT] = enerd->term[F_EPOT] + enerd->term[F_EKIN];
1727 enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + saved_conserved_quantity;
1731 enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + compute_conserved_from_auxiliary(ir, state, &MassQ);
1733 /* ######### END PREPARING EDR OUTPUT ########### */
1738 gmx_bool do_dr, do_or;
1740 if (fplog && do_log && bDoExpanded)
1742 /* only needed if doing expanded ensemble */
1743 PrintFreeEnergyInfoToFile(fplog, ir->fepvals, ir->expandedvals, ir->bSimTemp ? ir->simtempvals : NULL,
1744 &state_global->dfhist, state->fep_state, ir->nstlog, step);
1746 if (!(bStartingFromCpt && (EI_VV(ir->eI))))
1750 upd_mdebin(mdebin, bDoDHDL, TRUE,
1751 t, mdatoms->tmass, enerd, state,
1752 ir->fepvals, ir->expandedvals, lastbox,
1753 shake_vir, force_vir, total_vir, pres,
1754 ekind, mu_tot, constr);
1758 upd_mdebin_step(mdebin);
1761 do_dr = do_per_step(step, ir->nstdisreout);
1762 do_or = do_per_step(step, ir->nstorireout);
1764 print_ebin(mdoutf_get_fp_ene(outf), do_ene, do_dr, do_or, do_log ? fplog : NULL,
1766 eprNORMAL, bCompact, mdebin, fcd, groups, &(ir->opts));
1768 if (ir->ePull != epullNO)
1770 pull_print_output(ir->pull, step, t);
1773 if (do_per_step(step, ir->nstlog))
1775 if (fflush(fplog) != 0)
1777 gmx_fatal(FARGS, "Cannot flush logfile - maybe you are out of disk space?");
1783 /* Have to do this part _after_ outputting the logfile and the edr file */
1784 /* Gets written into the state at the beginning of next loop*/
1785 state->fep_state = lamnew;
1787 /* Print the remaining wall clock time for the run */
1788 if (MULTIMASTER(cr) && (do_verbose || gmx_got_usr_signal()) && !bPMETuneRunning)
1792 fprintf(stderr, "\n");
1794 print_time(stderr, walltime_accounting, step, ir, cr);
1797 /* Ion/water position swapping.
1798 * Not done in last step since trajectory writing happens before this call
1799 * in the MD loop and exchanges would be lost anyway. */
1800 bNeedRepartition = FALSE;
1801 if ((ir->eSwapCoords != eswapNO) && (step > 0) && !bLastStep &&
1802 do_per_step(step, ir->swap->nstswap))
1804 bNeedRepartition = do_swapcoords(cr, step, t, ir, wcycle,
1805 bRerunMD ? rerun_fr.x : state->x,
1806 bRerunMD ? rerun_fr.box : state->box,
1807 top_global, MASTER(cr) && bVerbose, bRerunMD);
1809 if (bNeedRepartition && DOMAINDECOMP(cr))
1811 dd_collect_state(cr->dd, state, state_global);
1815 /* Replica exchange */
1819 bExchanged = replica_exchange(fplog, cr, repl_ex,
1820 state_global, enerd,
1824 if ( (bExchanged || bNeedRepartition) && DOMAINDECOMP(cr) )
1826 dd_partition_system(fplog, step, cr, TRUE, 1,
1827 state_global, top_global, ir,
1828 state, &f, mdatoms, top, fr,
1829 vsite, shellfc, constr,
1830 nrnb, wcycle, FALSE);
1835 bStartingFromCpt = FALSE;
1837 /* ####### SET VARIABLES FOR NEXT ITERATION IF THEY STILL NEED IT ###### */
1838 /* With all integrators, except VV, we need to retain the pressure
1839 * at the current step for coupling at the next step.
1841 if ((state->flags & (1<<estPRES_PREV)) &&
1843 (ir->nstpcouple > 0 && step % ir->nstpcouple == 0)))
1845 /* Store the pressure in t_state for pressure coupling
1846 * at the next MD step.
1848 copy_mat(pres, state->pres_prev);
1851 /* ####### END SET VARIABLES FOR NEXT ITERATION ###### */
1853 if ( (membed != NULL) && (!bLastStep) )
1855 rescale_membed(step_rel, membed, state_global->x);
1862 /* read next frame from input trajectory */
1863 bNotLastFrame = read_next_frame(oenv, status, &rerun_fr);
1868 rerun_parallel_comm(cr, &rerun_fr, &bNotLastFrame);
1872 if (!bRerunMD || !rerun_fr.bStep)
1874 /* increase the MD step number */
1879 cycles = wallcycle_stop(wcycle, ewcSTEP);
1880 if (DOMAINDECOMP(cr) && wcycle)
1882 dd_cycles_add(cr->dd, cycles, ddCyclStep);
1885 if (bPMETuneRunning || bPMETuneTry)
1887 /* PME grid + cut-off optimization with GPUs or PME nodes */
1889 /* Count the total cycles over the last steps */
1890 cycles_pmes += cycles;
1892 /* We can only switch cut-off at NS steps */
1893 if (step % ir->nstlist == 0)
1895 /* PME grid + cut-off optimization with GPUs or PME nodes */
1898 if (DDMASTER(cr->dd))
1900 /* PME node load is too high, start tuning */
1901 bPMETuneRunning = (dd_pme_f_ratio(cr->dd) >= 1.05);
1903 dd_bcast(cr->dd, sizeof(gmx_bool), &bPMETuneRunning);
1905 if (bPMETuneRunning &&
1906 use_GPU(fr->nbv) && DOMAINDECOMP(cr) &&
1907 !(cr->duty & DUTY_PME))
1909 /* Lock DLB=auto to off (does nothing when DLB=yes/no).
1910 * With GPUs + separate PME ranks, we don't want DLB.
1911 * This could happen when we scan coarse grids and
1912 * it would then never be turned off again.
1913 * This would hurt performance at the final, optimal
1914 * grid spacing, where DLB almost never helps.
1915 * Also, DLB can limit the cut-off for PME tuning.
1917 dd_dlb_set_lock(cr->dd, TRUE);
1920 if (bPMETuneRunning || step_rel > ir->nstlist*50)
1922 bPMETuneTry = FALSE;
1925 if (bPMETuneRunning)
1927 /* init_step might not be a multiple of nstlist,
1928 * but the first cycle is always skipped anyhow.
1931 pme_load_balance(pme_loadbal, cr,
1932 (bVerbose && MASTER(cr)) ? stderr : NULL,
1934 ir, state, cycles_pmes,
1935 fr->ic, fr->nbv, &fr->pmedata,
1938 /* Update constants in forcerec/inputrec to keep them in sync with fr->ic */
1939 fr->ewaldcoeff_q = fr->ic->ewaldcoeff_q;
1940 fr->ewaldcoeff_lj = fr->ic->ewaldcoeff_lj;
1941 fr->rlist = fr->ic->rlist;
1942 fr->rlistlong = fr->ic->rlistlong;
1943 fr->rcoulomb = fr->ic->rcoulomb;
1944 fr->rvdw = fr->ic->rvdw;
1946 if (ir->eDispCorr != edispcNO)
1948 calc_enervirdiff(NULL, ir->eDispCorr, fr);
1951 if (!bPMETuneRunning &&
1953 dd_dlb_is_locked(cr->dd))
1955 /* Unlock the DLB=auto, DLB is allowed to activate
1956 * (but we don't expect it to activate in most cases).
1958 dd_dlb_set_lock(cr->dd, FALSE);
1965 if (step_rel == wcycle_get_reset_counters(wcycle) ||
1966 gs.set[eglsRESETCOUNTERS] != 0)
1968 /* Reset all the counters related to performance over the run */
1969 reset_all_counters(fplog, cr, step, &step_rel, ir, wcycle, nrnb, walltime_accounting,
1970 use_GPU(fr->nbv) ? fr->nbv : NULL);
1971 wcycle_set_reset_counters(wcycle, -1);
1972 if (!(cr->duty & DUTY_PME))
1974 /* Tell our PME node to reset its counters */
1975 gmx_pme_send_resetcounters(cr, step);
1977 /* Correct max_hours for the elapsed time */
1978 max_hours -= elapsed_time/(60.0*60.0);
1979 bResetCountersHalfMaxH = FALSE;
1980 gs.set[eglsRESETCOUNTERS] = 0;
1983 /* If bIMD is TRUE, the master updates the IMD energy record and sends positions to VMD client */
1984 IMD_prep_energies_send_positions(ir->bIMD && MASTER(cr), bIMDstep, ir->imd, enerd, step, bCalcEner, wcycle);
1987 /* End of main MD loop */
1990 /* Closing TNG files can include compressing data. Therefore it is good to do that
1991 * before stopping the time measurements. */
1992 mdoutf_tng_close(outf);
1994 /* Stop measuring walltime */
1995 walltime_accounting_end(walltime_accounting);
1997 if (bRerunMD && MASTER(cr))
2002 if (!(cr->duty & DUTY_PME))
2004 /* Tell the PME only node to finish */
2005 gmx_pme_send_finish(cr);
2010 if (ir->nstcalcenergy > 0 && !bRerunMD)
2012 print_ebin(mdoutf_get_fp_ene(outf), FALSE, FALSE, FALSE, fplog, step, t,
2013 eprAVER, FALSE, mdebin, fcd, groups, &(ir->opts));
2020 if (ir->nstlist == -1 && nlh.nns > 0 && fplog)
2022 fprintf(fplog, "Average neighborlist lifetime: %.1f steps, std.dev.: %.1f steps\n", nlh.s1/nlh.nns, sqrt(nlh.s2/nlh.nns - sqr(nlh.s1/nlh.nns)));
2023 fprintf(fplog, "Average number of atoms that crossed the half buffer length: %.1f\n\n", nlh.ab/nlh.nns);
2026 if (pme_loadbal != NULL)
2028 pme_loadbal_done(pme_loadbal, cr, fplog,
2032 if (shellfc && fplog)
2034 fprintf(fplog, "Fraction of iterations that converged: %.2f %%\n",
2035 (nconverged*100.0)/step_rel);
2036 fprintf(fplog, "Average number of force evaluations per MD step: %.2f\n\n",
2040 if (repl_ex_nst > 0 && MASTER(cr))
2042 print_replica_exchange_statistics(fplog, repl_ex);
2045 /* IMD cleanup, if bIMD is TRUE. */
2046 IMD_finalize(ir->bIMD, ir->imd);
2048 walltime_accounting_set_nsteps_done(walltime_accounting, step_rel);