2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team,
6 * check out http://www.gromacs.org for more information.
7 * Copyright (c) 2012,2013, by the GROMACS development team, led by
8 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
9 * others, as listed in the AUTHORS file in the top-level source
10 * directory and at http://www.gromacs.org.
12 * GROMACS is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public License
14 * as published by the Free Software Foundation; either version 2.1
15 * of the License, or (at your option) any later version.
17 * GROMACS is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with GROMACS; if not, see
24 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 * If you want to redistribute modifications to GROMACS, please
28 * consider that scientific software is very special. Version
29 * control is crucial - bugs must be traceable. We will be happy to
30 * consider code for inclusion in the official distribution, but
31 * derived work must not be called official GROMACS. Details are found
32 * in the README & COPYING files - if they are missing, get the
33 * official version at http://www.gromacs.org.
35 * To help us fund GROMACS development, we humbly ask that you cite
36 * the research papers on the package. Check out http://www.gromacs.org.
46 #include "types/commrec.h"
57 #include "gmx_random.h"
71 #include "gmx_wallcycle.h"
72 #include "gmx_omp_nthreads.h"
75 /* For debugging, start at v(-dt/2) for velocity verlet -- uncomment next line */
76 /*#define STARTFROMDT2*/
96 /* The random state for ngaussrand threads.
97 * Normal thermostats need just 1 random number generator,
98 * but SD and BD with OpenMP parallelization need 1 for each thread.
101 gmx_rng_t *gaussrand;
106 gmx_sd_sigma_t *sdsig;
109 /* andersen temperature control stuff */
110 gmx_bool *randomize_group;
114 typedef struct gmx_update
117 /* xprime for constraint algorithms */
121 /* variable size arrays for andersen */
124 gmx_bool randatom_list_init;
126 /* Variables for the deform algorithm */
127 gmx_large_int_t deformref_step;
128 matrix deformref_box;
/* Leap-frog MD update: writes new positions into xprime (and updates v)
 * for home atoms [start, nrend). Three paths are visible below:
 * (1) extended-ensemble coupling (bNH Nose-Hoover and/or bPR
 * Parrinello-Rahman), (2) Berendsen/v-rescale with freeze groups or NEMD,
 * (3) plain Berendsen/v-rescale.
 * NOTE(review): several original lines (declarations, braces, group-index
 * lookups) are missing from this extract; do not treat it as complete. */
132 static void do_update_md(int start, int nrend, double dt,
133 t_grp_tcstat *tcstat,
135 gmx_bool bNEMD, t_grp_acc *gstat, rvec accel[],
138 unsigned short ptype[], unsigned short cFREEZE[],
139 unsigned short cACC[], unsigned short cTC[],
140 rvec x[], rvec xprime[], rvec v[],
142 gmx_bool bNH, gmx_bool bPR)
145 int gf = 0, ga = 0, gt = 0;
147 real vn, vv, va, vb, vnrel;
153 /* Update with coupling to extended ensembles, used for
154 * Nose-Hoover and Parrinello-Rahman coupling
155 * Nose-Hoover uses the reversible leap-frog integrator from
156 * Holian et al. Phys Rev E 52(3) : 2338, 1995
158 for (n = start; n < nrend; n++)
/* lg is the temperature-coupling scaling factor for this atom's tc group */
173 lg = tcstat[gt].lambda;
/* vrel = velocity relative to the acceleration-group mean velocity u */
178 rvec_sub(v[n], gstat[ga].u, vrel);
180 for (d = 0; d < DIM; d++)
/* virtual sites and shells are not integrated; frozen dims keep x */
182 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
/* semi-implicit update: NH friction vxi and PR coupling matrix M */
184 vnrel = (lg*vrel[d] + dt*(imass*f[n][d] - 0.5*vxi*vrel[d]
185 - iprod(M[d], vrel)))/(1 + 0.5*vxi*dt);
186 /* do not scale the mean velocities u */
187 vn = gstat[ga].u[d] + accel[ga][d]*dt + vnrel;
189 xprime[n][d] = x[n][d]+vn*dt;
194 xprime[n][d] = x[n][d];
199 else if (cFREEZE != NULL ||
200 nFreeze[0][XX] || nFreeze[0][YY] || nFreeze[0][ZZ] ||
203 /* Update with Berendsen/v-rescale coupling and freeze or NEMD */
204 for (n = start; n < nrend; n++)
206 w_dt = invmass[n]*dt;
219 lg = tcstat[gt].lambda;
221 for (d = 0; d < DIM; d++)
224 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
226 vv = lg*vn + f[n][d]*w_dt;
228 /* do not scale the mean velocities u */
230 va = vv + accel[ga][d]*dt;
231 vb = va + (1.0-lg)*u;
233 xprime[n][d] = x[n][d]+vb*dt;
238 xprime[n][d] = x[n][d];
245 /* Plain update with Berendsen/v-rescale coupling */
246 for (n = start; n < nrend; n++)
248 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
250 w_dt = invmass[n]*dt;
255 lg = tcstat[gt].lambda;
257 for (d = 0; d < DIM; d++)
259 vn = lg*v[n][d] + f[n][d]*w_dt;
261 xprime[n][d] = x[n][d] + vn*dt;
/* non-integrated particles (vsites/shells): position unchanged */
266 for (d = 0; d < DIM; d++)
269 xprime[n][d] = x[n][d];
/* Velocity half of the velocity-verlet update for atoms [start, nrend):
 * v <- mv1*(mv1*v + 0.5*dt*mv2*f/m) + 0.5*a*dt, where mv1/mv2 come from
 * the barostat coupling (veta, alpha) when bExtended is set.
 * NOTE(review): lines defining mv1 and the non-extended branch are
 * missing from this extract. */
276 static void do_update_vv_vel(int start, int nrend, double dt,
277 t_grp_tcstat *tcstat, t_grp_acc *gstat,
278 rvec accel[], ivec nFreeze[], real invmass[],
279 unsigned short ptype[], unsigned short cFREEZE[],
280 unsigned short cACC[], rvec v[], rvec f[],
281 gmx_bool bExtended, real veta, real alpha)
286 real u, vn, vv, va, vb, vnrel;
/* g couples the box velocity veta into the particle update */
292 g = 0.25*dt*veta*alpha;
/* series_sinhx: series expansion of sinh(x)/x, stable for small g */
294 mv2 = series_sinhx(g);
301 for (n = start; n < nrend; n++)
303 w_dt = invmass[n]*dt;
313 for (d = 0; d < DIM; d++)
315 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
317 v[n][d] = mv1*(mv1*v[n][d] + 0.5*(w_dt*mv2*f[n][d]))+0.5*accel[ga][d]*dt;
325 } /* do_update_vv_vel */
/* Position half of the velocity-verlet update for atoms [start, nrend):
 * xprime <- mr1*(mr1*x + mr2*dt*v), with mr1/mr2 derived from the box
 * velocity veta (via series_sinhx) when bExtended is set.
 * NOTE(review): the lines computing g and mr1 are missing from this
 * extract. */
327 static void do_update_vv_pos(int start, int nrend, double dt,
328 t_grp_tcstat *tcstat, t_grp_acc *gstat,
329 rvec accel[], ivec nFreeze[], real invmass[],
330 unsigned short ptype[], unsigned short cFREEZE[],
331 rvec x[], rvec xprime[], rvec v[],
332 rvec f[], gmx_bool bExtended, real veta, real alpha)
339 /* Would it make more sense if Parrinello-Rahman was put here? */
344 mr2 = series_sinhx(g);
352 for (n = start; n < nrend; n++)
360 for (d = 0; d < DIM; d++)
362 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
364 xprime[n][d] = mr1*(mr1*x[n][d]+mr2*dt*v[n][d]);
/* frozen dims and non-integrated particles keep the old position */
368 xprime[n][d] = x[n][d];
372 } /* do_update_vv_pos */
/* Leap-frog update with a cosine-shaped acceleration profile along z,
 * used for viscosity calculations: each atom gets an extra x-acceleration
 * proportional to cos(2*pi*z/box_z). Two paths: extended-ensemble
 * (Nose-Hoover/Parrinello-Rahman) and plain Berendsen-style coupling.
 * NOTE(review): several original lines are missing from this extract. */
374 static void do_update_visc(int start, int nrend, double dt,
375 t_grp_tcstat *tcstat,
378 unsigned short ptype[], unsigned short cTC[],
379 rvec x[], rvec xprime[], rvec v[],
380 rvec f[], matrix M, matrix box, real
381 cos_accel, real vcos,
382 gmx_bool bNH, gmx_bool bPR)
387 real lg, vxi = 0, vv;
/* wave number of the cosine profile: one period across the box in z */
392 fac = 2*M_PI/(box[ZZ][ZZ]);
396 /* Update with coupling to extended ensembles, used for
397 * Nose-Hoover and Parrinello-Rahman coupling
399 for (n = start; n < nrend; n++)
406 lg = tcstat[gt].lambda;
407 cosz = cos(fac*x[n][ZZ]);
409 copy_rvec(v[n], vrel);
417 for (d = 0; d < DIM; d++)
421 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
423 vn = (lg*vrel[d] + dt*(imass*f[n][d] - 0.5*vxi*vrel[d]
424 - iprod(M[d], vrel)))/(1 + 0.5*vxi*dt);
/* re-add the (unscaled) cosine velocity profile plus its acceleration */
427 vn += vc + dt*cosz*cos_accel;
430 xprime[n][d] = x[n][d]+vn*dt;
434 xprime[n][d] = x[n][d];
441 /* Classic version of update, used with berendsen coupling */
442 for (n = start; n < nrend; n++)
444 w_dt = invmass[n]*dt;
449 lg = tcstat[gt].lambda;
450 cosz = cos(fac*x[n][ZZ]);
452 for (d = 0; d < DIM; d++)
456 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
461 /* Do not scale the cosine velocity profile */
462 vv = vc + lg*(vn - vc + f[n][d]*w_dt);
463 /* Add the cosine acceleration profile */
464 vv += dt*cosz*cos_accel;
468 vv = lg*(vn + f[n][d]*w_dt);
471 xprime[n][d] = x[n][d]+vv*dt;
476 xprime[n][d] = x[n][d];
483 /* Allocates and initializes sd->gaussrand[i] for i=1, i<sd->ngaussrand,
484 * Using seeds generated from sd->gaussrand[0].
486 static void init_multiple_gaussrand(gmx_stochd_t *sd)
491 ngr = sd->ngaussrand;
/* draw one 32-bit seed per extra generator from generator 0 */
494 for (i = 1; i < ngr; i++)
496 seed[i] = gmx_rng_uniform_uint32(sd->gaussrand[0]);
/* one generator per update thread is assumed; anything else is a bug */
499 if (ngr != gmx_omp_nthreads_get(emntUpdate))
501 gmx_incons("The number of Gaussian number generators should be equal to gmx_omp_nthreads_get(emntUpdate)");
504 #pragma omp parallel num_threads(gmx_omp_nthreads_get(emntUpdate))
508 th = gmx_omp_get_thread_num();
511 /* Initialize on each thread to get memory allocated thread-local */
512 sd->gaussrand[th] = gmx_rng_init(seed[th]);
/* Allocates and fills a gmx_stochd_t for stochastic dynamics: random
 * generators (one per thread for BD/SD), per-tc-group SD integration
 * constants (b, c, d from gdt = delta_t/tau_t), BD random factors, or
 * Andersen randomization flags/Boltzmann factors, depending on the
 * integrator and thermostat.
 * NOTE(review): allocation of sd itself and several branch lines are
 * missing from this extract. */
519 static gmx_stochd_t *init_stochd(FILE *fplog, t_inputrec *ir, int nthreads)
528 /* Initiate random number generator for langevin type dynamics,
529 * for BD, SD or velocity rescaling temperature coupling.
531 if (ir->eI == eiBD || EI_SD(ir->eI))
/* BD/SD draw random numbers inside the OpenMP update loop, so each
 * thread needs its own generator */
533 sd->ngaussrand = nthreads;
539 snew(sd->gaussrand, sd->ngaussrand);
541 /* Initialize the first random generator */
542 sd->gaussrand[0] = gmx_rng_init(ir->ld_seed);
544 if (sd->ngaussrand > 1)
546 /* Initialize the rest of the random number generators,
547 * using the first one to generate seeds.
549 init_multiple_gaussrand(sd);
552 ngtc = ir->opts.ngtc;
556 snew(sd->bd_rf, ngtc);
558 else if (EI_SD(ir->eI))
561 snew(sd->sdsig, ngtc);
/* per-tc-group SD constants; gdt is the friction per step */
564 for (n = 0; n < ngtc; n++)
566 if (ir->opts.tau_t[n] > 0)
568 sdc[n].gdt = ir->delta_t/ir->opts.tau_t[n];
569 sdc[n].eph = exp(sdc[n].gdt/2);
570 sdc[n].emh = exp(-sdc[n].gdt/2);
571 sdc[n].em = exp(-sdc[n].gdt);
575 /* No friction and noise on this group */
/* closed forms are numerically unstable for small gdt, hence the cutoff */
581 if (sdc[n].gdt >= 0.05)
583 sdc[n].b = sdc[n].gdt*(sdc[n].eph*sdc[n].eph - 1)
584 - 4*(sdc[n].eph - 1)*(sdc[n].eph - 1);
585 sdc[n].c = sdc[n].gdt - 3 + 4*sdc[n].emh - sdc[n].em;
586 sdc[n].d = 2 - sdc[n].eph - sdc[n].emh;
591 /* Seventh order expansions for small y */
592 sdc[n].b = y*y*y*y*(1/3.0+y*(1/3.0+y*(17/90.0+y*7/9.0)));
593 sdc[n].c = y*y*y*(2/3.0+y*(-1/2.0+y*(7/30.0+y*(-1/12.0+y*31/1260.0))));
594 sdc[n].d = y*y*(-1+y*y*(-1/12.0-y*y/360.0));
598 fprintf(debug, "SD const tc-grp %d: b %g c %g d %g\n",
599 n, sdc[n].b, sdc[n].c, sdc[n].d);
603 else if (ETC_ANDERSEN(ir->etc))
612 snew(sd->randomize_group, ngtc);
613 snew(sd->boltzfac, ngtc);
615 /* for now, assume that all groups, if randomized, are randomized at the same rate, i.e. tau_t is the same. */
616 /* since constraint groups don't necessarily match up with temperature groups! This is checked in readir.c */
618 for (n = 0; n < ngtc; n++)
620 reft = max(0.0, opts->ref_t[n]);
621 if ((opts->tau_t[n] > 0) && (reft > 0)) /* tau_t or ref_t = 0 means that no randomization is done */
623 sd->randomize_group[n] = TRUE;
624 sd->boltzfac[n] = BOLTZ*opts->ref_t[n];
628 sd->randomize_group[n] = FALSE;
/* Copies the state of random generator 0 into the checkpoint state. */
635 void get_stochd_state(gmx_update_t upd, t_state *state)
637 /* Note that we only get the state of the first random generator,
638 * even if there are multiple. This avoids repetition.
640 gmx_rng_get_state(upd->sd->gaussrand[0], state->ld_rng, state->ld_rngi);
/* Restores random generator 0 from the checkpoint state; any additional
 * per-thread generators are destroyed and reseeded from generator 0
 * rather than restored exactly. */
643 void set_stochd_state(gmx_update_t upd, t_state *state)
650 gmx_rng_set_state(sd->gaussrand[0], state->ld_rng, state->ld_rngi[0]);
652 if (sd->ngaussrand > 1)
654 /* We only end up here with SD or BD with OpenMP.
655 * Destroy and reinitialize the rest of the random number generators,
656 * using seeds generated from the first one.
657 * Although this doesn't recover the previous state,
658 * it at least avoids repetition, which is most important.
659 * Exactly restoring states with all MPI+OpenMP setups is difficult
660 * and as the integrator is random to start with, doesn't gain us much.
662 for (i = 1; i < sd->ngaussrand; i++)
664 gmx_rng_destroy(sd->gaussrand[i]);
667 init_multiple_gaussrand(sd);
/* Allocates a gmx_update_t; creates the stochastic-dynamics data when the
 * integrator/thermostat needs random numbers (BD, SD, v-rescale,
 * Andersen). NOTE(review): the snew of upd itself and the return are
 * missing from this extract. */
671 gmx_update_t init_update(FILE *fplog, t_inputrec *ir)
677 if (ir->eI == eiBD || EI_SD(ir->eI) || ir->etc == etcVRESCALE || ETC_ANDERSEN(ir->etc))
679 upd->sd = init_stochd(fplog, ir, gmx_omp_nthreads_get(emntUpdate));
684 upd->randatom = NULL;
685 upd->randatom_list = NULL;
686 upd->randatom_list_init = FALSE; /* we have not yet cleared the data structure at this point */
/* One-step (SD1) stochastic-dynamics update: exponential friction decay
 * plus a Gaussian random velocity kick, then a leap-frog position step.
 * NOTE(review): the per-atom group-index lookups and loop-variable
 * declarations are missing from this extract. */
691 static void do_update_sd1(gmx_stochd_t *sd,
693 int start, int nrend, double dt,
694 rvec accel[], ivec nFreeze[],
695 real invmass[], unsigned short ptype[],
696 unsigned short cFREEZE[], unsigned short cACC[],
697 unsigned short cTC[],
698 rvec x[], rvec xprime[], rvec v[], rvec f[],
700 int ngtc, real tau_t[], real ref_t[])
705 int gf = 0, ga = 0, gt = 0;
/* per-tc-group standard deviation of the random velocity term */
712 for (n = 0; n < ngtc; n++)
715 /* The mass is accounted for later, since this differs per atom */
716 sig[n].V = sqrt(kT*(1 - sdc[n].em*sdc[n].em));
719 for (n = start; n < nrend; n++)
/* ism = 1/sqrt(m): converts the group sigma to this atom's sigma */
721 ism = sqrt(invmass[n]);
735 for (d = 0; d < DIM; d++)
737 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
739 sd_V = ism*sig[gt].V*gmx_rng_gaussian_table(gaussrand);
741 v[n][d] = v[n][d]*sdc[gt].em
742 + (invmass[n]*f[n][d] + accel[ga][d])*tau_t[gt]*(1 - sdc[gt].em)
745 xprime[n][d] = x[n][d] + v[n][d]*dt;
750 xprime[n][d] = x[n][d];
/* Grows the SD2 per-atom work buffer sd_V to hold at least nrend atoms,
 * with DD-style over-allocation to limit reallocation frequency. */
756 static void check_sd2_work_data_allocation(gmx_stochd_t *sd, int nrend)
758 if (nrend > sd->sd_V_nalloc)
760 sd->sd_V_nalloc = over_alloc_dd(nrend);
761 srenew(sd->sd_V, sd->sd_V_nalloc);
/* Precomputes the per-tc-group random-term standard deviations for the
 * two-step SD2 integrator (V, X and the half-step Yv, Yx terms). */
765 static void do_update_sd2_Tconsts(gmx_stochd_t *sd,
770 /* This is separated from the update below, because it is single threaded */
779 for (gt = 0; gt < ngtc; gt++)
781 kT = BOLTZ*ref_t[gt];
782 /* The mass is accounted for later, since this differs per atom */
783 sig[gt].V = sqrt(kT*(1-sdc[gt].em));
784 sig[gt].X = sqrt(kT*sqr(tau_t[gt])*sdc[gt].c);
785 sig[gt].Yv = sqrt(kT*sdc[gt].b/sdc[gt].c);
786 sig[gt].Yx = sqrt(kT*sqr(tau_t[gt])*sdc[gt].b/(1-sdc[gt].em));
/* Two-step (SD2) stochastic-dynamics update. Called twice per step:
 * first half updates v and xprime with fresh random terms; second half
 * (after constraints) corrects v from the constrained coordinates and
 * applies the remaining random displacement.
 * NOTE(review): the bFirstHalf branching and several declarations are
 * missing from this extract. */
790 static void do_update_sd2(gmx_stochd_t *sd,
793 int start, int nrend,
794 rvec accel[], ivec nFreeze[],
795 real invmass[], unsigned short ptype[],
796 unsigned short cFREEZE[], unsigned short cACC[],
797 unsigned short cTC[],
798 rvec x[], rvec xprime[], rvec v[], rvec f[],
805 /* The random part of the velocity update, generated in the first
806 * half of the update, needs to be remembered for the second half.
810 int gf = 0, ga = 0, gt = 0;
811 real vn = 0, Vmh, Xmh;
819 for (n = start; n < nrend; n++)
821 ism = sqrt(invmass[n]);
835 for (d = 0; d < DIM; d++)
841 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
/* first half: draw position (X) and velocity (V) random terms */
847 sd_X[n][d] = ism*sig[gt].X*gmx_rng_gaussian_table(gaussrand);
849 Vmh = sd_X[n][d]*sdc[gt].d/(tau_t[gt]*sdc[gt].c)
850 + ism*sig[gt].Yv*gmx_rng_gaussian_table(gaussrand);
851 sd_V[n][d] = ism*sig[gt].V*gmx_rng_gaussian_table(gaussrand);
853 v[n][d] = vn*sdc[gt].em
854 + (invmass[n]*f[n][d] + accel[ga][d])*tau_t[gt]*(1 - sdc[gt].em)
855 + sd_V[n][d] - sdc[gt].em*Vmh;
857 xprime[n][d] = x[n][d] + v[n][d]*tau_t[gt]*(sdc[gt].eph - sdc[gt].emh);
862 /* Correct the velocities for the constraints.
863 * This operation introduces some inaccuracy,
864 * since the velocity is determined from differences in coordinates.
867 (xprime[n][d] - x[n][d])/(tau_t[gt]*(sdc[gt].eph - sdc[gt].emh));
/* second half: remaining random displacement correlated with sd_V */
869 Xmh = sd_V[n][d]*tau_t[gt]*sdc[gt].d/(sdc[gt].em-1)
870 + ism*sig[gt].Yx*gmx_rng_gaussian_table(gaussrand);
871 sd_X[n][d] = ism*sig[gt].X*gmx_rng_gaussian_table(gaussrand);
873 xprime[n][d] += sd_X[n][d] - Xmh;
882 xprime[n][d] = x[n][d];
/* Precomputes the per-tc-group Brownian-dynamics random-force factors.
 * With an explicit friction coefficient, rf = sqrt(2 kT / (gamma dt));
 * with gamma == 0 the mass-dependent part is applied later per atom. */
889 static void do_update_bd_Tconsts(double dt, real friction_coefficient,
890 int ngtc, const real ref_t[],
893 /* This is separated from the update below, because it is single threaded */
896 if (friction_coefficient != 0)
898 for (gt = 0; gt < ngtc; gt++)
900 rf[gt] = sqrt(2.0*BOLTZ*ref_t[gt]/(friction_coefficient*dt));
905 for (gt = 0; gt < ngtc; gt++)
907 rf[gt] = sqrt(2.0*BOLTZ*ref_t[gt]);
/* Brownian-dynamics update for atoms [start, nrend): overdamped motion,
 * v = f/gamma + random term, then x' = x + v*dt. With gamma == 0 the
 * friction is taken as mass/tau_t via the pre-scaled invmass (see note
 * below). */
912 static void do_update_bd(int start, int nrend, double dt,
914 real invmass[], unsigned short ptype[],
915 unsigned short cFREEZE[], unsigned short cTC[],
916 rvec x[], rvec xprime[], rvec v[],
917 rvec f[], real friction_coefficient,
918 real *rf, gmx_rng_t gaussrand)
920 /* note -- these appear to be full step velocities . . . */
926 if (friction_coefficient != 0)
928 invfr = 1.0/friction_coefficient;
931 for (n = start; (n < nrend); n++)
941 for (d = 0; (d < DIM); d++)
943 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
945 if (friction_coefficient != 0)
947 vn = invfr*f[n][d] + rf[gt]*gmx_rng_gaussian_table(gaussrand);
951 /* NOTE: invmass = 2/(mass*friction_constant*dt) */
952 vn = 0.5*invmass[n]*f[n][d]*dt
953 + sqrt(0.5*invmass[n])*rf[gt]*gmx_rng_gaussian_table(gaussrand);
957 xprime[n][d] = x[n][d]+vn*dt;
962 xprime[n][d] = x[n][d];
/* Debug helper: prints positions, candidate positions, velocities and
 * forces for all atoms to fp under the given title. */
968 static void dump_it_all(FILE *fp, const char *title,
969 int natoms, rvec x[], rvec xp[], rvec v[], rvec f[])
974 fprintf(fp, "%s\n", title);
975 pr_rvecs(fp, 0, "x", x, natoms);
976 pr_rvecs(fp, 0, "xp", xp, natoms);
977 pr_rvecs(fp, 0, "v", v, natoms);
978 pr_rvecs(fp, 0, "f", f, natoms);
/* Accumulates the kinetic-energy tensor per temperature-coupling group,
 * OpenMP-parallel over home atoms, summing into ekinf (full-step, when
 * bEkinAveVel) or ekinh (half-step). Also accumulates dekindl, the
 * free-energy mass-perturbation contribution. */
983 static void calc_ke_part_normal(rvec v[], t_grpopts *opts, t_mdatoms *md,
984 gmx_ekindata_t *ekind, t_nrnb *nrnb, gmx_bool bEkinAveVel,
985 gmx_bool bSaveEkinOld)
988 t_grp_tcstat *tcstat = ekind->tcstat;
989 t_grp_acc *grpstat = ekind->grpstat;
992 /* three main: VV with AveVel, vv with AveEkin, leap with AveEkin. Leap with AveVel is also
993 an option, but not supported now. Additionally, if we are doing iterations.
994 bEkinAveVel: If TRUE, we sum into ekin, if FALSE, into ekinh.
995 bSavEkinOld: If TRUE (in the case of iteration = bIterate is TRUE), we don't copy over the ekinh_old.
996 If FALSE, we overwrite it.
999 /* group velocities are calculated in update_ekindata and
1000 * accumulated in acumulate_groups.
1001 * Now the partial global and groups ekin.
1003 for (g = 0; (g < opts->ngtc); g++)
1008 copy_mat(tcstat[g].ekinh, tcstat[g].ekinh_old);
1012 clear_mat(tcstat[g].ekinf);
1016 clear_mat(tcstat[g].ekinh);
1020 tcstat[g].ekinscalef_nhc = 1.0; /* need to clear this -- logic is complicated! */
1023 ekind->dekindl_old = ekind->dekindl;
1025 nthread = gmx_omp_nthreads_get(emntUpdate);
1027 #pragma omp parallel for num_threads(nthread) schedule(static)
1028 for (thread = 0; thread < nthread; thread++)
1030 int start_t, end_t, n;
/* static partition of the home-atom range over threads */
1038 start_t = md->start + ((thread+0)*md->homenr)/nthread;
1039 end_t = md->start + ((thread+1)*md->homenr)/nthread;
/* each thread sums into its own work buffers; reduced below */
1041 ekin_sum = ekind->ekin_work[thread];
1042 dekindl_sum = ekind->dekindl_work[thread];
1044 for (gt = 0; gt < opts->ngtc; gt++)
1046 clear_mat(ekin_sum[gt]);
1052 for (n = start_t; n < end_t; n++)
1062 hm = 0.5*md->massT[n];
/* subtract the acceleration-group mean velocity u */
1064 for (d = 0; (d < DIM); d++)
1066 v_corrt[d] = v[n][d] - grpstat[ga].u[d];
1068 for (d = 0; (d < DIM); d++)
1070 for (m = 0; (m < DIM); m++)
1072 /* if we're computing a full step velocity, v_corrt[d] has v(t). Otherwise, v(t+dt/2) */
1073 ekin_sum[gt][m][d] += hm*v_corrt[m]*v_corrt[d];
1076 if (md->nMassPerturbed && md->bPerturbed[n])
1079 0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt, v_corrt);
/* serial reduction of the per-thread partial sums */
1085 for (thread = 0; thread < nthread; thread++)
1087 for (g = 0; g < opts->ngtc; g++)
1091 m_add(tcstat[g].ekinf, ekind->ekin_work[thread][g],
1096 m_add(tcstat[g].ekinh, ekind->ekin_work[thread][g],
1101 ekind->dekindl += *ekind->dekindl_work[thread];
1104 inc_nrnb(nrnb, eNR_EKIN, md->homenr);
/* Kinetic-energy accumulation for viscosity runs: subtracts the cosine
 * velocity profile (amplitude cosacc->vcos) from v before summing, and
 * also measures the current profile amplitude mvcos. Serial version of
 * calc_ke_part_normal. */
1107 static void calc_ke_part_visc(matrix box, rvec x[], rvec v[],
1108 t_grpopts *opts, t_mdatoms *md,
1109 gmx_ekindata_t *ekind,
1110 t_nrnb *nrnb, gmx_bool bEkinAveVel, gmx_bool bSaveEkinOld)
1112 int start = md->start, homenr = md->homenr;
1113 int g, d, n, m, gt = 0;
1116 t_grp_tcstat *tcstat = ekind->tcstat;
1117 t_cos_acc *cosacc = &(ekind->cosacc);
1122 for (g = 0; g < opts->ngtc; g++)
1124 copy_mat(ekind->tcstat[g].ekinh, ekind->tcstat[g].ekinh_old);
1125 clear_mat(ekind->tcstat[g].ekinh);
1127 ekind->dekindl_old = ekind->dekindl;
1129 fac = 2*M_PI/box[ZZ][ZZ];
1132 for (n = start; n < start+homenr; n++)
1138 hm = 0.5*md->massT[n];
1140 /* Note that the times of x and v differ by half a step */
1141 /* MRS -- would have to be changed for VV */
1142 cosz = cos(fac*x[n][ZZ]);
1143 /* Calculate the amplitude of the new velocity profile */
1144 mvcos += 2*cosz*md->massT[n]*v[n][XX];
1146 copy_rvec(v[n], v_corrt);
1147 /* Subtract the profile for the kinetic energy */
1148 v_corrt[XX] -= cosz*cosacc->vcos;
1149 for (d = 0; (d < DIM); d++)
1151 for (m = 0; (m < DIM); m++)
1153 /* if we're computing a full step velocity, v_corrt[d] has v(t). Otherwise, v(t+dt/2) */
1156 tcstat[gt].ekinf[m][d] += hm*v_corrt[m]*v_corrt[d];
1160 tcstat[gt].ekinh[m][d] += hm*v_corrt[m]*v_corrt[d];
1164 if (md->nPerturbed && md->bPerturbed[n])
1166 dekindl += 0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt, v_corrt);
1169 ekind->dekindl = dekindl;
1170 cosacc->mvcos = mvcos;
1172 inc_nrnb(nrnb, eNR_EKIN, homenr);
/* Dispatches kinetic-energy accumulation: the viscosity variant when a
 * cosine acceleration profile is active, the normal variant otherwise. */
1175 void calc_ke_part(t_state *state, t_grpopts *opts, t_mdatoms *md,
1176 gmx_ekindata_t *ekind, t_nrnb *nrnb, gmx_bool bEkinAveVel, gmx_bool bSaveEkinOld)
1178 if (ekind->cosacc.cos_accel == 0)
1180 calc_ke_part_normal(state->v, opts, md, ekind, nrnb, bEkinAveVel, bSaveEkinOld);
1184 calc_ke_part_visc(state->box, state->x, state->v, opts, md, ekind, nrnb, bEkinAveVel, bSaveEkinOld);
/* Allocates the checkpointable kinetic-energy state: one tensor/scalar
 * slot per temperature-coupling group, zeroed scalars. */
1188 extern void init_ekinstate(ekinstate_t *ekinstate, const t_inputrec *ir)
1190 ekinstate->ekin_n = ir->opts.ngtc;
1191 snew(ekinstate->ekinh, ekinstate->ekin_n);
1192 snew(ekinstate->ekinf, ekinstate->ekin_n);
1193 snew(ekinstate->ekinh_old, ekinstate->ekin_n);
1194 snew(ekinstate->ekinscalef_nhc, ekinstate->ekin_n);
1195 snew(ekinstate->ekinscaleh_nhc, ekinstate->ekin_n);
1196 snew(ekinstate->vscale_nhc, ekinstate->ekin_n);
1197 ekinstate->dekindl = 0;
1198 ekinstate->mvcos = 0;
/* Copies the live kinetic-energy data (per-group tensors, NHC scaling
 * factors, totals) into the checkpointable ekinstate. */
1201 void update_ekinstate(ekinstate_t *ekinstate, gmx_ekindata_t *ekind)
1205 for (i = 0; i < ekinstate->ekin_n; i++)
1207 copy_mat(ekind->tcstat[i].ekinh, ekinstate->ekinh[i]);
1208 copy_mat(ekind->tcstat[i].ekinf, ekinstate->ekinf[i]);
1209 copy_mat(ekind->tcstat[i].ekinh_old, ekinstate->ekinh_old[i]);
1210 ekinstate->ekinscalef_nhc[i] = ekind->tcstat[i].ekinscalef_nhc;
1211 ekinstate->ekinscaleh_nhc[i] = ekind->tcstat[i].ekinscaleh_nhc;
1212 ekinstate->vscale_nhc[i] = ekind->tcstat[i].vscale_nhc;
1215 copy_mat(ekind->ekin, ekinstate->ekin_total);
1216 ekinstate->dekindl = ekind->dekindl;
1217 ekinstate->mvcos = ekind->cosacc.mvcos;
/* Inverse of update_ekinstate: restores ekind from a checkpointed
 * ekinstate on the master rank, then broadcasts everything to the other
 * ranks. NOTE(review): the MASTER(cr) guard lines are missing from this
 * extract; the first loop presumably runs only on the master. */
1221 void restore_ekinstate_from_state(t_commrec *cr,
1222 gmx_ekindata_t *ekind, ekinstate_t *ekinstate)
1228 for (i = 0; i < ekinstate->ekin_n; i++)
1230 copy_mat(ekinstate->ekinh[i], ekind->tcstat[i].ekinh);
1231 copy_mat(ekinstate->ekinf[i], ekind->tcstat[i].ekinf);
1232 copy_mat(ekinstate->ekinh_old[i], ekind->tcstat[i].ekinh_old);
1233 ekind->tcstat[i].ekinscalef_nhc = ekinstate->ekinscalef_nhc[i];
1234 ekind->tcstat[i].ekinscaleh_nhc = ekinstate->ekinscaleh_nhc[i];
1235 ekind->tcstat[i].vscale_nhc = ekinstate->vscale_nhc[i];
1238 copy_mat(ekinstate->ekin_total, ekind->ekin);
1240 ekind->dekindl = ekinstate->dekindl;
1241 ekind->cosacc.mvcos = ekinstate->mvcos;
1242 n = ekinstate->ekin_n;
/* broadcast group count first so non-master ranks know the loop bound */
1247 gmx_bcast(sizeof(n), &n, cr);
1248 for (i = 0; i < n; i++)
1250 gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh[0][0]),
1251 ekind->tcstat[i].ekinh[0], cr);
1252 gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinf[0][0]),
1253 ekind->tcstat[i].ekinf[0], cr);
1254 gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh_old[0][0]),
1255 ekind->tcstat[i].ekinh_old[0], cr);
1257 gmx_bcast(sizeof(ekind->tcstat[i].ekinscalef_nhc),
1258 &(ekind->tcstat[i].ekinscalef_nhc), cr);
1259 gmx_bcast(sizeof(ekind->tcstat[i].ekinscaleh_nhc),
1260 &(ekind->tcstat[i].ekinscaleh_nhc), cr);
1261 gmx_bcast(sizeof(ekind->tcstat[i].vscale_nhc),
1262 &(ekind->tcstat[i].vscale_nhc), cr);
1264 gmx_bcast(DIM*DIM*sizeof(ekind->ekin[0][0]),
1265 ekind->ekin[0], cr);
1267 gmx_bcast(sizeof(ekind->dekindl), &ekind->dekindl, cr);
1268 gmx_bcast(sizeof(ekind->cosacc.mvcos), &ekind->cosacc.mvcos, cr);
/* Records the reference step and box for the continuous box-deformation
 * algorithm; deform() measures elapsed time from this step. */
1272 void set_deform_reference_box(gmx_update_t upd, gmx_large_int_t step, matrix box)
1274 upd->deformref_step = step;
1275 copy_mat(box, upd->deformref_box);
/* Applies the constant-rate box deformation: builds the target box from
 * the reference box plus elapsed_time * ir->deform, wraps off-diagonal
 * elements back into range, then scales the home-atom coordinates and
 * (if given) the accumulated scaling matrix by the box change mu. */
1278 static void deform(gmx_update_t upd,
1279 int start, int homenr, rvec x[], matrix box, matrix *scale_tot,
1280 const t_inputrec *ir, gmx_large_int_t step)
1282 matrix bnew, invbox, mu;
1286 elapsed_time = (step + 1 - upd->deformref_step)*ir->delta_t;
1287 copy_mat(box, bnew);
1288 for (i = 0; i < DIM; i++)
1290 for (j = 0; j < DIM; j++)
1292 if (ir->deform[i][j] != 0)
1295 upd->deformref_box[i][j] + elapsed_time*ir->deform[i][j];
1299 /* We correct the off-diagonal elements,
1300 * which can grow indefinitely during shearing,
1301 * so the shifts do not get messed up.
1303 for (i = 1; i < DIM; i++)
1305 for (j = i-1; j >= 0; j--)
1307 while (bnew[i][j] - box[i][j] > 0.5*bnew[j][j])
1309 rvec_dec(bnew[i], bnew[j]);
1311 while (bnew[i][j] - box[i][j] < -0.5*bnew[j][j])
1313 rvec_inc(bnew[i], bnew[j]);
/* mu = bnew * box^-1 (upper-right-zero triangular matrices) */
1317 m_inv_ur0(box, invbox);
1318 copy_mat(bnew, box);
1319 mmul_ur0(box, invbox, mu);
1321 for (i = start; i < start+homenr; i++)
1323 x[i][XX] = mu[XX][XX]*x[i][XX]+mu[YY][XX]*x[i][YY]+mu[ZZ][XX]*x[i][ZZ];
1324 x[i][YY] = mu[YY][YY]*x[i][YY]+mu[ZZ][YY]*x[i][ZZ];
1325 x[i][ZZ] = mu[ZZ][ZZ]*x[i][ZZ];
1329 /* The transposes of the scaling matrices are stored,
1330 * so we need to do matrix multiplication in the inverse order.
1332 mmul_ur0(*scale_tot, mu, *scale_tot);
/* Applies temperature coupling at the appropriate steps (skipped for
 * Trotter-decomposed VV schemes, which couple elsewhere). Dispatches to
 * Berendsen, Nose-Hoover or v-rescale; for VV the velocities are rescaled
 * in place here, otherwise the per-group lambda factors are applied later
 * in the leap-frog update. */
1336 void update_tcouple(FILE *fplog,
1337 gmx_large_int_t step,
1338 t_inputrec *inputrec,
1340 gmx_ekindata_t *ekind,
1341 gmx_wallcycle_t wcycle,
1347 gmx_bool bTCouple = FALSE;
1349 int i, start, end, homenr, offset;
1351 /* if using vv with trotter decomposition methods, we do this elsewhere in the code */
1352 if (inputrec->etc != etcNO &&
1353 !(IR_NVT_TROTTER(inputrec) || IR_NPT_TROTTER(inputrec) || IR_NPH_TROTTER(inputrec)))
1355 /* We should only couple after a step where energies were determined (for leapfrog versions)
1356 or the step energies are determined, for velocity verlet versions */
1358 if (EI_VV(inputrec->eI))
1366 bTCouple = (inputrec->nsttcouple == 1 ||
1367 do_per_step(step+inputrec->nsttcouple-offset,
1368 inputrec->nsttcouple));
/* effective coupling interval in ps */
1373 dttc = inputrec->nsttcouple*inputrec->delta_t;
1375 switch (inputrec->etc)
1380 berendsen_tcoupl(inputrec, ekind, dttc);
1383 nosehoover_tcoupl(&(inputrec->opts), ekind, dttc,
1384 state->nosehoover_xi, state->nosehoover_vxi, MassQ);
1387 vrescale_tcoupl(inputrec, ekind, dttc,
1388 state->therm_integral, upd->sd->gaussrand[0]);
1391 /* rescale in place here */
1392 if (EI_VV(inputrec->eI))
1394 rescale_velocities(ekind, md, md->start, md->start+md->homenr, state->v);
1399 /* Set the T scaling lambda to 1 to have no scaling */
1400 for (i = 0; (i < inputrec->opts.ngtc); i++)
1402 ekind->tcstat[i].lambda = 1.0;
/* Applies pressure coupling at the appropriate steps (skipped for
 * Trotter-decomposed NPT/NPH, which couple in coupling.c). Initializes
 * pcoupl_mu to the identity, then dispatches to Berendsen or
 * Parrinello-Rahman to fill mu / M. */
1407 void update_pcouple(FILE *fplog,
1408 gmx_large_int_t step,
1409 t_inputrec *inputrec,
1413 gmx_wallcycle_t wcycle,
1417 gmx_bool bPCouple = FALSE;
1421 /* if using Trotter pressure, we do this in coupling.c, so we leave it false. */
1422 if (inputrec->epc != epcNO && (!(IR_NPT_TROTTER(inputrec) || IR_NPH_TROTTER(inputrec))))
1424 /* We should only couple after a step where energies were determined */
1425 bPCouple = (inputrec->nstpcouple == 1 ||
1426 do_per_step(step+inputrec->nstpcouple-1,
1427 inputrec->nstpcouple));
/* default: identity scaling matrix (no box change) */
1430 clear_mat(pcoupl_mu);
1431 for (i = 0; i < DIM; i++)
1433 pcoupl_mu[i][i] = 1.0;
1440 dtpc = inputrec->nstpcouple*inputrec->delta_t;
1442 switch (inputrec->epc)
1444 /* We can always pcoupl, even if we did not sum the energies
1445 * the previous step, since state->pres_prev is only updated
1446 * when the energies have been summed.
1450 case (epcBERENDSEN):
1453 berendsen_pcoupl(fplog, step, inputrec, dtpc, state->pres_prev, state->box,
1457 case (epcPARRINELLORAHMAN):
1458 parrinellorahman_pcoupl(fplog, step, inputrec, dtpc, state->pres_prev,
1459 state->box, state->box_rel, state->boxv,
1460 M, pcoupl_mu, bInitStep);
/* Returns the scratch coordinate buffer upd->xp, growing it to match
 * state->nalloc when needed. NOTE(review): the return statement is
 * missing from this extract. */
1468 static rvec *get_xprime(const t_state *state, gmx_update_t upd)
1470 if (state->nalloc > upd->xp_nalloc)
1472 upd->xp_nalloc = state->nalloc;
1473 srenew(upd->xp, upd->xp_nalloc);
/* Combines short-range forces f with long-range forces f_lr for
 * twin-range (nstcalclr) integration: f_lr <- f + (nstcalclr-1)*f_lr.
 * When constraints are present, first constrains a trial displacement
 * generated from the extra LR force so the constraint virial can be
 * corrected for it (stored in vir_lr_constr). */
1479 static void combine_forces(gmx_update_t upd,
1481 gmx_constr_t constr,
1482 t_inputrec *ir, t_mdatoms *md, t_idef *idef,
1484 gmx_large_int_t step,
1485 t_state *state, gmx_bool bMolPBC,
1486 int start, int nrend,
1487 rvec f[], rvec f_lr[],
1488 tensor *vir_lr_constr,
1493 /* f contains the short-range forces + the long range forces
1494 * which are stored separately in f_lr.
1497 if (constr != NULL && vir_lr_constr != NULL &&
1498 !(ir->eConstrAlg == econtSHAKE && ir->epc == epcNO))
1500 /* We need to constrain the LR forces separately,
1501 * because due to the different pre-factor for the SR and LR
1502 * forces in the update algorithm, we have to correct
1503 * the constraint virial for the nstcalclr-1 extra f_lr.
1504 * Constrain only the additional LR part of the force.
1506 /* MRS -- need to make sure this works with trotter integration -- the constraint calls may not be right.*/
1511 xp = get_xprime(state, upd);
/* fac = (nstcalclr-1)*dt^2 converts the extra force into a displacement */
1513 fac = (nstcalclr - 1)*ir->delta_t*ir->delta_t;
1515 for (i = md->start; i < md->start + md->homenr; i++)
1517 if (md->cFREEZE != NULL)
1519 gf = md->cFREEZE[i];
1521 for (d = 0; d < DIM; d++)
1523 if ((md->ptype[i] != eptVSite) &&
1524 (md->ptype[i] != eptShell) &&
1525 !ir->opts.nFreeze[gf][d])
1527 xp[i][d] = state->x[i][d] + fac*f_lr[i][d]*md->invmass[i];
1531 constrain(NULL, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, md,
1532 state->x, xp, xp, bMolPBC, state->box, state->lambda[efptBONDED], NULL,
1533 NULL, vir_lr_constr, nrnb, econqCoord, ir->epc == epcMTTK, state->veta, state->veta);
1536 /* Add nstcalclr-1 times the LR force to the sum of both forces
1537 * and store the result in forces_lr.
1539 for (i = start; i < nrend; i++)
1541 for (d = 0; d < DIM; d++)
1543 f_lr[i][d] = f[i][d] + (nstcalclr - 1)*f_lr[i][d];
/* Second stage of the position/velocity update: applies constraints to
 * the candidate coordinates (or to velocities for the VV first half),
 * accumulates the constraint virial, runs the second half of the SD2
 * integrator (with a second constraint pass), and finally unshifts and
 * copies the accepted coordinates upd->xp back into state->x.
 * NOTE(review): many original lines (guards, declarations, braces) are
 * missing from this extract; the visible calls show the overall flow
 * only. */
1548 void update_constraints(FILE *fplog,
1549 gmx_large_int_t step,
1550 real *dvdlambda, /* the contribution to be added to the bonded interactions */
1551 t_inputrec *inputrec, /* input record and box stuff */
1552 gmx_ekindata_t *ekind,
1557 rvec force[], /* forces on home particles */
1560 tensor vir, /* tensors for virial and ekin, needed for computing */
1563 gmx_wallcycle_t wcycle,
1565 gmx_constr_t constr,
1567 gmx_bool bFirstHalf,
1571 gmx_bool bExtended, bLastStep, bLog = FALSE, bEner = FALSE, bDoConstr = FALSE;
1574 int start, homenr, nrend, i, n, m, g, d;
1576 rvec *vbuf, *xprime = NULL;
/* leap-frog has no meaningful "first half" constraint pass */
1583 if (bFirstHalf && !EI_VV(inputrec->eI))
1588 /* for now, SD update is here -- though it really seems like it
1589 should be reformulated as a velocity verlet method, since it has two parts */
1592 homenr = md->homenr;
1593 nrend = start+homenr;
1595 dt = inputrec->delta_t;
1600 * APPLY CONSTRAINTS:
1603 * When doing PR pressure coupling we have to constrain the
1604 * bonds in each iteration. If we are only using Nose-Hoover tcoupling
1605 * it is enough to do this once though, since the relative velocities
1606 * after this will be normal to the bond vector
1611 /* clear out constraints before applying */
1612 clear_mat(vir_part);
1614 xprime = get_xprime(state, upd);
1616 bLastStep = (step == inputrec->init_step+inputrec->nsteps);
1617 bLog = (do_per_step(step, inputrec->nstlog) || bLastStep || (step < 0));
1618 bEner = (do_per_step(step, inputrec->nstenergy) || bLastStep);
1619 /* Constrain the coordinates xprime */
1620 wallcycle_start(wcycle, ewcCONSTR);
/* VV first half constrains velocities (econqVeloc), not coordinates */
1621 if (EI_VV(inputrec->eI) && bFirstHalf)
1623 constrain(NULL, bLog, bEner, constr, idef,
1624 inputrec, ekind, cr, step, 1, md,
1625 state->x, state->v, state->v,
1626 bMolPBC, state->box,
1627 state->lambda[efptBONDED], dvdlambda,
1628 NULL, bCalcVir ? &vir_con : NULL, nrnb, econqVeloc,
1629 inputrec->epc == epcMTTK, state->veta, vetanew);
1633 constrain(NULL, bLog, bEner, constr, idef,
1634 inputrec, ekind, cr, step, 1, md,
1635 state->x, xprime, NULL,
1636 bMolPBC, state->box,
1637 state->lambda[efptBONDED], dvdlambda,
1638 state->v, bCalcVir ? &vir_con : NULL, nrnb, econqCoord,
1639 inputrec->epc == epcMTTK, state->veta, state->veta);
1641 wallcycle_stop(wcycle, ewcCONSTR);
1645 dump_it_all(fplog, "After Shake",
1646 state->natoms, state->x, xprime, state->v, force);
1650 if (inputrec->eI == eiSD2)
1652 /* A correction factor eph is needed for the SD constraint force */
1653 /* Here we can, unfortunately, not have proper corrections
1654 * for different friction constants, so we use the first one.
1656 for (i = 0; i < DIM; i++)
1658 for (m = 0; m < DIM; m++)
1660 vir_part[i][m] += upd->sd->sdc[0].eph*vir_con[i][m];
1666 m_add(vir_part, vir_con, vir_part);
1670 pr_rvecs(debug, 0, "constraint virial", vir_part, DIM);
/* SD2: run the second half of the stochastic update after constraining */
1676 if ((inputrec->eI == eiSD2) && !(bFirstHalf))
1678 xprime = get_xprime(state, upd);
1680 nth = gmx_omp_nthreads_get(emntUpdate);
1682 #pragma omp parallel for num_threads(nth) schedule(static)
1683 for (th = 0; th < nth; th++)
1685 int start_th, end_th;
1687 start_th = start + ((nrend-start)* th )/nth;
1688 end_th = start + ((nrend-start)*(th+1))/nth;
1690 /* The second part of the SD integration */
1691 do_update_sd2(upd->sd, upd->sd->gaussrand[th],
1692 FALSE, start_th, end_th,
1693 inputrec->opts.acc, inputrec->opts.nFreeze,
1694 md->invmass, md->ptype,
1695 md->cFREEZE, md->cACC, md->cTC,
1696 state->x, xprime, state->v, force, state->sd_X,
1697 inputrec->opts.tau_t,
1700 inc_nrnb(nrnb, eNR_UPDATE, homenr);
1704 /* Constrain the coordinates xprime */
1705 wallcycle_start(wcycle, ewcCONSTR);
1706 constrain(NULL, bLog, bEner, constr, idef,
1707 inputrec, NULL, cr, step, 1, md,
1708 state->x, xprime, NULL,
1709 bMolPBC, state->box,
1710 state->lambda[efptBONDED], dvdlambda,
1711 NULL, NULL, nrnb, econqCoord, FALSE, 0, 0);
1712 wallcycle_stop(wcycle, ewcCONSTR);
1716 /* We must always unshift after updating coordinates; if we did not shake
1717 x was shifted in do_force */
1719 if (!(bFirstHalf)) /* in the first half of vv, no shift. */
1721 if (graph && (graph->nnodes > 0))
1723 unshift_x(graph, state->box, state->x, upd->xp);
1724 if (TRICLINIC(state->box))
1726 inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
1730 inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
/* no graph: plain parallel copy of the accepted coordinates into state */
1735 #pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntUpdate)) schedule(static)
1736 for (i = start; i < nrend; i++)
1738 copy_rvec(upd->xp[i], state->x[i]);
1742 dump_it_all(fplog, "After unshift",
1743 state->natoms, state->x, upd->xp, state->v, force);
1745 /* ############# END the update of velocities and positions ######### */
1748 void update_box(FILE *fplog,
1749 gmx_large_int_t step,
1750 t_inputrec *inputrec, /* input record and box stuff */
1754 rvec force[], /* forces on home particles */
1758 gmx_wallcycle_t wcycle,
1761 gmx_bool bFirstHalf)
/* Updates the periodic box according to the active pressure-coupling
 * scheme and, where the scheme requires it, rescales the home-particle
 * coordinates to match.
 * NOTE(review): several lines of this function (braces, some parameters,
 * other switch cases) are missing from this excerpt; the comments below
 * refer only to the statements that are visible here. */
1763 gmx_bool bExtended, bLastStep, bLog = FALSE, bEner = FALSE;
1766 int start, homenr, nrend, i, n, m, g;
/* Range of home (locally owned) particles is [start, nrend). */
1770 homenr = md->homenr;
1771 nrend = start+homenr;
/* NOTE(review): these three lines are the tail of the bExtended
 * assignment (the "bExtended =" line is elided); presumably it flags
 * extended-ensemble coupling: Nose-Hoover T-coupling or
 * Parrinello-Rahman/MTTK P-coupling — confirm against the full source. */
1774 (inputrec->etc == etcNOSEHOOVER) ||
1775 (inputrec->epc == epcPARRINELLORAHMAN) ||
1776 (inputrec->epc == epcMTTK);
1778 dt = inputrec->delta_t;
1782 /* now update boxes */
1783 switch (inputrec->epc)
1787 case (epcBERENDSEN):
/* Berendsen: scale box and home coordinates with the matrix pcoupl_mu. */
1788 berendsen_pscale(inputrec, pcoupl_mu, state->box, state->box_rel,
1789 start, homenr, state->x, md->cFREEZE, nrnb);
1791 case (epcPARRINELLORAHMAN):
1792 /* The box velocities were updated in do_pr_pcoupl in the update
1793 * iteration, but we don't change the box vectors until we get here
1794 * since we need to be able to shift/unshift above. */
/* Integrate the lower-triangular box vectors with the box velocities. */
1796 for (i = 0; i < DIM; i++)
1798 for (m = 0; m <= i; m++)
1800 state->box[i][m] += dt*state->boxv[i][m];
1803 preserve_box_shape(inputrec, state->box_rel, state->box);
1805 /* Scale the coordinates */
1806 for (n = start; (n < start+homenr); n++)
1808 tmvmul_ur0(pcoupl_mu, state->x[n], state->x[n]);
/* NOTE(review): this inner switch is presumably the epcMTTK case of the
 * outer switch (its case label is elided) — confirm against full source. */
1812 switch (inputrec->epct)
1814 case (epctISOTROPIC):
1815 /* DIM * eta = ln V. so DIM*eta_new = DIM*eta_old + DIM*dt*veta =>
1816 ln V_new = ln V_old + 3*dt*veta => V_new = V_old*exp(3*dt*veta) =>
1817 Side length scales as exp(veta*dt) */
1819 msmul(state->box, exp(state->veta*dt), state->box);
1821 /* Relate veta to boxv. veta = d(eta)/dT = (1/DIM)*1/V dV/dT.
1822 o If we assume isotropic scaling, and box length scaling
1823 factor L, then V = L^DIM (det(M)). So dV/dt = DIM
1824 L^(DIM-1) dL/dt det(M), and veta = (1/L) dL/dt. The
1825 determinant of B is L^DIM det(M), and the determinant
1826 of dB/dt is (dL/dT)^DIM det (M). veta will be
1827 (det(dB/dT)/det(B))^(1/3). Then since M =
1828 B_new*(vol_new)^(1/3), dB/dT_new = (veta_new)*B(new). */
1830 msmul(state->box, state->veta, state->boxv);
/* Accumulate the total coordinate scaling applied so far in *scale_tot;
 * not needed for the Trotter-decomposition NPT/NPH schemes. */
1840 if ((!(IR_NPT_TROTTER(inputrec) || IR_NPH_TROTTER(inputrec))) && scale_tot)
1842 /* The transposes of the scaling matrices are stored,
1843 * therefore we need to reverse the order in the multiplication. */
1845 mmul_ur0(*scale_tot, pcoupl_mu, *scale_tot);
/* Apply continuous box deformation when requested in the input record. */
1848 if (DEFORM(*inputrec))
1850 deform(upd, start, homenr, state->x, state->box, scale_tot, inputrec, step);
/* Debug dump of coordinates and velocities after the box update. */
1853 dump_it_all(fplog, "After update",
1854 state->natoms, state->x, upd->xp, state->v, force);
1857 void update_coords(FILE *fplog,
1858 gmx_large_int_t step,
1859 t_inputrec *inputrec, /* input record and box stuff */
1863 rvec *f, /* forces on home particles */
1866 tensor *vir_lr_constr,
1868 gmx_ekindata_t *ekind,
1870 gmx_wallcycle_t wcycle,
1874 t_commrec *cr, /* these shouldn't be here -- need to think about it */
1876 gmx_constr_t constr,
/* Performs the integrator-specific update of positions (and, for velocity
 * Verlet, velocities) of the home particles into xprime, parallelized over
 * OpenMP threads. Also refreshes NMR-restraint history and, for twin-range
 * (nstcalclr > 1) non-VV runs, prepares combined short+long-range forces.
 * NOTE(review): many lines of this function (braces, several parameters,
 * case labels, some call arguments) are missing from this excerpt; the
 * comments below refer only to the statements that are visible here. */
1879 gmx_bool bNH, bPR, bLastStep, bLog = FALSE, bEner = FALSE;
1881 real *imass, *imassin;
1884 int start, homenr, nrend, i, j, d, n, m, g;
1885 int blen0, blen1, iatom, jatom, nshake, nsettle, nconstr, nexpand;
1888 rvec *vcom, *xcom, *vall, *xall, *xin, *vin, *forcein, *fall, *xpall, *xprimein, *xprime;
1891 /* Running the velocity half does nothing except for velocity verlet */
1892 if ((UpdatePart == etrtVELOCITY1 || UpdatePart == etrtVELOCITY2) &&
1893 !EI_VV(inputrec->eI))
1895 gmx_incons("update_coords called for velocity without VV integrator");
/* Range of home (locally owned) particles is [start, nrend). */
1899 homenr = md->homenr;
1900 nrend = start+homenr;
/* xprime is the scratch buffer that receives the updated positions. */
1902 xprime = get_xprime(state, upd);
1904 dt = inputrec->delta_t;
1907 /* We need to update the NMR restraint history when time averaging is used */
1908 if (state->flags & (1<<estDISRE_RM3TAV))
1910 update_disres_history(fcd, &state->hist);
1912 if (state->flags & (1<<estORIRE_DTAV))
1914 update_orires_history(fcd, &state->hist);
/* bNH/bPR: extended-variable T-coupling (Nose-Hoover) resp. P-coupling
 * (Parrinello-Rahman or MTTK); passed to the VV update routines below. */
1918 bNH = inputrec->etc == etcNOSEHOOVER;
1919 bPR = ((inputrec->epc == epcPARRINELLORAHMAN) || (inputrec->epc == epcMTTK));
1921 if (bDoLR && inputrec->nstcalclr > 1 && !EI_VV(inputrec->eI)) /* get this working with VV? */
1923 /* Store the total force + nstcalclr-1 times the LR force
1924 * in forces_lr, so it can be used in a normal update algorithm
1925 * to produce twin time stepping. */
1927 /* is this correct in the new construction? MRS */
/* NOTE(review): the called function's name is elided here; these are the
 * trailing arguments of that call — confirm against the full source. */
1929 inputrec->nstcalclr, constr, inputrec, md, idef, cr,
1930 step, state, bMolPBC,
1931 start, nrend, f, f_lr, vir_lr_constr, nrnb);
1939 /* ############# START The update of velocities and positions ######### */
1941 dump_it_all(fplog, "Before update",
1942 state->natoms, state->x, xprime, state->v, force);
/* SD2 needs per-thread work buffers and temperature-coupling constants
 * set up once before the threaded update loop. */
1944 if (inputrec->eI == eiSD2)
1946 check_sd2_work_data_allocation(upd->sd, nrend);
1948 do_update_sd2_Tconsts(upd->sd,
1949 inputrec->opts.ngtc,
1950 inputrec->opts.tau_t,
1951 inputrec->opts.ref_t);
/* Brownian dynamics likewise precomputes temperature-dependent constants. */
1953 if (inputrec->eI == eiBD)
1955 do_update_bd_Tconsts(dt, inputrec->bd_fric,
1956 inputrec->opts.ngtc, inputrec->opts.ref_t,
/* Split the home-particle range evenly across the update threads; each
 * thread handles [start_th, end_th). */
1960 nth = gmx_omp_nthreads_get(emntUpdate);
1962 #pragma omp parallel for num_threads(nth) schedule(static) private(alpha)
1963 for (th = 0; th < nth; th++)
1965 int start_th, end_th;
1967 start_th = start + ((nrend-start)* th )/nth;
1968 end_th = start + ((nrend-start)*(th+1))/nth;
/* Dispatch on the integrator chosen in the input record. */
1970 switch (inputrec->eI)
/* Leap-frog MD: plain update, or viscosity-calculation variant when a
 * cosine acceleration profile is active. */
1973 if (ekind->cosacc.cos_accel == 0)
1975 do_update_md(start_th, end_th, dt,
1976 ekind->tcstat, state->nosehoover_vxi,
1977 ekind->bNEMD, ekind->grpstat, inputrec->opts.acc,
1978 inputrec->opts.nFreeze,
1979 md->invmass, md->ptype,
1980 md->cFREEZE, md->cACC, md->cTC,
1981 state->x, xprime, state->v, force, M,
1986 do_update_visc(start_th, end_th, dt,
1987 ekind->tcstat, state->nosehoover_vxi,
1988 md->invmass, md->ptype,
1989 md->cTC, state->x, xprime, state->v, force, M,
1991 ekind->cosacc.cos_accel,
/* Stochastic dynamics, one-step variant (SD1). */
1997 do_update_sd1(upd->sd, upd->sd->gaussrand[th],
1998 start_th, end_th, dt,
1999 inputrec->opts.acc, inputrec->opts.nFreeze,
2000 md->invmass, md->ptype,
2001 md->cFREEZE, md->cACC, md->cTC,
2002 state->x, xprime, state->v, force, state->sd_X,
2003 inputrec->opts.ngtc, inputrec->opts.tau_t, inputrec->opts.ref_t);
2006 /* The SD update is done in 2 parts, because an extra constraint step
 * is interleaved; this is the first half. */
2009 do_update_sd2(upd->sd, upd->sd->gaussrand[th],
2010 bInitStep, start_th, end_th,
2011 inputrec->opts.acc, inputrec->opts.nFreeze,
2012 md->invmass, md->ptype,
2013 md->cFREEZE, md->cACC, md->cTC,
2014 state->x, xprime, state->v, force, state->sd_X,
2015 inputrec->opts.tau_t,
/* Brownian dynamics. */
2019 do_update_bd(start_th, end_th, dt,
2020 inputrec->opts.nFreeze, md->invmass, md->ptype,
2021 md->cFREEZE, md->cTC,
2022 state->x, xprime, state->v, force,
2024 upd->sd->bd_rf, upd->sd->gaussrand[th]);
/* Velocity Verlet: alpha corrects for the barostat degree of freedom. */
2028 alpha = 1.0 + DIM/((double)inputrec->opts.nrdf[0]); /* assuming barostat coupled to group 0. */
/* VV velocity half-step (case etrtVELOCITY1/2, label elided here). */
2033 do_update_vv_vel(start_th, end_th, dt,
2034 ekind->tcstat, ekind->grpstat,
2035 inputrec->opts.acc, inputrec->opts.nFreeze,
2036 md->invmass, md->ptype,
2037 md->cFREEZE, md->cACC,
2039 (bNH || bPR), state->veta, alpha);
/* VV position step. */
2042 do_update_vv_pos(start_th, end_th, dt,
2043 ekind->tcstat, ekind->grpstat,
2044 inputrec->opts.acc, inputrec->opts.nFreeze,
2045 md->invmass, md->ptype, md->cFREEZE,
2046 state->x, xprime, state->v, force,
2047 (bNH || bPR), state->veta, alpha);
/* Unknown integrator: hard failure. */
2052 gmx_fatal(FARGS, "Don't know how to update coordinates");
2060 void correct_ekin(FILE *log, int start, int end, rvec v[], rvec vcm, real mass[],
2061                   real tmass, tensor ekin)
/*
2064 * This is a debugging routine. It should not be called for production code
2066 * The kinetic energy should be calculated according to:
2067 * Ekin = 1/2 m (v-vcm)^2
2068 * However the correction is not always applied, since vcm may not be
2069 * known in time and we compute
2070 * Ekin' = 1/2 m v^2 instead
2071 * This can be corrected afterwards by computing
2072 * Ekin = Ekin' + 1/2 m ( -2 v vcm + vcm^2)
2074 * Ekin = Ekin' - m v vcm + 1/2 m vcm^2
 */
2081 /* Local particles */
2084 /* Processor dependent part. */
/* NOTE(review): the accumulation statements inside this loop are elided
 * in this excerpt; presumably it sums mass-weighted velocities (mv) and
 * total mass (tm) over particles [start, end) — confirm against source. */
2086 for (i = start; (i < end); i++)
2090 for (j = 0; (j < DIM); j++)
/* Normalize to the center-of-mass velocity and precompute vcm/2. */
2096 svmul(1/tmass, vcm, vcm);
2097 svmul(0.5, vcm, hvcm);
/* Build the correction tensor dekin = vcm (tm*vcm/2 - mv) (outer product). */
2099 for (j = 0; (j < DIM); j++)
2101 for (k = 0; (k < DIM); k++)
2103 dekin[j][k] += vcm[k]*(tm*hvcm[j]-mv[j]);
/* Print the correction and the uncorrected kinetic-energy tensor. */
2106 pr_rvecs(log, 0, "dekin", dekin, DIM);
2107 pr_rvecs(log, 0, " ekin", ekin, DIM);
2108 fprintf(log, "dekin = %g, ekin = %g vcm = (%8.4f %8.4f %8.4f)\n",
2109 trace(dekin), trace(ekin), vcm[XX], vcm[YY], vcm[ZZ]);
2110 fprintf(log, "mv = (%8.4f %8.4f %8.4f)\n",
2111 mv[XX], mv[YY], mv[ZZ]);
2114 extern gmx_bool update_randomize_velocities(t_inputrec *ir, gmx_large_int_t step, t_mdatoms *md, t_state *state, gmx_update_t upd, t_idef *idef, gmx_constr_t constr, gmx_bool bIsDomainDecomposition)
2118 real rate = (ir->delta_t)/ir->opts.tau_t[0];
2120 if (ir->etc == etcANDERSEN && constr && bIsDomainDecomposition)
2122 gmx_fatal(FARGS, "Normal Andersen is currently not supported with constraints and domain decomposition. Please consider the massive Andersen thermostat.");
2125 /* proceed with andersen if 1) it's fixed probability per
2126 particle andersen or 2) it's massive andersen and it's tau_t/dt */
2127 if ((ir->etc == etcANDERSEN) || do_per_step(step, (int)(1.0/rate)))
2129 srenew(upd->randatom, state->nalloc);
2130 srenew(upd->randatom_list, state->nalloc);
2131 if (upd->randatom_list_init == FALSE)
2133 for (i = 0; i < state->nalloc; i++)
2135 upd->randatom[i] = FALSE;
2136 upd->randatom_list[i] = 0;
2138 upd->randatom_list_init = TRUE;
2140 andersen_tcoupl(ir, md, state, upd->sd->gaussrand[0], rate,
2141 (ir->etc == etcANDERSEN) ? idef : NULL,
2142 constr ? get_nblocks(constr) : 0,
2143 constr ? get_sblock(constr) : NULL,
2144 upd->randatom, upd->randatom_list,
2145 upd->sd->randomize_group, upd->sd->boltzfac);