2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
45 #include "gromacs/legacyheaders/types/commrec.h"
46 #include "gromacs/legacyheaders/typedefs.h"
47 #include "gromacs/legacyheaders/nrnb.h"
48 #include "gromacs/math/units.h"
49 #include "gromacs/legacyheaders/macros.h"
50 #include "gromacs/math/vec.h"
51 #include "gromacs/legacyheaders/update.h"
52 #include "gromacs/random/random.h"
53 #include "gromacs/legacyheaders/tgroup.h"
54 #include "gromacs/legacyheaders/force.h"
55 #include "gromacs/legacyheaders/names.h"
56 #include "gromacs/legacyheaders/txtdump.h"
57 #include "gromacs/legacyheaders/mdrun.h"
58 #include "gromacs/legacyheaders/constr.h"
59 #include "gromacs/legacyheaders/disre.h"
60 #include "gromacs/legacyheaders/orires.h"
61 #include "gromacs/legacyheaders/gmx_omp_nthreads.h"
63 #include "gromacs/fileio/confio.h"
64 #include "gromacs/pbcutil/mshift.h"
65 #include "gromacs/pbcutil/pbc.h"
66 #include "gromacs/pulling/pull.h"
67 #include "gromacs/timing/wallcycle.h"
68 #include "gromacs/utility/futil.h"
69 #include "gromacs/utility/gmxomp.h"
70 #include "gromacs/utility/smalloc.h"
72 /*For debugging, start at v(-dt/2) for velolcity verlet -- uncomment next line */
73 /*#define STARTFROMDT2*/
97 gmx_sd_sigma_t *sdsig;
100 /* andersen temperature control stuff */
101 gmx_bool *randomize_group;
105 typedef struct gmx_update
108 /* xprime for constraint algorithms */
112 /* Variables for the deform algorithm */
113 gmx_int64_t deformref_step;
114 matrix deformref_box;
/* Leap-frog update of positions x -> xprime (and velocities v) for MD.
 * Three code paths are visible below:
 *  - extended-ensemble coupling: Nose-Hoover friction (vxi) and/or
 *    Parrinello-Rahman box coupling (matrix M), using the reversible
 *    leap-frog integrator of Holian et al., Phys Rev E 52(3):2338, 1995;
 *  - Berendsen/v-rescale lambda scaling combined with freeze groups
 *    and/or NEMD group accelerations (gstat[].u, accel[]);
 *  - plain Berendsen/v-rescale lambda scaling.
 * Virtual sites (eptVSite), shells (eptShell) and frozen dimensions keep
 * their position: xprime = x.
 * NOTE(review): this listing is abridged; lines between the visible ones
 * (loop braces, group-index lookups) are elided and kept as-is.
 */
118 static void do_update_md(int start, int nrend, double dt,
119                          t_grp_tcstat *tcstat,
121                          gmx_bool bNEMD, t_grp_acc *gstat, rvec accel[],
124                          unsigned short ptype[], unsigned short cFREEZE[],
125                          unsigned short cACC[], unsigned short cTC[],
126                          rvec x[], rvec xprime[], rvec v[],
128                          gmx_bool bNH, gmx_bool bPR)
131     int gf = 0, ga = 0, gt = 0;
133     real vn, vv, va, vb, vnrel;
139         /* Update with coupling to extended ensembles, used for
140          * Nose-Hoover and Parrinello-Rahman coupling
141          * Nose-Hoover uses the reversible leap-frog integrator from
142          * Holian et al. Phys Rev E 52(3) : 2338, 1995
144         for (n = start; n < nrend; n++)
159             lg = tcstat[gt].lambda;
164             rvec_sub(v[n], gstat[ga].u, vrel);
166             for (d = 0; d < DIM; d++)
168                 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
                        /* Semi-implicit friction: divide by (1 + 0.5*vxi*dt) */
170                     vnrel = (lg*vrel[d] + dt*(imass*f[n][d] - 0.5*vxi*vrel[d]
171                                               - iprod(M[d], vrel)))/(1 + 0.5*vxi*dt);
172                     /* do not scale the mean velocities u */
173                     vn             = gstat[ga].u[d] + accel[ga][d]*dt + vnrel;
175                     xprime[n][d]   = x[n][d]+vn*dt;
180                     xprime[n][d] = x[n][d];
185     else if (cFREEZE != NULL ||
186              nFreeze[0][XX] || nFreeze[0][YY] || nFreeze[0][ZZ] ||
189         /* Update with Berendsen/v-rescale coupling and freeze or NEMD */
190         for (n = start; n < nrend; n++)
192             w_dt = invmass[n]*dt;
205             lg = tcstat[gt].lambda;
207             for (d = 0; d < DIM; d++)
210                 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
212                     vv = lg*vn + f[n][d]*w_dt;
214                     /* do not scale the mean velocities u */
216                     va           = vv + accel[ga][d]*dt;
217                     vb           = va + (1.0-lg)*u;
219                     xprime[n][d] = x[n][d]+vb*dt;
224                     xprime[n][d] = x[n][d];
231         /* Plain update with Berendsen/v-rescale coupling */
232         for (n = start; n < nrend; n++)
234             if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
236                 w_dt = invmass[n]*dt;
241                 lg = tcstat[gt].lambda;
243                 for (d = 0; d < DIM; d++)
245                     vn             = lg*v[n][d] + f[n][d]*w_dt;
247                     xprime[n][d]   = x[n][d] + vn*dt;
252                 for (d = 0; d < DIM; d++)
255                     xprime[n][d] = x[n][d];
/* Velocity half of the velocity-Verlet update: v += 0.5*dt*f/m (+accel),
 * optionally scaled for extended (MTTK-style) ensembles via the
 * sinh-series factors mv1/mv2 built from g = 0.25*dt*veta*alpha.
 * Frozen dimensions, virtual sites and shells are skipped.
 */
262 static void do_update_vv_vel(int start, int nrend, double dt,
263                              rvec accel[], ivec nFreeze[], real invmass[],
264                              unsigned short ptype[], unsigned short cFREEZE[],
265                              unsigned short cACC[], rvec v[], rvec f[],
266                              gmx_bool bExtended, real veta, real alpha)
271     real u, vn, vv, va, vb, vnrel;
277         g        = 0.25*dt*veta*alpha;
            /* series_sinhx: presumably a series expansion of sinh(x)/x -- TODO confirm */
279         mv2      = series_sinhx(g);
286     for (n = start; n < nrend; n++)
288         w_dt = invmass[n]*dt;
298         for (d = 0; d < DIM; d++)
300             if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
302                 v[n][d]             = mv1*(mv1*v[n][d] + 0.5*(w_dt*mv2*f[n][d]))+0.5*accel[ga][d]*dt;
310 } /* do_update_vv_vel */
/* Position half of the velocity-Verlet update: xprime = x + dt*v,
 * with extended-ensemble box-velocity scaling via mr1/mr2 (sinh series).
 * Virtual sites, shells and frozen dimensions keep xprime = x.
 */
312 static void do_update_vv_pos(int start, int nrend, double dt,
314                              unsigned short ptype[], unsigned short cFREEZE[],
315                              rvec x[], rvec xprime[], rvec v[],
316                              gmx_bool bExtended, real veta)
323     /* Would it make more sense if Parrinello-Rahman was put here? */
328         mr2 = series_sinhx(g);
336     for (n = start; n < nrend; n++)
344         for (d = 0; d < DIM; d++)
346             if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
348                 xprime[n][d]   = mr1*(mr1*x[n][d]+mr2*dt*v[n][d]);
352                 xprime[n][d] = x[n][d];
356 } /* do_update_vv_pos */
/* Leap-frog update in the presence of a cosine acceleration profile
 * (periodic-perturbation viscosity calculation): each atom feels an extra
 * acceleration cos_accel*cos(2*pi*z/Lz) along the profile direction, and
 * the cosine velocity profile (vcos) is excluded from thermostat scaling.
 * Two paths: extended-ensemble (Nose-Hoover/Parrinello-Rahman) and the
 * classic Berendsen-coupled update. Virtual sites/shells keep xprime = x.
 */
358 static void do_update_visc(int start, int nrend, double dt,
359                            t_grp_tcstat *tcstat,
362                            unsigned short ptype[], unsigned short cTC[],
363                            rvec x[], rvec xprime[], rvec v[],
364                            rvec f[], matrix M, matrix box, real
365                            cos_accel, real vcos,
366                            gmx_bool bNH, gmx_bool bPR)
371     real lg, vxi = 0, vv;
        /* Wave vector of the cosine profile along z */
376     fac = 2*M_PI/(box[ZZ][ZZ]);
380         /* Update with coupling to extended ensembles, used for
381          * Nose-Hoover and Parrinello-Rahman coupling
383         for (n = start; n < nrend; n++)
390             lg   = tcstat[gt].lambda;
391             cosz = cos(fac*x[n][ZZ]);
393             copy_rvec(v[n], vrel);
401             for (d = 0; d < DIM; d++)
405                 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
407                     vn  = (lg*vrel[d] + dt*(imass*f[n][d] - 0.5*vxi*vrel[d]
408                                             - iprod(M[d], vrel)))/(1 + 0.5*vxi*dt);
                        /* Re-add the (unscaled) profile velocity and cosine acceleration */
411                         vn += vc + dt*cosz*cos_accel;
414                     xprime[n][d] = x[n][d]+vn*dt;
418                     xprime[n][d] = x[n][d];
425         /* Classic version of update, used with berendsen coupling */
426         for (n = start; n < nrend; n++)
428             w_dt = invmass[n]*dt;
433             lg   = tcstat[gt].lambda;
434             cosz = cos(fac*x[n][ZZ]);
436             for (d = 0; d < DIM; d++)
440                 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
445                         /* Do not scale the cosine velocity profile */
446                         vv     = vc + lg*(vn - vc + f[n][d]*w_dt);
447                         /* Add the cosine accelaration profile */
448                         vv    += dt*cosz*cos_accel;
452                         vv = lg*(vn + f[n][d]*w_dt);
455                     xprime[n][d] = x[n][d]+vv*dt;
460                     xprime[n][d] = x[n][d];
/* Allocate and precompute per-temperature-group constants for the
 * stochastic integrators/thermostats:
 *  - BD: bd_rf random-force factors,
 *  - SD: sdsig plus the gdt/eph/emh/em exponentials and the b/c/d
 *    constants (exact form for gdt >= 0.05, 7th-order series otherwise),
 *  - Andersen: per-group randomize flags and Boltzmann factors.
 * tau_t <= 0 for a group means no friction/noise for that group.
 * Returns the newly allocated gmx_stochd_t (sd).
 */
467 static gmx_stochd_t *init_stochd(t_inputrec *ir)
476     ngtc = ir->opts.ngtc;
480         snew(sd->bd_rf, ngtc);
482     else if (EI_SD(ir->eI))
485         snew(sd->sdsig, ngtc);
488         for (n = 0; n < ngtc; n++)
490             if (ir->opts.tau_t[n] > 0)
492                 sdc[n].gdt = ir->delta_t/ir->opts.tau_t[n];
493                 sdc[n].eph = exp(sdc[n].gdt/2);
494                 sdc[n].emh = exp(-sdc[n].gdt/2);
495                 sdc[n].em  = exp(-sdc[n].gdt);
499                 /* No friction and noise on this group */
            /* Use the closed-form expressions only when gdt is large enough
             * to avoid catastrophic cancellation; otherwise the series. */
505             if (sdc[n].gdt >= 0.05)
507                 sdc[n].b = sdc[n].gdt*(sdc[n].eph*sdc[n].eph - 1)
508                     - 4*(sdc[n].eph - 1)*(sdc[n].eph - 1);
509                 sdc[n].c = sdc[n].gdt - 3 + 4*sdc[n].emh - sdc[n].em;
510                 sdc[n].d = 2 - sdc[n].eph - sdc[n].emh;
515                 /* Seventh order expansions for small y */
516                 sdc[n].b = y*y*y*y*(1/3.0+y*(1/3.0+y*(17/90.0+y*7/9.0)));
517                 sdc[n].c = y*y*y*(2/3.0+y*(-1/2.0+y*(7/30.0+y*(-1/12.0+y*31/1260.0))));
518                 sdc[n].d = y*y*(-1+y*y*(-1/12.0-y*y/360.0));
522                 fprintf(debug, "SD const tc-grp %d: b %g  c %g  d %g\n",
523                         n, sdc[n].b, sdc[n].c, sdc[n].d);
527     else if (ETC_ANDERSEN(ir->etc))
536         snew(sd->randomize_group, ngtc);
537         snew(sd->boltzfac, ngtc);
539         /* for now, assume that all groups, if randomized, are randomized at the same rate, i.e. tau_t is the same. */
540         /* since constraint groups don't necessarily match up with temperature groups! This is checked in readir.c */
542         for (n = 0; n < ngtc; n++)
544             reft = max(0.0, opts->ref_t[n]);
545             if ((opts->tau_t[n] > 0) && (reft > 0))  /* tau_t or ref_t = 0 means that no randomization is done */
547                 sd->randomize_group[n] = TRUE;
548                 sd->boltzfac[n]        = BOLTZ*opts->ref_t[n];
552                 sd->randomize_group[n] = FALSE;
/* Create the update data structure; allocates the stochastic-dynamics
 * sub-structure only for integrators/thermostats that need random numbers
 * (BD, SD, v-rescale, Andersen). Returns the new gmx_update_t.
 */
559 gmx_update_t init_update(t_inputrec *ir)
565     if (ir->eI == eiBD || EI_SD(ir->eI) || ir->etc == etcVRESCALE || ETC_ANDERSEN(ir->etc))
567         upd->sd = init_stochd(ir);
/* SD1 stochastic-dynamics (Langevin) update.
 * Without constraints: a single pass applies the deterministic update,
 * friction scaling (sdc[gt].em) and the Gaussian noise term in one go.
 * With constraints the update is split in two calls:
 *  - bFirstHalfConstr: deterministic update only (no friction/noise),
 *  - second call: friction and noise only; half of that velocity change
 *    is folded back into xprime.
 * Random numbers are counter-based (gmx_rng_cycle_*) keyed on (step,
 * global atom index, seed), so results are reproducible and independent
 * of domain decomposition.
 */
576 static void do_update_sd1(gmx_stochd_t *sd,
577                           int start, int nrend, double dt,
578                           rvec accel[], ivec nFreeze[],
579                           real invmass[], unsigned short ptype[],
580                           unsigned short cFREEZE[], unsigned short cACC[],
581                           unsigned short cTC[],
582                           rvec x[], rvec xprime[], rvec v[], rvec f[],
583                           int ngtc, real ref_t[],
585                           gmx_bool bFirstHalfConstr,
586                           gmx_int64_t step, int seed, int* gatindex)
591     int    gf = 0, ga = 0, gt = 0;
598     for (n = 0; n < ngtc; n++)
601         /* The mass is encounted for later, since this differs per atom */
602         sig[n].V  = sqrt(kT*(1 - sdc[n].em*sdc[n].em));
607         for (n = start; n < nrend; n++)
610             int  ng = gatindex ? gatindex[n] : n;
612             ism = sqrt(invmass[n]);
626             gmx_rng_cycle_3gaussian_table(step, ng, seed, RND_SEED_UPDATE, rnd);
628             for (d = 0; d < DIM; d++)
630                 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
634                     sd_V = ism*sig[gt].V*rnd[d];
635                     vn           = v[n][d] + (invmass[n]*f[n][d] + accel[ga][d])*dt;
636                     v[n][d]      = vn*sdc[gt].em + sd_V;
637                     /* Here we include half of the friction+noise
638                      * update of v into the integration of x.
640                     xprime[n][d] = x[n][d] + 0.5*(vn + v[n][d])*dt;
645                     xprime[n][d] = x[n][d];
652         /* We do have constraints */
653         if (bFirstHalfConstr)
655             /* First update without friction and noise */
658             for (n = start; n < nrend; n++)
675                 for (d = 0; d < DIM; d++)
677                     if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
679                         v[n][d]      = v[n][d] + (im*f[n][d] + accel[ga][d])*dt;
680                         xprime[n][d] = x[n][d] +  v[n][d]*dt;
685                         xprime[n][d] = x[n][d];
692             /* Update friction and noise only */
693             for (n = start; n < nrend; n++)
696                 int  ng = gatindex ? gatindex[n] : n;
698                 ism = sqrt(invmass[n]);
712                 gmx_rng_cycle_3gaussian_table(step, ng, seed, RND_SEED_UPDATE, rnd);
714                 for (d = 0; d < DIM; d++)
716                     if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
720                         sd_V = ism*sig[gt].V*rnd[d];
722                         v[n][d] = vn*sdc[gt].em + sd_V;
723                         /* Add the friction and noise contribution only */
724                         xprime[n][d] = xprime[n][d] + 0.5*(v[n][d] - vn)*dt;
/* Grow the SD2 scratch array sd_V to hold at least nrend entries,
 * over-allocating (over_alloc_dd) to limit reallocation frequency
 * under domain decomposition.
 */
732 static void check_sd2_work_data_allocation(gmx_stochd_t *sd, int nrend)
734     if (nrend > sd->sd_V_nalloc)
736         sd->sd_V_nalloc = over_alloc_dd(nrend);
737         srenew(sd->sd_V, sd->sd_V_nalloc);
/* Precompute per-temperature-group noise amplitudes (sig[gt].V/X/Yv/Yx)
 * for the SD2 integrator from kT, tau_t and the SD constants b, c, em.
 * Kept separate from the per-atom update because it is single threaded.
 */
741 static void do_update_sd2_Tconsts(gmx_stochd_t *sd,
746     /* This is separated from the update below, because it is single threaded */
755     for (gt = 0; gt < ngtc; gt++)
757         kT = BOLTZ*ref_t[gt];
758         /* The mass is encounted for later, since this differs per atom */
759         sig[gt].V  = sqrt(kT*(1-sdc[gt].em));
760         sig[gt].X  = sqrt(kT*sqr(tau_t[gt])*sdc[gt].c);
761         sig[gt].Yv = sqrt(kT*sdc[gt].b/sdc[gt].c);
762         sig[gt].Yx = sqrt(kT*sqr(tau_t[gt])*sdc[gt].b/(1-sdc[gt].em));
/* SD2 stochastic-dynamics update (two half-steps per MD step).
 * The random velocity part (sd_V) generated in the first half must be
 * remembered for the second half; sd_X carries the position noise.
 * The second half also recomputes velocities from the constrained
 * coordinate difference, which introduces some inaccuracy (see the
 * in-line comment below). Six Gaussian numbers per atom per half-step
 * are drawn from the counter-based RNG keyed on (2*step(+1|2), global
 * atom index, seed).
 */
766 static void do_update_sd2(gmx_stochd_t *sd,
768                           int start, int nrend,
769                           rvec accel[], ivec nFreeze[],
770                           real invmass[], unsigned short ptype[],
771                           unsigned short cFREEZE[], unsigned short cACC[],
772                           unsigned short cTC[],
773                           rvec x[], rvec xprime[], rvec v[], rvec f[],
776                           gmx_bool bFirstHalf, gmx_int64_t step, int seed,
781     /* The random part of the velocity update, generated in the first
782      * half of the update, needs to be remembered for the second half.
786     int    gf = 0, ga = 0, gt = 0;
787     real   vn = 0, Vmh, Xmh;
795     for (n = start; n < nrend; n++)
797         real rnd[6], rndi[3];
798         ng  = gatindex ? gatindex[n] : n;
799         ism = sqrt(invmass[n]);
813         gmx_rng_cycle_6gaussian_table(step*2+(bFirstHalf ? 1 : 2), ng, seed, RND_SEED_UPDATE, rnd);
816             gmx_rng_cycle_3gaussian_table(step*2, ng, seed, RND_SEED_UPDATE, rndi);
818         for (d = 0; d < DIM; d++)
824             if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
830                     sd_X[n][d] = ism*sig[gt].X*rndi[d];
832                 Vmh = sd_X[n][d]*sdc[gt].d/(tau_t[gt]*sdc[gt].c)
833                     + ism*sig[gt].Yv*rnd[d*2];
834                 sd_V[n][d] = ism*sig[gt].V*rnd[d*2+1];
836                 v[n][d] = vn*sdc[gt].em
837                     + (invmass[n]*f[n][d] + accel[ga][d])*tau_t[gt]*(1 - sdc[gt].em)
838                     + sd_V[n][d] - sdc[gt].em*Vmh;
840                 xprime[n][d] = x[n][d] + v[n][d]*tau_t[gt]*(sdc[gt].eph - sdc[gt].emh);
844                 /* Correct the velocities for the constraints.
845                  * This operation introduces some inaccuracy,
846                  * since the velocity is determined from differences in coordinates.
849                     (xprime[n][d] - x[n][d])/(tau_t[gt]*(sdc[gt].eph - sdc[gt].emh));
851                 Xmh = sd_V[n][d]*tau_t[gt]*sdc[gt].d/(sdc[gt].em-1)
852                     + ism*sig[gt].Yx*rnd[d*2];
853                 sd_X[n][d] = ism*sig[gt].X*rnd[d*2+1];
855                 xprime[n][d] += sd_X[n][d] - Xmh;
864                     xprime[n][d] = x[n][d];
/* Precompute per-temperature-group random-force factors rf[] for
 * Brownian dynamics: sqrt(2 kT / (gamma dt)) when a global friction
 * coefficient is given, sqrt(2 kT) otherwise (per-atom friction is then
 * folded in via invmass in do_update_bd). Single threaded by design.
 */
871 static void do_update_bd_Tconsts(double dt, real friction_coefficient,
872                                  int ngtc, const real ref_t[],
875     /* This is separated from the update below, because it is single threaded */
878     if (friction_coefficient != 0)
880         for (gt = 0; gt < ngtc; gt++)
882             rf[gt] = sqrt(2.0*BOLTZ*ref_t[gt]/(friction_coefficient*dt));
887         for (gt = 0; gt < ngtc; gt++)
889             rf[gt] = sqrt(2.0*BOLTZ*ref_t[gt]);
/* Brownian-dynamics (position Langevin) update: xprime = x + v*dt with
 * v = f/gamma + noise. With a global friction coefficient, invfr = 1/gamma
 * is used directly; otherwise invmass has been premultiplied so that
 * invmass = 2/(mass*friction_constant*dt) (see NOTE below). The stored v
 * are effectively full-step velocities. Frozen dims / vsites / shells
 * keep xprime = x.
 */
894 static void do_update_bd(int start, int nrend, double dt,
896                          real invmass[], unsigned short ptype[],
897                          unsigned short cFREEZE[], unsigned short cTC[],
898                          rvec x[], rvec xprime[], rvec v[],
899                          rvec f[], real friction_coefficient,
900                          real *rf, gmx_int64_t step, int seed,
903     /* note -- these appear to be full step velocities . . .  */
909     if (friction_coefficient != 0)
911         invfr = 1.0/friction_coefficient;
914     for (n = start; (n < nrend); n++)
917         int  ng  = gatindex ? gatindex[n] : n;
927         gmx_rng_cycle_3gaussian_table(step, ng, seed, RND_SEED_UPDATE, rnd);
928         for (d = 0; (d < DIM); d++)
930             if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
932                 if (friction_coefficient != 0)
934                     vn = invfr*f[n][d] + rf[gt]*rnd[d];
938                     /* NOTE: invmass = 2/(mass*friction_constant*dt) */
939                     vn = 0.5*invmass[n]*f[n][d]*dt
940                         + sqrt(0.5*invmass[n])*rf[gt]*rnd[d];
944                 xprime[n][d] = x[n][d]+vn*dt;
949                 xprime[n][d] = x[n][d];
/* Debug helper: print positions, trial positions, velocities and forces
 * for all atoms to fp. Parameters are gmx_unused because the body is
 * presumably compiled in only for debug builds -- TODO confirm (the
 * enclosing #ifdef is not visible in this listing).
 */
955 static void dump_it_all(FILE gmx_unused *fp, const char gmx_unused *title,
956                         int gmx_unused natoms, rvec gmx_unused x[], rvec gmx_unused xp[],
957                         rvec gmx_unused v[], rvec gmx_unused f[])
962     fprintf(fp, "%s\n", title);
963     pr_rvecs(fp, 0, "x", x, natoms);
964     pr_rvecs(fp, 0, "xp", xp, natoms);
965     pr_rvecs(fp, 0, "v", v, natoms);
966     pr_rvecs(fp, 0, "f", f, natoms);
/* Accumulate the partial kinetic-energy tensors per temperature group.
 * Depending on bEkinAveVel the per-atom contributions go into ekinf
 * (full-step, velocity Verlet) or ekinh (half-step, leap-frog); with
 * !bSaveEkinOld the previous ekinh is first copied to ekinh_old.
 * The per-atom loop is OpenMP-parallel over emntUpdate threads, each
 * summing into ekind->ekin_work[thread]/dekindl_work[thread], which are
 * reduced serially afterwards. dekindl collects the free-energy mass
 * perturbation term for perturbed atoms.
 */
971 static void calc_ke_part_normal(rvec v[], t_grpopts *opts, t_mdatoms *md,
972                                 gmx_ekindata_t *ekind, t_nrnb *nrnb, gmx_bool bEkinAveVel,
973                                 gmx_bool bSaveEkinOld)
976     t_grp_tcstat *tcstat  = ekind->tcstat;
977     t_grp_acc    *grpstat = ekind->grpstat;
980     /* three main: VV with AveVel, vv with AveEkin, leap with AveEkin.  Leap with AveVel is also
981        an option, but not supported now.
982        bEkinAveVel: If TRUE, we sum into ekin, if FALSE, into ekinh.
983        bSavEkinOld: If TRUE (in the case of iteration = bIterate is TRUE), we don't copy over the ekinh_old.
984        If FALSE, we overrwrite it.
987     /* group velocities are calculated in update_ekindata and
988      * accumulated in acumulate_groups.
989      * Now the partial global and groups ekin.
991     for (g = 0; (g < opts->ngtc); g++)
996             copy_mat(tcstat[g].ekinh, tcstat[g].ekinh_old);
1000            clear_mat(tcstat[g].ekinf);
1004            clear_mat(tcstat[g].ekinh);
1008        tcstat[g].ekinscalef_nhc = 1.0; /* need to clear this -- logic is complicated! */
1011    ekind->dekindl_old = ekind->dekindl;
1013    nthread = gmx_omp_nthreads_get(emntUpdate);
1015 #pragma omp parallel for num_threads(nthread) schedule(static)
1016    for (thread = 0; thread < nthread; thread++)
1018        int     start_t, end_t, n;
1026        start_t = ((thread+0)*md->homenr)/nthread;
1027        end_t   = ((thread+1)*md->homenr)/nthread;
1029        ekin_sum    = ekind->ekin_work[thread];
1030        dekindl_sum = ekind->dekindl_work[thread];
1032        for (gt = 0; gt < opts->ngtc; gt++)
1034            clear_mat(ekin_sum[gt]);
1040        for (n = start_t; n < end_t; n++)
1050            hm   = 0.5*md->massT[n];
1052            for (d = 0; (d < DIM); d++)
                    /* Remove the group's mean (NEMD) velocity before summing */
1054                v_corrt[d] = v[n][d]  - grpstat[ga].u[d];
1056            for (d = 0; (d < DIM); d++)
1058                for (m = 0; (m < DIM); m++)
1060                    /* if we're computing a full step velocity, v_corrt[d] has v(t).  Otherwise, v(t+dt/2) */
1061                    ekin_sum[gt][m][d] += hm*v_corrt[m]*v_corrt[d];
1064            if (md->nMassPerturbed && md->bPerturbed[n])
1067                    0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt, v_corrt);
1073    for (thread = 0; thread < nthread; thread++)
1075        for (g = 0; g < opts->ngtc; g++)
1079                m_add(tcstat[g].ekinf, ekind->ekin_work[thread][g],
1084                m_add(tcstat[g].ekinh, ekind->ekin_work[thread][g],
1089        ekind->dekindl += *ekind->dekindl_work[thread];
1092    inc_nrnb(nrnb, eNR_EKIN, md->homenr);
/* Kinetic-energy accumulation for the viscosity (cosine acceleration)
 * case: like calc_ke_part_normal, but the cosine velocity profile
 * cosacc->vcos is subtracted from v[XX] before summing, and the profile
 * amplitude mvcos (= sum 2*cos(2*pi*z/Lz)*m*vx) is recomputed.
 * Serial loop over home atoms (no OpenMP here, unlike the normal path).
 */
1095 static void calc_ke_part_visc(matrix box, rvec x[], rvec v[],
1096                               t_grpopts *opts, t_mdatoms *md,
1097                               gmx_ekindata_t *ekind,
1098                               t_nrnb *nrnb, gmx_bool bEkinAveVel)
1100    int           start = 0, homenr = md->homenr;
1101    int           g, d, n, m, gt = 0;
1104    t_grp_tcstat *tcstat = ekind->tcstat;
1105    t_cos_acc    *cosacc = &(ekind->cosacc);
1110    for (g = 0; g < opts->ngtc; g++)
1112        copy_mat(ekind->tcstat[g].ekinh, ekind->tcstat[g].ekinh_old);
1113        clear_mat(ekind->tcstat[g].ekinh);
1115    ekind->dekindl_old = ekind->dekindl;
        /* Wave vector of the cosine profile along z */
1117    fac = 2*M_PI/box[ZZ][ZZ];
1120    for (n = start; n < start+homenr; n++)
1126        hm   = 0.5*md->massT[n];
1128        /* Note that the times of x and v differ by half a step */
1129        /* MRS -- would have to be changed for VV */
1130        cosz         = cos(fac*x[n][ZZ]);
1131        /* Calculate the amplitude of the new velocity profile */
1132        mvcos       += 2*cosz*md->massT[n]*v[n][XX];
1134        copy_rvec(v[n], v_corrt);
1135        /* Subtract the profile for the kinetic energy */
1136        v_corrt[XX] -= cosz*cosacc->vcos;
1137        for (d = 0; (d < DIM); d++)
1139            for (m = 0; (m < DIM); m++)
1141                /* if we're computing a full step velocity, v_corrt[d] has v(t).  Otherwise, v(t+dt/2) */
1144                    tcstat[gt].ekinf[m][d] += hm*v_corrt[m]*v_corrt[d];
1148                    tcstat[gt].ekinh[m][d] += hm*v_corrt[m]*v_corrt[d];
1152        if (md->nPerturbed && md->bPerturbed[n])
1154            /* The minus sign here might be confusing.
1155             * The kinetic contribution from dH/dl doesn't come from
1156             * d m(l)/2 v^2 / dl,
1157             * where p are the momenta. The difference is only a minus sign.
1159                0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt, v_corrt);
1162    ekind->dekindl = dekindl;
1163    cosacc->mvcos  = mvcos;
1165    inc_nrnb(nrnb, eNR_EKIN, homenr);
/* Dispatch the kinetic-energy calculation: the viscosity variant when a
 * cosine acceleration profile is active, the normal variant otherwise.
 */
1168 void calc_ke_part(t_state *state, t_grpopts *opts, t_mdatoms *md,
1169                   gmx_ekindata_t *ekind, t_nrnb *nrnb, gmx_bool bEkinAveVel, gmx_bool bSaveEkinOld)
1171    if (ekind->cosacc.cos_accel == 0)
1173        calc_ke_part_normal(state->v, opts, md, ekind, nrnb, bEkinAveVel, bSaveEkinOld);
1177        calc_ke_part_visc(state->box, state->x, state->v, opts, md, ekind, nrnb, bEkinAveVel);
/* Allocate the checkpointable kinetic-energy state: one entry per
 * temperature group for each tensor/scalar array, zeroed dekindl/mvcos.
 */
1181 extern void init_ekinstate(ekinstate_t *ekinstate, const t_inputrec *ir)
1183    ekinstate->ekin_n = ir->opts.ngtc;
1184    snew(ekinstate->ekinh, ekinstate->ekin_n);
1185    snew(ekinstate->ekinf, ekinstate->ekin_n);
1186    snew(ekinstate->ekinh_old, ekinstate->ekin_n);
1187    snew(ekinstate->ekinscalef_nhc, ekinstate->ekin_n);
1188    snew(ekinstate->ekinscaleh_nhc, ekinstate->ekin_n);
1189    snew(ekinstate->vscale_nhc, ekinstate->ekin_n);
1190    ekinstate->dekindl = 0;
1191    ekinstate->mvcos   = 0;
/* Copy the live kinetic-energy data (per-group tensors, NHC scaling
 * factors, total ekin, dekindl, mvcos) into the checkpoint structure.
 * Inverse of restore_ekinstate_from_state.
 */
1194 void update_ekinstate(ekinstate_t *ekinstate, gmx_ekindata_t *ekind)
1198    for (i = 0; i < ekinstate->ekin_n; i++)
1200        copy_mat(ekind->tcstat[i].ekinh, ekinstate->ekinh[i]);
1201        copy_mat(ekind->tcstat[i].ekinf, ekinstate->ekinf[i]);
1202        copy_mat(ekind->tcstat[i].ekinh_old, ekinstate->ekinh_old[i]);
1203        ekinstate->ekinscalef_nhc[i] = ekind->tcstat[i].ekinscalef_nhc;
1204        ekinstate->ekinscaleh_nhc[i] = ekind->tcstat[i].ekinscaleh_nhc;
1205        ekinstate->vscale_nhc[i]     = ekind->tcstat[i].vscale_nhc;
1208    copy_mat(ekind->ekin, ekinstate->ekin_total);
1209    ekinstate->dekindl = ekind->dekindl;
1210    ekinstate->mvcos   = ekind->cosacc.mvcos;
/* Restore kinetic-energy data from a checkpoint. The master rank copies
 * ekinstate into ekind; under MPI every field is then broadcast so all
 * ranks end up with identical kinetic-energy state.
 */
1214 void restore_ekinstate_from_state(t_commrec *cr,
1215                                   gmx_ekindata_t *ekind, ekinstate_t *ekinstate)
1221        for (i = 0; i < ekinstate->ekin_n; i++)
1223            copy_mat(ekinstate->ekinh[i], ekind->tcstat[i].ekinh);
1224            copy_mat(ekinstate->ekinf[i], ekind->tcstat[i].ekinf);
1225            copy_mat(ekinstate->ekinh_old[i], ekind->tcstat[i].ekinh_old);
1226            ekind->tcstat[i].ekinscalef_nhc = ekinstate->ekinscalef_nhc[i];
1227            ekind->tcstat[i].ekinscaleh_nhc = ekinstate->ekinscaleh_nhc[i];
1228            ekind->tcstat[i].vscale_nhc     = ekinstate->vscale_nhc[i];
1231        copy_mat(ekinstate->ekin_total, ekind->ekin);
1233        ekind->dekindl      = ekinstate->dekindl;
1234        ekind->cosacc.mvcos = ekinstate->mvcos;
1235        n                   = ekinstate->ekin_n;
            /* Broadcast the group count first so non-master ranks know the loop bound */
1240        gmx_bcast(sizeof(n), &n, cr);
1241        for (i = 0; i < n; i++)
1243            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh[0][0]),
1244                      ekind->tcstat[i].ekinh[0], cr);
1245            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinf[0][0]),
1246                      ekind->tcstat[i].ekinf[0], cr);
1247            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh_old[0][0]),
1248                      ekind->tcstat[i].ekinh_old[0], cr);
1250            gmx_bcast(sizeof(ekind->tcstat[i].ekinscalef_nhc),
1251                      &(ekind->tcstat[i].ekinscalef_nhc), cr);
1252            gmx_bcast(sizeof(ekind->tcstat[i].ekinscaleh_nhc),
1253                      &(ekind->tcstat[i].ekinscaleh_nhc), cr);
1254            gmx_bcast(sizeof(ekind->tcstat[i].vscale_nhc),
1255                      &(ekind->tcstat[i].vscale_nhc), cr);
1257        gmx_bcast(DIM*DIM*sizeof(ekind->ekin[0][0]),
1258                  ekind->ekin[0], cr);
1260        gmx_bcast(sizeof(ekind->dekindl), &ekind->dekindl, cr);
1261        gmx_bcast(sizeof(ekind->cosacc.mvcos), &ekind->cosacc.mvcos, cr);
/* Record the reference step and box for the continuous box-deformation
 * algorithm; deform() later extrapolates from this reference.
 */
1265 void set_deform_reference_box(gmx_update_t upd, gmx_int64_t step, matrix box)
1267    upd->deformref_step = step;
1268    copy_mat(box, upd->deformref_box);
/* Apply continuous box deformation: build the target box bnew by linear
 * extrapolation from the reference box (deformref_box) over the elapsed
 * time, wrap the off-diagonal elements back into range so shifts stay
 * valid, then scale box and particle coordinates with the resulting
 * upper-triangular transformation mu. Optionally accumulates mu into
 * *scale_tot (stored transposed, hence the reversed multiplication).
 */
1271 static void deform(gmx_update_t upd,
1272                    int start, int homenr, rvec x[], matrix box, matrix *scale_tot,
1273                    const t_inputrec *ir, gmx_int64_t step)
1275    matrix bnew, invbox, mu;
1279    elapsed_time = (step + 1 - upd->deformref_step)*ir->delta_t;
1280    copy_mat(box, bnew);
1281    for (i = 0; i < DIM; i++)
1283        for (j = 0; j < DIM; j++)
1285            if (ir->deform[i][j] != 0)
1288                    upd->deformref_box[i][j] + elapsed_time*ir->deform[i][j];
1292    /* We correct the off-diagonal elements,
1293     * which can grow indefinitely during shearing,
1294     * so the shifts do not get messed up.
1296    for (i = 1; i < DIM; i++)
1298        for (j = i-1; j >= 0; j--)
1300            while (bnew[i][j] - box[i][j] > 0.5*bnew[j][j])
1302                rvec_dec(bnew[i], bnew[j]);
1304            while (bnew[i][j] - box[i][j] < -0.5*bnew[j][j])
1306                rvec_inc(bnew[i], bnew[j]);
1310    m_inv_ur0(box, invbox);
1311    copy_mat(bnew, box);
1312    mmul_ur0(box, invbox, mu);
1314    for (i = start; i < start+homenr; i++)
1316        x[i][XX] = mu[XX][XX]*x[i][XX]+mu[YY][XX]*x[i][YY]+mu[ZZ][XX]*x[i][ZZ];
1317        x[i][YY] = mu[YY][YY]*x[i][YY]+mu[ZZ][YY]*x[i][ZZ];
1318        x[i][ZZ] = mu[ZZ][ZZ]*x[i][ZZ];
1320    if (scale_tot != NULL)
1322        /* The transposes of the scaling matrices are stored,
1323         * so we need to do matrix multiplication in the inverse order.
1325        mmul_ur0(*scale_tot, mu, *scale_tot);
/* Apply temperature coupling for this step if due. Trotter-decomposed
 * VV schemes handle coupling elsewhere, so they are excluded here.
 * Dispatches to Berendsen, Nose-Hoover or v-rescale; for VV integrators
 * the velocity rescaling is applied in place immediately. When no
 * coupling happens this step, all group lambdas are reset to 1.
 */
1329 void update_tcouple(gmx_int64_t step,
1330                     t_inputrec *inputrec,
1332                     gmx_ekindata_t *ekind,
1337    gmx_bool   bTCouple = FALSE;
1339    int        i, start, end, homenr, offset;
1341    /* if using vv with trotter decomposition methods, we do this elsewhere in the code */
1342    if (inputrec->etc != etcNO &&
1343        !(IR_NVT_TROTTER(inputrec) || IR_NPT_TROTTER(inputrec) || IR_NPH_TROTTER(inputrec)))
1345        /* We should only couple after a step where energies were determined (for leapfrog versions)
1346           or the step energies are determined, for velocity verlet versions */
1348        if (EI_VV(inputrec->eI))
1356        bTCouple = (inputrec->nsttcouple == 1 ||
1357                    do_per_step(step+inputrec->nsttcouple-offset,
1358                                inputrec->nsttcouple));
1363        dttc = inputrec->nsttcouple*inputrec->delta_t;
1365        switch (inputrec->etc)
1370                berendsen_tcoupl(inputrec, ekind, dttc);
1373                nosehoover_tcoupl(&(inputrec->opts), ekind, dttc,
1374                                  state->nosehoover_xi, state->nosehoover_vxi, MassQ);
1377                vrescale_tcoupl(inputrec, step, ekind, dttc,
1378                                state->therm_integral);
1381        /* rescale in place here */
1382        if (EI_VV(inputrec->eI))
1384            rescale_velocities(ekind, md, 0, md->homenr, state->v);
1389        /* Set the T scaling lambda to 1 to have no scaling */
1390        for (i = 0; (i < inputrec->opts.ngtc); i++)
1392            ekind->tcstat[i].lambda = 1.0;
/* Apply pressure coupling for this step if due (Trotter NPT/NPH is
 * handled in coupling.c and skipped here). pcoupl_mu is initialized to
 * the identity, then filled by Berendsen or Parrinello-Rahman coupling.
 */
1397 void update_pcouple(FILE *fplog,
1399                     t_inputrec *inputrec,
1405    gmx_bool   bPCouple = FALSE;
1409    /* if using Trotter pressure, we do this in coupling.c, so we leave it false. */
1410    if (inputrec->epc != epcNO && (!(IR_NPT_TROTTER(inputrec) || IR_NPH_TROTTER(inputrec))))
1412        /* We should only couple after a step where energies were determined */
1413        bPCouple = (inputrec->nstpcouple == 1 ||
1414                    do_per_step(step+inputrec->nstpcouple-1,
1415                                inputrec->nstpcouple));
1418    clear_mat(pcoupl_mu);
1419    for (i = 0; i < DIM; i++)
1421        pcoupl_mu[i][i] = 1.0;
1428        dtpc = inputrec->nstpcouple*inputrec->delta_t;
1430        switch (inputrec->epc)
1432            /* We can always pcoupl, even if we did not sum the energies
1433             * the previous step, since state->pres_prev is only updated
1434             * when the energies have been summed.
1438            case (epcBERENDSEN):
1441                berendsen_pcoupl(fplog, step, inputrec, dtpc, state->pres_prev, state->box,
1445            case (epcPARRINELLORAHMAN):
1446                parrinellorahman_pcoupl(fplog, step, inputrec, dtpc, state->pres_prev,
1447                                        state->box, state->box_rel, state->boxv,
1448                                        M, pcoupl_mu, bInitStep);
/* Return the trial-coordinate buffer upd->xp, growing it to match
 * state->nalloc if needed. Reuses the buffer across steps.
 */
1456 static rvec *get_xprime(const t_state *state, gmx_update_t upd)
1458    if (state->nalloc > upd->xp_nalloc)
1460        upd->xp_nalloc = state->nalloc;
1461        srenew(upd->xp, upd->xp_nalloc);
/* Combine short-range forces f with the separately stored long-range
 * forces f_lr for multiple-time-stepping (nstcalclr): the result
 * f_lr = f + (nstcalclr-1)*f_lr is stored back into f_lr.
 * If constraints are present (except SHAKE without pressure coupling),
 * the extra (nstcalclr-1)*f_lr part is first "constrained" through a
 * coordinate projection so the constraint virial can be corrected
 * (vir_lr_constr). Frozen dimensions / vsites / shells are excluded
 * from the projection displacement.
 */
1467 static void combine_forces(gmx_update_t upd,
1469                            gmx_constr_t constr,
1470                            t_inputrec *ir, t_mdatoms *md, t_idef *idef,
1473                            t_state *state, gmx_bool bMolPBC,
1474                            int start, int nrend,
1475                            rvec f[], rvec f_lr[],
1476                            tensor *vir_lr_constr,
1481    /* f contains the short-range forces + the long range forces
1482     * which are stored separately in f_lr.
1485    if (constr != NULL && vir_lr_constr != NULL &&
1486        !(ir->eConstrAlg == econtSHAKE && ir->epc == epcNO))
1488        /* We need to constrain the LR forces separately,
1489         * because due to the different pre-factor for the SR and LR
1490         * forces in the update algorithm, we have to correct
1491         * the constraint virial for the nstcalclr-1 extra f_lr.
1492         * Constrain only the additional LR part of the force.
1494        /* MRS -- need to make sure this works with trotter integration -- the constraint calls may not be right.*/
1499        xp    = get_xprime(state, upd);
            /* Displacement prefactor: dt^2 times the number of extra LR applications */
1501        fac = (nstcalclr - 1)*ir->delta_t*ir->delta_t;
1503        for (i = 0; i < md->homenr; i++)
1505            if (md->cFREEZE != NULL)
1507                gf = md->cFREEZE[i];
1509            for (d = 0; d < DIM; d++)
1511                if ((md->ptype[i] != eptVSite) &&
1512                    (md->ptype[i] != eptShell) &&
1513                    !ir->opts.nFreeze[gf][d])
1515                    xp[i][d] = state->x[i][d] + fac*f_lr[i][d]*md->invmass[i];
1519        constrain(NULL, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, 1.0, md,
1520                  state->x, xp, xp, bMolPBC, state->box, state->lambda[efptBONDED], NULL,
1521                  NULL, vir_lr_constr, nrnb, econqCoord, ir->epc == epcMTTK, state->veta, state->veta);
1524    /* Add nstcalclr-1 times the LR force to the sum of both forces
1525     * and store the result in forces_lr.
1527    for (i = start; i < nrend; i++)
1529        for (d = 0; d < DIM; d++)
1531            f_lr[i][d] = f[i][d] + (nstcalclr - 1)*f_lr[i][d];
1536 void update_constraints(FILE *fplog,
1538 real *dvdlambda, /* the contribution to be added to the bonded interactions */
1539 t_inputrec *inputrec, /* input record and box stuff */
1540 gmx_ekindata_t *ekind,
1545 rvec force[], /* forces on home particles */
1550 gmx_wallcycle_t wcycle,
1552 gmx_constr_t constr,
1553 gmx_bool bFirstHalf,
1557 gmx_bool bExtended, bLastStep, bLog = FALSE, bEner = FALSE, bDoConstr = FALSE;
1560 int start, homenr, nrend, i, n, m, g, d;
1562 rvec *vbuf, *xprime = NULL;
1569 if (bFirstHalf && !EI_VV(inputrec->eI))
1574 /* for now, SD update is here -- though it really seems like it
1575 should be reformulated as a velocity verlet method, since it has two parts */
1578 homenr = md->homenr;
1579 nrend = start+homenr;
1581 dt = inputrec->delta_t;
1586 * APPLY CONSTRAINTS:
1589 * When doing PR pressure coupling we have to constrain the
1590 * bonds in each iteration. If we are only using Nose-Hoover tcoupling
1591 * it is enough to do this once though, since the relative velocities
1592 * after this will be normal to the bond vector
1597 /* clear out constraints before applying */
1598 clear_mat(vir_part);
1600 xprime = get_xprime(state, upd);
1602 bLastStep = (step == inputrec->init_step+inputrec->nsteps);
1603 bLog = (do_per_step(step, inputrec->nstlog) || bLastStep || (step < 0));
1604 bEner = (do_per_step(step, inputrec->nstenergy) || bLastStep);
1605 /* Constrain the coordinates xprime */
1606 wallcycle_start(wcycle, ewcCONSTR);
1607 if (EI_VV(inputrec->eI) && bFirstHalf)
1609 constrain(NULL, bLog, bEner, constr, idef,
1610 inputrec, ekind, cr, step, 1, 1.0, md,
1611 state->x, state->v, state->v,
1612 bMolPBC, state->box,
1613 state->lambda[efptBONDED], dvdlambda,
1614 NULL, bCalcVir ? &vir_con : NULL, nrnb, econqVeloc,
1615 inputrec->epc == epcMTTK, state->veta, vetanew);
1619 constrain(NULL, bLog, bEner, constr, idef,
1620 inputrec, ekind, cr, step, 1, 1.0, md,
1621 state->x, xprime, NULL,
1622 bMolPBC, state->box,
1623 state->lambda[efptBONDED], dvdlambda,
1624 state->v, bCalcVir ? &vir_con : NULL, nrnb, econqCoord,
1625 inputrec->epc == epcMTTK, state->veta, state->veta);
1627 wallcycle_stop(wcycle, ewcCONSTR);
1631 dump_it_all(fplog, "After Shake",
1632 state->natoms, state->x, xprime, state->v, force);
1636 if (inputrec->eI == eiSD2)
1638 /* A correction factor eph is needed for the SD constraint force */
1639 /* Here we can, unfortunately, not have proper corrections
1640 * for different friction constants, so we use the first one.
1642 for (i = 0; i < DIM; i++)
1644 for (m = 0; m < DIM; m++)
1646 vir_part[i][m] += upd->sd->sdc[0].eph*vir_con[i][m];
1652 m_add(vir_part, vir_con, vir_part);
1656 pr_rvecs(debug, 0, "constraint virial", vir_part, DIM);
1663 if (inputrec->eI == eiSD1 && bDoConstr && !bFirstHalf)
1665 wallcycle_start(wcycle, ewcUPDATE);
1666 xprime = get_xprime(state, upd);
1668 nth = gmx_omp_nthreads_get(emntUpdate);
1670 #pragma omp parallel for num_threads(nth) schedule(static)
1672 for (th = 0; th < nth; th++)
1674 int start_th, end_th;
1676 start_th = start + ((nrend-start)* th )/nth;
1677 end_th = start + ((nrend-start)*(th+1))/nth;
1679 /* The second part of the SD integration */
1680 do_update_sd1(upd->sd,
1681 start_th, end_th, dt,
1682 inputrec->opts.acc, inputrec->opts.nFreeze,
1683 md->invmass, md->ptype,
1684 md->cFREEZE, md->cACC, md->cTC,
1685 state->x, xprime, state->v, force,
1686 inputrec->opts.ngtc, inputrec->opts.ref_t,
1688 step, inputrec->ld_seed,
1689 DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL);
1691 inc_nrnb(nrnb, eNR_UPDATE, homenr);
1692 wallcycle_stop(wcycle, ewcUPDATE);
1696 /* Constrain the coordinates xprime for half a time step */
1697 wallcycle_start(wcycle, ewcCONSTR);
1699 constrain(NULL, bLog, bEner, constr, idef,
1700 inputrec, NULL, cr, step, 1, 0.5, md,
1701 state->x, xprime, NULL,
1702 bMolPBC, state->box,
1703 state->lambda[efptBONDED], dvdlambda,
1704 state->v, NULL, nrnb, econqCoord, FALSE, 0, 0);
1706 wallcycle_stop(wcycle, ewcCONSTR);
1710 if ((inputrec->eI == eiSD2) && !(bFirstHalf))
1712 wallcycle_start(wcycle, ewcUPDATE);
1713 xprime = get_xprime(state, upd);
1715 nth = gmx_omp_nthreads_get(emntUpdate);
1717 #pragma omp parallel for num_threads(nth) schedule(static)
1718 for (th = 0; th < nth; th++)
1720 int start_th, end_th;
1722 start_th = start + ((nrend-start)* th )/nth;
1723 end_th = start + ((nrend-start)*(th+1))/nth;
1725 /* The second part of the SD integration */
1726 do_update_sd2(upd->sd,
1727 FALSE, start_th, end_th,
1728 inputrec->opts.acc, inputrec->opts.nFreeze,
1729 md->invmass, md->ptype,
1730 md->cFREEZE, md->cACC, md->cTC,
1731 state->x, xprime, state->v, force, state->sd_X,
1732 inputrec->opts.tau_t,
1733 FALSE, step, inputrec->ld_seed,
1734 DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL);
1736 inc_nrnb(nrnb, eNR_UPDATE, homenr);
1737 wallcycle_stop(wcycle, ewcUPDATE);
1741 /* Constrain the coordinates xprime */
1742 wallcycle_start(wcycle, ewcCONSTR);
1743 constrain(NULL, bLog, bEner, constr, idef,
1744 inputrec, NULL, cr, step, 1, 1.0, md,
1745 state->x, xprime, NULL,
1746 bMolPBC, state->box,
1747 state->lambda[efptBONDED], dvdlambda,
1748 NULL, NULL, nrnb, econqCoord, FALSE, 0, 0);
1749 wallcycle_stop(wcycle, ewcCONSTR);
1754 /* We must always unshift after updating coordinates; if we did not shake
1755 x was shifted in do_force */
1757 if (!(bFirstHalf)) /* in the first half of vv, no shift. */
1759 if (graph && (graph->nnodes > 0))
1761 unshift_x(graph, state->box, state->x, upd->xp);
1762 if (TRICLINIC(state->box))
1764 inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
1768 inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
1773 #pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntUpdate)) schedule(static)
1774 for (i = start; i < nrend; i++)
1776 copy_rvec(upd->xp[i], state->x[i]);
1780 dump_it_all(fplog, "After unshift",
1781 state->natoms, state->x, upd->xp, state->v, force);
1783 /* ############# END the update of velocities and positions ######### */
/* Scale the simulation box (and, for Berendsen/P-R, the coordinates)
 * according to the active pressure-coupling scheme (inputrec->epc),
 * then fold the applied scaling matrix into *scale_tot and apply any
 * continuous box deformation last.
 */
1786 void update_box(FILE *fplog,
1788 t_inputrec *inputrec, /* input record and box stuff */
1791 rvec force[], /* forces on home particles */
1797 gmx_bool bExtended, bLastStep, bLog = FALSE, bEner = FALSE;
1800 int start, homenr, nrend, i, n, m, g;
1804 homenr = md->homenr;
/* Range of home atoms handled here: [start, nrend) */
1805 nrend = start+homenr;
/* bExtended: extended-ensemble bookkeeping is needed for Nose-Hoover
 * temperature coupling or Parrinello-Rahman/MTTK pressure coupling. */
1808 (inputrec->etc == etcNOSEHOOVER) ||
1809 (inputrec->epc == epcPARRINELLORAHMAN) ||
1810 (inputrec->epc == epcMTTK);
1812 dt = inputrec->delta_t;
1816 /* now update boxes */
1817 switch (inputrec->epc)
/* Berendsen: weak-coupling scaling of box and home-atom coordinates
 * by the pre-computed scaling matrix pcoupl_mu. */
1821 case (epcBERENDSEN):
1822 berendsen_pscale(inputrec, pcoupl_mu, state->box, state->box_rel,
1823 start, homenr, state->x, md->cFREEZE, nrnb);
1825 case (epcPARRINELLORAHMAN):
1826 /* The box velocities were updated in do_pr_pcoupl in the update
1827 * iteration, but we don't change the box vectors until we get here
1828 * since we need to be able to shift/unshift above.
/* The box matrix is lower-triangular, hence only m <= i entries. */
1830 for (i = 0; i < DIM; i++)
1832 for (m = 0; m <= i; m++)
1834 state->box[i][m] += dt*state->boxv[i][m];
1837 preserve_box_shape(inputrec, state->box_rel, state->box);
1839 /* Scale the coordinates */
1840 for (n = start; (n < start+homenr); n++)
1842 tmvmul_ur0(pcoupl_mu, state->x[n], state->x[n]);
/* Presumably the MTTK branch (box driven by veta) — the case label is
 * not visible in this view; TODO confirm against the full file. */
1846 switch (inputrec->epct)
1848 case (epctISOTROPIC):
1849 /* DIM * eta = ln V. so DIM*eta_new = DIM*eta_old + DIM*dt*veta =>
1850 ln V_new = ln V_old + 3*dt*veta => V_new = V_old*exp(3*dt*veta) =>
1851 Side length scales as exp(veta*dt) */
1853 msmul(state->box, exp(state->veta*dt), state->box);
1855 /* Relate veta to boxv. veta = d(eta)/dT = (1/DIM)*1/V dV/dT.
1856 o If we assume isotropic scaling, and box length scaling
1857 factor L, then V = L^DIM (det(M)). So dV/dt = DIM
1858 L^(DIM-1) dL/dt det(M), and veta = (1/L) dL/dt. The
1859 determinant of B is L^DIM det(M), and the determinant
1860 of dB/dt is (dL/dT)^DIM det (M). veta will be
1861 (det(dB/dT)/det(B))^(1/3). Then since M =
1862 B_new*(vol_new)^(1/3), dB/dT_new = (veta_new)*B(new). */
1864 msmul(state->box, state->veta, state->boxv);
/* Accumulate this step's scaling into *scale_tot, except for Trotter
 * (NPT/NPH) schemes which handle the total scaling elsewhere. */
1874 if ((!(IR_NPT_TROTTER(inputrec) || IR_NPH_TROTTER(inputrec))) && scale_tot)
1876 /* The transposes of the scaling matrices are stored,
1877 * therefore we need to reverse the order in the multiplication.
1879 mmul_ur0(*scale_tot, pcoupl_mu, *scale_tot);
/* Continuous box deformation (e.g. shear) is applied after pressure
 * coupling so it acts on the already-scaled box and coordinates. */
1882 if (DEFORM(*inputrec))
1884 deform(upd, start, homenr, state->x, state->box, scale_tot, inputrec, step);
/* Debug dump of the post-update state (no-op unless debugging). */
1887 dump_it_all(fplog, "After update",
1888 state->natoms, state->x, upd->xp, state->v, force);
/* Perform the coordinate/velocity update for one MD step, dispatching
 * on the integrator (inputrec->eI): leap-frog MD, SD1/SD2 stochastic
 * dynamics, Brownian dynamics, or the velocity-Verlet half-steps.
 * The per-atom work is parallelized over OpenMP threads, each handling
 * a contiguous slice of the home-atom range [start, nrend).
 * New positions are written to xprime (from get_xprime), not state->x.
 */
1891 void update_coords(FILE *fplog,
1893 t_inputrec *inputrec, /* input record and box stuff */
1897 rvec *f, /* forces on home particles */
1900 tensor *vir_lr_constr,
1902 gmx_ekindata_t *ekind,
1907 t_commrec *cr, /* these shouldn't be here -- need to think about it */
1909 gmx_constr_t constr,
1912 gmx_bool bNH, bPR, bLastStep, bLog = FALSE, bEner = FALSE, bDoConstr = FALSE;
1914 real *imass, *imassin;
1917 int start, homenr, nrend, i, j, d, n, m, g;
1918 int blen0, blen1, iatom, jatom, nshake, nsettle, nconstr, nexpand;
1921 rvec *vcom, *xcom, *vall, *xall, *xin, *vin, *forcein, *fall, *xpall, *xprimein, *xprime;
/* Constraints are active iff a constraint object was supplied. */
1924 bDoConstr = (NULL != constr);
1926 /* Running the velocity half does nothing except for velocity verlet */
1927 if ((UpdatePart == etrtVELOCITY1 || UpdatePart == etrtVELOCITY2) &&
1928 !EI_VV(inputrec->eI))
1930 gmx_incons("update_coords called for velocity without VV integrator");
1934 homenr = md->homenr;
1935 nrend = start+homenr;
/* Scratch buffer for the updated positions. */
1937 xprime = get_xprime(state, upd);
1939 dt = inputrec->delta_t;
1942 /* We need to update the NMR restraint history when time averaging is used */
1943 if (state->flags & (1<<estDISRE_RM3TAV))
1945 update_disres_history(fcd, &state->hist);
1947 if (state->flags & (1<<estORIRE_DTAV))
1949 update_orires_history(fcd, &state->hist);
/* Flags for extended-ensemble coupling used by the VV branch below. */
1953 bNH = inputrec->etc == etcNOSEHOOVER;
1954 bPR = ((inputrec->epc == epcPARRINELLORAHMAN) || (inputrec->epc == epcMTTK));
/* Twin-range (multiple-time-step) force handling; not done for VV. */
1956 if (bDoLR && inputrec->nstcalclr > 1 && !EI_VV(inputrec->eI)) /* get this working with VV? */
1958 /* Store the total force + nstcalclr-1 times the LR force
1959 * in forces_lr, so it can be used in a normal update algorithm
1960 * to produce twin time stepping.
1962 /* is this correct in the new construction? MRS */
1964 inputrec->nstcalclr, constr, inputrec, md, idef, cr,
1965 step, state, bMolPBC,
1966 start, nrend, f, f_lr, vir_lr_constr, nrnb);
1974 /* ############# START The update of velocities and positions ######### */
1976 dump_it_all(fplog, "Before update",
1977 state->natoms, state->x, xprime, state->v, force);
/* SD2 needs per-thread work buffers and temperature constants set up
 * once per step, before the threaded update loop. */
1979 if (inputrec->eI == eiSD2)
1981 check_sd2_work_data_allocation(upd->sd, nrend);
1983 do_update_sd2_Tconsts(upd->sd,
1984 inputrec->opts.ngtc,
1985 inputrec->opts.tau_t,
1986 inputrec->opts.ref_t);
/* Brownian dynamics likewise precomputes its temperature constants. */
1988 if (inputrec->eI == eiBD)
1990 do_update_bd_Tconsts(dt, inputrec->bd_fric,
1991 inputrec->opts.ngtc, inputrec->opts.ref_t,
1995 nth = gmx_omp_nthreads_get(emntUpdate);
1997 #pragma omp parallel for num_threads(nth) schedule(static) private(alpha)
1998 for (th = 0; th < nth; th++)
2000 int start_th, end_th;
/* Static partition of [start, nrend) across the nth update threads. */
2002 start_th = start + ((nrend-start)* th )/nth;
2003 end_th = start + ((nrend-start)*(th+1))/nth;
/* Dispatch on the integrator; each do_update_* writes xprime/v for
 * this thread's atom slice only. */
2005 switch (inputrec->eI)
/* Leap-frog MD: plain update, or viscosity variant when a cosine
 * acceleration profile is active. */
2008 if (ekind->cosacc.cos_accel == 0)
2010 do_update_md(start_th, end_th, dt,
2011 ekind->tcstat, state->nosehoover_vxi,
2012 ekind->bNEMD, ekind->grpstat, inputrec->opts.acc,
2013 inputrec->opts.nFreeze,
2014 md->invmass, md->ptype,
2015 md->cFREEZE, md->cACC, md->cTC,
2016 state->x, xprime, state->v, force, M,
2021 do_update_visc(start_th, end_th, dt,
2022 ekind->tcstat, state->nosehoover_vxi,
2023 md->invmass, md->ptype,
2024 md->cTC, state->x, xprime, state->v, force, M,
2026 ekind->cosacc.cos_accel,
2032 /* With constraints, the SD1 update is done in 2 parts */
2033 do_update_sd1(upd->sd,
2034 start_th, end_th, dt,
2035 inputrec->opts.acc, inputrec->opts.nFreeze,
2036 md->invmass, md->ptype,
2037 md->cFREEZE, md->cACC, md->cTC,
2038 state->x, xprime, state->v, force,
2039 inputrec->opts.ngtc, inputrec->opts.ref_t,
2041 step, inputrec->ld_seed, DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL);
2044 /* The SD2 update is always done in 2 parts,
2045 * because an extra constraint step is needed
2047 do_update_sd2(upd->sd,
2048 bInitStep, start_th, end_th,
2049 inputrec->opts.acc, inputrec->opts.nFreeze,
2050 md->invmass, md->ptype,
2051 md->cFREEZE, md->cACC, md->cTC,
2052 state->x, xprime, state->v, force, state->sd_X,
2053 inputrec->opts.tau_t,
2054 TRUE, step, inputrec->ld_seed,
2055 DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL);
/* Brownian dynamics: position update with stochastic forces. */
2058 do_update_bd(start_th, end_th, dt,
2059 inputrec->opts.nFreeze, md->invmass, md->ptype,
2060 md->cFREEZE, md->cTC,
2061 state->x, xprime, state->v, force,
2064 step, inputrec->ld_seed, DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL);
/* Velocity Verlet: alpha corrects for barostat degrees of freedom. */
2068 alpha = 1.0 + DIM/((double)inputrec->opts.nrdf[0]); /* assuming barostat coupled to group 0. */
2073 do_update_vv_vel(start_th, end_th, dt,
2074 inputrec->opts.acc, inputrec->opts.nFreeze,
2075 md->invmass, md->ptype,
2076 md->cFREEZE, md->cACC,
2078 (bNH || bPR), state->veta, alpha);
2081 do_update_vv_pos(start_th, end_th, dt,
2082 inputrec->opts.nFreeze,
2083 md->ptype, md->cFREEZE,
2084 state->x, xprime, state->v,
2085 (bNH || bPR), state->veta);
/* Unknown integrator: hard failure rather than a silent no-op. */
2090 gmx_fatal(FARGS, "Don't know how to update coordinates");
/* Debug-only helper: correct a kinetic-energy tensor computed without
 * removing the center-of-mass motion, and print the correction.
 * v[]    : velocities of particles [start, end)
 * vcm    : accumulator for the COM momentum/velocity (modified in place)
 * mass[] : particle masses; tmass: total mass
 * ekin   : the uncorrected kinetic-energy tensor, printed for comparison
 */
2098 void correct_ekin(FILE *log, int start, int end, rvec v[], rvec vcm, real mass[],
2099 real tmass, tensor ekin)
2102 * This is a debugging routine. It should not be called for production code
2104 * The kinetic energy should be calculated according to:
2105 * Ekin = 1/2 m (v-vcm)^2
2106 * However the correction is not always applied, since vcm may not be
2107 * known in time and we compute
2108 * Ekin' = 1/2 m v^2 instead
2109 * This can be corrected afterwards by computing
2110 * Ekin = Ekin' + 1/2 m ( -2 v vcm + vcm^2)
2112 * Ekin = Ekin' - m v vcm + 1/2 m vcm^2
2119 /* Local particles */
2122 /* Processor dependent part. */
/* Accumulation over local particles; presumably sums m*v into mv/vcm
 * and mass into tm (loop body elided in this view) — TODO confirm. */
2124 for (i = start; (i < end); i++)
2128 for (j = 0; (j < DIM); j++)
/* Turn the accumulated momentum into the COM velocity, and take half
 * of it for the 1/2 m vcm^2 term. */
2134 svmul(1/tmass, vcm, vcm);
2135 svmul(0.5, vcm, hvcm);
/* Build the correction tensor: dekin = vcm (x) (tm*hvcm - mv),
 * i.e. the "- m v vcm + 1/2 m vcm^2" terms of the formula above. */
2137 for (j = 0; (j < DIM); j++)
2139 for (k = 0; (k < DIM); k++)
2141 dekin[j][k] += vcm[k]*(tm*hvcm[j]-mv[j]);
/* Report the correction next to the uncorrected tensor. */
2144 pr_rvecs(log, 0, "dekin", dekin, DIM);
2145 pr_rvecs(log, 0, " ekin", ekin, DIM);
2146 fprintf(log, "dekin = %g, ekin = %g vcm = (%8.4f %8.4f %8.4f)\n",
2147 trace(dekin), trace(ekin), vcm[XX], vcm[YY], vcm[ZZ]);
2148 fprintf(log, "mv = (%8.4f %8.4f %8.4f)\n",
2149 mv[XX], mv[YY], mv[ZZ]);
2152 extern gmx_bool update_randomize_velocities(t_inputrec *ir, gmx_int64_t step, const t_commrec *cr,
2153 t_mdatoms *md, t_state *state, gmx_update_t upd, gmx_constr_t constr)
2157 real rate = (ir->delta_t)/ir->opts.tau_t[0];
2159 if (ir->etc == etcANDERSEN && constr != NULL)
2161 gmx_fatal(FARGS, "Normal Andersen is currently not supported with constraints, use massive Andersen instead");
2164 /* proceed with andersen if 1) it's fixed probability per
2165 particle andersen or 2) it's massive andersen and it's tau_t/dt */
2166 if ((ir->etc == etcANDERSEN) || do_per_step(step, (int)(1.0/rate)))
2168 andersen_tcoupl(ir, step, cr, md, state, rate,
2169 upd->sd->randomize_group, upd->sd->boltzfac);