1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
4 * This source code is part of
8 * GROningen MAchine for Chemical Simulations
11 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
12 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
13 * Copyright (c) 2001-2004, The GROMACS development team,
14 * check out http://www.gromacs.org for more information.
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version 2
19 * of the License, or (at your option) any later version.
21 * If you want to redistribute modifications, please consider that
22 * scientific software is very special. Version control is crucial -
23 * bugs must be traceable. We will be happy to consider code for
24 * inclusion in the official distribution, but derived work must not
25 * be called official GROMACS. Details are found in the README & COPYING
26 * files - if they are missing, get the official version at www.gromacs.org.
28 * To help us fund GROMACS development, we humbly ask that you cite
29 * the papers on the package - you can find them in the top README file.
31 * For more info, check our website at http://www.gromacs.org
34 * GROwing Monsters And Cloning Shrimps
44 #include "types/commrec.h"
55 #include "gmx_random.h"
69 #include "gmx_wallcycle.h"
70 #include "gmx_omp_nthreads.h"
73 /*For debugging, start at v(-dt/2) for velocity verlet -- uncomment next line */
74 /*#define STARTFROMDT2*/
94 /* The random state for ngaussrand threads.
95 * Normal thermostats need just 1 random number generator,
96 * but SD and BD with OpenMP parallelization need 1 for each thread.
104 gmx_sd_sigma_t *sdsig;
107 /* andersen temperature control stuff */
108 gmx_bool *randomize_group;
/* Persistent working storage for the update routines (opaque gmx_update_t).
 * NOTE(review): this listing is an elided excerpt; several struct members
 * and the closing brace are not shown here.
 */
112 typedef struct gmx_update
115 /* xprime for constraint algorithms */
119 /* variable size arrays for andersen */
/* TRUE once the Andersen randatom list has been cleared/initialized */
122 gmx_bool randatom_list_init;
124 /* Variables for the deform algorithm */
/* Step at which the deform reference box was recorded (see set_deform_reference_box) */
125 gmx_large_int_t deformref_step;
/* Box matrix at deformref_step; deform() extrapolates from this reference */
126 matrix deformref_box;
/* Leap-frog MD update of velocities v and trial positions xprime over atoms
 * [start,nrend). Three code paths: (1) extended-ensemble coupling (bNH/bPR),
 * (2) Berendsen/v-rescale with freeze groups or NEMD acceleration groups,
 * (3) plain Berendsen/v-rescale. Virtual sites and shells are not moved here.
 * NOTE(review): elided excerpt -- declarations, braces and some statements
 * between the visible lines are not shown.
 */
130 static void do_update_md(int start,int nrend,double dt,
131 t_grp_tcstat *tcstat,
133 gmx_bool bNEMD,t_grp_acc *gstat,rvec accel[],
136 unsigned short ptype[],unsigned short cFREEZE[],
137 unsigned short cACC[],unsigned short cTC[],
138 rvec x[],rvec xprime[],rvec v[],
140 gmx_bool bNH,gmx_bool bPR)
145 real vn,vv,va,vb,vnrel;
151 /* Update with coupling to extended ensembles, used for
152 * Nose-Hoover and Parrinello-Rahman coupling
153 * Nose-Hoover uses the reversible leap-frog integrator from
154 * Holian et al. Phys Rev E 52(3) : 2338, 1995
156 for(n=start; n<nrend; n++)
/* lg: per-group temperature-coupling scaling factor lambda */
171 lg = tcstat[gt].lambda;
/* vrel: velocity relative to the acceleration group's mean velocity u */
175 rvec_sub(v[n],gstat[ga].u,vrel);
179 if((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
181 vnrel = (lg*vrel[d] + dt*(imass*f[n][d] - 0.5*vxi*vrel[d]
182 - iprod(M[d],vrel)))/(1 + 0.5*vxi*dt);
183 /* do not scale the mean velocities u */
184 vn = gstat[ga].u[d] + accel[ga][d]*dt + vnrel;
186 xprime[n][d] = x[n][d]+vn*dt;
/* frozen dimension / vsite / shell: position unchanged */
191 xprime[n][d] = x[n][d];
196 else if (cFREEZE != NULL ||
197 nFreeze[0][XX] || nFreeze[0][YY] || nFreeze[0][ZZ] ||
200 /* Update with Berendsen/v-rescale coupling and freeze or NEMD */
201 for(n=start; n<nrend; n++)
203 w_dt = invmass[n]*dt;
216 lg = tcstat[gt].lambda;
221 if((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
223 vv = lg*vn + f[n][d]*w_dt;
225 /* do not scale the mean velocities u */
227 va = vv + accel[ga][d]*dt;
228 vb = va + (1.0-lg)*u;
230 xprime[n][d] = x[n][d]+vb*dt;
235 xprime[n][d] = x[n][d];
242 /* Plain update with Berendsen/v-rescale coupling */
243 for(n=start; n<nrend; n++)
245 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
247 w_dt = invmass[n]*dt;
252 lg = tcstat[gt].lambda;
256 vn = lg*v[n][d] + f[n][d]*w_dt;
258 xprime[n][d] = x[n][d] + vn*dt;
266 xprime[n][d] = x[n][d];
/* Velocity half of the velocity-Verlet update: advances v by half a step
 * under force f, with optional barostat coupling through veta/alpha
 * (mv1/mv2 factors; mv2 uses a sinh(x)/x series for stability at small g).
 * NOTE(review): elided excerpt -- mv1 computation and loop scaffolding are
 * not visible here.
 */
273 static void do_update_vv_vel(int start,int nrend,double dt,
274 t_grp_tcstat *tcstat,t_grp_acc *gstat,
275 rvec accel[],ivec nFreeze[],real invmass[],
276 unsigned short ptype[],unsigned short cFREEZE[],
277 unsigned short cACC[],rvec v[],rvec f[],
278 gmx_bool bExtended, real veta, real alpha)
283 real u,vn,vv,va,vb,vnrel;
/* g couples the box velocity veta into the particle update */
289 g = 0.25*dt*veta*alpha;
/* series_sinhx: numerically safe sinh(g)/g evaluation */
291 mv2 = series_sinhx(g);
298 for(n=start; n<nrend; n++)
300 w_dt = invmass[n]*dt;
312 if((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
314 v[n][d] = mv1*(mv1*v[n][d] + 0.5*(w_dt*mv2*f[n][d]))+0.5*accel[ga][d]*dt;
322 } /* do_update_vv_vel */
/* Position half of the velocity-Verlet update: writes trial positions
 * xprime from x and the (already half-updated) velocities v, with optional
 * box-velocity coupling via mr1/mr2 factors. Frozen dims/vsites/shells keep x.
 * NOTE(review): elided excerpt -- mr1/g computation and loop scaffolding are
 * not visible here.
 */
324 static void do_update_vv_pos(int start,int nrend,double dt,
325 t_grp_tcstat *tcstat,t_grp_acc *gstat,
326 rvec accel[],ivec nFreeze[],real invmass[],
327 unsigned short ptype[],unsigned short cFREEZE[],
328 rvec x[],rvec xprime[],rvec v[],
329 rvec f[],gmx_bool bExtended, real veta, real alpha)
336 /* Would it make more sense if Parrinello-Rahman was put here? */
341 mr2 = series_sinhx(g);
347 for(n=start; n<nrend; n++) {
356 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
358 xprime[n][d] = mr1*(mr1*x[n][d]+mr2*dt*v[n][d]);
362 xprime[n][d] = x[n][d];
366 }/* do_update_vv_pos */
/* Leap-frog update for viscosity calculations with a cosine-shaped
 * acceleration profile along z (period set by box[ZZ][ZZ]). The cosine
 * velocity profile component vc is excluded from temperature scaling.
 * Two paths: extended-ensemble coupling (bNH/bPR) and Berendsen-style.
 * NOTE(review): elided excerpt -- several declarations and branches are
 * not visible here.
 */
368 static void do_update_visc(int start,int nrend,double dt,
369 t_grp_tcstat *tcstat,
372 unsigned short ptype[],unsigned short cTC[],
373 rvec x[],rvec xprime[],rvec v[],
374 rvec f[],matrix M,matrix box,real
376 gmx_bool bNH,gmx_bool bPR)
/* wave vector of the acceleration profile: one period across the box in z */
386 fac = 2*M_PI/(box[ZZ][ZZ]);
389 /* Update with coupling to extended ensembles, used for
390 * Nose-Hoover and Parrinello-Rahman coupling
392 for(n=start; n<nrend; n++) {
398 lg = tcstat[gt].lambda;
399 cosz = cos(fac*x[n][ZZ]);
401 copy_rvec(v[n],vrel);
413 if((ptype[n] != eptVSite) && (ptype[n] != eptShell))
415 vn = (lg*vrel[d] + dt*(imass*f[n][d] - 0.5*vxi*vrel[d]
416 - iprod(M[d],vrel)))/(1 + 0.5*vxi*dt);
419 vn += vc + dt*cosz*cos_accel;
422 xprime[n][d] = x[n][d]+vn*dt;
426 xprime[n][d] = x[n][d];
433 /* Classic version of update, used with berendsen coupling */
434 for(n=start; n<nrend; n++)
436 w_dt = invmass[n]*dt;
441 lg = tcstat[gt].lambda;
442 cosz = cos(fac*x[n][ZZ]);
448 if((ptype[n] != eptVSite) && (ptype[n] != eptShell))
453 /* Do not scale the cosine velocity profile */
454 vv = vc + lg*(vn - vc + f[n][d]*w_dt);
455 /* Add the cosine acceleration profile */
456 vv += dt*cosz*cos_accel;
460 vv = lg*(vn + f[n][d]*w_dt);
463 xprime[n][d] = x[n][d]+vv*dt;
468 xprime[n][d] = x[n][d];
475 /* Allocates and initializes sd->gaussrand[i] for i=1, i<sd->ngaussrand,
476 * Using seeds generated from sd->gaussrand[0].
* Each OpenMP thread initializes its own generator so the RNG state lives
* in thread-local (NUMA-friendly) memory.
* NOTE(review): elided excerpt -- seed array declaration and braces not shown.
*/
478 static void init_multiple_gaussrand(gmx_stochd_t *sd)
483 ngr = sd->ngaussrand;
/* Draw one 32-bit seed per additional generator from generator 0 */
488 seed[i] = gmx_rng_uniform_uint32(sd->gaussrand[0]);
491 #pragma omp parallel num_threads(ngr)
495 th = gmx_omp_get_thread_num();
498 /* Initialize on each thread to have thread-local memory alloced */
499 sd->gaussrand[th] = gmx_rng_init(seed[th]);
/* Allocates and fills the stochastic-dynamics working data: per-thread
 * Gaussian RNGs (SD/BD need one per OpenMP thread, other thermostats one),
 * per-tc-group SD integration constants (gdt, eph, emh, em, b, c, d),
 * BD random-force factors, or Andersen randomization flags/Boltzmann factors.
 * NOTE(review): elided excerpt -- snew(sd,...), some branches and braces are
 * not visible here.
 */
506 static gmx_stochd_t *init_stochd(FILE *fplog,t_inputrec *ir,int nthreads)
515 /* Initiate random number generator for langevin type dynamics,
516 * for BD, SD or velocity rescaling temperature coupling.
518 if (ir->eI == eiBD || EI_SD(ir->eI))
/* SD/BD draw random numbers inside the (threaded) update loop:
 * one generator per update thread */
520 sd->ngaussrand = nthreads;
526 snew(sd->gaussrand,sd->ngaussrand);
528 /* Initialize the first random generator */
529 sd->gaussrand[0] = gmx_rng_init(ir->ld_seed);
531 if (sd->ngaussrand > 1)
533 /* Initialize the rest of the random number generators,
534 * using the first one to generate seeds.
536 init_multiple_gaussrand(sd);
539 ngtc = ir->opts.ngtc;
543 snew(sd->bd_rf,ngtc);
545 else if (EI_SD(ir->eI))
548 snew(sd->sdsig,ngtc);
551 for(n=0; n<ngtc; n++)
553 if (ir->opts.tau_t[n] > 0)
/* gdt = dt/tau_t: reduced friction per step for this tc group */
555 sdc[n].gdt = ir->delta_t/ir->opts.tau_t[n];
556 sdc[n].eph = exp(sdc[n].gdt/2);
557 sdc[n].emh = exp(-sdc[n].gdt/2);
558 sdc[n].em = exp(-sdc[n].gdt);
562 /* No friction and noise on this group */
/* For large gdt the closed forms are accurate ... */
568 if (sdc[n].gdt >= 0.05)
570 sdc[n].b = sdc[n].gdt*(sdc[n].eph*sdc[n].eph - 1)
571 - 4*(sdc[n].eph - 1)*(sdc[n].eph - 1);
572 sdc[n].c = sdc[n].gdt - 3 + 4*sdc[n].emh - sdc[n].em;
573 sdc[n].d = 2 - sdc[n].eph - sdc[n].emh;
578 /* Seventh order expansions for small y */
579 sdc[n].b = y*y*y*y*(1/3.0+y*(1/3.0+y*(17/90.0+y*7/9.0)));
580 sdc[n].c = y*y*y*(2/3.0+y*(-1/2.0+y*(7/30.0+y*(-1/12.0+y*31/1260.0))));
581 sdc[n].d = y*y*(-1+y*y*(-1/12.0-y*y/360.0));
584 fprintf(debug,"SD const tc-grp %d: b %g c %g d %g\n",
585 n,sdc[n].b,sdc[n].c,sdc[n].d);
586 else if (ETC_ANDERSEN(ir->etc))
/* Copies the state of the first Gaussian RNG into the global t_state
 * (for checkpointing). Only generator 0 is saved even when there are
 * per-thread generators.
 */
617 void get_stochd_state(gmx_update_t upd,t_state *state)
619 /* Note that we only get the state of the first random generator,
620 * even if there are multiple. This avoids repetition.
622 gmx_rng_get_state(upd->sd->gaussrand[0],state->ld_rng,state->ld_rngi);
/* Restores generator 0 from the checkpointed state; any additional
 * per-thread generators are destroyed and re-seeded from generator 0
 * (exact multi-generator state is not checkpointed).
 * NOTE(review): elided excerpt -- the sd local and braces are not shown.
 */
625 void set_stochd_state(gmx_update_t upd,t_state *state)
632 gmx_rng_set_state(sd->gaussrand[0],state->ld_rng,state->ld_rngi[0]);
634 if (sd->ngaussrand > 1)
636 /* We only end up here with SD or BD with OpenMP.
637 * Destroy and reinitialize the rest of the random number generators,
638 * using seeds generated from the first one.
639 * Although this doesn't recover the previous state,
640 * it at least avoids repetition, which is most important.
641 * Exactly restoring states with all MPI+OpenMP setups is difficult
642 * and as the integrator is random to start with, doesn't gain us much.
644 for(i=1; i<sd->ngaussrand; i++)
646 gmx_rng_destroy(sd->gaussrand[i]);
649 init_multiple_gaussrand(sd);
/* Creates the gmx_update_t working object. Stochastic data is only
 * allocated for integrators/thermostats that need random numbers
 * (BD, SD, v-rescale, Andersen).
 * NOTE(review): elided excerpt -- snew(upd,...) and other field
 * initializations are not shown.
 */
653 gmx_update_t init_update(FILE *fplog,t_inputrec *ir)
659 if (ir->eI == eiBD || EI_SD(ir->eI) || ir->etc == etcVRESCALE || ETC_ANDERSEN(ir->etc))
661 upd->sd = init_stochd(fplog,ir,gmx_omp_nthreads_get(emntUpdate));
666 upd->randatom = NULL;
667 upd->randatom_list = NULL;
668 upd->randatom_list_init = FALSE; /* we have not yet cleared the data structure at this point */
/* One-step stochastic dynamics (SD1) update: velocity is damped by em,
 * driven by force/acceleration, plus a Gaussian random kick with per-group
 * magnitude sig.V scaled by 1/sqrt(mass); then x is advanced leap-frog style.
 * NOTE(review): elided excerpt -- locals, braces and the random-term addition
 * to v are partially not visible.
 */
673 static void do_update_sd1(gmx_stochd_t *sd,
675 int start,int nrend,double dt,
676 rvec accel[],ivec nFreeze[],
677 real invmass[],unsigned short ptype[],
678 unsigned short cFREEZE[],unsigned short cACC[],
679 unsigned short cTC[],
680 rvec x[],rvec xprime[],rvec v[],rvec f[],
682 int ngtc,real tau_t[],real ref_t[])
/* Grow the scratch velocity buffer if this range is larger than before */
693 if (nrend-start > sd->sd_V_nalloc)
695 sd->sd_V_nalloc = over_alloc_dd(nrend-start);
696 srenew(sd->sd_V,sd->sd_V_nalloc);
699 for(n=0; n<ngtc; n++)
702 /* The mass is accounted for later, since this differs per atom */
703 sig[n].V = sqrt(kT*(1 - sdc[n].em*sdc[n].em));
706 for(n=start; n<nrend; n++)
708 ism = sqrt(invmass[n]);
724 if((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
726 sd_V = ism*sig[gt].V*gmx_rng_gaussian_table(gaussrand);
728 v[n][d] = v[n][d]*sdc[gt].em
729 + (invmass[n]*f[n][d] + accel[ga][d])*tau_t[gt]*(1 - sdc[gt].em)
732 xprime[n][d] = x[n][d] + v[n][d]*dt;
737 xprime[n][d] = x[n][d];
/* Two-step stochastic dynamics (SD2, van Gunsteren-Berendsen style):
 * first half updates v and xprime with correlated Gaussian terms
 * (sig.V/X/Yv/Yx); the second half (after constraints) back-computes v from
 * the coordinate difference and applies the position random correction.
 * The first-half random velocities are kept in sd->sd_V for the second half.
 * NOTE(review): elided excerpt -- the bFirstHalf parameter/branching and
 * several locals are not visible here.
 */
743 static void do_update_sd2(gmx_stochd_t *sd,
747 rvec accel[],ivec nFreeze[],
748 real invmass[],unsigned short ptype[],
749 unsigned short cFREEZE[],unsigned short cACC[],
750 unsigned short cTC[],
751 rvec x[],rvec xprime[],rvec v[],rvec f[],
753 int ngtc,real tau_t[],real ref_t[],
758 /* The random part of the velocity update, generated in the first
759 * half of the update, needs to be remembered for the second half.
770 if (nrend-start > sd->sd_V_nalloc)
772 sd->sd_V_nalloc = over_alloc_dd(nrend-start);
773 srenew(sd->sd_V,sd->sd_V_nalloc);
779 for (n=0; n<ngtc; n++)
782 /* The mass is accounted for later, since this differs per atom */
783 sig[n].V = sqrt(kT*(1-sdc[n].em));
784 sig[n].X = sqrt(kT*sqr(tau_t[n])*sdc[n].c);
785 sig[n].Yv = sqrt(kT*sdc[n].b/sdc[n].c);
786 sig[n].Yx = sqrt(kT*sqr(tau_t[n])*sdc[n].b/(1-sdc[n].em));
790 for (n=start; n<nrend; n++)
792 ism = sqrt(invmass[n]);
812 if((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
818 sd_X[n][d] = ism*sig[gt].X*gmx_rng_gaussian_table(gaussrand);
820 Vmh = sd_X[n][d]*sdc[gt].d/(tau_t[gt]*sdc[gt].c)
821 + ism*sig[gt].Yv*gmx_rng_gaussian_table(gaussrand);
822 sd_V[n-start][d] = ism*sig[gt].V*gmx_rng_gaussian_table(gaussrand);
824 v[n][d] = vn*sdc[gt].em
825 + (invmass[n]*f[n][d] + accel[ga][d])*tau_t[gt]*(1 - sdc[gt].em)
826 + sd_V[n-start][d] - sdc[gt].em*Vmh;
828 xprime[n][d] = x[n][d] + v[n][d]*tau_t[gt]*(sdc[gt].eph - sdc[gt].emh);
833 /* Correct the velocities for the constraints.
834 * This operation introduces some inaccuracy,
835 * since the velocity is determined from differences in coordinates.
838 (xprime[n][d] - x[n][d])/(tau_t[gt]*(sdc[gt].eph - sdc[gt].emh));
840 Xmh = sd_V[n-start][d]*tau_t[gt]*sdc[gt].d/(sdc[gt].em-1)
841 + ism*sig[gt].Yx*gmx_rng_gaussian_table(gaussrand);
842 sd_X[n][d] = ism*sig[gt].X*gmx_rng_gaussian_table(gaussrand);
844 xprime[n][d] += sd_X[n][d] - Xmh;
853 xprime[n][d] = x[n][d];
/* Brownian dynamics update: positions follow the force divided by the
 * friction plus a Gaussian random displacement with per-group magnitude
 * rf[]. With friction_coefficient == 0, per-atom friction mass/tau_t is
 * used instead (invmass is then pre-scaled by the caller, see NOTE below).
 * NOTE(review): elided excerpt -- locals and braces are not visible here.
 */
860 static void do_update_bd(int start,int nrend,double dt,
862 real invmass[],unsigned short ptype[],
863 unsigned short cFREEZE[],unsigned short cTC[],
864 rvec x[],rvec xprime[],rvec v[],
865 rvec f[],real friction_coefficient,
866 int ngtc,real tau_t[],real ref_t[],
867 real *rf,gmx_rng_t gaussrand)
869 /* note -- these appear to be full step velocities . . . */
875 if (friction_coefficient != 0)
877 invfr = 1.0/friction_coefficient;
878 for(n=0; n<ngtc; n++)
/* Fluctuation-dissipation magnitude for a uniform friction constant */
880 rf[n] = sqrt(2.0*BOLTZ*ref_t[n]/(friction_coefficient*dt));
885 for(n=0; n<ngtc; n++)
887 rf[n] = sqrt(2.0*BOLTZ*ref_t[n]);
890 for(n=start; (n<nrend); n++)
900 for(d=0; (d<DIM); d++)
902 if((ptype[n]!=eptVSite) && (ptype[n]!=eptShell) && !nFreeze[gf][d])
904 if (friction_coefficient != 0) {
905 vn = invfr*f[n][d] + rf[gt]*gmx_rng_gaussian_table(gaussrand);
909 /* NOTE: invmass = 2/(mass*friction_constant*dt) */
910 vn = 0.5*invmass[n]*f[n][d]*dt
911 + sqrt(0.5*invmass[n])*rf[gt]*gmx_rng_gaussian_table(gaussrand);
915 xprime[n][d] = x[n][d]+vn*dt;
920 xprime[n][d] = x[n][d];
/* Debug helper: prints positions, trial positions, velocities and forces
 * for all natoms to fp under the given title.
 * NOTE(review): elided excerpt -- the surrounding debug guard and braces
 * are not visible here.
 */
926 static void dump_it_all(FILE *fp,const char *title,
927 int natoms,rvec x[],rvec xp[],rvec v[],rvec f[])
932 fprintf(fp,"%s\n",title);
933 pr_rvecs(fp,0,"x",x,natoms);
934 pr_rvecs(fp,0,"xp",xp,natoms);
935 pr_rvecs(fp,0,"v",v,natoms);
936 pr_rvecs(fp,0,"f",f,natoms);
/* Accumulates the per-tc-group kinetic energy tensors (ekinf for full-step
 * velocities, ekinh for half-step) and the free-energy mass-perturbation
 * term dekindl, parallelized over OpenMP update threads with per-thread
 * partial sums in ekind->ekin_work that are reduced afterwards.
 * NOTE(review): elided excerpt -- declarations, branch scaffolding and
 * braces are not visible here.
 */
941 static void calc_ke_part_normal(rvec v[], t_grpopts *opts,t_mdatoms *md,
942 gmx_ekindata_t *ekind,t_nrnb *nrnb,gmx_bool bEkinAveVel,
943 gmx_bool bSaveEkinOld)
946 t_grp_tcstat *tcstat=ekind->tcstat;
947 t_grp_acc *grpstat=ekind->grpstat;
950 /* three main: VV with AveVel, vv with AveEkin, leap with AveEkin. Leap with AveVel is also
951 an option, but not supported now. Additionally, if we are doing iterations.
952 bEkinAveVel: If TRUE, we sum into ekin, if FALSE, into ekinh.
953 bSaveEkinOld: If TRUE (in the case of iteration = bIterate is TRUE), we don't copy over the ekinh_old.
954 If FALSE, we overwrite it.
957 /* group velocities are calculated in update_ekindata and
958 * accumulated in acumulate_groups.
959 * Now the partial global and groups ekin.
961 for(g=0; (g<opts->ngtc); g++)
965 copy_mat(tcstat[g].ekinh,tcstat[g].ekinh_old);
968 clear_mat(tcstat[g].ekinf);
970 clear_mat(tcstat[g].ekinh);
973 tcstat[g].ekinscalef_nhc = 1.0; /* need to clear this -- logic is complicated! */
976 ekind->dekindl_old = ekind->dekindl;
978 nthread = gmx_omp_nthreads_get(emntUpdate);
980 #pragma omp parallel for num_threads(nthread) schedule(static)
981 for(thread=0; thread<nthread; thread++)
/* Static atom-range partitioning: thread t handles [start_t,end_t) */
991 start_t = md->start + ((thread+0)*md->homenr)/nthread;
992 end_t = md->start + ((thread+1)*md->homenr)/nthread;
994 ekin_sum = ekind->ekin_work[thread];
/* dekindl partial sum is stashed one slot past the last tc group */
995 dekindl_sum = &ekind->ekin_work[thread][opts->ngtc][0][0];
997 for(gt=0; gt<opts->ngtc; gt++)
999 clear_mat(ekin_sum[gt]);
1004 for(n=start_t; n<end_t; n++)
1014 hm = 0.5*md->massT[n];
1016 for(d=0; (d<DIM); d++)
/* Remove the acceleration-group mean velocity before summing ekin */
1018 v_corrt[d] = v[n][d] - grpstat[ga].u[d];
1020 for(d=0; (d<DIM); d++)
1022 for (m=0;(m<DIM); m++)
1024 /* if we're computing a full step velocity, v_corrt[d] has v(t). Otherwise, v(t+dt/2) */
1025 ekin_sum[gt][m][d] += hm*v_corrt[m]*v_corrt[d];
1028 if (md->nMassPerturbed && md->bPerturbed[n])
1031 0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt,v_corrt);
/* Serial reduction of the per-thread partial sums */
1037 for(thread=0; thread<nthread; thread++)
1039 for(g=0; g<opts->ngtc; g++)
1043 m_add(tcstat[g].ekinf,ekind->ekin_work[thread][g],
1048 m_add(tcstat[g].ekinh,ekind->ekin_work[thread][g],
1053 ekind->dekindl += ekind->ekin_work[thread][opts->ngtc][0][0];
1056 inc_nrnb(nrnb,eNR_EKIN,md->homenr);
/* Kinetic-energy accumulation when a cosine acceleration profile is active
 * (viscosity calculation): subtracts the cosine velocity profile from vx
 * before summing ekin, and accumulates the profile amplitude mvcos.
 * Serial over atoms (no OpenMP here, unlike calc_ke_part_normal).
 * NOTE(review): elided excerpt -- declarations and braces are not
 * visible here.
 */
1059 static void calc_ke_part_visc(matrix box,rvec x[],rvec v[],
1060 t_grpopts *opts,t_mdatoms *md,
1061 gmx_ekindata_t *ekind,
1062 t_nrnb *nrnb, gmx_bool bEkinAveVel, gmx_bool bSaveEkinOld)
1064 int start=md->start,homenr=md->homenr;
1068 t_grp_tcstat *tcstat=ekind->tcstat;
1069 t_cos_acc *cosacc=&(ekind->cosacc);
1074 for(g=0; g<opts->ngtc; g++)
1076 copy_mat(ekind->tcstat[g].ekinh,ekind->tcstat[g].ekinh_old);
1077 clear_mat(ekind->tcstat[g].ekinh);
1079 ekind->dekindl_old = ekind->dekindl;
/* wave vector of the cosine profile, one period across the box in z */
1081 fac = 2*M_PI/box[ZZ][ZZ];
1084 for(n=start; n<start+homenr; n++)
1090 hm = 0.5*md->massT[n];
1092 /* Note that the times of x and v differ by half a step */
1093 /* MRS -- would have to be changed for VV */
1094 cosz = cos(fac*x[n][ZZ]);
1095 /* Calculate the amplitude of the new velocity profile */
1096 mvcos += 2*cosz*md->massT[n]*v[n][XX];
1098 copy_rvec(v[n],v_corrt);
1099 /* Subtract the profile for the kinetic energy */
1100 v_corrt[XX] -= cosz*cosacc->vcos;
1101 for (d=0; (d<DIM); d++)
1103 for (m=0; (m<DIM); m++)
1105 /* if we're computing a full step velocity, v_corrt[d] has v(t). Otherwise, v(t+dt/2) */
1108 tcstat[gt].ekinf[m][d]+=hm*v_corrt[m]*v_corrt[d];
1112 tcstat[gt].ekinh[m][d]+=hm*v_corrt[m]*v_corrt[d];
1116 if(md->nPerturbed && md->bPerturbed[n])
1118 dekindl -= 0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt,v_corrt);
1121 ekind->dekindl = dekindl;
1122 cosacc->mvcos = mvcos;
1124 inc_nrnb(nrnb,eNR_EKIN,homenr);
/* Dispatch for kinetic-energy accumulation: the cosine-profile variant is
 * used only when a cosine acceleration (viscosity calculation) is active.
 * NOTE(review): elided excerpt -- the else and braces are not shown.
 */
1127 void calc_ke_part(t_state *state,t_grpopts *opts,t_mdatoms *md,
1128 gmx_ekindata_t *ekind,t_nrnb *nrnb, gmx_bool bEkinAveVel, gmx_bool bSaveEkinOld)
1130 if (ekind->cosacc.cos_accel == 0)
1132 calc_ke_part_normal(state->v,opts,md,ekind,nrnb,bEkinAveVel,bSaveEkinOld);
1136 calc_ke_part_visc(state->box,state->x,state->v,opts,md,ekind,nrnb,bEkinAveVel,bSaveEkinOld);
/* Allocates the checkpointable kinetic-energy state: one matrix/scalar set
 * per temperature-coupling group, sized from ir->opts.ngtc.
 */
1140 extern void init_ekinstate(ekinstate_t *ekinstate,const t_inputrec *ir)
1142 ekinstate->ekin_n = ir->opts.ngtc;
1143 snew(ekinstate->ekinh,ekinstate->ekin_n);
1144 snew(ekinstate->ekinf,ekinstate->ekin_n);
1145 snew(ekinstate->ekinh_old,ekinstate->ekin_n);
1146 snew(ekinstate->ekinscalef_nhc,ekinstate->ekin_n);
1147 snew(ekinstate->ekinscaleh_nhc,ekinstate->ekin_n);
1148 snew(ekinstate->vscale_nhc,ekinstate->ekin_n);
1149 ekinstate->dekindl = 0;
1150 ekinstate->mvcos = 0;
/* Copies the live kinetic-energy data (per-group tensors and NHC scaling
 * factors, plus totals) into the checkpoint structure ekinstate.
 */
1153 void update_ekinstate(ekinstate_t *ekinstate,gmx_ekindata_t *ekind)
1157 for(i=0;i<ekinstate->ekin_n;i++)
1159 copy_mat(ekind->tcstat[i].ekinh,ekinstate->ekinh[i]);
1160 copy_mat(ekind->tcstat[i].ekinf,ekinstate->ekinf[i]);
1161 copy_mat(ekind->tcstat[i].ekinh_old,ekinstate->ekinh_old[i]);
1162 ekinstate->ekinscalef_nhc[i] = ekind->tcstat[i].ekinscalef_nhc;
1163 ekinstate->ekinscaleh_nhc[i] = ekind->tcstat[i].ekinscaleh_nhc;
1164 ekinstate->vscale_nhc[i] = ekind->tcstat[i].vscale_nhc;
1167 copy_mat(ekind->ekin,ekinstate->ekin_total);
1168 ekinstate->dekindl = ekind->dekindl;
1169 ekinstate->mvcos = ekind->cosacc.mvcos;
/* Inverse of update_ekinstate: the master rank copies the checkpointed
 * kinetic-energy data back into ekind, then broadcasts every field to the
 * other ranks so all processes agree on the restored state.
 * NOTE(review): elided excerpt -- the MASTER/PAR guards and loop braces
 * around the broadcasts are not visible here.
 */
1173 void restore_ekinstate_from_state(t_commrec *cr,
1174 gmx_ekindata_t *ekind,ekinstate_t *ekinstate)
1180 for(i=0;i<ekinstate->ekin_n;i++)
1182 copy_mat(ekinstate->ekinh[i],ekind->tcstat[i].ekinh);
1183 copy_mat(ekinstate->ekinf[i],ekind->tcstat[i].ekinf);
1184 copy_mat(ekinstate->ekinh_old[i],ekind->tcstat[i].ekinh_old);
1185 ekind->tcstat[i].ekinscalef_nhc = ekinstate->ekinscalef_nhc[i];
1186 ekind->tcstat[i].ekinscaleh_nhc = ekinstate->ekinscaleh_nhc[i];
1187 ekind->tcstat[i].vscale_nhc = ekinstate->vscale_nhc[i];
1190 copy_mat(ekinstate->ekin_total,ekind->ekin);
1192 ekind->dekindl = ekinstate->dekindl;
1193 ekind->cosacc.mvcos = ekinstate->mvcos;
1194 n = ekinstate->ekin_n;
/* Broadcast the group count, then each per-group tensor and scalar */
1199 gmx_bcast(sizeof(n),&n,cr);
1202 gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh[0][0]),
1203 ekind->tcstat[i].ekinh[0],cr);
1204 gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinf[0][0]),
1205 ekind->tcstat[i].ekinf[0],cr);
1206 gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh_old[0][0]),
1207 ekind->tcstat[i].ekinh_old[0],cr);
1209 gmx_bcast(sizeof(ekind->tcstat[i].ekinscalef_nhc),
1210 &(ekind->tcstat[i].ekinscalef_nhc),cr);
1211 gmx_bcast(sizeof(ekind->tcstat[i].ekinscaleh_nhc),
1212 &(ekind->tcstat[i].ekinscaleh_nhc),cr);
1213 gmx_bcast(sizeof(ekind->tcstat[i].vscale_nhc),
1214 &(ekind->tcstat[i].vscale_nhc),cr);
1216 gmx_bcast(DIM*DIM*sizeof(ekind->ekin[0][0]),
1219 gmx_bcast(sizeof(ekind->dekindl),&ekind->dekindl,cr);
1220 gmx_bcast(sizeof(ekind->cosacc.mvcos),&ekind->cosacc.mvcos,cr);
/* Records the reference box and step for the continuous box-deformation
 * algorithm; deform() extrapolates the box linearly from this reference.
 */
1224 void set_deform_reference_box(gmx_update_t upd,gmx_large_int_t step,matrix box)
1226 upd->deformref_step = step;
1227 copy_mat(box,upd->deformref_box);
/* Applies continuous box deformation: computes the target box bnew from the
 * reference box plus elapsed_time*ir->deform, wraps off-diagonal elements to
 * keep them bounded, and scales home-atom coordinates (and scale_tot) by the
 * resulting triangular scaling matrix mu.
 * NOTE(review): elided excerpt -- the bnew diagonal assignments, the box
 * update and some braces are not visible here.
 */
1230 static void deform(gmx_update_t upd,
1231 int start,int homenr,rvec x[],matrix box,matrix *scale_tot,
1232 const t_inputrec *ir,gmx_large_int_t step)
1234 matrix bnew,invbox,mu;
/* step+1: the deformation applies to the box after this update step */
1238 elapsed_time = (step + 1 - upd->deformref_step)*ir->delta_t;
1240 for(i=0; i<DIM; i++)
1242 for(j=0; j<DIM; j++)
1244 if (ir->deform[i][j] != 0)
1247 upd->deformref_box[i][j] + elapsed_time*ir->deform[i][j];
1251 /* We correct the off-diagonal elements,
1252 * which can grow indefinitely during shearing,
1253 * so the shifts do not get messed up.
1255 for(i=1; i<DIM; i++)
1257 for(j=i-1; j>=0; j--)
1259 while (bnew[i][j] - box[i][j] > 0.5*bnew[j][j])
1261 rvec_dec(bnew[i],bnew[j]);
1263 while (bnew[i][j] - box[i][j] < -0.5*bnew[j][j])
1265 rvec_inc(bnew[i],bnew[j]);
1269 m_inv_ur0(box,invbox);
1271 mmul_ur0(box,invbox,mu);
/* Scale home-atom coordinates with the upper-right-zero matrix mu */
1273 for(i=start; i<start+homenr; i++)
1275 x[i][XX] = mu[XX][XX]*x[i][XX]+mu[YY][XX]*x[i][YY]+mu[ZZ][XX]*x[i][ZZ];
1276 x[i][YY] = mu[YY][YY]*x[i][YY]+mu[ZZ][YY]*x[i][ZZ];
1277 x[i][ZZ] = mu[ZZ][ZZ]*x[i][ZZ];
1281 /* The transposes of the scaling matrices are stored,
1282 * so we need to do matrix multiplication in the inverse order.
1284 mmul_ur0(*scale_tot,mu,*scale_tot);
/* For twin-range (nstcalclr) setups: optionally constrains the long-range
 * force component separately, then stores f + (nstcalclr-1)*f_lr into f_lr
 * so the update can apply the LR contribution with its larger effective
 * prefactor.
 * NOTE(review): elided excerpt -- some parameters (cr, nrnb) and braces
 * are not visible here.
 */
1288 static void combine_forces(int nstcalclr,
1289 gmx_constr_t constr,
1290 t_inputrec *ir,t_mdatoms *md,t_idef *idef,
1292 gmx_large_int_t step,
1293 t_state *state,gmx_bool bMolPBC,
1294 int start,int nrend,
1295 rvec f[],rvec f_lr[],
1300 /* f contains the short-range forces + the long range forces
1301 * which are stored separately in f_lr.
1304 if (constr != NULL && !(ir->eConstrAlg == econtSHAKE && ir->epc == epcNO))
1306 /* We need to constrain the LR forces separately,
1307 * because due to the different pre-factor for the SR and LR
1308 * forces in the update algorithm, we can not determine
1309 * the constraint force for the coordinate constraining.
1310 * Constrain only the additional LR part of the force.
1312 /* MRS -- need to make sure this works with trotter integration -- the constraint calls may not be right.*/
1313 constrain(NULL,FALSE,FALSE,constr,idef,ir,NULL,cr,step,0,md,
1314 state->x,f_lr,f_lr,bMolPBC,state->box,state->lambda[efptBONDED],NULL,
1315 NULL,NULL,nrnb,econqForce,ir->epc==epcMTTK,state->veta,state->veta);
1318 /* Add nstcalclr-1 times the LR force to the sum of both forces
1319 * and store the result in forces_lr.
1321 nm1 = nstcalclr - 1;
1322 for(i=start; i<nrend; i++)
1324 for(d=0; d<DIM; d++)
1326 f_lr[i][d] = f[i][d] + nm1*f_lr[i][d];
/* Applies temperature coupling for this step (Berendsen, Nose-Hoover or
 * v-rescale), unless a Trotter-decomposition scheme handles it elsewhere.
 * For leap-frog the resulting lambda factors are applied in the update;
 * for velocity-Verlet the velocities are rescaled in place here. When no
 * coupling is done this step, all lambda factors are reset to 1.
 * NOTE(review): elided excerpt -- several parameters (state, md, upd, ...),
 * case labels and braces are not visible here.
 */
1331 void update_tcouple(FILE *fplog,
1332 gmx_large_int_t step,
1333 t_inputrec *inputrec,
1335 gmx_ekindata_t *ekind,
1336 gmx_wallcycle_t wcycle,
1342 gmx_bool bTCouple=FALSE;
1344 int i,start,end,homenr,offset;
1346 /* if using vv with trotter decomposition methods, we do this elsewhere in the code */
1347 if (inputrec->etc != etcNO &&
1348 !(IR_NVT_TROTTER(inputrec) || IR_NPT_TROTTER(inputrec) || IR_NPH_TROTTER(inputrec)))
1350 /* We should only couple after a step where energies were determined (for leapfrog versions)
1351 or the step energies are determined, for velocity verlet versions */
1353 if (EI_VV(inputrec->eI)) {
1358 bTCouple = (inputrec->nsttcouple == 1 ||
1359 do_per_step(step+inputrec->nsttcouple-offset,
1360 inputrec->nsttcouple));
/* Effective coupling timestep: nsttcouple MD steps */
1365 dttc = inputrec->nsttcouple*inputrec->delta_t;
1367 switch (inputrec->etc)
1372 berendsen_tcoupl(inputrec,ekind,dttc);
1375 nosehoover_tcoupl(&(inputrec->opts),ekind,dttc,
1376 state->nosehoover_xi,state->nosehoover_vxi,MassQ);
1379 vrescale_tcoupl(inputrec,ekind,dttc,
1380 state->therm_integral,upd->sd->gaussrand[0]);
1383 /* rescale in place here */
1384 if (EI_VV(inputrec->eI))
1386 rescale_velocities(ekind,md,md->start,md->start+md->homenr,state->v);
1391 /* Set the T scaling lambda to 1 to have no scaling */
1392 for(i=0; (i<inputrec->opts.ngtc); i++)
1394 ekind->tcstat[i].lambda = 1.0;
/* Applies pressure coupling for this step (Berendsen or Parrinello-Rahman),
 * unless a Trotter scheme handles it in coupling.c. Always resets the
 * scaling matrix pcoupl_mu to the identity first; the coupling routines
 * then fill mu (and M for PR).
 * NOTE(review): elided excerpt -- several parameters (state, pcoupl_mu, M,
 * bInitStep), case labels and braces are not visible here.
 */
1399 void update_pcouple(FILE *fplog,
1400 gmx_large_int_t step,
1401 t_inputrec *inputrec,
1405 gmx_wallcycle_t wcycle,
1409 gmx_bool bPCouple=FALSE;
1413 /* if using Trotter pressure, we do this in coupling.c, so we leave it false. */
1414 if (inputrec->epc != epcNO && (!(IR_NPT_TROTTER(inputrec) || IR_NPH_TROTTER(inputrec))))
1416 /* We should only couple after a step where energies were determined */
1417 bPCouple = (inputrec->nstpcouple == 1 ||
1418 do_per_step(step+inputrec->nstpcouple-1,
1419 inputrec->nstpcouple));
/* Default: identity scaling matrix (no box scaling this step) */
1422 clear_mat(pcoupl_mu);
1423 for(i=0; i<DIM; i++)
1425 pcoupl_mu[i][i] = 1.0;
1432 dtpc = inputrec->nstpcouple*inputrec->delta_t;
1434 switch (inputrec->epc)
1436 /* We can always pcoupl, even if we did not sum the energies
1437 * the previous step, since state->pres_prev is only updated
1438 * when the energies have been summed.
1442 case (epcBERENDSEN):
1445 berendsen_pcoupl(fplog,step,inputrec,dtpc,state->pres_prev,state->box,
1449 case (epcPARRINELLORAHMAN):
1450 parrinellorahman_pcoupl(fplog,step,inputrec,dtpc,state->pres_prev,
1451 state->box,state->box_rel,state->boxv,
1452 M,pcoupl_mu,bInitStep);
/* Returns the update object's scratch coordinate buffer (upd->xp), growing
 * it if the state's allocation has grown since last use.
 * NOTE(review): elided excerpt -- the return statement and braces are not
 * visible here.
 */
1460 static rvec *get_xprime(const t_state *state,gmx_update_t upd)
1462 if (state->nalloc > upd->xp_nalloc)
1464 upd->xp_nalloc = state->nalloc;
1465 srenew(upd->xp,upd->xp_nalloc);
/* Second phase of the step: applies holonomic constraints to xprime (or to
 * velocities for the first VV half), accumulates the constraint virial
 * (with an eph correction factor for SD2), runs the second SD2 sub-step and
 * re-constrains, then unshifts coordinates and copies upd->xp into state->x.
 * NOTE(review): elided excerpt -- several parameters, guards and braces are
 * not visible here.
 */
1471 void update_constraints(FILE *fplog,
1472 gmx_large_int_t step,
1473 real *dvdlambda, /* the contribution to be added to the bonded interactions */
1474 t_inputrec *inputrec, /* input record and box stuff */
1475 gmx_ekindata_t *ekind,
1480 rvec force[], /* forces on home particles */
1483 tensor vir, /* tensors for virial and ekin, needed for computing */
1486 gmx_wallcycle_t wcycle,
1488 gmx_constr_t constr,
1490 gmx_bool bFirstHalf,
1494 gmx_bool bExtended,bLastStep,bLog=FALSE,bEner=FALSE,bDoConstr=FALSE;
1497 int start,homenr,nrend,i,n,m,g,d;
1499 rvec *vbuf,*xprime=NULL;
1502 if (constr) {bDoConstr=TRUE;}
/* Leap-frog constrains only in the second half; VV also in the first */
1503 if (bFirstHalf && !EI_VV(inputrec->eI)) {bDoConstr=FALSE;}
1505 /* for now, SD update is here -- though it really seems like it
1506 should be reformulated as a velocity verlet method, since it has two parts */
1509 homenr = md->homenr;
1510 nrend = start+homenr;
1512 dt = inputrec->delta_t;
1517 * APPLY CONSTRAINTS:
1520 * When doing PR pressure coupling we have to constrain the
1521 * bonds in each iteration. If we are only using Nose-Hoover tcoupling
1522 * it is enough to do this once though, since the relative velocities
1523 * after this will be normal to the bond vector
1528 /* clear out constraints before applying */
1529 clear_mat(vir_part);
1531 xprime = get_xprime(state,upd);
1533 bLastStep = (step == inputrec->init_step+inputrec->nsteps);
1534 bLog = (do_per_step(step,inputrec->nstlog) || bLastStep || (step < 0));
1535 bEner = (do_per_step(step,inputrec->nstenergy) || bLastStep);
1536 /* Constrain the coordinates xprime */
1537 wallcycle_start(wcycle,ewcCONSTR);
1538 if (EI_VV(inputrec->eI) && bFirstHalf)
/* VV first half: constrain the velocities (econqVeloc) */
1540 constrain(NULL,bLog,bEner,constr,idef,
1541 inputrec,ekind,cr,step,1,md,
1542 state->x,state->v,state->v,
1544 state->lambda[efptBONDED],dvdlambda,
1545 NULL,bCalcVir ? &vir_con : NULL,nrnb,econqVeloc,
1546 inputrec->epc==epcMTTK,state->veta,vetanew);
/* Otherwise constrain the trial coordinates (econqCoord) */
1550 constrain(NULL,bLog,bEner,constr,idef,
1551 inputrec,ekind,cr,step,1,md,
1552 state->x,xprime,NULL,
1554 state->lambda[efptBONDED],dvdlambda,
1555 state->v,bCalcVir ? &vir_con : NULL ,nrnb,econqCoord,
1556 inputrec->epc==epcMTTK,state->veta,state->veta);
1558 wallcycle_stop(wcycle,ewcCONSTR);
1562 dump_it_all(fplog,"After Shake",
1563 state->natoms,state->x,xprime,state->v,force);
1567 if (inputrec->eI == eiSD2)
1569 /* A correction factor eph is needed for the SD constraint force */
1570 /* Here we can, unfortunately, not have proper corrections
1571 * for different friction constants, so we use the first one.
1573 for(i=0; i<DIM; i++)
1575 for(m=0; m<DIM; m++)
1577 vir_part[i][m] += upd->sd->sdc[0].eph*vir_con[i][m];
1583 m_add(vir_part,vir_con,vir_part);
1587 pr_rvecs(debug,0,"constraint virial",vir_part,DIM);
1593 if ((inputrec->eI == eiSD2) && !(bFirstHalf))
1595 xprime = get_xprime(state,upd);
1597 nth = gmx_omp_nthreads_get(emntUpdate);
1599 #pragma omp parallel for num_threads(nth) schedule(static)
1600 for(th=0; th<nth; th++)
1602 int start_th,end_th;
/* Static partitioning of the atom range over update threads */
1604 start_th = start + ((nrend-start)* th )/nth;
1605 end_th = start + ((nrend-start)*(th+1))/nth;
1607 /* The second part of the SD integration */
1608 do_update_sd2(upd->sd,upd->sd->gaussrand[th],
1609 FALSE,start_th,end_th,
1610 inputrec->opts.acc,inputrec->opts.nFreeze,
1611 md->invmass,md->ptype,
1612 md->cFREEZE,md->cACC,md->cTC,
1613 state->x,xprime,state->v,force,state->sd_X,
1614 inputrec->opts.ngtc,inputrec->opts.tau_t,
1615 inputrec->opts.ref_t,FALSE);
1617 inc_nrnb(nrnb, eNR_UPDATE, homenr);
1621 /* Constrain the coordinates xprime */
1622 wallcycle_start(wcycle,ewcCONSTR);
1623 constrain(NULL,bLog,bEner,constr,idef,
1624 inputrec,NULL,cr,step,1,md,
1625 state->x,xprime,NULL,
1627 state->lambda[efptBONDED],dvdlambda,
1628 NULL,NULL,nrnb,econqCoord,FALSE,0,0);
1629 wallcycle_stop(wcycle,ewcCONSTR);
1633 /* We must always unshift after updating coordinates; if we did not shake
1634 x was shifted in do_force */
1636 if (!(bFirstHalf)) /* in the first half of vv, no shift. */
1638 if (graph && (graph->nnodes > 0))
1640 unshift_x(graph,state->box,state->x,upd->xp)
1641 if (TRICLINIC(state->box))
1643 inc_nrnb(nrnb,eNR_SHIFTX,2*graph->nnodes);
1647 inc_nrnb(nrnb,eNR_SHIFTX,graph->nnodes);
1652 #pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntUpdate)) schedule(static)
1653 for(i=start; i<nrend; i++)
/* Commit the trial coordinates: state->x <- upd->xp */
1655 copy_rvec(upd->xp[i],state->x[i]);
1659 dump_it_all(fplog,"After unshift",
1660 state->natoms,state->x,upd->xp,state->v,force);
1662 /* ############# END the update of velocities and positions ######### */
/* Updates the simulation box after the coordinate update: applies the
 * pressure-coupling box scaling (Berendsen scaling, Parrinello-Rahman box
 * velocities, or MTTK veta-based isotropic scaling), accumulates the total
 * scaling matrix, and applies box deformation when requested.
 * NOTE(review): elided excerpt -- several parameters (state, md, upd,
 * pcoupl_mu, M, scale_tot), case labels and braces are not visible here.
 */
1665 void update_box(FILE *fplog,
1666 gmx_large_int_t step,
1667 t_inputrec *inputrec, /* input record and box stuff */
1671 rvec force[], /* forces on home particles */
1675 gmx_wallcycle_t wcycle,
1678 gmx_bool bFirstHalf)
1680 gmx_bool bExtended,bLastStep,bLog=FALSE,bEner=FALSE;
1683 int start,homenr,nrend,i,n,m,g;
1687 homenr = md->homenr;
1688 nrend = start+homenr;
1691 (inputrec->etc == etcNOSEHOOVER) ||
1692 (inputrec->epc == epcPARRINELLORAHMAN) ||
1693 (inputrec->epc == epcMTTK);
1695 dt = inputrec->delta_t;
1699 /* now update boxes */
1700 switch (inputrec->epc) {
1703 case (epcBERENDSEN):
1704 berendsen_pscale(inputrec,pcoupl_mu,state->box,state->box_rel,
1705 start,homenr,state->x,md->cFREEZE,nrnb);
1707 case (epcPARRINELLORAHMAN):
1708 /* The box velocities were updated in do_pr_pcoupl in the update
1709 * iteration, but we don't change the box vectors until we get here
1710 * since we need to be able to shift/unshift above.
1716 state->box[i][m] += dt*state->boxv[i][m];
1719 preserve_box_shape(inputrec,state->box_rel,state->box);
1721 /* Scale the coordinates */
1722 for(n=start; (n<start+homenr); n++)
1724 tmvmul_ur0(pcoupl_mu,state->x[n],state->x[n]);
1728 switch (inputrec->epct)
1730 case (epctISOTROPIC):
1731 /* DIM * eta = ln V. so DIM*eta_new = DIM*eta_old + DIM*dt*veta =>
1732 ln V_new = ln V_old + 3*dt*veta => V_new = V_old*exp(3*dt*veta) =>
1733 Side length scales as exp(veta*dt) */
1735 msmul(state->box,exp(state->veta*dt),state->box);
1737 /* Relate veta to boxv. veta = d(eta)/dT = (1/DIM)*1/V dV/dT.
1738 o If we assume isotropic scaling, and box length scaling
1739 factor L, then V = L^DIM (det(M)). So dV/dt = DIM
1740 L^(DIM-1) dL/dt det(M), and veta = (1/L) dL/dt. The
1741 determinant of B is L^DIM det(M), and the determinant
1742 of dB/dt is (dL/dT)^DIM det (M). veta will be
1743 (det(dB/dT)/det(B))^(1/3). Then since M =
1744 B_new*(vol_new)^(1/3), dB/dT_new = (veta_new)*B(new). */
1746 msmul(state->box,state->veta,state->boxv);
1756 if ((!(IR_NPT_TROTTER(inputrec) || IR_NPH_TROTTER(inputrec))) && scale_tot)
1758 /* The transposes of the scaling matrices are stored,
1759 * therefore we need to reverse the order in the multiplication.
1761 mmul_ur0(*scale_tot,pcoupl_mu,*scale_tot);
1764 if (DEFORM(*inputrec))
1766 deform(upd,start,homenr,state->x,state->box,scale_tot,inputrec,step);
1769 dump_it_all(fplog,"After update",
1770 state->natoms,state->x,upd->xp,state->v,force);
/* update_coords: the main per-step position/velocity update.
 * Dispatches on the integrator (md, visc-cos-acceleration md, sd1, sd2,
 * bd, velocity-verlet) and on UpdatePart for the two VV half-steps.
 * The per-atom work is split across OpenMP threads; stochastic
 * integrators use one random-number stream per thread
 * (upd->sd->gaussrand[th]) so results are thread-count dependent but
 * deterministic per thread.
 * NOTE(review): this excerpt is elided — many original source lines
 * (braces, case labels, some arguments) are missing; comments describe
 * only what is visible.
 */
1773 void update_coords(FILE *fplog,
1774 gmx_large_int_t step,
1775 t_inputrec *inputrec, /* input record and box stuff */
1779 rvec *f, /* forces on home particles */
1783 gmx_ekindata_t *ekind,
1785 gmx_wallcycle_t wcycle,
1789 t_commrec *cr, /* these shouldn't be here -- need to think about it */
1791 gmx_constr_t constr,
1794 gmx_bool bNH,bPR,bLastStep,bLog=FALSE,bEner=FALSE;
1796 real *imass,*imassin;
1799 int start,homenr,nrend,i,j,d,n,m,g;
1800 int blen0,blen1,iatom,jatom,nshake,nsettle,nconstr,nexpand;
1803 rvec *vcom,*xcom,*vall,*xall,*xin,*vin,*forcein,*fall,*xpall,*xprimein,*xprime;
1806 /* Running the velocity half does nothing except for velocity verlet */
/* Guard: the VELOCITY1/VELOCITY2 update parts are only meaningful for
 * velocity-verlet integrators; calling them otherwise is a programming error */
1807 if ((UpdatePart == etrtVELOCITY1 || UpdatePart == etrtVELOCITY2) &&
1808 !EI_VV(inputrec->eI))
1810 gmx_incons("update_coords called for velocity without VV integrator");
1814 homenr = md->homenr;
1815 nrend = start+homenr;
/* xprime is the scratch coordinate array for the unconstrained update */
1817 xprime = get_xprime(state,upd);
1819 dt = inputrec->delta_t;
1822 /* We need to update the NMR restraint history when time averaging is used */
1823 if (state->flags & (1<<estDISRE_RM3TAV))
1825 update_disres_history(fcd,&state->hist);
1827 if (state->flags & (1<<estORIRE_DTAV))
1829 update_orires_history(fcd,&state->hist);
/* bNH/bPR select the extended-ensemble (thermostat/barostat) update terms */
1833 bNH = inputrec->etc == etcNOSEHOOVER;
1834 bPR = ((inputrec->epc == epcPARRINELLORAHMAN) || (inputrec->epc == epcMTTK));
/* Twin-range (multiple-time-step) long-range forces, non-VV only */
1836 if (bDoLR && inputrec->nstcalclr > 1 && !EI_VV(inputrec->eI)) /* get this working with VV? */
1838 /* Store the total force + nstcalclr-1 times the LR force
1839 * in forces_lr, so it can be used in a normal update algorithm
1840 * to produce twin time stepping.
1842 /* is this correct in the new construction? MRS */
1843 combine_forces(inputrec->nstcalclr,constr,inputrec,md,idef,cr,
1845 start,nrend,f,f_lr,nrnb);
1853 /* ############# START The update of velocities and positions ######### */
1855 dump_it_all(fplog,"Before update",
1856 state->natoms,state->x,xprime,state->v,force);
1858 if (EI_RANDOM(inputrec->eI))
1860 /* We still need to take care of generating random seeds properly
1861 * when multi-threading.
/* Partition the atom range [start, nrend) evenly over the update threads */
1867 nth = gmx_omp_nthreads_get(emntUpdate);
1870 #pragma omp parallel for num_threads(nth) schedule(static) private(alpha)
1871 for(th=0; th<nth; th++)
1873 int start_th,end_th;
1875 start_th = start + ((nrend-start)* th )/nth;
1876 end_th = start + ((nrend-start)*(th+1))/nth;
/* Dispatch on integrator type (case labels elided in this excerpt) */
1878 switch (inputrec->eI) {
/* presumably case eiMD: plain leap-frog, with a separate path when a
 * cosine acceleration profile (viscosity calculation) is active */
1880 if (ekind->cosacc.cos_accel == 0)
1882 do_update_md(start_th,end_th,dt,
1883 ekind->tcstat,state->nosehoover_vxi,
1884 ekind->bNEMD,ekind->grpstat,inputrec->opts.acc,
1885 inputrec->opts.nFreeze,
1886 md->invmass,md->ptype,
1887 md->cFREEZE,md->cACC,md->cTC,
1888 state->x,xprime,state->v,force,M,
1893 do_update_visc(start_th,end_th,dt,
1894 ekind->tcstat,state->nosehoover_vxi,
1895 md->invmass,md->ptype,
1896 md->cTC,state->x,xprime,state->v,force,M,
1898 ekind->cosacc.cos_accel,
/* presumably case eiSD1: one-step stochastic dynamics; note the
 * per-thread Gaussian random stream */
1904 do_update_sd1(upd->sd,upd->sd->gaussrand[th],
1906 inputrec->opts.acc,inputrec->opts.nFreeze,
1907 md->invmass,md->ptype,
1908 md->cFREEZE,md->cACC,md->cTC,
1909 state->x,xprime,state->v,force,state->sd_X,
1910 inputrec->opts.ngtc,inputrec->opts.tau_t,inputrec->opts.ref_t);
1913 /* The SD update is done in 2 parts, because an extra constraint step
1916 do_update_sd2(upd->sd,upd->sd->gaussrand[th],
1917 bInitStep,start_th,end_th,
1918 inputrec->opts.acc,inputrec->opts.nFreeze,
1919 md->invmass,md->ptype,
1920 md->cFREEZE,md->cACC,md->cTC,
1921 state->x,xprime,state->v,force,state->sd_X,
1922 inputrec->opts.ngtc,inputrec->opts.tau_t,inputrec->opts.ref_t,
/* presumably case eiBD: Brownian dynamics */
1926 do_update_bd(start_th,end_th,dt,
1927 inputrec->opts.nFreeze,md->invmass,md->ptype,
1928 md->cFREEZE,md->cTC,
1929 state->x,xprime,state->v,force,
1931 inputrec->opts.ngtc,inputrec->opts.tau_t,inputrec->opts.ref_t,
1932 upd->sd->bd_rf,upd->sd->gaussrand[th]);
/* presumably the velocity-verlet cases: alpha is the MTTK barostat
 * correction factor (see comment below) */
1936 alpha = 1.0 + DIM/((double)inputrec->opts.nrdf[0]); /* assuming barostat coupled to group 0. */
1937 switch (UpdatePart) {
/* VV velocity half-step */
1940 do_update_vv_vel(start_th,end_th,dt,
1941 ekind->tcstat,ekind->grpstat,
1942 inputrec->opts.acc,inputrec->opts.nFreeze,
1943 md->invmass,md->ptype,
1944 md->cFREEZE,md->cACC,
1946 (bNH || bPR),state->veta,alpha);
/* VV position half-step */
1949 do_update_vv_pos(start_th,end_th,dt,
1950 ekind->tcstat,ekind->grpstat,
1951 inputrec->opts.acc,inputrec->opts.nFreeze,
1952 md->invmass,md->ptype,md->cFREEZE,
1953 state->x,xprime,state->v,force,
1954 (bNH || bPR),state->veta,alpha);
/* Unrecognized integrator: fatal error (default case label elided) */
1959 gmx_fatal(FARGS,"Don't know how to update coordinates");
/* correct_ekin: debug-only routine that corrects a kinetic-energy tensor
 * computed without removing the center-of-mass velocity.  See the derivation
 * in the comment below; the correction term is accumulated in dekin and
 * both tensors plus the relevant vectors are printed to 'log'.
 * NOTE(review): interior lines (accumulation of mv and vcm over atoms,
 * the final tensor update) are elided in this excerpt.
 */
1967 void correct_ekin(FILE *log,int start,int end,rvec v[],rvec vcm,real mass[],
1968 real tmass,tensor ekin)
1971 * This is a debugging routine. It should not be called for production code
1973 * The kinetic energy should calculated according to:
1974 * Ekin = 1/2 m (v-vcm)^2
1975 * However the correction is not always applied, since vcm may not be
1976 * known in time and we compute
1977 * Ekin' = 1/2 m v^2 instead
1978 * This can be corrected afterwards by computing
1979 * Ekin = Ekin' + 1/2 m ( -2 v vcm + vcm^2)
1981 * Ekin = Ekin' - m v vcm + 1/2 m vcm^2
1988 /* Local particles */
1991 /* Processor dependent part. */
/* Accumulate per-atom momentum contributions over [start, end)
 * (loop body elided in this excerpt) */
1993 for(i=start; (i<end); i++)
1997 for(j=0; (j<DIM); j++)
/* Normalize: vcm becomes the mass-weighted mean velocity; hvcm = vcm/2 */
2003 svmul(1/tmass,vcm,vcm);
2004 svmul(0.5,vcm,hvcm);
/* Build the correction tensor dekin[j][k] = vcm[k]*(tm*hvcm[j] - mv[j]),
 * i.e. the (-m v vcm + 1/2 m vcm^2) term of the derivation above */
2006 for(j=0; (j<DIM); j++)
2008 for(k=0; (k<DIM); k++)
2010 dekin[j][k] += vcm[k]*(tm*hvcm[j]-mv[j]);
/* Report the correction and the corrected quantities for debugging */
2013 pr_rvecs(log,0,"dekin",dekin,DIM);
2014 pr_rvecs(log,0," ekin", ekin,DIM);
2015 fprintf(log,"dekin = %g, ekin = %g vcm = (%8.4f %8.4f %8.4f)\n",
2016 trace(dekin),trace(ekin),vcm[XX],vcm[YY],vcm[ZZ]);
2017 fprintf(log,"mv = (%8.4f %8.4f %8.4f)\n",
2018 mv[XX],mv[YY],mv[ZZ]);
2021 extern gmx_bool update_randomize_velocities(t_inputrec *ir, gmx_large_int_t step, t_mdatoms *md, t_state *state, gmx_update_t upd, t_idef *idef, gmx_constr_t constr) {
2024 real rate = (ir->delta_t)/ir->opts.tau_t[0];
2025 /* proceed with andersen if 1) it's fixed probability per
2026 particle andersen or 2) it's massive andersen and it's tau_t/dt */
2027 if ((ir->etc==etcANDERSEN) || do_per_step(step,(int)(1.0/rate)))
2029 srenew(upd->randatom,state->nalloc);
2030 srenew(upd->randatom_list,state->nalloc);
2031 if (upd->randatom_list_init == FALSE) {
2032 for (i=0;i<state->nalloc;i++) {
2033 upd->randatom[i] = FALSE;
2034 upd->randatom_list[i] = 0;
2036 upd->randatom_list_init = TRUE;
2038 andersen_tcoupl(ir,md,state,upd->sd->gaussrand[0],rate,
2039 (ir->etc==etcANDERSEN)?idef:NULL,
2040 constr?get_nblocks(constr):0,
2041 constr?get_sblock(constr):NULL,
2042 upd->randatom,upd->randatom_list,
2043 upd->sd->randomize_group,upd->sd->boltzfac);