/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
37 /* This file is completely threadsafe - keep it that way! */
40 #include "gromacs/legacyheaders/tgroup.h"
44 #include "gromacs/legacyheaders/gmx_omp_nthreads.h"
45 #include "gromacs/legacyheaders/macros.h"
46 #include "gromacs/legacyheaders/network.h"
47 #include "gromacs/legacyheaders/rbin.h"
48 #include "gromacs/legacyheaders/update.h"
49 #include "gromacs/math/vec.h"
50 #include "gromacs/topology/mtop_util.h"
51 #include "gromacs/utility/futil.h"
52 #include "gromacs/utility/smalloc.h"
54 static void init_grptcstat(int ngtc, t_grp_tcstat tcstat[])
58 for (i = 0; (i < ngtc); i++)
61 clear_mat(tcstat[i].ekinh);
62 clear_mat(tcstat[i].ekinh_old);
63 clear_mat(tcstat[i].ekinf);
67 static void init_grpstat(gmx_mtop_t *mtop, int ngacc, t_grp_acc gstat[])
70 gmx_mtop_atomloop_all_t aloop;
76 groups = &mtop->groups;
77 aloop = gmx_mtop_atomloop_all_init(mtop);
78 while (gmx_mtop_atomloop_all_next(aloop, &i, &atom))
80 grp = ggrpnr(groups, egcACC, i);
81 if ((grp < 0) && (grp >= ngacc))
83 gmx_incons("Input for acceleration groups wrong");
86 /* This will not work for integrator BD */
87 gstat[grp].mA += atom->m;
88 gstat[grp].mB += atom->mB;
/* Allocate and initialize the kinetic-energy data structure ekind:
 * per-T-coupling-group accumulators, coupling scale factors, the
 * per-OpenMP-thread Ekin work buffers, and acceleration-group masses.
 * NOTE(review): this span appears to have lost lines in extraction
 * (missing braces, declarations and an #ifdef around the fprintf);
 * code is left untouched, only comments are added. */
void init_ekindata(FILE gmx_unused *log, gmx_mtop_t *mtop, t_grpopts *opts,
                   gmx_ekindata_t *ekind)
    /* Diagnostic print of the group counts (T-coupling, acceleration,
     * energy groups); presumably compiled only in debug builds, which
     * would explain the gmx_unused on log — TODO confirm. */
    fprintf(log, "ngtc: %d, ngacc: %d, ngener: %d\n", opts->ngtc, opts->ngacc,
    /* bNEMD tells if we should remove remove the COM velocity
     * from the velocities during velocity scaling in T-coupling.
     * Turn this on when we have multiple acceleration groups
     * or one accelerated group. */
    ekind->bNEMD = (opts->ngacc > 1 || norm(opts->acc[0]) > 0);

    /* One t_grp_tcstat per temperature-coupling group; the Ekin
     * tensors are cleared by init_grptcstat. */
    ekind->ngtc = opts->ngtc;
    snew(ekind->tcstat, opts->ngtc);
    init_grptcstat(opts->ngtc, ekind->tcstat);
    /* Set Berendsen tcoupl lambda's to 1,
     * so runs without Berendsen coupling are not affected. */
    for (i = 0; i < opts->ngtc; i++)
        ekind->tcstat[i].lambda = 1.0;
        /* Nose-Hoover chain scale factors also start at identity. */
        ekind->tcstat[i].vscale_nhc = 1.0;
        ekind->tcstat[i].ekinscaleh_nhc = 1.0;
        ekind->tcstat[i].ekinscalef_nhc = 1.0;

    /* Per-thread scratch tensors so the Ekin accumulation loop in the
     * update module can run in parallel without atomics. */
    nthread = gmx_omp_nthreads_get(emntUpdate);
    snew(ekind->ekin_work_alloc, nthread);
    snew(ekind->ekin_work, nthread);
    snew(ekind->dekindl_work, nthread);
    /* Each thread allocates its own buffer so the memory is placed
     * (first-touch) on that thread's NUMA node. */
#pragma omp parallel for num_threads(nthread) schedule(static)
    for (thread = 0; thread < nthread; thread++)
#define EKIN_WORK_BUFFER_SIZE 2
        /* Allocate 2 extra elements on both sides, so in single
         * EKIN_WORK_BUFFER_SIZE*DIM*DIM*sizeof(real) = 72/144 bytes
         * buffer on both sides to avoid cache pollution. */
        snew(ekind->ekin_work_alloc[thread], ekind->ngtc+2*EKIN_WORK_BUFFER_SIZE);
        ekind->ekin_work[thread] = ekind->ekin_work_alloc[thread] + EKIN_WORK_BUFFER_SIZE;
        /* Nasty hack so we can have the per-thread accumulation
         * variable for dekindl in the same thread-local cache lines
         * as the per-thread accumulation tensors for ekin[fh],
         * because they are accumulated in the same loop. */
        ekind->dekindl_work[thread] = &(ekind->ekin_work[thread][ekind->ngtc][0][0]);
#undef EKIN_WORK_BUFFER_SIZE

    /* Acceleration-group statistics: total A/B-state masses per group. */
    ekind->ngacc = opts->ngacc;
    snew(ekind->grpstat, opts->ngacc);
    init_grpstat(mtop, opts->ngacc, ekind->grpstat);
153 void accumulate_u(t_commrec *cr, t_grpopts *opts, gmx_ekindata_t *ekind)
155 /* This routine will only be called when it's necessary */
161 for (g = 0; (g < opts->ngacc); g++)
163 add_binr(rb, DIM, ekind->grpstat[g].u);
167 for (g = 0; (g < opts->ngacc); g++)
169 extract_binr(rb, DIM*g, DIM, ekind->grpstat[g].u);
174 /* I don't think accumulate_ekin is used anymore? */
177 static void accumulate_ekin(t_commrec *cr, t_grpopts *opts,
178 gmx_ekindata_t *ekind)
184 for (g = 0; (g < opts->ngtc); g++)
186 gmx_sum(DIM*DIM, ekind->tcstat[g].ekinf[0], cr);
192 void update_ekindata(int start, int homenr, gmx_ekindata_t *ekind,
193 t_grpopts *opts, rvec v[], t_mdatoms *md, real lambda)
198 /* calculate mean velocities at whole timestep */
199 for (g = 0; (g < opts->ngtc); g++)
201 ekind->tcstat[g].T = 0;
206 for (g = 0; (g < opts->ngacc); g++)
208 clear_rvec(ekind->grpstat[g].u);
212 for (n = start; (n < start+homenr); n++)
218 for (d = 0; (d < DIM); d++)
220 mv = md->massT[n]*v[n][d];
221 ekind->grpstat[g].u[d] += mv;
225 for (g = 0; (g < opts->ngacc); g++)
227 for (d = 0; (d < DIM); d++)
229 ekind->grpstat[g].u[d] /=
230 (1-lambda)*ekind->grpstat[g].mA + lambda*ekind->grpstat[g].mB;
/* Sum the per-group kinetic-energy tensors into ekind->ekin and compute
 * the half-step (Th) and full-step (T) temperature of each T-coupling
 * group; returns a real (the system temperature — the return statement
 * lies past this chunk).
 * NOTE(review): this span lost lines in extraction (declarations,
 * braces, and the conditionals selecting between the bEkinAveVel /
 * bScaleEkin code paths); code is left byte-identical, comments only. */
real sum_ekin(t_grpopts *opts, gmx_ekindata_t *ekind, real *dekindlambda,
              gmx_bool bEkinAveVel, gmx_bool bScaleEkin)
    t_grp_tcstat *tcstat;
    /* Reset the total kinetic-energy tensor before accumulating groups. */
    clear_mat(ekind->ekin);
    for (i = 0; (i < ngtc); i++)
        tcstat = &ekind->tcstat[i];
        /* Sometimes a group does not have degrees of freedom, e.g.
         * when it consists of shells and virtual sites, then we just
         * set the temperatue to 0 and also neglect the kinetic
         * energy, which should be zero anyway. */
        /* in this case, kinetic energy is from the current velocities already */
        /* Apply the pending Nose-Hoover chain scale factor to the
         * full-step Ekin (branch taken when bEkinAveVel — the guarding
         * conditional is missing from this chunk). */
        msmul(tcstat->ekinf, tcstat->ekinscalef_nhc, tcstat->ekinf);
        /* Calculate the full step Ekin as the average of the half steps */
        for (j = 0; (j < DIM); j++)
            for (m = 0; (m < DIM); m++)
                tcstat->ekinf[j][m] =
                    0.5*(tcstat->ekinh[j][m]*tcstat->ekinscaleh_nhc + tcstat->ekinh_old[j][m]);
        /* Accumulate this group's tensor into the system total. */
        m_add(tcstat->ekinf, ekind->ekin, ekind->ekin);
        /* Temperatures from the Ekin traces and the group's degrees of
         * freedom nd (declaration/assignment not visible here). */
        tcstat->Th = calc_temp(trace(tcstat->ekinh), nd);
        tcstat->T = calc_temp(trace(tcstat->ekinf), nd);
        /* after the scaling factors have been multiplied in, we can remove them */
        tcstat->ekinscalef_nhc = 1.0;
        tcstat->ekinscaleh_nhc = 1.0;
    /* dH/dlambda kinetic term: the full-step value when velocities are
     * whole-step (bEkinAveVel), otherwise the average of the two half
     * steps — the if/else selecting between these two assignments is
     * missing from this chunk. */
    *dekindlambda = ekind->dekindl;
    *dekindlambda = 0.5*(ekind->dekindl + ekind->dekindl_old);