/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic PME stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes
 *    (a compilable sketch follows the includes below):
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 */
#include "gromacs/ewald/pme.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"

#include "pme-internal.h"
static void reset_pmeonly_counters(gmx_wallcycle_t wcycle,
                                   gmx_walltime_accounting_t walltime_accounting,
                                   t_nrnb *nrnb, t_inputrec *ir,
                                   gmx_int64_t step)
{
    /* Reset all the counters related to performance over the run */
    wallcycle_stop(wcycle, ewcRUN);
    wallcycle_reset_all(wcycle);
    init_nrnb(nrnb);
    if (ir->nsteps >= 0)
    {
        /* ir->nsteps is not used here, but we update it for consistency */
        ir->nsteps -= step - ir->init_step;
    }
    ir->init_step = step;
    wallcycle_start(wcycle, ewcRUN);
    walltime_accounting_start(walltime_accounting);
}
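
/* Either finds an entry in the pmedata cache whose grid matches grid_size,
 * or grows the cache with a newly (re)initialized PME structure. In both
 * cases *pme_ret points to a structure set up for the requested grid on
 * return.
 */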
static void gmx_pmeonly_switch(int *npmedata, struct gmx_pme_t ***pmedata,
                               ivec grid_size,
                               real ewaldcoeff_q, real ewaldcoeff_lj,
                               t_commrec *cr, t_inputrec *ir,
                               struct gmx_pme_t **pme_ret)
{
    int               ind = 0;
    struct gmx_pme_t *pme = nullptr;

    while (ind < *npmedata)
    {
        pme = (*pmedata)[ind];
        if (pme->nkx == grid_size[XX] &&
            pme->nky == grid_size[YY] &&
            pme->nkz == grid_size[ZZ])
        {
            /* Here we have found an existing PME data structure that suits us.
             * However, in the GPU case we have to reinitialize it - there is only one GPU structure.
             * This should not cause actual GPU reallocations (the allocated buffers are never shrunk),
             * just some grid-size updates in the GPU kernel parameters.
             */
            gmx_pme_reinit(&((*pmedata)[ind]), cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
            *pme_ret = (*pmedata)[ind];
            return;
        }

        ind++;
    }

    /* No matching grid found: grow the cache by one entry */
    (*npmedata)++;
    srenew(*pmedata, *npmedata);

    /* Generate a new PME data structure, copying part of the old pointers */
    gmx_pme_reinit(&((*pmedata)[ind]), cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);

    *pme_ret = (*pmedata)[ind];
}
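
/* The main loop of a PME-only rank: receive coordinates and coefficients
 * from the PP ranks, run the PME mesh computation, and send forces, virial,
 * and energies back, repeating until a finish message is received.
 */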
int gmx_pmeonly(struct gmx_pme_t *pme,
                t_commrec *cr, t_nrnb *mynrnb,
                gmx_wallcycle_t wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                real ewaldcoeff_q, real ewaldcoeff_lj,
                t_inputrec *ir)
{
    int                npmedata = 1;
    struct gmx_pme_t **pmedata;
    gmx_pme_pp_t       pme_pp;
    int                ret;
    int                natoms = 0, count = 0;
    matrix             box;
    rvec              *x_pp = nullptr, *f_pp = nullptr;
    real              *chargeA = nullptr, *chargeB = nullptr;
    real              *c6A = nullptr, *c6B = nullptr;
    real              *sigmaA = nullptr, *sigmaB = nullptr;
    real               lambda_q = 0, lambda_lj = 0;
    int                maxshift_x = 0, maxshift_y = 0;
    real               energy_q, energy_lj, dvdlambda_q, dvdlambda_lj;
    matrix             vir_q, vir_lj;
    float              cycles;
    gmx_bool           bEnerVir = FALSE;
    gmx_int64_t        step;
    ivec               grid_switch;

    /* This data will only be used with PME tuning, i.e. switching PME grids */
    snew(pmedata, npmedata);
    pmedata[0] = pme;

    /* Set up the communication channel with the PP ranks */
    pme_pp = gmx_pme_pp_init(cr);

    init_nrnb(mynrnb);
    do /****** this is a quasi-loop over time steps! */
    {
        /* The reason for having a loop here is PME grid tuning/switching */
        do
        {
            /* Domain decomposition */
            bool atomSetChanged = false;
            ret = gmx_pme_recv_coeffs_coords(pme_pp,
                                             &natoms,
                                             &chargeA, &chargeB,
                                             &c6A, &c6B,
                                             &sigmaA, &sigmaB,
                                             box, &x_pp, &f_pp,
                                             &maxshift_x, &maxshift_y,
                                             &lambda_q, &lambda_lj,
                                             &bEnerVir,
                                             &step,
                                             grid_switch, &ewaldcoeff_q, &ewaldcoeff_lj,
                                             &atomSetChanged);
            if (ret == pmerecvqxSWITCHGRID)
            {
                /* Switch the PME grid to grid_switch */
                gmx_pmeonly_switch(&npmedata, &pmedata, grid_switch, ewaldcoeff_q, ewaldcoeff_lj, cr, ir, &pme);
            }

            if (atomSetChanged)
            {
                gmx_pme_reinit_atoms(pme, natoms, chargeA);
            }

            if (ret == pmerecvqxRESETCOUNTERS)
            {
                /* Reset the cycle and flop counters */
                reset_pmeonly_counters(wcycle, walltime_accounting, mynrnb, ir, step);
            }
        }
        while (ret == pmerecvqxSWITCHGRID || ret == pmerecvqxRESETCOUNTERS);
        if (ret == pmerecvqxFINISH)
        {
            /* We should stop: break out of the loop */
            break;
        }
        if (count == 0)
        {
            /* Start the run-level counters on the first real step only */
            wallcycle_start(wcycle, ewcRUN);
            walltime_accounting_start(walltime_accounting);
        }
        wallcycle_start(wcycle, ewcPMEMESH);

        /* gmx_pme_do() accumulates into these, so clear them each step */
        dvdlambda_q  = 0;
        dvdlambda_lj = 0;
        clear_mat(vir_q);
        clear_mat(vir_lj);
        gmx_pme_do(pme, 0, natoms, x_pp, f_pp,
                   chargeA, chargeB, c6A, c6B, sigmaA, sigmaB, box,
                   cr, maxshift_x, maxshift_y, mynrnb, wcycle,
                   vir_q, vir_lj,
                   &energy_q, &energy_lj, lambda_q, lambda_lj, &dvdlambda_q, &dvdlambda_lj,
                   GMX_PME_DO_ALL_F | (bEnerVir ? GMX_PME_CALC_ENER_VIR : 0));
        cycles = wallcycle_stop(wcycle, ewcPMEMESH);
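
        /* The mesh cycle count goes back with the results so the PP side
         * can use it for PP-PME load balancing.
         */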
        gmx_pme_send_force_vir_ener(pme_pp,
                                    f_pp, vir_q, energy_q, vir_lj, energy_lj,
                                    dvdlambda_q, dvdlambda_lj, cycles);
        count++;
    } /***** end of quasi-loop, we stop with the break above */
    while (TRUE);
    walltime_accounting_end(walltime_accounting);

    return 0;
}
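
/* On dedicated PME ranks (e.g. with mdrun -npme > 0), this function is
 * entered instead of the MD loop and only returns at termination.
 */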