2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
43 #include "gromacs/legacyheaders/network.h"
44 #include "gromacs/legacyheaders/typedefs.h"
45 #include "gromacs/math/gmxcomplex.h"
46 #include "gromacs/timing/wallcycle.h"
47 #include "gromacs/timing/walltime_accounting.h"
/* One spline-coefficient array (real *) per dimension; indexed by XX/YY/ZZ */
53 typedef real *splinevec[DIM];
56 GMX_SUM_GRID_FORWARD, GMX_SUM_GRID_BACKWARD
59 int gmx_pme_init(gmx_pme_t *pmedata, t_commrec *cr,
60 int nnodes_major, int nnodes_minor,
61 t_inputrec *ir, int homenr,
62 gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
63 gmx_bool bReproducible, int nthread);
64 * Initialize the PME data structures.
65 * Return value 0 indicates all well, non-zero is an error code.
68 int gmx_pme_reinit(gmx_pme_t * pmedata,
71 const t_inputrec * ir,
73 /* As gmx_pme_init, but takes most settings, except the grid, from pme_src */
75 int gmx_pme_destroy(FILE *log, gmx_pme_t *pmedata);
76 * Destroy (free) the PME data structures.
77 * Return value 0 indicates all well, non-zero is an error code.
/* Flag bits for the flags argument of gmx_pme_do(), selecting which
 * parts of the PME calculation are performed.
 */
80 #define GMX_PME_SPREAD (1<<0)
81 #define GMX_PME_SOLVE (1<<1)
82 #define GMX_PME_CALC_F (1<<2)
83 #define GMX_PME_CALC_ENER_VIR (1<<3)
84 /* This forces the grid to be backtransformed even without GMX_PME_CALC_F */
85 #define GMX_PME_CALC_POT (1<<4)
87 /* These values label bits used for sending messages to PME nodes using the
88 * routines in pme_pp.c and shouldn't conflict with the flags used there
90 #define GMX_PME_DO_COULOMB (1<<13)
91 #define GMX_PME_DO_LJ (1<<14)
/* Convenience mask: all stages needed to obtain forces */
93 #define GMX_PME_DO_ALL_F (GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F)
95 int gmx_pme_do(gmx_pme_t pme,
96 int start, int homenr,
98 real chargeA[], real chargeB[],
99 real c6A[], real c6B[],
100 real sigmaA[], real sigmaB[],
101 matrix box, t_commrec *cr,
102 int maxshift_x, int maxshift_y,
103 t_nrnb *nrnb, gmx_wallcycle_t wcycle,
104 matrix vir_q, real ewaldcoeff_q,
105 matrix vir_lj, real ewaldcoeff_lj,
106 real *energy_q, real *energy_lj,
107 real lambda_q, real lambda_lj,
108 real *dvdlambda_q, real *dvdlambda_lj,
110 /* Do a PME calculation for the long range electrostatics and/or LJ.
111 * flags, defined above, determine which parts of the calculation are performed.
112 * Return value 0 indicates all well, non zero is an error code.
115 int gmx_pmeonly(gmx_pme_t pme,
116 t_commrec *cr, t_nrnb *mynrnb,
117 gmx_wallcycle_t wcycle,
118 gmx_walltime_accounting_t walltime_accounting,
119 real ewaldcoeff_q, real ewaldcoeff_lj,
121 /* Called on the nodes that do PME exclusively (as slaves)
124 void gmx_pme_calc_energy(gmx_pme_t pme, int n, rvec *x, real *q, real *V);
125 /* Calculate the PME grid energy V for n charges, using the potential
126 * stored in the pme struct, determined earlier by a call to gmx_pme_do
127 * with at least GMX_PME_SPREAD and GMX_PME_SOLVE specified.
128 * Note that the charges are not spread on the grid in the pme struct.
129 * Currently does not work in parallel or with free energy.
132 /* The following three routines are for PME/PP node splitting in pme_pp.c */
134 /* Abstract (opaque) handle for PME <-> PP communication; the struct is defined in pme_pp.c */
135 typedef struct gmx_pme_pp *gmx_pme_pp_t;
137 void gmx_pme_check_restrictions(int pme_order,
138 int nkx, int nky, int nkz,
141 gmx_bool bUseThreads,
143 gmx_bool *bValidSettings);
144 /* Check restrictions on pme_order and the PME grid nkx,nky,nkz.
145 * With bFatal=TRUE, a fatal error is generated on violation,
146 * bValidSettings=NULL can be passed.
147 * With bFatal=FALSE, *bValidSettings reports the validity of the settings.
148 * bUseThreads tells if any MPI rank doing PME uses more than 1 thread.
149 * If bUseThreads is unknown at the time of calling, pass TRUE for conservative
153 gmx_pme_pp_t gmx_pme_pp_init(t_commrec *cr);
154 /* Initialize the PME-only side of the PME <-> PP communication and return a handle to it */
156 void gmx_pme_send_parameters(t_commrec *cr,
157 const interaction_const_t *ic,
158 gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
159 real *chargeA, real *chargeB,
160 real *sqrt_c6A, real *sqrt_c6B,
161 real *sigmaA, real *sigmaB,
162 int maxshift_x, int maxshift_y);
163 /* Send the charges and maxshift to our PME-only node. */
165 void gmx_pme_send_coordinates(t_commrec *cr, matrix box, rvec *x,
166 gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
167 real lambda_q, real lambda_lj,
168 gmx_bool bEnerVir, int pme_flags,
170 /* Send the coordinates to our PME-only node and request a PME calculation */
172 void gmx_pme_send_finish(t_commrec *cr);
173 /* Tell our PME-only node that the simulation is finished */
175 void gmx_pme_send_switchgrid(t_commrec *cr, ivec grid_size, real ewaldcoeff_q, real ewaldcoeff_lj);
176 /* Tell our PME-only node to switch to a new grid size and Ewald coefficients */
178 void gmx_pme_send_resetcounters(t_commrec *cr, gmx_int64_t step);
179 /* Tell our PME-only node to reset all cycle and flop counters at step 'step' */
181 void gmx_pme_receive_f(t_commrec *cr,
182 rvec f[], matrix vir_q, real *energy_q,
183 matrix vir_lj, real *energy_lj,
184 real *dvdlambda_q, real *dvdlambda_lj,
186 /* PP nodes receive the long range forces from the PME nodes */
188 /* Return values for gmx_pme_recv_q_x */
190 pmerecvqxX, /* calculate PME mesh interactions for new x */
191 pmerecvqxFINISH, /* the simulation should finish, we should quit */
192 pmerecvqxSWITCHGRID, /* change the PME grid size */
193 pmerecvqxRESETCOUNTERS /* reset the cycle and flop counters */
196 int gmx_pme_recv_coeffs_coords(gmx_pme_pp_t pme_pp,
198 real **chargeA, real **chargeB,
199 real **sqrt_c6A, real **sqrt_c6B,
200 real **sigmaA, real **sigmaB,
201 matrix box, rvec **x, rvec **f,
202 int *maxshift_x, int *maxshift_y,
203 gmx_bool *bFreeEnergy_q, gmx_bool *bFreeEnergy_lj,
204 real *lambda_q, real *lambda_lj,
205 gmx_bool *bEnerVir, int *pme_flags,
207 ivec grid_size, real *ewaldcoeff_q, real *ewaldcoeff_lj);
209 /* With return value:
210 * pmerecvqxX: all parameters set, chargeA and chargeB can be NULL
211 * pmerecvqxFINISH: no parameters set
212 * pmerecvqxSWITCHGRID: only grid_size, *ewaldcoeff_q and *ewaldcoeff_lj are set
213 * pmerecvqxRESETCOUNTERS: *step is set
216 void gmx_pme_send_force_vir_ener(gmx_pme_pp_t pme_pp,
217 rvec *f, matrix vir_q, real energy_q,
218 matrix vir_lj, real energy_lj,
219 real dvdlambda_q, real dvdlambda_lj,
221 /* Send the PME mesh force, virial and energy to the PP-only nodes */