3 * This source code is part of
7 * GROningen MAchine for Chemical Simulations
10 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
11 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
12 * Copyright (c) 2001-2004, The GROMACS development team,
13 * check out http://www.gromacs.org for more information.
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version 2
18 * of the License, or (at your option) any later version.
20 * If you want to redistribute modifications, please consider that
21 * scientific software is very special. Version control is crucial -
22 * bugs must be traceable. We will be happy to consider code for
23 * inclusion in the official distribution, but derived work must not
24 * be called official GROMACS. Details are found in the README & COPYING
25 * files - if they are missing, get the official version at www.gromacs.org.
27 * To help us fund GROMACS development, we humbly ask that you cite
28 * the papers on the package - you can find them in the top README file.
30 * For more info, check our website at http://www.gromacs.org
33 * Gromacs Runs On Most of All Computer Systems
41 #include "gmxcomplex.h"
48 typedef real *splinevec[DIM];
51 GMX_SUM_QGRID_FORWARD, GMX_SUM_QGRID_BACKWARD
54 int gmx_pme_init(gmx_pme_t *pmedata, t_commrec *cr,
55 int nnodes_major, int nnodes_minor,
56 t_inputrec *ir, int homenr,
57 gmx_bool bFreeEnergy, gmx_bool bReproducible, int nthread);
58 /* Initialize the pme data structures resepectively.
59 * Return value 0 indicates all well, non zero is an error code.
62 int gmx_pme_reinit(gmx_pme_t * pmedata,
65 const t_inputrec * ir,
67 /* As gmx_pme_init, but takes most settings, except the grid, from pme_src */
69 int gmx_pme_destroy(FILE *log, gmx_pme_t *pmedata);
70 /* Destroy the pme data structures resepectively.
71 * Return value 0 indicates all well, non zero is an error code.
/* Bit flags selecting which parts of the PME calculation gmx_pme_do
 * performs; OR them together in the flags argument.
 */
#define GMX_PME_SPREAD_Q      (1<<0)
#define GMX_PME_SOLVE         (1<<1)
#define GMX_PME_CALC_F        (1<<2)
#define GMX_PME_CALC_ENER_VIR (1<<3)
/* This forces the grid to be backtransformed even without GMX_PME_CALC_F */
#define GMX_PME_CALC_POT      (1<<4)
/* Spread, solve and force calculation: the full normal PME pass. */
#define GMX_PME_DO_ALL_F  (GMX_PME_SPREAD_Q | GMX_PME_SOLVE | GMX_PME_CALC_F)
82 int gmx_pme_do(gmx_pme_t pme,
83 int start, int homenr,
85 real chargeA[], real chargeB[],
86 matrix box, t_commrec *cr,
87 int maxshift_x, int maxshift_y,
88 t_nrnb *nrnb, gmx_wallcycle_t wcycle,
89 matrix lrvir, real ewaldcoeff,
90 real *energy, real lambda,
91 real *dvdlambda, int flags);
92 /* Do a PME calculation for the long range electrostatics.
93 * flags, defined above, determine which parts of the calculation are performed.
94 * Return value 0 indicates all well, non zero is an error code.
97 int gmx_pmeonly(gmx_pme_t pme,
98 t_commrec *cr, t_nrnb *mynrnb,
99 gmx_wallcycle_t wcycle,
100 gmx_walltime_accounting_t walltime_accounting,
103 /* Called on the nodes that do PME exclusively (as slaves)
106 void gmx_pme_calc_energy(gmx_pme_t pme, int n, rvec *x, real *q, real *V);
107 /* Calculate the PME grid energy V for n charges with a potential
108 * in the pme struct determined before with a call to gmx_pme_do
109 * with at least GMX_PME_SPREAD_Q and GMX_PME_SOLVE specified.
110 * Note that the charges are not spread on the grid in the pme struct.
111 * Currently does not work in parallel or with free energy.
/* The following routines are for PME/PP node splitting in pme_pp.c */

/* Abstract type for PME <-> PP communication */
typedef struct gmx_pme_pp *gmx_pme_pp_t;
119 void gmx_pme_check_restrictions(int pme_order,
120 int nkx, int nky, int nkz,
123 gmx_bool bUseThreads,
125 gmx_bool *bValidSettings);
126 /* Check restrictions on pme_order and the PME grid nkx,nky,nkz.
127 * With bFatal=TRUE, a fatal error is generated on violation,
128 * bValidSettings=NULL can be passed.
129 * With bFatal=FALSE, *bValidSettings reports the validity of the settings.
130 * bUseThreads tells if any MPI rank doing PME uses more than 1 threads.
131 * If at calling you bUseThreads is unknown, pass TRUE for conservative
135 gmx_pme_pp_t gmx_pme_pp_init(t_commrec *cr);
136 /* Initialize the PME-only side of the PME <-> PP communication */
138 void gmx_pme_send_q(t_commrec *cr,
139 gmx_bool bFreeEnergy, real *chargeA, real *chargeB,
140 int maxshift_x, int maxshift_y);
141 /* Send the charges and maxshift to out PME-only node. */
143 void gmx_pme_send_x(t_commrec *cr, matrix box, rvec *x,
144 gmx_bool bFreeEnergy, real lambda,
146 gmx_large_int_t step);
147 /* Send the coordinates to our PME-only node and request a PME calculation */
149 void gmx_pme_send_finish(t_commrec *cr);
150 /* Tell our PME-only node to finish */
152 void gmx_pme_send_switchgrid(t_commrec *cr, ivec grid_size, real ewaldcoeff);
153 /* Tell our PME-only node to switch to a new grid size */
155 void gmx_pme_send_resetcounters(t_commrec *cr, gmx_large_int_t step);
156 /* Tell our PME-only node to reset all cycle and flop counters */
158 void gmx_pme_receive_f(t_commrec *cr,
159 rvec f[], matrix vir,
160 real *energy, real *dvdlambda,
162 /* PP nodes receive the long range forces from the PME nodes */
/* Return values for gmx_pme_recv_q_x.
 * NOTE(review): the enclosing "enum { ... };" appears to have been lost in
 * extraction and was restored here.
 */
enum {
    pmerecvqxX,            /* calculate PME mesh interactions for new x    */
    pmerecvqxFINISH,       /* the simulation should finish, we should quit */
    pmerecvqxSWITCHGRID,   /* change the PME grid size                     */
    pmerecvqxRESETCOUNTERS /* reset the cycle and flop counters            */
};
172 int gmx_pme_recv_q_x(gmx_pme_pp_t pme_pp,
174 real **chargeA, real **chargeB,
175 matrix box, rvec **x, rvec **f,
176 int *maxshift_x, int *maxshift_y,
177 gmx_bool *bFreeEnergy, real *lambda,
179 gmx_large_int_t *step,
180 ivec grid_size, real *ewaldcoeff);
182 /* With return value:
183 * pmerecvqxX: all parameters set, chargeA and chargeB can be NULL
184 * pmerecvqxFINISH: no parameters set
185 * pmerecvqxSWITCHGRID: only grid_size and *ewaldcoeff are set
186 * pmerecvqxRESETCOUNTERS: *step is set
189 void gmx_pme_send_force_vir_ener(gmx_pme_pp_t pme_pp,
191 real energy, real dvdlambda,
193 /* Send the PME mesh force, virial and energy to the PP-only nodes */