/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \libinternal \file
 *
 * \brief This file contains function declarations necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */

#ifndef GMX_EWALD_PME_H
#define GMX_EWALD_PME_H

#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/real.h"

#include "pme-gpu-types.h"

struct interaction_const_t;
struct t_commrec;
struct t_inputrec;
struct t_nrnb;
struct gmx_pme_t;
struct gmx_wallclock_gpu_pme_t;
struct gmx_device_info_t;

namespace gmx
{
class MDLogger;
}

enum {
    GMX_SUM_GRID_FORWARD, GMX_SUM_GRID_BACKWARD
};

/*! \brief Return the smallest allowed PME grid size for \p pmeOrder */
int minimalPmeGridSize(int pmeOrder);

/*! \brief Check restrictions on pme_order and the PME grid nkx,nky,nkz.
 *
 * With errorsAreFatal=true, an exception or fatal error is generated
 * on violation of restrictions.
 * With errorsAreFatal=false, false is returned on violation of restrictions.
 * When all restrictions are obeyed, true is returned.
 * Argument useThreads tells if any MPI rank doing PME uses more than one thread.
 * If useThreads is unknown at the time of calling, pass true for conservative checking.
 */
bool gmx_pme_check_restrictions(int pme_order,
                                int nkx, int nky, int nkz,
                                int nnodes_major,
                                gmx_bool useThreads,
                                gmx_bool errorsAreFatal);

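/* Example (illustrative sketch, not part of this API): conservative validation
 * of hypothetical user-supplied PME settings. Passing useThreads=true is the
 * safe choice when the threading setup is not yet known; with
 * errorsAreFatal=false a violation is reported through the return value
 * instead of a fatal error.
 *
 *     if (!gmx_pme_check_restrictions(4, 48, 48, 48, 1, TRUE, FALSE))
 *     {
 *         // adjust the grid, e.g. using minimalPmeGridSize(4), and retry
 *     }
 */
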
/*! \brief Initialize \p pmedata
 *
 * \returns 0 on success; a non-zero value is an error code.
 * \throws gmx::InconsistentInputError if input grid sizes/PME order are inconsistent.
 */
int gmx_pme_init(struct gmx_pme_t **pmedata, struct t_commrec *cr,
                 int nnodes_major, int nnodes_minor,
                 const t_inputrec *ir, int homenr,
                 gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                 gmx_bool bReproducible,
                 real ewaldcoeff_q, real ewaldcoeff_lj,
                 gmx_device_info_t *gpuInfo,
                 const gmx::MDLogger &mdlog);

/*! \brief Destroys the PME data structure. */
void gmx_pme_destroy(gmx_pme_t *pme);

/*! \brief Flag values that control what gmx_pme_do() will calculate
 *
 * These can be combined with bitwise-OR if more than one thing is required.
 */
#define GMX_PME_SPREAD        (1<<0)
#define GMX_PME_SOLVE         (1<<1)
#define GMX_PME_CALC_F        (1<<2)
#define GMX_PME_CALC_ENER_VIR (1<<3)
/* This forces the grid to be backtransformed even without GMX_PME_CALC_F */
#define GMX_PME_CALC_POT      (1<<4)

#define GMX_PME_DO_ALL_F  (GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F)

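/* Example (illustrative): flags are OR-ed together to select the work for a
 * single gmx_pme_do() call; here spreading and solving with energy/virial
 * output but no force gathering. GMX_PME_DO_ALL_F covers the common force path.
 *
 *     int flags = GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_ENER_VIR;
 */
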
/*! \brief Do a PME calculation on a CPU for the long range electrostatics and/or LJ.
 *
 * The meaning of \p flags is defined above, and determines which
 * parts of the calculation are performed.
 *
 * \return 0 on success; a non-zero value is an error code.
 */
int gmx_pme_do(struct gmx_pme_t *pme,
               int start, int homenr,
               rvec x[], rvec f[],
               real chargeA[], real chargeB[],
               real c6A[], real c6B[],
               real sigmaA[], real sigmaB[],
               matrix box, t_commrec *cr,
               int maxshift_x, int maxshift_y,
               t_nrnb *nrnb, gmx_wallcycle_t wcycle,
               matrix vir_q, matrix vir_lj,
               real *energy_q, real *energy_lj,
               real lambda_q, real lambda_lj,
               real *dvdlambda_q, real *dvdlambda_lj,
               int flags);

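/* Example (illustrative sketch): a typical force + energy/virial step on the
 * CPU path. All variables are assumed to be set up by the caller; nullptr is
 * used here for the B-state parameter arrays of a non-free-energy run.
 *
 *     int ret = gmx_pme_do(pme, 0, homenr, x, f,
 *                          chargeA, nullptr, c6A, nullptr, sigmaA, nullptr,
 *                          box, cr, maxshift_x, maxshift_y, nrnb, wcycle,
 *                          vir_q, vir_lj, &energy_q, &energy_lj,
 *                          lambda_q, lambda_lj, &dvdl_q, &dvdl_lj,
 *                          GMX_PME_DO_ALL_F | GMX_PME_CALC_ENER_VIR);
 */
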
/*! \brief Called on the nodes that do PME exclusively (as slaves) */
int gmx_pmeonly(struct gmx_pme_t *pme,
                struct t_commrec *cr, t_nrnb *mynrnb,
                gmx_wallcycle_t wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                real ewaldcoeff_q, real ewaldcoeff_lj);

/*! \brief Calculate the PME grid energy V for n charges.
 *
 * The potential (found in \p pme) must have been found already with a
 * call to gmx_pme_do() with at least GMX_PME_SPREAD and GMX_PME_SOLVE
 * specified. Note that the charges are not spread on the grid in the
 * pme struct. Currently does not work in parallel or with free
 * energy.
 */
void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V);

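/* Example (illustrative sketch): evaluating the grid potential energy for a
 * set of probe charges. The preceding gmx_pme_do() call makes the solved grid
 * available; n, x and q are assumed to describe the probe charges.
 *
 *     gmx_pme_do(pme, ..., GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_POT);
 *     real V;
 *     gmx_pme_calc_energy(pme, n, x, q, &V);
 */
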
/*! \brief Send the charges and maxshift to our PME-only node. */
void gmx_pme_send_parameters(struct t_commrec *cr,
                             const interaction_const_t *ic,
                             gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                             real *chargeA, real *chargeB,
                             real *sqrt_c6A, real *sqrt_c6B,
                             real *sigmaA, real *sigmaB,
                             int maxshift_x, int maxshift_y);

/*! \brief Send the coordinates to our PME-only node and request a PME calculation */
void gmx_pme_send_coordinates(struct t_commrec *cr, matrix box, rvec *x,
                              real lambda_q, real lambda_lj);

/*! \brief Tell our PME-only node to finish */
void gmx_pme_send_finish(struct t_commrec *cr);

/*! \brief Tell our PME-only node to reset all cycle and flop counters */
void gmx_pme_send_resetcounters(struct t_commrec *cr, gmx_int64_t step);

/*! \brief PP nodes receive the long range forces from the PME nodes */
void gmx_pme_receive_f(struct t_commrec *cr,
                       rvec f[], matrix vir_q, real *energy_q,
                       matrix vir_lj, real *energy_lj,
                       real *dvdlambda_q, real *dvdlambda_lj,
                       float *pme_cycles);

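/* Example (illustrative sketch): the shape of the exchange between a PP node
 * and its PME-only node. Parameters are sent once; each step then sends
 * coordinates and later collects forces, energies and virials. All variables
 * are assumed to exist on the PP side.
 *
 *     gmx_pme_send_parameters(cr, ic, FALSE, FALSE,
 *                             chargeA, nullptr, sqrt_c6A, nullptr,
 *                             sigmaA, nullptr, maxshift_x, maxshift_y);
 *     for (gmx_int64_t step = 0; step < nsteps; step++)
 *     {
 *         gmx_pme_send_coordinates(cr, box, x, lambda_q, lambda_lj);
 *         // ... overlap local PP work with the remote PME computation ...
 *         gmx_pme_receive_f(cr, f, vir_q, &energy_q, vir_lj, &energy_lj,
 *                           &dvdl_q, &dvdl_lj, &pmeCycles);
 *     }
 *     gmx_pme_send_finish(cr);
 */
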
/*! \brief
 * This function updates the local atom data on the GPU after DD (charges, coordinates, etc.).
 * TODO: it should update the PME CPU atom data as well.
 * (Currently the CPU PME code gets its input pointers passed anew with each gmx_pme_do() call.)
 *
 * \param[in] pme        The PME structure.
 * \param[in] nAtoms     The number of particles.
 * \param[in] charges    The pointer to the array of particle charges.
 */
void gmx_pme_reinit_atoms(const gmx_pme_t *pme, const int nAtoms, const real *charges);

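/* Example (illustrative): refreshing the GPU-side atom data after domain
 * decomposition has changed the local atom set; the count and charge array
 * are assumed to come from the DD code.
 *
 *     if (pme_gpu_task_enabled(pme))
 *     {
 *         gmx_pme_reinit_atoms(pme, newLocalAtomCount, localCharges);
 *     }
 */
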
/* A block of PME GPU functions */

/*! \brief
 * Tells if PME is enabled to run on GPU (not necessarily active at the moment).
 * For now, this decision is stored in the PME structure itself.
 * FIXME: this is information that should be managed by the task scheduler.
 * As soon as such functionality appears, this function should be removed from this module.
 *
 * \param[in] pme  The PME data structure.
 * \returns true if PME can run on GPU, false otherwise.
 */
bool pme_gpu_task_enabled(const gmx_pme_t *pme);

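/* Example (illustrative): guarding GPU-specific calls so that CPU-only runs
 * skip them, which is how the GPU functions below are intended to be used.
 *
 *     if (pme_gpu_task_enabled(pme))
 *     {
 *         pme_gpu_reset_timings(pme);
 *     }
 */
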
/*! \brief
 * Resets the PME GPU timings. To be called at the reset step.
 *
 * \param[in] pme  The PME structure.
 */
void pme_gpu_reset_timings(const gmx_pme_t *pme);

/*! \brief
 * Copies the PME GPU timings to the gmx_wallclock_gpu_pme_t structure (for log output).
 * To be called at the run end.
 *
 * \param[in]  pme      The PME structure.
 * \param[out] timings  The gmx_wallclock_gpu_pme_t structure the timings are copied to.
 */
void pme_gpu_get_timings(const gmx_pme_t *pme,
                         gmx_wallclock_gpu_pme_t *timings);

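/* Example (illustrative): collecting the GPU timings at the end of a run for
 * log output, into a zero-initialized timings structure.
 *
 *     gmx_wallclock_gpu_pme_t gpuTimings = {};
 *     if (pme_gpu_task_enabled(pme))
 *     {
 *         pme_gpu_get_timings(pme, &gpuTimings);
 *     }
 */
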
/* The main PME GPU functions */

/*! \brief
 * Gets the output forces and virial/energy if the corresponding flags were passed in.
 *
 * \param[in]  pme       The PME data structure.
 * \param[in]  wcycle    The wallclock counter.
 * \param[out] vir_q     The output virial matrix.
 * \param[out] energy_q  The output energy.
 */
void pme_gpu_get_results(const gmx_pme_t *pme,
                         gmx_wallcycle_t wcycle,
                         matrix vir_q,
                         real *energy_q);

#endif