/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \libinternal \file
 *
 * \brief This file contains function declarations necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
#ifndef GMX_EWALD_PME_H
#define GMX_EWALD_PME_H

#include <string>

#include "gromacs/gpu_utils/gpu_macros.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"
struct interaction_const_t;
struct t_commrec;
struct t_inputrec;
struct t_nrnb;
struct PmeGpu;
struct gmx_wallclock_gpu_pme_t;
struct gmx_device_info_t;
struct gmx_pme_t;
struct gmx_wallcycle;

enum class GpuTaskCompletion;

namespace gmx
{
class ForceWithVirial;
class MDLogger;
}
enum {
    GMX_SUM_GRID_FORWARD, GMX_SUM_GRID_BACKWARD
};
/*! \brief Possible PME codepaths on a rank.
 * \todo: make this enum class with gmx_pme_t C++ refactoring
 */
enum class PmeRunMode
{
    None,  //!< No PME task is done
    CPU,   //!< Whole PME computation is done on CPU
    GPU,   //!< Whole PME computation is done on GPU
    Mixed, //!< Mixed mode: only spread and gather run on GPU; FFT and solving are done on CPU.
};
//! PME gathering output forces treatment
enum class PmeForceOutputHandling
{
    Set,             /**< Gather simply writes into the provided force buffer */
    ReduceWithInput, /**< Gather adds its output to the buffer.
                        On GPU, that means an additional H2D copy before the kernel launch. */
};
/*! \brief Return the smallest allowed PME grid size for \p pmeOrder */
int minimalPmeGridSize(int pmeOrder);
/*! \brief Check restrictions on pme_order and the PME grid nkx,nky,nkz.
 *
 * With errorsAreFatal=true, an exception or fatal error is generated
 * on violation of restrictions.
 * With errorsAreFatal=false, false is returned on violation of restrictions.
 * When all restrictions are obeyed, true is returned.
 * Argument useThreads tells if any MPI rank doing PME uses more than 1 thread.
 * If useThreads is unknown at the time of calling, pass true for conservative checking.
 *
 * The PME GPU restrictions are checked separately during pme_gpu_init().
 */
bool gmx_pme_check_restrictions(int pme_order,
                                int nkx, int nky, int nkz,
                                int nnodes_major,
                                gmx_bool useThreads,
                                gmx_bool errorsAreFatal);
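/* Illustrative usage sketch (not part of this header's API): a non-fatal
 * grid check before committing to a PME setup. The literal values below are
 * hypothetical; in real code pme_order and the grid sizes come from the inputrec.
 *
 *     // Conservative check: pass TRUE for useThreads when threading is unknown.
 *     bool gridOk = gmx_pme_check_restrictions(4,          // pme_order
 *                                              48, 48, 48, // nkx, nky, nkz
 *                                              1,          // nnodes_major
 *                                              TRUE,       // useThreads
 *                                              FALSE);     // errorsAreFatal
 *     // gridOk == false means this grid/order combination is not supported.
 */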
/*! \brief Construct PME data
 *
 * \throws   gmx::InconsistentInputError if input grid sizes/PME order are inconsistent.
 * \returns  Pointer to newly allocated and initialized PME data.
 */
gmx_pme_t *gmx_pme_init(const t_commrec *cr,
                        int nnodes_major, int nnodes_minor,
                        const t_inputrec *ir, int homenr,
                        gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                        gmx_bool bReproducible,
                        real ewaldcoeff_q, real ewaldcoeff_lj,
                        int nthread,
                        PmeRunMode runMode,
                        PmeGpu *pmeGpu,
                        gmx_device_info_t *gpuInfo,
                        const gmx::MDLogger &mdlog);
/*! \brief Destroys the PME data structure. */
void gmx_pme_destroy(gmx_pme_t *pme);
/*! \brief Flag values that control what gmx_pme_do() will calculate
 *
 * These can be combined with bitwise-OR if more than one thing is required.
 */
#define GMX_PME_SPREAD        (1<<0)
#define GMX_PME_SOLVE         (1<<1)
#define GMX_PME_CALC_F        (1<<2)
#define GMX_PME_CALC_ENER_VIR (1<<3)
/* This forces the grid to be backtransformed even without GMX_PME_CALC_F */
#define GMX_PME_CALC_POT      (1<<4)

#define GMX_PME_DO_ALL_F  (GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F)
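/* Illustrative sketch (not part of the API): the flags above are meant to be
 * OR-ed together. For example, to request a full computation including
 * energies and the virial one could write
 *
 *     int flags = GMX_PME_DO_ALL_F | GMX_PME_CALC_ENER_VIR;
 *
 * while obtaining only the back-transformed potential grid without forces
 * would look like
 *
 *     int flags = GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_POT;
 */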
/*! \brief Do a PME calculation on a CPU for the long range electrostatics and/or LJ.
 *
 * The meaning of \p flags is defined above, and determines which
 * parts of the calculation are performed.
 *
 * \return 0 indicates all is well, non-zero is an error code.
 */
int gmx_pme_do(struct gmx_pme_t *pme,
               int start, int homenr,
               rvec x[], rvec f[],
               real chargeA[], real chargeB[],
               real c6A[], real c6B[],
               real sigmaA[], real sigmaB[],
               matrix box, const t_commrec *cr,
               int maxshift_x, int maxshift_y,
               t_nrnb *nrnb, gmx_wallcycle *wcycle,
               matrix vir_q, matrix vir_lj,
               real *energy_q, real *energy_lj,
               real lambda_q, real lambda_lj,
               real *dvdlambda_q, real *dvdlambda_lj,
               int flags);
/*! \brief Called on the nodes that do PME exclusively (as slaves) */
int gmx_pmeonly(struct gmx_pme_t *pme,
                const t_commrec *cr, t_nrnb *mynrnb,
                gmx_wallcycle *wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                t_inputrec *ir, PmeRunMode runMode);
/*! \brief Calculate the PME grid energy V for n charges.
 *
 * The potential (found in \p pme) must have been found already with a
 * call to gmx_pme_do() with at least GMX_PME_SPREAD and GMX_PME_SOLVE
 * specified. Note that the charges are not spread on the grid in the
 * pme struct. Currently does not work in parallel or with free
 * energy.
 */
void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V);
/*! \brief Send the charges and maxshift to our PME-only node. */
void gmx_pme_send_parameters(const t_commrec *cr,
                             const interaction_const_t *ic,
                             gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                             real *chargeA, real *chargeB,
                             real *sqrt_c6A, real *sqrt_c6B,
                             real *sigmaA, real *sigmaB,
                             int maxshift_x, int maxshift_y);
/*! \brief Send the coordinates to our PME-only node and request a PME calculation */
void gmx_pme_send_coordinates(const t_commrec *cr, matrix box, rvec *x,
                              real lambda_q, real lambda_lj,
                              gmx_bool bEnerVir,
                              gmx_int64_t step, gmx_wallcycle *wcycle);
/*! \brief Tell our PME-only node to finish */
void gmx_pme_send_finish(const t_commrec *cr);

/*! \brief Tell our PME-only node to reset all cycle and flop counters */
void gmx_pme_send_resetcounters(const t_commrec *cr, gmx_int64_t step);
/*! \brief PP nodes receive the long range forces from the PME nodes */
void gmx_pme_receive_f(const t_commrec *cr,
                       gmx::ForceWithVirial *forceWithVirial,
                       real *energy_q, real *energy_lj,
                       real *dvdlambda_q, real *dvdlambda_lj,
                       float *pme_cycles);
/*! \brief
 * This function updates the local atom data on the GPU after DD (charges, coordinates, etc.).
 * TODO: it should update the PME CPU atom data as well.
 * (Currently the PME CPU code path, gmx_pme_do(), is passed the input pointers on each computation.)
 *
 * \param[in] pme            The PME structure.
 * \param[in] nAtoms         The number of particles.
 * \param[in] charges        The pointer to the array of particle charges.
 */
void gmx_pme_reinit_atoms(const gmx_pme_t *pme, const int nAtoms, const real *charges);
/* A block of PME GPU functions */
/*! \brief Checks whether the input system allows PME to run on a GPU.
 * TODO: this mostly duplicates an internal PME assert function
 * pme_gpu_check_restrictions(), except that one works with an already
 * formed gmx_pme_t structure. Should that one go away/work with inputrec?
 *
 * \param[in]  ir     Input system.
 * \param[out] error  The error message if the input is not supported on GPU.
 *
 * \returns true if PME can run on GPU with this input, false otherwise.
 */
bool pme_gpu_supports_input(const t_inputrec *ir, std::string *error);
/*! \brief
 * Returns the active PME codepath (CPU, GPU, mixed).
 * \todo This is rather static data that should be managed by the higher level task scheduler.
 *
 * \param[in] pme            The PME data structure.
 * \returns The active PME codepath.
 */
PmeRunMode pme_run_mode(const gmx_pme_t *pme);
/*! \brief
 * Tells if PME is enabled to run on GPU (not necessarily active at the moment).
 * \todo This is rather static data that should be managed by the hardware assignment manager.
 * For now, it is synonymous with the active PME codepath (in the absence of dynamic switching).
 *
 * \param[in] pme            The PME data structure.
 * \returns true if PME can run on GPU, false otherwise.
 */
inline bool pme_gpu_task_enabled(const gmx_pme_t *pme)
{
    return (pme != nullptr) && (pme_run_mode(pme) != PmeRunMode::CPU);
}
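/* Illustrative sketch (hypothetical caller code, not part of the API): deciding
 * at a high level whether the GPU entry points below need to be called on this rank.
 *
 *     if (pme_gpu_task_enabled(pme))
 *     {
 *         // PmeRunMode::GPU or PmeRunMode::Mixed: at least spread and gather run on the GPU,
 *         // so the pme_gpu_launch_*() calls below apply.
 *     }
 *     else
 *     {
 *         // PmeRunMode::None or PmeRunMode::CPU: use the CPU path, gmx_pme_do().
 *     }
 */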
// The following functions are all the PME GPU entry points,
// currently inlining to nothing on non-CUDA builds.
/*! \brief
 * Resets the PME GPU timings. To be called at the reset step.
 *
 * \param[in] pme            The PME structure.
 */
CUDA_FUNC_QUALIFIER void pme_gpu_reset_timings(const gmx_pme_t *CUDA_FUNC_ARGUMENT(pme)) CUDA_FUNC_TERM
/*! \brief
 * Copies the PME GPU timings to the gmx_wallclock_gpu_pme_t structure (for log output). To be called at the run end.
 *
 * \param[in]  pme            The PME structure.
 * \param[out] timings        The gmx_wallclock_gpu_pme_t structure to fill.
 */
CUDA_FUNC_QUALIFIER void pme_gpu_get_timings(const gmx_pme_t         *CUDA_FUNC_ARGUMENT(pme),
                                             gmx_wallclock_gpu_pme_t *CUDA_FUNC_ARGUMENT(timings)) CUDA_FUNC_TERM
/* The main PME GPU functions */
/*! \brief
 * Prepares the PME GPU computation (updating the box if needed).
 *
 * \param[in] pme               The PME data structure.
 * \param[in] needToUpdateBox   Tells if the stored unit cell parameters should be updated from \p box.
 * \param[in] box               The unit cell box.
 * \param[in] wcycle            The wallclock counter.
 * \param[in] flags             The combination of flags to affect this PME computation.
 *                              The flags are the GMX_PME_ flags from pme.h.
 */
CUDA_FUNC_QUALIFIER void pme_gpu_prepare_computation(gmx_pme_t     *CUDA_FUNC_ARGUMENT(pme),
                                                     bool           CUDA_FUNC_ARGUMENT(needToUpdateBox),
                                                     const matrix   CUDA_FUNC_ARGUMENT(box),
                                                     gmx_wallcycle *CUDA_FUNC_ARGUMENT(wcycle),
                                                     int            CUDA_FUNC_ARGUMENT(flags)) CUDA_FUNC_TERM
/*! \brief
 * Launches the first stage of PME on the GPU - H2D input transfers, the spreading kernel, and the D2H grid transfer if needed.
 *
 * \param[in] pme            The PME data structure.
 * \param[in] x              The array of local atoms' coordinates.
 * \param[in] wcycle         The wallclock counter.
 */
CUDA_FUNC_QUALIFIER void pme_gpu_launch_spread(gmx_pme_t     *CUDA_FUNC_ARGUMENT(pme),
                                               const rvec    *CUDA_FUNC_ARGUMENT(x),
                                               gmx_wallcycle *CUDA_FUNC_ARGUMENT(wcycle)) CUDA_FUNC_TERM
/*! \brief
 * Launches the middle stages of PME (FFT R2C, solving, FFT C2R) either on GPU or on CPU, depending on the run mode.
 *
 * \param[in] pme            The PME data structure.
 * \param[in] wcycle         The wallclock counter.
 */
CUDA_FUNC_QUALIFIER void pme_gpu_launch_complex_transforms(gmx_pme_t     *CUDA_FUNC_ARGUMENT(pme),
                                                           gmx_wallcycle *CUDA_FUNC_ARGUMENT(wcycle)) CUDA_FUNC_TERM
/*! \brief
 * Launches the last stage of PME on the GPU - force gathering and the D2H force transfer.
 *
 * \param[in] pme            The PME data structure.
 * \param[in] wcycle         The wallclock counter.
 * \param[in] forceTreatment Tells how the output forces should be treated. The gathering kernel either
 *                           stores the output reciprocal forces into the host array, or first copies
 *                           that array's contents to the GPU and accumulates into it. The reduction
 *                           is non-atomic.
 */
CUDA_FUNC_QUALIFIER void pme_gpu_launch_gather(const gmx_pme_t        *CUDA_FUNC_ARGUMENT(pme),
                                               gmx_wallcycle          *CUDA_FUNC_ARGUMENT(wcycle),
                                               PmeForceOutputHandling  CUDA_FUNC_ARGUMENT(forceTreatment)) CUDA_FUNC_TERM
/*! \brief
 * Blocks until the PME GPU tasks are completed, and gets the output forces and virial/energy
 * (if they were to be computed).
 *
 * \param[in]  pme            The PME data structure.
 * \param[in]  wcycle         The wallclock counter.
 * \param[out] forces         The output forces.
 * \param[out] virial         The output virial matrix.
 * \param[out] energy         The output energy.
 */
CUDA_FUNC_QUALIFIER void pme_gpu_wait_finish_task(const gmx_pme_t                *CUDA_FUNC_ARGUMENT(pme),
                                                  gmx_wallcycle                  *CUDA_FUNC_ARGUMENT(wcycle),
                                                  gmx::ArrayRef<const gmx::RVec> *CUDA_FUNC_ARGUMENT(forces),
                                                  matrix                          CUDA_FUNC_ARGUMENT(virial),
                                                  real                           *CUDA_FUNC_ARGUMENT(energy)) CUDA_FUNC_TERM
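/* Illustrative sketch of the expected call sequence for one GPU PME step,
 * following the stage ordering documented above (hypothetical caller code;
 * pme, haveDynamicBox, box, x, wcycle and flags are assumed to exist in the caller):
 *
 *     pme_gpu_prepare_computation(pme, haveDynamicBox, box, wcycle, flags);
 *     pme_gpu_launch_spread(pme, x, wcycle);
 *     pme_gpu_launch_complex_transforms(pme, wcycle);
 *     pme_gpu_launch_gather(pme, wcycle, PmeForceOutputHandling::Set);
 *
 *     gmx::ArrayRef<const gmx::RVec> forces;
 *     matrix                         virial;
 *     real                           energy;
 *     pme_gpu_wait_finish_task(pme, wcycle, &forces, virial, &energy);
 *     pme_gpu_reinit_computation(pme, wcycle);
 */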
/*! \brief
 * Attempts to complete the PME GPU tasks.
 *
 * The \p completionKind argument controls whether the function blocks until all
 * enqueued PME GPU tasks have completed (as pme_gpu_wait_finish_task() does) or only
 * checks and returns immediately if they have not.
 * When blocking, or when the tasks have completed, it also gets the output forces
 * by assigning the ArrayRef to the \p forces pointer passed in.
 * Virial/energy are also outputs if they were to be computed.
 *
 * Note: this also launches the reinitialization of the PME output buffers.
 * TODO: this should be moved out to avoid miscounting its wall-time (as a wait instead of a launch).
 *
 * \param[in]  pme            The PME data structure.
 * \param[in]  wcycle         The wallclock counter.
 * \param[out] forces         The output forces.
 * \param[out] virial         The output virial matrix.
 * \param[out] energy         The output energy.
 * \param[in]  completionKind Indicates whether PME task completion should only be checked rather than waited for.
 * \returns                   True if the PME GPU tasks have completed.
 */
CUDA_FUNC_QUALIFIER bool pme_gpu_try_finish_task(const gmx_pme_t                *CUDA_FUNC_ARGUMENT(pme),
                                                 gmx_wallcycle                  *CUDA_FUNC_ARGUMENT(wcycle),
                                                 gmx::ArrayRef<const gmx::RVec> *CUDA_FUNC_ARGUMENT(forces),
                                                 matrix                          CUDA_FUNC_ARGUMENT(virial),
                                                 real                           *CUDA_FUNC_ARGUMENT(energy),
                                                 GpuTaskCompletion               CUDA_FUNC_ARGUMENT(completionKind)) CUDA_FUNC_TERM_WITH_RETURN(false)
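/* Illustrative sketch (hypothetical caller code; assumes a GpuTaskCompletion
 * enumerator such as GpuTaskCompletion::Check is declared elsewhere): overlapping
 * other per-step work with PME completion by polling instead of blocking.
 *
 *     gmx::ArrayRef<const gmx::RVec> forces;
 *     matrix                         virial;
 *     real                           energy;
 *     while (!pme_gpu_try_finish_task(pme, wcycle, &forces, virial, &energy,
 *                                     GpuTaskCompletion::Check))
 *     {
 *         // do other useful work here while the PME GPU tasks are still running
 *     }
 */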
/*! \brief
 * The PME GPU reinitialization function that is called both at the end of any PME computation and on any load balancing.
 *
 * Clears the internal grid and energy/virial buffers; it is not safe to start
 * the PME computation without calling this.
 * Note that unlike in the nbnxn module, the force buffer does not need clearing.
 *
 * \todo Rename this function to *clear* -- it clearly only does output resetting
 * and we should be clear about what the function does.
 *
 * \param[in] pme            The PME data structure.
 * \param[in] wcycle         The wallclock counter.
 */
CUDA_FUNC_QUALIFIER void pme_gpu_reinit_computation(const gmx_pme_t *CUDA_FUNC_ARGUMENT(pme),
                                                    gmx_wallcycle   *CUDA_FUNC_ARGUMENT(wcycle)) CUDA_FUNC_TERM

#endif