/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \libinternal \file
 *
 * \brief This file contains function declarations necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 *
 * \ingroup module_ewald
 */

#ifndef GMX_EWALD_PME_H
#define GMX_EWALD_PME_H

#include <string>

#include "gromacs/gpu_utils/gpu_macros.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"

struct interaction_const_t;
struct PmeGpuProgram;
struct gmx_wallclock_gpu_pme_t;
struct gmx_device_info_t;

enum class GpuTaskCompletion;

//! Convenience name for a handle to the compiled PME GPU kernel programs.
using PmeGpuProgramHandle = const PmeGpuProgram *;

namespace gmx
{
class ForceWithVirial;
class MDLogger;
enum class PinningPolicy : int;
}

enum {
    GMX_SUM_GRID_FORWARD, GMX_SUM_GRID_BACKWARD
};

/*! \brief Possible PME codepaths on a rank.
 * \todo: make this enum class with gmx_pme_t C++ refactoring
 */
enum class PmeRunMode
{
    None,    //!< No PME task is done
    CPU,     //!< Whole PME computation is done on CPU
    GPU,     //!< Whole PME computation is done on GPU
    Mixed,   //!< Mixed mode: only spread and gather run on GPU; FFT and solving are done on CPU.
};

//! How the output forces of PME gathering are treated
enum class PmeForceOutputHandling
{
    Set,             /**< Gather simply writes into the provided force buffer */
    ReduceWithInput, /**< Gather adds its output to the buffer.
                        On GPU, that means an additional H2D copy before the kernel launch. */
};

/*! \brief Return the smallest allowed PME grid size for \p pmeOrder */
int minimalPmeGridSize(int pmeOrder);

/*! \brief Check restrictions on pme_order and the PME grid nkx,nky,nkz.
 *
 * With errorsAreFatal=true, an exception or fatal error is generated
 * on violation of restrictions.
 * With errorsAreFatal=false, false is returned on violation of restrictions.
 * When all restrictions are obeyed, true is returned.
 * Argument useThreads tells if any MPI rank doing PME uses more than one thread.
 * If useThreads is unknown at the time of calling, pass true for conservative checking.
 *
 * The PME GPU restrictions are checked separately during pme_gpu_init().
 */
bool gmx_pme_check_restrictions(int pme_order,
                                int nkx, int nky, int nkz,
                                int numPmeDomainsAlongX,
                                bool useThreads,
                                bool errorsAreFatal);

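/*
 * Illustrative usage (a sketch, not part of the API): validate a candidate grid without
 * aborting the run. The variables pmeOrder, nkx, nky, nkz and numPmeDomainsAlongX are
 * hypothetical caller-side values.
 *
 *   if (!gmx_pme_check_restrictions(pmeOrder, nkx, nky, nkz, numPmeDomainsAlongX,
 *                                   true,     // conservative: assume multi-threaded PME ranks
 *                                   false))   // report violations via the return value
 *   {
 *       // choose a different grid or PME order
 *   }
 */
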
/*! \brief Construct PME data
 *
 * \throws   gmx::InconsistentInputError if input grid sizes/PME order are inconsistent.
 * \returns  Pointer to newly allocated and initialized PME data.
 *
 * \todo We should evolve something like a \c GpuManager that holds \c
 * gmx_device_info_t * and \c PmeGpuProgramHandle and perhaps other
 * related things whose lifetime can/should exceed that of a task (or
 * perhaps task manager). See Redmine #2522.
 */
gmx_pme_t *gmx_pme_init(const t_commrec *cr,
                        const NumPmeDomains &numPmeDomains,
                        const t_inputrec *ir, int homenr,
                        gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                        gmx_bool bReproducible,
                        real ewaldcoeff_q, real ewaldcoeff_lj,
                        const gmx_device_info_t *gpuInfo,
                        PmeGpuProgramHandle pmeGpuProgram,
                        const gmx::MDLogger &mdlog);

/*! \brief Destroys the PME data structure. */
void gmx_pme_destroy(gmx_pme_t *pme);

/*! \brief Flag values that control what gmx_pme_do() will calculate
 *
 * These can be combined with bitwise-OR if more than one thing is required.
 */
#define GMX_PME_SPREAD        (1<<0)
#define GMX_PME_SOLVE         (1<<1)
#define GMX_PME_CALC_F        (1<<2)
#define GMX_PME_CALC_ENER_VIR (1<<3)
/* This forces the grid to be backtransformed even without GMX_PME_CALC_F */
#define GMX_PME_CALC_POT      (1<<4)

#define GMX_PME_DO_ALL_F  (GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F)

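/*
 * Example of combining the flags above (illustrative only): a typical step that needs
 * forces plus energies and the virial could pass
 *
 *   int pmeFlags = GMX_PME_DO_ALL_F | GMX_PME_CALC_ENER_VIR;
 *
 * to gmx_pme_do(), which is equivalent to
 * GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F | GMX_PME_CALC_ENER_VIR.
 */
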
/*! \brief Do a PME calculation on a CPU for the long-range electrostatics and/or LJ.
 *
 * The meaning of \p flags is defined above, and determines which
 * parts of the calculation are performed.
 *
 * \return 0 indicates that all went well; non-zero is an error code.
 */
int gmx_pme_do(struct gmx_pme_t *pme,
               int start,         int homenr,
               rvec x[],          rvec f[],
               real chargeA[],    real chargeB[],
               real c6A[],        real c6B[],
               real sigmaA[],     real sigmaB[],
               matrix box,        const t_commrec *cr,
               int maxshift_x,    int maxshift_y,
               t_nrnb *nrnb,      gmx_wallcycle *wcycle,
               matrix vir_q,      matrix vir_lj,
               real *energy_q,    real *energy_lj,
               real lambda_q,     real lambda_lj,
               real *dvdlambda_q, real *dvdlambda_lj,
               int flags);

/*! \brief Called on the nodes that do PME exclusively */
int gmx_pmeonly(struct gmx_pme_t *pme,
                const t_commrec *cr, t_nrnb *mynrnb,
                gmx_wallcycle *wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                t_inputrec *ir, PmeRunMode runMode);

/*! \brief Calculate the PME grid energy V for n charges.
 *
 * The potential (found in \p pme) must have been computed already with a
 * call to gmx_pme_do() with at least GMX_PME_SPREAD and GMX_PME_SOLVE
 * specified. Note that the charges are not spread on the grid in the
 * pme struct. Currently does not work in parallel or with free
 * energy.
 */
void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V);

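/*
 * Illustrative sequence (a sketch; pme, n, x and q are caller-side data): first spread and
 * solve on the existing grid, then evaluate the grid energy for the given charges.
 *
 *   gmx_pme_do(pme, ..., GMX_PME_SPREAD | GMX_PME_SOLVE);   // at least these flags, see above
 *   real V;
 *   gmx_pme_calc_energy(pme, n, x, q, &V);
 */
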
/*! \brief Send the charges and maxshift to our PME-only node. */
void gmx_pme_send_parameters(const t_commrec *cr,
                             const interaction_const_t *ic,
                             gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                             real *chargeA, real *chargeB,
                             real *sqrt_c6A, real *sqrt_c6B,
                             real *sigmaA, real *sigmaB,
                             int maxshift_x, int maxshift_y);

/*! \brief Send the coordinates to our PME-only node and request a PME calculation */
void gmx_pme_send_coordinates(const t_commrec *cr, matrix box, rvec *x,
                              real lambda_q, real lambda_lj,
                              int64_t step, gmx_wallcycle *wcycle);

/*! \brief Tell our PME-only node to finish */
void gmx_pme_send_finish(const t_commrec *cr);

/*! \brief Tell our PME-only node to reset all cycle and flop counters */
void gmx_pme_send_resetcounters(const t_commrec *cr, int64_t step);

/*! \brief PP nodes receive the long-range forces from the PME nodes */
void gmx_pme_receive_f(const t_commrec *cr,
                       gmx::ForceWithVirial *forceWithVirial,
                       real *energy_q, real *energy_lj,
                       real *dvdlambda_q, real *dvdlambda_lj,
                       float *pme_cycles);

/*! \brief
 * This function updates the local atom data on GPU after DD (charges, coordinates, etc.).
 * TODO: it should update the PME CPU atom data as well.
 * (Currently the PME CPU call gmx_pme_do() is passed the input pointers on each computation.)
 *
 * \param[in] pme            The PME structure.
 * \param[in] nAtoms         The number of particles.
 * \param[in] charges        The pointer to the array of particle charges.
 */
void gmx_pme_reinit_atoms(const gmx_pme_t *pme, int nAtoms, const real *charges);

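/*
 * Illustrative use after a domain-decomposition repartitioning (a sketch; numHomeAtoms and
 * localCharges are hypothetical caller-side names for the new local atom count and charges):
 *
 *   gmx_pme_reinit_atoms(pme, numHomeAtoms, localCharges);
 */
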
/* A block of PME GPU functions */

/*! \brief Checks whether the GROMACS build allows running PME on a GPU.
 * TODO: this partly duplicates an internal PME assert function
 * pme_gpu_check_restrictions(), except that one works with a
 * formed gmx_pme_t structure. Should that one go away/work with inputrec?
 *
 * \param[out] error  If non-null, the error message when PME is not supported on GPU.
 *
 * \returns true if PME can run on GPU on this build, false otherwise.
 */
bool pme_gpu_supports_build(std::string *error);

/*! \brief Checks whether the input system allows running PME on a GPU.
 * TODO: this partly duplicates an internal PME assert function
 * pme_gpu_check_restrictions(), except that one works with a
 * formed gmx_pme_t structure. Should that one go away/work with inputrec?
 *
 * \param[in]  ir     Input system.
 * \param[in]  mtop   Complete system topology to check if an FE simulation perturbs charges.
 * \param[out] error  If non-null, the error message if the input is not supported on GPU.
 *
 * \returns true if PME can run on GPU with this input, false otherwise.
 */
bool pme_gpu_supports_input(const t_inputrec &ir, const gmx_mtop_t &mtop, std::string *error);

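/*
 * Illustrative check before assigning PME to a GPU (a sketch; inputrec and mtop are the
 * caller's t_inputrec and gmx_mtop_t objects):
 *
 *   std::string error;
 *   const bool pmeGpuPossible = pme_gpu_supports_build(&error)
 *                               && pme_gpu_supports_input(inputrec, mtop, &error);
 *   // On failure, 'error' describes why the GPU codepath is unavailable.
 */
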
/*! \brief
 * Returns the active PME codepath (CPU, GPU, mixed).
 * \todo This is rather static data that should be managed by the higher-level task scheduler.
 *
 * \param[in] pme            The PME data structure.
 * \returns                  The active PME codepath.
 */
PmeRunMode pme_run_mode(const gmx_pme_t *pme);

/*! \libinternal \brief
 * Return the pinning policy appropriate for this build configuration
 * for relevant buffers used for PME task on this rank (e.g. running
 * on a GPU).
 */
gmx::PinningPolicy pme_get_pinning_policy();

/*! \brief
 * Tells if PME is enabled to run on GPU (not necessarily active at the moment).
 * \todo This is rather static data that should be managed by the hardware assignment manager.
 * For now, it is synonymous with the active PME codepath (in the absence of dynamic switching).
 *
 * \param[in] pme            The PME data structure.
 * \returns true if PME can run on GPU, false otherwise.
 */
inline bool pme_gpu_task_enabled(const gmx_pme_t *pme)
{
    return (pme != nullptr) && (pme_run_mode(pme) != PmeRunMode::CPU);
}

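/*
 * Illustrative caller-side dispatch (a sketch of assumed usage, not prescribed scheduling):
 *
 *   if (pme_gpu_task_enabled(pme))
 *   {
 *       // PmeRunMode::GPU or PmeRunMode::Mixed: use the pme_gpu_*() launch/wait calls below
 *   }
 *   else
 *   {
 *       // PmeRunMode::CPU: call gmx_pme_do() directly
 *   }
 */
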
// The following functions are all the PME GPU entry points,
// currently inlining to nothing on non-CUDA builds.

/*! \brief
 * Resets the PME GPU timings. To be called at the reset step.
 *
 * \param[in] pme            The PME structure.
 */
GPU_FUNC_QUALIFIER void pme_gpu_reset_timings(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme)) GPU_FUNC_TERM

/*! \brief
 * Copies the PME GPU timings to the gmx_wallclock_gpu_pme_t structure (for log output). To be called at the run end.
 *
 * \param[in]  pme            The PME structure.
 * \param[out] timings        The gmx_wallclock_gpu_pme_t structure to fill.
 */
GPU_FUNC_QUALIFIER void pme_gpu_get_timings(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme),
                                            gmx_wallclock_gpu_pme_t *GPU_FUNC_ARGUMENT(timings)) GPU_FUNC_TERM

/* The main PME GPU functions */

/*! \brief
 * Prepares the PME GPU computation, updating the stored unit cell box if needed.
 *
 * \param[in] pme               The PME data structure.
 * \param[in] needToUpdateBox   Tells if the stored unit cell parameters should be updated from \p box.
 * \param[in] box               The unit cell box.
 * \param[in] wcycle            The wallclock counter.
 * \param[in] flags             The combination of flags to affect this PME computation.
 *                              The flags are the GMX_PME_ flags from pme.h.
 */
GPU_FUNC_QUALIFIER void pme_gpu_prepare_computation(gmx_pme_t *GPU_FUNC_ARGUMENT(pme),
                                                    bool GPU_FUNC_ARGUMENT(needToUpdateBox),
                                                    const matrix GPU_FUNC_ARGUMENT(box),
                                                    gmx_wallcycle *GPU_FUNC_ARGUMENT(wcycle),
                                                    int GPU_FUNC_ARGUMENT(flags)) GPU_FUNC_TERM

/*! \brief
 * Launches the first stage of PME on GPU - H2D input transfers, the spreading kernel, and the D2H grid transfer if needed.
 *
 * \param[in] pme               The PME data structure.
 * \param[in] x                 The array of local atoms' coordinates.
 * \param[in] wcycle            The wallclock counter.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_spread(gmx_pme_t *GPU_FUNC_ARGUMENT(pme),
                                              const rvec *GPU_FUNC_ARGUMENT(x),
                                              gmx_wallcycle *GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM

/*! \brief
 * Launches the middle stages of PME (FFT R2C, solving, FFT C2R) either on GPU or on CPU, depending on the run mode.
 *
 * \param[in] pme               The PME data structure.
 * \param[in] wcycle            The wallclock counter.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_complex_transforms(gmx_pme_t *GPU_FUNC_ARGUMENT(pme),
                                                          gmx_wallcycle *GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM

/*! \brief
 * Launches the last stage of PME on GPU - force gathering and the D2H force transfer.
 *
 * \param[in] pme               The PME data structure.
 * \param[in] wcycle            The wallclock counter.
 * \param[in] forceTreatment    Tells how the output forces should be treated. The gathering kernel either stores
 *                              the output reciprocal forces into the host array, or copies its contents to the GPU first
 *                              and accumulates. The reduction is non-atomic.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_gather(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme),
                                              gmx_wallcycle *GPU_FUNC_ARGUMENT(wcycle),
                                              PmeForceOutputHandling GPU_FUNC_ARGUMENT(forceTreatment)) GPU_FUNC_TERM

/*! \brief
 * Blocks until PME GPU tasks are completed, and gets the output forces and virial/energy
 * (if they were to be computed).
 *
 * \param[in]  pme            The PME data structure.
 * \param[out] wcycle         The wallclock counter.
 * \param[out] forces         The output forces.
 * \param[out] virial         The output virial matrix.
 * \param[out] energy         The output energy.
 */
GPU_FUNC_QUALIFIER void pme_gpu_wait_finish_task(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme),
                                                 gmx_wallcycle *GPU_FUNC_ARGUMENT(wcycle),
                                                 gmx::ArrayRef<const gmx::RVec> *GPU_FUNC_ARGUMENT(forces),
                                                 matrix GPU_FUNC_ARGUMENT(virial),
                                                 real *GPU_FUNC_ARGUMENT(energy)) GPU_FUNC_TERM

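/*
 * Illustrative per-step call sequence for the GPU codepath (a sketch of assumed caller-side
 * scheduling, not the authoritative one; box, x, wcycle, pmeFlags, forces, virial and energy
 * are the caller's variables):
 *
 *   pme_gpu_prepare_computation(pme, haveDynamicBox, box, wcycle, pmeFlags);
 *   pme_gpu_launch_spread(pme, x, wcycle);
 *   pme_gpu_launch_complex_transforms(pme, wcycle);
 *   pme_gpu_launch_gather(pme, wcycle, PmeForceOutputHandling::Set);
 *   // ... overlap with other (e.g. short-ranged) work ...
 *   pme_gpu_wait_finish_task(pme, wcycle, &forces, virial, &energy);
 *   pme_gpu_reinit_computation(pme, wcycle);
 */
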
/*! \brief
 * Attempts to complete PME GPU tasks.
 *
 * The \p completionKind argument controls whether the function blocks until all
 * enqueued PME GPU tasks have completed (as pme_gpu_wait_finish_task() does) or only
 * checks and returns immediately if they have not.
 * When blocking, or when the tasks have completed, it also gets the output forces
 * by assigning the ArrayRef to the \p forces pointer passed in.
 * Virial/energy are also outputs if they were to be computed.
 *
 * Note: this also launches the reinitialization of the PME output buffers.
 * TODO: this should be moved out to avoid miscounting its wall-time (it is currently counted as wait instead of launch).
 *
 * \param[in]  pme            The PME data structure.
 * \param[in]  wcycle         The wallclock counter.
 * \param[out] forces         The output forces.
 * \param[out] virial         The output virial matrix.
 * \param[out] energy         The output energy.
 * \param[in]  completionKind Indicates whether PME task completion should only be checked rather than waited for.
 * \returns                   True if the PME GPU tasks have completed.
 */
GPU_FUNC_QUALIFIER bool pme_gpu_try_finish_task(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme),
                                                gmx_wallcycle *GPU_FUNC_ARGUMENT(wcycle),
                                                gmx::ArrayRef<const gmx::RVec> *GPU_FUNC_ARGUMENT(forces),
                                                matrix GPU_FUNC_ARGUMENT(virial),
                                                real *GPU_FUNC_ARGUMENT(energy),
                                                GpuTaskCompletion GPU_FUNC_ARGUMENT(completionKind)) GPU_FUNC_TERM_WITH_RETURN(false)

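/*
 * Polling sketch (assumes the GpuTaskCompletion enum, declared elsewhere, provides a
 * non-blocking Check enumerator alongside a blocking Wait; doSomeOtherCpuWork() is a
 * hypothetical placeholder for caller-side work):
 *
 *   while (!pme_gpu_try_finish_task(pme, wcycle, &forces, virial, &energy,
 *                                   GpuTaskCompletion::Check))
 *   {
 *       doSomeOtherCpuWork();   // overlap CPU work with the outstanding PME GPU tasks
 *   }
 */
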
/*! \brief
 * The PME GPU reinitialization function that is called both at the end of any PME computation and on any load balancing.
 *
 * Clears the internal grid and energy/virial buffers; it is not safe to start
 * the PME computation without calling this.
 * Note that unlike in the nbnxn module, the force buffer does not need clearing.
 *
 * \todo Rename this function to *clear* -- it clearly only does output resetting,
 * and we should be clear about what the function does.
 *
 * \param[in] pme            The PME data structure.
 * \param[in] wcycle         The wallclock counter.
 */
GPU_FUNC_QUALIFIER void pme_gpu_reinit_computation(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme),
                                                   gmx_wallcycle *GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM

#endif