/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \libinternal \file
 *
 * \brief This file contains function declarations necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \inlibraryapi
 * \ingroup module_ewald
 */
#ifndef GMX_EWALD_PME_H
#define GMX_EWALD_PME_H

#include <string>

#include "gromacs/gpu_utils/devicebuffer_datatype.h"
#include "gromacs/gpu_utils/gpu_macros.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"
struct gmx_hw_info_t;
struct t_commrec;
struct t_inputrec;
struct t_nrnb;
struct PmeGpu;
struct gmx_wallclock_gpu_pme_t;
struct gmx_wallcycle;
struct gmx_device_info_t;
struct gmx_enerdata_t;
struct gmx_mtop_t;
struct gmx_pme_t;
struct NumPmeDomains;

enum class GpuTaskCompletion;
class PmeGpuProgram;
class GpuEventSynchronizer;

namespace gmx
{
class ForceWithVirial;
class MDLogger;
enum class PinningPolicy : int;
} // namespace gmx
/*! \brief Possible PME codepaths on a rank.
 * \todo: make this enum class with gmx_pme_t C++ refactoring
 */
enum PmeRunMode
{
    None,  //!< No PME task is done
    CPU,   //!< Whole PME computation is done on CPU
    GPU,   //!< Whole PME computation is done on GPU
    Mixed, //!< Mixed mode: only spread and gather run on GPU; FFT and solving are done on CPU.
};
//! PME gathering output forces treatment
enum class PmeForceOutputHandling
{
    Set,             /**< Gather simply writes into provided force buffer */
    ReduceWithInput, /**< Gather adds its output to the buffer.
                          On GPU, that means additional H2D copy before the kernel launch. */
};
/*! \brief Return the smallest allowed PME grid size for \p pmeOrder */
int minimalPmeGridSize(int pmeOrder);

//! Return whether the grid of \c pme is identical to \c grid_size.
bool gmx_pme_grid_matches(const gmx_pme_t& pme, const ivec grid_size);
/*! \brief Check restrictions on pme_order and the PME grid nkx,nky,nkz.
 *
 * With errorsAreFatal=true, an exception or fatal error is generated
 * on violation of restrictions.
 * With errorsAreFatal=false, false is returned on violation of restrictions.
 * When all restrictions are obeyed, true is returned.
 * Argument useThreads tells if any MPI rank doing PME uses more than 1 thread.
 * If useThreads is unknown at the time of the call, pass true for conservative checking.
 *
 * The PME GPU restrictions are checked separately during pme_gpu_init().
 */
bool gmx_pme_check_restrictions(int  pme_order,
                                int  nkx,
                                int  nky,
                                int  nkz,
                                int  numPmeDomainsAlongX,
                                bool useThreads,
                                bool errorsAreFatal);
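
/* A minimal usage sketch (the grid and domain values here are hypothetical):
 *
 *   // Validate a 4th-order PME setup on a 64^3 grid with 2 PME domains
 *   // along x, conservatively assuming threaded PME ranks; returns false
 *   // on violation instead of aborting.
 *   bool ok = gmx_pme_check_restrictions(4, 64, 64, 64, 2, true, false);
 */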
/*! \brief Construct PME data
 *
 * \throws  gmx::InconsistentInputError if input grid sizes/PME order are inconsistent.
 * \returns Pointer to newly allocated and initialized PME data.
 *
 * \todo We should evolve something like a \c GpuManager that holds \c
 * gmx_device_info_t * and \c PmeGpuProgram* and perhaps other
 * related things whose lifetime can/should exceed that of a task (or
 * perhaps task manager). See Redmine #2522.
 */
gmx_pme_t* gmx_pme_init(const t_commrec*         cr,
                        const NumPmeDomains&     numPmeDomains,
                        const t_inputrec*        ir,
                        gmx_bool                 bFreeEnergy_q,
                        gmx_bool                 bFreeEnergy_lj,
                        gmx_bool                 bReproducible,
                        real                     ewaldcoeff_q,
                        real                     ewaldcoeff_lj,
                        int                      nthread,
                        PmeRunMode               runMode,
                        PmeGpu*                  pmeGpu,
                        const gmx_device_info_t* gpuInfo,
                        const PmeGpuProgram*     pmeGpuProgram,
                        const gmx::MDLogger&     mdlog);
/*! \brief As gmx_pme_init, but takes most settings, except the grid/Ewald coefficients, from
 * pme_src. This is only called when the PME cut-off/grid size changes.
 */
void gmx_pme_reinit(gmx_pme_t**       pmedata,
                    const t_commrec*  cr,
                    gmx_pme_t*        pme_src,
                    const t_inputrec* ir,
                    const ivec        grid_size,
                    real              ewaldcoeff_q,
                    real              ewaldcoeff_lj);
/*! \brief Destroys the PME data structure.*/
void gmx_pme_destroy(gmx_pme_t* pme);
/*! \brief Flag values that control what gmx_pme_do() will calculate
 *
 * These can be combined with bitwise-OR if more than one thing is required.
 */
#define GMX_PME_SPREAD (1 << 0)
#define GMX_PME_SOLVE (1 << 1)
#define GMX_PME_CALC_F (1 << 2)
#define GMX_PME_CALC_ENER_VIR (1 << 3)
/* This forces the grid to be backtransformed even without GMX_PME_CALC_F */
#define GMX_PME_CALC_POT (1 << 4)

#define GMX_PME_DO_ALL_F (GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F)
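
/* For example, a caller that wants forces plus energy and virial might
 * compose the flags like this (a sketch; which terms are needed is the
 * caller's decision, and computeEnergyAndVirial is a hypothetical condition):
 *
 *   int flags = GMX_PME_DO_ALL_F;        // spread + solve + forces
 *   if (computeEnergyAndVirial)
 *   {
 *       flags |= GMX_PME_CALC_ENER_VIR;
 *   }
 */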
/*! \brief Do a PME calculation on a CPU for the long range electrostatics and/or LJ.
 *
 * Computes the PME forces and the energy and virial, when requested,
 * for all atoms in \p coordinates. Forces, when requested, are added
 * to the buffer \p forces, which is allowed to contain more elements
 * than the number of elements in \p coordinates.
 * The meaning of \p flags is defined above, and determines which
 * parts of the calculation are performed.
 *
 * \return 0 indicates all is well; non-zero is an error code.
 */
int gmx_pme_do(struct gmx_pme_t*              pme,
               gmx::ArrayRef<const gmx::RVec> coordinates,
               gmx::ArrayRef<gmx::RVec>       forces,
               real                           chargeA[],
               real                           chargeB[],
               real                           c6A[],
               real                           c6B[],
               real                           sigmaA[],
               real                           sigmaB[],
               const matrix                   box,
               const t_commrec*               cr,
               int                            maxshift_x,
               int                            maxshift_y,
               t_nrnb*                        nrnb,
               gmx_wallcycle*                 wcycle,
               matrix                         vir_q,
               matrix                         vir_lj,
               real*                          energy_q,
               real*                          energy_lj,
               real                           lambda_q,
               real                           lambda_lj,
               real*                          dvdlambda_q,
               real*                          dvdlambda_lj,
               int                            flags);
/*! \brief Calculate the PME grid energy V for n charges.
 *
 * The potential (found in \p pme) must have been found already with a
 * call to gmx_pme_do() with at least GMX_PME_SPREAD and GMX_PME_SOLVE
 * specified. Note that the charges are not spread on the grid in the
 * pme struct. Currently does not work in parallel or with free
 * energy.
 */
void gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q, real* V);
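
/* A sketch of the required call order (buffers are hypothetical and the
 * remaining gmx_pme_do() arguments are elided; serial, no free energy):
 *
 *   // 1) Spread and solve so that the potential grid exists in `pme`;
 *   //    no force calculation is requested here.
 *   gmx_pme_do(pme, coordinates, forces, ..., GMX_PME_SPREAD | GMX_PME_SOLVE);
 *
 *   // 2) Evaluate the grid energy for test charges q at positions x.
 *   real V;
 *   gmx_pme_calc_energy(pme, x, q, &V);
 */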
/*! \brief
 * This function updates the local atom data on GPU after DD (charges, coordinates, etc.).
 * TODO: it should update the PME CPU atom data as well.
 * (Currently, the PME CPU path instead gets the input pointers passed to each gmx_pme_do() call.)
 *
 * \param[in,out] pme       The PME structure.
 * \param[in]     numAtoms  The number of particles.
 * \param[in]     charges   The pointer to the array of particle charges.
 */
void gmx_pme_reinit_atoms(gmx_pme_t* pme, int numAtoms, const real* charges);
/* A block of PME GPU functions */

/*! \brief Checks whether the GROMACS build allows running PME on a GPU.
 * TODO: this partly duplicates an internal PME assert function
 * pme_gpu_check_restrictions(), except that it works with a
 * formed gmx_pme_t structure. Should that one go away/work with inputrec?
 *
 * \param[out] error  If non-null, the error message when PME is not supported on GPU.
 *
 * \returns true if PME can run on GPU on this build, false otherwise.
 */
bool pme_gpu_supports_build(std::string* error);
/*! \brief Checks whether the detected (GPU) hardware allows running PME on a GPU.
 *
 * \param[in]  hwinfo  Information about the detected hardware
 * \param[out] error   If non-null, the error message when PME is not supported on GPU.
 *
 * \returns true if PME can run on GPU on this hardware, false otherwise.
 */
bool pme_gpu_supports_hardware(const gmx_hw_info_t& hwinfo, std::string* error);
/*! \brief Checks whether the input system allows running PME on a GPU.
 * TODO: this partly duplicates an internal PME assert function
 * pme_gpu_check_restrictions(), except that it works with a
 * formed gmx_pme_t structure. Should that one go away/work with inputrec?
 *
 * \param[in]  ir     Input system.
 * \param[in]  mtop   Complete system topology, used to check whether an FE simulation perturbs charges.
 * \param[out] error  If non-null, the error message if the input is not supported on GPU.
 *
 * \returns true if PME can run on GPU with this input, false otherwise.
 */
bool pme_gpu_supports_input(const t_inputrec& ir, const gmx_mtop_t& mtop, std::string* error);
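
/* The three checks above compose naturally; a task-assignment sketch
 * (hwinfo, ir and mtop assumed available from the caller) could read:
 *
 *   std::string error;
 *   const bool  canRunPmeOnGpu = pme_gpu_supports_build(&error)
 *                                && pme_gpu_supports_hardware(hwinfo, &error)
 *                                && pme_gpu_supports_input(ir, mtop, &error);
 *   // Thanks to short-circuiting, `error` describes the first failed check.
 */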
/*! \brief
 * Returns the active PME codepath (CPU, GPU, mixed).
 * \todo This is rather static data that should be managed by the higher-level task scheduler.
 *
 * \param[in] pme  The PME data structure.
 * \returns        The active PME codepath.
 */
PmeRunMode pme_run_mode(const gmx_pme_t* pme);
/*! \libinternal \brief
 * Return the pinning policy appropriate for this build configuration
 * for relevant buffers used for PME task on this rank (e.g. running
 * on a GPU). */
gmx::PinningPolicy pme_get_pinning_policy();
/*! \brief
 * Tells if PME is enabled to run on GPU (not necessarily active at the moment).
 * \todo This is rather static data that should be managed by the hardware assignment manager.
 * For now, it is synonymous with the active PME codepath (in the absence of dynamic switching).
 *
 * \param[in] pme  The PME data structure.
 * \returns        true if PME can run on GPU, false otherwise.
 */
inline bool pme_gpu_task_enabled(const gmx_pme_t* pme)
{
    return (pme != nullptr) && (pme_run_mode(pme) != PmeRunMode::CPU);
}
/*! \brief Returns the size of the padding needed by GPU version of PME in the coordinates array.
 *
 * \param[in] pme  The PME data structure.
 */
GPU_FUNC_QUALIFIER int pme_gpu_get_padding_size(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(0);
// The following functions are all the PME GPU entry points,
// currently inlining to nothing on non-CUDA builds.

/*! \brief
 * Resets the PME GPU timings. To be called at the reset step.
 *
 * \param[in] pme  The PME structure.
 */
GPU_FUNC_QUALIFIER void pme_gpu_reset_timings(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme)) GPU_FUNC_TERM;

/*! \brief
 * Copies the PME GPU timings to the gmx_wallclock_gpu_pme_t structure (for log output). To be called at the run end.
 *
 * \param[in]  pme      The PME structure.
 * \param[out] timings  The gmx_wallclock_gpu_pme_t structure to fill.
 */
GPU_FUNC_QUALIFIER void pme_gpu_get_timings(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                            gmx_wallclock_gpu_pme_t* GPU_FUNC_ARGUMENT(timings)) GPU_FUNC_TERM;
/* The main PME GPU functions */

/*! \brief
 * Prepares the PME GPU computation (updating the stored unit cell box if needed).
 *
 * \param[in] pme                   The PME data structure.
 * \param[in] needToUpdateBox       Tells if the stored unit cell parameters should be updated from \p box.
 * \param[in] box                   The unit cell box.
 * \param[in] wcycle                The wallclock counter.
 * \param[in] flags                 The combination of flags to affect this PME computation.
 *                                  The flags are the GMX_PME_ flags from pme.h.
 * \param[in] useGpuForceReduction  Whether PME forces are reduced on GPU this step or should be downloaded for CPU reduction.
 */
GPU_FUNC_QUALIFIER void pme_gpu_prepare_computation(gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                                    bool GPU_FUNC_ARGUMENT(needToUpdateBox),
                                                    const matrix GPU_FUNC_ARGUMENT(box),
                                                    gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle),
                                                    int GPU_FUNC_ARGUMENT(flags),
                                                    bool GPU_FUNC_ARGUMENT(useGpuForceReduction)) GPU_FUNC_TERM;
/*! \brief
 * Launches the first stage of PME on GPU - the spreading kernel.
 *
 * \param[in] pme             The PME data structure.
 * \param[in] xReadyOnDevice  Event synchronizer indicating that the coordinates are ready in
 *                            device memory; nullptr is allowed only on separate PME ranks.
 * \param[in] wcycle          The wallclock counter.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_spread(gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                              GpuEventSynchronizer* GPU_FUNC_ARGUMENT(xReadyOnDevice),
                                              gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM;
/*! \brief
 * Launches the middle stages of PME (FFT R2C, solving, FFT C2R) either on GPU or on CPU, depending on the run mode.
 *
 * \param[in] pme     The PME data structure.
 * \param[in] wcycle  The wallclock counter.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_complex_transforms(gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                                          gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM;
/*! \brief
 * Launches the last stage of PME on GPU - force gathering and D2H force transfer.
 *
 * \param[in] pme             The PME data structure.
 * \param[in] wcycle          The wallclock counter.
 * \param[in] forceTreatment  Tells how the output forces should be treated. The gathering kernel
 *                            either stores the output reciprocal forces into the host array, or
 *                            copies the array's contents to the GPU first and accumulates.
 *                            The reduction is non-atomic.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_gather(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                              gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle),
                                              PmeForceOutputHandling GPU_FUNC_ARGUMENT(forceTreatment)) GPU_FUNC_TERM;
/*! \brief
 * Attempts to complete PME GPU tasks.
 *
 * The \p completionKind argument controls whether the function blocks until all
 * enqueued PME GPU tasks have completed (as pme_gpu_wait_finish_task() does), or
 * only checks and returns immediately if they have not.
 * When blocking, or when the tasks have completed, the function also gets the
 * output forces by assigning the ArrayRef to the \p forces pointer passed in.
 * Virial/energy are also outputs if they were to be computed.
 *
 * \param[in]  pme              The PME data structure.
 * \param[in]  flags            The combination of flags to affect this PME computation.
 *                              The flags are the GMX_PME_ flags from pme.h.
 * \param[in]  wcycle           The wallclock counter.
 * \param[out] forceWithVirial  The output force and virial.
 * \param[out] enerd            The output energies.
 * \param[in]  completionKind   Indicates whether PME task completion should only be checked
 *                              rather than waited for.
 * \returns                     True if the PME GPU tasks have completed.
 */
GPU_FUNC_QUALIFIER bool pme_gpu_try_finish_task(gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                                int GPU_FUNC_ARGUMENT(flags),
                                                gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle),
                                                gmx::ForceWithVirial* GPU_FUNC_ARGUMENT(forceWithVirial),
                                                gmx_enerdata_t* GPU_FUNC_ARGUMENT(enerd),
                                                GpuTaskCompletion GPU_FUNC_ARGUMENT(completionKind))
        GPU_FUNC_TERM_WITH_RETURN(false);
/*! \brief
 * Blocks until PME GPU tasks are completed, and gets the output forces and virial/energy
 * (if they were to be computed).
 *
 * \param[in]  pme              The PME data structure.
 * \param[in]  flags            The combination of flags to affect this PME computation.
 *                              The flags are the GMX_PME_ flags from pme.h.
 * \param[in]  wcycle           The wallclock counter.
 * \param[out] forceWithVirial  The output force and virial.
 * \param[out] enerd            The output energies.
 */
GPU_FUNC_QUALIFIER void pme_gpu_wait_and_reduce(gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                                int GPU_FUNC_ARGUMENT(flags),
                                                gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle),
                                                gmx::ForceWithVirial* GPU_FUNC_ARGUMENT(forceWithVirial),
                                                gmx_enerdata_t* GPU_FUNC_ARGUMENT(enerd)) GPU_FUNC_TERM;
/*! \brief
 * The PME GPU reinitialization function that is called both at the end of any PME computation and on any load balancing.
 *
 * Clears the internal grid and energy/virial buffers; it is not safe to start
 * the PME computation without calling this.
 * Note that unlike in the nbnxn module, the force buffer does not need clearing.
 *
 * \todo Rename this function to *clear* -- it clearly only does output resetting
 * and we should be clear about what the function does.
 *
 * \param[in] pme     The PME data structure.
 * \param[in] wcycle  The wallclock counter.
 */
GPU_FUNC_QUALIFIER void pme_gpu_reinit_computation(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                                   gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM;
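
/* A sketch of how these entry points chain together for one step on a rank
 * running PME on a GPU (flags and buffers as above; doOtherCpuWork() is a
 * placeholder for overlapped CPU work, and error handling is omitted):
 *
 *   pme_gpu_prepare_computation(pme, boxChanged, box, wcycle, flags, useGpuForceReduction);
 *   pme_gpu_launch_spread(pme, xReadyOnDevice, wcycle);
 *   pme_gpu_launch_complex_transforms(pme, wcycle); // FFT R2C, solve, FFT C2R
 *   pme_gpu_launch_gather(pme, wcycle, PmeForceOutputHandling::Set);
 *
 *   // Either poll for completion while overlapping other CPU work ...
 *   while (!pme_gpu_try_finish_task(pme, flags, wcycle, &forceWithVirial, enerd,
 *                                   GpuTaskCompletion::Check))
 *   {
 *       doOtherCpuWork();
 *   }
 *   // ... or simply block until the forces (and energy/virial) are ready:
 *   // pme_gpu_wait_and_reduce(pme, flags, wcycle, &forceWithVirial, enerd);
 *
 *   pme_gpu_reinit_computation(pme, wcycle); // clear grids/buffers for the next step
 */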
/*! \brief Get pointer to device copy of coordinate data.
 *
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to coordinate data.
 */
GPU_FUNC_QUALIFIER DeviceBuffer<float> pme_gpu_get_device_x(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(DeviceBuffer<float>{});
/*! \brief Set pointer to device copy of coordinate data.
 *
 * \param[in] pme  The PME data structure.
 * \param[in] d_x  The pointer to the positions buffer to be set.
 */
GPU_FUNC_QUALIFIER void pme_gpu_set_device_x(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                             DeviceBuffer<float> GPU_FUNC_ARGUMENT(d_x)) GPU_FUNC_TERM;
/*! \brief Get pointer to device copy of force data.
 *
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to force data.
 */
GPU_FUNC_QUALIFIER void* pme_gpu_get_device_f(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(nullptr);

/*! \brief Returns the pointer to the GPU stream.
 *
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to GPU stream object.
 */
GPU_FUNC_QUALIFIER void* pme_gpu_get_device_stream(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(nullptr);

/*! \brief Returns the pointer to the GPU context.
 *
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to GPU context object.
 */
GPU_FUNC_QUALIFIER void* pme_gpu_get_device_context(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(nullptr);
/*! \brief Get pointer to the device synchronizer object that allows syncing on PME force calculation completion.
 *
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to synchronizer.
 */
GPU_FUNC_QUALIFIER GpuEventSynchronizer* pme_gpu_get_f_ready_synchronizer(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(nullptr);

#endif // GMX_EWALD_PME_H