/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \libinternal \file
 *
 * \brief This file contains function declarations necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */

#ifndef GMX_EWALD_PME_H
#define GMX_EWALD_PME_H

#include <string>

#include "gromacs/gpu_utils/devicebuffer_datatype.h"
#include "gromacs/gpu_utils/gpu_macros.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"

struct interaction_const_t;
struct t_commrec;
struct t_forcerec;
struct t_inputrec;
struct gmx_hw_info_t;
struct gmx_mtop_t;
struct gmx_pme_t;
struct gmx_wallclock_gpu_pme_t;
struct gmx_wallcycle;
struct gmx_device_info_t;
struct gmx_enerdata_t;
struct NumPmeDomains;

enum class GpuTaskCompletion;
class PmeGpuProgram;
class GpuEventSynchronizer;

//! Convenience name.
using PmeGpuProgramHandle = const PmeGpuProgram*;

namespace gmx
{
class ForceWithVirial;
class MDLogger;
class PmePpCommGpu;
enum class PinningPolicy : int;
} // namespace gmx

/*! \brief Possible PME codepaths on a rank.
 * \todo: make this enum class with gmx_pme_t C++ refactoring
 */
enum class PmeRunMode
{
    None,  //!< No PME task is done
    CPU,   //!< Whole PME computation is done on CPU
    GPU,   //!< Whole PME computation is done on GPU
    Mixed, //!< Mixed mode: only spread and gather run on GPU; FFT and solving are done on CPU.
};

//! PME gathering output forces treatment
enum class PmeForceOutputHandling
{
    Set,             /**< Gather simply writes into provided force buffer */
    ReduceWithInput, /**< Gather adds its output to the buffer.
                        On GPU, that means additional H2D copy before the kernel launch. */
};

/*! \brief Return the smallest allowed PME grid size for \p pmeOrder */
int minimalPmeGridSize(int pmeOrder);

/*! \brief Check restrictions on pme_order and the PME grid nkx,nky,nkz.
 *
 * With errorsAreFatal=true, an exception or fatal error is generated
 * on violation of restrictions.
 * With errorsAreFatal=false, false is returned on violation of restrictions.
 * When all restrictions are obeyed, true is returned.
 * Argument useThreads tells if any MPI rank doing PME uses more than one thread.
 * If useThreads is unknown at the time of calling, pass true for conservative checking.
 *
 * The PME GPU restrictions are checked separately during pme_gpu_init().
 */
bool gmx_pme_check_restrictions(int  pme_order,
                                int  nkx,
                                int  nky,
                                int  nkz,
                                int  numPmeDomainsAlongX,
                                bool useThreads,
                                bool errorsAreFatal);
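
/* Illustrative sketch (not part of the API; all values made up): a caller
 * that does not yet know the threading setup can check a grid non-fatally:
 *
 *   const bool gridOk = gmx_pme_check_restrictions(4,          // pme_order
 *                                                  48, 48, 48, // nkx, nky, nkz
 *                                                  1,          // numPmeDomainsAlongX
 *                                                  true,       // useThreads unknown: be conservative
 *                                                  false);     // report instead of aborting
 */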

/*! \brief Construct PME data
 *
 * \throws  gmx::InconsistentInputError if input grid sizes/PME order are inconsistent.
 * \returns Pointer to newly allocated and initialized PME data.
 *
 * \todo We should evolve something like a \c GpuManager that holds \c
 * gmx_device_info_t * and \c PmeGpuProgramHandle and perhaps other
 * related things whose lifetime can/should exceed that of a task (or
 * perhaps task manager). See Redmine #2522.
 */
gmx_pme_t* gmx_pme_init(const t_commrec*     cr,
                        const NumPmeDomains& numPmeDomains,
                        const t_inputrec*    ir,
                        gmx_bool             bFreeEnergy_q,
                        gmx_bool             bFreeEnergy_lj,
                        gmx_bool             bReproducible,
                        /* ... */
                        const gmx_device_info_t* gpuInfo,
                        PmeGpuProgramHandle      pmeGpuProgram,
                        const gmx::MDLogger&     mdlog);

/*! \brief Destroys the PME data structure.*/
void gmx_pme_destroy(gmx_pme_t* pme);

/*! \brief Flag values that control what gmx_pme_do() will calculate
 *
 * These can be combined with bitwise-OR if more than one thing is required.
 */
#define GMX_PME_SPREAD (1 << 0)
#define GMX_PME_SOLVE (1 << 1)
#define GMX_PME_CALC_F (1 << 2)
#define GMX_PME_CALC_ENER_VIR (1 << 3)
/* This forces the grid to be backtransformed even without GMX_PME_CALC_F */
#define GMX_PME_CALC_POT (1 << 4)

#define GMX_PME_DO_ALL_F (GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F)
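
/* Illustrative sketch: a caller wanting forces plus energies and the virial
 * would combine the flags above with bitwise-OR, e.g.
 *
 *   const int flags = GMX_PME_DO_ALL_F | GMX_PME_CALC_ENER_VIR;
 *
 * and pass the result to gmx_pme_do() below.
 */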

/*! \brief Do a PME calculation on a CPU for the long-range electrostatics and/or LJ.
 *
 * Computes the PME forces and the energy and virial, when requested,
 * for all atoms in \p coordinates. Forces, when requested, are added
 * to the buffer \p forces, which is allowed to contain more elements
 * than the number of elements in \p coordinates.
 * The meaning of \p flags is defined above, and determines which
 * parts of the calculation are performed.
 *
 * \return 0 indicates all is well; non-zero is an error code.
 */
int gmx_pme_do(struct gmx_pme_t*              pme,
               gmx::ArrayRef<const gmx::RVec> coordinates,
               gmx::ArrayRef<gmx::RVec>       forces,
               /* ... */
               gmx_wallcycle* wcycle,
               /* ... */
               int flags);

/*! \brief Called on the nodes that do PME exclusively */
int gmx_pmeonly(struct gmx_pme_t* pme,
                /* ... */
                gmx_wallcycle*            wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                /* ... */);

/*! \brief Calculate the PME grid energy V for n charges.
 *
 * The potential (found in \p pme) must have been found already with a
 * call to gmx_pme_do() with at least GMX_PME_SPREAD and GMX_PME_SOLVE
 * specified. Note that the charges are not spread on the grid in the
 * pme struct. Currently does not work in parallel or with free
 * energy.
 */
void gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q, real* V);
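
/* Illustrative sketch (buffers hypothetical): the grid potential must exist
 * before the energy of a set of test charges can be evaluated, e.g.
 *
 *   gmx_pme_do(pme, coordinates, forces, ..., wcycle, ..., GMX_PME_SPREAD | GMX_PME_SOLVE);
 *   real energy = 0;
 *   gmx_pme_calc_energy(pme, testPositions, testCharges, &energy);
 */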

/*! \brief Send the charges and maxshift to our PME-only node. */
void gmx_pme_send_parameters(const t_commrec*           cr,
                             const interaction_const_t* ic,
                             gmx_bool                   bFreeEnergy_q,
                             gmx_bool                   bFreeEnergy_lj,
                             /* ... */);

/*! \brief Send the coordinates to our PME-only node and request a PME calculation */
void gmx_pme_send_coordinates(t_forcerec* fr,
                              /* ... */
                              bool                  useGpuPmePpComms,
                              bool                  reinitGpuPmePpComms,
                              bool                  sendCoordinatesFromGpu,
                              GpuEventSynchronizer* coordinatesReadyOnDeviceEvent,
                              gmx_wallcycle*        wcycle);

/*! \brief Tell our PME-only node to finish */
void gmx_pme_send_finish(const t_commrec* cr);

/*! \brief Tell our PME-only node to reset all cycle and flop counters */
void gmx_pme_send_resetcounters(const t_commrec* cr, int64_t step);

/*! \brief PP nodes receive the long range forces from the PME nodes */
void gmx_pme_receive_f(gmx::PmePpCommGpu* pmePpCommGpu,
                       /* ... */
                       gmx::ForceWithVirial* forceWithVirial,
                       /* ... */
                       bool useGpuPmePpComms,
                       bool receivePmeForceToGpu,
                       /* ... */);

/*! \brief
 * This function updates the local atom data on GPU after DD (charges, coordinates, etc.).
 * TODO: it should update the PME CPU atom data as well.
 * (Currently the PME CPU code in gmx_pme_do() gets passed the input pointers for each computation.)
 *
 * \param[in,out] pme       The PME structure.
 * \param[in]     numAtoms  The number of particles.
 * \param[in]     charges   The pointer to the array of particle charges.
 */
void gmx_pme_reinit_atoms(gmx_pme_t* pme, int numAtoms, const real* charges);

/* A block of PME GPU functions */

/*! \brief Checks whether the GROMACS build allows running PME on a GPU.
 * TODO: this partly duplicates an internal PME assert function
 * pme_gpu_check_restrictions(), except that it works with a fully
 * formed gmx_pme_t structure. Should that one go away/work with inputrec?
 *
 * \param[out] error  If non-null, the error message when PME is not supported on GPU.
 *
 * \returns true if PME can run on GPU on this build, false otherwise.
 */
bool pme_gpu_supports_build(std::string* error);

/*! \brief Checks whether the detected (GPU) hardware allows running PME on a GPU.
 *
 * \param[in]  hwinfo  Information about the detected hardware
 * \param[out] error   If non-null, the error message when PME is not supported on GPU.
 *
 * \returns true if PME can run on this GPU hardware, false otherwise.
 */
bool pme_gpu_supports_hardware(const gmx_hw_info_t& hwinfo, std::string* error);

/*! \brief Checks whether the input system allows running PME on a GPU.
 * TODO: this partly duplicates an internal PME assert function
 * pme_gpu_check_restrictions(), except that it works with a fully
 * formed gmx_pme_t structure. Should that one go away/work with inputrec?
 *
 * \param[in]  ir     Input system.
 * \param[in]  mtop   Complete system topology to check if an FE simulation perturbs charges.
 * \param[out] error  If non-null, the error message if the input is not supported on GPU.
 *
 * \returns true if PME can run on GPU with this input, false otherwise.
 */
bool pme_gpu_supports_input(const t_inputrec& ir, const gmx_mtop_t& mtop, std::string* error);
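
/* Illustrative sketch: the three pme_gpu_supports_* checks above are
 * independent, so a task-assignment caller might chain them (hwinfo, ir and
 * mtop are assumed to be in scope):
 *
 *   std::string error;
 *   const bool pmeGpuUsable = pme_gpu_supports_build(&error)
 *                             && pme_gpu_supports_hardware(hwinfo, &error)
 *                             && pme_gpu_supports_input(ir, mtop, &error);
 *   // On failure, error holds the first applicable message.
 */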

/*! \brief
 * Returns the active PME codepath (CPU, GPU, mixed).
 * \todo This is rather static data that should be managed by the higher level task scheduler.
 *
 * \param[in] pme  The PME data structure.
 * \returns        The active PME codepath.
 */
PmeRunMode pme_run_mode(const gmx_pme_t* pme);

/*! \libinternal \brief
 * Return the pinning policy appropriate for this build configuration
 * for relevant buffers used for PME task on this rank (e.g. running
 * on a GPU).
 */
gmx::PinningPolicy pme_get_pinning_policy();

/*! \brief
 * Tells if PME is enabled to run on GPU (not necessarily active at the moment).
 * \todo This is rather static data that should be managed by the hardware assignment manager.
 * For now, it is synonymous with the active PME codepath (in the absence of dynamic switching).
 *
 * \param[in] pme  The PME data structure.
 * \returns        true if PME can run on GPU, false otherwise.
 */
inline bool pme_gpu_task_enabled(const gmx_pme_t* pme)
{
    return (pme != nullptr) && (pme_run_mode(pme) != PmeRunMode::CPU);
}
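
/* Illustrative sketch: callers typically guard the GPU entry points declared
 * below with this predicate, e.g.
 *
 *   if (pme_gpu_task_enabled(pme))
 *   {
 *       pme_gpu_launch_spread(pme, xReadyOnDevice, wcycle);
 *   }
 */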

/*! \brief Returns the size of the padding needed by GPU version of PME in the coordinates array.
 *
 * \param[in] pme  The PME data structure.
 */
GPU_FUNC_QUALIFIER int pme_gpu_get_padding_size(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(0);

// The following functions are all the PME GPU entry points,
// currently inlining to nothing on non-CUDA builds.

/*! \brief
 * Resets the PME GPU timings. To be called at the reset step.
 *
 * \param[in] pme  The PME structure.
 */
GPU_FUNC_QUALIFIER void pme_gpu_reset_timings(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme)) GPU_FUNC_TERM;

/*! \brief
 * Copies the PME GPU timings to the gmx_wallclock_gpu_pme_t structure (for log output). To be called at the run end.
 *
 * \param[in] pme      The PME structure.
 * \param[in] timings  The gmx_wallclock_gpu_pme_t structure.
 */
GPU_FUNC_QUALIFIER void pme_gpu_get_timings(const gmx_pme_t*         GPU_FUNC_ARGUMENT(pme),
                                            gmx_wallclock_gpu_pme_t* GPU_FUNC_ARGUMENT(timings)) GPU_FUNC_TERM;

/* The main PME GPU functions */

/*! \brief
 * Prepares PME on GPU computation (updating the box if needed)
 *
 * \param[in] pme                   The PME data structure.
 * \param[in] needToUpdateBox       Tells if the stored unit cell parameters should be updated from \p box.
 * \param[in] box                   The unit cell box.
 * \param[in] wcycle                The wallclock counter.
 * \param[in] flags                 The combination of flags to affect this PME computation.
 *                                  The flags are the GMX_PME_ flags from pme.h.
 * \param[in] useGpuForceReduction  Whether PME forces are reduced on GPU this step or should be downloaded for CPU reduction
 */
GPU_FUNC_QUALIFIER void pme_gpu_prepare_computation(gmx_pme_t*     GPU_FUNC_ARGUMENT(pme),
                                                    bool           GPU_FUNC_ARGUMENT(needToUpdateBox),
                                                    const matrix   GPU_FUNC_ARGUMENT(box),
                                                    gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle),
                                                    int            GPU_FUNC_ARGUMENT(flags),
                                                    bool           GPU_FUNC_ARGUMENT(useGpuForceReduction)) GPU_FUNC_TERM;

/*! \brief
 * Launches the first stage of PME on GPU - the spreading kernel.
 *
 * \param[in] pme             The PME data structure.
 * \param[in] xReadyOnDevice  Event synchronizer indicating that the coordinates are ready in the device memory;
 *                            nullptr allowed only on separate PME ranks.
 * \param[in] wcycle          The wallclock counter.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_spread(gmx_pme_t*            GPU_FUNC_ARGUMENT(pme),
                                              GpuEventSynchronizer* GPU_FUNC_ARGUMENT(xReadyOnDevice),
                                              gmx_wallcycle*        GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM;

/*! \brief
 * Launches the middle stages of PME (FFT R2C, solving, FFT C2R) either on GPU or on CPU, depending on the run mode.
 *
 * \param[in] pme     The PME data structure.
 * \param[in] wcycle  The wallclock counter.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_complex_transforms(gmx_pme_t*     GPU_FUNC_ARGUMENT(pme),
                                                          gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM;

/*! \brief
 * Launches the last stage of PME on GPU - force gathering and D2H force transfer.
 *
 * \param[in] pme             The PME data structure.
 * \param[in] wcycle          The wallclock counter.
 * \param[in] forceTreatment  Tells how the data should be treated. The gathering kernel either
 *                            stores the output reciprocal forces into the host array, or copies
 *                            its contents to the GPU first and accumulates. The reduction is non-atomic.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_gather(const gmx_pme_t*       GPU_FUNC_ARGUMENT(pme),
                                              gmx_wallcycle*         GPU_FUNC_ARGUMENT(wcycle),
                                              PmeForceOutputHandling GPU_FUNC_ARGUMENT(forceTreatment)) GPU_FUNC_TERM;

/*! \brief
 * Attempts to complete PME GPU tasks.
 *
 * The \p completionKind argument controls whether the function blocks until all
 * enqueued PME GPU tasks have completed (as pme_gpu_wait_finish_task() does) or only
 * checks and returns immediately if they did not.
 * When blocking, or when the tasks have completed, it also collects the output
 * forces into \p forceWithVirial. Virial/energy are also outputs if they were
 * to be computed.
 *
 * \param[in]  pme              The PME data structure.
 * \param[in]  flags            The combination of flags to affect this PME computation.
 *                              The flags are the GMX_PME_ flags from pme.h.
 * \param[in]  wcycle           The wallclock counter.
 * \param[out] forceWithVirial  The output force and virial.
 * \param[out] enerd            The output energies.
 * \param[in]  completionKind   Indicates whether PME task completion should only be checked rather than waited for.
 *
 * \returns True if the PME GPU tasks have completed.
 */
GPU_FUNC_QUALIFIER bool pme_gpu_try_finish_task(gmx_pme_t*            GPU_FUNC_ARGUMENT(pme),
                                                int                   GPU_FUNC_ARGUMENT(flags),
                                                gmx_wallcycle*        GPU_FUNC_ARGUMENT(wcycle),
                                                gmx::ForceWithVirial* GPU_FUNC_ARGUMENT(forceWithVirial),
                                                gmx_enerdata_t*       GPU_FUNC_ARGUMENT(enerd),
                                                GpuTaskCompletion     GPU_FUNC_ARGUMENT(completionKind))
        GPU_FUNC_TERM_WITH_RETURN(false);
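
/* Illustrative sketch: with GpuTaskCompletion::Check (enum value assumed from
 * its use elsewhere in GROMACS task code), a caller can overlap CPU work with
 * the PME GPU tasks instead of blocking:
 *
 *   while (!pme_gpu_try_finish_task(pme, flags, wcycle, &forceWithVirial,
 *                                   enerd, GpuTaskCompletion::Check))
 *   {
 *       doOtherCpuWork(); // hypothetical
 *   }
 */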

/*! \brief
 * Blocks until PME GPU tasks are completed, and gets the output forces and virial/energy
 * (if they were to be computed).
 *
 * \param[in]  pme              The PME data structure.
 * \param[in]  flags            The combination of flags to affect this PME computation.
 *                              The flags are the GMX_PME_ flags from pme.h.
 * \param[in]  wcycle           The wallclock counter.
 * \param[out] forceWithVirial  The output force and virial.
 * \param[out] enerd            The output energies.
 */
GPU_FUNC_QUALIFIER void pme_gpu_wait_and_reduce(gmx_pme_t*            GPU_FUNC_ARGUMENT(pme),
                                                int                   GPU_FUNC_ARGUMENT(flags),
                                                gmx_wallcycle*        GPU_FUNC_ARGUMENT(wcycle),
                                                gmx::ForceWithVirial* GPU_FUNC_ARGUMENT(forceWithVirial),
                                                gmx_enerdata_t*       GPU_FUNC_ARGUMENT(enerd)) GPU_FUNC_TERM;

/*! \brief
 * The PME GPU reinitialization function that is called both at the end of any PME computation and on any load balancing.
 *
 * Clears the internal grid and energy/virial buffers; it is not safe to start
 * the PME computation without calling this.
 * Note that unlike in the nbnxn module, the force buffer does not need clearing.
 *
 * \todo Rename this function to *clear* -- it clearly only does output resetting
 * and we should be clear about what the function does.
 *
 * \param[in] pme     The PME data structure.
 * \param[in] wcycle  The wallclock counter.
 */
GPU_FUNC_QUALIFIER void pme_gpu_reinit_computation(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                                   gmx_wallcycle*   GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM;
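
/* Illustrative sketch of one step's PME GPU schedule built from the entry
 * points above (flags, box and synchronizers assumed to be set up by the caller):
 *
 *   pme_gpu_prepare_computation(pme, boxChanged, box, wcycle, flags, useGpuForceReduction);
 *   pme_gpu_launch_spread(pme, xReadyOnDevice, wcycle);
 *   pme_gpu_launch_complex_transforms(pme, wcycle); // GPU or CPU FFT+solve, per run mode
 *   pme_gpu_launch_gather(pme, wcycle, PmeForceOutputHandling::Set);
 *   pme_gpu_wait_and_reduce(pme, flags, wcycle, &forceWithVirial, enerd);
 *   pme_gpu_reinit_computation(pme, wcycle); // clear outputs for the next step
 */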

/*! \brief Get pointer to device copy of coordinate data.
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to coordinate data.
 */
GPU_FUNC_QUALIFIER DeviceBuffer<float> pme_gpu_get_device_x(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(DeviceBuffer<float>{});

/*! \brief Set pointer to device copy of coordinate data.
 * \param[in] pme  The PME data structure.
 * \param[in] d_x  The pointer to the positions buffer to be set.
 */
GPU_FUNC_QUALIFIER void pme_gpu_set_device_x(const gmx_pme_t*    GPU_FUNC_ARGUMENT(pme),
                                             DeviceBuffer<float> GPU_FUNC_ARGUMENT(d_x)) GPU_FUNC_TERM;

/*! \brief Get pointer to device copy of force data.
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to force data.
 */
GPU_FUNC_QUALIFIER void* pme_gpu_get_device_f(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(nullptr);

/*! \brief Returns the pointer to the GPU stream.
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to GPU stream object.
 */
GPU_FUNC_QUALIFIER void* pme_gpu_get_device_stream(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(nullptr);

/*! \brief Returns the pointer to the GPU context.
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to GPU context object.
 */
GPU_FUNC_QUALIFIER void* pme_gpu_get_device_context(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(nullptr);

/*! \brief Get pointer to the device synchronizer object that allows syncing on PME force calculation completion
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to synchronizer.
 */
GPU_FUNC_QUALIFIER GpuEventSynchronizer* pme_gpu_get_f_ready_synchronizer(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(nullptr);
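
/* Illustrative sketch: a consumer of the PME forces on another stream could
 * use the synchronizer above (enqueueWaitEvent() is assumed from the
 * GpuEventSynchronizer interface):
 *
 *   GpuEventSynchronizer* pmeForcesReady = pme_gpu_get_f_ready_synchronizer(pme);
 *   if (pmeForcesReady != nullptr)
 *   {
 *       pmeForcesReady->enqueueWaitEvent(consumerStream); // hypothetical stream
 *   }
 */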

#endif