/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \libinternal \file
 *
 * \brief This file contains function declarations necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \inlibraryapi
 * \ingroup module_ewald
 */

#ifndef GMX_EWALD_PME_H
#define GMX_EWALD_PME_H

#include <string>

#include "gromacs/gpu_utils/devicebuffer_datatype.h"
#include "gromacs/gpu_utils/gpu_macros.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"

struct gmx_hw_info_t;
struct interaction_const_t;
struct t_commrec;
struct t_inputrec;
struct t_nrnb;
struct PmeGpu;
struct gmx_wallclock_gpu_pme_t;
struct gmx_device_info_t;
struct gmx_enerdata_t;
struct gmx_mtop_t;
struct gmx_pme_t;
struct gmx_wallcycle;
struct NumPmeDomains;

enum class GpuTaskCompletion;
class PmeGpuProgram;
class GpuEventSynchronizer;
//! Convenience name.
using PmeGpuProgramHandle = const PmeGpuProgram *;

namespace gmx
{
class PmePpCommGpu;
class ForceWithVirial;
class MDLogger;
enum class PinningPolicy : int;
} // namespace gmx

enum {
    GMX_SUM_GRID_FORWARD, GMX_SUM_GRID_BACKWARD
};

/*! \brief Possible PME codepaths on a rank.
 * \todo: make this enum class with gmx_pme_t C++ refactoring
 */
enum class PmeRunMode
{
    None,  //!< No PME task is done
    CPU,   //!< Whole PME computation is done on CPU
    GPU,   //!< Whole PME computation is done on GPU
    Mixed, //!< Mixed mode: only spread and gather run on GPU; FFT and solving are done on CPU.
};

//! PME gathering output forces treatment
enum class PmeForceOutputHandling
{
    Set,             /**< Gather simply writes into provided force buffer */
    ReduceWithInput, /**< Gather adds its output to the buffer.
                        On GPU, that means additional H2D copy before the kernel launch. */
};

/*! \brief Return the smallest allowed PME grid size for \p pmeOrder */
int minimalPmeGridSize(int pmeOrder);

/*! \brief Check restrictions on pme_order and the PME grid nkx,nky,nkz.
 *
 * With errorsAreFatal=true, an exception or fatal error is generated
 * on violation of restrictions.
 * With errorsAreFatal=false, false is returned on violation of restrictions.
 * When all restrictions are obeyed, true is returned.
 * Argument useThreads tells if any MPI rank doing PME uses more than 1 thread.
 * If useThreads is unknown at the time of calling, pass true for conservative checking.
 *
 * The PME GPU restrictions are checked separately during pme_gpu_init().
 */
bool gmx_pme_check_restrictions(int  pme_order,
                                int  nkx, int nky, int nkz,
                                int  numPmeDomainsAlongX,
                                bool useThreads,
                                bool errorsAreFatal);

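/* Example (an illustrative sketch; the grid variables are hypothetical and
 * come from the caller): validating grid settings without aborting, passing
 * useThreads = true for conservative checking when the thread count is not
 * yet known:
 *
 *   bool gridIsValid = gmx_pme_check_restrictions(pmeOrder, nkx, nky, nkz,
 *                                                 numPmeDomainsAlongX,
 *                                                 true,   // useThreads: conservative
 *                                                 false); // errorsAreFatal: just report
 */
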
/*! \brief Construct PME data
 *
 * \throws  gmx::InconsistentInputError if input grid sizes/PME order are inconsistent.
 * \returns Pointer to newly allocated and initialized PME data.
 *
 * \todo We should evolve something like a \c GpuManager that holds \c
 * gmx_device_info_t * and \c PmeGpuProgramHandle and perhaps other
 * related things whose lifetime can/should exceed that of a task (or
 * perhaps task manager). See Redmine #2522.
 */
gmx_pme_t *gmx_pme_init(const t_commrec          *cr,
                        const NumPmeDomains      &numPmeDomains,
                        const t_inputrec         *ir,
                        gmx_bool                  bFreeEnergy_q,
                        gmx_bool                  bFreeEnergy_lj,
                        gmx_bool                  bReproducible,
                        real                      ewaldcoeff_q,
                        real                      ewaldcoeff_lj,
                        int                       nthread,
                        PmeRunMode                runMode,
                        PmeGpu                   *pmeGpu,
                        const gmx_device_info_t  *gpuInfo,
                        PmeGpuProgramHandle       pmeGpuProgram,
                        const gmx::MDLogger      &mdlog);

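/* Example (a minimal sketch; all arguments besides the literals are assumed
 * to come from the caller's setup code): constructing CPU-only PME data, so
 * the GPU-related handles are null:
 *
 *   gmx_pme_t *pme = gmx_pme_init(cr, numPmeDomains, ir,
 *                                 FALSE, FALSE,  // no FE perturbation of q or LJ
 *                                 FALSE,         // bReproducible
 *                                 ewaldcoeff_q, ewaldcoeff_lj,
 *                                 nthread, PmeRunMode::CPU,
 *                                 nullptr, nullptr, nullptr, mdlog);
 */
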
/*! \brief Destroys the PME data structure.*/
void gmx_pme_destroy(gmx_pme_t *pme);

/*! \brief Flag values that control what gmx_pme_do() will calculate
 *
 * These can be combined with bitwise-OR if more than one thing is required.
 */
#define GMX_PME_SPREAD        (1<<0)
#define GMX_PME_SOLVE         (1<<1)
#define GMX_PME_CALC_F        (1<<2)
#define GMX_PME_CALC_ENER_VIR (1<<3)
/* This forces the grid to be backtransformed even without GMX_PME_CALC_F */
#define GMX_PME_CALC_POT      (1<<4)

#define GMX_PME_DO_ALL_F  (GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F)

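/* Example: the flags combine with bitwise-OR; a typical step that needs the
 * forces plus the energy and virial would request
 *
 *   int flags = GMX_PME_DO_ALL_F | GMX_PME_CALC_ENER_VIR;
 */
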
/*! \brief Do a PME calculation on a CPU for the long range electrostatics and/or LJ.
 *
 * Computes the PME forces and the energy and virial, when requested,
 * for all atoms in \p coordinates. Forces, when requested, are added
 * to the buffer \p forces, which is allowed to contain more elements
 * than the number of elements in \p coordinates.
 * The meaning of \p flags is defined above, and determines which
 * parts of the calculation are performed.
 *
 * \return 0 indicates all well, non zero is an error code.
 */
int gmx_pme_do(struct gmx_pme_t               *pme,
               gmx::ArrayRef<const gmx::RVec>  coordinates,
               gmx::ArrayRef<gmx::RVec>        forces,
               real chargeA[], real chargeB[],
               real c6A[], real c6B[],
               real sigmaA[], real sigmaB[],
               const matrix box, const t_commrec *cr,
               int maxshift_x, int maxshift_y,
               t_nrnb *nrnb, gmx_wallcycle *wcycle,
               matrix vir_q, matrix vir_lj,
               real *energy_q, real *energy_lj,
               real lambda_q, real lambda_lj,
               real *dvdlambda_q, real *dvdlambda_lj,
               int flags);

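/* Example (a minimal sketch; all variables are assumed to be set up by the
 * caller): a full CPU PME computation of forces, energy and virial:
 *
 *   int ret = gmx_pme_do(pme, coordinates, forces,
 *                        chargeA, chargeB, c6A, c6B, sigmaA, sigmaB,
 *                        box, cr, maxshift_x, maxshift_y, nrnb, wcycle,
 *                        vir_q, vir_lj, &energy_q, &energy_lj,
 *                        lambda_q, lambda_lj, &dvdlambda_q, &dvdlambda_lj,
 *                        GMX_PME_DO_ALL_F | GMX_PME_CALC_ENER_VIR);
 *   // ret != 0 signals an error
 */
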
/*! \brief Called on the nodes that do PME exclusively */
int gmx_pmeonly(struct gmx_pme_t *pme,
                const t_commrec *cr, t_nrnb *mynrnb,
                gmx_wallcycle *wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                t_inputrec *ir, PmeRunMode runMode);

/*! \brief Calculate the PME grid energy V for n charges.
 *
 * The potential (found in \p pme) must have been found already with a
 * call to gmx_pme_do() with at least GMX_PME_SPREAD and GMX_PME_SOLVE
 * specified. Note that the charges are not spread on the grid in the
 * pme struct. Currently does not work in parallel or with free
 * energy.
 */
void gmx_pme_calc_energy(gmx_pme_t                      *pme,
                         gmx::ArrayRef<const gmx::RVec>  x,
                         gmx::ArrayRef<const real>       q,
                         real                           *V);

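/* Example (sketch): the grid potential must already be available, e.g. from a
 * preceding gmx_pme_do() call that included at least GMX_PME_SPREAD and
 * GMX_PME_SOLVE in its flags; afterwards the grid energy of the charges q at
 * positions x can be queried:
 *
 *   real V;
 *   gmx_pme_calc_energy(pme, x, q, &V);
 */
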
/*! \brief Send the charges and maxshift to our PME-only node. */
void gmx_pme_send_parameters(const t_commrec *cr,
                             const interaction_const_t *ic,
                             gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                             real *chargeA, real *chargeB,
                             real *sqrt_c6A, real *sqrt_c6B,
                             real *sigmaA, real *sigmaB,
                             int maxshift_x, int maxshift_y);

/*! \brief Send the coordinates to our PME-only node and request a PME calculation */
void gmx_pme_send_coordinates(const t_commrec *cr, const matrix box, const rvec *x,
                              real lambda_q, real lambda_lj,
                              gmx_bool bEnerVir,
                              int64_t step, bool useGpuPmePpComms, gmx_wallcycle *wcycle);

/*! \brief Tell our PME-only node to finish */
void gmx_pme_send_finish(const t_commrec *cr);

/*! \brief Tell our PME-only node to reset all cycle and flop counters */
void gmx_pme_send_resetcounters(const t_commrec *cr, int64_t step);

/*! \brief PP nodes receive the long range forces from the PME nodes */
void gmx_pme_receive_f(gmx::PmePpCommGpu *pmePpCommGpu,
                       const t_commrec *cr,
                       gmx::ForceWithVirial *forceWithVirial,
                       real *energy_q, real *energy_lj,
                       real *dvdlambda_q, real *dvdlambda_lj,
                       bool useGpuPmePpComms, float *pme_cycles);

/*! \brief
 * This function updates the local atom data on GPU after DD (charges, coordinates, etc.).
 * TODO: it should update the PME CPU atom data as well.
 * (Currently the CPU codepath is instead passed the input pointers on each gmx_pme_do() call.)
 *
 * \param[in,out] pme       The PME structure.
 * \param[in]     numAtoms  The number of particles.
 * \param[in]     charges   The pointer to the array of particle charges.
 */
void gmx_pme_reinit_atoms(gmx_pme_t  *pme,
                          int         numAtoms,
                          const real *charges);

/* A block of PME GPU functions */

/*! \brief Checks whether the GROMACS build allows running PME on a GPU.
 * TODO: this partly duplicates an internal PME assert function
 * pme_gpu_check_restrictions(), except that one works with a
 * formed gmx_pme_t structure. Should that one go away/work with inputrec?
 *
 * \param[out] error  If non-null, the error message when PME is not supported on GPU.
 *
 * \returns true if PME can run on GPU on this build, false otherwise.
 */
bool pme_gpu_supports_build(std::string *error);

/*! \brief Checks whether the detected (GPU) hardware allows running PME on a GPU.
 *
 * \param[in]  hwinfo  Information about the detected hardware
 * \param[out] error   If non-null, the error message when PME is not supported on GPU.
 *
 * \returns true if PME can run on GPU on this hardware, false otherwise.
 */
bool pme_gpu_supports_hardware(const gmx_hw_info_t &hwinfo,
                               std::string         *error);

/*! \brief Checks whether the input system allows running PME on a GPU.
 * TODO: this partly duplicates an internal PME assert function
 * pme_gpu_check_restrictions(), except that one works with a
 * formed gmx_pme_t structure. Should that one go away/work with inputrec?
 *
 * \param[in]  ir     Input system.
 * \param[in]  mtop   Complete system topology, to check if an FE simulation perturbs charges.
 * \param[out] error  If non-null, the error message if the input is not supported on GPU.
 *
 * \returns true if PME can run on GPU with this input, false otherwise.
 */
bool pme_gpu_supports_input(const t_inputrec &ir, const gmx_mtop_t &mtop, std::string *error);

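/* Example (sketch): the three support checks are typically chained during
 * task assignment, so the first failing check supplies the user-facing
 * message:
 *
 *   std::string error;
 *   const bool  canRunPmeOnGpu = pme_gpu_supports_build(&error)
 *                                && pme_gpu_supports_hardware(hwinfo, &error)
 *                                && pme_gpu_supports_input(ir, mtop, &error);
 *   // if !canRunPmeOnGpu, error now holds the reason
 */
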
/*! \brief
 * Returns the active PME codepath (CPU, GPU, mixed).
 * \todo This is rather static data that should be managed by the higher level task scheduler.
 *
 * \param[in] pme  The PME data structure.
 * \returns        The active PME codepath.
 */
PmeRunMode pme_run_mode(const gmx_pme_t *pme);

/*! \libinternal \brief
 * Return the pinning policy appropriate for this build configuration
 * for relevant buffers used for PME task on this rank (e.g. running
 * on a GPU). */
gmx::PinningPolicy pme_get_pinning_policy();

/*! \brief
 * Tells if PME is enabled to run on GPU (not necessarily active at the moment).
 * \todo This is rather static data that should be managed by the hardware assignment manager.
 * For now, it is synonymous with the active PME codepath (in the absence of dynamic switching).
 *
 * \param[in] pme  The PME data structure.
 * \returns        true if PME can run on GPU, false otherwise.
 */
inline bool pme_gpu_task_enabled(const gmx_pme_t *pme)
{
    return (pme != nullptr) && (pme_run_mode(pme) != PmeRunMode::CPU);
}

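/* Example (sketch): guarding GPU-only entry points on a rank that may or may
 * not have a GPU PME task:
 *
 *   if (pme_gpu_task_enabled(pme))
 *   {
 *       pme_gpu_reset_timings(pme);
 *   }
 */
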
/*! \brief Returns the size of the padding needed by GPU version of PME in the coordinates array.
 *
 * \param[in] pme  The PME data structure.
 */
GPU_FUNC_QUALIFIER int pme_gpu_get_padding_size(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme)) GPU_FUNC_TERM_WITH_RETURN(0);

// The following functions are all the PME GPU entry points,
// currently inlining to nothing on non-CUDA builds.

/*! \brief
 * Resets the PME GPU timings. To be called at the reset step.
 *
 * \param[in] pme  The PME structure.
 */
GPU_FUNC_QUALIFIER void pme_gpu_reset_timings(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme)) GPU_FUNC_TERM;

/*! \brief
 * Copies the PME GPU timings to the gmx_wallclock_gpu_pme_t structure (for log output). To be called at the run end.
 *
 * \param[in] pme      The PME structure.
 * \param[in] timings  The gmx_wallclock_gpu_pme_t structure.
 */
GPU_FUNC_QUALIFIER void pme_gpu_get_timings(const gmx_pme_t         *GPU_FUNC_ARGUMENT(pme),
                                            gmx_wallclock_gpu_pme_t *GPU_FUNC_ARGUMENT(timings)) GPU_FUNC_TERM;

/* The main PME GPU functions */

/*! \brief
 * Prepares PME on GPU computation (updating the box if needed)
 * \param[in] pme                   The PME data structure.
 * \param[in] needToUpdateBox       Tells if the stored unit cell parameters should be updated from \p box.
 * \param[in] box                   The unit cell box.
 * \param[in] wcycle                The wallclock counter.
 * \param[in] flags                 The combination of flags to affect this PME computation.
 *                                  The flags are the GMX_PME_ flags from pme.h.
 * \param[in] useGpuForceReduction  Whether PME forces are reduced on GPU this step or should be downloaded for CPU reduction
 */
GPU_FUNC_QUALIFIER void pme_gpu_prepare_computation(gmx_pme_t     *GPU_FUNC_ARGUMENT(pme),
                                                    bool           GPU_FUNC_ARGUMENT(needToUpdateBox),
                                                    const matrix   GPU_FUNC_ARGUMENT(box),
                                                    gmx_wallcycle *GPU_FUNC_ARGUMENT(wcycle),
                                                    int            GPU_FUNC_ARGUMENT(flags),
                                                    bool           GPU_FUNC_ARGUMENT(useGpuForceReduction)) GPU_FUNC_TERM;

/*! \brief
 * Launches first stage of PME on GPU - spreading kernel.
 *
 * \param[in] pme             The PME data structure.
 * \param[in] xReadyOnDevice  Event synchronizer indicating that the coordinates are ready in the device memory; nullptr allowed only on separate PME ranks.
 * \param[in] wcycle          The wallclock counter.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_spread(gmx_pme_t            *GPU_FUNC_ARGUMENT(pme),
                                              GpuEventSynchronizer *GPU_FUNC_ARGUMENT(xReadyOnDevice),
                                              gmx_wallcycle        *GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM;

/*! \brief
 * Launches middle stages of PME (FFT R2C, solving, FFT C2R) either on GPU or on CPU, depending on the run mode.
 *
 * \param[in] pme     The PME data structure.
 * \param[in] wcycle  The wallclock counter.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_complex_transforms(gmx_pme_t     *GPU_FUNC_ARGUMENT(pme),
                                                          gmx_wallcycle *GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM;

/*! \brief
 * Launches last stage of PME on GPU - force gathering and D2H force transfer.
 *
 * \param[in] pme             The PME data structure.
 * \param[in] wcycle          The wallclock counter.
 * \param[in] forceTreatment  Tells how data should be treated. The gathering kernel either stores
 *                            the output reciprocal forces into the host array, or copies its contents to the GPU first
 *                            and accumulates. The reduction is non-atomic.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_gather(const gmx_pme_t        *GPU_FUNC_ARGUMENT(pme),
                                              gmx_wallcycle          *GPU_FUNC_ARGUMENT(wcycle),
                                              PmeForceOutputHandling  GPU_FUNC_ARGUMENT(forceTreatment)) GPU_FUNC_TERM;

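/* Example (a minimal sketch of one step's GPU launch pipeline; the box,
 * synchronizer, flags and wcycle are assumed to come from the caller):
 *
 *   pme_gpu_prepare_computation(pme, needToUpdateBox, box, wcycle, flags, useGpuForceReduction);
 *   pme_gpu_launch_spread(pme, xReadyOnDevice, wcycle);
 *   pme_gpu_launch_complex_transforms(pme, wcycle); // FFT+solve on GPU or CPU, per run mode
 *   pme_gpu_launch_gather(pme, wcycle, PmeForceOutputHandling::Set);
 */
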
/*! \brief
 * Attempts to complete PME GPU tasks.
 *
 * The \p completionKind argument controls whether the function blocks until all
 * PME GPU tasks enqueued completed (as pme_gpu_wait_finish_task() does) or only
 * checks and returns immediately if they did not.
 * When blocking, or when the tasks have completed, it also gets the output forces
 * by assigning them to the \p forceWithVirial passed in.
 * Virial/energy are also outputs if they were to be computed.
 *
 * \param[in]  pme              The PME data structure.
 * \param[in]  flags            The combination of flags to affect this PME computation.
 *                              The flags are the GMX_PME_ flags from pme.h.
 * \param[in]  wcycle           The wallclock counter.
 * \param[out] forceWithVirial  The output force and virial.
 * \param[out] enerd            The output energies.
 * \param[in]  completionKind   Indicates whether PME task completion should only be checked rather than waited for.
 * \returns                     True if the PME GPU tasks have completed.
 */
GPU_FUNC_QUALIFIER bool
pme_gpu_try_finish_task(gmx_pme_t            *GPU_FUNC_ARGUMENT(pme),
                        int                   GPU_FUNC_ARGUMENT(flags),
                        gmx_wallcycle        *GPU_FUNC_ARGUMENT(wcycle),
                        gmx::ForceWithVirial *GPU_FUNC_ARGUMENT(forceWithVirial),
                        gmx_enerdata_t       *GPU_FUNC_ARGUMENT(enerd),
                        GpuTaskCompletion     GPU_FUNC_ARGUMENT(completionKind)) GPU_FUNC_TERM_WITH_RETURN(false);

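/* Example (sketch): overlapping PME completion with other work by polling;
 * GpuTaskCompletion::Check returns immediately, GpuTaskCompletion::Wait
 * blocks. doOtherWork() is hypothetical:
 *
 *   while (!pme_gpu_try_finish_task(pme, flags, wcycle, &forceWithVirial, enerd,
 *                                   GpuTaskCompletion::Check))
 *   {
 *       doOtherWork();
 *   }
 */
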
/*! \brief
 * Blocks until PME GPU tasks are completed, and gets the output forces and virial/energy
 * (if they were to be computed).
 *
 * \param[in]  pme              The PME data structure.
 * \param[in]  flags            The combination of flags to affect this PME computation.
 *                              The flags are the GMX_PME_ flags from pme.h.
 * \param[in]  wcycle           The wallclock counter.
 * \param[out] forceWithVirial  The output force and virial.
 * \param[out] enerd            The output energies.
 */
GPU_FUNC_QUALIFIER void
pme_gpu_wait_and_reduce(gmx_pme_t            *GPU_FUNC_ARGUMENT(pme),
                        int                   GPU_FUNC_ARGUMENT(flags),
                        gmx_wallcycle        *GPU_FUNC_ARGUMENT(wcycle),
                        gmx::ForceWithVirial *GPU_FUNC_ARGUMENT(forceWithVirial),
                        gmx_enerdata_t       *GPU_FUNC_ARGUMENT(enerd)) GPU_FUNC_TERM;

/*! \brief
 * The PME GPU reinitialization function that is called both at the end of any PME computation and on any load balancing.
 *
 * Clears the internal grid and energy/virial buffers; it is not safe to start
 * the PME computation without calling this.
 * Note that unlike in the nbnxn module, the force buffer does not need clearing.
 *
 * \todo Rename this function to *clear* -- it clearly only does output resetting,
 * and we should be clear about what the function does.
 *
 * \param[in] pme     The PME data structure.
 * \param[in] wcycle  The wallclock counter.
 */
GPU_FUNC_QUALIFIER void pme_gpu_reinit_computation(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme),
                                                   gmx_wallcycle   *GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM;

/*! \brief Get pointer to device copy of coordinate data.
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to coordinate data.
 */
GPU_FUNC_QUALIFIER DeviceBuffer<float> pme_gpu_get_device_x(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme)) GPU_FUNC_TERM_WITH_RETURN(DeviceBuffer<float> {});

/*! \brief Set pointer to device copy of coordinate data.
 * \param[in] pme  The PME data structure.
 * \param[in] d_x  The pointer to the positions buffer to be set.
 */
GPU_FUNC_QUALIFIER void pme_gpu_set_device_x(const gmx_pme_t     *GPU_FUNC_ARGUMENT(pme),
                                             DeviceBuffer<float>  GPU_FUNC_ARGUMENT(d_x)) GPU_FUNC_TERM;

/*! \brief Get pointer to device copy of force data.
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to force data.
 */
GPU_FUNC_QUALIFIER void *pme_gpu_get_device_f(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme)) GPU_FUNC_TERM_WITH_RETURN(nullptr);

/*! \brief Returns the pointer to the GPU stream.
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to GPU stream object.
 */
GPU_FUNC_QUALIFIER void *pme_gpu_get_device_stream(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme)) GPU_FUNC_TERM_WITH_RETURN(nullptr);

/*! \brief Returns the pointer to the GPU context.
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to GPU context object.
 */
GPU_FUNC_QUALIFIER void *pme_gpu_get_device_context(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme)) GPU_FUNC_TERM_WITH_RETURN(nullptr);

/*! \brief Get pointer to the device synchronizer object that allows syncing on PME force calculation completion.
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to synchronizer.
 */
GPU_FUNC_QUALIFIER GpuEventSynchronizer *pme_gpu_get_f_ready_synchronizer(const gmx_pme_t *GPU_FUNC_ARGUMENT(pme)) GPU_FUNC_TERM_WITH_RETURN(nullptr);

#endif