2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2017,2018,2019, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 // FIXME: remove the "__" prefix in front of the group def when we move the
37 // nonbonded code into separate dir.
39 /*! \libinternal \defgroup __module_nbnxm Short-range non-bonded interaction module
40 * \ingroup group_mdrun
42 * \brief Computes forces and energies for short-range pair-interactions
43 * based on the Verlet algorithm. The algorithm uses pair-lists generated
44 * at fixed intervals as well as various flavors of pair interaction kernels
45 * implemented for a wide range of CPU and GPU architectures.
47 * The module includes support for flavors of Coulomb and Lennard-Jones interaction
48 * treatment implemented for a large range of SIMD instruction sets for CPU
49 * architectures as well as in CUDA and OpenCL for GPU architectures.
50 * Additionally there is a reference CPU non-SIMD kernel and a reference CPU
51 * kernel that uses the GPU pair-list setup.
53 * The implementation of the kernels is based on the cluster non-bonded algorithm
54 * which in the code is referred to as the NxM algorithms ("nbnxm_" prefix);
55 * for details of the algorithm see DOI:10.1016/j.cpc.2013.06.003.
57 * Algorithmically, the non-bonded computation has two different modes:
58 * A "classical" mode: generate a list every nstlist steps containing at least
59 * all atom pairs up to a distance of rlistOuter and compute pair interactions
60 * for all pairs that are within the interaction cut-off.
61 * A "dynamic pruning" mode: generate an "outer-list" up to cut-off rlistOuter
62 * every nstlist steps and prune the outer-list using a cut-off of rlistInner
63 * every nstlistPrune steps to obtain a smaller "inner-list". This
64 * results in fewer interaction computations and allows for a larger nstlist.
65 * On a GPU, this dynamic pruning is performed in a rolling fashion, pruning
66 * only a sub-part of the list each (second) step. This way it can often
67 * overlap with integration and constraints on the CPU.
68 * Currently a simple heuristic determines which mode will be used.
70 * TODO: add a summary list and brief descriptions of the different submodules:
71 * search, CPU kernels, GPU glue code + kernels.
73 * \author Berk Hess <hess@kth.se>
74 * \author Szilárd Páll <pall.szilard@gmail.com>
75 * \author Mark Abraham <mark.j.abraham@gmail.com>
76 * \author Anca Hamuraru <anca@streamcomputing.eu>
77 * \author Teemu Virolainen <teemu@streamcomputing.eu>
78 * \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
80 * TODO: add more authors!
83 /*! \libinternal \file
85 * \brief This file contains the public interface of the nbnxm module
86 * that implements the NxM atom cluster non-bonded algorithm to efficiently
87 * compute pair forces.
90 * \author Berk Hess <hess@kth.se>
91 * \author Szilárd Páll <pall.szilard@gmail.com>
94 * \ingroup __module_nbnxm
98 #ifndef GMX_NBNXM_NBNXM_H
99 #define GMX_NBNXM_NBNXM_H
103 #include "gromacs/math/vectypes.h"
104 #include "gromacs/utility/arrayref.h"
105 #include "gromacs/utility/enumerationhelpers.h"
106 #include "gromacs/utility/real.h"
108 #include "locality.h"
110 // TODO: Remove this include and the two nbnxm includes above
111 #include "nbnxm_gpu.h"
113 struct gmx_device_info_t;
114 struct gmx_domdec_zones_t;
115 struct gmx_enerdata_t;
116 struct gmx_hw_info_t;
118 struct gmx_wallcycle;
119 struct interaction_const_t;
120 struct nonbonded_verlet_t;
131 /*! \brief Switch for whether to use GPU for buffer ops */
132 enum class BufferOpsUseGpu
138 class GpuEventSynchronizer;
142 class ForceWithShiftForces;
144 class UpdateGroupsCog;
149 enum class KernelType;
155 /*! \brief Nonbonded NxN kernel types: plain C, CPU SIMD, GPU, GPU emulation */
156 enum class KernelType : int
167 /*! \brief Ewald exclusion types */
168 enum class EwaldExclusionType : int
176 /*! \brief The non-bonded setup, also affects the pairlist construction kernel
179 //! The non-bonded type, also affects the pairlist construction kernel
180 KernelType kernelType = KernelType::NotSet;
181 //! Ewald exclusion computation handling type, currently only used for CPU
182 EwaldExclusionType ewaldExclusionType = EwaldExclusionType::NotSet;
185 /*! \brief Return a string identifying the kernel type.
187 * \param [in] kernelType nonbonded kernel type, takes values from the Nbnxm::KernelType enum
188 * \returns a string identifying the kernel corresponding to the type passed as argument
190 const char *lookup_kernel_name(Nbnxm::KernelType kernelType);
194 /*! \brief Flag to tell the nonbonded kernels whether to clear the force output buffers */
196 enbvClearFNo, enbvClearFYes
200 * \brief Top-level non-bonded data structure for the Verlet-type cut-off scheme. */
201 struct nonbonded_verlet_t
204 //! Constructs an object from its components
205 nonbonded_verlet_t(std::unique_ptr<PairlistSets> pairlistSets,
206 std::unique_ptr<PairSearch> pairSearch,
207 std::unique_ptr<nbnxn_atomdata_t> nbat,
208 const Nbnxm::KernelSetup &kernelSetup,
209 gmx_nbnxn_gpu_t *gpu_nbv,
210 gmx_wallcycle *wcycle);
212 ~nonbonded_verlet_t();
214 //! Returns whether a GPU is used for the non-bonded calculations
217 return kernelSetup_.kernelType == Nbnxm::KernelType::Gpu8x8x8;
220 //! Returns whether a GPU is emulated for the non-bonded calculations
221 bool emulateGpu() const
223 return kernelSetup_.kernelType == Nbnxm::KernelType::Cpu8x8x8_PlainC;
226 //! Return whether the pairlist is of simple, CPU type
227 bool pairlistIsSimple() const
229 return !useGpu() && !emulateGpu();
232 //! Initialize the pair list sets, TODO this should be private
233 void initPairlistSets(bool haveMultipleDomains);
235 //! Returns the order of the local atoms on the grid
236 gmx::ArrayRef<const int> getLocalAtomOrder() const;
238 //! Sets the order of the local atoms to the grid atom ordering
239 void setLocalAtomOrder();
241 //! Returns the index position of the atoms on the search grid
242 gmx::ArrayRef<const int> getGridIndices() const;
244 //! Constructs the pairlist for the given locality
245 void constructPairlist(Nbnxm::InteractionLocality iLocality,
246 const t_blocka *excl,
250 //! Updates all the atom properties in Nbnxm
251 void setAtomProperties(const t_mdatoms &mdatoms,
252 gmx::ArrayRef<const int> atomInfo);
254 //! Updates the coordinates in Nbnxm for the given locality
255 void setCoordinates(Nbnxm::AtomLocality locality,
257 gmx::ArrayRef<const gmx::RVec> x,
258 BufferOpsUseGpu useGpu,
259 void *xPmeDevicePtr);
261 //! Init for GPU version of setup coordinates in Nbnxm
262 void atomdata_init_copy_x_to_nbat_x_gpu();
264 //! Sync the nonlocal GPU stream with dependent tasks in the local queue.
265 void insertNonlocalGpuDependency(Nbnxm::InteractionLocality interactionLocality);
267 //! Returns a reference to the pairlist sets
268 const PairlistSets &pairlistSets() const
270 return *pairlistSets_;
273 //! Returns whether step is a dynamic list pruning step, for CPU lists
274 bool isDynamicPruningStepCpu(int64_t step) const;
276 //! Returns whether step is a dynamic list pruning step, for GPU lists
277 bool isDynamicPruningStepGpu(int64_t step) const;
279 //! Dispatches the dynamic pruning kernel for the given locality, for CPU lists
280 void dispatchPruneKernelCpu(Nbnxm::InteractionLocality iLocality,
281 const rvec *shift_vec);
283 //! Dispatches the dynamic pruning kernel for GPU lists
284 void dispatchPruneKernelGpu(int64_t step);
286 //! \brief Executes the non-bonded kernel on the CPU or launches it on the GPU
287 void dispatchNonbondedKernel(Nbnxm::InteractionLocality iLocality,
288 const interaction_const_t &ic,
291 const t_forcerec &fr,
292 gmx_enerdata_t *enerd,
295 //! Executes the non-bonded free-energy kernel, always runs on the CPU
296 void dispatchFreeEnergyKernel(Nbnxm::InteractionLocality iLocality,
297 const t_forcerec *fr,
299 gmx::ForceWithShiftForces *forceWithShiftForces,
300 const t_mdatoms &mdatoms,
303 gmx_enerdata_t *enerd,
307 /*! \brief Add the forces stored in nbat to f, zeros the forces in nbat
308 * \param [in] locality Local or non-local
309 * \param [inout] force Force to be added to
311 void atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality locality,
312 gmx::ArrayRef<gmx::RVec> force);
314 /*! \brief Add the forces stored in nbat to f, allowing for possibility that GPU buffer ops are active
315 * \param [in] locality Local or non-local
316 * \param [inout] force Force to be added to
317 * \param [in] fPme Force from PME calculation
318 * \param [in] pmeForcesReady Event triggered when PME force calculation has completed
319 * \param [in] useGpu Whether GPU buffer ops are active
320 * \param [in] useGpuFPmeReduction Whether PME force reduction is on GPU
321 * \param [in] accumulateForce Whether force should be accumulated or stored
323 void atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality locality,
324 gmx::ArrayRef<gmx::RVec> force,
326 GpuEventSynchronizer *pmeForcesReady,
327 BufferOpsUseGpu useGpu,
328 bool useGpuFPmeReduction,
329 bool accumulateForce);
331 /*! \brief Outer body of function to perform initialization for F buffer operations on GPU. */
332 void atomdata_init_add_nbat_f_to_f_gpu();
334 /*! \brief H2D transfer of force buffer*/
335 void launch_copy_f_to_gpu(rvec *f, Nbnxm::AtomLocality locality);
337 /*! \brief D2H transfer of force buffer*/
338 void launch_copy_f_from_gpu(rvec *f, Nbnxm::AtomLocality locality);
340 /*! \brief Wait for GPU force reduction task and D2H transfer of its results to complete
342 * FIXME: need more details: when should be called / after which operation, etc.
344 void wait_for_gpu_force_reduction(Nbnxm::AtomLocality locality);
346 //! Return the kernel setup
347 const Nbnxm::KernelSetup &kernelSetup() const
352 //! Returns the inner radius for the pair list
353 real pairlistInnerRadius() const;
355 //! Returns the outer radius for the pair list
356 real pairlistOuterRadius() const;
358 //! Changes the pair-list outer and inner radius
359 void changePairlistRadii(real rlistOuter,
362 //! Set up internal flags that indicate what type of short-range work there is.
363 void setupGpuShortRangeWork(const gmx::GpuBonded *gpuBonded,
364 const Nbnxm::InteractionLocality iLocality)
366 if (useGpu() && !emulateGpu())
368 Nbnxm::setupGpuShortRangeWork(gpu_nbv, gpuBonded, iLocality);
372 //! Returns true if there is GPU short-range work for the given atom locality.
373 bool haveGpuShortRangeWork(const Nbnxm::AtomLocality aLocality)
375 return ((useGpu() && !emulateGpu()) &&
376 Nbnxm::haveGpuShortRangeWork(gpu_nbv, aLocality));
379 // TODO: Make all data members private
381 //! All data related to the pair lists
382 std::unique_ptr<PairlistSets> pairlistSets_;
383 //! Working data for constructing the pairlists
384 std::unique_ptr<PairSearch> pairSearch_;
386 std::unique_ptr<nbnxn_atomdata_t> nbat;
388 //! The non-bonded setup, also affects the pairlist construction kernel
389 Nbnxm::KernelSetup kernelSetup_;
390 //! \brief Pointer to wallcycle structure.
391 gmx_wallcycle *wcycle_;
393 //! GPU Nbnxm data, only used with a physical GPU (TODO: use unique_ptr)
394 gmx_nbnxn_gpu_t *gpu_nbv;
400 /*! \brief Creates an Nbnxm object */
401 std::unique_ptr<nonbonded_verlet_t>
402 init_nb_verlet(const gmx::MDLogger &mdlog,
403 gmx_bool bFEP_NonBonded,
404 const t_inputrec *ir,
405 const t_forcerec *fr,
407 const gmx_hw_info_t &hardwareInfo,
408 const gmx_device_info_t *deviceInfo,
409 const gmx_mtop_t *mtop,
411 gmx_wallcycle *wcycle);
415 /*! \brief Put the atoms on the pair search grid.
417 * Only atoms atomStart to atomEnd in x are put on the grid.
418 * The atom_density is used to determine the grid size.
419 * When atomDensity<=0, the density is determined from atomEnd-atomStart and the corners.
420 * With domain decomposition part of the n particles might have migrated,
421 * but have not been removed yet. This count is given by nmoved.
422 * When move[i] < 0 particle i has migrated and will not be put on the grid.
423 * Without domain decomposition move will be NULL.
425 void nbnxn_put_on_grid(nonbonded_verlet_t *nb_verlet,
428 const rvec lowerCorner,
429 const rvec upperCorner,
430 const gmx::UpdateGroupsCog *updateGroupsCog,
434 gmx::ArrayRef<const int> atomInfo,
435 gmx::ArrayRef<const gmx::RVec> x,
439 /*! \brief As nbnxn_put_on_grid, but for the non-local atoms
441 * with domain decomposition. Should be called after calling
442 * nbnxn_search_put_on_grid for the local atoms / home zone.
444 void nbnxn_put_on_grid_nonlocal(nonbonded_verlet_t *nb_verlet,
445 const struct gmx_domdec_zones_t *zones,
446 gmx::ArrayRef<const int> atomInfo,
447 gmx::ArrayRef<const gmx::RVec> x);
449 #endif // GMX_NBNXM_NBNXM_H