/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Declaration of low-level functions and fields of GPU state propagator object.
 *
 * \author Artem Zhmurov <zhmurov@gmail.com>
 *
 * \ingroup module_mdtypes
 */
#ifndef GMX_MDTYPES_STATE_PROPAGATOR_DATA_GPU_IMPL_H
#define GMX_MDTYPES_STATE_PROPAGATOR_DATA_GPU_IMPL_H

#include "gromacs/gpu_utils/devicebuffer.h"
#if GMX_GPU == GMX_GPU_CUDA
#    include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#elif GMX_GPU == GMX_GPU_OPENCL
#    include "gromacs/gpu_utils/gpueventsynchronizer_ocl.h"
#endif
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/utility/classhelpers.h"
#include "gromacs/utility/enumerationhelpers.h"

namespace gmx
{

class StatePropagatorDataGpu::Impl
{
public:
    /*! \brief Constructor
     *
     * The buffers are reallocated only at the reinit call, the padding is
     * used there for the coordinates buffer. It is needed for PME and added at
     * the end of the buffer. It is assumed that if the rank has PME duties on the
     * GPU, all coordinates are copied to the GPU and hence, for this rank, the
     * coordinates buffer is not split into local and non-local ranges. For other
     * ranks, the padding size is zero. This works because only one rank ever does
     * PME work on the GPU, and if that rank also does PP work, it is the only
     * rank. So all coordinates are always transferred.
     *
     * In OpenCL, only \p pmeStream is used, since it is the only stream created in
     * the PME context. The local and non-local streams are only needed when buffer
     * ops are offloaded. This feature is currently not available in OpenCL and
     * hence these streams are not set in these builds.
     *
     * \note In CUDA, the update stream is created in the constructor as a temporary
     *       solution until the stream manager is introduced.
     *       Note that this makes it impossible to construct this object in CUDA
     *       builds executing on a host without any CUDA-capable device available.
     *
     * \note In CUDA, \p deviceContext is unused;
     *       all stream arguments can be nullptr in runs where the
     *       respective streams are not required.
     *       In OpenCL, \p deviceContext needs to be a valid device context.
     *       In OpenCL runs StatePropagatorDataGpu is currently only used
     *       with PME offload, and only on ranks with PME duty. Hence, the
     *       \p pmeStream argument needs to be a valid OpenCL queue object
     *       which must have been created in \p deviceContext.
     *
     * \param[in] pmeStream                  Device PME stream, nullptr allowed.
     * \param[in] localStream                Device NBNXM local stream, nullptr allowed.
     * \param[in] nonLocalStream             Device NBNXM non-local stream, nullptr allowed.
     * \param[in] deviceContext              Device context (only used in OpenCL builds).
     * \param[in] transferKind               H2D/D2H transfer call behavior (synchronous or not).
     * \param[in] allocationBlockSizeDivisor Determines the padding size for the coordinates buffer.
     * \param[in] wcycle                     Wall cycle counter data.
     */
    Impl(const DeviceStream*  pmeStream,
         const DeviceStream*  localStream,
         const DeviceStream*  nonLocalStream,
         const DeviceContext& deviceContext,
         GpuApiCallBehavior   transferKind,
         int                  allocationBlockSizeDivisor,
         gmx_wallcycle*       wcycle);
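
    /* A minimal construction sketch for a PP rank (illustrative only: the
     * stream, context, and wall-cycle objects are assumed to come from the
     * caller's GPU setup code, and the variable names are hypothetical):
     *
     *   StatePropagatorDataGpu::Impl stateGpu(&pmeStream, &localStream,
     *                                         &nonLocalStream, deviceContext,
     *                                         GpuApiCallBehavior::Async,
     *                                         1, // divisor of 1: no padding requested
     *                                         wcycle);
     */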
    /*! \brief Constructor to use on PME-only ranks and in tests.
     *
     * This constructor should be used if only a coordinate device buffer should be managed
     * using a single stream. Any operation on the force or velocity buffers, as well as
     * copies of non-local coordinates, will fail with an assertion. Note that \p pmeStream
     * can not be nullptr; the constructor will fail with an assertion if it is.
     *
     * \todo Currently, unsupported copy operations are blocked by an assertion that the
     *       stream is not nullptr. This should be improved.
     *
     * \param[in] pmeStream                  Device PME stream, nullptr is not allowed.
     * \param[in] deviceContext              Device context (only used in OpenCL builds).
     * \param[in] transferKind               H2D/D2H transfer call behavior (synchronous or not).
     * \param[in] allocationBlockSizeDivisor Determines the padding size for the coordinates buffer.
     * \param[in] wcycle                     Wall cycle counter data.
     */
    Impl(const DeviceStream*  pmeStream,
         const DeviceContext& deviceContext,
         GpuApiCallBehavior   transferKind,
         int                  allocationBlockSizeDivisor,
         gmx_wallcycle*       wcycle);
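
    /* A sketch of the PME-only/test construction path (hypothetical names;
     * in this mode only the coordinate buffer and the single PME stream are
     * usable, per the notes above):
     *
     *   StatePropagatorDataGpu::Impl stateGpu(&pmeStream, deviceContext,
     *                                         GpuApiCallBehavior::Sync,
     *                                         allocationBlockSizeDivisor, wcycle);
     */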
    /*! \brief Set the ranges for local and non-local atoms and reallocate buffers.
     *
     * Reallocates the coordinate, velocity, and force buffers on the device.
     *
     * The coordinates buffer is (re)allocated, when required by PME, with a padding,
     * the size of which is set by the constructor. The padding region clearing kernel
     * is scheduled in the \p pmeStream_ (unlike the coordinates H2D) as only the PME
     * task uses this padding area.
     *
     * The force buffer is cleared if its size increases, so that previously unused
     * memory is cleared before forces are accumulated.
     *
     * \param[in] numAtomsLocal Number of atoms in the local domain.
     * \param[in] numAtomsAll   Total number of atoms to handle.
     */
    void reinit(int numAtomsLocal, int numAtomsAll);
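
    /* Typical call pattern after a domain (re)partitioning step (a sketch;
     * the atom counts are illustrative):
     *
     *   stateGpu.reinit(numAtomsLocal, numAtomsAll); // e.g. (5000, 6500)
     */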
    /*! \brief Returns the range of atoms to be copied based on the copy type (all, local or non-local).
     *
     * \todo There are at least three versions of the function with this functionality in the code:
     *       this one and two more in NBNXM. These should be unified into a single general function.
     *
     * \param[in] atomLocality If all, local or non-local ranges are needed.
     *
     * \returns Tuple, containing the index of the first atom in the range and the total number of atoms in the range.
     */
    std::tuple<int, int> getAtomRangesFromAtomLocality(AtomLocality atomLocality);
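
    /* Usage sketch: unpacking the returned (first atom, number of atoms)
     * pair for a given locality (requires <tuple>; names are illustrative):
     *
     *   int atomsStartAt, numAtomsToCopy;
     *   std::tie(atomsStartAt, numAtomsToCopy) =
     *           getAtomRangesFromAtomLocality(AtomLocality::Local);
     */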
    /*! \brief Get the positions buffer on the GPU.
     *
     * \returns GPU positions buffer.
     */
    DeviceBuffer<RVec> getCoordinates();

    /*! \brief Copy positions to the GPU memory.
     *
     * \param[in] h_x          Positions in the host memory.
     * \param[in] atomLocality Locality of the particles to copy.
     */
    void copyCoordinatesToGpu(gmx::ArrayRef<const gmx::RVec> h_x, AtomLocality atomLocality);
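
    /* Sketch of an H2D coordinate copy for the local atoms only (h_x is
     * assumed to hold the host coordinates of the whole system):
     *
     *   stateGpu.copyCoordinatesToGpu(h_x, AtomLocality::Local);
     */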
    /*! \brief Get the event synchronizer of the coordinates ready for consumption on the device.
     *
     * Returns the event synchronizer which indicates that the coordinates are ready for
     * consumption on the device. Takes into account that the producer may be different.
     *
     * If the update is offloaded, and the current step is not a DD/search step, the returned
     * synchronizer indicates the completion of GPU update-constraint kernels. Otherwise, on search
     * steps and if update is not offloaded, the coordinates are provided by the H2D copy and the
     * returned synchronizer indicates that the copy is complete.
     *
     * \param[in] atomLocality   Locality of the particles to wait for.
     * \param[in] simulationWork The simulation lifetime flags.
     * \param[in] stepWork       The step lifetime flags.
     *
     * \returns The event to synchronize the stream that consumes coordinates on device.
     */
    GpuEventSynchronizer* getCoordinatesReadyOnDeviceEvent(AtomLocality              atomLocality,
                                                           const SimulationWorkload& simulationWork,
                                                           const StepWorkload&       stepWork);
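
    /* Sketch of how a consumer task would typically use the returned
     * synchronizer before launching a kernel that reads the coordinates
     * (the consumer stream and the workload objects are assumed to exist):
     *
     *   GpuEventSynchronizer* xReady = stateGpu.getCoordinatesReadyOnDeviceEvent(
     *           AtomLocality::Local, simulationWork, stepWork);
     *   xReady->enqueueWaitEvent(consumerStream);
     */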
    /*! \brief Blocking wait until coordinates are copied to the device.
     *
     * Synchronizes the stream in which the copy was executed.
     *
     * \param[in] atomLocality Locality of the particles to wait for.
     */
    void waitCoordinatesCopiedToDevice(AtomLocality atomLocality);

    /*! \brief Getter for the event synchronizer that marks when the update is done on the GPU.
     *
     * \returns The event to synchronize the stream in which coordinates were updated on the device.
     */
    GpuEventSynchronizer* xUpdatedOnDevice();
    /*! \brief Copy positions from the GPU memory.
     *
     * \param[in] h_x          Positions buffer in the host memory.
     * \param[in] atomLocality Locality of the particles to copy.
     */
    void copyCoordinatesFromGpu(gmx::ArrayRef<gmx::RVec> h_x, AtomLocality atomLocality);

    /*! \brief Wait until coordinates are available on the host.
     *
     * \param[in] atomLocality Locality of the particles to wait for.
     */
    void waitCoordinatesReadyOnHost(AtomLocality atomLocality);
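
    /* Sketch of the D2H path: schedule the copy, then block the host until
     * it has completed (names are illustrative):
     *
     *   stateGpu.copyCoordinatesFromGpu(h_x, AtomLocality::All);
     *   stateGpu.waitCoordinatesReadyOnHost(AtomLocality::All);
     */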
    /*! \brief Get the velocities buffer on the GPU.
     *
     * \returns GPU velocities buffer.
     */
    DeviceBuffer<RVec> getVelocities();

    /*! \brief Copy velocities to the GPU memory.
     *
     * \param[in] h_v          Velocities in the host memory.
     * \param[in] atomLocality Locality of the particles to copy.
     */
    void copyVelocitiesToGpu(gmx::ArrayRef<const gmx::RVec> h_v, AtomLocality atomLocality);

    /*! \brief Get the event synchronizer for the H2D velocities copy.
     *
     * \param[in] atomLocality Locality of the particles to wait for.
     *
     * \returns The event to synchronize the stream that consumes velocities on device.
     */
    GpuEventSynchronizer* getVelocitiesReadyOnDeviceEvent(AtomLocality atomLocality);

    /*! \brief Copy velocities from the GPU memory.
     *
     * \param[in] h_v          Velocities buffer in the host memory.
     * \param[in] atomLocality Locality of the particles to copy.
     */
    void copyVelocitiesFromGpu(gmx::ArrayRef<gmx::RVec> h_v, AtomLocality atomLocality);

    /*! \brief Wait until velocities are available on the host.
     *
     * \param[in] atomLocality Locality of the particles to wait for.
     */
    void waitVelocitiesReadyOnHost(AtomLocality atomLocality);
    /*! \brief Get the force buffer on the GPU.
     *
     * \returns GPU force buffer.
     */
    DeviceBuffer<RVec> getForces();

    /*! \brief Copy forces to the GPU memory.
     *
     * \param[in] h_f          Forces in the host memory.
     * \param[in] atomLocality Locality of the particles to copy.
     */
    void copyForcesToGpu(gmx::ArrayRef<const gmx::RVec> h_f, AtomLocality atomLocality);

    /*! \brief Get the event synchronizer for the forces ready on device.
     *
     * Returns one of two event synchronizers, depending on the offload scenario
     * for the current simulation timestep:
     * 1. The forces are copied to the device (when GPU buffer ops are off)
     * 2. The forces are reduced on the device (when GPU buffer ops are on)
     *
     * \todo Pass the step workload instead of the useGpuFBufferOps boolean.
     *
     * \param[in] atomLocality     Locality of the particles to wait for.
     * \param[in] useGpuFBufferOps If the force buffer ops are offloaded to the GPU.
     *
     * \returns The event to synchronize the stream that consumes forces on device.
     */
    GpuEventSynchronizer* getForcesReadyOnDeviceEvent(AtomLocality atomLocality, bool useGpuFBufferOps);
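
    /* Sketch: the event to wait on depends on whether the force buffer ops
     * run on the GPU on this step (stepWork.useGpuFBufferOps is assumed to
     * carry that flag):
     *
     *   GpuEventSynchronizer* fReady = stateGpu.getForcesReadyOnDeviceEvent(
     *           AtomLocality::Local, stepWork.useGpuFBufferOps);
     *   fReady->enqueueWaitEvent(consumerStream);
     */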
    /*! \brief Getter for the event synchronizer that marks when forces are reduced on the GPU.
     *
     * \returns The event to mark when forces are reduced on the GPU.
     */
    GpuEventSynchronizer* fReducedOnDevice();

    /*! \brief Copy forces from the GPU memory.
     *
     * \param[in] h_f          Forces buffer in the host memory.
     * \param[in] atomLocality Locality of the particles to copy.
     */
    void copyForcesFromGpu(gmx::ArrayRef<gmx::RVec> h_f, AtomLocality atomLocality);

    /*! \brief Wait until forces are available on the host.
     *
     * \param[in] atomLocality Locality of the particles to wait for.
     */
    void waitForcesReadyOnHost(AtomLocality atomLocality);

    /*! \brief Getter for the update stream.
     *
     * \todo This is temporary here, until the management of this stream is taken over
     *       by the stream manager.
     *
     * \returns The device command stream to use in update-constraints.
     */
    const DeviceStream* getUpdateStream();
    /*! \brief Getter for the number of local atoms.
     *
     * \returns The number of local atoms.
     */
    int numAtomsLocal();

    /*! \brief Getter for the total number of atoms.
     *
     * \returns The total number of atoms.
     */
    int numAtomsAll();

private:
    //! GPU PME stream.
    const DeviceStream* pmeStream_;
    //! GPU NBNXM local stream.
    const DeviceStream* localStream_;
    //! GPU NBNXM non-local stream.
    const DeviceStream* nonLocalStream_;
    //! GPU update-constraints stream.
    const DeviceStream* updateStream_;

    //! An owning object for the update stream, in case we manage its lifetime here. Temporary.
    DeviceStream updateStreamOwn_;
    //! Streams to use for coordinates H2D and D2H copies (one stream for each atom locality)
    EnumerationArray<AtomLocality, const DeviceStream*> xCopyStreams_ = { { nullptr } };
    //! Streams to use for velocities H2D and D2H copies (one stream for each atom locality)
    EnumerationArray<AtomLocality, const DeviceStream*> vCopyStreams_ = { { nullptr } };
    //! Streams to use for forces H2D and D2H copies (one stream for each atom locality)
    EnumerationArray<AtomLocality, const DeviceStream*> fCopyStreams_ = { { nullptr } };
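
    /* The per-locality stream tables above are filled at construction time;
     * a plausible mapping (a sketch only, the authoritative assignment lives
     * in the constructor) is:
     *
     *   fCopyStreams_[AtomLocality::Local]    = localStream_;
     *   fCopyStreams_[AtomLocality::NonLocal] = nonLocalStream_;
     *   fCopyStreams_[AtomLocality::All]      = localStream_;
     */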
    /*! \brief An array of events that indicate H2D copy is complete (one event for each atom locality)
     *
     * \todo Reconsider naming. It should be xCopiedToDevice or xH2DCopyComplete, etc.
     */
    EnumerationArray<AtomLocality, GpuEventSynchronizer> xReadyOnDevice_;
    //! An event signaling that the coordinates are ready after update-constraints execution
    GpuEventSynchronizer xUpdatedOnDevice_;
    //! An array of events that indicate D2H copy of coordinates is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> xReadyOnHost_;

    //! An array of events that indicate H2D copy of velocities is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> vReadyOnDevice_;
    //! An array of events that indicate D2H copy of velocities is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> vReadyOnHost_;

    //! An array of events that indicate H2D copy of forces is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> fReadyOnDevice_;
    //! An event signaling that the forces were reduced on the GPU
    GpuEventSynchronizer fReducedOnDevice_;
    //! An array of events that indicate D2H copy of forces is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> fReadyOnHost_;
    //! GPU context (for OpenCL builds)
    const DeviceContext& deviceContext_;
    //! Default GPU call behavior
    GpuApiCallBehavior transferKind_ = GpuApiCallBehavior::Async;
    //! Required minimum divisor of the allocation size of the coordinates buffer
    int allocationBlockSizeDivisor_ = 0;

    //! Number of local atoms
    int numAtomsLocal_ = -1;
    //! Total number of atoms
    int numAtomsAll_ = -1;

    //! Device positions buffer
    DeviceBuffer<RVec> d_x_;
    //! Number of particles saved in the positions buffer
    int d_xSize_ = -1;
    //! Allocation size for the positions buffer
    int d_xCapacity_ = -1;

    //! Device velocities buffer
    DeviceBuffer<RVec> d_v_;
    //! Number of particles saved in the velocities buffer
    int d_vSize_ = -1;
    //! Allocation size for the velocities buffer
    int d_vCapacity_ = -1;

    //! Device force buffer
    DeviceBuffer<RVec> d_f_;
    //! Number of particles saved in the force buffer
    int d_fSize_ = -1;
    //! Allocation size for the force buffer
    int d_fCapacity_ = -1;

    //! Pointer to the wallcycle structure.
    gmx_wallcycle* wcycle_;
    /*! \brief Performs the copy of data from host to device buffer.
     *
     * \todo Template on locality.
     *
     * \param[out] d_data       Device-side buffer.
     * \param[in]  h_data       Host-side buffer.
     * \param[in]  dataSize     Device-side data allocation size.
     * \param[in]  atomLocality If all, local or non-local ranges should be copied.
     * \param[in]  deviceStream GPU stream to execute copy in.
     */
    void copyToDevice(DeviceBuffer<RVec>             d_data,
                      gmx::ArrayRef<const gmx::RVec> h_data,
                      int                            dataSize,
                      AtomLocality                   atomLocality,
                      const DeviceStream&            deviceStream);
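
    /* Internal flow sketch for copyToDevice (simplified; assumes the
     * copyToDeviceBuffer helper from gromacs/gpu_utils/devicebuffer.h and
     * no timing event):
     *
     *   int atomsStartAt, numAtomsToCopy;
     *   std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);
     *   if (numAtomsToCopy != 0)
     *   {
     *       copyToDeviceBuffer(&d_data, h_data.data() + atomsStartAt, atomsStartAt,
     *                          numAtomsToCopy, deviceStream, transferKind_, nullptr);
     *   }
     */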
    /*! \brief Performs the copy of data from device to host buffer.
     *
     * \param[out] h_data       Host-side buffer.
     * \param[in]  d_data       Device-side buffer.
     * \param[in]  dataSize     Device-side data allocation size.
     * \param[in]  atomLocality If all, local or non-local ranges should be copied.
     * \param[in]  deviceStream GPU stream to execute copy in.
     */
    void copyFromDevice(gmx::ArrayRef<gmx::RVec> h_data,
                        DeviceBuffer<RVec>       d_data,
                        int                      dataSize,
                        AtomLocality             atomLocality,
                        const DeviceStream&      deviceStream);
};

} // namespace gmx

#endif // GMX_MDTYPES_STATE_PROPAGATOR_DATA_GPU_IMPL_H