/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Declaration of low-level functions and fields of GPU state propagator object.
 *
 * \author Artem Zhmurov <zhmurov@gmail.com>
 *
 * \ingroup module_mdtypes
 */
#ifndef GMX_MDTYPES_STATE_PROPAGATOR_DATA_GPU_IMPL_H
#define GMX_MDTYPES_STATE_PROPAGATOR_DATA_GPU_IMPL_H
#include "gromacs/gpu_utils/devicebuffer.h"
#if GMX_GPU == GMX_GPU_CUDA
#    include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#elif GMX_GPU == GMX_GPU_OPENCL
#    include "gromacs/gpu_utils/gpueventsynchronizer_ocl.h"
#endif
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/utility/classhelpers.h"
#include "gromacs/utility/enumerationhelpers.h"
namespace gmx
{

class StatePropagatorDataGpu::Impl
{
public:
    /*! \brief Constructor
     *
     * The buffers are reallocated only at the reinit call; the padding is
     * used there for the coordinates buffer. It is needed for PME and is added at
     * the end of the buffer. It is assumed that if the rank has PME duties on the
     * GPU, all coordinates are copied to the GPU and hence, for this rank, the
     * coordinates buffer is not split into local and non-local ranges. For other
     * ranks, the padding size is zero. This works because only one rank ever does
     * PME work on the GPU, and if that rank also does PP work, it is the only
     * rank. So all coordinates are always transferred.
     *
     * In OpenCL, only \p pmeStream is used since it is the only stream created in
     * the PME context. The local and non-local streams are only needed when buffer
     * ops are offloaded. This feature is currently not available in OpenCL and
     * hence these streams are not set in these builds.
     *
     * \note In CUDA, the update stream is created in the constructor as a temporary
     *       solution, in place until the stream manager is introduced.
     *       Note that this makes it impossible to construct this object in CUDA
     *       builds executing on a host without any CUDA-capable device available.
     *
     * \note In CUDA, \p deviceContext is unused, hence always nullptr;
     *       all stream arguments can also be nullptr in runs where the
     *       respective streams are not required.
     *       In OpenCL, \p deviceContext needs to be a valid device context.
     *       In OpenCL runs StatePropagatorDataGpu is currently only used
     *       with PME offload, and only on ranks with PME duty. Hence, the
     *       \p pmeStream argument needs to be a valid OpenCL queue object
     *       which must have been created in \p deviceContext.
     *
     * \todo Make a \p CommandStream visible in the CPU parts of the code so we
     *       will not have to pass a void*.
     * \todo Make a \p DeviceContext object visible in CPU parts of the code so we
     *       will not have to pass a void*.
     *
     * \param[in] pmeStream       Device PME stream, nullptr allowed.
     * \param[in] localStream     Device NBNXM local stream, nullptr allowed.
     * \param[in] nonLocalStream  Device NBNXM non-local stream, nullptr allowed.
     * \param[in] deviceContext   Device context, nullptr allowed.
     * \param[in] transferKind    H2D/D2H transfer call behavior (synchronous or not).
     * \param[in] paddingSize     Padding size for coordinates buffer.
     */
    Impl(const void *pmeStream,
         const void *localStream,
         const void *nonLocalStream,
         const void *deviceContext,
         GpuApiCallBehavior transferKind,
         int paddingSize);
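
    /* A minimal usage sketch for the full constructor (illustrative only; the stream
     * and context handles, the atom counts, and the padding value are assumed to come
     * from the caller's GPU setup code and are not defined in this header):
     *
     *   StatePropagatorDataGpu::Impl stateGpu(pmeStream, localStream, nonLocalStream,
     *                                         deviceContext, GpuApiCallBehavior::Async,
     *                                         paddingSize);
     *   stateGpu.reinit(numAtomsLocal, numAtomsAll);
     *   stateGpu.copyCoordinatesToGpu(h_x, AtomLocality::Local);
     */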
    /*! \brief Constructor to use in PME-only rank and in tests.
     *
     * This constructor should be used if only the coordinate device buffer should be managed
     * using a single stream. Any operation on the force or velocity buffers, as well as copying
     * of non-local coordinates, will exit with an assertion failure. Note that \p pmeStream
     * cannot be nullptr; if it is, the constructor will exit with an assertion failure.
     *
     * \todo Currently, unsupported copy operations are blocked by an assertion that the stream
     *       is not nullptr. This should be improved.
     *
     * \param[in] pmeStream      Device PME stream, nullptr is not allowed.
     * \param[in] deviceContext  Device context, nullptr allowed for non-OpenCL builds.
     * \param[in] transferKind   H2D/D2H transfer call behavior (synchronous or not).
     * \param[in] paddingSize    Padding size for coordinates buffer.
     */
    Impl(const void *pmeStream,
         const void *deviceContext,
         GpuApiCallBehavior transferKind,
         int paddingSize);
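
    /* PME-only / test usage sketch (illustrative; assumes a valid PME stream and, for
     * OpenCL, a valid device context from the caller). Only single-stream coordinate
     * operations are expected to be valid on an object built this way:
     *
     *   StatePropagatorDataGpu::Impl stateGpu(pmeStream, deviceContext,
     *                                         GpuApiCallBehavior::Async, paddingSize);
     *   stateGpu.reinit(numAtoms, numAtoms);
     *   stateGpu.copyCoordinatesToGpu(h_x, AtomLocality::All);
     */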
    /*! \brief Set the ranges for local and non-local atoms and reallocate buffers.
     *
     * The coordinates buffer is (re)allocated, when required by PME, with a padding,
     * the size of which is set by the constructor. The padding region clearing kernel
     * is scheduled in the \p pmeStream_ (unlike the coordinates H2D) as only the PME
     * task uses this padding area.
     *
     * \param[in] numAtomsLocal  Number of atoms in local domain.
     * \param[in] numAtomsAll    Total number of atoms to handle.
     */
    void reinit(int numAtomsLocal, int numAtomsAll);

    /*! \brief Returns the range of atoms to be copied based on the copy type (all, local or non-local).
     *
     * \todo There are at least three versions of the function with this functionality in the code:
     *       this one and two more in NBNXM. These should be unified into a general function.
     *
     * \param[in] atomLocality  If all, local or non-local ranges are needed.
     *
     * \returns Tuple containing the index of the first atom in the range and the total number of atoms in the range.
     */
    std::tuple<int, int> getAtomRangesFromAtomLocality(AtomLocality atomLocality);
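
    /* Sketch of the intended mapping (illustrative values, assuming local atoms precede
     * non-local atoms in the buffers): with 100 local atoms out of 150 in total,
     *
     *   AtomLocality::Local    -> (0, 100)   start of the buffer, local atoms only
     *   AtomLocality::NonLocal -> (100, 50)  atoms after the local range
     *   AtomLocality::All      -> (0, 150)   the whole buffer
     *
     * Typical caller-side unpacking of the returned tuple:
     *
     *   int atomsStartAt, numAtomsToCopy;
     *   std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);
     */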

    /*! \brief Get the positions buffer on the GPU.
     *
     * \returns GPU positions buffer.
     */
    DeviceBuffer<float> getCoordinates();

    /*! \brief Copy positions to the GPU memory.
     *
     * \param[in] h_x           Positions in the host memory.
     * \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyCoordinatesToGpu(gmx::ArrayRef<const gmx::RVec> h_x,
                              AtomLocality atomLocality);

    /*! \brief Get the event synchronizer of the coordinates ready for consumption on the device.
     *
     * Returns the event synchronizer which indicates that the coordinates are ready for
     * consumption on the device. Takes into account that the producer may be different.
     *
     * If the update is offloaded, and the current step is not a DD/search step, the returned
     * synchronizer indicates the completion of GPU update-constraint kernels. Otherwise, on search
     * steps and if update is not offloaded, the coordinates are provided by the H2D copy and the
     * returned synchronizer indicates that the copy is complete.
     *
     * \param[in] atomLocality    Locality of the particles to wait for.
     * \param[in] simulationWork  The simulation lifetime flags.
     * \param[in] stepWork        The step lifetime flags.
     *
     * \returns The event to synchronize the stream that consumes coordinates on device.
     */
    GpuEventSynchronizer* getCoordinatesReadyOnDeviceEvent(AtomLocality atomLocality,
                                                           const SimulationWorkload &simulationWork,
                                                           const StepWorkload &stepWork);
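
    /* Consumer-side sketch (hedged): a task that reads coordinates on the device would
     * typically make its own stream wait on the returned synchronizer before launching
     * kernels; consumerStream below is an assumed caller-owned stream.
     *
     *   GpuEventSynchronizer* xReady = stateGpu.getCoordinatesReadyOnDeviceEvent(
     *           AtomLocality::Local, simulationWork, stepWork);
     *   xReady->enqueueWaitEvent(consumerStream);
     */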

    /*! \brief Blocking wait until coordinates are copied to the device.
     *
     * Synchronizes the stream in which the copy was executed.
     *
     * \param[in] atomLocality  Locality of the particles to wait for.
     */
    void waitCoordinatesCopiedToDevice(AtomLocality atomLocality);

    /*! \brief Getter for the event synchronizer for the update being done on the GPU.
     *
     * \returns The event to synchronize the stream after coordinates were updated on the device.
     */
    GpuEventSynchronizer* xUpdatedOnDevice();

    /*! \brief Copy positions from the GPU memory.
     *
     * \param[out] h_x           Positions buffer in the host memory.
     * \param[in]  atomLocality  Locality of the particles to copy.
     */
    void copyCoordinatesFromGpu(gmx::ArrayRef<gmx::RVec> h_x,
                                AtomLocality atomLocality);

    /*! \brief Wait until coordinates are available on the host.
     *
     * \param[in] atomLocality  Locality of the particles to wait for.
     */
    void waitCoordinatesReadyOnHost(AtomLocality atomLocality);

    /*! \brief Get the velocities buffer on the GPU.
     *
     * \returns GPU velocities buffer.
     */
    DeviceBuffer<float> getVelocities();

    /*! \brief Copy velocities to the GPU memory.
     *
     * \param[in] h_v           Velocities in the host memory.
     * \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyVelocitiesToGpu(gmx::ArrayRef<const gmx::RVec> h_v,
                             AtomLocality atomLocality);

    /*! \brief Get the event synchronizer for the H2D velocities copy.
     *
     * \param[in] atomLocality  Locality of the particles to wait for.
     *
     * \returns The event to synchronize the stream that consumes velocities on device.
     */
    GpuEventSynchronizer* getVelocitiesReadyOnDeviceEvent(AtomLocality atomLocality);

    /*! \brief Copy velocities from the GPU memory.
     *
     * \param[out] h_v           Velocities buffer in the host memory.
     * \param[in]  atomLocality  Locality of the particles to copy.
     */
    void copyVelocitiesFromGpu(gmx::ArrayRef<gmx::RVec> h_v,
                               AtomLocality atomLocality);

    /*! \brief Wait until velocities are available on the host.
     *
     * \param[in] atomLocality  Locality of the particles to wait for.
     */
    void waitVelocitiesReadyOnHost(AtomLocality atomLocality);

    /*! \brief Get the force buffer on the GPU.
     *
     * \returns GPU force buffer.
     */
    DeviceBuffer<float> getForces();

    /*! \brief Copy forces to the GPU memory.
     *
     * \param[in] h_f           Forces in the host memory.
     * \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyForcesToGpu(gmx::ArrayRef<const gmx::RVec> h_f,
                         AtomLocality atomLocality);

    /*! \brief Get the event synchronizer for the forces ready on device.
     *
     * Returns one of two event synchronizers, depending on the offload scenario
     * for the current simulation timestep:
     * 1. The forces are copied to the device (when GPU buffer ops are off).
     * 2. The forces are reduced on the device (when GPU buffer ops are on).
     *
     * \todo Pass the step workload instead of the useGpuFBufferOps boolean.
     *
     * \param[in] atomLocality      Locality of the particles to wait for.
     * \param[in] useGpuFBufferOps  If the force buffer ops are offloaded to the GPU.
     *
     * \returns The event to synchronize the stream that consumes forces on device.
     */
    GpuEventSynchronizer* getForcesReadyOnDeviceEvent(AtomLocality atomLocality,
                                                      bool useGpuFBufferOps);
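
    /* Hedged consumer sketch: the caller passes whether GPU force buffer ops are active
     * for this step and synchronizes on the matching producer's event; consumerStream is
     * an assumed caller-owned stream.
     *
     *   GpuEventSynchronizer* fReady =
     *           stateGpu.getForcesReadyOnDeviceEvent(AtomLocality::Local, useGpuFBufferOps);
     *   fReady->enqueueWaitEvent(consumerStream);
     */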

    /*! \brief Getter for the event synchronizer that marks when forces are reduced on the GPU.
     *
     * \returns The event to mark when forces are reduced on the GPU.
     */
    GpuEventSynchronizer* fReducedOnDevice();

    /*! \brief Copy forces from the GPU memory.
     *
     * \param[out] h_f           Forces buffer in the host memory.
     * \param[in]  atomLocality  Locality of the particles to copy.
     */
    void copyForcesFromGpu(gmx::ArrayRef<gmx::RVec> h_f,
                           AtomLocality atomLocality);

    /*! \brief Wait until forces are available on the host.
     *
     * \param[in] atomLocality  Locality of the particles to wait for.
     */
    void waitForcesReadyOnHost(AtomLocality atomLocality);

    /*! \brief Getter for the update stream.
     *
     * \todo This is temporary here, until the management of this stream is taken over.
     *
     * \returns The device command stream to use in update-constraints.
     */
    void* getUpdateStream();

    /*! \brief Getter for the number of local atoms.
     *
     * \returns The number of local atoms.
     */
    int numAtomsLocal();

    /*! \brief Getter for the total number of atoms.
     *
     * \returns The total number of atoms.
     */
    int numAtomsAll();

private:
    //! GPU PME stream.
    CommandStream pmeStream_ = nullptr;
    //! GPU NBNXM local stream.
    CommandStream localStream_ = nullptr;
    //! GPU NBNXM non-local stream.
    CommandStream nonLocalStream_ = nullptr;
    //! GPU update-constraints stream.
    CommandStream updateStream_ = nullptr;

    //! Streams to use for coordinates H2D and D2H copies (one stream for each atom locality).
    EnumerationArray<AtomLocality, CommandStream> xCopyStreams_ = {{nullptr}};
    //! Streams to use for velocities H2D and D2H copies (one stream for each atom locality).
    EnumerationArray<AtomLocality, CommandStream> vCopyStreams_ = {{nullptr}};
    //! Streams to use for forces H2D and D2H copies (one stream for each atom locality).
    EnumerationArray<AtomLocality, CommandStream> fCopyStreams_ = {{nullptr}};

    /*! \brief An array of events that indicate H2D copy is complete (one event for each atom locality).
     *
     * \todo Reconsider naming. It should be xCopiedToDevice or xH2DCopyComplete, etc.
     */
    EnumerationArray<AtomLocality, GpuEventSynchronizer> xReadyOnDevice_;
    //! An event that the coordinates are ready after update-constraints execution.
    GpuEventSynchronizer xUpdatedOnDevice_;
    //! An array of events that indicate D2H copy of coordinates is complete (one event for each atom locality).
    EnumerationArray<AtomLocality, GpuEventSynchronizer> xReadyOnHost_;

    //! An array of events that indicate H2D copy of velocities is complete (one event for each atom locality).
    EnumerationArray<AtomLocality, GpuEventSynchronizer> vReadyOnDevice_;
    //! An array of events that indicate D2H copy of velocities is complete (one event for each atom locality).
    EnumerationArray<AtomLocality, GpuEventSynchronizer> vReadyOnHost_;

    //! An array of events that indicate H2D copy of forces is complete (one event for each atom locality).
    EnumerationArray<AtomLocality, GpuEventSynchronizer> fReadyOnDevice_;
    //! An event that the forces were reduced on the GPU.
    GpuEventSynchronizer fReducedOnDevice_;
    //! An array of events that indicate D2H copy of forces is complete (one event for each atom locality).
    EnumerationArray<AtomLocality, GpuEventSynchronizer> fReadyOnHost_;

    /*! \brief GPU context (for OpenCL builds).
     * \todo Make a Context class usable in CPU code.
     */
    DeviceContext deviceContext_ = nullptr;
    //! Default GPU calls behavior.
    GpuApiCallBehavior transferKind_ = GpuApiCallBehavior::Async;
    //! Padding size for the coordinates buffer.
    int paddingSize_ = 0;

    //! Number of local atoms.
    int numAtomsLocal_ = -1;
    //! Total number of atoms.
    int numAtomsAll_ = -1;

    //! Device positions buffer.
    DeviceBuffer<float> d_x_;
    //! Number of particles saved in the positions buffer.
    int d_xSize_ = -1;
    //! Allocation size for the positions buffer.
    int d_xCapacity_ = -1;

    //! Device velocities buffer.
    DeviceBuffer<float> d_v_;
    //! Number of particles saved in the velocities buffer.
    int d_vSize_ = -1;
    //! Allocation size for the velocities buffer.
    int d_vCapacity_ = -1;

    //! Device force buffer.
    DeviceBuffer<float> d_f_;
    //! Number of particles saved in the force buffer.
    int d_fSize_ = -1;
    //! Allocation size for the force buffer.
    int d_fCapacity_ = -1;

    /*! \brief Performs the copy of data from host to device buffer.
     *
     * \todo Template on locality.
     *
     * \param[out] d_data         Device-side buffer.
     * \param[in]  h_data         Host-side buffer.
     * \param[in]  dataSize       Device-side data allocation size.
     * \param[in]  atomLocality   If all, local or non-local ranges should be copied.
     * \param[in]  commandStream  GPU stream to execute copy in.
     */
    void copyToDevice(DeviceBuffer<float> d_data,
                      gmx::ArrayRef<const gmx::RVec> h_data,
                      int dataSize,
                      AtomLocality atomLocality,
                      CommandStream commandStream);
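
    /* Illustrative sketch of what this helper is expected to do (the copyToDeviceBuffer
     * call follows gromacs/gpu_utils/devicebuffer.h; treat the exact arguments as an
     * assumption of this sketch, not a specification):
     *
     *   int atomsStartAt, numAtomsToCopy;
     *   std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);
     *   // Three floats (x, y, z) per atom, copied into the matching range of d_data.
     *   copyToDeviceBuffer(&d_data, reinterpret_cast<const float*>(&h_data[atomsStartAt]),
     *                      atomsStartAt * DIM, numAtomsToCopy * DIM,
     *                      commandStream, transferKind_, nullptr);
     */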

    /*! \brief Performs the copy of data from device to host buffer.
     *
     * \param[out] h_data         Host-side buffer.
     * \param[in]  d_data         Device-side buffer.
     * \param[in]  dataSize       Device-side data allocation size.
     * \param[in]  atomLocality   If all, local or non-local ranges should be copied.
     * \param[in]  commandStream  GPU stream to execute copy in.
     */
    void copyFromDevice(gmx::ArrayRef<gmx::RVec> h_data,
                        DeviceBuffer<float> d_data,
                        int dataSize,
                        AtomLocality atomLocality,
                        CommandStream commandStream);
};

} // namespace gmx

#endif // GMX_MDTYPES_STATE_PROPAGATOR_DATA_GPU_IMPL_H