/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "config.h"
+#include <memory>
+
#include "gromacs/gpu_utils/devicebuffer.h"
-#if GMX_GPU == GMX_GPU_CUDA
-# include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
-#elif GMX_GPU == GMX_GPU_OPENCL
-# include "gromacs/gpu_utils/gpueventsynchronizer_ocl.h"
-#endif
+#include "gromacs/gpu_utils/gpueventsynchronizer.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
-#include "gromacs/utility/classhelpers.h"
#include "gromacs/utility/enumerationhelpers.h"
struct gmx_wallcycle;
* ops are offloaded. This feature is currently not available in OpenCL and
* hence these streams are not set in these builds.
*
- * \note In CUDA, the update stream is created in the constructor as a temporary
- * solution, in place until the stream manager is introduced.
- * Note that this makes it impossible to construct this object in CUDA
- * builds executing on a host without any CUDA-capable device available.
- *
- * \note In CUDA, \p deviceContext is unused, hence always nullptr;
- * all stream arguments can also be nullptr in runs where the
- * respective streams are not required.
- * In OpenCL, \p deviceContext needs to be a valid device context.
- * In OpenCL runs StatePropagatorDataGpu is currently only used
- * with PME offload, and only on ranks with PME duty. Hence, the
- * \p pmeStream argument needs to be a valid OpenCL queue object
- * which must have been created in \p deviceContext.
- *
- * \todo Make a \p CommandStream visible in the CPU parts of the code so we
- * will not have to pass a void*.
- * \todo Make a \p DeviceContext object visible in CPU parts of the code so we
- * will not have to pass a void*.
- *
- * \param[in] pmeStream Device PME stream, nullptr allowed.
- * \param[in] localStream Device NBNXM local stream, nullptr allowed.
- * \param[in] nonLocalStream Device NBNXM non-local stream, nullptr allowed.
- * \param[in] deviceContext Device context, nullptr allowed.
- * \param[in] transferKind H2D/D2H transfer call behavior (synchronous or not).
- * \param[in] paddingSize Padding size for coordinates buffer.
- * \param[in] wcycle Wall cycle counter data.
+ * \param[in] deviceStreamManager Object that owns the DeviceContext and DeviceStreams.
+ * \param[in] transferKind H2D/D2H transfer call behavior (synchronous or not).
+ * \param[in] allocationBlockSizeDivisor Determines the padding size for the coordinates buffer.
+ * \param[in] wcycle Wall cycle counter data.
*/
- Impl(const void* pmeStream,
- const void* localStream,
- const void* nonLocalStream,
- const void* deviceContext,
- GpuApiCallBehavior transferKind,
- int paddingSize,
- gmx_wallcycle* wcycle);
+ Impl(const DeviceStreamManager& deviceStreamManager,
+ GpuApiCallBehavior transferKind,
+ int allocationBlockSizeDivisor,
+ gmx_wallcycle* wcycle);
/*! \brief Constructor to use in PME-only rank and in tests.
*
* \param[in] pmeStream Device PME stream, nullptr is not allowed.
- * \param[in] deviceContext Device context, nullptr allowed for non-OpenCL builds.
+ * \param[in] deviceContext Device context.
* \param[in] transferKind H2D/D2H transfer call behavior (synchronous or not).
- * \param[in] paddingSize Padding size for coordinates buffer.
+ * \param[in] allocationBlockSizeDivisor Determines the padding size for the coordinates buffer.
* \param[in] wcycle Wall cycle counter data.
*/
- Impl(const void* pmeStream,
- const void* deviceContext,
- GpuApiCallBehavior transferKind,
- int paddingSize,
- gmx_wallcycle* wcycle);
+ Impl(const DeviceStream* pmeStream,
+ const DeviceContext& deviceContext,
+ GpuApiCallBehavior transferKind,
+ int allocationBlockSizeDivisor,
+ gmx_wallcycle* wcycle);
~Impl();
*
* \returns Tuple, containing the index of the first atom in the range and the total number of atoms in the range.
*/
- std::tuple<int, int> getAtomRangesFromAtomLocality(AtomLocality atomLocality);
+ std::tuple<int, int> getAtomRangesFromAtomLocality(AtomLocality atomLocality) const;
/*! \brief Get the positions buffer on the GPU.
* \param[in] atomLocality Locality of the particles to wait for.
* \param[in] simulationWork The simulation lifetime flags.
* \param[in] stepWork The step lifetime flags.
+ * \param[in] gpuCoordinateHaloLaunched Event recorded when GPU coordinate halo has been launched.
*
* \returns The event to synchronize the stream that consumes coordinates on device.
*/
GpuEventSynchronizer* getCoordinatesReadyOnDeviceEvent(AtomLocality atomLocality,
const SimulationWorkload& simulationWork,
- const StepWorkload& stepWork);
+ const StepWorkload& stepWork,
+ GpuEventSynchronizer* gpuCoordinateHaloLaunched = nullptr);
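+
+ /* A hypothetical usage sketch (illustrative, not part of this change): a consumer
+ * running in another device stream can enqueue a wait on the returned synchronizer
+ * instead of blocking the host. The names stateGpu and consumerStream are assumed
+ * caller-side objects.
+ *
+ * \code
+ *   GpuEventSynchronizer* xReady = stateGpu->getCoordinatesReadyOnDeviceEvent(
+ *           AtomLocality::Local, simulationWork, stepWork);
+ *   xReady->enqueueWaitEvent(consumerStream);
+ * \endcode
+ */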
/*! \brief Blocking wait until coordinates are copied to the device.
*
*/
void waitCoordinatesCopiedToDevice(AtomLocality atomLocality);
- /*! \brief Getter for the event synchronizer for the update is done on th GPU
+ /*! \brief Consume the event for copying coordinates to the device.
+ *
+ * Used for manual event consumption. Does nothing except changing the internal event counter.
+ *
+ * \param[in] atomLocality Locality of the particles.
+ */
+ void consumeCoordinatesCopiedToDeviceEvent(AtomLocality atomLocality);
+
+ /*! \brief Reset the event for copying coordinates to the device.
+ *
+ * Used for manual event consumption. Does nothing except resetting the event.
+ *
+ * \param[in] atomLocality Locality of the particles.
+ */
+ void resetCoordinatesCopiedToDeviceEvent(AtomLocality atomLocality);
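+
+ /* A hypothetical sketch of the manual-consumption pattern these two methods enable
+ * (illustrative only): when the H2D-copy event is synchronized on through some other
+ * mechanism, the consumption count still has to be balanced by hand, and the event
+ * reset before it is re-recorded on the next step.
+ *
+ * \code
+ *   stateGpu->consumeCoordinatesCopiedToDeviceEvent(AtomLocality::NonLocal);
+ *   // ... later, before the next H2D copy records the event again:
+ *   stateGpu->resetCoordinatesCopiedToDeviceEvent(AtomLocality::NonLocal);
+ * \endcode
+ */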
+
+ /*! \brief Setter for the event synchronizer for when the update is done on the GPU
*
- * \returns The event to synchronize the stream coordinates wre updated on device.
+ * \param[in] xUpdatedOnDeviceEvent The event to synchronize the stream in which coordinates were updated on the device.
*/
- GpuEventSynchronizer* xUpdatedOnDevice();
+ void setXUpdatedOnDeviceEvent(GpuEventSynchronizer* xUpdatedOnDeviceEvent);
- /*! \brief Copy positions from the GPU memory.
+ /*! \brief Copy positions from the GPU memory, with an optional explicit dependency.
*
* \param[in] h_x Positions buffer in the host memory.
* \param[in] atomLocality Locality of the particles to copy.
+ * \param[in] dependency Dependency event for this operation.
*/
- void copyCoordinatesFromGpu(gmx::ArrayRef<gmx::RVec> h_x, AtomLocality atomLocality);
+ void copyCoordinatesFromGpu(gmx::ArrayRef<gmx::RVec> h_x,
+ AtomLocality atomLocality,
+ GpuEventSynchronizer* dependency = nullptr);
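+
+ /* A hypothetical usage sketch (illustrative only): passing the event recorded when the
+ * GPU integrator finished as the explicit dependency makes the D2H copy wait for the
+ * updated coordinates. xUpdatedOnDeviceEvent is an assumed caller-side synchronizer.
+ *
+ * \code
+ *   stateGpu->copyCoordinatesFromGpu(h_x, AtomLocality::Local, xUpdatedOnDeviceEvent);
+ *   stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
+ * \endcode
+ */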
/*! \brief Wait until coordinates are available on the host.
*
*/
void copyVelocitiesToGpu(gmx::ArrayRef<const gmx::RVec> h_v, AtomLocality atomLocality);
- /*! \brief Get the event synchronizer on the H2D velocities copy.
- *
- * \param[in] atomLocality Locality of the particles to wait for.
- *
- * \returns The event to synchronize the stream that consumes velocities on device.
- */
- GpuEventSynchronizer* getVelocitiesReadyOnDeviceEvent(AtomLocality atomLocality);
-
/*! \brief Copy velocities from the GPU memory.
*
* \param[in] h_v Velocities buffer in the host memory.
*/
void copyForcesToGpu(gmx::ArrayRef<const gmx::RVec> h_f, AtomLocality atomLocality);
+ /*! \brief Clear forces in the GPU memory.
+ *
+ * \param[in] atomLocality Locality of the particles to clear.
+ * \param[in] dependency Dependency event for this operation.
+ */
+ void clearForcesOnGpu(AtomLocality atomLocality, GpuEventSynchronizer* dependency);
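+
+ /* A hypothetical usage sketch (illustrative only): the dependency lets the clearing be
+ * ordered after the last consumer of the force buffer. fConsumedEvent is an assumed
+ * caller-side synchronizer.
+ *
+ * \code
+ *   stateGpu->clearForcesOnGpu(AtomLocality::All, fConsumedEvent);
+ * \endcode
+ */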
+
/*! \brief Get the event synchronizer for the forces ready on device.
*
* Returns either of the event synchronizers, depending on the offload scenario
* 1. The forces are copied to the device (when GPU buffer ops are off)
* 2. The forces are reduced on the device (GPU buffer ops are on)
*
- * \todo Pass step workload instead of the useGpuFBufferOps boolean.
- *
- * \param[in] atomLocality Locality of the particles to wait for.
- * \param[in] useGpuFBufferOps If the force buffer ops are offloaded to the GPU.
+ * \param[in] stepWork Step workload flags.
+ * \param[in] simulationWork Simulation workload flags.
*
* \returns The event to synchronize the stream that consumes forces on device.
*/
- GpuEventSynchronizer* getForcesReadyOnDeviceEvent(AtomLocality atomLocality, bool useGpuFBufferOps);
+ GpuEventSynchronizer* getLocalForcesReadyOnDeviceEvent(StepWorkload stepWork,
+ SimulationWorkload simulationWork);
- /*! \brief Getter for the event synchronizer for the forces are reduced on the GPU.
+ /*! \brief Getter for the event synchronizer for when forces are reduced on the GPU.
*
- * \returns The event to mark when forces are reduced on the GPU.
+ * \param[in] atomLocality Locality of the particles to wait for.
+ * \returns The event to mark when forces are reduced on the GPU.
+ */
+ GpuEventSynchronizer* fReducedOnDevice(AtomLocality atomLocality);
+
+ //! \brief Consume the event for when the forces are reduced on the device.
+ void consumeForcesReducedOnDeviceEvent(AtomLocality atomLocality);
+
+ /*! \brief Getter for the event synchronizer for when the forces are ready for the GPU update.
+ *
+ * \param[in] atomLocality Locality of the particles to wait for.
+ * \returns The event to mark when forces are ready for GPU update.
*/
- GpuEventSynchronizer* fReducedOnDevice();
+ GpuEventSynchronizer* fReadyOnDevice(AtomLocality atomLocality);
/*! \brief Copy forces from the GPU memory.
*
*
* \returns The device command stream to use in update-constraints.
*/
- void* getUpdateStream();
+ const DeviceStream* getUpdateStream();
/*! \brief Getter for the number of local atoms.
*
* \returns The number of local atoms.
*/
- int numAtomsLocal();
+ int numAtomsLocal() const;
/*! \brief Getter for the total number of atoms.
*
* \returns The total number of atoms.
*/
- int numAtomsAll();
+ int numAtomsAll() const;
private:
//! GPU PME stream.
- CommandStream pmeStream_ = nullptr;
+ const DeviceStream* pmeStream_;
//! GPU NBNXM local stream.
- CommandStream localStream_ = nullptr;
- //! GPU NBNXM non-local stream
- CommandStream nonLocalStream_ = nullptr;
- //! GPU Update-constreaints stream.
- CommandStream updateStream_ = nullptr;
+ const DeviceStream* localStream_;
+ //! GPU NBNXM non-local stream.
+ const DeviceStream* nonLocalStream_;
+ //! GPU Update-constraints stream.
+ const DeviceStream* updateStream_;
// Streams to use for coordinates H2D and D2H copies (one event for each atom locality)
- EnumerationArray<AtomLocality, CommandStream> xCopyStreams_ = { { nullptr } };
+ EnumerationArray<AtomLocality, const DeviceStream*> xCopyStreams_ = { { nullptr } };
// Streams to use for velocities H2D and D2H copies (one event for each atom locality)
- EnumerationArray<AtomLocality, CommandStream> vCopyStreams_ = { { nullptr } };
+ EnumerationArray<AtomLocality, const DeviceStream*> vCopyStreams_ = { { nullptr } };
// Streams to use for forces H2D and D2H copies (one event for each atom locality)
- EnumerationArray<AtomLocality, CommandStream> fCopyStreams_ = { { nullptr } };
+ EnumerationArray<AtomLocality, const DeviceStream*> fCopyStreams_ = { { nullptr } };
+ // Streams internal to this module
+ std::unique_ptr<DeviceStream> copyInStream_;
+ std::unique_ptr<DeviceStream> memsetStream_;
/*! \brief An array of events that indicate H2D copy is complete (one event for each atom locality)
*
* \todo Reconsider naming. It should be xCopiedToDevice or xH2DCopyComplete, etc.
*/
EnumerationArray<AtomLocality, GpuEventSynchronizer> xReadyOnDevice_;
- //! An event that the coordinates are ready after update-constraints execution
- GpuEventSynchronizer xUpdatedOnDevice_;
+ //! A pointer to an event marking that the coordinates are ready after update-constraints execution
+ GpuEventSynchronizer* xUpdatedOnDeviceEvent_ = nullptr;
//! An array of events that indicate D2H copy of coordinates is complete (one event for each atom locality)
EnumerationArray<AtomLocality, GpuEventSynchronizer> xReadyOnHost_;
- //! An array of events that indicate H2D copy of velocities is complete (one event for each atom locality)
- EnumerationArray<AtomLocality, GpuEventSynchronizer> vReadyOnDevice_;
//! An array of events that indicate D2H copy of velocities is complete (one event for each atom locality)
EnumerationArray<AtomLocality, GpuEventSynchronizer> vReadyOnHost_;
//! An array of events that indicate H2D copy of forces is complete (one event for each atom locality)
EnumerationArray<AtomLocality, GpuEventSynchronizer> fReadyOnDevice_;
- //! An event that the forces were reduced on the GPU
- GpuEventSynchronizer fReducedOnDevice_;
+ //! An array of events that indicate the forces were reduced on the GPU (one event for each atom locality)
+ EnumerationArray<AtomLocality, GpuEventSynchronizer> fReducedOnDevice_;
//! An array of events that indicate D2H copy of forces is complete (one event for each atom locality)
EnumerationArray<AtomLocality, GpuEventSynchronizer> fReadyOnHost_;
- /*! \brief GPU context (for OpenCL builds)
- * \todo Make a Context class usable in CPU code
- */
- DeviceContext deviceContext_ = nullptr;
+ //! GPU context (for OpenCL builds)
+ const DeviceContext& deviceContext_;
//! Default GPU calls behavior
GpuApiCallBehavior transferKind_ = GpuApiCallBehavior::Async;
- //! Padding size for the coordinates buffer
- int paddingSize_ = 0;
+ //! Required minimum divisor of the allocation size of the coordinates buffer
+ int allocationBlockSizeDivisor_ = 0;
//! Number of local atoms
int numAtomsLocal_ = -1;
* \param[in] h_data Host-side buffer.
* \param[in] dataSize Device-side data allocation size.
* \param[in] atomLocality If all, local or non-local ranges should be copied.
- * \param[in] commandStream GPU stream to execute copy in.
+ * \param[in] deviceStream GPU stream to execute copy in.
*/
void copyToDevice(DeviceBuffer<RVec> d_data,
gmx::ArrayRef<const gmx::RVec> h_data,
int dataSize,
AtomLocality atomLocality,
- CommandStream commandStream);
+ const DeviceStream& deviceStream);
/*! \brief Performs the copy of data from device to host buffer.
*
* \param[in] d_data Device-side buffer.
* \param[in] dataSize Device-side data allocation size.
* \param[in] atomLocality If all, local or non-local ranges should be copied.
- * \param[in] commandStream GPU stream to execute copy in.
+ * \param[in] deviceStream GPU stream to execute copy in.
*/
void copyFromDevice(gmx::ArrayRef<gmx::RVec> h_data,
DeviceBuffer<RVec> d_data,
int dataSize,
AtomLocality atomLocality,
- CommandStream commandStream);
+ const DeviceStream& deviceStream);
+
+ /*! \brief Performs the clearing of data in the device buffer.
+ *
+ * \todo Template on locality.
+ *
+ * \param[out] d_data Device-side buffer.
+ * \param[in] dataSize Device-side data allocation size.
+ * \param[in] atomLocality If all, local or non-local ranges should be cleared.
+ * \param[in] deviceStream GPU stream to execute the clearing in.
+ */
+ void clearOnDevice(DeviceBuffer<RVec> d_data,
+ int dataSize,
+ AtomLocality atomLocality,
+ const DeviceStream& deviceStream) const;
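+
+ /* A hypothetical implementation sketch (the actual definition lives in the source file,
+ * not in this header): the helper is expected to resolve the atom range for the given
+ * locality and clear it asynchronously with clearDeviceBufferAsync() from
+ * gromacs/gpu_utils/devicebuffer.h.
+ *
+ * \code
+ *   auto [atomsStartAt, numAtomsToClear] = getAtomRangesFromAtomLocality(atomLocality);
+ *   clearDeviceBufferAsync(&d_data, atomsStartAt, numAtomsToClear, deviceStream);
+ * \endcode
+ */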
};
} // namespace gmx