/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "config.h"
-#if GMX_GPU != GMX_GPU_NONE
+#if GMX_GPU
-#if GMX_GPU == GMX_GPU_CUDA
-#include "gromacs/gpu_utils/cudautils.cuh"
-#endif
-#include "gromacs/gpu_utils/devicebuffer.h"
-#if GMX_GPU == GMX_GPU_OPENCL
-#include "gromacs/gpu_utils/oclutils.h"
-#endif
-#include "gromacs/math/vectypes.h"
-#include "gromacs/mdtypes/state_propagator_data_gpu.h"
-#include "gromacs/utility/classhelpers.h"
+# include "gromacs/gpu_utils/device_stream_manager.h"
+# include "gromacs/gpu_utils/devicebuffer.h"
+# include "gromacs/gpu_utils/gpueventsynchronizer.h"
+# include "gromacs/math/vectypes.h"
+# include "gromacs/mdtypes/state_propagator_data_gpu.h"
+# include "gromacs/timing/wallcycle.h"
+# include "gromacs/utility/classhelpers.h"
+
+# include "state_propagator_data_gpu_impl.h"
-#include "state_propagator_data_gpu_impl.h"
namespace gmx
{
-StatePropagatorDataGpu::Impl::Impl(gmx_unused const void *commandStream,
- gmx_unused const void *deviceContext,
- GpuApiCallBehavior transferKind,
- int paddingSize) :
+StatePropagatorDataGpu::Impl::Impl(const DeviceStreamManager& deviceStreamManager,
+ GpuApiCallBehavior transferKind,
+ int allocationBlockSizeDivisor,
+ gmx_wallcycle* wcycle) :
+ deviceContext_(deviceStreamManager.context()),
+ transferKind_(transferKind),
+ allocationBlockSizeDivisor_(allocationBlockSizeDivisor),
+ wcycle_(wcycle)
+{
+ static_assert(
+ GMX_GPU,
+ "GPU state propagator data object should only be constructed on the GPU code-paths.");
+
+ // We need to keep local copies for re-initialization.
+ pmeStream_ = &deviceStreamManager.stream(DeviceStreamType::Pme);
+ localStream_ = &deviceStreamManager.stream(DeviceStreamType::NonBondedLocal);
+ nonLocalStream_ = &deviceStreamManager.stream(DeviceStreamType::NonBondedNonLocal);
+ // The PME stream is used in OpenCL for the H2D coordinate transfer
+ updateStream_ = &deviceStreamManager.stream(
+ GMX_GPU_OPENCL ? DeviceStreamType::Pme : DeviceStreamType::UpdateAndConstraints);
+
+ // Map each atom locality to the stream used for coordinate, velocity, and force
+ // transfers. The same streams are used for H2D and D2H copies. Note that a nullptr
+ // stream indicates that the corresponding copy is not supported.
+ xCopyStreams_[AtomLocality::Local] = updateStream_;
+ xCopyStreams_[AtomLocality::NonLocal] = nonLocalStream_;
+ xCopyStreams_[AtomLocality::All] = nullptr;
+
+ vCopyStreams_[AtomLocality::Local] = updateStream_;
+ vCopyStreams_[AtomLocality::NonLocal] = nullptr;
+ vCopyStreams_[AtomLocality::All] = nullptr;
+
+ fCopyStreams_[AtomLocality::Local] = localStream_;
+ fCopyStreams_[AtomLocality::NonLocal] = nonLocalStream_;
+ fCopyStreams_[AtomLocality::All] = updateStream_;
+
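+ // Dedicated streams for the H2D force copy and for force-buffer clearing, so that
+ // both can overlap with force computation in the compute streams (see
+ // copyForcesToGpu() and clearForcesOnGpu() below).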
+ copyInStream_ = std::make_unique<DeviceStream>(deviceContext_, DeviceStreamPriority::Normal, false);
+ memsetStream_ = std::make_unique<DeviceStream>(deviceContext_, DeviceStreamPriority::Normal, false);
+}
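+ /* A minimal construction sketch (illustrative names, not from this file):
+  *
+  *   StatePropagatorDataGpu stateGpu(
+  *           deviceStreamManager, GpuApiCallBehavior::Async, blockSizeDivisor, wcycle);
+  *   stateGpu.reinit(numAtomsLocal, numAtomsAll);
+  */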
+
+StatePropagatorDataGpu::Impl::Impl(const DeviceStream* pmeStream,
+ const DeviceContext& deviceContext,
+ GpuApiCallBehavior transferKind,
+ int allocationBlockSizeDivisor,
+ gmx_wallcycle* wcycle) :
+ deviceContext_(deviceContext),
transferKind_(transferKind),
- paddingSize_(paddingSize)
+ allocationBlockSizeDivisor_(allocationBlockSizeDivisor),
+ wcycle_(wcycle)
{
+ static_assert(
+ GMX_GPU,
+ "GPU state propagator data object should only be constructed on the GPU code-paths.");
- GMX_RELEASE_ASSERT(getenv("GMX_USE_GPU_BUFFER_OPS") == nullptr, "GPU buffer ops are not supported in this build.");
+ GMX_ASSERT(pmeStream->isValid(), "GPU PME stream should be valid.");
+ pmeStream_ = pmeStream;
+ localStream_ = pmeStream; // For clearing the force buffer
+ nonLocalStream_ = nullptr;
+ updateStream_ = nullptr;
- // Set the stream-context pair for the OpenCL builds,
- // use the nullptr stream for CUDA builds
-#if GMX_GPU == GMX_GPU_OPENCL
- if (commandStream != nullptr)
- {
- commandStream_ = *static_cast<const CommandStream*>(commandStream);
- }
- if (deviceContext != nullptr)
- {
- deviceContext_ = *static_cast<const DeviceContext*>(deviceContext);
- }
-#endif
-}
+ // Only local coordinates are allowed to be copied on a PME-only rank or in PME tests.
+ // This is a temporary measure to make it safe to use this class in those cases.
+ xCopyStreams_[AtomLocality::Local] = pmeStream_;
+ xCopyStreams_[AtomLocality::NonLocal] = nullptr;
+ xCopyStreams_[AtomLocality::All] = nullptr;
-StatePropagatorDataGpu::Impl::~Impl()
-{
+ vCopyStreams_[AtomLocality::Local] = nullptr;
+ vCopyStreams_[AtomLocality::NonLocal] = nullptr;
+ vCopyStreams_[AtomLocality::All] = nullptr;
+
+ fCopyStreams_[AtomLocality::Local] = nullptr;
+ fCopyStreams_[AtomLocality::NonLocal] = nullptr;
+ fCopyStreams_[AtomLocality::All] = nullptr;
}
+StatePropagatorDataGpu::Impl::~Impl() {}
+
void StatePropagatorDataGpu::Impl::reinit(int numAtomsLocal, int numAtomsAll)
{
-#if GMX_GPU == GMX_GPU_OPENCL
- GMX_ASSERT(deviceContext_ != nullptr, "GPU context should be set in OpenCL builds.");
-#endif
+ wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
+ wallcycle_sub_start_nocount(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+
numAtomsLocal_ = numAtomsLocal;
numAtomsAll_ = numAtomsAll;
int numAtomsPadded;
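+ // Round the allocation up to the next multiple of the divisor; e.g., with a divisor
+ // of 8, 1003 atoms pad to ((1003 + 8 - 1) / 8) * 8 = 1008 elements.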
- if (paddingSize_ > 0)
+ if (allocationBlockSizeDivisor_ > 0)
{
- numAtomsPadded = ((numAtomsAll_ + paddingSize_ - 1 ) / paddingSize_ )*paddingSize_;
+ numAtomsPadded = ((numAtomsAll_ + allocationBlockSizeDivisor_ - 1) / allocationBlockSizeDivisor_)
+ * allocationBlockSizeDivisor_;
}
else
{
numAtomsPadded = numAtomsAll_;
}
- reallocateDeviceBuffer(&d_x_, DIM*numAtomsPadded, &d_xSize_, &d_xCapacity_, deviceContext_);
+ reallocateDeviceBuffer(&d_x_, numAtomsPadded, &d_xSize_, &d_xCapacity_, deviceContext_);
const size_t paddingAllocationSize = numAtomsPadded - numAtomsAll_;
if (paddingAllocationSize > 0)
{
- clearDeviceBufferAsync(&d_x_, DIM*numAtomsAll_, DIM*paddingAllocationSize, commandStream_);
+ // The PME stream is used here because the padding region of d_x_ is only used by the PME task.
+ clearDeviceBufferAsync(&d_x_, numAtomsAll_, paddingAllocationSize, *pmeStream_);
}
- reallocateDeviceBuffer(&d_v_, DIM*numAtomsAll_, &d_vSize_, &d_vCapacity_, deviceContext_);
- reallocateDeviceBuffer(&d_f_, DIM*numAtomsAll_, &d_fSize_, &d_fCapacity_, deviceContext_);
+ reallocateDeviceBuffer(&d_v_, numAtomsAll_, &d_vSize_, &d_vCapacity_, deviceContext_);
+ const int d_fOldCapacity = d_fCapacity_;
+ reallocateDeviceBuffer(&d_f_, numAtomsAll_, &d_fSize_, &d_fCapacity_, deviceContext_);
+ // Clearing of the forces can be done in the local stream, since the non-local stream
+ // cannot reach the force-accumulation stage before syncing with the local stream. This
+ // is only done in CUDA and SYCL, since force buffer ops are not implemented in OpenCL.
+ if ((bool(GMX_GPU_CUDA) || bool(GMX_GPU_SYCL)) && d_fCapacity_ != d_fOldCapacity)
+ {
+ clearDeviceBufferAsync(&d_f_, 0, d_fCapacity_, *localStream_);
+ }
+
+ wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+ wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
}
-std::tuple<int, int> StatePropagatorDataGpu::Impl::getAtomRangesFromAtomLocality(AtomLocality atomLocality)
+std::tuple<int, int> StatePropagatorDataGpu::Impl::getAtomRangesFromAtomLocality(AtomLocality atomLocality) const
{
int atomsStartAt = 0;
int numAtomsToCopy = 0;
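+ // Atoms are laid out with all local atoms first, followed by the non-local ones, so each
+ // locality maps to a contiguous range; e.g., with 100 local atoms out of 150 in total,
+ // AtomLocality::NonLocal yields atomsStartAt = 100 and numAtomsToCopy = 50.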
switch (atomLocality)
{
case AtomLocality::All:
- atomsStartAt = 0;
- numAtomsToCopy = numAtomsAll_;
+ atomsStartAt = 0;
+ numAtomsToCopy = numAtomsAll_;
break;
case AtomLocality::Local:
- atomsStartAt = 0;
- numAtomsToCopy = numAtomsLocal_;
+ atomsStartAt = 0;
+ numAtomsToCopy = numAtomsLocal_;
break;
case AtomLocality::NonLocal:
- atomsStartAt = numAtomsLocal_;
- numAtomsToCopy = numAtomsAll_ - numAtomsLocal_;
+ atomsStartAt = numAtomsLocal_;
+ numAtomsToCopy = numAtomsAll_ - numAtomsLocal_;
break;
default:
- GMX_RELEASE_ASSERT(false, "Wrong range of atoms requested in GPU state data manager. Should be All, Local or NonLocal.");
+ GMX_RELEASE_ASSERT(false,
+ "Wrong range of atoms requested in GPU state data manager. Should "
+ "be All, Local or NonLocal.");
}
- GMX_ASSERT(atomsStartAt >= 0, "The first elemtnt to copy has negative index. Probably, the GPU propagator state was not initialized.");
- GMX_ASSERT(numAtomsToCopy >= 0, "Number of atoms to copy is negative. Probably, the GPU propagator state was not initialized.");
+ GMX_ASSERT(atomsStartAt >= 0,
+ "The first element to copy has a negative index. The GPU propagator state "
+ "was probably not initialized.");
+ GMX_ASSERT(numAtomsToCopy >= 0,
+ "The number of atoms to copy is negative. The GPU propagator state was "
+ "probably not initialized.");
return std::make_tuple(atomsStartAt, numAtomsToCopy);
}
-void StatePropagatorDataGpu::Impl::copyToDevice(DeviceBuffer<float> d_data,
- const gmx::ArrayRef<const gmx::RVec> h_data,
- int dataSize,
- AtomLocality atomLocality)
+void StatePropagatorDataGpu::Impl::copyToDevice(DeviceBuffer<RVec> d_data,
+ const gmx::ArrayRef<const gmx::RVec> h_data,
+ int dataSize,
+ AtomLocality atomLocality,
+ const DeviceStream& deviceStream)
{
-
-#if GMX_GPU == GMX_GPU_OPENCL
- GMX_ASSERT(deviceContext_ != nullptr, "GPU context should be set in OpenCL builds.");
-#endif
-
GMX_UNUSED_VALUE(dataSize);
+ GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
+
GMX_ASSERT(dataSize >= 0, "Trying to copy to device buffer before it was allocated.");
+ GMX_ASSERT(deviceStream.isValid(), "No stream is valid for copying with given atom locality.");
+
int atomsStartAt, numAtomsToCopy;
std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);
- int elementsStartAt = atomsStartAt*DIM;
- int numElementsToCopy = numAtomsToCopy*DIM;
-
if (numAtomsToCopy != 0)
{
- GMX_ASSERT(elementsStartAt + numElementsToCopy <= dataSize, "The device allocation is smaller than requested copy range.");
- GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(), "The host buffer is smaller than the requested copy range.");
-
- // TODO: Use the proper stream
- copyToDeviceBuffer(&d_data, reinterpret_cast<const float *>(&h_data.data()[atomsStartAt]),
- elementsStartAt, numElementsToCopy,
- commandStream_, transferKind_, nullptr);
+ GMX_ASSERT(atomsStartAt + numAtomsToCopy <= dataSize,
+ "The device allocation is smaller than requested copy range.");
+ GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(),
+ "The host buffer is smaller than the requested copy range.");
+
+ copyToDeviceBuffer(&d_data,
+ reinterpret_cast<const RVec*>(&h_data.data()[atomsStartAt]),
+ atomsStartAt,
+ numAtomsToCopy,
+ deviceStream,
+ transferKind_,
+ nullptr);
}
}
-void StatePropagatorDataGpu::Impl::copyFromDevice(gmx::ArrayRef<gmx::RVec> h_data,
- DeviceBuffer<float> d_data,
- int dataSize,
- AtomLocality atomLocality)
+void StatePropagatorDataGpu::Impl::copyFromDevice(gmx::ArrayRef<gmx::RVec> h_data,
+ DeviceBuffer<RVec> d_data,
+ int dataSize,
+ AtomLocality atomLocality,
+ const DeviceStream& deviceStream)
{
-
-#if GMX_GPU == GMX_GPU_OPENCL
- GMX_ASSERT(deviceContext_ != nullptr, "GPU context should be set in OpenCL builds.");
-#endif
-
GMX_UNUSED_VALUE(dataSize);
+ GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
+
GMX_ASSERT(dataSize >= 0, "Trying to copy from device buffer before it was allocated.");
+ GMX_ASSERT(deviceStream.isValid(), "No stream is valid for copying with given atom locality.");
+
int atomsStartAt, numAtomsToCopy;
std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);
- int elementsStartAt = atomsStartAt*DIM;
- int numElementsToCopy = numAtomsToCopy*DIM;
-
if (numAtomsToCopy != 0)
{
- GMX_ASSERT(elementsStartAt + numElementsToCopy <= dataSize, "The device allocation is smaller than requested copy range.");
- GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(), "The host buffer is smaller than the requested copy range.");
+ GMX_ASSERT(atomsStartAt + numAtomsToCopy <= dataSize,
+ "The device allocation is smaller than requested copy range.");
+ GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(),
+ "The host buffer is smaller than the requested copy range.");
+
+ copyFromDeviceBuffer(reinterpret_cast<RVec*>(&h_data.data()[atomsStartAt]),
+ &d_data,
+ atomsStartAt,
+ numAtomsToCopy,
+ deviceStream,
+ transferKind_,
+ nullptr);
+ }
+}
+
+void StatePropagatorDataGpu::Impl::clearOnDevice(DeviceBuffer<RVec> d_data,
+ int dataSize,
+ AtomLocality atomLocality,
+ const DeviceStream& deviceStream) const
+{
+ GMX_UNUSED_VALUE(dataSize);
+
+ GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
- // TODO: Use the proper stream
- copyFromDeviceBuffer(reinterpret_cast<float*>(&h_data.data()[atomsStartAt]), &d_data,
- elementsStartAt, numElementsToCopy,
- commandStream_, transferKind_, nullptr);
+ GMX_ASSERT(dataSize >= 0, "Trying to clear the device buffer before it was allocated.");
+ GMX_ASSERT(deviceStream.isValid(), "No stream is valid for clearing with given atom locality.");
+
+ int atomsStartAt, numAtomsToClear;
+ std::tie(atomsStartAt, numAtomsToClear) = getAtomRangesFromAtomLocality(atomLocality);
+
+ if (numAtomsToClear != 0)
+ {
+ GMX_ASSERT(atomsStartAt + numAtomsToClear <= dataSize,
+ "The device allocation is smaller than requested clear range.");
+
+ clearDeviceBufferAsync(&d_data, atomsStartAt, numAtomsToClear, deviceStream);
}
}
-DeviceBuffer<float> StatePropagatorDataGpu::Impl::getCoordinates()
+DeviceBuffer<RVec> StatePropagatorDataGpu::Impl::getCoordinates()
{
return d_x_;
}
-void StatePropagatorDataGpu::Impl::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec> h_x,
- AtomLocality atomLocality)
+void StatePropagatorDataGpu::Impl::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec> h_x,
+ AtomLocality atomLocality)
{
- copyToDevice(d_x_, h_x, d_xSize_, atomLocality);
+ GMX_ASSERT(atomLocality < AtomLocality::All,
+ formatString("Wrong atom locality. Only Local and NonLocal are allowed for "
+ "coordinate transfers, passed value is \"%s\"",
+ enumValueToString(atomLocality))
+ .c_str());
+
+ const DeviceStream* deviceStream = xCopyStreams_[atomLocality];
+ GMX_ASSERT(deviceStream != nullptr,
+ "No stream is valid for copying positions with given atom locality.");
+
+ wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
+ wallcycle_sub_start(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+
+ copyToDevice(d_x_, h_x, d_xSize_, atomLocality, *deviceStream);
+
+ // markEvent is skipped in OpenCL because:
+ // - it is not needed: the copy is done in the same stream as the only consumer task (PME);
+ // - we do not consume the events in OpenCL, and GpuEventSynchronizer does not allow
+ //   leaving events unconsumed (that would leak memory).
+ // TODO: remove this by adding an event-mark free flavor of this function
+ if (bool(GMX_GPU_CUDA) || bool(GMX_GPU_SYCL))
+ {
+ xReadyOnDevice_[atomLocality].markEvent(*deviceStream);
+ }
+
+ wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+ wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
+}
+
+GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getCoordinatesReadyOnDeviceEvent(
+ AtomLocality atomLocality,
+ const SimulationWorkload& simulationWork,
+ const StepWorkload& stepWork,
+ GpuEventSynchronizer* gpuCoordinateHaloLaunched)
+{
+ // The provider of the coordinates may be different for local atoms. If the update is offloaded
+ // and this is not a neighbor search step, then the consumer needs to wait for the update
+ // to complete. Otherwise, the coordinates are copied from the host and we need to wait for
+ // the copy event. Non-local coordinates are provided by the GPU halo exchange (if active), otherwise by H2D copy.
+ //
+ // In OpenCL no events are used as coordinate sync is not necessary
+ if (GMX_GPU_OPENCL)
+ {
+ return nullptr;
+ }
+ if (atomLocality == AtomLocality::NonLocal && stepWork.useGpuXHalo)
+ {
+ GMX_ASSERT(gpuCoordinateHaloLaunched != nullptr,
+ "GPU halo exchange is active but its completion event is null.");
+ return gpuCoordinateHaloLaunched;
+ }
+ if (atomLocality == AtomLocality::Local && simulationWork.useGpuUpdate && !stepWork.doNeighborSearch)
+ {
+ GMX_ASSERT(xUpdatedOnDeviceEvent_ != nullptr, "The event synchronizer can not be nullptr.");
+ return xUpdatedOnDeviceEvent_;
+ }
+ else
+ {
+ if (stepWork.doNeighborSearch && xUpdatedOnDeviceEvent_)
+ {
+ /* On search steps, we do not consume the result of the GPU update
+ * but rather that of a H2D transfer. So, we reset the event triggered after
+ * update to avoid leaving it unconsumed.
+ * Unfortunately, we don't always have the event marked either (e.g., on the
+ * first step) so we just reset it here.
+ * See Issue #3988. */
+ xUpdatedOnDeviceEvent_->reset();
+ }
+ return &xReadyOnDevice_[atomLocality];
+ }
+}
+
+void StatePropagatorDataGpu::Impl::waitCoordinatesCopiedToDevice(AtomLocality atomLocality)
+{
+ wallcycle_start(wcycle_, WallCycleCounter::WaitGpuStatePropagatorData);
+ GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
+ xReadyOnDevice_[atomLocality].waitForEvent();
+ wallcycle_stop(wcycle_, WallCycleCounter::WaitGpuStatePropagatorData);
+}
+
+void StatePropagatorDataGpu::Impl::consumeCoordinatesCopiedToDeviceEvent(AtomLocality atomLocality)
+{
+ GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
+ xReadyOnDevice_[atomLocality].consume();
+}
+
+void StatePropagatorDataGpu::Impl::resetCoordinatesCopiedToDeviceEvent(AtomLocality atomLocality)
+{
+ GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
+ xReadyOnDevice_[atomLocality].reset();
+}
+
+void StatePropagatorDataGpu::Impl::setXUpdatedOnDeviceEvent(GpuEventSynchronizer* xUpdatedOnDeviceEvent)
+{
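+ // Stored for later use: getCoordinatesReadyOnDeviceEvent() returns this event as the
+ // dependency when the local coordinates are produced by the GPU update.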
+ GMX_ASSERT(xUpdatedOnDeviceEvent != nullptr, "The event synchronizer can not be nullptr.");
+ xUpdatedOnDeviceEvent_ = xUpdatedOnDeviceEvent;
+}
+
+void StatePropagatorDataGpu::Impl::copyCoordinatesFromGpu(gmx::ArrayRef<gmx::RVec> h_x,
+ AtomLocality atomLocality,
+ GpuEventSynchronizer* dependency)
+{
+ GMX_ASSERT(atomLocality < AtomLocality::All,
+ formatString("Wrong atom locality. Only Local and NonLocal are allowed for "
+ "coordinate transfers, passed value is \"%s\"",
+ enumValueToString(atomLocality))
+ .c_str());
+ const DeviceStream* deviceStream = xCopyStreams_[atomLocality];
+ GMX_ASSERT(deviceStream != nullptr,
+ "No stream is valid for copying positions with given atom locality.");
+
+ if (dependency != nullptr)
+ {
+ dependency->enqueueWaitEvent(*deviceStream);
+ }
+
+ wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
+ wallcycle_sub_start(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+
+ copyFromDevice(h_x, d_x_, d_xSize_, atomLocality, *deviceStream);
+ // Note: unlike copyCoordinatesToGpu, this function is not used in OpenCL, so the
+ // conditional around markEvent is not needed here.
+ xReadyOnHost_[atomLocality].markEvent(*deviceStream);
+
+ wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+ wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
}
-void StatePropagatorDataGpu::Impl::copyCoordinatesFromGpu(gmx::ArrayRef<gmx::RVec> h_x,
- AtomLocality atomLocality)
+void StatePropagatorDataGpu::Impl::waitCoordinatesReadyOnHost(AtomLocality atomLocality)
{
- copyFromDevice(h_x, d_x_, d_xSize_, atomLocality);
+ wallcycle_start(wcycle_, WallCycleCounter::WaitGpuStatePropagatorData);
+ xReadyOnHost_[atomLocality].waitForEvent();
+ wallcycle_stop(wcycle_, WallCycleCounter::WaitGpuStatePropagatorData);
}
-DeviceBuffer<float> StatePropagatorDataGpu::Impl::getVelocities()
+DeviceBuffer<RVec> StatePropagatorDataGpu::Impl::getVelocities()
{
return d_v_;
}
-void StatePropagatorDataGpu::Impl::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec> h_v,
- AtomLocality atomLocality)
+void StatePropagatorDataGpu::Impl::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec> h_v,
+ AtomLocality atomLocality)
{
- copyToDevice(d_v_, h_v, d_vSize_, atomLocality);
+ GMX_ASSERT(atomLocality == AtomLocality::Local,
+ formatString("Wrong atom locality. Only Local is allowed for "
+ "velocity transfers, passed value is \"%s\"",
+ enumValueToString(atomLocality))
+ .c_str());
+ const DeviceStream* deviceStream = vCopyStreams_[atomLocality];
+ GMX_ASSERT(deviceStream != nullptr,
+ "No stream is valid for copying velocities with given atom locality.");
+
+ wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
+ wallcycle_sub_start(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+
+ copyToDevice(d_v_, h_v, d_vSize_, atomLocality, *deviceStream);
+ /* Not marking the event, because it is not used anywhere.
+ * Since we only use velocities on the device for update, and we launch the copy in
+ * the "update" stream, that should be safe.
+ */
+
+ wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+ wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
}
-void StatePropagatorDataGpu::Impl::copyVelocitiesFromGpu(gmx::ArrayRef<gmx::RVec> h_v,
- AtomLocality atomLocality)
+void StatePropagatorDataGpu::Impl::copyVelocitiesFromGpu(gmx::ArrayRef<gmx::RVec> h_v, AtomLocality atomLocality)
{
- copyFromDevice(h_v, d_v_, d_vSize_, atomLocality);
+ GMX_ASSERT(atomLocality == AtomLocality::Local,
+ formatString("Wrong atom locality. Only Local is allowed for "
+ "velocity transfers, passed value is \"%s\"",
+ enumValueToString(atomLocality))
+ .c_str());
+ const DeviceStream* deviceStream = vCopyStreams_[atomLocality];
+ GMX_ASSERT(deviceStream != nullptr,
+ "No stream is valid for copying velocities with given atom locality.");
+
+ wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
+ wallcycle_sub_start(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+
+ copyFromDevice(h_v, d_v_, d_vSize_, atomLocality, *deviceStream);
+ vReadyOnHost_[atomLocality].markEvent(*deviceStream);
+
+ wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+ wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
+}
+
+void StatePropagatorDataGpu::Impl::waitVelocitiesReadyOnHost(AtomLocality atomLocality)
+{
+ wallcycle_start(wcycle_, WallCycleCounter::WaitGpuStatePropagatorData);
+ vReadyOnHost_[atomLocality].waitForEvent();
+ wallcycle_stop(wcycle_, WallCycleCounter::WaitGpuStatePropagatorData);
}
-DeviceBuffer<float> StatePropagatorDataGpu::Impl::getForces()
+DeviceBuffer<RVec> StatePropagatorDataGpu::Impl::getForces()
{
return d_f_;
}
-void StatePropagatorDataGpu::Impl::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec> h_f,
- AtomLocality atomLocality)
+// Copy CPU forces to the GPU using a stream internal to this module, to allow
+// overlap with GPU force calculations.
+void StatePropagatorDataGpu::Impl::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec> h_f,
+ AtomLocality atomLocality)
+{
+ GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
+ DeviceStream* deviceStream = copyInStream_.get();
+ GMX_ASSERT(deviceStream != nullptr,
+ "No stream is valid for copying forces with given atom locality.");
+
+ wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
+ wallcycle_sub_start(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+
+ copyToDevice(d_f_, h_f, d_fSize_, atomLocality, *deviceStream);
+ fReadyOnDevice_[atomLocality].markEvent(*deviceStream);
+
+ wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+ wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
+}
+
+void StatePropagatorDataGpu::Impl::clearForcesOnGpu(AtomLocality atomLocality, GpuEventSynchronizer* dependency)
+{
+ GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
+ DeviceStream* deviceStream = memsetStream_.get();
+ GMX_ASSERT(deviceStream != nullptr,
+ "No stream is valid for clearing forces with given atom locality.");
+
+ GMX_ASSERT(dependency != nullptr, "Dependency is not valid for clearing forces.");
+ dependency->enqueueWaitEvent(*deviceStream);
+
+ wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
+ wallcycle_sub_start(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+
+ clearOnDevice(d_f_, d_fSize_, atomLocality, *deviceStream);
+
+ fReadyOnDevice_[atomLocality].markEvent(*deviceStream);
+
+ wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+ wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
+}
+
+GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getLocalForcesReadyOnDeviceEvent(StepWorkload stepWork,
+ SimulationWorkload simulationWork)
+{
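+ // When the force buffer ops run on the GPU and PME-PP communication does not go
+ // through the CPU, the local forces become ready when the on-device reduction
+ // completes; otherwise the event marked after the H2D copy (or clear) of the
+ // local forces is the dependency.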
+ if (stepWork.useGpuFBufferOps && !simulationWork.useCpuPmePpCommunication)
+ {
+ return &fReducedOnDevice_[AtomLocality::Local];
+ }
+ else
+ {
+ return &fReadyOnDevice_[AtomLocality::Local];
+ }
+}
+
+GpuEventSynchronizer* StatePropagatorDataGpu::Impl::fReducedOnDevice(AtomLocality atomLocality)
+{
+ return &fReducedOnDevice_[atomLocality];
+}
+
+void StatePropagatorDataGpu::Impl::consumeForcesReducedOnDeviceEvent(AtomLocality atomLocality)
+{
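+ // Mark the event as consumed without synchronizing, so that it is not left
+ // unconsumed (cf. the note in copyCoordinatesToGpu()).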
+ fReducedOnDevice_[atomLocality].consume();
+}
+
+GpuEventSynchronizer* StatePropagatorDataGpu::Impl::fReadyOnDevice(AtomLocality atomLocality)
+{
+ return &fReadyOnDevice_[atomLocality];
+}
+
+void StatePropagatorDataGpu::Impl::copyForcesFromGpu(gmx::ArrayRef<gmx::RVec> h_f, AtomLocality atomLocality)
{
- copyToDevice(d_f_, h_f, d_fSize_, atomLocality);
+ GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
+ const DeviceStream* deviceStream = fCopyStreams_[atomLocality];
+ GMX_ASSERT(deviceStream != nullptr,
+ "No stream is valid for copying forces with given atom locality.");
+
+ wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
+ wallcycle_sub_start(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+
+ copyFromDevice(h_f, d_f_, d_fSize_, atomLocality, *deviceStream);
+ fReadyOnHost_[atomLocality].markEvent(*deviceStream);
+
+ wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchStatePropagatorData);
+ wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
}
-void StatePropagatorDataGpu::Impl::copyForcesFromGpu(gmx::ArrayRef<gmx::RVec> h_f,
- AtomLocality atomLocality)
+void StatePropagatorDataGpu::Impl::waitForcesReadyOnHost(AtomLocality atomLocality)
{
- copyFromDevice(h_f, d_f_, d_fSize_, atomLocality);
+ wallcycle_start(wcycle_, WallCycleCounter::WaitGpuStatePropagatorData);
+ fReadyOnHost_[atomLocality].waitForEvent();
+ wallcycle_stop(wcycle_, WallCycleCounter::WaitGpuStatePropagatorData);
}
-void StatePropagatorDataGpu::Impl::synchronizeStream()
+const DeviceStream* StatePropagatorDataGpu::Impl::getUpdateStream()
{
- gpuStreamSynchronize(commandStream_);
+ return updateStream_;
}
-int StatePropagatorDataGpu::Impl::numAtomsLocal()
+int StatePropagatorDataGpu::Impl::numAtomsLocal() const
{
return numAtomsLocal_;
}
-int StatePropagatorDataGpu::Impl::numAtomsAll()
+int StatePropagatorDataGpu::Impl::numAtomsAll() const
{
return numAtomsAll_;
}
+StatePropagatorDataGpu::StatePropagatorDataGpu(const DeviceStreamManager& deviceStreamManager,
+ GpuApiCallBehavior transferKind,
+ int allocationBlockSizeDivisor,
+ gmx_wallcycle* wcycle) :
+ impl_(new Impl(deviceStreamManager, transferKind, allocationBlockSizeDivisor, wcycle))
+{
+}
-StatePropagatorDataGpu::StatePropagatorDataGpu(const void *commandStream,
- const void *deviceContext,
- GpuApiCallBehavior transferKind,
- int paddingSize)
- : impl_(new Impl(commandStream,
- deviceContext,
- transferKind,
- paddingSize))
+StatePropagatorDataGpu::StatePropagatorDataGpu(const DeviceStream* pmeStream,
+ const DeviceContext& deviceContext,
+ GpuApiCallBehavior transferKind,
+ int allocationBlockSizeDivisor,
+ gmx_wallcycle* wcycle) :
+ impl_(new Impl(pmeStream, deviceContext, transferKind, allocationBlockSizeDivisor, wcycle))
{
}
-StatePropagatorDataGpu::StatePropagatorDataGpu(StatePropagatorDataGpu && /* other */) noexcept = default;
+StatePropagatorDataGpu::StatePropagatorDataGpu(StatePropagatorDataGpu&& /* other */) noexcept = default;
-StatePropagatorDataGpu &StatePropagatorDataGpu::operator=(StatePropagatorDataGpu && /* other */) noexcept = default;
+StatePropagatorDataGpu& StatePropagatorDataGpu::operator=(StatePropagatorDataGpu&& /* other */) noexcept = default;
StatePropagatorDataGpu::~StatePropagatorDataGpu() = default;
void StatePropagatorDataGpu::reinit(int numAtomsLocal, int numAtomsAll)
{
return impl_->reinit(numAtomsLocal, numAtomsAll);
}
-std::tuple<int, int> StatePropagatorDataGpu::getAtomRangesFromAtomLocality(AtomLocality atomLocality)
+std::tuple<int, int> StatePropagatorDataGpu::getAtomRangesFromAtomLocality(AtomLocality atomLocality) const
{
return impl_->getAtomRangesFromAtomLocality(atomLocality);
}
-DeviceBuffer<float> StatePropagatorDataGpu::getCoordinates()
+DeviceBuffer<RVec> StatePropagatorDataGpu::getCoordinates()
{
return impl_->getCoordinates();
}
-void StatePropagatorDataGpu::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec> h_x,
- AtomLocality atomLocality)
+void StatePropagatorDataGpu::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec> h_x,
+ AtomLocality atomLocality)
{
return impl_->copyCoordinatesToGpu(h_x, atomLocality);
}
-void StatePropagatorDataGpu::copyCoordinatesFromGpu(gmx::ArrayRef<RVec> h_x,
- AtomLocality atomLocality)
+GpuEventSynchronizer*
+StatePropagatorDataGpu::getCoordinatesReadyOnDeviceEvent(AtomLocality atomLocality,
+ const SimulationWorkload& simulationWork,
+ const StepWorkload& stepWork,
+ GpuEventSynchronizer* gpuCoordinateHaloLaunched)
{
- return impl_->copyCoordinatesFromGpu(h_x, atomLocality);
+ return impl_->getCoordinatesReadyOnDeviceEvent(
+ atomLocality, simulationWork, stepWork, gpuCoordinateHaloLaunched);
+}
+
+void StatePropagatorDataGpu::waitCoordinatesCopiedToDevice(AtomLocality atomLocality)
+{
+ return impl_->waitCoordinatesCopiedToDevice(atomLocality);
+}
+
+void StatePropagatorDataGpu::consumeCoordinatesCopiedToDeviceEvent(AtomLocality atomLocality)
+{
+ return impl_->consumeCoordinatesCopiedToDeviceEvent(atomLocality);
+}
+
+void StatePropagatorDataGpu::resetCoordinatesCopiedToDeviceEvent(AtomLocality atomLocality)
+{
+ return impl_->resetCoordinatesCopiedToDeviceEvent(atomLocality);
+}
+
+void StatePropagatorDataGpu::setXUpdatedOnDeviceEvent(GpuEventSynchronizer* xUpdatedOnDeviceEvent)
+{
+ impl_->setXUpdatedOnDeviceEvent(xUpdatedOnDeviceEvent);
+}
+
+void StatePropagatorDataGpu::copyCoordinatesFromGpu(gmx::ArrayRef<RVec> h_x,
+ AtomLocality atomLocality,
+ GpuEventSynchronizer* dependency)
+{
+ return impl_->copyCoordinatesFromGpu(h_x, atomLocality, dependency);
+}
+
+void StatePropagatorDataGpu::waitCoordinatesReadyOnHost(AtomLocality atomLocality)
+{
+ return impl_->waitCoordinatesReadyOnHost(atomLocality);
}
-DeviceBuffer<float> StatePropagatorDataGpu::getVelocities()
+DeviceBuffer<RVec> StatePropagatorDataGpu::getVelocities()
{
return impl_->getVelocities();
}
-void StatePropagatorDataGpu::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec> h_v,
- AtomLocality atomLocality)
+void StatePropagatorDataGpu::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec> h_v,
+ AtomLocality atomLocality)
{
return impl_->copyVelocitiesToGpu(h_v, atomLocality);
}
-void StatePropagatorDataGpu::copyVelocitiesFromGpu(gmx::ArrayRef<RVec> h_v,
- AtomLocality atomLocality)
+void StatePropagatorDataGpu::copyVelocitiesFromGpu(gmx::ArrayRef<RVec> h_v, AtomLocality atomLocality)
{
return impl_->copyVelocitiesFromGpu(h_v, atomLocality);
}
+void StatePropagatorDataGpu::waitVelocitiesReadyOnHost(AtomLocality atomLocality)
+{
+ return impl_->waitVelocitiesReadyOnHost(atomLocality);
+}
+
-DeviceBuffer<float> StatePropagatorDataGpu::getForces()
+DeviceBuffer<RVec> StatePropagatorDataGpu::getForces()
{
return impl_->getForces();
}
-void StatePropagatorDataGpu::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec> h_f,
- AtomLocality atomLocality)
+void StatePropagatorDataGpu::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec> h_f, AtomLocality atomLocality)
{
return impl_->copyForcesToGpu(h_f, atomLocality);
}
-void StatePropagatorDataGpu::copyForcesFromGpu(gmx::ArrayRef<RVec> h_f,
- AtomLocality atomLocality)
+void StatePropagatorDataGpu::clearForcesOnGpu(AtomLocality atomLocality, GpuEventSynchronizer* dependency)
+{
+ return impl_->clearForcesOnGpu(atomLocality, dependency);
+}
+
+GpuEventSynchronizer* StatePropagatorDataGpu::getLocalForcesReadyOnDeviceEvent(StepWorkload stepWork,
+ SimulationWorkload simulationWork)
+{
+ return impl_->getLocalForcesReadyOnDeviceEvent(stepWork, simulationWork);
+}
+
+GpuEventSynchronizer* StatePropagatorDataGpu::fReducedOnDevice(AtomLocality atomLocality)
+{
+ return impl_->fReducedOnDevice(atomLocality);
+}
+
+void StatePropagatorDataGpu::consumeForcesReducedOnDeviceEvent(AtomLocality atomLocality)
+{
+ impl_->consumeForcesReducedOnDeviceEvent(atomLocality);
+}
+
+GpuEventSynchronizer* StatePropagatorDataGpu::fReadyOnDevice(AtomLocality atomLocality)
+{
+ return impl_->fReadyOnDevice(atomLocality);
+}
+
+void StatePropagatorDataGpu::copyForcesFromGpu(gmx::ArrayRef<RVec> h_f, AtomLocality atomLocality)
{
return impl_->copyForcesFromGpu(h_f, atomLocality);
}
-void StatePropagatorDataGpu::synchronizeStream()
+void StatePropagatorDataGpu::waitForcesReadyOnHost(AtomLocality atomLocality)
+{
+ return impl_->waitForcesReadyOnHost(atomLocality);
+}
+
+
+const DeviceStream* StatePropagatorDataGpu::getUpdateStream()
{
- return impl_->synchronizeStream();
+ return impl_->getUpdateStream();
}
-int StatePropagatorDataGpu::numAtomsLocal()
+int StatePropagatorDataGpu::numAtomsLocal() const
{
return impl_->numAtomsLocal();
}
-int StatePropagatorDataGpu::numAtomsAll()
+int StatePropagatorDataGpu::numAtomsAll() const
{
return impl_->numAtomsAll();
}
-} // namespace gmx
+} // namespace gmx
-#endif // GMX_GPU == GMX_GPU_NONE
+#endif // GMX_GPU