/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Declaration of low-level functions and fields of GPU state propagator object.
 *
 * \author Artem Zhmurov <zhmurov@gmail.com>
 *
 * \ingroup module_mdtypes
 */
#ifndef GMX_MDTYPES_STATE_PROPAGATOR_DATA_GPU_IMPL_H
#define GMX_MDTYPES_STATE_PROPAGATOR_DATA_GPU_IMPL_H

#include "gmxpre.h"

#include "config.h"

#include "gromacs/gpu_utils/devicebuffer.h"
#if GMX_GPU == GMX_GPU_CUDA
#    include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#elif GMX_GPU == GMX_GPU_OPENCL
#    include "gromacs/gpu_utils/gpueventsynchronizer_ocl.h"
#endif
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/utility/classhelpers.h"
#include "gromacs/utility/enumerationhelpers.h"
namespace gmx
{

class StatePropagatorDataGpu::Impl
{
public:
    /*! \brief Constructor
     *
     * The buffers are reallocated only at the reinit call, the padding is
     * used there for the coordinates buffer. It is needed for PME and added at
     * the end of the buffer. It is assumed that if the rank has PME duties on the
     * GPU, all coordinates are copied to the GPU and hence, for this rank, the
     * coordinates buffer is not split into local and non-local ranges. For other
     * ranks, the padding size is zero. This works because only one rank ever does
     * PME work on the GPU, and if that rank also does PP work that is the only
     * rank. So all coordinates are always transferred.
     *
     * In OpenCL, only the PME stream is used since it is the only stream created in
     * the PME context. The local and non-local streams are only needed when buffer
     * ops are offloaded. This feature is currently not available in OpenCL and
     * hence these streams are not set in these builds.
     *
     * \param[in] deviceStreamManager         Object that owns the DeviceContext and DeviceStreams.
     * \param[in] transferKind                H2D/D2H transfer call behavior (synchronous or not).
     * \param[in] allocationBlockSizeDivisor  Determines the padding size for coordinates buffer.
     * \param[in] wcycle                      Wall cycle counter data.
     */
    Impl(const DeviceStreamManager& deviceStreamManager,
         GpuApiCallBehavior         transferKind,
         int                        allocationBlockSizeDivisor,
         gmx_wallcycle*             wcycle);
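
    /* A minimal construction sketch (illustrative only; the manager, divisor
     * and cycle-counter objects are assumed to exist at the call site):
     *
     *   StatePropagatorDataGpu::Impl stateGpu(deviceStreamManager,
     *                                         GpuApiCallBehavior::Async,
     *                                         allocationBlockSizeDivisor,
     *                                         wcycle);
     */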
    /*! \brief Constructor to use in a PME-only rank and in tests.
     *
     * This constructor should be used if only a coordinate device buffer should be managed
     * using a single stream. Any operation on the force or velocity buffer, as well as a
     * copy of non-local coordinates, will exit with an assertion failure. Note that the
     * pmeStream cannot be a nullptr; if it is, the constructor will exit with an assertion
     * failure.
     *
     * \todo Currently, unsupported copy operations are blocked by an assertion that the
     *       stream is not nullptr. This should be improved.
     *
     * \param[in] pmeStream                   Device PME stream, nullptr is not allowed.
     * \param[in] deviceContext               Device context, nullptr allowed for non-OpenCL builds.
     * \param[in] transferKind                H2D/D2H transfer call behavior (synchronous or not).
     * \param[in] allocationBlockSizeDivisor  Determines the padding size for coordinates buffer.
     * \param[in] wcycle                      Wall cycle counter data.
     */
    Impl(const DeviceStream*  pmeStream,
         const DeviceContext& deviceContext,
         GpuApiCallBehavior   transferKind,
         int                  allocationBlockSizeDivisor,
         gmx_wallcycle*       wcycle);
    ~Impl();

    /*! \brief Set the ranges for local and non-local atoms and reallocate buffers.
     *
     * Reallocates the coordinate, velocity and force buffers on the device.
     *
     * The coordinates buffer is (re)allocated, when required by PME, with a padding,
     * the size of which is set by the constructor. The padding region clearing kernel
     * is scheduled in the \p pmeStream_ (unlike the coordinates H2D) as only the PME
     * task uses this padding area.
     *
     * The force buffer is cleared if its size increases, so that previously unused
     * memory is cleared before forces are accumulated.
     *
     * \param[in] numAtomsLocal  Number of atoms in local domain.
     * \param[in] numAtomsAll    Total number of atoms to handle.
     */
    void reinit(int numAtomsLocal, int numAtomsAll);
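
    /* Illustrative call pattern (variable names are assumptions): after each
     * domain decomposition (re)partitioning, the ranges are refreshed so that
     * the local range is [0, numHomeAtoms) and the non-local range is
     * [numHomeAtoms, numAllAtoms):
     *
     *   stateGpu.reinit(numHomeAtoms, numAllAtoms);
     */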
    /*! \brief Returns the range of atoms to be copied based on the copy type (all, local or non-local).
     *
     * \todo There are at least three versions of the function with this functionality in the code:
     *       this one and two more in NBNXM. These should be unified into a general function in the
     *       domain decomposition module.
     *
     * \param[in] atomLocality  If all, local or non-local ranges are needed.
     *
     * \returns Tuple, containing the index of the first atom in the range and the total number of
     *          atoms in the range.
     */
    std::tuple<int, int> getAtomRangesFromAtomLocality(AtomLocality atomLocality);
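
    /* Typical consumption of the returned tuple (a sketch; std::tie unpacks
     * the first-atom index and the atom count):
     *
     *   int atomsStartAt, numAtomsToCopy;
     *   std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);
     */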
    /*! \brief Get the positions buffer on the GPU.
     *
     * \returns GPU positions buffer.
     */
    DeviceBuffer<RVec> getCoordinates();

    /*! \brief Copy positions to the GPU memory.
     *
     * \param[in] h_x           Positions in the host memory.
     * \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyCoordinatesToGpu(gmx::ArrayRef<const gmx::RVec> h_x, AtomLocality atomLocality);
    /*! \brief Get the event synchronizer of the coordinates ready for consumption on the device.
     *
     * Returns the event synchronizer which indicates that the coordinates are ready for
     * consumption on the device. Takes into account that the producer may be different.
     *
     * If the update is offloaded, and the current step is not a DD/search step, the returned
     * synchronizer indicates the completion of GPU update-constraint kernels. Otherwise, on search
     * steps and if update is not offloaded, the coordinates are provided by the H2D copy and the
     * returned synchronizer indicates that the copy is complete.
     *
     * \param[in] atomLocality    Locality of the particles to wait for.
     * \param[in] simulationWork  The simulation lifetime flags.
     * \param[in] stepWork        The step lifetime flags.
     *
     * \returns  The event to synchronize the stream that consumes coordinates on device.
     */
    GpuEventSynchronizer* getCoordinatesReadyOnDeviceEvent(AtomLocality              atomLocality,
                                                           const SimulationWorkload& simulationWork,
                                                           const StepWorkload&       stepWork);
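
    /* Consumer-side sketch (the stateGpu and consumerStream objects are
     * assumptions): a stream that launches a kernel reading the coordinates
     * first enqueues a wait on the returned synchronizer, so the launch is
     * ordered after the producer (H2D copy or update-constraints):
     *
     *   GpuEventSynchronizer* xReady = stateGpu.getCoordinatesReadyOnDeviceEvent(
     *           AtomLocality::Local, simulationWork, stepWork);
     *   xReady->enqueueWaitEvent(consumerStream);
     */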
    /*! \brief Blocking wait until coordinates are copied to the device.
     *
     * Synchronizes the stream in which the copy was executed.
     *
     * \param[in] atomLocality  Locality of the particles to wait for.
     */
    void waitCoordinatesCopiedToDevice(AtomLocality atomLocality);

    /*! \brief Getter for the event synchronizer for when the update is done on the GPU.
     *
     * \returns  The event to synchronize the stream where coordinates were updated on the device.
     */
    GpuEventSynchronizer* xUpdatedOnDevice();
    /*! \brief Copy positions from the GPU memory.
     *
     * \param[in] h_x           Positions buffer in the host memory.
     * \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyCoordinatesFromGpu(gmx::ArrayRef<gmx::RVec> h_x, AtomLocality atomLocality);

    /*! \brief Wait until coordinates are available on the host.
     *
     * \param[in] atomLocality  Locality of the particles to wait for.
     */
    void waitCoordinatesReadyOnHost(AtomLocality atomLocality);
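
    /* Host-side readback sketch (the stateGpu and h_x objects are
     * assumptions): an asynchronous D2H copy followed by a blocking wait
     * before the host reads the data:
     *
     *   stateGpu.copyCoordinatesFromGpu(h_x, AtomLocality::Local);
     *   stateGpu.waitCoordinatesReadyOnHost(AtomLocality::Local);
     */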
    /*! \brief Get the velocities buffer on the GPU.
     *
     * \returns GPU velocities buffer.
     */
    DeviceBuffer<RVec> getVelocities();

    /*! \brief Copy velocities to the GPU memory.
     *
     * \param[in] h_v           Velocities in the host memory.
     * \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyVelocitiesToGpu(gmx::ArrayRef<const gmx::RVec> h_v, AtomLocality atomLocality);

    /*! \brief Get the event synchronizer on the H2D velocities copy.
     *
     * \param[in] atomLocality  Locality of the particles to wait for.
     *
     * \returns  The event to synchronize the stream that consumes velocities on device.
     */
    GpuEventSynchronizer* getVelocitiesReadyOnDeviceEvent(AtomLocality atomLocality);

    /*! \brief Copy velocities from the GPU memory.
     *
     * \param[in] h_v           Velocities buffer in the host memory.
     * \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyVelocitiesFromGpu(gmx::ArrayRef<gmx::RVec> h_v, AtomLocality atomLocality);

    /*! \brief Wait until velocities are available on the host.
     *
     * \param[in] atomLocality  Locality of the particles to wait for.
     */
    void waitVelocitiesReadyOnHost(AtomLocality atomLocality);
    /*! \brief Get the force buffer on the GPU.
     *
     * \returns GPU force buffer.
     */
    DeviceBuffer<RVec> getForces();

    /*! \brief Copy forces to the GPU memory.
     *
     * \param[in] h_f           Forces in the host memory.
     * \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyForcesToGpu(gmx::ArrayRef<const gmx::RVec> h_f, AtomLocality atomLocality);
    /*! \brief Get the event synchronizer for the forces ready on device.
     *
     * Returns either of the event synchronizers, depending on the offload scenario
     * for the current simulation timestep:
     * 1. The forces are copied to the device (when GPU buffer ops are off)
     * 2. The forces are reduced on the device (GPU buffer ops are on)
     *
     * \todo Pass the step workload instead of the useGpuFBufferOps boolean.
     *
     * \param[in] atomLocality      Locality of the particles to wait for.
     * \param[in] useGpuFBufferOps  If the force buffer ops are offloaded to the GPU.
     *
     * \returns  The event to synchronize the stream that consumes forces on device.
     */
    GpuEventSynchronizer* getForcesReadyOnDeviceEvent(AtomLocality atomLocality, bool useGpuFBufferOps);
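
    /* Consumer-side sketch (the stateGpu, consumerStream and useGpuFBufferOps
     * names are assumptions): the same call covers both offload scenarios, so
     * the caller only passes the buffer-ops flag:
     *
     *   GpuEventSynchronizer* fReady =
     *           stateGpu.getForcesReadyOnDeviceEvent(AtomLocality::Local, useGpuFBufferOps);
     *   fReady->enqueueWaitEvent(consumerStream);
     */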
    /*! \brief Getter for the event synchronizer for when the forces are reduced on the GPU.
     *
     * \returns  The event to mark when forces are reduced on the GPU.
     */
    GpuEventSynchronizer* fReducedOnDevice();
    /*! \brief Copy forces from the GPU memory.
     *
     * \param[in] h_f           Forces buffer in the host memory.
     * \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyForcesFromGpu(gmx::ArrayRef<gmx::RVec> h_f, AtomLocality atomLocality);

    /*! \brief Wait until forces are available on the host.
     *
     * \param[in] atomLocality  Locality of the particles to wait for.
     */
    void waitForcesReadyOnHost(AtomLocality atomLocality);

    /*! \brief Getter for the update stream.
     *
     * \todo This is temporary here, until the management of this stream is taken over.
     *
     * \returns The device command stream to use in update-constraints.
     */
    const DeviceStream* getUpdateStream();
    /*! \brief Getter for the number of local atoms.
     *
     * \returns The number of local atoms.
     */
    int numAtomsLocal();

    /*! \brief Getter for the total number of atoms.
     *
     * \returns The total number of atoms.
     */
    int numAtomsAll();

private:
    //! GPU PME stream.
    const DeviceStream* pmeStream_;
    //! GPU NBNXM local stream.
    const DeviceStream* localStream_;
    //! GPU NBNXM non-local stream.
    const DeviceStream* nonLocalStream_;
    //! GPU update-constraints stream.
    const DeviceStream* updateStream_;
    // Streams to use for coordinates H2D and D2H copies (one stream for each atom locality)
    EnumerationArray<AtomLocality, const DeviceStream*> xCopyStreams_ = { { nullptr } };
    // Streams to use for velocities H2D and D2H copies (one stream for each atom locality)
    EnumerationArray<AtomLocality, const DeviceStream*> vCopyStreams_ = { { nullptr } };
    // Streams to use for forces H2D and D2H copies (one stream for each atom locality)
    EnumerationArray<AtomLocality, const DeviceStream*> fCopyStreams_ = { { nullptr } };
    /*! \brief An array of events that indicate H2D copy is complete (one event for each atom locality)
     *
     * \todo Reconsider naming. It should be xCopiedToDevice or xH2DCopyComplete, etc.
     */
    EnumerationArray<AtomLocality, GpuEventSynchronizer> xReadyOnDevice_;
    //! An event that the coordinates are ready after update-constraints execution
    GpuEventSynchronizer xUpdatedOnDevice_;
    //! An array of events that indicate D2H copy of coordinates is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> xReadyOnHost_;

    //! An array of events that indicate H2D copy of velocities is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> vReadyOnDevice_;
    //! An array of events that indicate D2H copy of velocities is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> vReadyOnHost_;

    //! An array of events that indicate H2D copy of forces is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> fReadyOnDevice_;
    //! An event that the forces were reduced on the GPU
    GpuEventSynchronizer fReducedOnDevice_;
    //! An array of events that indicate D2H copy of forces is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> fReadyOnHost_;
    //! GPU context (for OpenCL builds)
    const DeviceContext& deviceContext_;
    //! Default GPU calls behavior
    GpuApiCallBehavior transferKind_ = GpuApiCallBehavior::Async;
    //! Required minimum divisor of the allocation size of the coordinates buffer
    int allocationBlockSizeDivisor_ = 0;

    //! Number of local atoms
    int numAtomsLocal_ = -1;
    //! Total number of atoms
    int numAtomsAll_ = -1;
    //! Device positions buffer
    DeviceBuffer<RVec> d_x_;
    //! Number of particles saved in the positions buffer
    int d_xSize_ = -1;
    //! Allocation size for the positions buffer
    int d_xCapacity_ = -1;

    //! Device velocities buffer
    DeviceBuffer<RVec> d_v_;
    //! Number of particles saved in the velocities buffer
    int d_vSize_ = -1;
    //! Allocation size for the velocities buffer
    int d_vCapacity_ = -1;

    //! Device force buffer
    DeviceBuffer<RVec> d_f_;
    //! Number of particles saved in the force buffer
    int d_fSize_ = -1;
    //! Allocation size for the force buffer
    int d_fCapacity_ = -1;
    //! \brief Pointer to wallcycle structure.
    gmx_wallcycle* wcycle_;
    /*! \brief Performs the copy of data from host to device buffer.
     *
     * \todo Template on locality.
     *
     * \param[out] d_data        Device-side buffer.
     * \param[in]  h_data        Host-side buffer.
     * \param[in]  dataSize      Device-side data allocation size.
     * \param[in]  atomLocality  If all, local or non-local ranges should be copied.
     * \param[in]  deviceStream  GPU stream to execute copy in.
     */
    void copyToDevice(DeviceBuffer<RVec>             d_data,
                      gmx::ArrayRef<const gmx::RVec> h_data,
                      int                            dataSize,
                      AtomLocality                   atomLocality,
                      const DeviceStream&            deviceStream);
    /*! \brief Performs the copy of data from device to host buffer.
     *
     * \param[out] h_data        Host-side buffer.
     * \param[in]  d_data        Device-side buffer.
     * \param[in]  dataSize      Device-side data allocation size.
     * \param[in]  atomLocality  If all, local or non-local ranges should be copied.
     * \param[in]  deviceStream  GPU stream to execute copy in.
     */
    void copyFromDevice(gmx::ArrayRef<gmx::RVec> h_data,
                        DeviceBuffer<RVec>       d_data,
                        int                      dataSize,
                        AtomLocality             atomLocality,
                        const DeviceStream&      deviceStream);
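
    /* Expected internal pattern for both helpers (a sketch, not the verbatim
     * implementation; offsets are in whole-RVec elements): the locality is
     * translated into a sub-range, and the generic copy routine from
     * devicebuffer.h is invoked on that sub-range:
     *
     *   int atomsStartAt, numAtomsToCopy;
     *   std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);
     *   if (numAtomsToCopy != 0)
     *   {
     *       copyFromDeviceBuffer(&h_data[atomsStartAt], &d_data, atomsStartAt,
     *                            numAtomsToCopy, deviceStream, transferKind_, nullptr);
     *   }
     */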
};

} // namespace gmx

#endif // GMX_MDTYPES_STATE_PROPAGATOR_DATA_GPU_IMPL_H