/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Declaration of low-level functions and fields of GPU state propagator object.
 *
 * \author Artem Zhmurov <zhmurov@gmail.com>
 *
 * \ingroup module_mdtypes
 */
#ifndef GMX_MDTYPES_STATE_PROPAGATOR_DATA_GPU_IMPL_H
#define GMX_MDTYPES_STATE_PROPAGATOR_DATA_GPU_IMPL_H
#include "gromacs/gpu_utils/devicebuffer.h"
#if GMX_GPU == GMX_GPU_CUDA
#    include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#elif GMX_GPU == GMX_GPU_OPENCL
#    include "gromacs/gpu_utils/gpueventsynchronizer_ocl.h"
#endif
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/utility/classhelpers.h"
#include "gromacs/utility/enumerationhelpers.h"
namespace gmx
{

class StatePropagatorDataGpu::Impl
{
public:
    /*! \brief Constructor
     *
     * The buffers are reallocated only at the reinit call, the padding is
     * used there for the coordinates buffer. It is needed for PME and added at
     * the end of the buffer. It is assumed that if the rank has PME duties on the
     * GPU, all coordinates are copied to the GPU and hence, for this rank, the
     * coordinates buffer is not split into local and non-local ranges. For other
     * ranks, the padding size is zero. This works because only one rank ever does
     * PME work on the GPU, and if that rank also does PP work, it is the only
     * rank. So all coordinates are always transferred.
     *
     * In OpenCL, only \p pmeStream is used since it is the only stream created in
     * the PME context. The local and non-local streams are only needed when buffer
     * ops are offloaded. This feature is currently not available in OpenCL and
     * hence these streams are not set in these builds.
     *
     * \note In CUDA, the update stream is created in the constructor as a temporary
     *       solution, in place until the stream manager is introduced.
     *       Note that this makes it impossible to construct this object in CUDA
     *       builds executing on a host without any CUDA-capable device available.
     *
     * \note In CUDA, \p deviceContext is unused, hence always nullptr;
     *       all stream arguments can also be nullptr in runs where the
     *       respective streams are not required.
     *       In OpenCL, \p deviceContext needs to be a valid device context.
     *       In OpenCL runs StatePropagatorDataGpu is currently only used
     *       with PME offload, and only on ranks with PME duty. Hence, the
     *       \p pmeStream argument needs to be a valid OpenCL queue object
     *       which must have been created in \p deviceContext.
     *
     * \todo Make a \p CommandStream visible in the CPU parts of the code so we
     *       will not have to pass a void*.
     * \todo Make a \p DeviceContext object visible in CPU parts of the code so we
     *       will not have to pass a void*.
     *
     *  \param[in] pmeStream       Device PME stream, nullptr allowed.
     *  \param[in] localStream     Device NBNXM local stream, nullptr allowed.
     *  \param[in] nonLocalStream  Device NBNXM non-local stream, nullptr allowed.
     *  \param[in] deviceContext   Device context, nullptr allowed.
     *  \param[in] transferKind    H2D/D2H transfer call behavior (synchronous or not).
     *  \param[in] paddingSize     Padding size for coordinates buffer.
     *  \param[in] wcycle          Wall cycle counter data.
     */
    Impl(const void*        pmeStream,
         const void*        localStream,
         const void*        nonLocalStream,
         const void*        deviceContext,
         GpuApiCallBehavior transferKind,
         int                paddingSize,
         gmx_wallcycle*     wcycle);
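
    /* A minimal construction sketch (hedged: all handles below are hypothetical
     * and would be obtained from the PME/NBNXM GPU modules; in CUDA builds the
     * unused stream and context arguments may be nullptr, as documented above):
     *
     *   StatePropagatorDataGpu::Impl stateGpu(pmeStream, localStream, nonLocalStream,
     *                                         deviceContext, GpuApiCallBehavior::Async,
     *                                         paddingSize, wcycle);
     */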
    /*! \brief Constructor to use in a PME-only rank and in tests.
     *
     * This constructor should be used if only a coordinate device buffer should be managed
     * using a single stream. Any operation on the force or velocity buffers, as well as any
     * copy of non-local coordinates, will exit with an assertion failure. Note that
     * \p pmeStream can not be a nullptr; if it is, the constructor will exit with an
     * assertion failure.
     *
     * \todo Currently, unsupported copy operations are blocked by an assertion that the
     *       stream is not nullptr. This should be improved.
     *
     *  \param[in] pmeStream       Device PME stream, nullptr is not allowed.
     *  \param[in] deviceContext   Device context, nullptr allowed for non-OpenCL builds.
     *  \param[in] transferKind    H2D/D2H transfer call behavior (synchronous or not).
     *  \param[in] paddingSize     Padding size for coordinates buffer.
     *  \param[in] wcycle          Wall cycle counter data.
     */
    Impl(const void*        pmeStream,
         const void*        deviceContext,
         GpuApiCallBehavior transferKind,
         int                paddingSize,
         gmx_wallcycle*     wcycle);
    /*! \brief Set the ranges for local and non-local atoms and reallocate buffers.
     *
     * Reallocates the coordinate, velocity, and force buffers on the device.
     *
     * The coordinates buffer is (re)allocated, when required by PME, with a padding,
     * the size of which is set by the constructor. The padding region clearing kernel
     * is scheduled in the \p pmeStream_ (unlike the coordinates H2D) as only the PME
     * task uses this padding area.
     *
     * The force buffer is cleared if its size increases, so that previously unused
     * memory is cleared before forces are accumulated.
     *
     *  \param[in] numAtomsLocal  Number of atoms in local domain.
     *  \param[in] numAtomsAll    Total number of atoms to handle.
     */
    void reinit(int numAtomsLocal, int numAtomsAll);
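
    /* Expected call sequence on a DD/search step (a sketch; the atom counts are
     * hypothetical and would come from the domain decomposition module):
     *
     *   stateGpu.reinit(numHomeAtoms, numHomeAtoms + numHaloAtoms);
     *   stateGpu.copyCoordinatesToGpu(h_x, AtomLocality::Local);
     */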
    /*! \brief Returns the range of atoms to be copied based on the copy type (all, local or non-local).
     *
     * \todo There are at least three versions of the function with this functionality in the code:
     *       this one and two more in NBNXM. These should be unified into a general function.
     *
     *  \param[in] atomLocality  If all, local or non-local ranges are needed.
     *
     *  \returns Tuple, containing the index of the first atom in the range and the total number of atoms in the range.
     */
    std::tuple<int, int> getAtomRangesFromAtomLocality(AtomLocality atomLocality);
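
    /* Usage sketch: unpack the returned range with std::tie (the variable names
     * below are illustrative only):
     *
     *   int atomsStartAt, numAtomsToCopy;
     *   std::tie(atomsStartAt, numAtomsToCopy) =
     *           getAtomRangesFromAtomLocality(AtomLocality::NonLocal);
     */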
    /*! \brief Get the positions buffer on the GPU.
     *
     *  \returns GPU positions buffer.
     */
    DeviceBuffer<RVec> getCoordinates();

    /*! \brief Copy positions to the GPU memory.
     *
     *  \param[in] h_x           Positions in the host memory.
     *  \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyCoordinatesToGpu(gmx::ArrayRef<const gmx::RVec> h_x, AtomLocality atomLocality);
    /*! \brief Get the event synchronizer of the coordinates ready for consumption on the device.
     *
     * Returns the event synchronizer which indicates that the coordinates are ready for
     * consumption on the device. Takes into account that the producer may be different.
     *
     * If the update is offloaded, and the current step is not a DD/search step, the returned
     * synchronizer indicates the completion of GPU update-constraints kernels. Otherwise, on search
     * steps and if update is not offloaded, the coordinates are provided by the H2D copy and the
     * returned synchronizer indicates that the copy is complete.
     *
     *  \param[in] atomLocality    Locality of the particles to wait for.
     *  \param[in] simulationWork  The simulation lifetime flags.
     *  \param[in] stepWork        The step lifetime flags.
     *
     *  \returns  The event to synchronize the stream that consumes coordinates on device.
     */
    GpuEventSynchronizer* getCoordinatesReadyOnDeviceEvent(AtomLocality              atomLocality,
                                                           const SimulationWorkload& simulationWork,
                                                           const StepWorkload&       stepWork);
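
    /* Usage sketch for a device-side consumer (hedged: consumerStream is a
     * hypothetical stream, and enqueueWaitEvent() is assumed to be the
     * GpuEventSynchronizer method that makes a stream wait on the event):
     *
     *   GpuEventSynchronizer* xReady = stateGpu.getCoordinatesReadyOnDeviceEvent(
     *           AtomLocality::Local, simulationWork, stepWork);
     *   xReady->enqueueWaitEvent(consumerStream);
     */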
    /*! \brief Blocking wait until coordinates are copied to the device.
     *
     * Synchronizes the stream in which the copy was executed.
     *
     *  \param[in] atomLocality  Locality of the particles to wait for.
     */
    void waitCoordinatesCopiedToDevice(AtomLocality atomLocality);

    /*! \brief Getter for the event synchronizer for the update that is done on the GPU.
     *
     *  \returns  The event to synchronize the stream after coordinates were updated on the device.
     */
    GpuEventSynchronizer* xUpdatedOnDevice();
    /*! \brief Copy positions from the GPU memory.
     *
     *  \param[in] h_x           Positions buffer in the host memory.
     *  \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyCoordinatesFromGpu(gmx::ArrayRef<gmx::RVec> h_x, AtomLocality atomLocality);

    /*! \brief Wait until coordinates are available on the host.
     *
     *  \param[in] atomLocality  Locality of the particles to wait for.
     */
    void waitCoordinatesReadyOnHost(AtomLocality atomLocality);


    /*! \brief Get the velocities buffer on the GPU.
     *
     *  \returns GPU velocities buffer.
     */
    DeviceBuffer<RVec> getVelocities();

    /*! \brief Copy velocities to the GPU memory.
     *
     *  \param[in] h_v           Velocities in the host memory.
     *  \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyVelocitiesToGpu(gmx::ArrayRef<const gmx::RVec> h_v, AtomLocality atomLocality);

    /*! \brief Get the event synchronizer on the H2D velocities copy.
     *
     *  \param[in] atomLocality  Locality of the particles to wait for.
     *
     *  \returns  The event to synchronize the stream that consumes velocities on device.
     */
    GpuEventSynchronizer* getVelocitiesReadyOnDeviceEvent(AtomLocality atomLocality);

    /*! \brief Copy velocities from the GPU memory.
     *
     *  \param[in] h_v           Velocities buffer in the host memory.
     *  \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyVelocitiesFromGpu(gmx::ArrayRef<gmx::RVec> h_v, AtomLocality atomLocality);

    /*! \brief Wait until velocities are available on the host.
     *
     *  \param[in] atomLocality  Locality of the particles to wait for.
     */
    void waitVelocitiesReadyOnHost(AtomLocality atomLocality);


    /*! \brief Get the force buffer on the GPU.
     *
     *  \returns GPU force buffer.
     */
    DeviceBuffer<RVec> getForces();

    /*! \brief Copy forces to the GPU memory.
     *
     *  \param[in] h_f           Forces in the host memory.
     *  \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyForcesToGpu(gmx::ArrayRef<const gmx::RVec> h_f, AtomLocality atomLocality);
    /*! \brief Get the event synchronizer for the forces ready on device.
     *
     * Returns one of two event synchronizers, depending on the offload scenario
     * for the current simulation timestep:
     * 1. The forces are copied to the device (when GPU buffer ops are off)
     * 2. The forces are reduced on the device (when GPU buffer ops are on)
     *
     * \todo Pass step workload instead of the useGpuFBufferOps boolean.
     *
     *  \param[in] atomLocality      Locality of the particles to wait for.
     *  \param[in] useGpuFBufferOps  If the force buffer ops are offloaded to the GPU.
     *
     *  \returns  The event to synchronize the stream that consumes forces on device.
     */
    GpuEventSynchronizer* getForcesReadyOnDeviceEvent(AtomLocality atomLocality, bool useGpuFBufferOps);
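
    /* Usage sketch (hedged: stepWork.useGpuFBufferOps is assumed to carry the
     * buffer-ops offload flag mentioned in the \todo above, and consumerStream
     * is a hypothetical stream):
     *
     *   GpuEventSynchronizer* fReady = stateGpu.getForcesReadyOnDeviceEvent(
     *           AtomLocality::Local, stepWork.useGpuFBufferOps);
     *   fReady->enqueueWaitEvent(consumerStream);
     */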
    /*! \brief Getter for the event synchronizer that indicates the forces are reduced on the GPU.
     *
     *  \returns  The event to mark when forces are reduced on the GPU.
     */
    GpuEventSynchronizer* fReducedOnDevice();

    /*! \brief Copy forces from the GPU memory.
     *
     *  \param[in] h_f           Forces buffer in the host memory.
     *  \param[in] atomLocality  Locality of the particles to copy.
     */
    void copyForcesFromGpu(gmx::ArrayRef<gmx::RVec> h_f, AtomLocality atomLocality);

    /*! \brief Wait until forces are available on the host.
     *
     *  \param[in] atomLocality  Locality of the particles to wait for.
     */
    void waitForcesReadyOnHost(AtomLocality atomLocality);
    /*! \brief Getter for the update stream.
     *
     * \todo This is temporary here, until the management of this stream is taken over.
     *
     *  \returns  The device command stream to use in update-constraints.
     */
    void* getUpdateStream();

    /*! \brief Getter for the number of local atoms.
     *
     *  \returns  The number of local atoms.
     */
    int numAtomsLocal();

    /*! \brief Getter for the total number of atoms.
     *
     *  \returns  The total number of atoms.
     */
    int numAtomsAll();
private:
    //! GPU PME stream.
    CommandStream pmeStream_ = nullptr;
    //! GPU NBNXM local stream.
    CommandStream localStream_ = nullptr;
    //! GPU NBNXM non-local stream.
    CommandStream nonLocalStream_ = nullptr;
    //! GPU Update-constraints stream.
    CommandStream updateStream_ = nullptr;

    // Streams to use for coordinates H2D and D2H copies (one stream for each atom locality)
    EnumerationArray<AtomLocality, CommandStream> xCopyStreams_ = { { nullptr } };
    // Streams to use for velocities H2D and D2H copies (one stream for each atom locality)
    EnumerationArray<AtomLocality, CommandStream> vCopyStreams_ = { { nullptr } };
    // Streams to use for forces H2D and D2H copies (one stream for each atom locality)
    EnumerationArray<AtomLocality, CommandStream> fCopyStreams_ = { { nullptr } };
    /*! \brief An array of events that indicate H2D copy is complete (one event for each atom locality)
     *
     * \todo Reconsider naming. It should be xCopiedToDevice or xH2DCopyComplete, etc.
     */
    EnumerationArray<AtomLocality, GpuEventSynchronizer> xReadyOnDevice_;
    //! An event that the coordinates are ready after update-constraints execution
    GpuEventSynchronizer xUpdatedOnDevice_;
    //! An array of events that indicate D2H copy of coordinates is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> xReadyOnHost_;

    //! An array of events that indicate H2D copy of velocities is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> vReadyOnDevice_;
    //! An array of events that indicate D2H copy of velocities is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> vReadyOnHost_;

    //! An array of events that indicate H2D copy of forces is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> fReadyOnDevice_;
    //! An event that the forces were reduced on the GPU
    GpuEventSynchronizer fReducedOnDevice_;
    //! An array of events that indicate D2H copy of forces is complete (one event for each atom locality)
    EnumerationArray<AtomLocality, GpuEventSynchronizer> fReadyOnHost_;
    /*! \brief GPU context (for OpenCL builds)
     * \todo Make a Context class usable in CPU code
     */
    DeviceContext deviceContext_ = nullptr;
    //! Default GPU calls behavior
    GpuApiCallBehavior transferKind_ = GpuApiCallBehavior::Async;
    //! Padding size for the coordinates buffer
    int paddingSize_ = 0;

    //! Number of local atoms
    int numAtomsLocal_ = -1;
    //! Total number of atoms
    int numAtomsAll_ = -1;
    //! Device positions buffer
    DeviceBuffer<RVec> d_x_;
    //! Number of particles saved in the positions buffer
    int d_xSize_ = -1;
    //! Allocation size for the positions buffer
    int d_xCapacity_ = -1;

    //! Device velocities buffer
    DeviceBuffer<RVec> d_v_;
    //! Number of particles saved in the velocities buffer
    int d_vSize_ = -1;
    //! Allocation size for the velocities buffer
    int d_vCapacity_ = -1;

    //! Device force buffer
    DeviceBuffer<RVec> d_f_;
    //! Number of particles saved in the force buffer
    int d_fSize_ = -1;
    //! Allocation size for the force buffer
    int d_fCapacity_ = -1;
    //! \brief Pointer to wallcycle structure.
    gmx_wallcycle* wcycle_;
    /*! \brief Performs the copy of data from host to device buffer.
     *
     * \todo Template on locality.
     *
     *  \param[out] d_data         Device-side buffer.
     *  \param[in]  h_data         Host-side buffer.
     *  \param[in]  dataSize       Device-side data allocation size.
     *  \param[in]  atomLocality   If all, local or non-local ranges should be copied.
     *  \param[in]  commandStream  GPU stream to execute copy in.
     */
    void copyToDevice(DeviceBuffer<RVec>             d_data,
                      gmx::ArrayRef<const gmx::RVec> h_data,
                      int                            dataSize,
                      AtomLocality                   atomLocality,
                      CommandStream                  commandStream);
    /*! \brief Performs the copy of data from device to host buffer.
     *
     *  \param[out] h_data         Host-side buffer.
     *  \param[in]  d_data         Device-side buffer.
     *  \param[in]  dataSize       Device-side data allocation size.
     *  \param[in]  atomLocality   If all, local or non-local ranges should be copied.
     *  \param[in]  commandStream  GPU stream to execute copy in.
     */
    void copyFromDevice(gmx::ArrayRef<gmx::RVec> h_data,
                        DeviceBuffer<RVec>       d_data,
                        int                      dataSize,
                        AtomLocality             atomLocality,
                        CommandStream            commandStream);
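
    /* Sketch of how a public copy routine is expected to compose this helper
     * (an assumption based on the declarations above, not the actual definition):
     *
     *   void StatePropagatorDataGpu::Impl::copyForcesFromGpu(gmx::ArrayRef<gmx::RVec> h_f,
     *                                                        AtomLocality atomLocality)
     *   {
     *       copyFromDevice(h_f, d_f_, d_fSize_, atomLocality, fCopyStreams_[atomLocality]);
     *       fReadyOnHost_[atomLocality].markEvent(fCopyStreams_[atomLocality]);
     *   }
     */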
};

} // namespace gmx

#endif // GMX_MDTYPES_STATE_PROPAGATOR_DATA_GPU_IMPL_H