Add separate constructor to StatePropagatorDataGpu for PME-only rank / PME tests
[alexxy/gromacs.git] src/gromacs/mdtypes/state_propagator_data_gpu_impl_gpu.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Definitions of interfaces for GPU state data propagator object.
 *
 * \author Artem Zhmurov <zhmurov@gmail.com>
 *
 * \ingroup module_mdtypes
 */
#include "gmxpre.h"

#include "config.h"

#if GMX_GPU != GMX_GPU_NONE

#if GMX_GPU == GMX_GPU_CUDA
#include "gromacs/gpu_utils/cudautils.cuh"
#endif
#include "gromacs/gpu_utils/devicebuffer.h"
#if GMX_GPU == GMX_GPU_OPENCL
#include "gromacs/gpu_utils/oclutils.h"
#endif
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/utility/classhelpers.h"

#include "state_propagator_data_gpu_impl.h"

namespace gmx
{

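// Full constructor, used on ranks that may own PME, local and non-local streams. In OpenCL
// builds only the PME stream and the device context are used (and the update stream aliases
// the PME stream); in CUDA builds the streams are adopted as provided and a dedicated update
// stream is created here.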
StatePropagatorDataGpu::Impl::Impl(const void            *pmeStream,
                                   const void            *localStream,
                                   const void            *nonLocalStream,
                                   const void            *deviceContext,
                                   GpuApiCallBehavior     transferKind,
                                   int                    paddingSize) :
    transferKind_(transferKind),
    paddingSize_(paddingSize)
{
    static_assert(GMX_GPU != GMX_GPU_NONE, "This object should only be constructed on the GPU code-paths.");
    GMX_RELEASE_ASSERT(getenv("GMX_USE_GPU_BUFFER_OPS") == nullptr, "GPU buffer ops are not supported in this build.");

    // TODO: Refactor when the StreamManager is introduced.
    if (GMX_GPU == GMX_GPU_OPENCL)
    {
        GMX_ASSERT(deviceContext != nullptr, "GPU context should be set in OpenCL builds.");
        GMX_ASSERT(pmeStream != nullptr, "GPU PME stream should be set in OpenCL builds.");

        // The update stream is set to the PME stream in OpenCL, since the PME stream is the only stream created in the PME context.
        pmeStream_      = *static_cast<const CommandStream*>(pmeStream);
        updateStream_   = *static_cast<const CommandStream*>(pmeStream);
        deviceContext_  = *static_cast<const DeviceContext*>(deviceContext);
        GMX_UNUSED_VALUE(localStream);
        GMX_UNUSED_VALUE(nonLocalStream);
    }

    if (GMX_GPU == GMX_GPU_CUDA)
    {
        if (pmeStream != nullptr)
        {
            pmeStream_ = *static_cast<const CommandStream*>(pmeStream);
        }
        if (localStream != nullptr)
        {
            localStream_ = *static_cast<const CommandStream*>(localStream);
        }
        if (nonLocalStream != nullptr)
        {
            nonLocalStream_ = *static_cast<const CommandStream*>(nonLocalStream);
        }

        // TODO: The update stream should be created only when it is needed.
#if (GMX_GPU == GMX_GPU_CUDA)
        cudaError_t stat;
        stat = cudaStreamCreate(&updateStream_);
        CU_RET_ERR(stat, "CUDA stream creation failed in StatePropagatorDataGpu");
#endif
        GMX_UNUSED_VALUE(deviceContext);
    }

    // Map each atom locality to the stream that will be used for coordinates,
    // velocities and forces transfers. The same streams are used for H2D and D2H copies.
    // Note that a nullptr stream is used here to indicate that the copy is not supported.
    xCopyStreams_[AtomLocality::Local]    = updateStream_;
    xCopyStreams_[AtomLocality::NonLocal] = nonLocalStream_;
    xCopyStreams_[AtomLocality::All]      = updateStream_;

    vCopyStreams_[AtomLocality::Local]    = updateStream_;
    vCopyStreams_[AtomLocality::NonLocal] = nullptr;
    vCopyStreams_[AtomLocality::All]      = updateStream_;

    fCopyStreams_[AtomLocality::Local]    = localStream_;
    fCopyStreams_[AtomLocality::NonLocal] = nonLocalStream_;
    fCopyStreams_[AtomLocality::All]      = nullptr;
}

StatePropagatorDataGpu::Impl::Impl(const void            *pmeStream,
                                   const void            *deviceContext,
                                   GpuApiCallBehavior     transferKind,
                                   int                    paddingSize) :
    transferKind_(transferKind),
    paddingSize_(paddingSize)
{
    static_assert(GMX_GPU != GMX_GPU_NONE, "This object should only be constructed on the GPU code-paths.");
    GMX_RELEASE_ASSERT(getenv("GMX_USE_GPU_BUFFER_OPS") == nullptr, "GPU buffer ops are not supported in this build.");

    if (GMX_GPU == GMX_GPU_OPENCL)
    {
        GMX_ASSERT(deviceContext != nullptr, "GPU context should be set in OpenCL builds.");
        deviceContext_  = *static_cast<const DeviceContext*>(deviceContext);
    }

    GMX_ASSERT(pmeStream != nullptr, "GPU PME stream should be set.");
    pmeStream_      = *static_cast<const CommandStream*>(pmeStream);

    localStream_    = nullptr;
    nonLocalStream_ = nullptr;
    updateStream_   = nullptr;

    // Only local/all coordinates are allowed to be copied on a PME-only rank or in PME tests.
    // This is a temporary measure to make it safe to use this class in those cases.
    xCopyStreams_[AtomLocality::Local]    = pmeStream_;
    xCopyStreams_[AtomLocality::NonLocal] = nullptr;
    xCopyStreams_[AtomLocality::All]      = pmeStream_;

    vCopyStreams_[AtomLocality::Local]    = nullptr;
    vCopyStreams_[AtomLocality::NonLocal] = nullptr;
    vCopyStreams_[AtomLocality::All]      = nullptr;

    fCopyStreams_[AtomLocality::Local]    = nullptr;
    fCopyStreams_[AtomLocality::NonLocal] = nullptr;
    fCopyStreams_[AtomLocality::All]      = nullptr;
}

StatePropagatorDataGpu::Impl::~Impl()
{
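    // Note: in the CUDA build the constructor creates updateStream_ with cudaStreamCreate(),
    // and it is not destroyed here; this is assumed to be a deliberate simplification for the
    // current lifetime of the object rather than documented behavior.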
}

void StatePropagatorDataGpu::Impl::reinit(int numAtomsLocal, int numAtomsAll)
{
    numAtomsLocal_ = numAtomsLocal;
    numAtomsAll_   = numAtomsAll;

    int numAtomsPadded;
    if (paddingSize_ > 0)
    {
        numAtomsPadded = ((numAtomsAll_ + paddingSize_ - 1)/paddingSize_)*paddingSize_;
    }
    else
    {
        numAtomsPadded = numAtomsAll_;
    }
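    // For illustration (values assumed): with paddingSize_ = 256 and numAtomsAll_ = 1000,
    // numAtomsPadded rounds up to 1024; the coordinates of the 24 padding atoms are
    // cleared below so that the padding region never holds uninitialized data.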

    reallocateDeviceBuffer(&d_x_, DIM*numAtomsPadded, &d_xSize_, &d_xCapacity_, deviceContext_);

    const size_t paddingAllocationSize = numAtomsPadded - numAtomsAll_;
    if (paddingAllocationSize > 0)
    {
        // The PME stream is used here because the padding region of d_x_ is only used by the PME task.
        clearDeviceBufferAsync(&d_x_, DIM*numAtomsAll_, DIM*paddingAllocationSize, pmeStream_);
    }

    reallocateDeviceBuffer(&d_v_, DIM*numAtomsAll_, &d_vSize_, &d_vCapacity_, deviceContext_);
    reallocateDeviceBuffer(&d_f_, DIM*numAtomsAll_, &d_fSize_, &d_fCapacity_, deviceContext_);
}

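// Maps an atom locality to the (first atom, number of atoms) pair describing a copy range.
// For illustration (values assumed): with numAtomsLocal_ = 6 and numAtomsAll_ = 10,
// Local -> (0, 6), NonLocal -> (6, 4), All -> (0, 10).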
std::tuple<int, int> StatePropagatorDataGpu::Impl::getAtomRangesFromAtomLocality(AtomLocality  atomLocality)
{
    int atomsStartAt   = 0;
    int numAtomsToCopy = 0;
    switch (atomLocality)
    {
        case AtomLocality::All:
            atomsStartAt    = 0;
            numAtomsToCopy  = numAtomsAll_;
            break;
        case AtomLocality::Local:
            atomsStartAt    = 0;
            numAtomsToCopy  = numAtomsLocal_;
            break;
        case AtomLocality::NonLocal:
            atomsStartAt    = numAtomsLocal_;
            numAtomsToCopy  = numAtomsAll_ - numAtomsLocal_;
            break;
        default:
            GMX_RELEASE_ASSERT(false, "Wrong range of atoms requested in GPU state data manager. Should be All, Local or NonLocal.");
    }
    GMX_ASSERT(atomsStartAt   >= 0, "The first element to copy has a negative index. Probably, the GPU propagator state was not initialized.");
    GMX_ASSERT(numAtomsToCopy >= 0, "Number of atoms to copy is negative. Probably, the GPU propagator state was not initialized.");
    return std::make_tuple(atomsStartAt, numAtomsToCopy);
}

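// Generic H2D copy helper used by the copyXxxToGpu() methods below. Note that dataSize,
// elementsStartAt and numElementsToCopy are all in float elements (DIM values per atom),
// matching the allocation sizes used in reinit().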
void StatePropagatorDataGpu::Impl::copyToDevice(DeviceBuffer<float>                   d_data,
                                                const gmx::ArrayRef<const gmx::RVec>  h_data,
                                                int                                   dataSize,
                                                AtomLocality                          atomLocality,
                                                CommandStream                         commandStream)
{

    GMX_UNUSED_VALUE(dataSize);

    GMX_ASSERT(dataSize >= 0, "Trying to copy to device buffer before it was allocated.");

    int atomsStartAt, numAtomsToCopy;
    std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);

    int elementsStartAt   = atomsStartAt*DIM;
    int numElementsToCopy = numAtomsToCopy*DIM;

    if (numAtomsToCopy != 0)
    {
        GMX_ASSERT(elementsStartAt + numElementsToCopy <= dataSize, "The device allocation is smaller than the requested copy range.");
        GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(), "The host buffer is smaller than the requested copy range.");

        copyToDeviceBuffer(&d_data, reinterpret_cast<const float *>(&h_data.data()[atomsStartAt]),
                           elementsStartAt, numElementsToCopy,
                           commandStream, transferKind_, nullptr);
    }
}

void StatePropagatorDataGpu::Impl::copyFromDevice(gmx::ArrayRef<gmx::RVec>  h_data,
                                                  DeviceBuffer<float>       d_data,
                                                  int                       dataSize,
                                                  AtomLocality              atomLocality,
                                                  CommandStream             commandStream)
{

    GMX_UNUSED_VALUE(dataSize);

    GMX_ASSERT(dataSize >= 0, "Trying to copy from device buffer before it was allocated.");

    int atomsStartAt, numAtomsToCopy;
    std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);

    int elementsStartAt   = atomsStartAt*DIM;
    int numElementsToCopy = numAtomsToCopy*DIM;

    if (numAtomsToCopy != 0)
    {
        GMX_ASSERT(elementsStartAt + numElementsToCopy <= dataSize, "The device allocation is smaller than the requested copy range.");
        GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(), "The host buffer is smaller than the requested copy range.");

        copyFromDeviceBuffer(reinterpret_cast<float*>(&h_data.data()[atomsStartAt]), &d_data,
                             elementsStartAt, numElementsToCopy,
                             commandStream, transferKind_, nullptr);
    }
}

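// The getters below return the raw device buffers. Their sizes (d_xSize_, d_vSize_, d_fSize_)
// are tracked in float elements, as allocated in reinit().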
DeviceBuffer<float> StatePropagatorDataGpu::Impl::getCoordinates()
{
    return d_x_;
}

void StatePropagatorDataGpu::Impl::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec>  h_x,
                                                        AtomLocality                          atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = xCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr, "No stream is valid for copying positions with the given atom locality.");

    copyToDevice(d_x_, h_x, d_xSize_, atomLocality, commandStream);

    // markEvent is skipped in OpenCL because:
    //   - it is not needed: the copy is done in the same stream as the only consumer task (PME);
    //   - we do not consume the events in OpenCL, and leaving them unconsumed is not allowed by
    //     GpuEventSynchronizer (it would leak memory).
    // TODO: remove this by adding an event-mark free flavor of this function
    if (GMX_GPU == GMX_GPU_CUDA)
    {
        xReadyOnDevice_[atomLocality].markEvent(commandStream);
        // TODO: Remove when event-based synchronization is introduced
        gpuStreamSynchronize(commandStream);
    }
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getCoordinatesReadyOnDeviceEvent(AtomLocality              atomLocality,
                                                                                     const SimulationWorkload &simulationWork,
                                                                                     const StepWorkload       &stepWork)
{
    // The provider of the coordinates may be different for local atoms. If the update is offloaded
    // and this is not a neighbor search step, then the consumer needs to wait for the update
    // to complete. Otherwise, the coordinates are copied from the host and we need to wait for
    // the copy event. Non-local coordinates are always provided by the H2D copy.
    //
    // TODO: This should be reconsidered to support the halo exchange.
    //
    if (atomLocality == AtomLocality::Local && simulationWork.useGpuUpdate && !stepWork.doNeighborSearch)
    {
        return &xUpdatedOnDevice_;
    }
    else
    {
        return &xReadyOnDevice_[atomLocality];
    }
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::xUpdatedOnDevice()
{
    return &xUpdatedOnDevice_;
}

void StatePropagatorDataGpu::Impl::copyCoordinatesFromGpu(gmx::ArrayRef<gmx::RVec>  h_x,
                                                          AtomLocality              atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = xCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr, "No stream is valid for copying positions with the given atom locality.");

    copyFromDevice(h_x, d_x_, d_xSize_, atomLocality, commandStream);
    // TODO: Remove when event-based synchronization is introduced
    gpuStreamSynchronize(commandStream);
    // Note: unlike copyCoordinatesToGpu, this is not used in OpenCL, so the conditional is not needed.
    xReadyOnHost_[atomLocality].markEvent(commandStream);
}

void StatePropagatorDataGpu::Impl::waitCoordinatesReadyOnHost(AtomLocality  atomLocality)
{
    xReadyOnHost_[atomLocality].waitForEvent();
}

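// A typical host-side consumption pattern for the copy/wait pairs above (illustrative only;
// stateGpu is an assumed pointer to the public StatePropagatorDataGpu wrapper):
//
//   stateGpu->copyCoordinatesFromGpu(h_x, AtomLocality::Local);
//   stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
//   // h_x can now be read safely on the host.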

DeviceBuffer<float> StatePropagatorDataGpu::Impl::getVelocities()
{
    return d_v_;
}

void StatePropagatorDataGpu::Impl::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec>  h_v,
                                                       AtomLocality                          atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = vCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr, "No stream is valid for copying velocities with the given atom locality.");

    copyToDevice(d_v_, h_v, d_vSize_, atomLocality, commandStream);
    // TODO: Remove when event-based synchronization is introduced
    gpuStreamSynchronize(commandStream);
    vReadyOnDevice_[atomLocality].markEvent(commandStream);
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getVelocitiesReadyOnDeviceEvent(AtomLocality  atomLocality)
{
    return &vReadyOnDevice_[atomLocality];
}


void StatePropagatorDataGpu::Impl::copyVelocitiesFromGpu(gmx::ArrayRef<gmx::RVec>  h_v,
                                                         AtomLocality              atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = vCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr, "No stream is valid for copying velocities with the given atom locality.");

    copyFromDevice(h_v, d_v_, d_vSize_, atomLocality, commandStream);
    // TODO: Remove when event-based synchronization is introduced
    gpuStreamSynchronize(commandStream);
    vReadyOnHost_[atomLocality].markEvent(commandStream);
}

void StatePropagatorDataGpu::Impl::waitVelocitiesReadyOnHost(AtomLocality  atomLocality)
{
    vReadyOnHost_[atomLocality].waitForEvent();
}


DeviceBuffer<float> StatePropagatorDataGpu::Impl::getForces()
{
    return d_f_;
}

void StatePropagatorDataGpu::Impl::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec>  h_f,
                                                   AtomLocality                          atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = fCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr, "No stream is valid for copying forces with the given atom locality.");

    copyToDevice(d_f_, h_f, d_fSize_, atomLocality, commandStream);
    fReadyOnDevice_[atomLocality].markEvent(commandStream);
}

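// When GPU force buffer ops are used, force consumers for local/non-local atoms must wait
// for the reduction to complete rather than for the per-locality H2D copy, hence the branch below.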
GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getForcesReadyOnDeviceEvent(AtomLocality  atomLocality,
                                                                                bool          useGpuFBufferOps)
{
    if ((atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal) && useGpuFBufferOps)
    {
        return &fReducedOnDevice_;
    }
    else
    {
        return &fReadyOnDevice_[atomLocality];
    }
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::fReducedOnDevice()
{
    return &fReducedOnDevice_;
}

void StatePropagatorDataGpu::Impl::copyForcesFromGpu(gmx::ArrayRef<gmx::RVec>  h_f,
                                                     AtomLocality              atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = fCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr, "No stream is valid for copying forces with the given atom locality.");

    copyFromDevice(h_f, d_f_, d_fSize_, atomLocality, commandStream);
    fReadyOnHost_[atomLocality].markEvent(commandStream);
}

void StatePropagatorDataGpu::Impl::waitForcesReadyOnHost(AtomLocality  atomLocality)
{
    fReadyOnHost_[atomLocality].waitForEvent();
}

void* StatePropagatorDataGpu::Impl::getUpdateStream()
{
    return updateStream_;
}

int StatePropagatorDataGpu::Impl::numAtomsLocal()
{
    return numAtomsLocal_;
}

int StatePropagatorDataGpu::Impl::numAtomsAll()
{
    return numAtomsAll_;
}


StatePropagatorDataGpu::StatePropagatorDataGpu(const void        *pmeStream,
                                               const void        *localStream,
                                               const void        *nonLocalStream,
                                               const void        *deviceContext,
                                               GpuApiCallBehavior transferKind,
                                               int                paddingSize)
    : impl_(new Impl(pmeStream,
                     localStream,
                     nonLocalStream,
                     deviceContext,
                     transferKind,
                     paddingSize))
{
}

StatePropagatorDataGpu::StatePropagatorDataGpu(const void        *pmeStream,
                                               const void        *deviceContext,
                                               GpuApiCallBehavior transferKind,
                                               int                paddingSize)
    : impl_(new Impl(pmeStream,
                     deviceContext,
                     transferKind,
                     paddingSize))
{
}
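// A sketch of how a PME-only rank or a PME test might construct the object (illustrative;
// pme, paddingSize and the pme_gpu_get_device_stream()/pme_gpu_get_device_context() accessors
// are assumptions, not part of this file):
//
//   StatePropagatorDataGpu stateGpu(pme_gpu_get_device_stream(pme),
//                                   pme_gpu_get_device_context(pme),
//                                   GpuApiCallBehavior::Async, paddingSize);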

StatePropagatorDataGpu::StatePropagatorDataGpu(StatePropagatorDataGpu && /* other */) noexcept = default;

StatePropagatorDataGpu &StatePropagatorDataGpu::operator=(StatePropagatorDataGpu && /* other */) noexcept = default;

StatePropagatorDataGpu::~StatePropagatorDataGpu() = default;


void StatePropagatorDataGpu::reinit(int numAtomsLocal, int numAtomsAll)
{
    return impl_->reinit(numAtomsLocal, numAtomsAll);
}

std::tuple<int, int> StatePropagatorDataGpu::getAtomRangesFromAtomLocality(AtomLocality  atomLocality)
{
    return impl_->getAtomRangesFromAtomLocality(atomLocality);
}


DeviceBuffer<float> StatePropagatorDataGpu::getCoordinates()
{
    return impl_->getCoordinates();
}

void StatePropagatorDataGpu::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec>  h_x,
                                                  AtomLocality                          atomLocality)
{
    return impl_->copyCoordinatesToGpu(h_x, atomLocality);
}

GpuEventSynchronizer* StatePropagatorDataGpu::getCoordinatesReadyOnDeviceEvent(AtomLocality              atomLocality,
                                                                               const SimulationWorkload &simulationWork,
                                                                               const StepWorkload       &stepWork)
{
    return impl_->getCoordinatesReadyOnDeviceEvent(atomLocality, simulationWork, stepWork);
}

GpuEventSynchronizer* StatePropagatorDataGpu::xUpdatedOnDevice()
{
    return impl_->xUpdatedOnDevice();
}

void StatePropagatorDataGpu::copyCoordinatesFromGpu(gmx::ArrayRef<RVec>  h_x,
                                                    AtomLocality         atomLocality)
{
    return impl_->copyCoordinatesFromGpu(h_x, atomLocality);
}

void StatePropagatorDataGpu::waitCoordinatesReadyOnHost(AtomLocality  atomLocality)
{
    return impl_->waitCoordinatesReadyOnHost(atomLocality);
}


DeviceBuffer<float> StatePropagatorDataGpu::getVelocities()
{
    return impl_->getVelocities();
}

void StatePropagatorDataGpu::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec>  h_v,
                                                 AtomLocality                          atomLocality)
{
    return impl_->copyVelocitiesToGpu(h_v, atomLocality);
}

GpuEventSynchronizer* StatePropagatorDataGpu::getVelocitiesReadyOnDeviceEvent(AtomLocality  atomLocality)
{
    return impl_->getVelocitiesReadyOnDeviceEvent(atomLocality);
}

void StatePropagatorDataGpu::copyVelocitiesFromGpu(gmx::ArrayRef<RVec>  h_v,
                                                   AtomLocality         atomLocality)
{
    return impl_->copyVelocitiesFromGpu(h_v, atomLocality);
}

void StatePropagatorDataGpu::waitVelocitiesReadyOnHost(AtomLocality  atomLocality)
{
    return impl_->waitVelocitiesReadyOnHost(atomLocality);
}


DeviceBuffer<float> StatePropagatorDataGpu::getForces()
{
    return impl_->getForces();
}

void StatePropagatorDataGpu::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec>  h_f,
                                             AtomLocality                          atomLocality)
{
    return impl_->copyForcesToGpu(h_f, atomLocality);
}

GpuEventSynchronizer* StatePropagatorDataGpu::getForcesReadyOnDeviceEvent(AtomLocality  atomLocality,
                                                                          bool          useGpuFBufferOps)
{
    return impl_->getForcesReadyOnDeviceEvent(atomLocality, useGpuFBufferOps);
}

GpuEventSynchronizer* StatePropagatorDataGpu::fReducedOnDevice()
{
    return impl_->fReducedOnDevice();
}

void StatePropagatorDataGpu::copyForcesFromGpu(gmx::ArrayRef<RVec>  h_f,
                                               AtomLocality         atomLocality)
{
    return impl_->copyForcesFromGpu(h_f, atomLocality);
}

void StatePropagatorDataGpu::waitForcesReadyOnHost(AtomLocality  atomLocality)
{
    return impl_->waitForcesReadyOnHost(atomLocality);
}


void* StatePropagatorDataGpu::getUpdateStream()
{
    return impl_->getUpdateStream();
}

int StatePropagatorDataGpu::numAtomsLocal()
{
    return impl_->numAtomsLocal();
}

int StatePropagatorDataGpu::numAtomsAll()
{
    return impl_->numAtomsAll();
}

}      // namespace gmx

#endif // GMX_GPU != GMX_GPU_NONE