Clear device force buffer if it was reallocated
src/gromacs/mdtypes/state_propagator_data_gpu_impl_gpu.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Definitions of interfaces for GPU state data propagator object.
 *
 * \author Artem Zhmurov <zhmurov@gmail.com>
 *
 * \ingroup module_mdtypes
 */
#include "gmxpre.h"

#include "config.h"

#if GMX_GPU != GMX_GPU_NONE

#    if GMX_GPU == GMX_GPU_CUDA
#        include "gromacs/gpu_utils/cudautils.cuh"
#    endif
#    include "gromacs/gpu_utils/devicebuffer.h"
#    if GMX_GPU == GMX_GPU_OPENCL
#        include "gromacs/gpu_utils/oclutils.h"
#    endif
#    include "gromacs/math/vectypes.h"
#    include "gromacs/mdtypes/state_propagator_data_gpu.h"
#    include "gromacs/utility/classhelpers.h"

#    include "state_propagator_data_gpu_impl.h"
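
// Note: CommandStream and DeviceBuffer<float> used below are build-dependent
// aliases (cudaStream_t and float* in CUDA builds; cl_command_queue and a
// typed cl_mem wrapper in OpenCL builds), defined in the gpu_utils headers.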

namespace gmx
{

StatePropagatorDataGpu::Impl::Impl(const void*        pmeStream,
                                   const void*        localStream,
                                   const void*        nonLocalStream,
                                   const void*        deviceContext,
                                   GpuApiCallBehavior transferKind,
                                   int                paddingSize) :
    transferKind_(transferKind),
    paddingSize_(paddingSize)
{
    static_assert(GMX_GPU != GMX_GPU_NONE,
                  "This object should only be constructed on the GPU code-paths.");

    // TODO: Refactor when the StreamManager is introduced.
    if (GMX_GPU == GMX_GPU_OPENCL)
    {
        GMX_ASSERT(deviceContext != nullptr, "GPU context should be set in OpenCL builds.");
        GMX_ASSERT(pmeStream != nullptr, "GPU PME stream should be set in OpenCL builds.");

        // The update stream is set to the PME stream in OpenCL, since the PME stream is the only stream created in the PME context.
        pmeStream_     = *static_cast<const CommandStream*>(pmeStream);
        updateStream_  = *static_cast<const CommandStream*>(pmeStream);
        deviceContext_ = *static_cast<const DeviceContext*>(deviceContext);
        GMX_UNUSED_VALUE(localStream);
        GMX_UNUSED_VALUE(nonLocalStream);
    }

    if (GMX_GPU == GMX_GPU_CUDA)
    {
        if (pmeStream != nullptr)
        {
            pmeStream_ = *static_cast<const CommandStream*>(pmeStream);
        }
        if (localStream != nullptr)
        {
            localStream_ = *static_cast<const CommandStream*>(localStream);
        }
        if (nonLocalStream != nullptr)
        {
            nonLocalStream_ = *static_cast<const CommandStream*>(nonLocalStream);
        }

        // TODO: The update stream should be created only when it is needed.
#    if (GMX_GPU == GMX_GPU_CUDA)
        cudaError_t stat;
        stat = cudaStreamCreate(&updateStream_);
        CU_RET_ERR(stat, "CUDA stream creation failed in StatePropagatorDataGpu");
#    endif
        GMX_UNUSED_VALUE(deviceContext);
    }

    // Map the atom locality to the stream that will be used for coordinate,
    // velocity and force transfers. The same streams are used for H2D and D2H copies.
    // Note that a nullptr stream is used here to indicate that the copy is not supported.
    xCopyStreams_[AtomLocality::Local]    = updateStream_;
    xCopyStreams_[AtomLocality::NonLocal] = nonLocalStream_;
    xCopyStreams_[AtomLocality::All]      = updateStream_;

    vCopyStreams_[AtomLocality::Local]    = updateStream_;
    vCopyStreams_[AtomLocality::NonLocal] = nullptr;
    vCopyStreams_[AtomLocality::All]      = updateStream_;

    fCopyStreams_[AtomLocality::Local]    = localStream_;
    fCopyStreams_[AtomLocality::NonLocal] = nonLocalStream_;
    fCopyStreams_[AtomLocality::All]      = updateStream_;
}

StatePropagatorDataGpu::Impl::Impl(const void*        pmeStream,
                                   const void*        deviceContext,
                                   GpuApiCallBehavior transferKind,
                                   int                paddingSize) :
    transferKind_(transferKind),
    paddingSize_(paddingSize)
{
    static_assert(GMX_GPU != GMX_GPU_NONE,
                  "This object should only be constructed on the GPU code-paths.");

    if (GMX_GPU == GMX_GPU_OPENCL)
    {
        GMX_ASSERT(deviceContext != nullptr, "GPU context should be set in OpenCL builds.");
        deviceContext_ = *static_cast<const DeviceContext*>(deviceContext);
    }

    GMX_ASSERT(pmeStream != nullptr, "GPU PME stream should be set.");
    pmeStream_ = *static_cast<const CommandStream*>(pmeStream);

    localStream_    = nullptr;
    nonLocalStream_ = nullptr;
    updateStream_   = nullptr;


    // Only local/all coordinates are allowed to be copied on a PME-only rank and in PME tests.
    // This is a temporary measure to make it safe to use this class in those cases.
    xCopyStreams_[AtomLocality::Local]    = pmeStream_;
    xCopyStreams_[AtomLocality::NonLocal] = nullptr;
    xCopyStreams_[AtomLocality::All]      = pmeStream_;

    vCopyStreams_[AtomLocality::Local]    = nullptr;
    vCopyStreams_[AtomLocality::NonLocal] = nullptr;
    vCopyStreams_[AtomLocality::All]      = nullptr;

    fCopyStreams_[AtomLocality::Local]    = nullptr;
    fCopyStreams_[AtomLocality::NonLocal] = nullptr;
    fCopyStreams_[AtomLocality::All]      = nullptr;
}

StatePropagatorDataGpu::Impl::~Impl() {}

void StatePropagatorDataGpu::Impl::reinit(int numAtomsLocal, int numAtomsAll)
{
    numAtomsLocal_ = numAtomsLocal;
    numAtomsAll_   = numAtomsAll;

    int numAtomsPadded;
    if (paddingSize_ > 0)
    {
        numAtomsPadded = ((numAtomsAll_ + paddingSize_ - 1) / paddingSize_) * paddingSize_;
    }
    else
    {
        numAtomsPadded = numAtomsAll_;
    }
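    // For illustration (the numbers are hypothetical): with paddingSize_ = 8 and
    // numAtomsAll_ = 21, the rounding above gives
    // numAtomsPadded = ((21 + 8 - 1) / 8) * 8 = 24, i.e. the smallest multiple
    // of paddingSize_ that holds all atoms.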

    reallocateDeviceBuffer(&d_x_, DIM * numAtomsPadded, &d_xSize_, &d_xCapacity_, deviceContext_);

    const size_t paddingAllocationSize = numAtomsPadded - numAtomsAll_;
    if (paddingAllocationSize > 0)
    {
        // The PME stream is used here because the padding region of d_x_ is only used by the PME task.
        clearDeviceBufferAsync(&d_x_, DIM * numAtomsAll_, DIM * paddingAllocationSize, pmeStream_);
    }

    reallocateDeviceBuffer(&d_v_, DIM * numAtomsAll_, &d_vSize_, &d_vCapacity_, deviceContext_);
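    // Remember the capacity before reallocation so we can detect below whether
    // d_f_ was actually reallocated; only a freshly (re)allocated buffer needs
    // to be cleared.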
    const int d_fOldCapacity = d_fCapacity_;
    reallocateDeviceBuffer(&d_f_, DIM * numAtomsAll_, &d_fSize_, &d_fCapacity_, deviceContext_);
    // Clearing of the forces can be done in the local stream since the nonlocal stream cannot reach
    // the force accumulation stage before syncing with the local stream. This is only done in CUDA,
    // since the force buffer ops are not implemented in OpenCL.
    if (GMX_GPU == GMX_GPU_CUDA && d_fCapacity_ != d_fOldCapacity)
    {
        clearDeviceBufferAsync(&d_f_, 0, d_fCapacity_, localStream_);
    }
}

std::tuple<int, int> StatePropagatorDataGpu::Impl::getAtomRangesFromAtomLocality(AtomLocality atomLocality)
{
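    // Example (hypothetical counts): with numAtomsLocal_ = 6 and numAtomsAll_ = 10,
    // Local covers atoms [0, 6), NonLocal covers [6, 10) and All covers [0, 10);
    // the Local and NonLocal ranges are contiguous and non-overlapping.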
    int atomsStartAt   = 0;
    int numAtomsToCopy = 0;
    switch (atomLocality)
    {
        case AtomLocality::All:
            atomsStartAt   = 0;
            numAtomsToCopy = numAtomsAll_;
            break;
        case AtomLocality::Local:
            atomsStartAt   = 0;
            numAtomsToCopy = numAtomsLocal_;
            break;
        case AtomLocality::NonLocal:
            atomsStartAt   = numAtomsLocal_;
            numAtomsToCopy = numAtomsAll_ - numAtomsLocal_;
            break;
        default:
            GMX_RELEASE_ASSERT(false,
                               "Wrong range of atoms requested in GPU state data manager. Should "
                               "be All, Local or NonLocal.");
    }
    GMX_ASSERT(atomsStartAt >= 0,
               "The first element to copy has a negative index. Probably, the GPU propagator state "
               "was not initialized.");
    GMX_ASSERT(numAtomsToCopy >= 0,
               "Number of atoms to copy is negative. Probably, the GPU propagator state was not "
               "initialized.");
    return std::make_tuple(atomsStartAt, numAtomsToCopy);
}

void StatePropagatorDataGpu::Impl::copyToDevice(DeviceBuffer<float>                  d_data,
                                                const gmx::ArrayRef<const gmx::RVec> h_data,
                                                int                                  dataSize,
                                                AtomLocality                         atomLocality,
                                                CommandStream                        commandStream)
{

    GMX_UNUSED_VALUE(dataSize);

    GMX_ASSERT(dataSize >= 0, "Trying to copy to device buffer before it was allocated.");

    int atomsStartAt, numAtomsToCopy;
    std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);

    int elementsStartAt   = atomsStartAt * DIM;
    int numElementsToCopy = numAtomsToCopy * DIM;
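    // Atom indices and counts are scaled by DIM above because each RVec holds
    // DIM (= 3) floats laid out contiguously in the flat float device buffer.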

    if (numAtomsToCopy != 0)
    {
        GMX_ASSERT(elementsStartAt + numElementsToCopy <= dataSize,
                   "The device allocation is smaller than the requested copy range.");
        GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(),
                   "The host buffer is smaller than the requested copy range.");

        copyToDeviceBuffer(&d_data, reinterpret_cast<const float*>(&h_data.data()[atomsStartAt]),
                           elementsStartAt, numElementsToCopy, commandStream, transferKind_, nullptr);
    }
}

void StatePropagatorDataGpu::Impl::copyFromDevice(gmx::ArrayRef<gmx::RVec> h_data,
                                                  DeviceBuffer<float>      d_data,
                                                  int                      dataSize,
                                                  AtomLocality             atomLocality,
                                                  CommandStream            commandStream)
{

    GMX_UNUSED_VALUE(dataSize);

    GMX_ASSERT(dataSize >= 0, "Trying to copy from device buffer before it was allocated.");

    int atomsStartAt, numAtomsToCopy;
    std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);

    int elementsStartAt   = atomsStartAt * DIM;
    int numElementsToCopy = numAtomsToCopy * DIM;

    if (numAtomsToCopy != 0)
    {
        GMX_ASSERT(elementsStartAt + numElementsToCopy <= dataSize,
                   "The device allocation is smaller than the requested copy range.");
        GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(),
                   "The host buffer is smaller than the requested copy range.");

        copyFromDeviceBuffer(reinterpret_cast<float*>(&h_data.data()[atomsStartAt]), &d_data,
                             elementsStartAt, numElementsToCopy, commandStream, transferKind_, nullptr);
    }
}

DeviceBuffer<float> StatePropagatorDataGpu::Impl::getCoordinates()
{
    return d_x_;
}

void StatePropagatorDataGpu::Impl::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec> h_x,
                                                        AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = xCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying positions with the given atom locality.");

    copyToDevice(d_x_, h_x, d_xSize_, atomLocality, commandStream);

    // markEvent is skipped in OpenCL because:
    //   - it is not needed: the copy is done in the same stream as the only consumer task (PME);
    //   - the events are never consumed in OpenCL, and GpuEventSynchronizer does not allow
    //     unconsumed events (they would leak memory).
    // TODO: remove this by adding an event-mark free flavor of this function
    if (GMX_GPU == GMX_GPU_CUDA)
    {
        xReadyOnDevice_[atomLocality].markEvent(commandStream);
    }
}

GpuEventSynchronizer*
StatePropagatorDataGpu::Impl::getCoordinatesReadyOnDeviceEvent(AtomLocality atomLocality,
                                                               const SimulationWorkload& simulationWork,
                                                               const StepWorkload&       stepWork)
{
    // The provider of the coordinates may be different for local atoms. If the update is offloaded
    // and this is not a neighbor search step, then the consumer needs to wait for the update
    // to complete. Otherwise, the coordinates are copied from the host and we need to wait for
    // the copy event. Non-local coordinates are always provided by the H2D copy.
    //
    // TODO: This should be reconsidered to support the halo exchange.
    //
    // In OpenCL, no events are used, since coordinate synchronization is not necessary.
    if (GMX_GPU == GMX_GPU_OPENCL)
    {
        return nullptr;
    }
    if (atomLocality == AtomLocality::Local && simulationWork.useGpuUpdate && !stepWork.doNeighborSearch)
    {
        return &xUpdatedOnDevice_;
    }
    else
    {
        return &xReadyOnDevice_[atomLocality];
    }
}
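
// A typical consumer-side usage (a sketch; stateGpu and consumerStream are
// hypothetical caller-owned objects): make the consuming stream wait on the
// returned synchronizer before launching work that reads the coordinates, e.g.
//     GpuEventSynchronizer* sync = stateGpu->getCoordinatesReadyOnDeviceEvent(
//             atomLocality, simulationWork, stepWork);
//     sync->enqueueWaitEvent(consumerStream);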

void StatePropagatorDataGpu::Impl::waitCoordinatesCopiedToDevice(AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    xReadyOnDevice_[atomLocality].waitForEvent();
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::xUpdatedOnDevice()
{
    return &xUpdatedOnDevice_;
}

void StatePropagatorDataGpu::Impl::copyCoordinatesFromGpu(gmx::ArrayRef<gmx::RVec> h_x, AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = xCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying positions with the given atom locality.");

    copyFromDevice(h_x, d_x_, d_xSize_, atomLocality, commandStream);
    // Note: unlike copyCoordinatesToGpu, this function is not used in OpenCL, so the conditional around markEvent is not needed.
    xReadyOnHost_[atomLocality].markEvent(commandStream);
}

void StatePropagatorDataGpu::Impl::waitCoordinatesReadyOnHost(AtomLocality atomLocality)
{
    xReadyOnHost_[atomLocality].waitForEvent();
}


DeviceBuffer<float> StatePropagatorDataGpu::Impl::getVelocities()
{
    return d_v_;
}

void StatePropagatorDataGpu::Impl::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec> h_v,
                                                       AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = vCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying velocities with the given atom locality.");

    copyToDevice(d_v_, h_v, d_vSize_, atomLocality, commandStream);
    vReadyOnDevice_[atomLocality].markEvent(commandStream);
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getVelocitiesReadyOnDeviceEvent(AtomLocality atomLocality)
{
    return &vReadyOnDevice_[atomLocality];
}


void StatePropagatorDataGpu::Impl::copyVelocitiesFromGpu(gmx::ArrayRef<gmx::RVec> h_v, AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = vCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying velocities with the given atom locality.");

    copyFromDevice(h_v, d_v_, d_vSize_, atomLocality, commandStream);
    vReadyOnHost_[atomLocality].markEvent(commandStream);
}

void StatePropagatorDataGpu::Impl::waitVelocitiesReadyOnHost(AtomLocality atomLocality)
{
    vReadyOnHost_[atomLocality].waitForEvent();
}


DeviceBuffer<float> StatePropagatorDataGpu::Impl::getForces()
{
    return d_f_;
}

void StatePropagatorDataGpu::Impl::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec> h_f,
                                                   AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = fCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying forces with the given atom locality.");

    copyToDevice(d_f_, h_f, d_fSize_, atomLocality, commandStream);
    fReadyOnDevice_[atomLocality].markEvent(commandStream);
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getForcesReadyOnDeviceEvent(AtomLocality atomLocality,
                                                                                bool useGpuFBufferOps)
{
    if ((atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal) && useGpuFBufferOps)
    {
        return &fReducedOnDevice_;
    }
    else
    {
        return &fReadyOnDevice_[atomLocality];
    }
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::fReducedOnDevice()
{
    return &fReducedOnDevice_;
}

void StatePropagatorDataGpu::Impl::copyForcesFromGpu(gmx::ArrayRef<gmx::RVec> h_f, AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = fCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying forces with the given atom locality.");

    copyFromDevice(h_f, d_f_, d_fSize_, atomLocality, commandStream);
    fReadyOnHost_[atomLocality].markEvent(commandStream);
}

void StatePropagatorDataGpu::Impl::waitForcesReadyOnHost(AtomLocality atomLocality)
{
    fReadyOnHost_[atomLocality].waitForEvent();
}

void* StatePropagatorDataGpu::Impl::getUpdateStream()
{
    return &updateStream_;
}

int StatePropagatorDataGpu::Impl::numAtomsLocal()
{
    return numAtomsLocal_;
}

int StatePropagatorDataGpu::Impl::numAtomsAll()
{
    return numAtomsAll_;
}


StatePropagatorDataGpu::StatePropagatorDataGpu(const void*        pmeStream,
                                               const void*        localStream,
                                               const void*        nonLocalStream,
                                               const void*        deviceContext,
                                               GpuApiCallBehavior transferKind,
                                               int                paddingSize) :
    impl_(new Impl(pmeStream, localStream, nonLocalStream, deviceContext, transferKind, paddingSize))
{
}

StatePropagatorDataGpu::StatePropagatorDataGpu(const void*        pmeStream,
                                               const void*        deviceContext,
                                               GpuApiCallBehavior transferKind,
                                               int                paddingSize) :
    impl_(new Impl(pmeStream, deviceContext, transferKind, paddingSize))
{
}

StatePropagatorDataGpu::StatePropagatorDataGpu(StatePropagatorDataGpu&& /* other */) noexcept = default;

StatePropagatorDataGpu& StatePropagatorDataGpu::operator=(StatePropagatorDataGpu&& /* other */) noexcept = default;

StatePropagatorDataGpu::~StatePropagatorDataGpu() = default;


void StatePropagatorDataGpu::reinit(int numAtomsLocal, int numAtomsAll)
{
    return impl_->reinit(numAtomsLocal, numAtomsAll);
}

std::tuple<int, int> StatePropagatorDataGpu::getAtomRangesFromAtomLocality(AtomLocality atomLocality)
{
    return impl_->getAtomRangesFromAtomLocality(atomLocality);
}


DeviceBuffer<float> StatePropagatorDataGpu::getCoordinates()
{
    return impl_->getCoordinates();
}

void StatePropagatorDataGpu::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec> h_x,
                                                  AtomLocality                         atomLocality)
{
    return impl_->copyCoordinatesToGpu(h_x, atomLocality);
}

GpuEventSynchronizer*
StatePropagatorDataGpu::getCoordinatesReadyOnDeviceEvent(AtomLocality              atomLocality,
                                                         const SimulationWorkload& simulationWork,
                                                         const StepWorkload&       stepWork)
{
    return impl_->getCoordinatesReadyOnDeviceEvent(atomLocality, simulationWork, stepWork);
}

void StatePropagatorDataGpu::waitCoordinatesCopiedToDevice(AtomLocality atomLocality)
{
    return impl_->waitCoordinatesCopiedToDevice(atomLocality);
}

GpuEventSynchronizer* StatePropagatorDataGpu::xUpdatedOnDevice()
{
    return impl_->xUpdatedOnDevice();
}

void StatePropagatorDataGpu::copyCoordinatesFromGpu(gmx::ArrayRef<RVec> h_x, AtomLocality atomLocality)
{
    return impl_->copyCoordinatesFromGpu(h_x, atomLocality);
}

void StatePropagatorDataGpu::waitCoordinatesReadyOnHost(AtomLocality atomLocality)
{
    return impl_->waitCoordinatesReadyOnHost(atomLocality);
}


DeviceBuffer<float> StatePropagatorDataGpu::getVelocities()
{
    return impl_->getVelocities();
}

void StatePropagatorDataGpu::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec> h_v,
                                                 AtomLocality                         atomLocality)
{
    return impl_->copyVelocitiesToGpu(h_v, atomLocality);
}

GpuEventSynchronizer* StatePropagatorDataGpu::getVelocitiesReadyOnDeviceEvent(AtomLocality atomLocality)
{
    return impl_->getVelocitiesReadyOnDeviceEvent(atomLocality);
}

void StatePropagatorDataGpu::copyVelocitiesFromGpu(gmx::ArrayRef<RVec> h_v, AtomLocality atomLocality)
{
    return impl_->copyVelocitiesFromGpu(h_v, atomLocality);
}

void StatePropagatorDataGpu::waitVelocitiesReadyOnHost(AtomLocality atomLocality)
{
    return impl_->waitVelocitiesReadyOnHost(atomLocality);
}


DeviceBuffer<float> StatePropagatorDataGpu::getForces()
{
    return impl_->getForces();
}

void StatePropagatorDataGpu::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec> h_f, AtomLocality atomLocality)
{
    return impl_->copyForcesToGpu(h_f, atomLocality);
}

GpuEventSynchronizer* StatePropagatorDataGpu::getForcesReadyOnDeviceEvent(AtomLocality atomLocality,
                                                                          bool useGpuFBufferOps)
{
    return impl_->getForcesReadyOnDeviceEvent(atomLocality, useGpuFBufferOps);
}

GpuEventSynchronizer* StatePropagatorDataGpu::fReducedOnDevice()
{
    return impl_->fReducedOnDevice();
}

void StatePropagatorDataGpu::copyForcesFromGpu(gmx::ArrayRef<RVec> h_f, AtomLocality atomLocality)
{
    return impl_->copyForcesFromGpu(h_f, atomLocality);
}

void StatePropagatorDataGpu::waitForcesReadyOnHost(AtomLocality atomLocality)
{
    return impl_->waitForcesReadyOnHost(atomLocality);
}


void* StatePropagatorDataGpu::getUpdateStream()
{
    return impl_->getUpdateStream();
}

int StatePropagatorDataGpu::numAtomsLocal()
{
    return impl_->numAtomsLocal();
}

int StatePropagatorDataGpu::numAtomsAll()
{
    return impl_->numAtomsAll();
}

} // namespace gmx

#endif // GMX_GPU != GMX_GPU_NONE