/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Definitions of interfaces for GPU state data propagator object.
 *
 * \author Artem Zhmurov <zhmurov@gmail.com>
 *
 * \ingroup module_mdtypes
 */
#include "gmxpre.h"

#include "config.h"

#if GMX_GPU != GMX_GPU_NONE

#    if GMX_GPU == GMX_GPU_CUDA
#        include "gromacs/gpu_utils/cudautils.cuh"
#    endif
#    include "gromacs/gpu_utils/devicebuffer.h"
#    if GMX_GPU == GMX_GPU_OPENCL
#        include "gromacs/gpu_utils/oclutils.h"
#    endif
#    include "gromacs/math/vectypes.h"
#    include "gromacs/mdtypes/state_propagator_data_gpu.h"
#    include "gromacs/timing/wallcycle.h"
#    include "gromacs/utility/classhelpers.h"

#    include "state_propagator_data_gpu_impl.h"


namespace gmx
{

StatePropagatorDataGpu::Impl::Impl(const void*        pmeStream,
                                   const void*        localStream,
                                   const void*        nonLocalStream,
                                   const void*        deviceContext,
                                   GpuApiCallBehavior transferKind,
                                   int                paddingSize,
                                   gmx_wallcycle*     wcycle) :
    transferKind_(transferKind),
    paddingSize_(paddingSize),
    wcycle_(wcycle)
{
    static_assert(GMX_GPU != GMX_GPU_NONE,
                  "This object should only be constructed on the GPU code-paths.");

    // TODO: Refactor when the StreamManager is introduced.
    if (GMX_GPU == GMX_GPU_OPENCL)
    {
        GMX_ASSERT(deviceContext != nullptr, "GPU context should be set in OpenCL builds.");
        GMX_ASSERT(pmeStream != nullptr, "GPU PME stream should be set in OpenCL builds.");

        // The update stream is set to the PME stream in OpenCL, since the PME stream is the only stream created in the PME context.
        pmeStream_     = *static_cast<const CommandStream*>(pmeStream);
        updateStream_  = *static_cast<const CommandStream*>(pmeStream);
        deviceContext_ = *static_cast<const DeviceContext*>(deviceContext);
        GMX_UNUSED_VALUE(localStream);
        GMX_UNUSED_VALUE(nonLocalStream);
    }

    if (GMX_GPU == GMX_GPU_CUDA)
    {
        if (pmeStream != nullptr)
        {
            pmeStream_ = *static_cast<const CommandStream*>(pmeStream);
        }
        if (localStream != nullptr)
        {
            localStream_ = *static_cast<const CommandStream*>(localStream);
        }
        if (nonLocalStream != nullptr)
        {
            nonLocalStream_ = *static_cast<const CommandStream*>(nonLocalStream);
        }

        // TODO: The update stream should be created only when it is needed.
#    if (GMX_GPU == GMX_GPU_CUDA)
        cudaError_t stat;
        stat = cudaStreamCreate(&updateStream_);
        CU_RET_ERR(stat, "CUDA stream creation failed in StatePropagatorDataGpu");
#    endif
        GMX_UNUSED_VALUE(deviceContext);
    }

    // Map the atom locality to the stream that will be used for coordinate,
    // velocity and force transfers. The same streams are used for H2D and D2H copies.
    // Note that a nullptr stream indicates that the copy is not supported.
    xCopyStreams_[AtomLocality::Local]    = updateStream_;
    xCopyStreams_[AtomLocality::NonLocal] = nonLocalStream_;
    xCopyStreams_[AtomLocality::All]      = updateStream_;

    vCopyStreams_[AtomLocality::Local]    = updateStream_;
    vCopyStreams_[AtomLocality::NonLocal] = nullptr;
    vCopyStreams_[AtomLocality::All]      = updateStream_;

    fCopyStreams_[AtomLocality::Local]    = localStream_;
    fCopyStreams_[AtomLocality::NonLocal] = nonLocalStream_;
    fCopyStreams_[AtomLocality::All]      = updateStream_;
}

StatePropagatorDataGpu::Impl::Impl(const void*        pmeStream,
                                   const void*        deviceContext,
                                   GpuApiCallBehavior transferKind,
                                   int                paddingSize,
                                   gmx_wallcycle*     wcycle) :
    transferKind_(transferKind),
    paddingSize_(paddingSize),
    wcycle_(wcycle)
{
    static_assert(GMX_GPU != GMX_GPU_NONE,
                  "This object should only be constructed on the GPU code-paths.");

    if (GMX_GPU == GMX_GPU_OPENCL)
    {
        GMX_ASSERT(deviceContext != nullptr, "GPU context should be set in OpenCL builds.");
        deviceContext_ = *static_cast<const DeviceContext*>(deviceContext);
    }

    GMX_ASSERT(pmeStream != nullptr, "GPU PME stream should be set.");
    pmeStream_ = *static_cast<const CommandStream*>(pmeStream);

    localStream_    = nullptr;
    nonLocalStream_ = nullptr;
    updateStream_   = nullptr;


    // Only local/all coordinates can be copied on a PME-only rank or in PME tests.
    // This is a temporary measure to make it safe to use this class in those cases.
    xCopyStreams_[AtomLocality::Local]    = pmeStream_;
    xCopyStreams_[AtomLocality::NonLocal] = nullptr;
    xCopyStreams_[AtomLocality::All]      = pmeStream_;

    vCopyStreams_[AtomLocality::Local]    = nullptr;
    vCopyStreams_[AtomLocality::NonLocal] = nullptr;
    vCopyStreams_[AtomLocality::All]      = nullptr;

    fCopyStreams_[AtomLocality::Local]    = nullptr;
    fCopyStreams_[AtomLocality::NonLocal] = nullptr;
    fCopyStreams_[AtomLocality::All]      = nullptr;
}

StatePropagatorDataGpu::Impl::~Impl() {}

void StatePropagatorDataGpu::Impl::reinit(int numAtomsLocal, int numAtomsAll)
{
    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    numAtomsLocal_ = numAtomsLocal;
    numAtomsAll_   = numAtomsAll;

    int numAtomsPadded;
    if (paddingSize_ > 0)
    {
        numAtomsPadded = ((numAtomsAll_ + paddingSize_ - 1) / paddingSize_) * paddingSize_;
    }
    else
    {
        numAtomsPadded = numAtomsAll_;
    }
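    // As a worked example (illustrative values only): with paddingSize_ = 256 and
    // numAtomsAll_ = 1000, the round-up above gives
    // numAtomsPadded = ((1000 + 255) / 256) * 256 = 1024; with paddingSize_ = 0,
    // numAtomsPadded stays at 1000.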

    reallocateDeviceBuffer(&d_x_, DIM * numAtomsPadded, &d_xSize_, &d_xCapacity_, deviceContext_);

    const size_t paddingAllocationSize = numAtomsPadded - numAtomsAll_;
    if (paddingAllocationSize > 0)
    {
        // The PME stream is used here because the padding region of d_x_ is only used in the PME task.
        clearDeviceBufferAsync(&d_x_, DIM * numAtomsAll_, DIM * paddingAllocationSize, pmeStream_);
    }

    reallocateDeviceBuffer(&d_v_, DIM * numAtomsAll_, &d_vSize_, &d_vCapacity_, deviceContext_);
    const int d_fOldCapacity = d_fCapacity_;
    reallocateDeviceBuffer(&d_f_, DIM * numAtomsAll_, &d_fSize_, &d_fCapacity_, deviceContext_);
    // Clearing of the forces can be done in the local stream, since the non-local stream cannot
    // reach the force accumulation stage before syncing with the local stream. This is only done
    // in CUDA, since the force buffer ops are not implemented in OpenCL.
    if (GMX_GPU == GMX_GPU_CUDA && d_fCapacity_ != d_fOldCapacity)
    {
        clearDeviceBufferAsync(&d_f_, 0, d_fCapacity_, localStream_);
    }

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}

std::tuple<int, int> StatePropagatorDataGpu::Impl::getAtomRangesFromAtomLocality(AtomLocality atomLocality)
{
    int atomsStartAt   = 0;
    int numAtomsToCopy = 0;
    switch (atomLocality)
    {
        case AtomLocality::All:
            atomsStartAt   = 0;
            numAtomsToCopy = numAtomsAll_;
            break;
        case AtomLocality::Local:
            atomsStartAt   = 0;
            numAtomsToCopy = numAtomsLocal_;
            break;
        case AtomLocality::NonLocal:
            atomsStartAt   = numAtomsLocal_;
            numAtomsToCopy = numAtomsAll_ - numAtomsLocal_;
            break;
        default:
            GMX_RELEASE_ASSERT(false,
                               "Wrong range of atoms requested in GPU state data manager. Should "
                               "be All, Local or NonLocal.");
    }
    GMX_ASSERT(atomsStartAt >= 0,
               "The first element to copy has a negative index. Probably, the GPU propagator state "
               "was not initialized.");
    GMX_ASSERT(numAtomsToCopy >= 0,
               "The number of atoms to copy is negative. Probably, the GPU propagator state was "
               "not initialized.");
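    // As an illustration (hypothetical values): with numAtomsLocal_ = 100 and numAtomsAll_ = 150,
    // the returned (start, count) pairs are Local -> (0, 100), NonLocal -> (100, 50), All -> (0, 150).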
    return std::make_tuple(atomsStartAt, numAtomsToCopy);
}

void StatePropagatorDataGpu::Impl::copyToDevice(DeviceBuffer<float>                  d_data,
                                                const gmx::ArrayRef<const gmx::RVec> h_data,
                                                int                                  dataSize,
                                                AtomLocality                         atomLocality,
                                                CommandStream                        commandStream)
{
    GMX_UNUSED_VALUE(dataSize);

    GMX_ASSERT(dataSize >= 0, "Trying to copy to a device buffer before it was allocated.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    int atomsStartAt, numAtomsToCopy;
    std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);

    int elementsStartAt   = atomsStartAt * DIM;
    int numElementsToCopy = numAtomsToCopy * DIM;
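    // E.g., with DIM = 3, an atom range of (atomsStartAt = 100, numAtomsToCopy = 50) maps
    // to the float-element range (elementsStartAt = 300, numElementsToCopy = 150).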

    if (numAtomsToCopy != 0)
    {
        GMX_ASSERT(elementsStartAt + numElementsToCopy <= dataSize,
                   "The device allocation is smaller than the requested copy range.");
        GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(),
                   "The host buffer is smaller than the requested copy range.");

        copyToDeviceBuffer(&d_data, reinterpret_cast<const float*>(&h_data.data()[atomsStartAt]),
                           elementsStartAt, numElementsToCopy, commandStream, transferKind_, nullptr);
    }

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}

void StatePropagatorDataGpu::Impl::copyFromDevice(gmx::ArrayRef<gmx::RVec> h_data,
                                                  DeviceBuffer<float>      d_data,
                                                  int                      dataSize,
                                                  AtomLocality             atomLocality,
                                                  CommandStream            commandStream)
{
    GMX_UNUSED_VALUE(dataSize);

    GMX_ASSERT(dataSize >= 0, "Trying to copy from a device buffer before it was allocated.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    int atomsStartAt, numAtomsToCopy;
    std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);

    int elementsStartAt   = atomsStartAt * DIM;
    int numElementsToCopy = numAtomsToCopy * DIM;

    if (numAtomsToCopy != 0)
    {
        GMX_ASSERT(elementsStartAt + numElementsToCopy <= dataSize,
                   "The device allocation is smaller than the requested copy range.");
        GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(),
                   "The host buffer is smaller than the requested copy range.");

        copyFromDeviceBuffer(reinterpret_cast<float*>(&h_data.data()[atomsStartAt]), &d_data,
                             elementsStartAt, numElementsToCopy, commandStream, transferKind_, nullptr);
    }

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}

DeviceBuffer<float> StatePropagatorDataGpu::Impl::getCoordinates()
{
    return d_x_;
}

void StatePropagatorDataGpu::Impl::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec> h_x,
                                                        AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = xCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying positions with the given atom locality.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    copyToDevice(d_x_, h_x, d_xSize_, atomLocality, commandStream);

    // markEvent is skipped in OpenCL because:
    //   - it is not needed, since the copy is done in the same stream as the only consumer task (PME);
    //   - the events are never consumed in OpenCL, which GpuEventSynchronizer does not allow (it would leak memory).
    // TODO: remove this by adding an event-mark-free flavor of this function
    if (GMX_GPU == GMX_GPU_CUDA)
    {
        xReadyOnDevice_[atomLocality].markEvent(commandStream);
    }

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}

GpuEventSynchronizer*
StatePropagatorDataGpu::Impl::getCoordinatesReadyOnDeviceEvent(AtomLocality atomLocality,
                                                               const SimulationWorkload& simulationWork,
                                                               const StepWorkload&       stepWork)
{
    // The provider of the coordinates may be different for local atoms. If the update is offloaded
    // and this is not a neighbor-search step, then the consumer needs to wait for the update
    // to complete. Otherwise, the coordinates are copied from the host and we need to wait for
    // the copy event. Non-local coordinates are always provided by the H2D copy.
    //
    // TODO: This should be reconsidered to support the halo exchange.
    //
    // In OpenCL, no events are used, since coordinate synchronization is not necessary.
    if (GMX_GPU == GMX_GPU_OPENCL)
    {
        return nullptr;
    }
    if (atomLocality == AtomLocality::Local && simulationWork.useGpuUpdate && !stepWork.doNeighborSearch)
    {
        return &xUpdatedOnDevice_;
    }
    else
    {
        return &xReadyOnDevice_[atomLocality];
    }
}

void StatePropagatorDataGpu::Impl::waitCoordinatesCopiedToDevice(AtomLocality atomLocality)
{
    wallcycle_start(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    xReadyOnDevice_[atomLocality].waitForEvent();
    wallcycle_stop(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::xUpdatedOnDevice()
{
    return &xUpdatedOnDevice_;
}

void StatePropagatorDataGpu::Impl::copyCoordinatesFromGpu(gmx::ArrayRef<gmx::RVec> h_x, AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = xCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying positions with the given atom locality.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    copyFromDevice(h_x, d_x_, d_xSize_, atomLocality, commandStream);
    // Note: unlike in copyCoordinatesToGpu, this path is not used in OpenCL, so the conditional is not needed.
    xReadyOnHost_[atomLocality].markEvent(commandStream);

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}

void StatePropagatorDataGpu::Impl::waitCoordinatesReadyOnHost(AtomLocality atomLocality)
{
    wallcycle_start(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
    xReadyOnHost_[atomLocality].waitForEvent();
    wallcycle_stop(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
}


DeviceBuffer<float> StatePropagatorDataGpu::Impl::getVelocities()
{
    return d_v_;
}

void StatePropagatorDataGpu::Impl::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec> h_v,
                                                       AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = vCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying velocities with the given atom locality.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    copyToDevice(d_v_, h_v, d_vSize_, atomLocality, commandStream);
    vReadyOnDevice_[atomLocality].markEvent(commandStream);

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getVelocitiesReadyOnDeviceEvent(AtomLocality atomLocality)
{
    return &vReadyOnDevice_[atomLocality];
}


void StatePropagatorDataGpu::Impl::copyVelocitiesFromGpu(gmx::ArrayRef<gmx::RVec> h_v, AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = vCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying velocities with the given atom locality.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    copyFromDevice(h_v, d_v_, d_vSize_, atomLocality, commandStream);
    vReadyOnHost_[atomLocality].markEvent(commandStream);

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}

void StatePropagatorDataGpu::Impl::waitVelocitiesReadyOnHost(AtomLocality atomLocality)
{
    wallcycle_start(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
    vReadyOnHost_[atomLocality].waitForEvent();
    wallcycle_stop(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
}


DeviceBuffer<float> StatePropagatorDataGpu::Impl::getForces()
{
    return d_f_;
}

void StatePropagatorDataGpu::Impl::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec> h_f,
                                                   AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = fCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying forces with the given atom locality.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    copyToDevice(d_f_, h_f, d_fSize_, atomLocality, commandStream);
    fReadyOnDevice_[atomLocality].markEvent(commandStream);

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getForcesReadyOnDeviceEvent(AtomLocality atomLocality,
                                                                                bool useGpuFBufferOps)
{
    if ((atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal) && useGpuFBufferOps)
    {
        return &fReducedOnDevice_;
    }
    else
    {
        return &fReadyOnDevice_[atomLocality];
    }
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::fReducedOnDevice()
{
    return &fReducedOnDevice_;
}

void StatePropagatorDataGpu::Impl::copyForcesFromGpu(gmx::ArrayRef<gmx::RVec> h_f, AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = fCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying forces with the given atom locality.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    copyFromDevice(h_f, d_f_, d_fSize_, atomLocality, commandStream);
    fReadyOnHost_[atomLocality].markEvent(commandStream);

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}

void StatePropagatorDataGpu::Impl::waitForcesReadyOnHost(AtomLocality atomLocality)
{
    wallcycle_start(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
    fReadyOnHost_[atomLocality].waitForEvent();
    wallcycle_stop(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
}

void* StatePropagatorDataGpu::Impl::getUpdateStream()
{
    return &updateStream_;
}

int StatePropagatorDataGpu::Impl::numAtomsLocal()
{
    return numAtomsLocal_;
}

int StatePropagatorDataGpu::Impl::numAtomsAll()
{
    return numAtomsAll_;
}


StatePropagatorDataGpu::StatePropagatorDataGpu(const void*        pmeStream,
                                               const void*        localStream,
                                               const void*        nonLocalStream,
                                               const void*        deviceContext,
                                               GpuApiCallBehavior transferKind,
                                               int                paddingSize,
                                               gmx_wallcycle*     wcycle) :
    impl_(new Impl(pmeStream, localStream, nonLocalStream, deviceContext, transferKind, paddingSize, wcycle))
{
}

StatePropagatorDataGpu::StatePropagatorDataGpu(const void*        pmeStream,
                                               const void*        deviceContext,
                                               GpuApiCallBehavior transferKind,
                                               int                paddingSize,
                                               gmx_wallcycle*     wcycle) :
    impl_(new Impl(pmeStream, deviceContext, transferKind, paddingSize, wcycle))
{
}
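
/* A minimal usage sketch for this class (illustrative only; the stream, context,
 * atom counts and host buffers are placeholders assumed to come from the caller's
 * setup, not from this file):
 *
 *   StatePropagatorDataGpu stateGpu(&pmeStream, &localStream, &nonLocalStream,
 *                                   &deviceContext, GpuApiCallBehavior::Async,
 *                                   paddingSize, wcycle);
 *   stateGpu.reinit(numAtomsLocal, numAtomsAll);
 *   stateGpu.copyCoordinatesToGpu(h_x, AtomLocality::Local);
 *   stateGpu.waitCoordinatesCopiedToDevice(AtomLocality::Local);
 */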

StatePropagatorDataGpu::StatePropagatorDataGpu(StatePropagatorDataGpu&& /* other */) noexcept = default;

StatePropagatorDataGpu& StatePropagatorDataGpu::operator=(StatePropagatorDataGpu&& /* other */) noexcept = default;

StatePropagatorDataGpu::~StatePropagatorDataGpu() = default;


void StatePropagatorDataGpu::reinit(int numAtomsLocal, int numAtomsAll)
{
    return impl_->reinit(numAtomsLocal, numAtomsAll);
}

std::tuple<int, int> StatePropagatorDataGpu::getAtomRangesFromAtomLocality(AtomLocality atomLocality)
{
    return impl_->getAtomRangesFromAtomLocality(atomLocality);
}


DeviceBuffer<float> StatePropagatorDataGpu::getCoordinates()
{
    return impl_->getCoordinates();
}

void StatePropagatorDataGpu::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec> h_x,
                                                  AtomLocality                         atomLocality)
{
    return impl_->copyCoordinatesToGpu(h_x, atomLocality);
}

GpuEventSynchronizer*
StatePropagatorDataGpu::getCoordinatesReadyOnDeviceEvent(AtomLocality              atomLocality,
                                                         const SimulationWorkload& simulationWork,
                                                         const StepWorkload&       stepWork)
{
    return impl_->getCoordinatesReadyOnDeviceEvent(atomLocality, simulationWork, stepWork);
}

void StatePropagatorDataGpu::waitCoordinatesCopiedToDevice(AtomLocality atomLocality)
{
    return impl_->waitCoordinatesCopiedToDevice(atomLocality);
}

GpuEventSynchronizer* StatePropagatorDataGpu::xUpdatedOnDevice()
{
    return impl_->xUpdatedOnDevice();
}

void StatePropagatorDataGpu::copyCoordinatesFromGpu(gmx::ArrayRef<RVec> h_x, AtomLocality atomLocality)
{
    return impl_->copyCoordinatesFromGpu(h_x, atomLocality);
}

void StatePropagatorDataGpu::waitCoordinatesReadyOnHost(AtomLocality atomLocality)
{
    return impl_->waitCoordinatesReadyOnHost(atomLocality);
}


DeviceBuffer<float> StatePropagatorDataGpu::getVelocities()
{
    return impl_->getVelocities();
}

void StatePropagatorDataGpu::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec> h_v,
                                                 AtomLocality                         atomLocality)
{
    return impl_->copyVelocitiesToGpu(h_v, atomLocality);
}

GpuEventSynchronizer* StatePropagatorDataGpu::getVelocitiesReadyOnDeviceEvent(AtomLocality atomLocality)
{
    return impl_->getVelocitiesReadyOnDeviceEvent(atomLocality);
}

void StatePropagatorDataGpu::copyVelocitiesFromGpu(gmx::ArrayRef<RVec> h_v, AtomLocality atomLocality)
{
    return impl_->copyVelocitiesFromGpu(h_v, atomLocality);
}

void StatePropagatorDataGpu::waitVelocitiesReadyOnHost(AtomLocality atomLocality)
{
    return impl_->waitVelocitiesReadyOnHost(atomLocality);
}


DeviceBuffer<float> StatePropagatorDataGpu::getForces()
{
    return impl_->getForces();
}

void StatePropagatorDataGpu::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec> h_f, AtomLocality atomLocality)
{
    return impl_->copyForcesToGpu(h_f, atomLocality);
}

GpuEventSynchronizer* StatePropagatorDataGpu::getForcesReadyOnDeviceEvent(AtomLocality atomLocality,
                                                                          bool useGpuFBufferOps)
{
    return impl_->getForcesReadyOnDeviceEvent(atomLocality, useGpuFBufferOps);
}

GpuEventSynchronizer* StatePropagatorDataGpu::fReducedOnDevice()
{
    return impl_->fReducedOnDevice();
}

void StatePropagatorDataGpu::copyForcesFromGpu(gmx::ArrayRef<RVec> h_f, AtomLocality atomLocality)
{
    return impl_->copyForcesFromGpu(h_f, atomLocality);
}

void StatePropagatorDataGpu::waitForcesReadyOnHost(AtomLocality atomLocality)
{
    return impl_->waitForcesReadyOnHost(atomLocality);
}


void* StatePropagatorDataGpu::getUpdateStream()
{
    return impl_->getUpdateStream();
}

int StatePropagatorDataGpu::numAtomsLocal()
{
    return impl_->numAtomsLocal();
}

int StatePropagatorDataGpu::numAtomsAll()
{
    return impl_->numAtomsAll();
}

} // namespace gmx

#endif // GMX_GPU != GMX_GPU_NONE