Make DeviceContext into a proper class
[alexxy/gromacs.git] / src / gromacs / mdtypes / state_propagator_data_gpu_impl_gpu.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2019,2020, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*! \internal \file
36  *
37  * \brief Definitions of interfaces for GPU state data propagator object.
38  *
39  * \author Artem Zhmurov <zhmurov@gmail.com>
40  *
41  * \ingroup module_mdtypes
42  */
43 #include "gmxpre.h"
44
45 #include "config.h"
46
47 #if GMX_GPU != GMX_GPU_NONE
48
49 #    if GMX_GPU == GMX_GPU_CUDA
50 #        include "gromacs/gpu_utils/cudautils.cuh"
51 #    endif
52 #    include "gromacs/gpu_utils/devicebuffer.h"
53 #    include "gromacs/gpu_utils/gputraits.h"
54 #    if GMX_GPU == GMX_GPU_OPENCL
55 #        include "gromacs/gpu_utils/oclutils.h"
56 #    endif
57 #    include "gromacs/math/vectypes.h"
58 #    include "gromacs/mdtypes/state_propagator_data_gpu.h"
59 #    include "gromacs/timing/wallcycle.h"
60 #    include "gromacs/utility/classhelpers.h"
61
62 #    include "state_propagator_data_gpu_impl.h"
63
64
65 namespace gmx
66 {
67
/*! \brief Constructor for the typical MD-rank case with separate PME, local and
 *         non-local streams.
 *
 * Streams are passed as opaque \c const \c void* and cast to the build's
 * CommandStream type; any of them may be nullptr in CUDA builds (e.g. the
 * non-local stream without domain decomposition).
 *
 * \param[in] pmeStream       PME stream (required in OpenCL builds).
 * \param[in] localStream     Local force/non-bonded stream (CUDA only).
 * \param[in] nonLocalStream  Non-local force/non-bonded stream (CUDA only).
 * \param[in] deviceContext   GPU device context (stored by reference).
 * \param[in] transferKind    Synchronous or asynchronous H2D/D2H copy behavior.
 * \param[in] paddingSize     Coordinate-buffer padding granularity in atoms.
 * \param[in] wcycle          Wall-cycle counters for timing the GPU launches/waits.
 */
StatePropagatorDataGpu::Impl::Impl(const void*          pmeStream,
                                   const void*          localStream,
                                   const void*          nonLocalStream,
                                   const DeviceContext& deviceContext,
                                   GpuApiCallBehavior   transferKind,
                                   int                  paddingSize,
                                   gmx_wallcycle*       wcycle) :
    deviceContext_(deviceContext),
    transferKind_(transferKind),
    paddingSize_(paddingSize),
    wcycle_(wcycle)
{
    static_assert(GMX_GPU != GMX_GPU_NONE,
                  "This object should only be constructed on the GPU code-paths.");

    // TODO: Refactor when the StreamManager is introduced.
    if (GMX_GPU == GMX_GPU_OPENCL)
    {
        GMX_ASSERT(pmeStream != nullptr, "GPU PME stream should be set in OpenCL builds.");

        // The update stream is set to the PME stream in OpenCL, since PME stream is the only stream created in the PME context.
        pmeStream_    = *static_cast<const CommandStream*>(pmeStream);
        updateStream_ = *static_cast<const CommandStream*>(pmeStream);
        GMX_UNUSED_VALUE(localStream);
        GMX_UNUSED_VALUE(nonLocalStream);
    }

    if (GMX_GPU == GMX_GPU_CUDA)
    {
        // Each stream is optional in CUDA builds; only copy the ones provided.
        if (pmeStream != nullptr)
        {
            pmeStream_ = *static_cast<const CommandStream*>(pmeStream);
        }
        if (localStream != nullptr)
        {
            localStream_ = *static_cast<const CommandStream*>(localStream);
        }
        if (nonLocalStream != nullptr)
        {
            nonLocalStream_ = *static_cast<const CommandStream*>(nonLocalStream);
        }

        // TODO: The update stream should be created only when it is needed.
        // The preprocessor guard duplicates the runtime check above because
        // cudaStreamCreate/CU_RET_ERR only compile in CUDA builds.
#    if (GMX_GPU == GMX_GPU_CUDA)
        cudaError_t stat;
        stat = cudaStreamCreate(&updateStream_);
        CU_RET_ERR(stat, "CUDA stream creation failed in StatePropagatorDataGpu");
#    endif
    }

    // Map the atom locality to the stream that will be used for coordinates,
    // velocities and forces transfers. Same streams are used for H2D and D2H copies.
    // Note, that nullptr stream is used here to indicate that the copy is not supported.
    xCopyStreams_[AtomLocality::Local]    = updateStream_;
    xCopyStreams_[AtomLocality::NonLocal] = nonLocalStream_;
    xCopyStreams_[AtomLocality::All]      = updateStream_;

    vCopyStreams_[AtomLocality::Local]    = updateStream_;
    vCopyStreams_[AtomLocality::NonLocal] = nullptr;
    vCopyStreams_[AtomLocality::All]      = updateStream_;

    fCopyStreams_[AtomLocality::Local]    = localStream_;
    fCopyStreams_[AtomLocality::NonLocal] = nonLocalStream_;
    fCopyStreams_[AtomLocality::All]      = updateStream_;
}
133
/*! \brief Constructor for PME-only ranks and PME tests.
 *
 * Only the PME stream exists in this configuration; all other streams are set
 * to nullptr so that unsupported copies fail the stream assertions loudly.
 *
 * \param[in] pmeStream      PME stream (must not be null).
 * \param[in] deviceContext  GPU device context (stored by reference).
 * \param[in] transferKind   Synchronous or asynchronous H2D/D2H copy behavior.
 * \param[in] paddingSize    Coordinate-buffer padding granularity in atoms.
 * \param[in] wcycle         Wall-cycle counters for timing the GPU launches/waits.
 */
StatePropagatorDataGpu::Impl::Impl(const void*          pmeStream,
                                   const DeviceContext& deviceContext,
                                   GpuApiCallBehavior   transferKind,
                                   int                  paddingSize,
                                   gmx_wallcycle*       wcycle) :
    deviceContext_(deviceContext),
    transferKind_(transferKind),
    paddingSize_(paddingSize),
    wcycle_(wcycle)
{
    static_assert(GMX_GPU != GMX_GPU_NONE,
                  "This object should only be constructed on the GPU code-paths.");

    GMX_ASSERT(pmeStream != nullptr, "GPU PME stream should be set.");
    pmeStream_ = *static_cast<const CommandStream*>(pmeStream);

    localStream_    = nullptr;
    nonLocalStream_ = nullptr;
    updateStream_   = nullptr;


    // Only local/all coordinates are allowed to be copied in PME-only rank/ PME tests.
    // This is a temporary measure to make it safe to use this class in those cases.
    xCopyStreams_[AtomLocality::Local]    = pmeStream_;
    xCopyStreams_[AtomLocality::NonLocal] = nullptr;
    xCopyStreams_[AtomLocality::All]      = pmeStream_;

    vCopyStreams_[AtomLocality::Local]    = nullptr;
    vCopyStreams_[AtomLocality::NonLocal] = nullptr;
    vCopyStreams_[AtomLocality::All]      = nullptr;

    fCopyStreams_[AtomLocality::Local]    = nullptr;
    fCopyStreams_[AtomLocality::NonLocal] = nullptr;
    fCopyStreams_[AtomLocality::All]      = nullptr;
}
169
170 StatePropagatorDataGpu::Impl::~Impl() {}
171
/*! \brief (Re)allocate the device buffers for coordinates, velocities and forces.
 *
 * The coordinate buffer is padded up to a multiple of \c paddingSize_ and the
 * padding region is zero-cleared; velocity and force buffers are sized exactly.
 *
 * \param[in] numAtomsLocal  Number of atoms local to this rank.
 * \param[in] numAtomsAll    Total number of atoms (local + non-local).
 */
void StatePropagatorDataGpu::Impl::reinit(int numAtomsLocal, int numAtomsAll)
{
    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    numAtomsLocal_ = numAtomsLocal;
    numAtomsAll_   = numAtomsAll;

    // Round the coordinate-buffer size up to the next multiple of paddingSize_
    // (a padding size of zero means no padding at all).
    int numAtomsPadded;
    if (paddingSize_ > 0)
    {
        numAtomsPadded = ((numAtomsAll_ + paddingSize_ - 1) / paddingSize_) * paddingSize_;
    }
    else
    {
        numAtomsPadded = numAtomsAll_;
    }

    reallocateDeviceBuffer(&d_x_, numAtomsPadded, &d_xSize_, &d_xCapacity_, deviceContext_);

    const size_t paddingAllocationSize = numAtomsPadded - numAtomsAll_;
    if (paddingAllocationSize > 0)
    {
        // The PME stream is used here because the padding region of d_x_ is only in the PME task.
        clearDeviceBufferAsync(&d_x_, numAtomsAll_, paddingAllocationSize, pmeStream_);
    }

    reallocateDeviceBuffer(&d_v_, numAtomsAll_, &d_vSize_, &d_vCapacity_, deviceContext_);
    const int d_fOldCapacity = d_fCapacity_;
    reallocateDeviceBuffer(&d_f_, numAtomsAll_, &d_fSize_, &d_fCapacity_, deviceContext_);
    // Clearing of the forces can be done in local stream since the nonlocal stream cannot reach
    // the force accumulation stage before syncing with the local stream. Only done in CUDA,
    // since the force buffer ops are not implemented in OpenCL.
    if (GMX_GPU == GMX_GPU_CUDA && d_fCapacity_ != d_fOldCapacity)
    {
        clearDeviceBufferAsync(&d_f_, 0, d_fCapacity_, localStream_);
    }

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}
213
214 std::tuple<int, int> StatePropagatorDataGpu::Impl::getAtomRangesFromAtomLocality(AtomLocality atomLocality)
215 {
216     int atomsStartAt   = 0;
217     int numAtomsToCopy = 0;
218     switch (atomLocality)
219     {
220         case AtomLocality::All:
221             atomsStartAt   = 0;
222             numAtomsToCopy = numAtomsAll_;
223             break;
224         case AtomLocality::Local:
225             atomsStartAt   = 0;
226             numAtomsToCopy = numAtomsLocal_;
227             break;
228         case AtomLocality::NonLocal:
229             atomsStartAt   = numAtomsLocal_;
230             numAtomsToCopy = numAtomsAll_ - numAtomsLocal_;
231             break;
232         default:
233             GMX_RELEASE_ASSERT(false,
234                                "Wrong range of atoms requested in GPU state data manager. Should "
235                                "be All, Local or NonLocal.");
236     }
237     GMX_ASSERT(atomsStartAt >= 0,
238                "The first elemtnt to copy has negative index. Probably, the GPU propagator state "
239                "was not initialized.");
240     GMX_ASSERT(numAtomsToCopy >= 0,
241                "Number of atoms to copy is negative. Probably, the GPU propagator state was not "
242                "initialized.");
243     return std::make_tuple(atomsStartAt, numAtomsToCopy);
244 }
245
/*! \brief Generic helper: copy the atom range for \p atomLocality from a host
 *         buffer into a device buffer on the given stream.
 *
 * \param[in] d_data         Destination device buffer.
 * \param[in] h_data         Source host buffer.
 * \param[in] dataSize       Allocated size of the device buffer (asserts only;
 *                           hence GMX_UNUSED_VALUE for release builds).
 * \param[in] atomLocality   Which atom range to copy.
 * \param[in] commandStream  Stream to launch the copy in (must be valid).
 */
void StatePropagatorDataGpu::Impl::copyToDevice(DeviceBuffer<RVec>                   d_data,
                                                const gmx::ArrayRef<const gmx::RVec> h_data,
                                                int                                  dataSize,
                                                AtomLocality                         atomLocality,
                                                CommandStream                        commandStream)
{
    // dataSize is only consulted by the asserts below, which compile away in
    // release builds; silence the unused-parameter warning there.
    GMX_UNUSED_VALUE(dataSize);

    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");

    GMX_ASSERT(dataSize >= 0, "Trying to copy to device buffer before it was allocated.");

    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying with given atom locality.");
    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    int atomsStartAt, numAtomsToCopy;
    std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);

    if (numAtomsToCopy != 0)
    {
        GMX_ASSERT(atomsStartAt + numAtomsToCopy <= dataSize,
                   "The device allocation is smaller than requested copy range.");
        GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(),
                   "The host buffer is smaller than the requested copy range.");

        copyToDeviceBuffer(&d_data, reinterpret_cast<const RVec*>(&h_data.data()[atomsStartAt]),
                           atomsStartAt, numAtomsToCopy, commandStream, transferKind_, nullptr);
    }

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}
280
/*! \brief Generic helper: copy the atom range for \p atomLocality from a device
 *         buffer into a host buffer on the given stream.
 *
 * \param[out] h_data         Destination host buffer.
 * \param[in]  d_data         Source device buffer.
 * \param[in]  dataSize       Allocated size of the device buffer (asserts only;
 *                            hence GMX_UNUSED_VALUE for release builds).
 * \param[in]  atomLocality   Which atom range to copy.
 * \param[in]  commandStream  Stream to launch the copy in (must be valid).
 */
void StatePropagatorDataGpu::Impl::copyFromDevice(gmx::ArrayRef<gmx::RVec> h_data,
                                                  DeviceBuffer<RVec>       d_data,
                                                  int                      dataSize,
                                                  AtomLocality             atomLocality,
                                                  CommandStream            commandStream)
{
    // dataSize is only consulted by the asserts below, which compile away in
    // release builds; silence the unused-parameter warning there.
    GMX_UNUSED_VALUE(dataSize);

    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");

    GMX_ASSERT(dataSize >= 0, "Trying to copy from device buffer before it was allocated.");

    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying with given atom locality.");
    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    int atomsStartAt, numAtomsToCopy;
    std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);

    if (numAtomsToCopy != 0)
    {
        GMX_ASSERT(atomsStartAt + numAtomsToCopy <= dataSize,
                   "The device allocation is smaller than requested copy range.");
        GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(),
                   "The host buffer is smaller than the requested copy range.");

        copyFromDeviceBuffer(reinterpret_cast<RVec*>(&h_data.data()[atomsStartAt]), &d_data,
                             atomsStartAt, numAtomsToCopy, commandStream, transferKind_, nullptr);
    }

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}
315
//! \brief Return the device buffer holding the coordinates.
DeviceBuffer<RVec> StatePropagatorDataGpu::Impl::getCoordinates()
{
    return d_x_;
}
320
/*! \brief Copy positions for the given atom locality from the host to the GPU
 *         and (in CUDA builds) mark the corresponding ready-on-device event.
 *
 * \param[in] h_x           Host buffer with positions.
 * \param[in] atomLocality  Which atom range to copy.
 */
void StatePropagatorDataGpu::Impl::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec> h_x,
                                                        AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = xCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying positions with given atom locality.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    copyToDevice(d_x_, h_x, d_xSize_, atomLocality, commandStream);

    // markEvent is skipped in OpenCL as:
    //   - it's not needed, copy is done in the same stream as the only consumer task (PME)
    //   - we don't consume the events in OpenCL which is not allowed by GpuEventSynchronizer (would leak memory).
    // TODO: remove this by adding an event-mark free flavor of this function
    if (GMX_GPU == GMX_GPU_CUDA)
    {
        xReadyOnDevice_[atomLocality].markEvent(xCopyStreams_[atomLocality]);
    }

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}
346
/*! \brief Return the event a consumer must wait on before using the
 *         coordinates of the given locality on the device.
 *
 * \param[in] atomLocality    Which atom range is consumed.
 * \param[in] simulationWork  Simulation-lifetime workload flags.
 * \param[in] stepWork        Per-step workload flags.
 * \returns The relevant synchronizer, or nullptr in OpenCL builds.
 */
GpuEventSynchronizer*
StatePropagatorDataGpu::Impl::getCoordinatesReadyOnDeviceEvent(AtomLocality atomLocality,
                                                               const SimulationWorkload& simulationWork,
                                                               const StepWorkload&       stepWork)
{
    // The provider of the coordinates may be different for local atoms. If the update is offloaded
    // and this is not a neighbor search step, then the consumer needs to wait for the update
    // to complete. Otherwise, the coordinates are copied from the host and we need to wait for
    // the copy event. Non-local coordinates are always provided by the H2D copy.
    //
    // TODO: This should be reconsidered to support the halo exchange.
    //
    // In OpenCL no events are used as coordinate sync is not necessary
    if (GMX_GPU == GMX_GPU_OPENCL)
    {
        return nullptr;
    }
    if (atomLocality == AtomLocality::Local && simulationWork.useGpuUpdate && !stepWork.doNeighborSearch)
    {
        return &xUpdatedOnDevice_;
    }
    else
    {
        return &xReadyOnDevice_[atomLocality];
    }
}
373
//! \brief Block the host until the H2D coordinate copy for \p atomLocality has completed.
void StatePropagatorDataGpu::Impl::waitCoordinatesCopiedToDevice(AtomLocality atomLocality)
{
    wallcycle_start(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    xReadyOnDevice_[atomLocality].waitForEvent();
    wallcycle_stop(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
}
381
//! \brief Return the event marked when the GPU update has produced new coordinates.
GpuEventSynchronizer* StatePropagatorDataGpu::Impl::xUpdatedOnDevice()
{
    return &xUpdatedOnDevice_;
}
386
/*! \brief Copy positions for the given atom locality from the GPU to the host
 *         and mark the ready-on-host event.
 *
 * \param[out] h_x           Host buffer to receive the positions.
 * \param[in]  atomLocality  Which atom range to copy.
 */
void StatePropagatorDataGpu::Impl::copyCoordinatesFromGpu(gmx::ArrayRef<gmx::RVec> h_x, AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = xCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying positions with given atom locality.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    copyFromDevice(h_x, d_x_, d_xSize_, atomLocality, commandStream);
    // Note: unlike copyCoordinatesToGpu this is not used in OpenCL, and the conditional is not needed.
    xReadyOnHost_[atomLocality].markEvent(commandStream);

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}
404
//! \brief Block the host until the D2H coordinate copy for \p atomLocality has completed.
void StatePropagatorDataGpu::Impl::waitCoordinatesReadyOnHost(AtomLocality atomLocality)
{
    wallcycle_start(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
    xReadyOnHost_[atomLocality].waitForEvent();
    wallcycle_stop(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
}
411
412
//! \brief Return the device buffer holding the velocities.
DeviceBuffer<RVec> StatePropagatorDataGpu::Impl::getVelocities()
{
    return d_v_;
}
417
/*! \brief Copy velocities for the given atom locality from the host to the GPU
 *         and mark the ready-on-device event.
 *
 * \param[in] h_v           Host buffer with velocities.
 * \param[in] atomLocality  Which atom range to copy.
 */
void StatePropagatorDataGpu::Impl::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec> h_v,
                                                       AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = vCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying velocities with given atom locality.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    copyToDevice(d_v_, h_v, d_vSize_, atomLocality, commandStream);
    vReadyOnDevice_[atomLocality].markEvent(commandStream);

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}
435
//! \brief Return the event marked when the H2D velocity copy for \p atomLocality completes.
GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getVelocitiesReadyOnDeviceEvent(AtomLocality atomLocality)
{
    return &vReadyOnDevice_[atomLocality];
}
440
441
/*! \brief Copy velocities for the given atom locality from the GPU to the host
 *         and mark the ready-on-host event.
 *
 * \param[out] h_v           Host buffer to receive the velocities.
 * \param[in]  atomLocality  Which atom range to copy.
 */
void StatePropagatorDataGpu::Impl::copyVelocitiesFromGpu(gmx::ArrayRef<gmx::RVec> h_v, AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = vCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying velocities with given atom locality.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    copyFromDevice(h_v, d_v_, d_vSize_, atomLocality, commandStream);
    vReadyOnHost_[atomLocality].markEvent(commandStream);

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}
458
//! \brief Block the host until the D2H velocity copy for \p atomLocality has completed.
void StatePropagatorDataGpu::Impl::waitVelocitiesReadyOnHost(AtomLocality atomLocality)
{
    wallcycle_start(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
    vReadyOnHost_[atomLocality].waitForEvent();
    wallcycle_stop(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
}
465
466
//! \brief Return the device buffer holding the forces.
DeviceBuffer<RVec> StatePropagatorDataGpu::Impl::getForces()
{
    return d_f_;
}
471
/*! \brief Copy forces for the given atom locality from the host to the GPU
 *         and mark the ready-on-device event.
 *
 * \param[in] h_f           Host buffer with forces.
 * \param[in] atomLocality  Which atom range to copy.
 */
void StatePropagatorDataGpu::Impl::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec> h_f,
                                                   AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = fCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying forces with given atom locality.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    copyToDevice(d_f_, h_f, d_fSize_, atomLocality, commandStream);
    fReadyOnDevice_[atomLocality].markEvent(commandStream);

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}
489
490 GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getForcesReadyOnDeviceEvent(AtomLocality atomLocality,
491                                                                                 bool useGpuFBufferOps)
492 {
493     if ((atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal) && useGpuFBufferOps)
494     {
495         return &fReducedOnDevice_;
496     }
497     else
498     {
499         return &fReadyOnDevice_[atomLocality];
500     }
501 }
502
//! \brief Return the event marked when the GPU force reduction has completed.
GpuEventSynchronizer* StatePropagatorDataGpu::Impl::fReducedOnDevice()
{
    return &fReducedOnDevice_;
}
507
/*! \brief Copy forces for the given atom locality from the GPU to the host
 *         and mark the ready-on-host event.
 *
 * \param[out] h_f           Host buffer to receive the forces.
 * \param[in]  atomLocality  Which atom range to copy.
 */
void StatePropagatorDataGpu::Impl::copyForcesFromGpu(gmx::ArrayRef<gmx::RVec> h_f, AtomLocality atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = fCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr,
               "No stream is valid for copying forces with given atom locality.");

    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);

    copyFromDevice(h_f, d_f_, d_fSize_, atomLocality, commandStream);
    fReadyOnHost_[atomLocality].markEvent(commandStream);

    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_STATE_PROPAGATOR_DATA);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}
524
//! \brief Block the host until the D2H force copy for \p atomLocality has completed.
void StatePropagatorDataGpu::Impl::waitForcesReadyOnHost(AtomLocality atomLocality)
{
    wallcycle_start(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
    fReadyOnHost_[atomLocality].waitForEvent();
    wallcycle_stop(wcycle_, ewcWAIT_GPU_STATE_PROPAGATOR_DATA);
}
531
//! \brief Return an opaque pointer to the internal update stream.
void* StatePropagatorDataGpu::Impl::getUpdateStream()
{
    return &updateStream_;
}
536
//! \brief Return the number of local atoms (set by reinit()).
int StatePropagatorDataGpu::Impl::numAtomsLocal()
{
    return numAtomsLocal_;
}
541
//! \brief Return the total number of atoms (set by reinit()).
int StatePropagatorDataGpu::Impl::numAtomsAll()
{
    return numAtomsAll_;
}
546
547
//! \brief Public constructor for the MD-rank case; forwards all arguments to Impl.
StatePropagatorDataGpu::StatePropagatorDataGpu(const void*          pmeStream,
                                               const void*          localStream,
                                               const void*          nonLocalStream,
                                               const DeviceContext& deviceContext,
                                               GpuApiCallBehavior   transferKind,
                                               int                  paddingSize,
                                               gmx_wallcycle*       wcycle) :
    impl_(new Impl(pmeStream, localStream, nonLocalStream, deviceContext, transferKind, paddingSize, wcycle))
{
}
558
//! \brief Public constructor for PME-only ranks/tests; forwards all arguments to Impl.
StatePropagatorDataGpu::StatePropagatorDataGpu(const void*          pmeStream,
                                               const DeviceContext& deviceContext,
                                               GpuApiCallBehavior   transferKind,
                                               int                  paddingSize,
                                               gmx_wallcycle*       wcycle) :
    impl_(new Impl(pmeStream, deviceContext, transferKind, paddingSize, wcycle))
{
}
567
568 StatePropagatorDataGpu::StatePropagatorDataGpu(StatePropagatorDataGpu&& /* other */) noexcept = default;
569
570 StatePropagatorDataGpu& StatePropagatorDataGpu::operator=(StatePropagatorDataGpu&& /* other */) noexcept = default;
571
572 StatePropagatorDataGpu::~StatePropagatorDataGpu() = default;
573
574
575 void StatePropagatorDataGpu::reinit(int numAtomsLocal, int numAtomsAll)
576 {
577     return impl_->reinit(numAtomsLocal, numAtomsAll);
578 }
579
//! \brief Translate an atom locality into a (start index, count) pair; forwards to Impl.
std::tuple<int, int> StatePropagatorDataGpu::getAtomRangesFromAtomLocality(AtomLocality atomLocality)
{
    return impl_->getAtomRangesFromAtomLocality(atomLocality);
}
584
585
//! \brief Return the device buffer holding the coordinates; forwards to Impl.
DeviceBuffer<RVec> StatePropagatorDataGpu::getCoordinates()
{
    return impl_->getCoordinates();
}
590
591 void StatePropagatorDataGpu::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec> h_x,
592                                                   AtomLocality                         atomLocality)
593 {
594     return impl_->copyCoordinatesToGpu(h_x, atomLocality);
595 }
596
//! \brief Return the coordinates-ready synchronizer for consumers; forwards to Impl.
GpuEventSynchronizer*
StatePropagatorDataGpu::getCoordinatesReadyOnDeviceEvent(AtomLocality              atomLocality,
                                                         const SimulationWorkload& simulationWork,
                                                         const StepWorkload&       stepWork)
{
    return impl_->getCoordinatesReadyOnDeviceEvent(atomLocality, simulationWork, stepWork);
}
604
605 void StatePropagatorDataGpu::waitCoordinatesCopiedToDevice(AtomLocality atomLocality)
606 {
607     return impl_->waitCoordinatesCopiedToDevice(atomLocality);
608 }
609
//! \brief Return the GPU-update-complete synchronizer; forwards to Impl.
GpuEventSynchronizer* StatePropagatorDataGpu::xUpdatedOnDevice()
{
    return impl_->xUpdatedOnDevice();
}
614
615 void StatePropagatorDataGpu::copyCoordinatesFromGpu(gmx::ArrayRef<RVec> h_x, AtomLocality atomLocality)
616 {
617     return impl_->copyCoordinatesFromGpu(h_x, atomLocality);
618 }
619
620 void StatePropagatorDataGpu::waitCoordinatesReadyOnHost(AtomLocality atomLocality)
621 {
622     return impl_->waitCoordinatesReadyOnHost(atomLocality);
623 }
624
625
//! \brief Return the device buffer holding the velocities; forwards to Impl.
DeviceBuffer<RVec> StatePropagatorDataGpu::getVelocities()
{
    return impl_->getVelocities();
}
630
631 void StatePropagatorDataGpu::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec> h_v,
632                                                  AtomLocality                         atomLocality)
633 {
634     return impl_->copyVelocitiesToGpu(h_v, atomLocality);
635 }
636
//! \brief Return the velocities-ready-on-device synchronizer; forwards to Impl.
GpuEventSynchronizer* StatePropagatorDataGpu::getVelocitiesReadyOnDeviceEvent(AtomLocality atomLocality)
{
    return impl_->getVelocitiesReadyOnDeviceEvent(atomLocality);
}
641
642 void StatePropagatorDataGpu::copyVelocitiesFromGpu(gmx::ArrayRef<RVec> h_v, AtomLocality atomLocality)
643 {
644     return impl_->copyVelocitiesFromGpu(h_v, atomLocality);
645 }
646
647 void StatePropagatorDataGpu::waitVelocitiesReadyOnHost(AtomLocality atomLocality)
648 {
649     return impl_->waitVelocitiesReadyOnHost(atomLocality);
650 }
651
652
//! \brief Return the device buffer holding the forces; forwards to Impl.
DeviceBuffer<RVec> StatePropagatorDataGpu::getForces()
{
    return impl_->getForces();
}
657
658 void StatePropagatorDataGpu::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec> h_f, AtomLocality atomLocality)
659 {
660     return impl_->copyForcesToGpu(h_f, atomLocality);
661 }
662
//! \brief Return the forces-ready synchronizer for consumers; forwards to Impl.
GpuEventSynchronizer* StatePropagatorDataGpu::getForcesReadyOnDeviceEvent(AtomLocality atomLocality,
                                                                          bool useGpuFBufferOps)
{
    return impl_->getForcesReadyOnDeviceEvent(atomLocality, useGpuFBufferOps);
}
668
//! \brief Return the force-reduction-complete synchronizer; forwards to Impl.
GpuEventSynchronizer* StatePropagatorDataGpu::fReducedOnDevice()
{
    return impl_->fReducedOnDevice();
}
673
674 void StatePropagatorDataGpu::copyForcesFromGpu(gmx::ArrayRef<RVec> h_f, AtomLocality atomLocality)
675 {
676     return impl_->copyForcesFromGpu(h_f, atomLocality);
677 }
678
679 void StatePropagatorDataGpu::waitForcesReadyOnHost(AtomLocality atomLocality)
680 {
681     return impl_->waitForcesReadyOnHost(atomLocality);
682 }
683
684
//! \brief Return an opaque pointer to the update stream; forwards to Impl.
void* StatePropagatorDataGpu::getUpdateStream()
{
    return impl_->getUpdateStream();
}
689
//! \brief Return the number of local atoms; forwards to Impl.
int StatePropagatorDataGpu::numAtomsLocal()
{
    return impl_->numAtomsLocal();
}
694
//! \brief Return the total number of atoms; forwards to Impl.
int StatePropagatorDataGpu::numAtomsAll()
{
    return impl_->numAtomsAll();
}
699
700 } // namespace gmx
701
702 #endif // GMX_GPU == GMX_GPU_NONE