Add management of velocity and force copy events to StatePropagatorDataGpu
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Definitions of interfaces for GPU state data propagator object.
 *
 * \author Artem Zhmurov <zhmurov@gmail.com>
 *
 * \ingroup module_mdtypes
 */
#include "gmxpre.h"

#include "config.h"

#if GMX_GPU != GMX_GPU_NONE

#if GMX_GPU == GMX_GPU_CUDA
#include "gromacs/gpu_utils/cudautils.cuh"
#endif
#include "gromacs/gpu_utils/devicebuffer.h"
#if GMX_GPU == GMX_GPU_OPENCL
#include "gromacs/gpu_utils/oclutils.h"
#endif
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/utility/classhelpers.h"

#include "state_propagator_data_gpu_impl.h"

namespace gmx
{

StatePropagatorDataGpu::Impl::Impl(const void            *pmeStream,
                                   const void            *localStream,
                                   const void            *nonLocalStream,
                                   const void            *deviceContext,
                                   GpuApiCallBehavior     transferKind,
                                   int                    paddingSize) :
    transferKind_(transferKind),
    paddingSize_(paddingSize)
{
    static_assert(GMX_GPU != GMX_GPU_NONE, "This object should only be constructed on the GPU code-paths.");
    GMX_RELEASE_ASSERT(getenv("GMX_USE_GPU_BUFFER_OPS") == nullptr, "GPU buffer ops are not supported in this build.");

    // TODO: Refactor when the StreamManager is introduced.
    if (GMX_GPU == GMX_GPU_OPENCL)
    {
        GMX_ASSERT(deviceContext != nullptr, "GPU context should be set in OpenCL builds.");
        GMX_ASSERT(pmeStream != nullptr, "GPU PME stream should be set in OpenCL builds.");

        // The update stream is set to the PME stream in OpenCL, since the PME stream is the only stream created in the PME context.
        pmeStream_      = *static_cast<const CommandStream*>(pmeStream);
        updateStream_   = *static_cast<const CommandStream*>(pmeStream);
        deviceContext_  = *static_cast<const DeviceContext*>(deviceContext);
        GMX_UNUSED_VALUE(localStream);
        GMX_UNUSED_VALUE(nonLocalStream);
    }

    if (GMX_GPU == GMX_GPU_CUDA)
    {
        if (pmeStream != nullptr)
        {
            pmeStream_ = *static_cast<const CommandStream*>(pmeStream);
        }
        if (localStream != nullptr)
        {
            localStream_ = *static_cast<const CommandStream*>(localStream);
        }
        if (nonLocalStream != nullptr)
        {
            nonLocalStream_ = *static_cast<const CommandStream*>(nonLocalStream);
        }

        // TODO: The update stream should be created only when it is needed.
#if (GMX_GPU == GMX_GPU_CUDA)
        cudaError_t stat;
        stat = cudaStreamCreate(&updateStream_);
        CU_RET_ERR(stat, "CUDA stream creation failed in StatePropagatorDataGpu");
#endif
        GMX_UNUSED_VALUE(deviceContext);
    }

    // Map each atom locality to the stream that will be used for coordinate,
    // velocity and force transfers. The same streams are used for H2D and D2H copies.
    // Note that a nullptr stream is used here to indicate that the copy is not supported.
    xCopyStreams_[AtomLocality::Local]    = updateStream_;
    xCopyStreams_[AtomLocality::NonLocal] = nonLocalStream_;
    xCopyStreams_[AtomLocality::All]      = updateStream_;

    vCopyStreams_[AtomLocality::Local]    = updateStream_;
    vCopyStreams_[AtomLocality::NonLocal] = nullptr;
    vCopyStreams_[AtomLocality::All]      = updateStream_;

    fCopyStreams_[AtomLocality::Local]    = localStream_;
    fCopyStreams_[AtomLocality::NonLocal] = nonLocalStream_;
    fCopyStreams_[AtomLocality::All]      = nullptr;
}

StatePropagatorDataGpu::Impl::~Impl()
{
}

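// Reallocate the coordinate, velocity and force device buffers for the new atom counts.
// The coordinate buffer is padded up to a multiple of paddingSize_, and the padding
// region is cleared asynchronously in the PME stream.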
void StatePropagatorDataGpu::Impl::reinit(int numAtomsLocal, int numAtomsAll)
{
    numAtomsLocal_ = numAtomsLocal;
    numAtomsAll_   = numAtomsAll;

    int numAtomsPadded;
    if (paddingSize_ > 0)
    {
        numAtomsPadded = ((numAtomsAll_ + paddingSize_ - 1)/paddingSize_)*paddingSize_;
    }
    else
    {
        numAtomsPadded = numAtomsAll_;
    }

    reallocateDeviceBuffer(&d_x_, DIM*numAtomsPadded, &d_xSize_, &d_xCapacity_, deviceContext_);

    const size_t paddingAllocationSize = numAtomsPadded - numAtomsAll_;
    if (paddingAllocationSize > 0)
    {
        // The PME stream is used here because the padding region of d_x_ is only used in the PME task.
        clearDeviceBufferAsync(&d_x_, DIM*numAtomsAll_, DIM*paddingAllocationSize, pmeStream_);
    }

    reallocateDeviceBuffer(&d_v_, DIM*numAtomsAll_, &d_vSize_, &d_vCapacity_, deviceContext_);
    reallocateDeviceBuffer(&d_f_, DIM*numAtomsAll_, &d_fSize_, &d_fCapacity_, deviceContext_);
}

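// Map an atom locality to the index of the first atom and the number of atoms to copy.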
std::tuple<int, int> StatePropagatorDataGpu::Impl::getAtomRangesFromAtomLocality(AtomLocality  atomLocality)
{
    int atomsStartAt   = 0;
    int numAtomsToCopy = 0;
    switch (atomLocality)
    {
        case AtomLocality::All:
            atomsStartAt    = 0;
            numAtomsToCopy  = numAtomsAll_;
            break;
        case AtomLocality::Local:
            atomsStartAt    = 0;
            numAtomsToCopy  = numAtomsLocal_;
            break;
        case AtomLocality::NonLocal:
            atomsStartAt    = numAtomsLocal_;
            numAtomsToCopy  = numAtomsAll_ - numAtomsLocal_;
            break;
        default:
            GMX_RELEASE_ASSERT(false, "Wrong range of atoms requested in GPU state data manager. Should be All, Local or NonLocal.");
    }
    GMX_ASSERT(atomsStartAt   >= 0, "The first element to copy has a negative index. Probably, the GPU propagator state was not initialized.");
    GMX_ASSERT(numAtomsToCopy >= 0, "The number of atoms to copy is negative. Probably, the GPU propagator state was not initialized.");
    return std::make_tuple(atomsStartAt, numAtomsToCopy);
}

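// Generic helper for the copy*ToGpu methods below: asynchronously copies the sub-range of
// atoms selected by atomLocality from the host RVec buffer into the flat float device buffer.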
void StatePropagatorDataGpu::Impl::copyToDevice(DeviceBuffer<float>                   d_data,
                                                const gmx::ArrayRef<const gmx::RVec>  h_data,
                                                int                                   dataSize,
                                                AtomLocality                          atomLocality,
                                                CommandStream                         commandStream)
{

    GMX_UNUSED_VALUE(dataSize);

    GMX_ASSERT(dataSize >= 0, "Trying to copy to device buffer before it was allocated.");

    int atomsStartAt, numAtomsToCopy;
    std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);

    int elementsStartAt   = atomsStartAt*DIM;
    int numElementsToCopy = numAtomsToCopy*DIM;

    if (numAtomsToCopy != 0)
    {
        GMX_ASSERT(elementsStartAt + numElementsToCopy <= dataSize, "The device allocation is smaller than the requested copy range.");
        GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(), "The host buffer is smaller than the requested copy range.");

        copyToDeviceBuffer(&d_data, reinterpret_cast<const float *>(&h_data.data()[atomsStartAt]),
                           elementsStartAt, numElementsToCopy,
                           commandStream, transferKind_, nullptr);
    }
}

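// Generic helper for the copy*FromGpu methods below: the device-to-host mirror of copyToDevice().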
void StatePropagatorDataGpu::Impl::copyFromDevice(gmx::ArrayRef<gmx::RVec>  h_data,
                                                  DeviceBuffer<float>       d_data,
                                                  int                       dataSize,
                                                  AtomLocality              atomLocality,
                                                  CommandStream             commandStream)
{

    GMX_UNUSED_VALUE(dataSize);

    GMX_ASSERT(dataSize >= 0, "Trying to copy from device buffer before it was allocated.");

    int atomsStartAt, numAtomsToCopy;
    std::tie(atomsStartAt, numAtomsToCopy) = getAtomRangesFromAtomLocality(atomLocality);

    int elementsStartAt   = atomsStartAt*DIM;
    int numElementsToCopy = numAtomsToCopy*DIM;

    if (numAtomsToCopy != 0)
    {
        GMX_ASSERT(elementsStartAt + numElementsToCopy <= dataSize, "The device allocation is smaller than the requested copy range.");
        GMX_ASSERT(atomsStartAt + numAtomsToCopy <= h_data.ssize(), "The host buffer is smaller than the requested copy range.");

        copyFromDeviceBuffer(reinterpret_cast<float*>(&h_data.data()[atomsStartAt]), &d_data,
                             elementsStartAt, numElementsToCopy,
                             commandStream, transferKind_, nullptr);
    }
}

DeviceBuffer<float> StatePropagatorDataGpu::Impl::getCoordinates()
{
    return d_x_;
}

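// Copy the positions for the given atom locality to the GPU, using the coordinate copy stream
// selected in the constructor, and mark the xReadyOnDevice_ event in CUDA builds.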
void StatePropagatorDataGpu::Impl::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec>  h_x,
                                                        AtomLocality                          atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = xCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr, "No stream is valid for copying positions with the given atom locality.");

    copyToDevice(d_x_, h_x, d_xSize_, atomLocality, commandStream);

    // markEvent is skipped in OpenCL because:
    //   - it is not needed, since the copy is done in the same stream as the only consumer task (PME);
    //   - the events are never consumed in OpenCL, which GpuEventSynchronizer does not allow (it would leak memory).
    // TODO: remove this by adding an event-mark free flavor of this function
    if (GMX_GPU == GMX_GPU_CUDA)
    {
        xReadyOnDevice_[atomLocality].markEvent(commandStream);
        // TODO: Remove when event-based synchronization is introduced
        gpuStreamSynchronize(commandStream);
    }
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getCoordinatesReadyOnDeviceEvent(AtomLocality  atomLocality)
{
    return &xReadyOnDevice_[atomLocality];
}

void StatePropagatorDataGpu::Impl::copyCoordinatesFromGpu(gmx::ArrayRef<gmx::RVec>  h_x,
                                                          AtomLocality              atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = xCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr, "No stream is valid for copying positions with the given atom locality.");

    copyFromDevice(h_x, d_x_, d_xSize_, atomLocality, commandStream);
    // TODO: Remove when event-based synchronization is introduced
    gpuStreamSynchronize(commandStream);
    // Note: unlike copyCoordinatesToGpu, this function is not used in OpenCL, so the conditional on the build type is not needed.
    xReadyOnHost_[atomLocality].markEvent(commandStream);
}

void StatePropagatorDataGpu::Impl::waitCoordinatesReadyOnHost(AtomLocality  atomLocality)
{
    xReadyOnHost_[atomLocality].waitForEvent();
}


DeviceBuffer<float> StatePropagatorDataGpu::Impl::getVelocities()
{
    return d_v_;
}

void StatePropagatorDataGpu::Impl::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec>  h_v,
                                                       AtomLocality                          atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = vCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr, "No stream is valid for copying velocities with the given atom locality.");

    copyToDevice(d_v_, h_v, d_vSize_, atomLocality, commandStream);
    // TODO: Remove when event-based synchronization is introduced
    gpuStreamSynchronize(commandStream);
    vReadyOnDevice_[atomLocality].markEvent(commandStream);
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getVelocitiesReadyOnDeviceEvent(AtomLocality  atomLocality)
{
    return &vReadyOnDevice_[atomLocality];
}


void StatePropagatorDataGpu::Impl::copyVelocitiesFromGpu(gmx::ArrayRef<gmx::RVec>  h_v,
                                                         AtomLocality              atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = vCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr, "No stream is valid for copying velocities with the given atom locality.");

    copyFromDevice(h_v, d_v_, d_vSize_, atomLocality, commandStream);
    // TODO: Remove when event-based synchronization is introduced
    gpuStreamSynchronize(commandStream);
    vReadyOnHost_[atomLocality].markEvent(commandStream);
}

void StatePropagatorDataGpu::Impl::waitVelocitiesReadyOnHost(AtomLocality  atomLocality)
{
    vReadyOnHost_[atomLocality].waitForEvent();
}


DeviceBuffer<float> StatePropagatorDataGpu::Impl::getForces()
{
    return d_f_;
}

void StatePropagatorDataGpu::Impl::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec>  h_f,
                                                   AtomLocality                          atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = fCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr, "No stream is valid for copying forces with the given atom locality.");

    copyToDevice(d_f_, h_f, d_fSize_, atomLocality, commandStream);
    // TODO: Remove when event-based synchronization is introduced
    gpuStreamSynchronize(commandStream);
    fReadyOnDevice_[atomLocality].markEvent(commandStream);
}

GpuEventSynchronizer* StatePropagatorDataGpu::Impl::getForcesReadyOnDeviceEvent(AtomLocality  atomLocality)
{
    return &fReadyOnDevice_[atomLocality];
}


void StatePropagatorDataGpu::Impl::copyForcesFromGpu(gmx::ArrayRef<gmx::RVec>  h_f,
                                                     AtomLocality              atomLocality)
{
    GMX_ASSERT(atomLocality < AtomLocality::Count, "Wrong atom locality.");
    CommandStream commandStream = fCopyStreams_[atomLocality];
    GMX_ASSERT(commandStream != nullptr, "No stream is valid for copying forces with the given atom locality.");

    copyFromDevice(h_f, d_f_, d_fSize_, atomLocality, commandStream);
    // TODO: Remove when event-based synchronization is introduced
    gpuStreamSynchronize(commandStream);
    fReadyOnHost_[atomLocality].markEvent(commandStream);
}

void StatePropagatorDataGpu::Impl::waitForcesReadyOnHost(AtomLocality  atomLocality)
{
    fReadyOnHost_[atomLocality].waitForEvent();
}

void* StatePropagatorDataGpu::Impl::getUpdateStream()
{
    return updateStream_;
}

int StatePropagatorDataGpu::Impl::numAtomsLocal()
{
    return numAtomsLocal_;
}

int StatePropagatorDataGpu::Impl::numAtomsAll()
{
    return numAtomsAll_;
}


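// Public-facing StatePropagatorDataGpu methods: thin wrappers that forward to the Impl object.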
StatePropagatorDataGpu::StatePropagatorDataGpu(const void        *pmeStream,
                                               const void        *localStream,
                                               const void        *nonLocalStream,
                                               const void        *deviceContext,
                                               GpuApiCallBehavior transferKind,
                                               int                paddingSize)
    : impl_(new Impl(pmeStream,
                     localStream,
                     nonLocalStream,
                     deviceContext,
                     transferKind,
                     paddingSize))
{
}

StatePropagatorDataGpu::StatePropagatorDataGpu(StatePropagatorDataGpu && /* other */) noexcept = default;

StatePropagatorDataGpu &StatePropagatorDataGpu::operator=(StatePropagatorDataGpu && /* other */) noexcept = default;

StatePropagatorDataGpu::~StatePropagatorDataGpu() = default;


void StatePropagatorDataGpu::reinit(int numAtomsLocal, int numAtomsAll)
{
    return impl_->reinit(numAtomsLocal, numAtomsAll);
}

std::tuple<int, int> StatePropagatorDataGpu::getAtomRangesFromAtomLocality(AtomLocality  atomLocality)
{
    return impl_->getAtomRangesFromAtomLocality(atomLocality);
}


DeviceBuffer<float> StatePropagatorDataGpu::getCoordinates()
{
    return impl_->getCoordinates();
}

void StatePropagatorDataGpu::copyCoordinatesToGpu(const gmx::ArrayRef<const gmx::RVec>  h_x,
                                                  AtomLocality                          atomLocality)
{
    return impl_->copyCoordinatesToGpu(h_x, atomLocality);
}

GpuEventSynchronizer* StatePropagatorDataGpu::getCoordinatesReadyOnDeviceEvent(AtomLocality  atomLocality)
{
    return impl_->getCoordinatesReadyOnDeviceEvent(atomLocality);
}

void StatePropagatorDataGpu::copyCoordinatesFromGpu(gmx::ArrayRef<RVec>  h_x,
                                                    AtomLocality         atomLocality)
{
    return impl_->copyCoordinatesFromGpu(h_x, atomLocality);
}

void StatePropagatorDataGpu::waitCoordinatesReadyOnHost(AtomLocality  atomLocality)
{
    return impl_->waitCoordinatesReadyOnHost(atomLocality);
}


DeviceBuffer<float> StatePropagatorDataGpu::getVelocities()
{
    return impl_->getVelocities();
}

void StatePropagatorDataGpu::copyVelocitiesToGpu(const gmx::ArrayRef<const gmx::RVec>  h_v,
                                                 AtomLocality                          atomLocality)
{
    return impl_->copyVelocitiesToGpu(h_v, atomLocality);
}

GpuEventSynchronizer* StatePropagatorDataGpu::getVelocitiesReadyOnDeviceEvent(AtomLocality  atomLocality)
{
    return impl_->getVelocitiesReadyOnDeviceEvent(atomLocality);
}

void StatePropagatorDataGpu::copyVelocitiesFromGpu(gmx::ArrayRef<RVec>  h_v,
                                                   AtomLocality         atomLocality)
{
    return impl_->copyVelocitiesFromGpu(h_v, atomLocality);
}

void StatePropagatorDataGpu::waitVelocitiesReadyOnHost(AtomLocality  atomLocality)
{
    return impl_->waitVelocitiesReadyOnHost(atomLocality);
}


DeviceBuffer<float> StatePropagatorDataGpu::getForces()
{
    return impl_->getForces();
}

void StatePropagatorDataGpu::copyForcesToGpu(const gmx::ArrayRef<const gmx::RVec>  h_f,
                                             AtomLocality                          atomLocality)
{
    return impl_->copyForcesToGpu(h_f, atomLocality);
}

GpuEventSynchronizer* StatePropagatorDataGpu::getForcesReadyOnDeviceEvent(AtomLocality  atomLocality)
{
    return impl_->getForcesReadyOnDeviceEvent(atomLocality);
}

void StatePropagatorDataGpu::copyForcesFromGpu(gmx::ArrayRef<RVec>  h_f,
                                               AtomLocality         atomLocality)
{
    return impl_->copyForcesFromGpu(h_f, atomLocality);
}

void StatePropagatorDataGpu::waitForcesReadyOnHost(AtomLocality  atomLocality)
{
    return impl_->waitForcesReadyOnHost(atomLocality);
}


void* StatePropagatorDataGpu::getUpdateStream()
{
    return impl_->getUpdateStream();
}

int StatePropagatorDataGpu::numAtomsLocal()
{
    return impl_->numAtomsLocal();
}

int StatePropagatorDataGpu::numAtomsAll()
{
    return impl_->numAtomsAll();
}

}      // namespace gmx

#endif // GMX_GPU != GMX_GPU_NONE