/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef GMX_GPU_UTILS_DEVICEBUFFER_CUH
#define GMX_GPU_UTILS_DEVICEBUFFER_CUH
/*! \libinternal \file
 *  \brief Implements the DeviceBuffer type and routines for CUDA.
 *  Should only be included directly by the main DeviceBuffer file devicebuffer.h.
 *  TODO: the intent is for DeviceBuffer to become a class.
 *
 *  \author Aleksei Iupinov <a.yupinov@gmail.com>
 */
#include "gromacs/gpu_utils/cudautils.cuh" // CUDA runtime declarations and error-checking helpers
#include "gromacs/gpu_utils/gpu_utils.h"   // only for GpuApiCallBehavior
#include "gromacs/gpu_utils/gputraits.cuh"
#include "gromacs/utility/gmxassert.h"
//! \brief A device-side buffer of ValueTypes
template<typename ValueType>
using DeviceBuffer = ValueType *;
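// Note (added for clarity, not in the original header): in this CUDA implementation
// a DeviceBuffer<ValueType> is just a raw device pointer, so it can be handed
// directly to kernels and to the CUDA runtime API. The OpenCL counterpart wraps
// a cl_mem instead, which is why callers should treat the type as opaque.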
/*! \brief
 * Allocates a device-side buffer.
 * It is currently a caller's responsibility to call it only on not-yet allocated buffers.
 *
 * \tparam        ValueType  Raw value type of the \p buffer.
 * \param[in,out] buffer     Pointer to the device-side buffer.
 * \param[in]     numValues  Number of values to accommodate.
 * \param[in]     context    The buffer's dummy context - not managed explicitly in CUDA RT.
 */
template <typename ValueType>
void allocateDeviceBuffer(DeviceBuffer<ValueType> *buffer,
                          size_t                   numValues,
                          Context                  /* context */)
{
    GMX_ASSERT(buffer, "needs a buffer pointer");
    cudaError_t stat = cudaMalloc((void **)buffer, numValues * sizeof(ValueType));
    GMX_RELEASE_ASSERT(stat == cudaSuccess, "cudaMalloc failure");
}
/*! \brief
 * Frees a device-side buffer.
 * This does not reset separately stored size/capacity integers,
 * as this is planned to be a destructor of DeviceBuffer as a proper class,
 * and no calls on \p buffer should be made afterwards.
 *
 * \param[in] buffer  Pointer to the buffer to free.
 */
template <typename DeviceBuffer>
void freeDeviceBuffer(DeviceBuffer *buffer)
{
    GMX_ASSERT(buffer, "needs a buffer pointer");
    if (*buffer) // only free an actually allocated buffer
    {
        GMX_RELEASE_ASSERT(cudaFree(*buffer) == cudaSuccess, "cudaFree failed");
    }
}
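/* Illustrative usage sketch (an addition, not part of the original header): pairing
 * the allocation and release routines above. The names are hypothetical, and a
 * default-constructed Context is assumed to be an acceptable dummy, since the CUDA
 * implementation ignores it:
 *
 * \code
 *   DeviceBuffer<float> d_forces = nullptr;        // must not be allocated yet
 *   allocateDeviceBuffer(&d_forces, numAtoms * 3, Context());
 *   // ... use d_forces as a raw float* in kernels ...
 *   freeDeviceBuffer(&d_forces);                   // no calls on d_forces afterwards
 * \endcode
 */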
/*! \brief
 * Performs the host-to-device data copy, synchronously or asynchronously on request.
 *
 * TODO: This is meant to gradually replace cu/ocl_copy_h2d.
 *
 * \tparam        ValueType           Raw value type of the \p buffer.
 * \param[in,out] buffer              Pointer to the device-side buffer.
 * \param[in]     hostBuffer          Pointer to the raw host-side memory, also typed \p ValueType.
 * \param[in]     startingValueIndex  Offset (in values) at the device-side buffer to copy into.
 * \param[in]     numValues           Number of values to copy.
 * \param[in]     stream              GPU stream to perform the asynchronous copy in.
 * \param[in]     transferKind        Copy type: synchronous or asynchronous.
 * \param[out]    timingEvent         A dummy pointer to the H2D copy timing event to be filled in.
 *                                    Not used in the CUDA implementation.
 */
template <typename ValueType>
void copyToDeviceBuffer(DeviceBuffer<ValueType> *buffer,
                        const ValueType         *hostBuffer,
                        size_t                   startingValueIndex,
                        size_t                   numValues,
                        CommandStream            stream,
                        GpuApiCallBehavior       transferKind,
                        CommandEvent             */*timingEvent*/)
{
    if (numValues == 0)
    {
        return; // such calls are actually made with empty domains
    }
    GMX_ASSERT(buffer, "needs a buffer pointer");
    GMX_ASSERT(hostBuffer, "needs a host buffer pointer");
    cudaError_t  stat;
    const size_t bytes = numValues * sizeof(ValueType);
    switch (transferKind)
    {
        case GpuApiCallBehavior::Async:
            GMX_ASSERT(isHostMemoryPinned(hostBuffer), "Source host buffer was not pinned for CUDA");
            stat = cudaMemcpyAsync(*((ValueType **)buffer) + startingValueIndex, hostBuffer, bytes,
                                   cudaMemcpyHostToDevice, stream);
            GMX_RELEASE_ASSERT(stat == cudaSuccess, "Asynchronous H2D copy failed");
            break;
        case GpuApiCallBehavior::Sync:
            stat = cudaMemcpy(*((ValueType **)buffer) + startingValueIndex, hostBuffer, bytes,
                              cudaMemcpyHostToDevice);
            GMX_RELEASE_ASSERT(stat == cudaSuccess, "Synchronous H2D copy failed");
            break;

        default:
            throw;
    }
}
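/* Illustrative usage sketch (an addition, not part of the original header): a
 * synchronous H2D copy, which needs neither pinned host memory nor a timing event.
 * The host vector, its size, and the null stream are assumptions for the example:
 *
 * \code
 *   std::vector<float> h_charges(numAtoms, 0.0F);
 *   DeviceBuffer<float> d_charges = nullptr;
 *   allocateDeviceBuffer(&d_charges, numAtoms, Context());
 *   copyToDeviceBuffer(&d_charges, h_charges.data(), 0, numAtoms,
 *                      nullptr, GpuApiCallBehavior::Sync, nullptr);
 * \endcode
 */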