/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef GMX_GPU_UTILS_CUDAUTILS_CUH
#define GMX_GPU_UTILS_CUDAUTILS_CUH

#include <array>
#include <string>

#include "gromacs/gpu_utils/gputraits.cuh"
#include "gromacs/math/vec.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/stringutil.h"

namespace gmx
{
namespace
{

/*! \brief Helper function to ensure no pending error silently
 * disrupts error handling.
 *
 * Asserts in a debug build if an unhandled error is present. Issues a
 * warning at run time otherwise.
 *
 * \todo This is similar to CU_CHECK_PREV_ERR, which should be
 * consolidated.
 */
static inline void ensureNoPendingCudaError(const char *errorMessage)
{
    // Ensure there is no pending error that would otherwise affect
    // the behaviour of future error handling.
    cudaError_t stat = cudaGetLastError();
    if (stat == cudaSuccess)
    {
        return;
    }

    // If we would find an error in a release build, we do not know
    // what is appropriate to do about it, so assert only for debug
    // builds.
    auto fullMessage = formatString("%s An unhandled error from a previous CUDA operation was detected. %s: %s",
                                    errorMessage, cudaGetErrorName(stat), cudaGetErrorString(stat));
    GMX_ASSERT(stat == cudaSuccess, fullMessage.c_str());
    // TODO When we evolve a better logging framework, use that
    // for release-build error reporting.
    gmx_warning("%s", fullMessage.c_str());
}

}   // namespace
}   // namespace gmx
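
/* Usage sketch (illustrative only; the call site and message text below are
 * assumptions, not something this header prescribes):
 *
 *     // Before issuing new CUDA work, make sure no stale error is pending
 *     // that could later be mis-attributed to the upcoming operation.
 *     ensureNoPendingCudaError("Prior to launching the nonbonded kernel.");
 */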

enum class GpuApiCallBehavior;

/* TODO error checking needs to be rewritten. We have 2 types of error checks needed
   based on where they occur in the code:
   - non performance-critical: these errors are unsafe to ignore and must
     _always_ be checked for, e.g. during initialization;
   - performance-critical: handling errors might hurt performance, so care needs to be
     taken about when/if we should check for them at all, e.g. in cu_upload_X. However,
     we should be able to turn the check for these errors on!

   Probably we'll need two sets of the macros below...
 */
#define CHECK_CUDA_ERRORS

#ifdef CHECK_CUDA_ERRORS

/*! Check for CUDA error on the return status of a CUDA RT API call. */
#define CU_RET_ERR(status, msg) \
    do { \
        if (status != cudaSuccess) \
        { \
            gmx_fatal(FARGS, "%s: %s\n", msg, cudaGetErrorString(status)); \
        } \
    } while (0)

/*! Check for any previously occurred uncaught CUDA error. */
#define CU_CHECK_PREV_ERR() \
    do { \
        cudaError_t _CU_CHECK_PREV_ERR_status = cudaGetLastError(); \
        if (_CU_CHECK_PREV_ERR_status != cudaSuccess) \
        { \
            gmx_warning("Just caught a previously occurred CUDA error (%s), will try to continue.", cudaGetErrorString(_CU_CHECK_PREV_ERR_status)); \
        } \
    } while (0)

#else /* CHECK_CUDA_ERRORS */

#define CU_RET_ERR(status, msg) do { } while (0)
#define CU_CHECK_PREV_ERR()     do { } while (0)

#endif /* CHECK_CUDA_ERRORS */
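
/* Usage sketch (illustrative only; the buffer name, size and messages are assumptions):
 *
 *     cudaError_t stat = cudaMalloc(&d_forces, numAtoms*sizeof(float3));
 *     CU_RET_ERR(stat, "cudaMalloc of d_forces failed");
 *     // ... later, in non-critical code, report and clear any stale error:
 *     CU_CHECK_PREV_ERR();
 */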

/*! \brief CUDA device information.
 *
 * The CUDA device information is queried and set at detection and contains
 * both information about the device/hardware returned by the runtime as well
 * as additional data like support status.
 */
struct gmx_device_info_t
{
    int            id;   /* id of the CUDA device */
    cudaDeviceProp prop; /* CUDA device properties */
    int            stat; /* result of the device check */
};

/*! Launches synchronous or asynchronous device to host memory copy.
 *
 *  The copy is launched in stream s or if not specified, in stream 0.
 */
int cu_copy_D2H(void *h_dest, void *d_src, size_t bytes, GpuApiCallBehavior transferKind, cudaStream_t s /*= 0*/);

/*! Launches synchronous device to host memory copy in stream 0. */
int cu_copy_D2H_sync(void * /*h_dest*/, void * /*d_src*/, size_t /*bytes*/);

/*! Launches asynchronous device to host memory copy in stream s. */
int cu_copy_D2H_async(void * /*h_dest*/, void * /*d_src*/, size_t /*bytes*/, cudaStream_t /*s = 0*/);

/*! Launches synchronous or asynchronous host to device memory copy.
 *
 *  The copy is launched in stream s or if not specified, in stream 0.
 */
int cu_copy_H2D(void *d_dest, void *h_src, size_t bytes, GpuApiCallBehavior transferKind, cudaStream_t /*s = 0*/);

/*! Launches synchronous host to device memory copy in stream 0. */
int cu_copy_H2D_sync(void * /*d_dest*/, void * /*h_src*/, size_t /*bytes*/);

/*! Launches asynchronous host to device memory copy in stream s. */
int cu_copy_H2D_async(void * /*d_dest*/, void * /*h_src*/, size_t /*bytes*/, cudaStream_t /*s = 0*/);
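
/* Usage sketch (illustrative only; the buffer names, sizes, stream variable and the
 * GpuApiCallBehavior::Async enumerator are assumptions based on the rest of the GPU utilities):
 *
 *     // Asynchronous upload of host coordinates into device memory in a given stream,
 *     // followed later by a download of the computed forces in the same stream.
 *     cu_copy_H2D(d_coords, h_coords, numAtoms*sizeof(float3), GpuApiCallBehavior::Async, stream);
 *     // ... launch kernels in the same stream ...
 *     cu_copy_D2H(h_forces, d_forces, numAtoms*sizeof(float3), GpuApiCallBehavior::Async, stream);
 */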

// TODO: the 2 functions below are pretty much a constructor/destructor of a simple
// GPU table object. There is also almost self-contained fetchFromParamLookupTable()
// in cuda_kernel_utils.cuh. They could all live in a separate class/struct file.

/*! \brief Initialize parameter lookup table.
 *
 * Initializes device memory, copies data from host and binds
 * a texture to allocated device memory to be used for parameter lookup.
 *
 * \tparam[in] T        Raw data type
 * \param[out] d_ptr    device pointer to the memory to be allocated
 * \param[out] texObj   texture object to be initialized
 * \param[in]  h_ptr    pointer to the host memory to be uploaded to the device
 * \param[in]  numElem  number of elements in the h_ptr
 * \param[in]  devInfo  pointer to the info struct of the device in use
 */
template <typename T>
void initParamLookupTable(T                       * &d_ptr,
                          cudaTextureObject_t      &texObj,
                          const T                  *h_ptr,
                          int                       numElem,
                          const gmx_device_info_t  *devInfo);

/*! \brief Destroy parameter lookup table.
 *
 * Unbinds texture object, deallocates device memory.
 *
 * \tparam[in] T        Raw data type
 * \param[in]  d_ptr    Device pointer to the memory to be deallocated
 * \param[in]  texObj   Texture object to be deinitialized
 * \param[in]  devInfo  Pointer to the info struct of the device in use
 */
template <typename T>
void destroyParamLookupTable(T                       *d_ptr,
                             cudaTextureObject_t      texObj,
                             const gmx_device_info_t *devInfo);
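
/* Usage sketch (illustrative only; the table name, host data, element count and devInfo
 * variable are assumptions):
 *
 *     float               *d_nbfpTable = nullptr;
 *     cudaTextureObject_t  nbfpTexObj;
 *     initParamLookupTable(d_nbfpTable, nbfpTexObj, h_nbfpTable, numTypes*numTypes, devInfo);
 *     // ... kernels read the table, e.g. via fetchFromParamLookupTable() ...
 *     destroyParamLookupTable(d_nbfpTable, nbfpTexObj, devInfo);
 */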

/*! \brief Add a triplet stored in a float3 to an rvec variable.
 *
 * \param[out] a Rvec to increment
 * \param[in]  b Float triplet to increment with.
 */
static inline void rvec_inc(rvec a, const float3 b)
{
    rvec tmp = {b.x, b.y, b.z};
    rvec_inc(a, tmp);
}

/*! \brief Wait for all tasks in stream \p s to complete.
 *
 * \param[in] s stream to synchronize with
 */
static inline void gpuStreamSynchronize(cudaStream_t s)
{
    cudaError_t stat = cudaStreamSynchronize(s);
    CU_RET_ERR(stat, "cudaStreamSynchronize failed");
}

/*! \brief Returns true if all tasks in \p s have completed.
 *
 * \param[in] s stream to check
 *
 * \returns True if all tasks enqueued in the stream \p s (at the time of this call) have completed.
 */
static inline bool haveStreamTasksCompleted(cudaStream_t s)
{
    cudaError_t stat = cudaStreamQuery(s);

    if (stat == cudaErrorNotReady)
    {
        // work is still in progress in the stream
        return false;
    }

    GMX_ASSERT(stat != cudaErrorInvalidResourceHandle, "Stream identifier not valid");

    // cudaSuccess and cudaErrorNotReady are the expected return values
    CU_RET_ERR(stat, "Unexpected cudaStreamQuery failure");

    GMX_ASSERT(stat == cudaSuccess, "Values other than cudaSuccess should have been explicitly handled");

    return true;
}
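
/* Usage sketch (illustrative only; overlapping CPU work while polling is an assumption
 * about the call site, and doSomeCpuWork() is a hypothetical helper):
 *
 *     while (!haveStreamTasksCompleted(localStream))
 *     {
 *         doSomeCpuWork();   // overlap CPU work with the still-running GPU stream
 *     }
 */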

/* Kernel launch helpers */

/*! \brief
 * A function for setting up a single CUDA kernel argument.
 * This is the tail of the compile-time recursive function below.
 * It has to be seen by the compiler first.
 *
 * \tparam    totalArgsCount  Number of the kernel arguments
 * \tparam    KernelPtr       Kernel function handle type
 * \param[in] argIndex        Index of the current argument
 */
template <size_t totalArgsCount, typename KernelPtr>
void prepareGpuKernelArgument(KernelPtr /*kernel*/,
                              std::array<void *, totalArgsCount> * /* kernelArgsPtr */,
                              size_t gmx_used_in_debug argIndex)
{
    GMX_ASSERT(argIndex == totalArgsCount, "Tail expansion");
}

/*! \brief
 * Compile-time recursive function for setting up a single CUDA kernel argument.
 * This function copies a kernel argument pointer \p argPtr into \p kernelArgsPtr,
 * and calls itself on the next argument, eventually calling the tail function above.
 *
 * \tparam        CurrentArg      Type of the current argument
 * \tparam        RemainingArgs   Types of remaining arguments after the current one
 * \tparam        totalArgsCount  Number of the kernel arguments
 * \tparam        KernelPtr       Kernel function handle type
 * \param[in]     kernel          Kernel function handle
 * \param[in,out] kernelArgsPtr   Pointer to the argument array to be filled in
 * \param[in]     argIndex        Index of the current argument
 * \param[in]     argPtr          Pointer to the current argument
 * \param[in]     otherArgsPtrs   Pack of pointers to arguments remaining to process after the current one
 */
template <typename CurrentArg, typename ... RemainingArgs, size_t totalArgsCount, typename KernelPtr>
void prepareGpuKernelArgument(KernelPtr kernel,
                              std::array<void *, totalArgsCount> *kernelArgsPtr,
                              size_t argIndex,
                              const CurrentArg *argPtr,
                              const RemainingArgs *... otherArgsPtrs)
{
    (*kernelArgsPtr)[argIndex] = (void *)argPtr;
    prepareGpuKernelArgument(kernel, kernelArgsPtr, argIndex + 1, otherArgsPtrs ...);
}

/*! \brief
 * A wrapper function for setting up all the CUDA kernel arguments.
 * Calls the recursive functions above.
 *
 * \tparam    Args      Types of all the kernel arguments
 * \param[in] kernel    Kernel function handle
 * \param[in] argsPtrs  Pointers to all the kernel arguments
 * \returns A prepared parameter pack to be used with launchGpuKernel() as the last argument.
 */
template <typename ... Args>
std::array<void *, sizeof ... (Args)> prepareGpuKernelArguments(void (*kernel)(Args...),
                                                                const KernelLaunchConfig & /* config */,
                                                                const Args *... argsPtrs)
{
    std::array<void *, sizeof ... (Args)> kernelArgs;
    prepareGpuKernelArgument(kernel, &kernelArgs, 0, argsPtrs ...);
    return kernelArgs;
}

/*! \brief Launches the CUDA kernel and handles the errors.
 *
 * \tparam    Args        Types of all the kernel arguments
 * \param[in] kernel      Kernel function handle
 * \param[in] config      Kernel configuration for launching
 * \param[in] kernelName  Human readable kernel description, for error handling only
 * \param[in] kernelArgs  Array of the pointers to the kernel arguments, prepared by prepareGpuKernelArguments()
 * \throws gmx::InternalError on kernel launch failure
 */
template <typename... Args>
void launchGpuKernel(void (*kernel)(Args...),
                     const KernelLaunchConfig &config,
                     CommandEvent * /* timingEvent */,
                     const char *kernelName,
                     const std::array<void *, sizeof ... (Args)> &kernelArgs)
{
    dim3 blockSize(config.blockSize[0], config.blockSize[1], config.blockSize[2]);
    dim3 gridSize(config.gridSize[0], config.gridSize[1], config.gridSize[2]);
    cudaLaunchKernel((void *)kernel, gridSize, blockSize, const_cast<void **>(kernelArgs.data()), config.sharedMemorySize, config.stream);

    cudaError_t status = cudaGetLastError();
    if (cudaSuccess != status)
    {
        const std::string errorMessage = "GPU kernel (" + std::string(kernelName) +
            ") failed to launch: " + std::string(cudaGetErrorString(status));
        GMX_THROW(gmx::InternalError(errorMessage));
    }
}
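
/* Usage sketch (illustrative only; the kernel, launch geometry and argument variables are
 * assumptions, and the unset grid/block dimensions are assumed to default to 1 in
 * KernelLaunchConfig):
 *
 *     KernelLaunchConfig config;
 *     config.blockSize[0]     = 64;
 *     config.gridSize[0]      = (numAtoms + 63)/64;
 *     config.sharedMemorySize = 0;
 *     config.stream           = localStream;
 *
 *     auto kernelArgs = prepareGpuKernelArguments(myKernel, config, &d_coords, &d_forces, &numAtoms);
 *     launchGpuKernel(myKernel, config, nullptr, "myKernel", kernelArgs);
 */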