* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 2012,2014,2015,2016,2017 by the GROMACS development team.
* Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include <array>
#include <string>
#include <type_traits>

#include "gromacs/gpu_utils/device_stream.h"
#include "gromacs/gpu_utils/gputraits.cuh"
*
* \returns A description of the API error. Returns '(CUDA error #0 (cudaSuccess): no error)' in case deviceError is cudaSuccess.
*/
inline std::string getDeviceErrorString(const cudaError_t deviceError)
{
    // Combine the numeric code, its symbolic name (e.g. "cudaErrorInvalidValue"),
    // and the runtime's human-readable description into a single diagnostic string.
    return formatString("CUDA error #%d (%s): %s.",
                        deviceError,
                        cudaGetErrorName(deviceError),
                        cudaGetErrorString(deviceError));
}
*
* \throws InternalError if deviceError is not a success.
*/
-static inline void checkDeviceError(const cudaError_t deviceError, const std::string& errorMessage)
+inline void checkDeviceError(const cudaError_t deviceError, const std::string& errorMessage)
{
if (deviceError != cudaSuccess)
{
*
* \param[in] errorMessage Undecorated error message.
*/
-static inline void ensureNoPendingDeviceError(const std::string& errorMessage)
+inline void ensureNoPendingDeviceError(const std::string& errorMessage)
{
// Ensure there is no pending error that would otherwise affect
// the behaviour of future error handling.
#ifdef CHECK_CUDA_ERRORS
/*! Check for CUDA error on the return status of a CUDA RT API call. */
/* Arguments are parenthesized at every use so that expression arguments
 * (e.g. `stat != 0 ? e1 : e2` or `prefix + suffix`) expand correctly. */
#    define CU_RET_ERR(deviceError, msg)                                                            \
        do                                                                                          \
        {                                                                                           \
            if ((deviceError) != cudaSuccess)                                                       \
            {                                                                                       \
                gmx_fatal(FARGS, "%s\n", ((msg) + gmx::getDeviceErrorString(deviceError)).c_str()); \
            }                                                                                       \
        } while (0)
#else /* CHECK_CUDA_ERRORS */
const CurrentArg* argPtr,
const RemainingArgs*... otherArgsPtrs)
{
- (*kernelArgsPtr)[argIndex] = (void*)argPtr;
+ (*kernelArgsPtr)[argIndex] = const_cast<void*>(static_cast<const void*>(argPtr));
prepareGpuKernelArgument(kernel, kernelArgsPtr, argIndex + 1, otherArgsPtrs...);
}
* \param[in] deviceStream GPU stream to launch kernel in
* \param[in] kernelName Human readable kernel description, for error handling only
* \param[in] kernelArgs Array of the pointers to the kernel arguments, prepared by
*                            prepareGpuKernelArguments()
* \throws gmx::InternalError on kernel launch failure
*/
template<typename... Args>
void launchGpuKernel(void (*kernel)(Args...),
{
dim3 blockSize(config.blockSize[0], config.blockSize[1], config.blockSize[2]);
dim3 gridSize(config.gridSize[0], config.gridSize[1], config.gridSize[2]);
- cudaLaunchKernel((void*)kernel, gridSize, blockSize, const_cast<void**>(kernelArgs.data()),
- config.sharedMemorySize, deviceStream.stream());
+ cudaLaunchKernel(reinterpret_cast<void*>(kernel),
+ gridSize,
+ blockSize,
+ const_cast<void**>(kernelArgs.data()),
+ config.sharedMemorySize,
+ deviceStream.stream());
gmx::ensureNoPendingDeviceError("GPU kernel (" + std::string(kernelName)
+ ") failed to launch.");