/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2014,2015,2016,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef GMX_GPU_UTILS_CUDAUTILS_CUH
#define GMX_GPU_UTILS_CUDAUTILS_CUH

#include <array>
#include <string>

#include "gromacs/gpu_utils/device_stream.h"
#include "gromacs/gpu_utils/gputraits.cuh"
#include "gromacs/math/vec.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/stringutil.h"

namespace gmx
{
/*! \brief Add the API information on the specific error to the error message.
 *
 * \param[in]  deviceError  The error to assert cudaSuccess on.
 *
 * \returns A description of the API error. Returns '(CUDA error #0 (cudaSuccess): no error)' in case deviceError is cudaSuccess.
 */
static inline std::string getDeviceErrorString(const cudaError_t deviceError)
{
    return formatString("CUDA error #%d (%s): %s.", deviceError, cudaGetErrorName(deviceError),
                        cudaGetErrorString(deviceError));
}

/*! \brief Check if API returned an error and throw an exception with information on it.
 *
 * \param[in]  deviceError   The error to assert cudaSuccess on.
 * \param[in]  errorMessage  Undecorated error message.
 *
 * \throws InternalError if deviceError is not a success.
 */
static inline void checkDeviceError(const cudaError_t deviceError, const std::string& errorMessage)
{
    if (deviceError != cudaSuccess)
    {
        GMX_THROW(gmx::InternalError(errorMessage + " " + getDeviceErrorString(deviceError)));
    }
}
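
/* Illustrative usage sketch of checkDeviceError(); the buffer pointer and size below are
 * hypothetical and only show the intended call pattern:
 *
 *     float* d_buffer = nullptr;
 *     checkDeviceError(cudaMalloc(&d_buffer, bufferSizeInBytes),
 *                      "Allocation of a device buffer failed.");
 */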

/*! \brief Helper function to ensure no pending error silently
 * disrupts error handling.
 *
 * Asserts in a debug build if an unhandled error is present. Issues a
 * warning at run time otherwise.
 *
 * \param[in]  errorMessage  Undecorated error message.
 */
static inline void ensureNoPendingDeviceError(const std::string& errorMessage)
{
    // Ensure there is no pending error that would otherwise affect
    // the behaviour of future error handling.
    cudaError_t deviceError = cudaGetLastError();
    if (deviceError == cudaSuccess)
    {
        return;
    }

    // If we would find an error in a release build, we do not know
    // what is appropriate to do about it, so assert only for debug
    // builds.
    const std::string fullErrorMessage =
            errorMessage + " An unhandled error from a previous CUDA operation was detected. "
            + gmx::getDeviceErrorString(deviceError);
    GMX_ASSERT(deviceError == cudaSuccess, fullErrorMessage.c_str());
    // TODO When we evolve a better logging framework, use that
    // for release-build error reporting.
    gmx_warning("%s", fullErrorMessage.c_str());
}
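
/* Illustrative usage sketch: call this before issuing further device work so that a stale,
 * unrelated error is not misattributed to the next API call (the message is hypothetical):
 *
 *     ensureNoPendingDeviceError("Before launching the nonbonded kernel.");
 */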

} // namespace gmx

enum class GpuApiCallBehavior;

/* TODO error checking needs to be rewritten. We have 2 types of error checks needed
   based on where they occur in the code:
   - non performance-critical: these errors are unsafe to be ignored and must be
     _always_ checked for, e.g. initializations
   - performance critical: handling errors might hurt performance so care needs to be taken
     when/if we should check for them at all, e.g. in cu_upload_X. However, we should be
     able to turn the check for these errors on!

   Probably we'll need two sets of the macros below...
 */

#define CHECK_CUDA_ERRORS

#ifdef CHECK_CUDA_ERRORS

/*! Check for CUDA error on the return status of a CUDA RT API call. */
#    define CU_RET_ERR(deviceError, msg)                                                          \
        do                                                                                        \
        {                                                                                         \
            if (deviceError != cudaSuccess)                                                       \
            {                                                                                     \
                gmx_fatal(FARGS, "%s\n", (msg + gmx::getDeviceErrorString(deviceError)).c_str()); \
            }                                                                                     \
        } while (0)

#else /* CHECK_CUDA_ERRORS */

#    define CU_RET_ERR(status, msg) \
        do                          \
        {                           \
        } while (0)

#endif /* CHECK_CUDA_ERRORS */
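
/* Illustrative usage sketch of CU_RET_ERR(); the host/device pointers and byte count are
 * hypothetical:
 *
 *     cudaError_t stat = cudaMemcpy(h_results, d_results, numBytes, cudaMemcpyDeviceToHost);
 *     CU_RET_ERR(stat, "cudaMemcpy of the results buffer failed.");
 */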

// TODO: the 2 functions below are pretty much a constructor/destructor of a simple
// GPU table object. There is also almost self-contained fetchFromParamLookupTable()
// in cuda_kernel_utils.cuh. They could all live in a separate class/struct file.

/*! \brief Add a triplet stored in a float3 to an rvec variable.
 *
 * \param[out]  a  Rvec to increment
 * \param[in]   b  Float triplet to increment with.
 */
static inline void rvec_inc(rvec a, const float3 b)
{
    rvec tmp = { b.x, b.y, b.z };
    rvec_inc(a, tmp);
}
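
/* Illustrative usage sketch; the force values are hypothetical:
 *
 *     float3 forceFromGpu = make_float3(0.1F, 0.2F, 0.3F);
 *     rvec   h_force      = { 0, 0, 0 };
 *     rvec_inc(h_force, forceFromGpu); // h_force is now (0.1, 0.2, 0.3)
 */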

/*! \brief Returns true if all tasks in \p deviceStream have completed.
 *
 * \param[in] deviceStream CUDA stream to check.
 *
 * \returns True if all tasks enqueued in the stream \p deviceStream (at the time of this call) have completed.
 */
static inline bool haveStreamTasksCompleted(const DeviceStream& deviceStream)
{
    cudaError_t stat = cudaStreamQuery(deviceStream.stream());

    if (stat == cudaErrorNotReady)
    {
        // work is still in progress in the stream
        return false;
    }

    GMX_ASSERT(stat != cudaErrorInvalidResourceHandle,
               ("Stream identifier not valid. " + gmx::getDeviceErrorString(stat)).c_str());

    // cudaSuccess and cudaErrorNotReady are the expected return values
    CU_RET_ERR(stat, "Unexpected cudaStreamQuery failure. ");

    GMX_ASSERT(stat == cudaSuccess,
               ("Values other than cudaSuccess should have been explicitly handled. "
                + gmx::getDeviceErrorString(stat))
                       .c_str());

    return true;
}
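
/* Illustrative usage sketch: poll a stream instead of blocking on it; doLightweightCpuWork()
 * is a hypothetical placeholder for useful CPU-side work:
 *
 *     while (!haveStreamTasksCompleted(deviceStream))
 *     {
 *         doLightweightCpuWork();
 *     }
 */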

/* Kernel launch helpers */

/*!
 * A function for setting up a single CUDA kernel argument.
 * This is the tail of the compile-time recursive function below.
 * It has to be seen by the compiler first.
 *
 * \tparam        totalArgsCount  Number of the kernel arguments
 * \tparam        KernelPtr       Kernel function handle type
 * \param[in]     argIndex        Index of the current argument
 */
template<size_t totalArgsCount, typename KernelPtr>
void prepareGpuKernelArgument(KernelPtr /*kernel*/,
                              std::array<void*, totalArgsCount>* /* kernelArgsPtr */,
                              size_t gmx_used_in_debug argIndex)
{
    GMX_ASSERT(argIndex == totalArgsCount, "Tail expansion");
}

/*!
 * Compile-time recursive function for setting up a single CUDA kernel argument.
 * This function copies a kernel argument pointer \p argPtr into \p kernelArgsPtr,
 * and calls itself on the next argument, eventually calling the tail function above.
 *
 * \tparam        CurrentArg      Type of the current argument
 * \tparam        RemainingArgs   Types of remaining arguments after the current one
 * \tparam        totalArgsCount  Number of the kernel arguments
 * \tparam        KernelPtr       Kernel function handle type
 * \param[in]     kernel          Kernel function handle
 * \param[in,out] kernelArgsPtr   Pointer to the argument array to be filled in
 * \param[in]     argIndex        Index of the current argument
 * \param[in]     argPtr          Pointer to the current argument
 * \param[in]     otherArgsPtrs   Pack of pointers to arguments remaining to process after the current one
 */
template<typename CurrentArg, typename... RemainingArgs, size_t totalArgsCount, typename KernelPtr>
void prepareGpuKernelArgument(KernelPtr                          kernel,
                              std::array<void*, totalArgsCount>* kernelArgsPtr,
                              size_t                             argIndex,
                              const CurrentArg*                  argPtr,
                              const RemainingArgs*... otherArgsPtrs)
{
    (*kernelArgsPtr)[argIndex] = (void*)argPtr;
    prepareGpuKernelArgument(kernel, kernelArgsPtr, argIndex + 1, otherArgsPtrs...);
}
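
/* For illustration: for a hypothetical kernel taking (float* x, int n), the call
 * prepareGpuKernelArgument(kernel, &args, 0, &d_x, &n) unrolls roughly as
 *     args[0] = &d_x;  prepareGpuKernelArgument(kernel, &args, 1, &n);
 *     args[1] = &n;    prepareGpuKernelArgument(kernel, &args, 2); // tail overload asserts 2 == totalArgsCount
 */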

/*! \brief
 * A wrapper function for setting up all the CUDA kernel arguments.
 * Calls the recursive functions above.
 *
 * \tparam    KernelPtr   Kernel function handle type
 * \tparam    Args        Types of all the kernel arguments
 * \param[in] kernel      Kernel function handle
 * \param[in] argsPtrs    Pointers to all the kernel arguments
 * \returns A prepared parameter pack to be used with launchGpuKernel() as the last argument.
 */
template<typename KernelPtr, typename... Args>
std::array<void*, sizeof...(Args)> prepareGpuKernelArguments(KernelPtr kernel,
                                                             const KernelLaunchConfig& /*config */,
                                                             const Args*... argsPtrs)
{
    std::array<void*, sizeof...(Args)> kernelArgs;
    prepareGpuKernelArgument(kernel, &kernelArgs, 0, argsPtrs...);
    return kernelArgs;
}
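
/* Illustrative usage sketch of pairing prepareGpuKernelArguments() with launchGpuKernel()
 * below; the kernel, device pointer, and atom count are hypothetical:
 *
 *     KernelLaunchConfig config;
 *     config.blockSize[0] = 64;
 *     config.gridSize[0]  = (numAtoms + 63) / 64;
 *     const auto kernelArgs = prepareGpuKernelArguments(scaleKernel, config, &d_positions, &numAtoms);
 *     launchGpuKernel(scaleKernel, config, deviceStream, nullptr, "scaleKernel", kernelArgs);
 */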

/*! \brief Launches the CUDA kernel and handles the errors.
 *
 * \tparam    Args            Types of all the kernel arguments
 * \param[in] kernel          Kernel function handle
 * \param[in] config          Kernel configuration for launching
 * \param[in] deviceStream    GPU stream to launch kernel in
 * \param[in] kernelName      Human readable kernel description, for error handling only
 * \param[in] kernelArgs      Array of the pointers to the kernel arguments, prepared by
 *                            prepareGpuKernelArguments()
 *
 * \throws gmx::InternalError on kernel launch failure
 */
template<typename... Args>
void launchGpuKernel(void (*kernel)(Args...),
                     const KernelLaunchConfig& config,
                     const DeviceStream&       deviceStream,
                     CommandEvent* /*timingEvent */,
                     const char*                               kernelName,
                     const std::array<void*, sizeof...(Args)>& kernelArgs)
{
    dim3 blockSize(config.blockSize[0], config.blockSize[1], config.blockSize[2]);
    dim3 gridSize(config.gridSize[0], config.gridSize[1], config.gridSize[2]);
    cudaLaunchKernel((void*)kernel, gridSize, blockSize, const_cast<void**>(kernelArgs.data()),
                     config.sharedMemorySize, deviceStream.stream());

    gmx::ensureNoPendingDeviceError("GPU kernel (" + std::string(kernelName)
                                    + ") failed to launch.");
}

#endif // GMX_GPU_UTILS_CUDAUTILS_CUH