2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
38 * Implements PmeGpuProgramImpl, which stores permanent PME GPU context-derived data,
39 * such as (compiled) kernel handles.
41 * \author Aleksei Iupinov <a.yupinov@gmail.com>
42 * \ingroup module_ewald
46 #include "gromacs/gpu_utils/gmxopencl.h"
47 #include "gromacs/gpu_utils/ocl_compiler.h"
48 #include "gromacs/utility/stringutil.h"
50 #include "pme_gpu_constants.h"
51 #include "pme_gpu_internal.h" // for GridOrdering enum
52 #include "pme_gpu_program_impl.h"
53 #include "pme_gpu_types_host.h"
56 PmeGpuProgramImpl::PmeGpuProgramImpl(const DeviceInformation* deviceInfo)
58 // Context creation (which should happen outside of this class: #2522)
59 cl_platform_id platformId = deviceInfo->oclPlatformId;
60 cl_device_id deviceId = deviceInfo->oclDeviceId;
61 cl_context_properties contextProperties[3];
62 contextProperties[0] = CL_CONTEXT_PLATFORM;
63 contextProperties[1] = reinterpret_cast<cl_context_properties>(platformId);
64 contextProperties[2] = 0; /* Terminates the list of properties */
67 deviceContext_.setContext(clCreateContext(contextProperties, 1, &deviceId, nullptr, nullptr, &clError));
68 if (clError != CL_SUCCESS)
70 const std::string errorString = gmx::formatString(
71 "Failed to create context for PME on GPU #%s:\n OpenCL error %d: %s",
72 deviceInfo->device_name, clError, ocl_get_error_string(clError).c_str());
73 GMX_THROW(gmx::InternalError(errorString));
77 warpSize = gmx::ocl::getDeviceWarpSize(deviceContext_.context(), deviceId);
78 // TODO: for Intel ideally we'd want to set these based on the compiler warp size
79 // but given that we've done no tuning for Intel iGPU, this is as good as anything.
80 spreadWorkGroupSize = std::min(c_spreadMaxWarpsPerBlock * warpSize, deviceInfo->maxWorkGroupSize);
81 solveMaxWorkGroupSize = std::min(c_solveMaxWarpsPerBlock * warpSize, deviceInfo->maxWorkGroupSize);
82 gatherWorkGroupSize = std::min(c_gatherMaxWarpsPerBlock * warpSize, deviceInfo->maxWorkGroupSize);
84 compileKernels(deviceInfo);
87 PmeGpuProgramImpl::~PmeGpuProgramImpl()
89 // TODO: log releasing errors
90 cl_int gmx_used_in_debug stat = 0;
91 stat |= clReleaseKernel(splineAndSpreadKernel);
92 stat |= clReleaseKernel(splineKernel);
93 stat |= clReleaseKernel(spreadKernel);
94 stat |= clReleaseKernel(gatherKernel);
95 stat |= clReleaseKernel(solveXYZKernel);
96 stat |= clReleaseKernel(solveXYZEnergyKernel);
97 stat |= clReleaseKernel(solveYZXKernel);
98 stat |= clReleaseKernel(solveYZXEnergyKernel);
99 GMX_ASSERT(stat == CL_SUCCESS,
100 gmx::formatString("Failed to release PME OpenCL resources %d: %s", stat,
101 ocl_get_error_string(stat).c_str())
105 /*! \brief Ensure that spread/gather kernels have been compiled to a suitable warp size
107 * On Intel the exec width/warp is decided at compile-time and can be
108 * smaller than the minimum order^2 required in spread/gather ATM which
109 * we need to check for.
111 static void checkRequiredWarpSize(cl_kernel kernel, const char* kernelName, const DeviceInformation* deviceInfo)
113 if (deviceInfo->deviceVendor == DeviceVendor::Intel)
115 size_t kernelWarpSize = gmx::ocl::getKernelWarpSize(kernel, deviceInfo->oclDeviceId);
117 if (kernelWarpSize < c_pmeSpreadGatherMinWarpSize)
119 const std::string errorString = gmx::formatString(
120 "PME OpenCL kernels require >=%d execution width, but the %s kernel "
121 "has been compiled for the device %s to a %zu width and therefore it can not "
122 "execute correctly.",
123 c_pmeSpreadGatherMinWarpSize, kernelName, deviceInfo->device_name, kernelWarpSize);
124 GMX_THROW(gmx::InternalError(errorString));
// JIT-compiles pme_program.cl for the given device, forwarding host-side
// compile-time constants as -D macros, then extracts every kernel handle by its
// function name and stores it in the matching member.
void PmeGpuProgramImpl::compileKernels(const DeviceInformation* deviceInfo)
    // We might consider storing program as a member variable if it's needed later
    cl_program program = nullptr;
    /* Need to catch std::bad_alloc here and during compilation string handling. */
    // NOTE(review): the compileProgram call below is paired with the catch at the
    // bottom, so a `try` presumably opens here — confirm against the full file.
    /* Here we pass macros and static const int variables defined in include
     * files outside as macros, to avoid including those files
     * in the JIT compilation that happens at runtime.
     */
    // NOTE(review): the argument list passes warpSize, c_pmeGpuOrder and
    // c_usePadding, but the visible format string has no matching
    // -Dwarp_size/-Dorder/-Dc_usePadding entries — lines appear to be missing
    // here; verify against the upstream file before editing.
    const std::string commonDefines = gmx::formatString(
            "-DthreadsPerAtom=%d "
            // forwarding from pme_grid.h, used for spline computation table sizes only
            "-Dc_pmeMaxUnitcellShift=%f "
            // forwarding PME behavior constants from pme_gpu_constants.h
            "-Dc_skipNeutralAtoms=%d "
            "-Dc_virialAndEnergyCount=%d "
            // forwarding kernel work sizes
            "-Dc_spreadWorkGroupSize=%zd "
            "-Dc_solveMaxWorkGroupSize=%zd "
            "-Dc_gatherWorkGroupSize=%zd "
            // forwarding from vectypes.h
            "-DDIM=%d -DXX=%d -DYY=%d -DZZ=%d "
            // decomposition parameter placeholders
            "-DwrapX=true -DwrapY=true ",
            warpSize, c_pmeGpuOrder, c_pmeSpreadGatherThreadsPerAtom,
            static_cast<float>(c_pmeMaxUnitcellShift), static_cast<int>(c_usePadding),
            static_cast<int>(c_skipNeutralAtoms), c_virialAndEnergyCount, spreadWorkGroupSize,
            solveMaxWorkGroupSize, gatherWorkGroupSize, DIM, XX, YY, ZZ);
    /* TODO when we have a proper MPI-aware logging module,
       the log output here should be written there */
    program = gmx::ocl::compileProgram(stderr, "gromacs/ewald", "pme_program.cl",
                                       commonDefines, deviceContext_.context(),
                                       deviceInfo->oclDeviceId, deviceInfo->deviceVendor);
    // Add device context to any compilation failure before rethrowing.
    catch (gmx::GromacsException& e)
        e.prependContext(gmx::formatString("Failed to compile PME kernels for GPU #%s\n",
                                           deviceInfo->device_name));
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    constexpr cl_uint expectedKernelCount = 9;
    // Has to be equal or larger than the number of kernel instances.
    // If it is not, CL_INVALID_VALUE will be thrown.
    std::vector<cl_kernel> kernels(expectedKernelCount, nullptr);
    cl_uint actualKernelCount = 0;
    // Ask OpenCL for every kernel defined in the compiled program at once.
    cl_int clError = clCreateKernelsInProgram(program, kernels.size(), kernels.data(), &actualKernelCount);
    if (clError != CL_SUCCESS)
        const std::string errorString = gmx::formatString(
                "Failed to create kernels for PME on GPU #%s:\n OpenCL error %d: %s",
                deviceInfo->device_name, clError, ocl_get_error_string(clError).c_str());
        GMX_THROW(gmx::InternalError(errorString));
    // Drop the unused tail slots so the loop below only visits real kernels.
    kernels.resize(actualKernelCount);
    // Scratch buffer for CL_KERNEL_FUNCTION_NAME queries (names must fit in 100 chars).
    std::array<char, 100> kernelNamesBuffer;
    for (const auto& kernel : kernels)
        clError = clGetKernelInfo(kernel, CL_KERNEL_FUNCTION_NAME, kernelNamesBuffer.size(),
                                  kernelNamesBuffer.data(), nullptr);
        if (clError != CL_SUCCESS)
            const std::string errorString = gmx::formatString(
                    "Failed to parse kernels for PME on GPU #%s:\n OpenCL error %d: %s",
                    deviceInfo->device_name, clError, ocl_get_error_string(clError).c_str());
            GMX_THROW(gmx::InternalError(errorString));
        // The names below must correspond to those defined in pme_program.cl
        // TODO use a map with string key instead?
        if (!strcmp(kernelNamesBuffer.data(), "pmeSplineKernel"))
            splineKernel = kernel;
        else if (!strcmp(kernelNamesBuffer.data(), "pmeSplineAndSpreadKernel"))
            splineAndSpreadKernel = kernel;
            // Same handle stored under both members (no extra clRetainKernel here).
            splineAndSpreadKernelWriteSplines = kernel;
            checkRequiredWarpSize(splineAndSpreadKernel, kernelNamesBuffer.data(), deviceInfo);
        else if (!strcmp(kernelNamesBuffer.data(), "pmeSpreadKernel"))
            spreadKernel = kernel;
            checkRequiredWarpSize(spreadKernel, kernelNamesBuffer.data(), deviceInfo);
        else if (!strcmp(kernelNamesBuffer.data(), "pmeGatherKernel"))
            gatherKernel = kernel;
            // Same handle stored under both members (no extra clRetainKernel here).
            gatherKernelReadSplines = kernel;
            checkRequiredWarpSize(gatherKernel, kernelNamesBuffer.data(), deviceInfo);
        else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveYZXKernel"))
            solveYZXKernel = kernel;
        else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveYZXEnergyKernel"))
            solveYZXEnergyKernel = kernel;
        else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveXYZKernel"))
            solveXYZKernel = kernel;
        else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveXYZEnergyKernel"))
            solveXYZEnergyKernel = kernel;
    // The kernels keep the program alive; the local reference is no longer needed.
    clReleaseProgram(program);