2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2018,2019, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
 * \brief
 * Implements PmeGpuProgramImpl, which stores permanent PME GPU context-derived data,
 * such as (compiled) kernel handles.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
46 #include "gromacs/gpu_utils/gmxopencl.h"
47 #include "gromacs/gpu_utils/ocl_compiler.h"
48 #include "gromacs/utility/stringutil.h"
50 #include "pme_gpu_constants.h"
51 #include "pme_gpu_internal.h" // for GridOrdering enum
52 #include "pme_gpu_program_impl.h"
53 #include "pme_gpu_types_host.h"
56 PmeGpuProgramImpl::PmeGpuProgramImpl(const gmx_device_info_t* deviceInfo)
58 // Context creation (which should happen outside of this class: #2522)
59 cl_platform_id platformId = deviceInfo->ocl_gpu_id.ocl_platform_id;
60 cl_device_id deviceId = deviceInfo->ocl_gpu_id.ocl_device_id;
61 cl_context_properties contextProperties[3];
62 contextProperties[0] = CL_CONTEXT_PLATFORM;
63 contextProperties[1] = reinterpret_cast<cl_context_properties>(platformId);
64 contextProperties[2] = 0; /* Terminates the list of properties */
67 context = clCreateContext(contextProperties, 1, &deviceId, nullptr, nullptr, &clError);
68 if (clError != CL_SUCCESS)
70 const std::string errorString = gmx::formatString(
71 "Failed to create context for PME on GPU #%s:\n OpenCL error %d: %s",
72 deviceInfo->device_name, clError, ocl_get_error_string(clError).c_str());
73 GMX_THROW(gmx::InternalError(errorString));
77 warpSize = gmx::ocl::getDeviceWarpSize(context, deviceId);
78 // TODO: for Intel ideally we'd want to set these based on the compiler warp size
79 // but given that we've done no tuning for Intel iGPU, this is as good as anything.
80 spreadWorkGroupSize = std::min(c_spreadMaxWarpsPerBlock * warpSize, deviceInfo->maxWorkGroupSize);
81 solveMaxWorkGroupSize = std::min(c_solveMaxWarpsPerBlock * warpSize, deviceInfo->maxWorkGroupSize);
82 gatherWorkGroupSize = std::min(c_gatherMaxWarpsPerBlock * warpSize, deviceInfo->maxWorkGroupSize);
84 compileKernels(deviceInfo);
PmeGpuProgramImpl::~PmeGpuProgramImpl()
{
    // TODO: log releasing errors
    // Accumulate OpenCL error codes; each release is independent, so the order
    // among kernels does not matter. The context is released after the kernels.
    cl_int gmx_used_in_debug status = 0;
    status |= clReleaseKernel(solveXYZKernel);
    status |= clReleaseKernel(solveXYZEnergyKernel);
    status |= clReleaseKernel(solveYZXKernel);
    status |= clReleaseKernel(solveYZXEnergyKernel);
    status |= clReleaseKernel(splineAndSpreadKernel);
    status |= clReleaseKernel(splineKernel);
    status |= clReleaseKernel(spreadKernel);
    status |= clReleaseKernel(gatherKernel);
    status |= clReleaseKernel(gatherReduceWithInputKernel);
    status |= clReleaseContext(context);
    GMX_ASSERT(status == CL_SUCCESS,
               gmx::formatString("Failed to release PME OpenCL resources %d: %s", status,
                                 ocl_get_error_string(status).c_str())
                       .c_str());
}
107 /*! \brief Ensure that spread/gather kernels have been compiled to a suitable warp size
109 * On Intel the exec width/warp is decided at compile-time and can be
110 * smaller than the minimum order^2 required in spread/gather ATM which
111 * we need to check for.
113 static void checkRequiredWarpSize(cl_kernel kernel, const char* kernelName, const gmx_device_info_t* deviceInfo)
115 if (deviceInfo->vendor_e == OCL_VENDOR_INTEL)
117 size_t kernelWarpSize = gmx::ocl::getKernelWarpSize(kernel, deviceInfo->ocl_gpu_id.ocl_device_id);
119 if (kernelWarpSize < c_pmeSpreadGatherMinWarpSize)
121 const std::string errorString = gmx::formatString(
122 "PME OpenCL kernels require >=%d execution width, but the %s kernel "
123 "has been compiled for the device %s to a %zu width and therefore it can not "
124 "execute correctly.",
125 c_pmeSpreadGatherMinWarpSize, kernelName, deviceInfo->device_name, kernelWarpSize);
126 GMX_THROW(gmx::InternalError(errorString));
131 void PmeGpuProgramImpl::compileKernels(const gmx_device_info_t* deviceInfo)
133 // We might consider storing program as a member variable if it's needed later
134 cl_program program = nullptr;
135 /* Need to catch std::bad_alloc here and during compilation string handling. */
138 /* Here we pass macros and static const int variables defined in include
139 * files outside as macros, to avoid including those files
140 * in the JIT compilation that happens at runtime.
142 const std::string commonDefines = gmx::formatString(
145 "-DatomsPerWarp=%zd "
146 "-DthreadsPerAtom=%d "
147 // forwarding from pme_grid.h, used for spline computation table sizes only
148 "-Dc_pmeMaxUnitcellShift=%f "
149 // forwarding PME behavior constants from pme_gpu_constants.h
151 "-Dc_skipNeutralAtoms=%d "
152 "-Dc_virialAndEnergyCount=%d "
153 // forwarding kernel work sizes
154 "-Dc_spreadWorkGroupSize=%zd "
155 "-Dc_solveMaxWorkGroupSize=%zd "
156 "-Dc_gatherWorkGroupSize=%zd "
157 // forwarding from vectypes.h
158 "-DDIM=%d -DXX=%d -DYY=%d -DZZ=%d "
159 // decomposition parameter placeholders
160 "-DwrapX=true -DwrapY=true ",
161 warpSize, c_pmeGpuOrder, warpSize / c_pmeSpreadGatherThreadsPerAtom,
162 c_pmeSpreadGatherThreadsPerAtom, static_cast<float>(c_pmeMaxUnitcellShift),
163 static_cast<int>(c_usePadding), static_cast<int>(c_skipNeutralAtoms), c_virialAndEnergyCount,
164 spreadWorkGroupSize, solveMaxWorkGroupSize, gatherWorkGroupSize, DIM, XX, YY, ZZ);
167 /* TODO when we have a proper MPI-aware logging module,
168 the log output here should be written there */
169 program = gmx::ocl::compileProgram(stderr, "gromacs/ewald", "pme_program.cl", commonDefines,
170 context, deviceInfo->ocl_gpu_id.ocl_device_id,
171 deviceInfo->vendor_e);
173 catch (gmx::GromacsException& e)
175 e.prependContext(gmx::formatString("Failed to compile PME kernels for GPU #%s\n",
176 deviceInfo->device_name));
180 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
182 constexpr cl_uint expectedKernelCount = 9;
183 // Has to be equal or larger than the number of kernel instances.
184 // If it is not, CL_INVALID_VALUE will be thrown.
185 std::vector<cl_kernel> kernels(expectedKernelCount, nullptr);
186 cl_uint actualKernelCount = 0;
187 cl_int clError = clCreateKernelsInProgram(program, kernels.size(), kernels.data(), &actualKernelCount);
188 if (clError != CL_SUCCESS)
190 const std::string errorString = gmx::formatString(
191 "Failed to create kernels for PME on GPU #%s:\n OpenCL error %d: %s",
192 deviceInfo->device_name, clError, ocl_get_error_string(clError).c_str());
193 GMX_THROW(gmx::InternalError(errorString));
195 kernels.resize(actualKernelCount);
197 std::array<char, 100> kernelNamesBuffer;
198 for (const auto& kernel : kernels)
200 clError = clGetKernelInfo(kernel, CL_KERNEL_FUNCTION_NAME, kernelNamesBuffer.size(),
201 kernelNamesBuffer.data(), nullptr);
202 if (clError != CL_SUCCESS)
204 const std::string errorString = gmx::formatString(
205 "Failed to parse kernels for PME on GPU #%s:\n OpenCL error %d: %s",
206 deviceInfo->device_name, clError, ocl_get_error_string(clError).c_str());
207 GMX_THROW(gmx::InternalError(errorString));
210 // The names below must correspond to those defined in pme_program.cl
211 // TODO use a map with string key instead?
212 if (!strcmp(kernelNamesBuffer.data(), "pmeSplineKernel"))
214 splineKernel = kernel;
216 else if (!strcmp(kernelNamesBuffer.data(), "pmeSplineAndSpreadKernel"))
218 splineAndSpreadKernel = kernel;
219 splineAndSpreadKernelWriteSplines = kernel;
220 checkRequiredWarpSize(splineAndSpreadKernel, kernelNamesBuffer.data(), deviceInfo);
222 else if (!strcmp(kernelNamesBuffer.data(), "pmeSpreadKernel"))
224 spreadKernel = kernel;
225 checkRequiredWarpSize(spreadKernel, kernelNamesBuffer.data(), deviceInfo);
227 else if (!strcmp(kernelNamesBuffer.data(), "pmeGatherKernel"))
229 gatherKernel = kernel;
230 gatherKernelReadSplines = kernel;
231 checkRequiredWarpSize(gatherKernel, kernelNamesBuffer.data(), deviceInfo);
233 else if (!strcmp(kernelNamesBuffer.data(), "pmeGatherReduceWithInputKernel"))
235 gatherReduceWithInputKernel = kernel;
236 gatherReduceWithInputKernelReadSplines = kernel;
237 checkRequiredWarpSize(gatherReduceWithInputKernel, kernelNamesBuffer.data(), deviceInfo);
239 else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveYZXKernel"))
241 solveYZXKernel = kernel;
243 else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveYZXEnergyKernel"))
245 solveYZXEnergyKernel = kernel;
247 else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveXYZKernel"))
249 solveXYZKernel = kernel;
251 else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveXYZEnergyKernel"))
253 solveXYZEnergyKernel = kernel;
256 clReleaseProgram(program);