16bfac9aadf8fad87810782969f209c7f1cfde69
[alexxy/gromacs.git] / src / gromacs / ewald / pme-gpu-program-impl-ocl.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief
 * Implements PmeGpuProgramImpl, which stores permanent PME GPU context-derived data,
 * such as (compiled) kernel handles.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "gromacs/gpu_utils/gmxopencl.h"
#include "gromacs/gpu_utils/ocl_compiler.h"
#include "gromacs/utility/stringutil.h"

#include "pme-gpu-constants.h"
#include "pme-gpu-internal.h" // for GridOrdering enum
#include "pme-gpu-program-impl.h"
#include "pme-gpu-types-host.h"
#include "pme-grid.h"

PmeGpuProgramImpl::PmeGpuProgramImpl(const gmx_device_info_t *deviceInfo)
{
    // Context creation (which should happen outside of this class: #2522)
    cl_platform_id        platformId = deviceInfo->ocl_gpu_id.ocl_platform_id;
    cl_device_id          deviceId   = deviceInfo->ocl_gpu_id.ocl_device_id;
    cl_context_properties contextProperties[3];
    contextProperties[0] = CL_CONTEXT_PLATFORM;
    contextProperties[1] = reinterpret_cast<cl_context_properties>(platformId);
    contextProperties[2] = 0; /* Terminates the list of properties */

    cl_int  clError;
    context = clCreateContext(contextProperties, 1, &deviceId, nullptr, nullptr, &clError);
    if (clError != CL_SUCCESS)
    {
        const std::string errorString = gmx::formatString("Failed to create context for PME on GPU #%s:\n OpenCL error %d: %s",
                                                          deviceInfo->device_name, clError, ocl_get_error_string(clError).c_str());
        GMX_THROW(gmx::InternalError(errorString));
    }

    // kernel parameters
    warpSize            = gmx::ocl::getDeviceWarpSize(context, deviceId);
    // TODO: for Intel, ideally we would want to set these based on the warp size
    // chosen by the compiler, but given that we have done no tuning for Intel
    // iGPUs, this is as good as anything.
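    // Each work-group size below is the kernel's maximum warp count multiplied by
    // the warp size, clamped to the device's maximum work-group size.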
    spreadWorkGroupSize = std::min(c_spreadMaxWarpsPerBlock * warpSize,
                                   deviceInfo->maxWorkGroupSize);
    solveMaxWorkGroupSize = std::min(c_solveMaxWarpsPerBlock * warpSize,
                                     deviceInfo->maxWorkGroupSize);
    gatherWorkGroupSize = std::min(c_gatherMaxWarpsPerBlock * warpSize,
                                   deviceInfo->maxWorkGroupSize);

    compileKernels(deviceInfo);
}

PmeGpuProgramImpl::~PmeGpuProgramImpl()
{
    // TODO: log releasing errors
    cl_int gmx_used_in_debug stat = 0;
    stat |= clReleaseKernel(splineAndSpreadKernel);
    stat |= clReleaseKernel(splineKernel);
    stat |= clReleaseKernel(spreadKernel);
    stat |= clReleaseKernel(gatherKernel);
    stat |= clReleaseKernel(gatherReduceWithInputKernel);
    stat |= clReleaseKernel(solveXYZKernel);
    stat |= clReleaseKernel(solveXYZEnergyKernel);
    stat |= clReleaseKernel(solveYZXKernel);
    stat |= clReleaseKernel(solveYZXEnergyKernel);
    stat |= clReleaseContext(context);
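    // A non-zero combined status only tells us that at least one release failed;
    // it does not identify which one (see the TODO above about logging).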
    GMX_ASSERT(stat == CL_SUCCESS, gmx::formatString("Failed to release PME OpenCL resources %d: %s",
                                                     stat, ocl_get_error_string(stat).c_str()).c_str());
}

/*! \brief Ensure that spread/gather kernels have been compiled to a suitable warp size
 *
 * On Intel the execution width (warp size) is decided at compile time and can
 * currently be smaller than the minimum width of order^2 required by the
 * spread/gather kernels, which we need to check for.
 */
static void checkRequiredWarpSize(const cl_kernel          kernel,
                                  const char*              kernelName,
                                  const gmx_device_info_t *deviceInfo)
{
    if (deviceInfo->vendor_e == OCL_VENDOR_INTEL)
    {
        size_t kernelWarpSize = gmx::ocl::getKernelWarpSize(kernel, deviceInfo->ocl_gpu_id.ocl_device_id);

        if (kernelWarpSize < c_pmeSpreadGatherMinWarpSize)
        {
            const std::string errorString = gmx::formatString("PME OpenCL kernels require an execution width of at least %d, but the %s kernel "
                                                              "has been compiled for the device %s to a width of %zu and therefore cannot execute correctly.",
                                                              c_pmeSpreadGatherMinWarpSize, kernelName,
                                                              deviceInfo->device_name, kernelWarpSize);
            GMX_THROW(gmx::InternalError(errorString));
        }
    }
}

void PmeGpuProgramImpl::compileKernels(const gmx_device_info_t *deviceInfo)
{
    // We might consider storing program as a member variable if it's needed later
    cl_program program = nullptr;
    /* Need to catch std::bad_alloc here and during compilation string handling. */
    try
    {
        /* Here we forward the macros and static const variables defined in the
         * include files as -D macros, to avoid having to include those files
         * in the JIT compilation that happens at runtime.
         */
        const std::string commonDefines = gmx::formatString(
                    "-Dwarp_size=%zd "
                    "-Dorder=%d "
                    "-DatomsPerWarp=%zd "
                    "-DthreadsPerAtom=%d "
                    // forwarding from pme-grid.h, used for spline computation table sizes only
                    "-Dc_pmeMaxUnitcellShift=%f "
                    // forwarding PME behavior constants from pme-gpu-constants.h
                    "-Dc_usePadding=%d "
                    "-Dc_skipNeutralAtoms=%d "
                    "-Dc_virialAndEnergyCount=%d "
                    // forwarding kernel work sizes
                    "-Dc_spreadWorkGroupSize=%zd "
                    "-Dc_solveMaxWorkGroupSize=%zd "
                    "-Dc_gatherWorkGroupSize=%zd "
                    // forwarding from vectypes.h
                    "-DDIM=%d -DXX=%d -DYY=%d -DZZ=%d "
                    // decomposition parameter placeholders
                    "-DwrapX=true -DwrapY=true ",
                    warpSize,
                    c_pmeGpuOrder,
                    warpSize / c_pmeSpreadGatherThreadsPerAtom,
                    c_pmeSpreadGatherThreadsPerAtom,
                    static_cast<float>(c_pmeMaxUnitcellShift),
                    c_usePadding,
                    c_skipNeutralAtoms,
                    c_virialAndEnergyCount,
                    spreadWorkGroupSize,
                    solveMaxWorkGroupSize,
                    gatherWorkGroupSize,
                    DIM, XX, YY, ZZ);
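        // For illustration only: on a device with a 32-wide warp and the default
        // 4th-order splines this expands to roughly
        // "-Dwarp_size=32 -Dorder=4 -DatomsPerWarp=2 -DthreadsPerAtom=16 ..."
        // (the exact values depend on the constants in pme-gpu-constants.h).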
        try
        {
            /* TODO when we have a proper MPI-aware logging module,
               the log output here should be written there */
            program = gmx::ocl::compileProgram(stderr,
                                               "gromacs/ewald",
                                               "pme-program.cl",
                                               commonDefines,
                                               context,
                                               deviceInfo->ocl_gpu_id.ocl_device_id,
                                               deviceInfo->vendor_e);
        }
        catch (gmx::GromacsException &e)
        {
            e.prependContext(gmx::formatString("Failed to compile PME kernels for GPU #%s\n",
                                               deviceInfo->device_name));
            throw;
        }
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

    // Has to be equal to or larger than the number of kernel instances;
    // otherwise clCreateKernelsInProgram below returns CL_INVALID_VALUE.
    constexpr cl_uint      expectedKernelCount = 9;
    std::vector<cl_kernel> kernels(expectedKernelCount, nullptr);
    cl_uint                actualKernelCount = 0;
    cl_int                 clError           = clCreateKernelsInProgram(program, kernels.size(), kernels.data(), &actualKernelCount);
    if (clError != CL_SUCCESS)
    {
        const std::string errorString = gmx::formatString("Failed to create kernels for PME on GPU #%s:\n OpenCL error %d: %s",
                                                          deviceInfo->device_name, clError, ocl_get_error_string(clError).c_str());
        GMX_THROW(gmx::InternalError(errorString));
    }
    kernels.resize(actualKernelCount);

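    // clCreateKernelsInProgram does not guarantee any particular ordering of the
    // kernels, so each handle is assigned by querying the kernel's function name.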
    std::array<char, 100> kernelNamesBuffer;
    for (const auto &kernel : kernels)
    {
        clError = clGetKernelInfo(kernel, CL_KERNEL_FUNCTION_NAME,
                                  kernelNamesBuffer.size(), kernelNamesBuffer.data(), nullptr);
        if (clError != CL_SUCCESS)
        {
            const std::string errorString = gmx::formatString("Failed to parse kernels for PME on GPU #%s:\n OpenCL error %d: %s",
                                                              deviceInfo->device_name, clError, ocl_get_error_string(clError).c_str());
            GMX_THROW(gmx::InternalError(errorString));
        }

        // The names below must correspond to those defined in pme-program.cl
        // TODO use a map with string key instead?
        if (!strcmp(kernelNamesBuffer.data(), "pmeSplineKernel"))
        {
            splineKernel = kernel;
        }
        else if (!strcmp(kernelNamesBuffer.data(), "pmeSplineAndSpreadKernel"))
        {
            splineAndSpreadKernel = kernel;
            checkRequiredWarpSize(splineAndSpreadKernel, kernelNamesBuffer.data(), deviceInfo);
        }
        else if (!strcmp(kernelNamesBuffer.data(), "pmeSpreadKernel"))
        {
            spreadKernel = kernel;
            checkRequiredWarpSize(spreadKernel, kernelNamesBuffer.data(), deviceInfo);
        }
        else if (!strcmp(kernelNamesBuffer.data(), "pmeGatherKernel"))
        {
            gatherKernel = kernel;
            checkRequiredWarpSize(gatherKernel, kernelNamesBuffer.data(), deviceInfo);
        }
        else if (!strcmp(kernelNamesBuffer.data(), "pmeGatherReduceWithInputKernel"))
        {
            gatherReduceWithInputKernel = kernel;
            checkRequiredWarpSize(gatherReduceWithInputKernel, kernelNamesBuffer.data(), deviceInfo);
        }
        else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveYZXKernel"))
        {
            solveYZXKernel = kernel;
        }
        else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveYZXEnergyKernel"))
        {
            solveYZXEnergyKernel = kernel;
        }
        else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveXYZKernel"))
        {
            solveXYZKernel = kernel;
        }
        else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveXYZEnergyKernel"))
        {
            solveXYZEnergyKernel = kernel;
        }
    }
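    // The kernel objects keep the program alive, so releasing it here is safe;
    // the program is only deleted once all its kernels have been released.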
    clReleaseProgram(program);
}