/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief Implements PME OpenCL solving kernel.
 * When including this and other PME OpenCL kernel files, plenty of common
 * constants/macros are expected to be defined.
 * For details, please see how pme_program.cl is compiled in pme_gpu_program_impl_ocl.cpp.
 *
 * This file's solving kernel specifically expects the following definitions for its flavors:
 *
 * - gridOrdering must be defined to either XYZ or YZX,
 *   and corresponds to the dimension order of the grid (GridOrdering enum in CUDA kernels);
 * - computeEnergyAndVirial must evaluate to true or false, and expresses
 *   whether the energy/virial reduction is performed.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 */
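
/* Illustration only (these definitions live in the host-side program assembly,
 * not in this file): a hypothetical compilation of one kernel flavor could
 * prepend something along these lines before including this file:
 *
 *   #define gridOrdering              XYZ
 *   #define computeEnergyAndVirial    true
 *   #define CUSTOMIZED_KERNEL_NAME(x) pmeSolveXYZEnergyKernel
 *
 * so that the single kernel body below yields one specialized kernel per flavor.
 * The actual names and mechanism are in pme_program.cl / pme_gpu_program_impl_ocl.cpp.
 */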

#include "gromacs/gpu_utils/vectype_ops.clh"

#include "pme_gpu_types.h"

/*! \brief
 * PME complex grid solver kernel function.
 * Please see the file description for additional defines which this kernel expects.
 *
 * \param[in]     kernelParams        Input PME GPU data in constant memory.
 * \param[in]     gm_splineModuli     B-spline moduli.
 * \param[out]    gm_virialAndEnergy  Reduced virial and energy (only written when
 *                                    computeEnergyAndVirial == true).
 * \param[in,out] gm_grid             Fourier grid to transform.
 */
__attribute__((work_group_size_hint(c_solveMaxWorkGroupSize, 1, 1))) __kernel void CUSTOMIZED_KERNEL_NAME(pme_solve_kernel)(
        const struct PmeOpenCLKernelParams kernelParams,
        __global const float* __restrict__ gm_splineModuli,
        __global float* __restrict__ gm_virialAndEnergy,
        __global float2* __restrict__ gm_grid)
{
    /* This kernel supports 2 different grid dimension orderings: YZX and XYZ */
    int majorDim, middleDim, minorDim;
    if (gridOrdering == YZX)
    {
        majorDim  = YY;
        middleDim = ZZ;
        minorDim  = XX;
    }
    if (gridOrdering == XYZ)
    {
        majorDim  = XX;
        middleDim = YY;
        minorDim  = ZZ;
    }

    __global const float* __restrict__ gm_splineValueMajor =
            gm_splineModuli + kernelParams.grid.splineValuesOffset[majorDim];
    __global const float* __restrict__ gm_splineValueMiddle =
            gm_splineModuli + kernelParams.grid.splineValuesOffset[middleDim];
    __global const float* __restrict__ gm_splineValueMinor =
            gm_splineModuli + kernelParams.grid.splineValuesOffset[minorDim];

    /* Various grid sizes and indices */
    const int localOffsetMinor  = 0; // unused
    const int localOffsetMajor  = 0; // unused
    const int localOffsetMiddle = 0; // unused
    const int localSizeMinor    = kernelParams.grid.complexGridSizePadded[minorDim];
    const int localSizeMiddle   = kernelParams.grid.complexGridSizePadded[middleDim];
    const int localCountMiddle  = kernelParams.grid.complexGridSize[middleDim];
    const int localCountMinor   = kernelParams.grid.complexGridSize[minorDim];
    const int nMajor            = kernelParams.grid.realGridSize[majorDim];
    const int nMiddle           = kernelParams.grid.realGridSize[middleDim];
    const int nMinor            = kernelParams.grid.realGridSize[minorDim];
    const int maxkMajor         = (nMajor + 1) / 2;  // X or Y
    const int maxkMiddle        = (nMiddle + 1) / 2; // Y or Z => only check for !YZX
    const int maxkMinor         = (nMinor + 1) / 2;  // Z or X => only check for YZX
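
    /* Illustrative example (not from the original source): with nMajor = 10,
     * maxkMajor = (10 + 1) / 2 = 5, so a grid index kMajor = 7 is folded below
     * to the negative wave-vector component 7 - nMajor = -3. */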

    /* Each thread works on one cell of the Fourier space complex 3D grid (gm_grid).
     * Each block handles up to c_solveMaxWorkGroupSize cells -
     * depending on the grid contiguous dimension size,
     * that can range from a part of a single gridline to several complete gridlines.
     */
    const int threadLocalId     = get_local_id(XX);
    const int gridLineSize      = localCountMinor;
    const int gridLineIndex     = threadLocalId / gridLineSize;
    const int gridLineCellIndex = threadLocalId - gridLineSize * gridLineIndex;
    const int gridLinesPerBlock = max((int)(get_local_size(XX)) / gridLineSize, 1);
    const int activeWarps       = ((int)get_local_size(XX) / warp_size);
    assert((get_group_id(XX) * get_local_size(XX)) < MAX_INT);
    const int indexMinor  = (int)get_group_id(XX) * (int)get_local_size(XX) + gridLineCellIndex;
    const int indexMiddle = (int)get_group_id(YY) * gridLinesPerBlock + gridLineIndex;
    const int indexMajor  = (int)get_group_id(ZZ);
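
    /* Illustrative example (not from the original source): with a work group of
     * 64 threads and gridLineSize = 20, gridLinesPerBlock = 3; threads 0-59 cover
     * three gridlines, while threads 60-63 fail the gridLineIndex bound check below
     * and stay idle for the solve (but still participate in the reduction). */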

    /* Optional outputs */
    float energy = 0.0F;
    float virxx  = 0.0F;
    float virxy  = 0.0F;
    float virxz  = 0.0F;
    float viryy  = 0.0F;
    float viryz  = 0.0F;
    float virzz  = 0.0F;

    assert(indexMajor < kernelParams.grid.complexGridSize[majorDim]);
    if ((indexMiddle < localCountMiddle) & (indexMinor < localCountMinor)
        & (gridLineIndex < gridLinesPerBlock))
    {
        /* The offset should be equal to the global thread index for coalesced access */
        const int gridIndex = (indexMajor * localSizeMiddle + indexMiddle) * localSizeMinor + indexMinor;
        __global float2* __restrict__ gm_gridCell = gm_grid + gridIndex;

        const int kMajor = indexMajor + localOffsetMajor;
        /* Checking either X in XYZ, or Y in YZX cases */
        const float mMajor = (float)((kMajor < maxkMajor) ? kMajor : (kMajor - nMajor));

        const int kMiddle = indexMiddle + localOffsetMiddle;
        float     mMiddle = (float)kMiddle;
        /* Checking Y in XYZ case */
        if (gridOrdering == XYZ)
        {
            mMiddle = (float)((kMiddle < maxkMiddle) ? kMiddle : (kMiddle - nMiddle));
        }

        const int kMinor = localOffsetMinor + indexMinor;
        float     mMinor = (float)kMinor;
        /* Checking X in YZX case */
        if (gridOrdering == YZX)
        {
            mMinor = (float)((kMinor < maxkMinor) ? kMinor : (kMinor - nMinor));
        }

        /* We should skip the k-space point (0,0,0) */
        const bool notZeroPoint = (kMinor > 0) | (kMajor > 0) | (kMiddle > 0);
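        /* At (0,0,0), m2k would be zero, making both denom and the 1.0F / m2k
         * virial factor below divide by zero, so that point is excluded. */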

        float mX, mY, mZ;
        if (gridOrdering == YZX)
        {
            mX = mMinor;
            mY = mMajor;
            mZ = mMiddle;
        }
        if (gridOrdering == XYZ)
        {
            mX = mMajor;
            mY = mMiddle;
            mZ = mMinor;
        }

        /* 0.5 correction factor for the first and last components of a Z dimension */
        float       corner_fac   = 1.0F;
        const float z_corner_fac = 0.5F;
        if (gridOrdering == YZX)
        {
            if ((kMiddle == 0) | (kMiddle == maxkMiddle))
            {
                corner_fac = z_corner_fac;
            }
        }
        if (gridOrdering == XYZ)
        {
            if ((kMinor == 0) | (kMinor == maxkMinor))
            {
                corner_fac = z_corner_fac;
            }
        }

        if (notZeroPoint)
        {
            const float mhxk = mX * kernelParams.current.recipBox[XX][XX];
            const float mhyk = mX * kernelParams.current.recipBox[XX][YY]
                               + mY * kernelParams.current.recipBox[YY][YY];
            const float mhzk = mX * kernelParams.current.recipBox[XX][ZZ]
                               + mY * kernelParams.current.recipBox[YY][ZZ]
                               + mZ * kernelParams.current.recipBox[ZZ][ZZ];

            const float m2k = mhxk * mhxk + mhyk * mhyk + mhzk * mhzk;

            const float denom = m2k * M_PI_F * kernelParams.current.boxVolume
                                * gm_splineValueMajor[kMajor] * gm_splineValueMiddle[kMiddle]
                                * gm_splineValueMinor[kMinor];
            assert(isfinite(denom));
            assert(denom != 0.0F);
            const float tmp1   = exp(-kernelParams.grid.ewaldFactor * m2k);
            const float etermk = kernelParams.constants.elFactor * tmp1 / denom;
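            /* etermk is the scalar reciprocal-space (Ewald) influence function at this
             * k-vector: elFactor * exp(-ewaldFactor * m^2) / (pi * V * m^2 * B(m)),
             * with B(m) the product of the three B-spline moduli sampled above. */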

            float2       gridValue    = *gm_gridCell;
            const float2 oldGridValue = gridValue;

            gridValue.x *= etermk;
            gridValue.y *= etermk;
            *gm_gridCell = gridValue;

            if (computeEnergyAndVirial)
            {
                const float tmp1k =
                        2.0F * (gridValue.x * oldGridValue.x + gridValue.y * oldGridValue.y);

                const float vfactor = (kernelParams.grid.ewaldFactor + 1.0F / m2k) * 2.0F;
                const float ets2    = corner_fac * tmp1k;
                energy              = ets2;

                const float ets2vf = ets2 * vfactor;
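
                /* Each virial component below has the form
                 * vir_ab = ets2 * (vfactor * mh_a * mh_b - delta_ab). */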
                virxx = ets2vf * mhxk * mhxk - ets2;
                virxy = ets2vf * mhxk * mhyk;
                virxz = ets2vf * mhxk * mhzk;
                viryy = ets2vf * mhyk * mhyk - ets2;
                viryz = ets2vf * mhyk * mhzk;
                virzz = ets2vf * mhzk * mhzk - ets2;
            }
        }
    }

    // This is only for the reduction below. OpenCL 1.2: all local memory must be declared at kernel scope.
    __local float sm_virialAndEnergy[c_virialAndEnergyCount * warp_size];

    /* Optional energy/virial reduction */
    if (computeEnergyAndVirial)
    {
        // TODO: implement AMD intrinsics reduction, like with shuffles in CUDA version. #2514

        /* Shared memory reduction with atomics.
         * Each component is first reduced into warp_size positions in the shared memory;
         * then the first few warps reduce everything further and add to the global memory.
         * This can likely be improved, but is anyway faster than the previous straightforward
         * reduction, which was using too much shared memory (for storing all 7 floats on each thread).
         */
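
        /* Worked example (illustrative): with warp_size = 32 and a work group of 128
         * threads (4 warps), warp 0 stores its 7 values into the 7 * warp_size shared
         * slots, warps 1-3 atomically add theirs into the same slots, and each of the
         * 7 components is then tree-reduced over 32 lanes and added to global memory. */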

        const int  lane      = threadLocalId & (warp_size - 1);
        const int  warpIndex = threadLocalId / warp_size;
        const bool firstWarp = (warpIndex == 0);
        if (firstWarp)
        {
            sm_virialAndEnergy[0 * warp_size + lane] = virxx;
            sm_virialAndEnergy[1 * warp_size + lane] = viryy;
            sm_virialAndEnergy[2 * warp_size + lane] = virzz;
            sm_virialAndEnergy[3 * warp_size + lane] = virxy;
            sm_virialAndEnergy[4 * warp_size + lane] = virxz;
            // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers, readability-magic-numbers)
            sm_virialAndEnergy[5 * warp_size + lane] = viryz;
            // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers, readability-magic-numbers)
            sm_virialAndEnergy[6 * warp_size + lane] = energy;
        }
        barrier(CLK_LOCAL_MEM_FENCE);

        if (!firstWarp)
        {
            atomicAdd_l_f(sm_virialAndEnergy + 0 * warp_size + lane, virxx);
            atomicAdd_l_f(sm_virialAndEnergy + 1 * warp_size + lane, viryy);
            atomicAdd_l_f(sm_virialAndEnergy + 2 * warp_size + lane, virzz);
            atomicAdd_l_f(sm_virialAndEnergy + 3 * warp_size + lane, virxy);
            atomicAdd_l_f(sm_virialAndEnergy + 4 * warp_size + lane, virxz);
            // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers, readability-magic-numbers)
            atomicAdd_l_f(sm_virialAndEnergy + 5 * warp_size + lane, viryz);
            // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers, readability-magic-numbers)
            atomicAdd_l_f(sm_virialAndEnergy + 6 * warp_size + lane, energy);
        }
        barrier(CLK_LOCAL_MEM_FENCE);

        const int numIter = (c_virialAndEnergyCount + activeWarps - 1) / activeWarps;
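        /* E.g. (illustrative): 4 active warps and 7 components give numIter = 2;
         * warp w then handles components w and w + 4 (the latter only when it exists). */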
        for (int i = 0; i < numIter; i++)
        {
            const int componentIndex = i * activeWarps + warpIndex;
            if (componentIndex < c_virialAndEnergyCount)
            {
                const int targetIndex = componentIndex * warp_size + lane;
                for (int reductionStride = warp_size >> 1; reductionStride >= 1; reductionStride >>= 1)
                {
                    if (lane < reductionStride)
                    {
                        sm_virialAndEnergy[targetIndex] +=
                                sm_virialAndEnergy[targetIndex + reductionStride];
                    }
#ifdef _NVIDIA_SOURCE_
                    /* FIXME: this execution happens within execution width aka warp, but somehow
                     * NVIDIA OpenCL of all things fails without the memory barrier here. #2519
                     */
                    barrier(CLK_LOCAL_MEM_FENCE);
#endif
                }
                if (lane == 0)
                {
                    atomicAdd_g_f(gm_virialAndEnergy + componentIndex, sm_virialAndEnergy[targetIndex]);
                }
            }
        }
    }
}