/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief
 * Implements PmeGpuProgramImpl, which stores permanent PME GPU context-derived data,
 * such as (compiled) kernel handles.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \author Andrey Alekseenko <al42and@gmail.com>
 * \ingroup module_ewald
 */

#include "gmxpre.h"

#include "gromacs/hardware/device_information.h"

#include "gromacs/gpu_utils/gmxsycl.h"
#include "gromacs/gpu_utils/syclutils.h"

#include "pme_gpu_program_impl.h"

#include "pme_gather_sycl.h"
#include "pme_spread_sycl.h"

#include "pme_gpu_constants.h"
#include "pme_gpu_internal.h" // for GridOrdering enum
#include "pme_gpu_types_host.h"

// PME interpolation order
constexpr int c_pmeOrder = 4;
// These hardcoded spread/gather parameters refer to the not-implemented PME GPU 2D decomposition in X/Y
constexpr bool c_wrapX = true;
constexpr bool c_wrapY = true;

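//! Returns the sub-group size to use for PME kernels on a device of the given vendor.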
static int subGroupSizeFromVendor(const DeviceInformation& deviceInfo)
{
    switch (deviceInfo.deviceVendor)
    {
        case DeviceVendor::Amd: return 64;   // Handle RDNA2 devices, Issue #3972.
        case DeviceVendor::Intel: return 16; // TODO: Choose best value, Issue #4153.
        case DeviceVendor::Nvidia: return 32;
        default: GMX_RELEASE_ASSERT(false, "Unknown device vendor"); return 0;
    }
}
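
/* The macros below declare extern templates for every spread and gather kernel flavor
 * used at runtime, so that this translation unit does not instantiate the kernel templates
 * itself; the actual instantiations are expected to live in the spread and gather kernel
 * translation units.
 */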
#define INSTANTIATE_SPREAD_2(                                                                      \
        order, computeSplines, spreadCharges, numGrids, writeGlobal, threadsPerAtom, subGroupSize) \
    extern template class PmeSplineAndSpreadKernel<order, computeSplines, spreadCharges, true, true, numGrids, writeGlobal, threadsPerAtom, subGroupSize>;

#define INSTANTIATE_SPREAD(order, numGrids, threadsPerAtom, subGroupSize)                   \
    INSTANTIATE_SPREAD_2(order, true, true, numGrids, true, threadsPerAtom, subGroupSize);  \
    INSTANTIATE_SPREAD_2(order, true, false, numGrids, true, threadsPerAtom, subGroupSize); \
    INSTANTIATE_SPREAD_2(order, false, true, numGrids, true, threadsPerAtom, subGroupSize); \
    INSTANTIATE_SPREAD_2(order, true, true, numGrids, false, threadsPerAtom, subGroupSize);

#define INSTANTIATE_GATHER_2(order, numGrids, readGlobal, threadsPerAtom, subGroupSize) \
    extern template class PmeGatherKernel<order, true, true, numGrids, readGlobal, threadsPerAtom, subGroupSize>;

#define INSTANTIATE_GATHER(order, numGrids, threadsPerAtom, subGroupSize)      \
    INSTANTIATE_GATHER_2(order, numGrids, true, threadsPerAtom, subGroupSize); \
    INSTANTIATE_GATHER_2(order, numGrids, false, threadsPerAtom, subGroupSize);

#define INSTANTIATE_X(x, order, subGroupSize)                              \
    INSTANTIATE_##x(order, 1, ThreadsPerAtom::Order, subGroupSize);        \
    INSTANTIATE_##x(order, 1, ThreadsPerAtom::OrderSquared, subGroupSize); \
    INSTANTIATE_##x(order, 2, ThreadsPerAtom::Order, subGroupSize);        \
    INSTANTIATE_##x(order, 2, ThreadsPerAtom::OrderSquared, subGroupSize);

#define INSTANTIATE(order, subGroupSize)        \
    INSTANTIATE_X(SPREAD, order, subGroupSize); \
    INSTANTIATE_X(GATHER, order, subGroupSize);
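
/* As an illustration, INSTANTIATE(4, 32) expands into extern template declarations
 * covering every runtime flavor for sub-group size 32, e.g.:
 *
 *   extern template class PmeSplineAndSpreadKernel<4, true, true, true, true, 1, true, ThreadsPerAtom::Order, 32>;
 *   extern template class PmeGatherKernel<4, true, true, 1, true, ThreadsPerAtom::Order, 32>;
 *
 * for all combinations of numGrids (1, 2), ThreadsPerAtom (Order, OrderSquared), and the
 * spline/spread/readGlobal flags.
 */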
#if GMX_SYCL_DPCPP
INSTANTIATE(4, 16);
#elif GMX_SYCL_HIPSYCL
INSTANTIATE(4, 32);
INSTANTIATE(4, 64);
#endif

//! Helper function to set proper kernel functor pointers
template<int subGroupSize>
static void setKernelPointers(struct PmeGpuProgramImpl* pmeGpuProgram)
{
    /* Not all combinations of the spline, spread, and splineAndSpread kernels are required.
     * If only the splines are computed (without spreading the charges), it does not make
     * sense not to write the spline data to global memory. Similarly, running the spread
     * kernel alone (without computing the splines) implies that the spline data is read
     * from global memory.
     */
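    // Single-grid kernels (numGrids == 1):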
    pmeGpuProgram->splineAndSpreadKernelSingle =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->splineAndSpreadKernelThPerAtom4Single =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->splineAndSpreadKernelWriteSplinesSingle =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->splineAndSpreadKernelWriteSplinesThPerAtom4Single =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->splineKernelSingle =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->splineKernelThPerAtom4Single =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->spreadKernelSingle =
            new PmeSplineAndSpreadKernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->spreadKernelThPerAtom4Single =
            new PmeSplineAndSpreadKernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order, subGroupSize>();
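
    // Dual-grid kernels (numGrids == 2), used when two sets of charges are spread
    // onto separate grids (e.g., for free-energy perturbation):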
    pmeGpuProgram->splineAndSpreadKernelDual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->splineAndSpreadKernelThPerAtom4Dual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->splineAndSpreadKernelWriteSplinesDual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->splineAndSpreadKernelWriteSplinesThPerAtom4Dual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->splineKernelDual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->splineKernelThPerAtom4Dual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->spreadKernelDual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->spreadKernelThPerAtom4Dual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order, subGroupSize>();
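
    // Gather kernels, computing the forces by interpolating from the grid(s):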
    pmeGpuProgram->gatherKernelSingle =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->gatherKernelThPerAtom4Single =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->gatherKernelReadSplinesSingle =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->gatherKernelReadSplinesThPerAtom4Single =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->gatherKernelDual =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->gatherKernelThPerAtom4Dual =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->gatherKernelReadSplinesDual =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->gatherKernelReadSplinesThPerAtom4Dual =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order, subGroupSize>();
}

PmeGpuProgramImpl::PmeGpuProgramImpl(const DeviceContext& deviceContext) :
    deviceContext_(deviceContext)
{
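    // Derive the kernel launch sizes from the sub-group size chosen for this vendor.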
    warpSize_             = subGroupSizeFromVendor(deviceContext.deviceInfo());
    spreadWorkGroupSize   = c_spreadMaxWarpsPerBlock * warpSize_;
    solveMaxWorkGroupSize = c_solveMaxWarpsPerBlock * warpSize_;
    gatherWorkGroupSize   = c_gatherMaxWarpsPerBlock * warpSize_;
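
    // Dispatch to the kernel functors compiled for the chosen sub-group size; only
    // the sizes declared for the active SYCL implementation are valid here.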
    switch (warpSize_)
    {
#if GMX_SYCL_DPCPP
        case 16: setKernelPointers<16>(this); break;
#elif GMX_SYCL_HIPSYCL
        case 32: setKernelPointers<32>(this); break;
        case 64: setKernelPointers<64>(this); break;
#endif
        default: GMX_RELEASE_ASSERT(false, "Invalid sub group size");
    }
}

PmeGpuProgramImpl::~PmeGpuProgramImpl()
{
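    // Free the kernel functors allocated in setKernelPointers().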
    delete splineKernelSingle;
    delete splineKernelThPerAtom4Single;
    delete spreadKernelSingle;
    delete spreadKernelThPerAtom4Single;
    delete splineAndSpreadKernelSingle;
    delete splineAndSpreadKernelThPerAtom4Single;
    delete splineAndSpreadKernelWriteSplinesSingle;
    delete splineAndSpreadKernelWriteSplinesThPerAtom4Single;
    delete splineKernelDual;
    delete splineKernelThPerAtom4Dual;
    delete spreadKernelDual;
    delete spreadKernelThPerAtom4Dual;
    delete splineAndSpreadKernelDual;
    delete splineAndSpreadKernelThPerAtom4Dual;
    delete splineAndSpreadKernelWriteSplinesDual;
    delete splineAndSpreadKernelWriteSplinesThPerAtom4Dual;
    delete gatherKernelSingle;
    delete gatherKernelThPerAtom4Single;
    delete gatherKernelReadSplinesSingle;
    delete gatherKernelReadSplinesThPerAtom4Single;
    delete gatherKernelDual;
    delete gatherKernelThPerAtom4Dual;
    delete gatherKernelReadSplinesDual;
    delete gatherKernelReadSplinesThPerAtom4Dual;
}