Implement PME solve in SYCL
src/gromacs/ewald/pme_gpu_program_impl_sycl.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief
 * Implements PmeGpuProgramImpl, which stores permanent PME GPU context-derived data,
 * such as (compiled) kernel handles.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \author Andrey Alekseenko <al42and@gmail.com>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "gromacs/gpu_utils/gmxsycl.h"
#include "gromacs/gpu_utils/syclutils.h"
#include "gromacs/hardware/device_information.h"

#include "pme_gather_sycl.h"
#include "pme_gpu_program_impl.h"
#include "pme_solve_sycl.h"
#include "pme_spread_sycl.h"

#include "pme_gpu_constants.h"
#include "pme_gpu_internal.h" // for GridOrdering enum
#include "pme_gpu_types_host.h"

// PME interpolation order
constexpr int c_pmeOrder = 4;
// Grid wrapping in X/Y is hardcoded for the spread/gather kernels because
// PME GPU 2D decomposition in X/Y is not implemented.
constexpr bool c_wrapX = true;
constexpr bool c_wrapY = true;

// Indices of the free-energy perturbation states A and B.
constexpr int c_stateA = 0;
constexpr int c_stateB = 1;

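//! Returns the sub-group size to use for PME kernels on a device from the given vendor.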
static int subGroupSizeFromVendor(const DeviceInformation& deviceInfo)
{
    switch (deviceInfo.deviceVendor)
    {
        case DeviceVendor::Amd: return 64;   // Handle RDNA2 devices, Issue #3972.
        case DeviceVendor::Intel: return 16; // TODO: Choose best value, Issue #4153.
        case DeviceVendor::Nvidia: return 32;
        default: GMX_RELEASE_ASSERT(false, "Unknown device vendor"); return 0;
    }
}

#define INSTANTIATE_SPREAD_2(                                                                      \
        order, computeSplines, spreadCharges, numGrids, writeGlobal, threadsPerAtom, subGroupSize) \
    extern template class PmeSplineAndSpreadKernel<order, computeSplines, spreadCharges, true, true, numGrids, writeGlobal, threadsPerAtom, subGroupSize>;

#define INSTANTIATE_SPREAD(order, numGrids, threadsPerAtom, subGroupSize)                   \
    INSTANTIATE_SPREAD_2(order, true, true, numGrids, true, threadsPerAtom, subGroupSize);  \
    INSTANTIATE_SPREAD_2(order, true, false, numGrids, true, threadsPerAtom, subGroupSize); \
    INSTANTIATE_SPREAD_2(order, false, true, numGrids, true, threadsPerAtom, subGroupSize); \
    INSTANTIATE_SPREAD_2(order, true, true, numGrids, false, threadsPerAtom, subGroupSize);

#define INSTANTIATE_GATHER_2(order, numGrids, readGlobal, threadsPerAtom, subGroupSize) \
    extern template class PmeGatherKernel<order, true, true, numGrids, readGlobal, threadsPerAtom, subGroupSize>;

#define INSTANTIATE_GATHER(order, numGrids, threadsPerAtom, subGroupSize)      \
    INSTANTIATE_GATHER_2(order, numGrids, true, threadsPerAtom, subGroupSize); \
    INSTANTIATE_GATHER_2(order, numGrids, false, threadsPerAtom, subGroupSize);

#define INSTANTIATE_X(x, order, subGroupSize)                              \
    INSTANTIATE_##x(order, 1, ThreadsPerAtom::Order, subGroupSize);        \
    INSTANTIATE_##x(order, 1, ThreadsPerAtom::OrderSquared, subGroupSize); \
    INSTANTIATE_##x(order, 2, ThreadsPerAtom::Order, subGroupSize);        \
    INSTANTIATE_##x(order, 2, ThreadsPerAtom::OrderSquared, subGroupSize);

#define INSTANTIATE_SOLVE(subGroupSize)                                                     \
    extern template class PmeSolveKernel<GridOrdering::XYZ, false, c_stateA, subGroupSize>; \
    extern template class PmeSolveKernel<GridOrdering::XYZ, true, c_stateA, subGroupSize>;  \
    extern template class PmeSolveKernel<GridOrdering::YZX, false, c_stateA, subGroupSize>; \
    extern template class PmeSolveKernel<GridOrdering::YZX, true, c_stateA, subGroupSize>;  \
    extern template class PmeSolveKernel<GridOrdering::XYZ, false, c_stateB, subGroupSize>; \
    extern template class PmeSolveKernel<GridOrdering::XYZ, true, c_stateB, subGroupSize>;  \
    extern template class PmeSolveKernel<GridOrdering::YZX, false, c_stateB, subGroupSize>; \
    extern template class PmeSolveKernel<GridOrdering::YZX, true, c_stateB, subGroupSize>;

#define INSTANTIATE(order, subGroupSize)        \
    INSTANTIATE_X(SPREAD, order, subGroupSize); \
    INSTANTIATE_X(GATHER, order, subGroupSize); \
    INSTANTIATE_SOLVE(subGroupSize);

#if GMX_SYCL_DPCPP
INSTANTIATE(4, 16);
#elif GMX_SYCL_HIPSYCL
INSTANTIATE(4, 32);
INSTANTIATE(4, 64);
#endif

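/* The INSTANTIATE* macros above produce extern-template declarations, which
 * suppress implicit instantiation of the kernel class templates in this
 * translation unit. For illustration, INSTANTIATE_SOLVE(16) expands to eight
 * declarations of the form
 *     extern template class PmeSolveKernel<GridOrdering::XYZ, false, c_stateA, 16>;
 * The matching explicit instantiations are expected to live in the kernel
 * source files (e.g., pme_spread_sycl.cpp, pme_gather_sycl.cpp, pme_solve_sycl.cpp).
 */
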
//! Helper function that sets the kernel functor pointers for the given sub-group size
template<int subGroupSize>
static void setKernelPointers(struct PmeGpuProgramImpl* pmeGpuProgram)
{
    /* Not all combinations of the splineAndSpread, spline, and spread kernels are required.
     * If only the spline kernel is run (without the spread), it makes no sense not to write
     * the spline data to global memory. Similarly, running the spread kernel alone (without
     * the spline) implies that the spline data must be read from global memory.
     */
    pmeGpuProgram->splineAndSpreadKernelSingle =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->splineAndSpreadKernelThPerAtom4Single =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->splineAndSpreadKernelWriteSplinesSingle =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->splineAndSpreadKernelWriteSplinesThPerAtom4Single =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->splineKernelSingle =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->splineKernelThPerAtom4Single =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->spreadKernelSingle =
            new PmeSplineAndSpreadKernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->spreadKernelThPerAtom4Single =
            new PmeSplineAndSpreadKernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->splineAndSpreadKernelDual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->splineAndSpreadKernelThPerAtom4Dual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->splineAndSpreadKernelWriteSplinesDual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->splineAndSpreadKernelWriteSplinesThPerAtom4Dual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->splineKernelDual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->splineKernelThPerAtom4Dual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->spreadKernelDual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->spreadKernelThPerAtom4Dual =
            new PmeSplineAndSpreadKernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->gatherKernelSingle =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->gatherKernelThPerAtom4Single =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->gatherKernelReadSplinesSingle =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->gatherKernelReadSplinesThPerAtom4Single =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->gatherKernelDual =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->gatherKernelThPerAtom4Dual =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->gatherKernelReadSplinesDual =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared, subGroupSize>();
    pmeGpuProgram->gatherKernelReadSplinesThPerAtom4Dual =
            new PmeGatherKernel<c_pmeOrder, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order, subGroupSize>();
    pmeGpuProgram->solveXYZKernelA =
            new PmeSolveKernel<GridOrdering::XYZ, false, c_stateA, subGroupSize>();
    pmeGpuProgram->solveXYZEnergyKernelA =
            new PmeSolveKernel<GridOrdering::XYZ, true, c_stateA, subGroupSize>();
    pmeGpuProgram->solveYZXKernelA =
            new PmeSolveKernel<GridOrdering::YZX, false, c_stateA, subGroupSize>();
    pmeGpuProgram->solveYZXEnergyKernelA =
            new PmeSolveKernel<GridOrdering::YZX, true, c_stateA, subGroupSize>();
    pmeGpuProgram->solveXYZKernelB =
            new PmeSolveKernel<GridOrdering::XYZ, false, c_stateB, subGroupSize>();
    pmeGpuProgram->solveXYZEnergyKernelB =
            new PmeSolveKernel<GridOrdering::XYZ, true, c_stateB, subGroupSize>();
    pmeGpuProgram->solveYZXKernelB =
            new PmeSolveKernel<GridOrdering::YZX, false, c_stateB, subGroupSize>();
    pmeGpuProgram->solveYZXEnergyKernelB =
            new PmeSolveKernel<GridOrdering::YZX, true, c_stateB, subGroupSize>();
}
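
/* The kernel functors allocated above are owned by PmeGpuProgramImpl and are
 * released in its destructor below. */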

PmeGpuProgramImpl::PmeGpuProgramImpl(const DeviceContext& deviceContext) :
    deviceContext_(deviceContext)
{
    // kernel parameters
    warpSize_             = subGroupSizeFromVendor(deviceContext.deviceInfo());
    spreadWorkGroupSize   = c_spreadMaxWarpsPerBlock * warpSize_;
    solveMaxWorkGroupSize = c_solveMaxWarpsPerBlock * warpSize_;
    gatherWorkGroupSize   = c_gatherMaxWarpsPerBlock * warpSize_;

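    /* For illustration (the actual c_*MaxWarpsPerBlock values are defined in
     * pme_gpu_constants.h): if c_spreadMaxWarpsPerBlock were 8, then on an
     * NVIDIA device (warpSize_ == 32) spreadWorkGroupSize would be 8 * 32 == 256. */
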
    switch (warpSize_)
    {
#if GMX_SYCL_DPCPP
        case 16: setKernelPointers<16>(this); break;
#elif GMX_SYCL_HIPSYCL
        case 32: setKernelPointers<32>(this); break;
        case 64: setKernelPointers<64>(this); break;
#endif
        default: GMX_RELEASE_ASSERT(false, "Invalid sub group size");
    }
}

PmeGpuProgramImpl::~PmeGpuProgramImpl()
{
    delete splineKernelSingle;
    delete splineKernelThPerAtom4Single;
    delete spreadKernelSingle;
    delete spreadKernelThPerAtom4Single;
    delete splineAndSpreadKernelSingle;
    delete splineAndSpreadKernelThPerAtom4Single;
    delete splineAndSpreadKernelWriteSplinesSingle;
    delete splineAndSpreadKernelWriteSplinesThPerAtom4Single;
    delete splineKernelDual;
    delete splineKernelThPerAtom4Dual;
    delete spreadKernelDual;
    delete spreadKernelThPerAtom4Dual;
    delete splineAndSpreadKernelDual;
    delete splineAndSpreadKernelThPerAtom4Dual;
    delete splineAndSpreadKernelWriteSplinesDual;
    delete splineAndSpreadKernelWriteSplinesThPerAtom4Dual;
    delete gatherKernelSingle;
    delete gatherKernelThPerAtom4Single;
    delete gatherKernelReadSplinesSingle;
    delete gatherKernelReadSplinesThPerAtom4Single;
    delete gatherKernelDual;
    delete gatherKernelThPerAtom4Dual;
    delete gatherKernelReadSplinesDual;
    delete gatherKernelReadSplinesThPerAtom4Dual;
    delete solveYZXKernelA;
    delete solveXYZKernelA;
    delete solveYZXEnergyKernelA;
    delete solveXYZEnergyKernelA;
    delete solveYZXKernelB;
    delete solveXYZKernelB;
    delete solveYZXEnergyKernelB;
    delete solveXYZEnergyKernelB;
}