/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief This file defines the PME GPU compile-time constants/macros,
 * used both in device and host code.
 *
 * As OpenCL C is not aware of constexpr, most of this file is
 * forwarded to the OpenCL kernel compilation as defines with the same
 * names, for the sake of code similarity.
 *
 * \todo The values are currently common to both CUDA and OpenCL
 * implementations, but should be reconsidered when we tune the OpenCL
 * implementation. See Redmine #2528.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#ifndef GMX_EWALD_PME_GPU_CONSTANTS_H
#define GMX_EWALD_PME_GPU_CONSTANTS_H

#include "config.h"

#if GMX_GPU == GMX_GPU_CUDA
#    include "gromacs/gpu_utils/cuda_arch_utils.cuh" // for warp_size
#endif
/* General settings for PME GPU behaviour */
/*! \brief
 * false: Atoms with zero charges are processed by PME. Could introduce some overhead.
 * true:  Atoms with zero charges are not processed by PME. Adds branching to the spread/gather.
 *        Could be good for performance in specific systems with lots of neutral atoms.
 * \todo Estimate performance differences.
 */
constexpr bool c_skipNeutralAtoms = false;
/*! \brief
 * Number of PME solve output floating point numbers.
 * 6 for symmetric virial matrix + 1 for reciprocal energy.
 */
constexpr int c_virialAndEnergyCount = 7;
/* Macros concerning the data layout */
/*
   Here is the current memory layout for the theta/dtheta B-spline float parameter arrays.
   This is the data in global memory used both by spreading and gathering kernels (with the same scheduling).
   This example has PME order 4 and 2 particles per warp/data chunk.
   Each particle has 16 threads assigned to it; each thread works on 4 non-sequential global grid contributions.

   ----------------------------------------------------------------------------
   particles 0, 1                                        | particles 2, 3 | ...
   ----------------------------------------------------------------------------
   order index 0           | index 1 | index 2 | index 3 | order index 0 .....
   ----------------------------------------------------------------------------
   tx0 tx1 ty0 ty1 tz0 tz1 | ..........
   ----------------------------------------------------------------------------

   Each data chunk for a single warp is 24 floats. This goes both for theta and dtheta.
   24 = 2 particles per warp * order 4 * 3 dimensions. 48 floats (1.5 warp size) per warp in total.
   I have also tried interleaving theta and dtheta in a single array (they are used in pairs in the gathering stage anyway)
   and it didn't seem to make a performance difference.

   The spline indexing is isolated in 2 inline functions:
   getSplineParamIndexBase() returns a base shared memory index corresponding to the atom in the block;
   getSplineParamIndex() consumes its result and adds offsets for the dimension and spline value index.
   An illustrative sketch of this indexing follows the PME order constant below.

   The corresponding defines follow.
 */
/*! \brief PME order parameter
 *
 * Note that the GPU code, unlike the CPU, only supports order 4.
 */
constexpr int c_pmeGpuOrder = 4;
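
/* Illustration only: a minimal sketch of the flat indexing described in the layout
 * comment above. The helper names are hypothetical stand-ins for the device-side
 * getSplineParamIndexBase()/getSplineParamIndex(), which are defined elsewhere;
 * this sketch only assumes the layout drawn above (2 atoms per warp, 3 dimensions).
 */
namespace pme_gpu_layout_sketch
{
constexpr int c_sketchDim          = 3; //!< x, y, z
constexpr int c_sketchAtomsPerWarp = 2; //!< as in the example layout above

//! Sketch of the base index: the first spline value of an atom within its warp chunk
constexpr int sketchSplineParamIndexBase(int warpIndex, int atomWarpIndex)
{
    return (c_pmeGpuOrder * warpIndex * c_sketchDim) * c_sketchAtomsPerWarp + atomWarpIndex;
}
//! Sketch of the full index: adds the spline value and dimension offsets to the base
constexpr int sketchSplineParamIndex(int paramIndexBase, int dimIndex, int splineIndex)
{
    return paramIndexBase + (splineIndex * c_sketchDim + dimIndex) * c_sketchAtomsPerWarp;
}
// One warp chunk spans 2 atoms * order 4 * 3 dimensions = 24 floats, as stated above
static_assert(sketchSplineParamIndexBase(1, 0) == 24, "one warp chunk is 24 floats");
} // namespace pme_gpu_layout_sketch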
/*! \brief The number of GPU threads used for computing spread/gather
 * contributions of a single atom, which relates to the PME order.
 *
 * TODO: this assumption leads to a minimum execution width of 16. See Redmine #2516.
 */
enum class ThreadsPerAtom : int
{
    /*! \brief Use a number of threads equal to the PME order (i.e. 4).
     *
     * Only CUDA implements this. See Redmine #2516 */
    Order,
    //! Use a number of threads equal to the square of the PME order (i.e. 16)
    OrderSquared,
    //! Size of the enumeration
    Count
};
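
/* Illustration only: a hypothetical helper (not part of the PME GPU API) showing the
 * per-atom thread count each enumerator is meant to correspond to, assuming order 4. */
constexpr int sketchThreadsPerAtomCount(ThreadsPerAtom threadsPerAtom)
{
    return (threadsPerAtom == ThreadsPerAtom::Order) ? c_pmeGpuOrder : c_pmeGpuOrder * c_pmeGpuOrder;
}
static_assert(sketchThreadsPerAtomCount(ThreadsPerAtom::OrderSquared) == 16,
              "OrderSquared corresponds to the minimum execution width of 16 noted above");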
/*
 * The execution widths for PME GPU kernels, used both on host and device for correct scheduling.
 * TODO: these were tuned for CUDA with the assumption of warp size 32; specialize them for OpenCL.
 *
 * As noted below, these are very approximate maximum sizes; at run time we might have to use
 * smaller block/workgroup sizes, depending on device capabilities.
 */
//! Spreading max block width in warps, picked among powers of 2 (2, 4, 8, 16) for max. occupancy and min. runtime in most cases
constexpr int c_spreadMaxWarpsPerBlock = 8;
//! Solving kernel max block width in warps, picked among powers of 2 (2, 4, 8, 16) for max.
//! occupancy and min. runtime (560Ti (CC2.1), 660Ti (CC3.0) and 750 (CC5.0))
constexpr int c_solveMaxWarpsPerBlock = 8;
//! Gathering max block width in warps, picked empirically among 2, 4, 8, 16 for max. occupancy and min. runtime
constexpr int c_gatherMaxWarpsPerBlock = 4;
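
/* Illustration only: a hypothetical helper (not the actual GROMACS scheduling code) sketching
 * how the per-kernel warp maxima above are meant to be turned into a block/workgroup size,
 * clamped to whatever the device supports at run time. */
constexpr int sketchBlockSize(int maxWarpsPerBlock, int executionWidth, int deviceMaxThreadsPerBlock)
{
    return (maxWarpsPerBlock * executionWidth < deviceMaxThreadsPerBlock)
                   ? maxWarpsPerBlock * executionWidth
                   : deviceMaxThreadsPerBlock;
}
// E.g. with a warp size of 32 and a permissive device limit, spreading can use up to
// sketchBlockSize(c_spreadMaxWarpsPerBlock, 32, 1024) == 256 threads per block.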
#if GMX_GPU == GMX_GPU_CUDA
/* All the fields below are dependent on warp_size and should
 * ideally be removed from the device-side code, as we have to
 * do that for OpenCL already.
 *
 * They also express maximum desired block/workgroup sizes,
 * while both with CUDA and OpenCL we have to treat the device
 * runtime limitations gracefully as well.
 */
//! Spreading max block size in threads
static constexpr int c_spreadMaxThreadsPerBlock = c_spreadMaxWarpsPerBlock * warp_size;

//! Solving kernel max block size in threads
static constexpr int c_solveMaxThreadsPerBlock = c_solveMaxWarpsPerBlock * warp_size;

//! Gathering max block size in threads
static constexpr int c_gatherMaxThreadsPerBlock = c_gatherMaxWarpsPerBlock * warp_size;
//! Gathering min blocks per CUDA multiprocessor
static constexpr int c_gatherMinBlocksPerMP = GMX_CUDA_MAX_THREADS_PER_MP / c_gatherMaxThreadsPerBlock;
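
/* Worked example (illustrative; the actual GMX_CUDA_MAX_THREADS_PER_MP value comes from
 * cuda_arch_utils.cuh): with a warp size of 32 the gather block is 4 * 32 = 128 threads,
 * so an architecture limit of, say, 2048 threads per multiprocessor would request at least
 * 2048 / 128 = 16 resident blocks per multiprocessor. */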
#endif // GMX_GPU == GMX_GPU_CUDA