/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS Development Team
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _nbnxn_kernel_simd_utils_x86_256s_h_
#define _nbnxn_kernel_simd_utils_x86_256s_h_
/* This file contains all functions/macros for the SIMD kernels
 * which have explicit dependencies on the j-cluster size and/or SIMD-width.
 * The functionality which depends on the j-cluster size is:
 *   LJ-parameter lookup
 *   force table lookup
 *   energy group pair energy storage
 */
/* The 4xn kernel operates on 4-wide i-force registers */
#define gmx_mm_pr4     __m128
#define gmx_load_pr4   _mm_load_ps
#define gmx_store_pr4  _mm_store_ps
#define gmx_add_pr4    _mm_add_ps
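
/* Illustrative usage sketch (not part of the original header): accumulating
 * one 4-wide i-force component with the macros above. Here fix is assumed
 * to be a 16-byte aligned float buffer and fjx_S a hypothetical
 * contribution register:
 *
 *     gmx_mm_pr4 fix_S;
 *
 *     fix_S = gmx_load_pr4(fix);
 *     fix_S = gmx_add_pr4(fix_S, fjx_S);
 *     gmx_store_pr4(fix, fix_S);
 */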
/* Half-width operations are required for the 2xnn kernels */

/* Half-width SIMD real type */
#define gmx_mm_hpr  __m128

/* Half-width SIMD operations */
/* Load reals at half-width aligned pointer b into half-width SIMD register a */
#define gmx_load_hpr(a, b)    *(a) = _mm_load_ps(b)
/* Set all entries in half-width SIMD register *a to b */
#define gmx_set1_hpr(a, b)    *(a) = _mm_set1_ps(b)
/* Load one real at b and one real at b+1 into halves of a, respectively */
#define gmx_load1p1_pr(a, b)  *(a) = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm_load1_ps(b)), _mm_load1_ps(b+1), 0x1)
/* Load reals at half-width aligned pointer b into two halves of a */
#define gmx_loaddh_pr(a, b)   *(a) = gmx_mm256_load4_ps(b)
/* Store half-width SIMD register b into half-width aligned memory a */
#define gmx_store_hpr(a, b)   _mm_store_ps(a, b)
#define gmx_add_hpr           _mm_add_ps
#define gmx_sub_hpr           _mm_sub_ps
/* Sum over 4 half SIMD registers */
#define gmx_sum4_hpr          gmx_mm256_sum4h_m128
/* Store the low and high halves of full-width SIMD register a in half-width registers *b and *c */
static gmx_inline void
gmx_pr_to_2hpr(gmx_mm_pr a, gmx_mm_hpr *b, gmx_mm_hpr *c)
{
    *b = _mm256_extractf128_ps(a, 0);
    *c = _mm256_extractf128_ps(a, 1);
}
/* Store half-width SIMD registers a and b in full-width register *c */
static gmx_inline void
gmx_2hpr_to_pr(gmx_mm_hpr a, gmx_mm_hpr b, gmx_mm_pr *c)
{
    *c = _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 0x1);
}
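
/* Minimal sketch (illustrative only) of how the half-width helpers combine:
 * split a full-width register a_S (assumed already loaded by the caller),
 * do half-width arithmetic, and repack the halves into a full-width result:
 *
 *     gmx_mm_hpr lo_S, hi_S;
 *     gmx_mm_pr  packed_S;
 *
 *     gmx_pr_to_2hpr(a_S, &lo_S, &hi_S);
 *     lo_S = gmx_add_hpr(lo_S, hi_S);
 *     gmx_2hpr_to_pr(lo_S, hi_S, &packed_S);
 */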
/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
static gmx_inline void
gmx_shuffle_4_ps_fil01_to_2_ps(__m128 in0, __m128 in1, __m128 in2, __m128 in3,
                               __m128 *out0, __m128 *out1)
{
    __m128 _c01, _c23;

    _c01  = _mm_movelh_ps(in0, in1);
    _c23  = _mm_movelh_ps(in2, in3);
    *out0 = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));
    *out1 = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(3, 1, 3, 1));
}
/* Collect element 2 of the 4 inputs to out */
static gmx_inline __m128
gmx_shuffle_4_ps_fil2_to_1_ps(__m128 in0, __m128 in1, __m128 in2, __m128 in3)
{
    __m128 _c01, _c23;

    _c01 = _mm_shuffle_ps(in0, in1, _MM_SHUFFLE(3, 2, 3, 2));
    _c23 = _mm_shuffle_ps(in2, in3, _MM_SHUFFLE(3, 2, 3, 2));

    return _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));
}
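
/* Worked example for the two collectors above (illustrative only).
 * With inputs in0 = {a0,a1,a2,a3}, in1 = {b0,b1,b2,b3}, in2 = {c0,c1,c2,c3}
 * and in3 = {d0,d1,d2,d3} (element 0 listed first):
 *
 *     gmx_shuffle_4_ps_fil01_to_2_ps  ->  *out0 = {a0,b0,c0,d0}
 *                                         *out1 = {a1,b1,c1,d1}
 *     gmx_shuffle_4_ps_fil2_to_1_ps   ->  returns {a2,b2,c2,d2}
 */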
/* Sum the elements within each input register and return the sums */
static gmx_inline __m128
gmx_mm_transpose_sum4_pr(__m256 in0, __m256 in1,
                         __m256 in2, __m256 in3)
{
    in0 = _mm256_hadd_ps(in0, in1);
    in2 = _mm256_hadd_ps(in2, in3);
    in1 = _mm256_hadd_ps(in0, in2);

    return _mm_add_ps(_mm256_castps256_ps128(in1),
                      _mm256_extractf128_ps(in1, 1));
}
/* Sum the elements of the halves of each input register and return the sums */
static gmx_inline __m128
gmx_mm_transpose_sum4h_pr(__m256 in0, __m256 in2)
{
    in0 = _mm256_hadd_ps(in0, _mm256_setzero_ps());
    in2 = _mm256_hadd_ps(in2, _mm256_setzero_ps());
    in0 = _mm256_hadd_ps(in0, in2);
    in2 = _mm256_permute_ps(in0, _MM_SHUFFLE(2, 3, 0, 1));

    return _mm_add_ps(_mm256_castps256_ps128(in0), _mm256_extractf128_ps(in2, 1));
}
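
/* Worked example for the two reductions above (illustrative only).
 * gmx_mm_transpose_sum4_pr(in0, in1, in2, in3) returns
 *     { sum(in0), sum(in1), sum(in2), sum(in3) }
 * where sum(x) adds all 8 floats of x.
 * gmx_mm_transpose_sum4h_pr(in0, in2) returns
 *     { sum(low half of in0), sum(high half of in0),
 *       sum(low half of in2), sum(high half of in2) }
 */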
/* Put two 128-bit 4-float registers into one 256-bit 8-float register */
static gmx_inline __m256
gmx_2_mm_to_m256(__m128 in0, __m128 in1)
{
    return _mm256_insertf128_ps(_mm256_castps128_ps256(in0), in1, 1);
}
static gmx_inline void
load_lj_pair_params(const real *nbfp, const int *type, int aj,
                    __m256 *c6_S, __m256 *c12_S)
{
    __m128 clj_S[UNROLLJ], c6t_S[2], c12t_S[2];
    int    p;

    for (p = 0; p < UNROLLJ; p++)
    {
        /* Here we load 4 aligned floats, but we need just 2 */
        clj_S[p] = _mm_load_ps(nbfp+type[aj+p]*NBFP_STRIDE);
    }
    gmx_shuffle_4_ps_fil01_to_2_ps(clj_S[0], clj_S[1], clj_S[2], clj_S[3],
                                   &c6t_S[0], &c12t_S[0]);
    gmx_shuffle_4_ps_fil01_to_2_ps(clj_S[4], clj_S[5], clj_S[6], clj_S[7],
                                   &c6t_S[1], &c12t_S[1]);

    *c6_S  = gmx_2_mm_to_m256(c6t_S[0], c6t_S[1]);
    *c12_S = gmx_2_mm_to_m256(c12t_S[0], c12t_S[1]);
}
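
/* Minimal usage sketch (illustrative, with assumed context): gather the
 * LJ C6/C12 parameters for one j-cluster, assuming UNROLLJ == 8, that nbfp
 * points at the c6,c12 parameter pairs (padded to NBFP_STRIDE floats per
 * type) for the current i-atom type, and that aj is the first atom of the
 * j-cluster:
 *
 *     __m256 c6_S, c12_S;
 *
 *     load_lj_pair_params(nbfp, type, aj, &c6_S, &c12_S);
 */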
static gmx_inline void
load_lj_pair_params2(const real *nbfp0, const real *nbfp1,
                     const int *type, int aj,
                     __m256 *c6_S, __m256 *c12_S)
{
    __m128 clj_S0[UNROLLJ], clj_S1[UNROLLJ], c6t_S[2], c12t_S[2];
    int    p;

    for (p = 0; p < UNROLLJ; p++)
    {
        /* Here we load 4 aligned floats, but we need just 2 */
        clj_S0[p] = _mm_load_ps(nbfp0+type[aj+p]*NBFP_STRIDE);
    }
    for (p = 0; p < UNROLLJ; p++)
    {
        /* Here we load 4 aligned floats, but we need just 2 */
        clj_S1[p] = _mm_load_ps(nbfp1+type[aj+p]*NBFP_STRIDE);
    }
    gmx_shuffle_4_ps_fil01_to_2_ps(clj_S0[0], clj_S0[1], clj_S0[2], clj_S0[3],
                                   &c6t_S[0], &c12t_S[0]);
    gmx_shuffle_4_ps_fil01_to_2_ps(clj_S1[0], clj_S1[1], clj_S1[2], clj_S1[3],
                                   &c6t_S[1], &c12t_S[1]);

    *c6_S  = gmx_2_mm_to_m256(c6t_S[0], c6t_S[1]);
    *c12_S = gmx_2_mm_to_m256(c12t_S[0], c12t_S[1]);
}
/* The load_table functions below are performance critical.
 * The routines issue UNROLLI*UNROLLJ _mm_load_ps calls.
 * As these all have latencies, scheduling is crucial.
 * The Intel compilers and CPUs seem to do a good job at this.
 * But AMD CPUs perform significantly worse with gcc than with icc.
 * Performance is improved a bit by using the extract function UNROLLJ times,
 * instead of doing an _mm_store_si128 for every i-particle.
 * This is only faster when we use FDV0 formatted tables, where we also need
 * to multiply the index by 4, which can be done by a SIMD bit shift.
 * With single precision AVX, 8 extracts are much slower than 1 store.
 * Because of this, the load_table_f macro always takes the ti parameter,
 * but it is only used with AVX.
 */
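
/* Note on the FDV0 table layout assumed here (illustrative): each table
 * point i is stored as four consecutive floats,
 *     tab_coul_FDV0[4*i + 0] = F  (force)
 *     tab_coul_FDV0[4*i + 1] = D  (difference to the force at point i+1)
 *     tab_coul_FDV0[4*i + 2] = V  (potential)
 *     tab_coul_FDV0[4*i + 3] = 0  (padding)
 * so one aligned _mm_load_ps at offset ti[j]*4 fetches F, D, V and the
 * padding for a single table point, which is why the index is scaled by 4
 * in the loads below.
 */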
static gmx_inline void
load_table_f(const real *tab_coul_FDV0, gmx_epi32 ti_S, int *ti,
             __m256 *ctab0_S, __m256 *ctab1_S)
{
    __m128 ctab_S[8], ctabt_S[4];
    int    j;

    /* Bit shifting would be faster, but AVX doesn't support that */
    _mm256_store_si256((__m256i *)ti, ti_S);
    for (j = 0; j < 8; j++)
    {
        ctab_S[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);
    }
    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[0], ctab_S[1], ctab_S[2], ctab_S[3],
                                   &ctabt_S[0], &ctabt_S[2]);
    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[4], ctab_S[5], ctab_S[6], ctab_S[7],
                                   &ctabt_S[1], &ctabt_S[3]);

    *ctab0_S = gmx_2_mm_to_m256(ctabt_S[0], ctabt_S[1]);
    *ctab1_S = gmx_2_mm_to_m256(ctabt_S[2], ctabt_S[3]);
}
static gmx_inline void
load_table_f_v(const real *tab_coul_FDV0, gmx_epi32 ti_S, int *ti,
               __m256 *ctab0_S, __m256 *ctab1_S, __m256 *ctabv_S)
{
    __m128 ctab_S[8], ctabt_S[4], ctabvt_S[2];
    int    j;

    /* Bit shifting would be faster, but AVX doesn't support that */
    _mm256_store_si256((__m256i *)ti, ti_S);
    for (j = 0; j < 8; j++)
    {
        ctab_S[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);
    }
    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[0], ctab_S[1], ctab_S[2], ctab_S[3],
                                   &ctabt_S[0], &ctabt_S[2]);
    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[4], ctab_S[5], ctab_S[6], ctab_S[7],
                                   &ctabt_S[1], &ctabt_S[3]);

    *ctab0_S = gmx_2_mm_to_m256(ctabt_S[0], ctabt_S[1]);
    *ctab1_S = gmx_2_mm_to_m256(ctabt_S[2], ctabt_S[3]);

    ctabvt_S[0] = gmx_shuffle_4_ps_fil2_to_1_ps(ctab_S[0], ctab_S[1],
                                                ctab_S[2], ctab_S[3]);
    ctabvt_S[1] = gmx_shuffle_4_ps_fil2_to_1_ps(ctab_S[4], ctab_S[5],
                                                ctab_S[6], ctab_S[7]);

    *ctabv_S = gmx_2_mm_to_m256(ctabvt_S[0], ctabvt_S[1]);
}
#endif /* _nbnxn_kernel_simd_utils_x86_256s_h_ */