/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#ifndef _nbnxn_kernel_simd_utils_x86_256s_h_
#define _nbnxn_kernel_simd_utils_x86_256s_h_

/* This file contains all functions/macros for the SIMD kernels
 * which have explicit dependencies on the j-cluster size and/or SIMD-width.
 * The functionality which depends on the j-cluster size is:
 *   LJ-parameter lookup
 *   force table lookup
 *   energy group pair energy storage
 */

typedef gmx_simd_real_t gmx_exclfilter;
static const int filter_stride = 1;

/* The 4xn kernel operates on 4-wide i-force registers */
#define gmx_mm_pr4     __m128
#define gmx_load_pr4   _mm_load_ps
#define gmx_store_pr4  _mm_store_ps
#define gmx_add_pr4    _mm_add_ps
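
/* Usage sketch for the 4-wide macros above (illustrative only; fix_buf is
 * a hypothetical aligned i-force buffer, fx4_S a partial force):
 *
 *     gmx_mm_pr4 fix_S;
 *
 *     fix_S = gmx_load_pr4(fix_buf);
 *     fix_S = gmx_add_pr4(fix_S, fx4_S);
 *     gmx_store_pr4(fix_buf, fix_S);
 */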

#ifdef GMX_NBNXN_SIMD_2XNN
/* Half-width operations are required for the 2xnn kernels */

/* Half-width SIMD real type */
#define gmx_mm_hpr  __m128

/* Half-width SIMD operations */
/* Load reals at half-width aligned pointer b into half-width SIMD register a */
#define gmx_load_hpr(a, b)   *(a) = _mm_load_ps(b)
/* Set all entries in half-width SIMD register *a to b */
#define gmx_set1_hpr(a, b)   *(a) = _mm_set1_ps(b)
/* Load one real at b and one real at b+1 into halves of a, respectively */
#define gmx_load1p1_pr(a, b) *(a) = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm_load1_ps(b)), _mm_load1_ps(b+1), 0x1)
/* Load reals at half-width aligned pointer b into two halves of a */
#define gmx_loaddh_pr(a, b)  *(a) = gmx_mm256_load4_ps(b)
/* Store half-width SIMD register b into half-width aligned memory a */
#define gmx_store_hpr(a, b)  _mm_store_ps(a, b)
#define gmx_add_hpr          _mm_add_ps
#define gmx_sub_hpr          _mm_sub_ps
/* Sum over 4 half SIMD registers */
#define gmx_sum4_hpr         gmx_mm256_sum4h_m128
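
/* Semantics sketch for the two full-width loads above (illustrative lane
 * notation, not code): with 4-wide halves,
 *
 *     gmx_load1p1_pr(&a, b) : a = { b[0], b[0], b[0], b[0],
 *                                   b[1], b[1], b[1], b[1] }
 *     gmx_loaddh_pr(&a, b)  : a = { b[0], b[1], b[2], b[3],
 *                                   b[0], b[1], b[2], b[3] }
 */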

/* Split full-width SIMD register a into its low half *b and high half *c */
static gmx_inline void
gmx_pr_to_2hpr(gmx_simd_real_t a, gmx_mm_hpr *b, gmx_mm_hpr *c)
{
    *b = _mm256_extractf128_ps(a, 0);
    *c = _mm256_extractf128_ps(a, 1);
}

/* Store half-width SIMD registers a and b in full-width register *c */
static gmx_inline void
gmx_2hpr_to_pr(gmx_mm_hpr a, gmx_mm_hpr b, gmx_simd_real_t *c)
{
    *c = _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 0x1);
}
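
/* Round-trip sketch for the pack/unpack pair above (illustrative; f_S and
 * g_S are hypothetical full-width registers):
 *
 *     gmx_mm_hpr f_lo, f_hi;
 *
 *     gmx_pr_to_2hpr(f_S, &f_lo, &f_hi);   f_lo/f_hi get the low/high lanes
 *     gmx_2hpr_to_pr(f_lo, f_hi, &g_S);    g_S is equal to f_S again
 */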

#endif /* GMX_NBNXN_SIMD_2XNN */

/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
static gmx_inline void
gmx_shuffle_4_ps_fil01_to_2_ps(__m128 in0, __m128 in1, __m128 in2, __m128 in3,
                               __m128 *out0, __m128 *out1)
{
    __m128 _c01, _c23;

    _c01  = _mm_movelh_ps(in0, in1);
    _c23  = _mm_movelh_ps(in2, in3);
    *out0 = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));
    *out1 = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(3, 1, 3, 1));
}
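
/* Scalar equivalent of the shuffle above (illustrative only):
 *
 *     *out0 = { in0[0], in1[0], in2[0], in3[0] };
 *     *out1 = { in0[1], in1[1], in2[1], in3[1] };
 */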

/* Collect element 2 of the 4 inputs to out */
static gmx_inline __m128
gmx_shuffle_4_ps_fil2_to_1_ps(__m128 in0, __m128 in1, __m128 in2, __m128 in3)
{
    __m128 _c01, _c23;

    _c01 = _mm_shuffle_ps(in0, in1, _MM_SHUFFLE(3, 2, 3, 2));
    _c23 = _mm_shuffle_ps(in2, in3, _MM_SHUFFLE(3, 2, 3, 2));

    return _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));
}

/* Sum the elements within each input register and return the sums */
static gmx_inline __m128
gmx_mm_transpose_sum4_pr(__m256 in0, __m256 in1,
                         __m256 in2, __m256 in3)
{
    in0 = _mm256_hadd_ps(in0, in1);
    in2 = _mm256_hadd_ps(in2, in3);
    in1 = _mm256_hadd_ps(in0, in2);

    return _mm_add_ps(_mm256_castps256_ps128(in1),
                      _mm256_extractf128_ps(in1, 1));
}
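
/* Worked trace of the reduction above (illustrative): _mm256_hadd_ps adds
 * adjacent pairs within each 128-bit lane. After the two hadd rounds the
 * low lane holds { sum(in0[0:3]), sum(in1[0:3]), sum(in2[0:3]),
 * sum(in3[0:3]) } for the original inputs, and the high lane the
 * corresponding sums of elements 4:7, so adding the two lanes returns
 * one full-register sum per input: { sum(in0), sum(in1), sum(in2),
 * sum(in3) }.
 */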

/* Sum the elements of the halves of each input register and return the sums */
static gmx_inline __m128
gmx_mm_transpose_sum4h_pr(__m256 in0, __m256 in2)
{
    in0 = _mm256_hadd_ps(in0, _mm256_setzero_ps());
    in2 = _mm256_hadd_ps(in2, _mm256_setzero_ps());
    in0 = _mm256_hadd_ps(in0, in2);
    in2 = _mm256_permute_ps(in0, _MM_SHUFFLE(2, 3, 0, 1));

    return _mm_add_ps(_mm256_castps256_ps128(in0), _mm256_extractf128_ps(in2, 1));
}

/* Put two 128-bit 4-float registers into one 256-bit 8-float register */
static gmx_inline __m256
gmx_2_mm_to_m256(__m128 in0, __m128 in1)
{
    return _mm256_insertf128_ps(_mm256_castps128_ps256(in0), in1, 1);
}

static gmx_inline void
load_lj_pair_params(const real *nbfp, const int *type, int aj,
                    __m256 *c6_S, __m256 *c12_S)
{
    __m128 clj_S[UNROLLJ], c6t_S[2], c12t_S[2];
    int    p;

    for (p = 0; p < UNROLLJ; p++)
    {
        /* Here we load 4 aligned floats, but we need just 2 */
        clj_S[p] = _mm_load_ps(nbfp+type[aj+p]*nbfp_stride);
    }

    gmx_shuffle_4_ps_fil01_to_2_ps(clj_S[0], clj_S[1], clj_S[2], clj_S[3],
                                   &c6t_S[0], &c12t_S[0]);
    gmx_shuffle_4_ps_fil01_to_2_ps(clj_S[4], clj_S[5], clj_S[6], clj_S[7],
                                   &c6t_S[1], &c12t_S[1]);

    *c6_S  = gmx_2_mm_to_m256(c6t_S[0], c6t_S[1]);
    *c12_S = gmx_2_mm_to_m256(c12t_S[0], c12t_S[1]);
}
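
/* Scalar equivalent of the gather above (illustrative; c6 and c12 are
 * hypothetical float arrays, and UNROLLJ is 8 here, as the clj_S indexing
 * implies):
 *
 *     for (p = 0; p < UNROLLJ; p++)
 *     {
 *         c6[p]  = nbfp[type[aj+p]*nbfp_stride + 0];
 *         c12[p] = nbfp[type[aj+p]*nbfp_stride + 1];
 *     }
 */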

/* As load_lj_pair_params, but loads from two parameter tables, nbfp0 and
 * nbfp1, one for each half of the resulting full-width registers */
static gmx_inline void
load_lj_pair_params2(const real *nbfp0, const real *nbfp1,
                     const int *type, int aj,
                     __m256 *c6_S, __m256 *c12_S)
{
    __m128 clj_S0[UNROLLJ], clj_S1[UNROLLJ], c6t_S[2], c12t_S[2];
    int    p;

    for (p = 0; p < UNROLLJ; p++)
    {
        /* Here we load 4 aligned floats, but we need just 2 */
        clj_S0[p] = _mm_load_ps(nbfp0+type[aj+p]*nbfp_stride);
    }
    for (p = 0; p < UNROLLJ; p++)
    {
        /* Here we load 4 aligned floats, but we need just 2 */
        clj_S1[p] = _mm_load_ps(nbfp1+type[aj+p]*nbfp_stride);
    }

    gmx_shuffle_4_ps_fil01_to_2_ps(clj_S0[0], clj_S0[1], clj_S0[2], clj_S0[3],
                                   &c6t_S[0], &c12t_S[0]);
    gmx_shuffle_4_ps_fil01_to_2_ps(clj_S1[0], clj_S1[1], clj_S1[2], clj_S1[3],
                                   &c6t_S[1], &c12t_S[1]);

    *c6_S  = gmx_2_mm_to_m256(c6t_S[0], c6t_S[1]);
    *c12_S = gmx_2_mm_to_m256(c12t_S[0], c12t_S[1]);
}

/* The load_table functions below are performance critical.
 * The routines issue UNROLLI*UNROLLJ _mm_load_ps calls.
 * As these all have latencies, scheduling is crucial.
 * The Intel compilers and CPUs seem to do a good job at this.
 * But AMD CPUs perform significantly worse with gcc than with icc.
 * Performance is improved a bit by using the extract function UNROLLJ times,
 * instead of doing an _mm_store_si128 for every i-particle.
 * This is only faster when we use FDV0 formatted tables, where we also need
 * to multiply the index by 4, which can be done by a SIMD bit shift.
 * With single precision AVX, 8 extracts are much slower than 1 store.
 * Because of this, the load_table_f function always takes the ti
 * parameter, which should contain a buffer that is aligned with
 * prepare_table_load_buffer(), but it is only used with full-width
 * AVX_256.
 */
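
/* Sketch of the FDV0 table layout assumed above (inferred from the
 * stride-4 indexing: each table point packs the force F, the force
 * difference D and the energy V, padded with a 0 to 4 floats):
 *
 *     tab_coul_FDV0[4*i + 0] = F[i]
 *     tab_coul_FDV0[4*i + 1] = D[i]
 *     tab_coul_FDV0[4*i + 2] = V[i]
 *     tab_coul_FDV0[4*i + 3] = 0
 *
 * so a single aligned _mm_load_ps fetches a complete table point.
 */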

static gmx_inline void
load_table_f(const real *tab_coul_FDV0, gmx_simd_int32_t ti_S, int *ti,
             __m256 *ctab0_S, __m256 *ctab1_S)
{
    __m128 ctab_S[8], ctabt_S[4];
    int    j;

    /* Bit shifting would be faster, but AVX has no 256-bit integer shift */
    _mm256_store_si256((__m256i *)ti, ti_S);
    for (j = 0; j < 8; j++)
    {
        ctab_S[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);
    }

    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[0], ctab_S[1], ctab_S[2], ctab_S[3],
                                   &ctabt_S[0], &ctabt_S[2]);
    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[4], ctab_S[5], ctab_S[6], ctab_S[7],
                                   &ctabt_S[1], &ctabt_S[3]);

    *ctab0_S = gmx_2_mm_to_m256(ctabt_S[0], ctabt_S[1]);
    *ctab1_S = gmx_2_mm_to_m256(ctabt_S[2], ctabt_S[3]);
}

static gmx_inline void
load_table_f_v(const real *tab_coul_FDV0, gmx_simd_int32_t ti_S, int *ti,
               __m256 *ctab0_S, __m256 *ctab1_S, __m256 *ctabv_S)
{
    __m128 ctab_S[8], ctabt_S[4], ctabvt_S[2];
    int    j;

    /* Bit shifting would be faster, but AVX has no 256-bit integer shift */
    _mm256_store_si256((__m256i *)ti, ti_S);
    for (j = 0; j < 8; j++)
    {
        ctab_S[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);
    }

    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[0], ctab_S[1], ctab_S[2], ctab_S[3],
                                   &ctabt_S[0], &ctabt_S[2]);
    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[4], ctab_S[5], ctab_S[6], ctab_S[7],
                                   &ctabt_S[1], &ctabt_S[3]);

    *ctab0_S = gmx_2_mm_to_m256(ctabt_S[0], ctabt_S[1]);
    *ctab1_S = gmx_2_mm_to_m256(ctabt_S[2], ctabt_S[3]);

    ctabvt_S[0] = gmx_shuffle_4_ps_fil2_to_1_ps(ctab_S[0], ctab_S[1],
                                                ctab_S[2], ctab_S[3]);
    ctabvt_S[1] = gmx_shuffle_4_ps_fil2_to_1_ps(ctab_S[4], ctab_S[5],
                                                ctab_S[6], ctab_S[7]);

    *ctabv_S = gmx_2_mm_to_m256(ctabvt_S[0], ctabvt_S[1]);
}
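
/* Output mapping of the table loads above (illustrative): with FDV0 points
 * {F, D, V, 0}, ctab0_S collects the 8 F values and ctab1_S the 8 D values
 * for the 8 indices in ti_S; the _v variant additionally collects the 8 V
 * values in ctabv_S.
 */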

static gmx_inline gmx_exclfilter
gmx_load1_exclfilter(int e)
{
    return _mm256_castsi256_ps(_mm256_set1_epi32(e));
}

static gmx_inline gmx_exclfilter
gmx_load_exclusion_filter(const unsigned *i)
{
    return gmx_simd_load_r((real *) (i));
}

static gmx_inline gmx_simd_bool_t
gmx_checkbitmask_pb(gmx_exclfilter m0, gmx_exclfilter m1)
{
    /* 0x0c is _CMP_NEQ_OQ: TRUE where the bitwise AND of the two masks,
     * converted to float, is non-zero */
    return _mm256_cmp_ps(_mm256_cvtepi32_ps(_mm256_castps_si256(_mm256_and_ps(m0, m1))), _mm256_setzero_ps(), 0x0c);
}
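
/* Usage sketch (illustrative; variable names are hypothetical):
 *
 *     gmx_exclfilter  filter_S = gmx_load_exclusion_filter(filter_ptr);
 *     gmx_exclfilter  bits_S   = gmx_load1_exclfilter(excl_bits);
 *     gmx_simd_bool_t mask_S   = gmx_checkbitmask_pb(filter_S, bits_S);
 *
 * mask_S is TRUE in lanes where the broadcast bits overlap the per-lane
 * filter, which the kernels can use to mask pair interactions.
 */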

#endif /* _nbnxn_kernel_simd_utils_x86_256s_h_ */