/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _nbnxn_kernel_simd_utils_x86_256s_h_
#define _nbnxn_kernel_simd_utils_x86_256s_h_

/* This file contains all functions/macros for the SIMD kernels
 * which have explicit dependencies on the j-cluster size and/or SIMD-width.
 * The functionality which depends on the j-cluster size is:
 *   LJ-parameter lookup
 *   force table lookup
 *   energy group pair energy storage
 */

#ifdef GMX_NBNXN_SIMD_2XNN
/* Half-width operations are required for the 2xnn kernels */

/* Half-width SIMD real type */
#define gmx_mm_hpr  __m128

/* Half-width SIMD operations */
/* Load reals at half-width aligned pointer b into half-width SIMD register a */
#define gmx_load_hpr(a, b)   *(a) = _mm_load_ps(b)
/* Set all entries in half-width SIMD register *a to b */
#define gmx_set1_hpr(a, b)   *(a) = _mm_set1_ps(b)
/* Load one real at b and one real at b+1 into halves of a, respectively */
#define gmx_load1p1_pr(a, b) *(a) = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm_load1_ps(b)), _mm_load1_ps(b+1), 0x1)
/* Store half-width SIMD register b into half-width aligned memory a */
#define gmx_store_hpr(a, b)  _mm_store_ps(a, b)
#define gmx_add_hpr          _mm_add_ps
#define gmx_sub_hpr          _mm_sub_ps
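
/* Illustration (not part of the kernel API): with a half-width of 4 floats,
 * gmx_load1p1_pr(&a, b) gives the lane layout
 *     a = { b[0], b[0], b[0], b[0], b[1], b[1], b[1], b[1] }
 * i.e. each half of the full-width register holds one broadcast real.
 */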

/* Sum over 4 half SIMD registers */
static gmx_inline __m128 gmx_simdcall
gmx_sum4_hpr(__m256 x, __m256 y)
{
    __m256 sum;

    sum = _mm256_add_ps(x, y);
    return _mm_add_ps(_mm256_castps256_ps128(sum), _mm256_extractf128_ps(sum, 0x1));
}
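
/* Illustration: with half registers a and b packed in x = {a, b} and c and d
 * packed in y = {c, d}, the return value is the element-wise sum a+b+c+d.
 */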

/* Load reals at half-width aligned pointer b into two halves of a */
static gmx_inline void gmx_simdcall
gmx_loaddh_pr(gmx_simd_real_t *a, const real *b)
{
    __m128 tmp;

    tmp = _mm_load_ps(b);
    *a  = _mm256_insertf128_ps(_mm256_castps128_ps256(tmp), tmp, 0x1);
}

/* Split full-width register a into half-width registers *b and *c */
static gmx_inline void gmx_simdcall
gmx_pr_to_2hpr(gmx_simd_real_t a, gmx_mm_hpr *b, gmx_mm_hpr *c)
{
    *b = _mm256_extractf128_ps(a, 0);
    *c = _mm256_extractf128_ps(a, 1);
}

/* Store half-width SIMD registers a and b in full-width register *c */
static gmx_inline void gmx_simdcall
gmx_2hpr_to_pr(gmx_mm_hpr a, gmx_mm_hpr b, gmx_simd_real_t *c)
{
    *c = _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 0x1);
}
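
/* Illustration: gmx_pr_to_2hpr and gmx_2hpr_to_pr are inverses, so
 *     gmx_pr_to_2hpr(x, &lo, &hi);
 *     gmx_2hpr_to_pr(lo, hi, &y);
 * leaves y equal to x.
 */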

#endif /* GMX_NBNXN_SIMD_2XNN */

/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
static gmx_inline void gmx_simdcall
gmx_shuffle_4_ps_fil01_to_2_ps(__m128 in0, __m128 in1, __m128 in2, __m128 in3,
                               __m128 *out0, __m128 *out1)
{
    __m128 _c01, _c23;

    _c01  = _mm_movelh_ps(in0, in1);
    _c23  = _mm_movelh_ps(in2, in3);
    *out0 = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));
    *out1 = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(3, 1, 3, 1));
}
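
/* Illustration: with inputs in0 = {a0, a1, a2, a3}, ..., in3 = {d0, d1, d2, d3}
 * the outputs are
 *     out0 = { a0, b0, c0, d0 }
 *     out1 = { a1, b1, c1, d1 }
 */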

/* Collect element 2 of the 4 inputs to out */
static gmx_inline __m128 gmx_simdcall
gmx_shuffle_4_ps_fil2_to_1_ps(__m128 in0, __m128 in1, __m128 in2, __m128 in3)
{
    __m128 _c01, _c23;

    _c01 = _mm_shuffle_ps(in0, in1, _MM_SHUFFLE(3, 2, 3, 2));
    _c23 = _mm_shuffle_ps(in2, in3, _MM_SHUFFLE(3, 2, 3, 2));

    return _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));
}

/* Sum the elements within each input register and return the sums */
static gmx_inline __m128 gmx_simdcall
gmx_mm_transpose_sum4_pr(__m256 in0, __m256 in1,
                         __m256 in2, __m256 in3)
{
    in0 = _mm256_hadd_ps(in0, in1);
    in2 = _mm256_hadd_ps(in2, in3);
    in1 = _mm256_hadd_ps(in0, in2);

    return _mm_add_ps(_mm256_castps256_ps128(in1),
                      _mm256_extractf128_ps(in1, 1));
}
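
/* Illustration: the return value r satisfies
 *     r[k] = sum of the 8 elements of the k-th input, k = 0..3
 * i.e. each 8-wide input register is reduced to a single float.
 */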

/* Sum the elements of halves of each input register and return the sums */
static gmx_inline __m128 gmx_simdcall
gmx_mm_transpose_sum4h_pr(__m256 in0, __m256 in2)
{
    in0 = _mm256_hadd_ps(in0, _mm256_setzero_ps());
    in2 = _mm256_hadd_ps(in2, _mm256_setzero_ps());
    in0 = _mm256_hadd_ps(in0, in2);
    in2 = _mm256_permute_ps(in0, _MM_SHUFFLE(2, 3, 0, 1));

    return _mm_add_ps(_mm256_castps256_ps128(in0), _mm256_extractf128_ps(in2, 1));
}

/* Put two 128-bit 4-float registers into one 256-bit 8-float register */
static gmx_inline __m256 gmx_simdcall
gmx_2_mm_to_m256(__m128 in0, __m128 in1)
{
    return _mm256_insertf128_ps(_mm256_castps128_ps256(in0), in1, 1);
}

#if defined(UNROLLJ) && UNROLLJ == 8
static gmx_inline void
load_lj_pair_params(const real *nbfp, const int *type, int aj,
                    __m256 *c6_S, __m256 *c12_S)
{
    __m128 clj_S[UNROLLJ], c6t_S[2], c12t_S[2];
    int    p;

    for (p = 0; p < UNROLLJ; p++)
    {
        /* Here we load 4 aligned floats, but we need just 2 */
        clj_S[p] = _mm_load_ps(nbfp+type[aj+p]*nbfp_stride);
    }
    gmx_shuffle_4_ps_fil01_to_2_ps(clj_S[0], clj_S[1], clj_S[2], clj_S[3],
                                   &c6t_S[0], &c12t_S[0]);
    gmx_shuffle_4_ps_fil01_to_2_ps(clj_S[4], clj_S[5], clj_S[6], clj_S[7],
                                   &c6t_S[1], &c12t_S[1]);

    *c6_S  = gmx_2_mm_to_m256(c6t_S[0], c6t_S[1]);
    *c12_S = gmx_2_mm_to_m256(c12t_S[0], c12t_S[1]);
}
#endif
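
/* Illustration: the loads plus shuffles above implement a gather that is
 * equivalent to the scalar code (c6 and c12 are stored pairwise in nbfp)
 *     for (p = 0; p < UNROLLJ; p++)
 *     {
 *         c6 [p] = nbfp[type[aj+p]*nbfp_stride];
 *         c12[p] = nbfp[type[aj+p]*nbfp_stride + 1];
 *     }
 * load_lj_pair_params2 below does the same for two half-width j-clusters,
 * gathering from the two tables nbfp0 and nbfp1.
 */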

#if defined(UNROLLJ) && UNROLLJ == 4
static gmx_inline void
load_lj_pair_params2(const real *nbfp0, const real *nbfp1,
                     const int *type, int aj,
                     __m256 *c6_S, __m256 *c12_S)
{
    __m128 clj_S0[UNROLLJ], clj_S1[UNROLLJ], c6t_S[2], c12t_S[2];
    int    p;

    for (p = 0; p < UNROLLJ; p++)
    {
        /* Here we load 4 aligned floats, but we need just 2 */
        clj_S0[p] = _mm_load_ps(nbfp0+type[aj+p]*nbfp_stride);
    }
    for (p = 0; p < UNROLLJ; p++)
    {
        /* Here we load 4 aligned floats, but we need just 2 */
        clj_S1[p] = _mm_load_ps(nbfp1+type[aj+p]*nbfp_stride);
    }
    gmx_shuffle_4_ps_fil01_to_2_ps(clj_S0[0], clj_S0[1], clj_S0[2], clj_S0[3],
                                   &c6t_S[0], &c12t_S[0]);
    gmx_shuffle_4_ps_fil01_to_2_ps(clj_S1[0], clj_S1[1], clj_S1[2], clj_S1[3],
                                   &c6t_S[1], &c12t_S[1]);

    *c6_S  = gmx_2_mm_to_m256(c6t_S[0], c6t_S[1]);
    *c12_S = gmx_2_mm_to_m256(c12t_S[0], c12t_S[1]);
}
#endif

/* The load_table functions below are performance critical.
 * The routines issue UNROLLI*UNROLLJ _mm_load_ps calls.
 * As these all have latencies, scheduling is crucial.
 * The Intel compilers and CPUs seem to do a good job at this.
 * But AMD CPUs perform significantly worse with gcc than with icc.
 * Performance is improved a bit by using the extract function UNROLLJ times,
 * instead of doing an _mm_store_si128 for every i-particle.
 * This is only faster when we use FDV0 formatted tables, where we also need
 * to multiply the index by 4, which can be done by a SIMD bit shift.
 * With single precision AVX, 8 extracts are much slower than 1 store.
 * Because of this, the load_table_f function always takes the ti
 * parameter, which should contain a buffer that is aligned with
 * prepare_table_load_buffer(), but it is only used with full-width
 * AVX_256.
 */
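
/* Illustration: in the FDV0 table format each table point i stores four
 * consecutive floats { F, D, V, 0 } (force, force difference, potential,
 * padding), so a single aligned _mm_load_ps at tab_coul_FDV0 + i*4 fetches
 * all table data for that point; the shuffles below then separate F, D and V.
 */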

static gmx_inline void gmx_simdcall
load_table_f(const real *tab_coul_FDV0, gmx_simd_int32_t ti_S, int *ti,
             __m256 *ctab0_S, __m256 *ctab1_S)
{
    __m128 ctab_S[8], ctabt_S[4];
    int    j;

    /* Bit shifting would be faster, but AVX doesn't support that */
    _mm256_store_si256((__m256i *)ti, ti_S);
    for (j = 0; j < 8; j++)
    {
        ctab_S[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);
    }
    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[0], ctab_S[1], ctab_S[2], ctab_S[3],
                                   &ctabt_S[0], &ctabt_S[2]);
    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[4], ctab_S[5], ctab_S[6], ctab_S[7],
                                   &ctabt_S[1], &ctabt_S[3]);

    *ctab0_S = gmx_2_mm_to_m256(ctabt_S[0], ctabt_S[1]);
    *ctab1_S = gmx_2_mm_to_m256(ctabt_S[2], ctabt_S[3]);
}

static gmx_inline void gmx_simdcall
load_table_f_v(const real *tab_coul_FDV0, gmx_simd_int32_t ti_S, int *ti,
               __m256 *ctab0_S, __m256 *ctab1_S, __m256 *ctabv_S)
{
    __m128 ctab_S[8], ctabt_S[4], ctabvt_S[2];
    int    j;

    /* Bit shifting would be faster, but AVX doesn't support that */
    _mm256_store_si256((__m256i *)ti, ti_S);
    for (j = 0; j < 8; j++)
    {
        ctab_S[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);
    }
    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[0], ctab_S[1], ctab_S[2], ctab_S[3],
                                   &ctabt_S[0], &ctabt_S[2]);
    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[4], ctab_S[5], ctab_S[6], ctab_S[7],
                                   &ctabt_S[1], &ctabt_S[3]);

    *ctab0_S = gmx_2_mm_to_m256(ctabt_S[0], ctabt_S[1]);
    *ctab1_S = gmx_2_mm_to_m256(ctabt_S[2], ctabt_S[3]);

    ctabvt_S[0] = gmx_shuffle_4_ps_fil2_to_1_ps(ctab_S[0], ctab_S[1],
                                                ctab_S[2], ctab_S[3]);
    ctabvt_S[1] = gmx_shuffle_4_ps_fil2_to_1_ps(ctab_S[4], ctab_S[5],
                                                ctab_S[6], ctab_S[7]);

    *ctabv_S = gmx_2_mm_to_m256(ctabvt_S[0], ctabvt_S[1]);
}

#ifdef GMX_SIMD_HAVE_FINT32_LOGICAL

typedef gmx_simd_int32_t gmx_exclfilter;
static const int filter_stride = GMX_SIMD_INT32_WIDTH/GMX_SIMD_REAL_WIDTH;

static gmx_inline gmx_exclfilter gmx_simdcall
gmx_load1_exclfilter(int e)
{
    return _mm256_set1_epi32(e);
}

static gmx_inline gmx_exclfilter gmx_simdcall
gmx_load_exclusion_filter(const unsigned *i)
{
    return gmx_simd_load_i(i);
}

static gmx_inline gmx_simd_bool_t gmx_simdcall
gmx_checkbitmask_pb(gmx_exclfilter m0, gmx_exclfilter m1)
{
    return _mm256_castsi256_ps(_mm256_cmpeq_epi32(_mm256_andnot_si256(m0, m1), _mm256_setzero_si256()));
}
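
/* Note: andnot computes ~m0 & m1, so a lane of the result is all ones exactly
 * when every bit set in m1 is also set in m0; with a single filter bit per
 * lane in m1 this tests whether that bit is set in m0.
 */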

#else /* GMX_SIMD_HAVE_FINT32_LOGICAL */

/* No integer support, use a real to store the exclusion bits */
typedef gmx_simd_real_t gmx_exclfilter;
static const int filter_stride = 1;

static gmx_inline gmx_exclfilter gmx_simdcall
gmx_load1_exclfilter(int e)
{
    return _mm256_castsi256_ps(_mm256_set1_epi32(e));
}

static gmx_inline gmx_exclfilter gmx_simdcall
gmx_load_exclusion_filter(const unsigned *i)
{
    /* cppcheck-suppress invalidPointerCast */
    return gmx_simd_load_r((real *) (i));
}

static gmx_inline gmx_simd_bool_t gmx_simdcall
gmx_checkbitmask_pb(gmx_exclfilter m0, gmx_exclfilter m1)
{
    return _mm256_cmp_ps(_mm256_cvtepi32_ps(_mm256_castps_si256(_mm256_and_ps(m0, m1))), _mm256_setzero_ps(), 0x0c);
}
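
/* Note: AVX lacks 256-bit integer compares, so the bits of m0 and m1 are
 * ANDed as floats, reinterpreted as int32 and converted to float; any
 * non-zero integer converts to a non-zero float, so the NEQ compare (0x0c,
 * _CMP_NEQ_OQ) with zero is TRUE in lanes where m0 and m1 share bits.  A
 * direct float compare would fail for bit patterns such as 0x80000000,
 * which is -0.0f and compares equal to zero.
 */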

#endif /* GMX_SIMD_HAVE_FINT32_LOGICAL */

#endif /* _nbnxn_kernel_simd_utils_x86_256s_h_ */