/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS Development Team
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _nbnxn_kernel_sse_utils_h_
#define _nbnxn_kernel_sse_utils_h_

/* This file contains all functions/macros for the SIMD kernels
 * which have explicit dependencies on the j-cluster size and/or SIMD-width.
 * The functionality which depends on the j-cluster size is:
 *   LJ-parameter lookup
 *   force table lookup
 *   energy group pair energy storage
 */

#ifdef GMX_X86_SSE2
/* Transpose 2 double precision registers */
#define GMX_MM_TRANSPOSE2_OP_PD(in0, in1, out0, out1)                       \
    {                                                                       \
        out0 = _mm_unpacklo_pd(in0, in1);                                   \
        out1 = _mm_unpackhi_pd(in0, in1);                                   \
    }
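/* Lane sketch (lowest lane first): in0 = {a0, a1}, in1 = {b0, b1}
 *                               -> out0 = {a0, b0}, out1 = {a1, b1}
 */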
#if GMX_NBNXN_SIMD_BITWIDTH == 128 || !defined GMX_DOUBLE
/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
#define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0, in1, in2, in3, out0, out1)   \
    {                                                                       \
        __m128 _c01, _c23;                                                  \
        _c01 = _mm_movelh_ps(in0, in1);                                     \
        _c23 = _mm_movelh_ps(in2, in3);                                     \
        out0 = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));         \
        out1 = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(3, 1, 3, 1));         \
    }
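/* Lane sketch: out0 = {in0[0], in1[0], in2[0], in3[0]},
 *              out1 = {in0[1], in1[1], in2[1], in3[1]}
 */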
#else
/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
#define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0, in1, in2, in3, out0, out1)   \
    {                                                                       \
        __m256d _c01, _c23;                                                 \
        _c01 = _mm256_shuffle_pd(in0, in1, _MM_SHUFFLE(1, 0, 1, 0));        \
        _c23 = _mm256_shuffle_pd(in2, in3, _MM_SHUFFLE(1, 0, 1, 0));        \
        out0 = _mm256_shuffle_pd(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));      \
        out1 = _mm256_shuffle_pd(_c01, _c23, _MM_SHUFFLE(3, 1, 3, 1));      \
    }
#endif
/* Collect element 2 of the 4 inputs to out */
#define GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(in0, in1, in2, in3, out)           \
    {                                                                       \
        __m128 _c01, _c23;                                                  \
        _c01 = _mm_shuffle_ps(in0, in1, _MM_SHUFFLE(3, 2, 3, 2));           \
        _c23 = _mm_shuffle_ps(in2, in3, _MM_SHUFFLE(3, 2, 3, 2));           \
        out  = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));         \
    }
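/* Lane sketch: out = {in0[2], in1[2], in2[2], in3[2]}; in FDV0-format
 * tables element 2 is the energy V, which is why this gather is used
 * for the energy entries below.
 */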
#if GMX_NBNXN_SIMD_BITWIDTH == 128

/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4_PR(in0, in1, in2, in3, out)                   \
    {                                                                       \
        _MM_TRANSPOSE4_PS(in0, in1, in2, in3);                              \
        in0 = _mm_add_ps(in0, in1);                                         \
        in2 = _mm_add_ps(in2, in3);                                         \
        out = _mm_add_ps(in0, in2);                                         \
    }
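/* Lane sketch: after the transpose-and-add, out[i] is the 4-element sum
 * of the original in<i>, e.g. out[0] = in0[0]+in0[1]+in0[2]+in0[3].
 */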
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM2_PD(in0, in1, out)                             \
    {                                                                       \
        GMX_MM_TRANSPOSE2_PD(in0, in1);                                     \
        out = _mm_add_pd(in0, in1);                                         \
    }

#else

#ifndef GMX_DOUBLE
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4_PR(in0, in1, in2, in3, out)                   \
    {                                                                       \
        in0 = _mm256_hadd_ps(in0, in1);                                     \
        in2 = _mm256_hadd_ps(in2, in3);                                     \
        in1 = _mm256_hadd_ps(in0, in2);                                     \
        out = _mm_add_ps(_mm256_castps256_ps128(in1), _mm256_extractf128_ps(in1, 1)); \
    }
/* Sum the elements of halves of each input register and store sums in out */
#define GMX_MM_TRANSPOSE_SUM4H_PR(in0, in2, out)                            \
    {                                                                       \
        in0 = _mm256_hadd_ps(in0, _mm256_setzero_ps());                     \
        in2 = _mm256_hadd_ps(in2, _mm256_setzero_ps());                     \
        in0 = _mm256_hadd_ps(in0, in2);                                     \
        in2 = _mm256_permute_ps(in0, _MM_SHUFFLE(2, 3, 0, 1));              \
        out = _mm_add_ps(_mm256_castps256_ps128(in0), _mm256_extractf128_ps(in2, 1)); \
    }

#else
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4_PR(in0, in1, in2, in3, out)                   \
    {                                                                       \
        in0 = _mm256_hadd_pd(in0, in1);                                     \
        in2 = _mm256_hadd_pd(in2, in3);                                     \
        out = _mm256_add_pd(_mm256_permute2f128_pd(in0, in2, 0x20), _mm256_permute2f128_pd(in0, in2, 0x31)); \
    }
#endif /* GMX_DOUBLE */

#endif /* GMX_NBNXN_SIMD_BITWIDTH == 128 */
#if GMX_NBNXN_SIMD_BITWIDTH == 128

static gmx_inline __m128
gmx_mm128_invsqrt_ps_single(__m128 x)
{
    const __m128 half  = _mm_set_ps(0.5, 0.5, 0.5, 0.5);
    const __m128 three = _mm_set_ps(3.0, 3.0, 3.0, 3.0);

    __m128 lu = _mm_rsqrt_ps(x);

    return _mm_mul_ps(half, _mm_mul_ps(_mm_sub_ps(three, _mm_mul_ps(_mm_mul_ps(lu, lu), x)), lu));
}
/* Do 2 double precision invsqrt operations.
 * Doing the SIMD rsqrt and the first Newton-Raphson iteration
 * in single precision gives full double precision accuracy.
 * The speed is more than twice that of two gmx_mm_invsqrt_pd calls.
 */
#define GMX_MM128_INVSQRT2_PD(in0, in1, out0, out1)                         \
    {                                                                       \
        const __m128d half  = _mm_set1_pd(0.5);                             \
        const __m128d three = _mm_set1_pd(3.0);                             \
        __m128        s, ir;                                                \
        __m128d       lu0, lu1;                                             \
                                                                            \
        s    = _mm_movelh_ps(_mm_cvtpd_ps(in0), _mm_cvtpd_ps(in1));         \
        ir   = gmx_mm128_invsqrt_ps_single(s);                              \
        lu0  = _mm_cvtps_pd(ir);                                            \
        lu1  = _mm_cvtps_pd(_mm_movehl_ps(ir, ir));                         \
        out0 = _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu0, lu0), in0)), lu0)); \
        out1 = _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu1, lu1), in1)), lu1)); \
    }

#define GMX_MM_INVSQRT2_PD GMX_MM128_INVSQRT2_PD
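/* A minimal usage sketch (not compiled). The names rsq0/rsq1/rinv0/rinv1
 * are hypothetical, not taken from the kernels. Each output lane gets one
 * Newton-Raphson update in double precision: lu' = 0.5*lu*(3 - x*lu*lu).
 */
#if 0
{
    __m128d rsq0 = _mm_set_pd(2.0, 4.0);  /* squared distances */
    __m128d rsq1 = _mm_set_pd(9.0, 16.0);
    __m128d rinv0, rinv1;

    GMX_MM_INVSQRT2_PD(rsq0, rsq1, rinv0, rinv1);
    /* rinv0/rinv1 now hold 1.0/sqrt() of each input element
     * to full double precision.
     */
}
#endif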
#endif

#if GMX_NBNXN_SIMD_BITWIDTH == 256

static gmx_inline __m256
gmx_mm256_invsqrt_ps_single(__m256 x)
{
    const __m256 half  = _mm256_set_ps(0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5);
    const __m256 three = _mm256_set_ps(3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0);

    __m256 lu = _mm256_rsqrt_ps(x);

    return _mm256_mul_ps(half, _mm256_mul_ps(_mm256_sub_ps(three, _mm256_mul_ps(_mm256_mul_ps(lu, lu), x)), lu));
}
/* Do 4 double precision invsqrt operations.
 * Doing the SIMD rsqrt and the first Newton-Raphson iteration
 * in single precision gives full double precision accuracy.
 */
#define GMX_MM256_INVSQRT2_PD(in0, in1, out0, out1)                         \
    {                                                                       \
        const __m256d half  = _mm256_set1_pd(0.5);                          \
        const __m256d three = _mm256_set1_pd(3.0);                          \
        __m256        s, ir;                                                \
        __m256d       lu0, lu1;                                             \
                                                                            \
        s    = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm256_cvtpd_ps(in0)), _mm256_cvtpd_ps(in1), 1); \
        ir   = gmx_mm256_invsqrt_ps_single(s);                              \
        lu0  = _mm256_cvtps_pd(_mm256_castps256_ps128(ir));                 \
        lu1  = _mm256_cvtps_pd(_mm256_extractf128_ps(ir, 1));               \
        out0 = _mm256_mul_pd(half, _mm256_mul_pd(_mm256_sub_pd(three, _mm256_mul_pd(_mm256_mul_pd(lu0, lu0), in0)), lu0)); \
        out1 = _mm256_mul_pd(half, _mm256_mul_pd(_mm256_sub_pd(three, _mm256_mul_pd(_mm256_mul_pd(lu1, lu1), in1)), lu1)); \
    }

#define GMX_MM_INVSQRT2_PD GMX_MM256_INVSQRT2_PD

#endif
/* Force and energy table load and interpolation routines */
#if GMX_NBNXN_SIMD_BITWIDTH == 128 && !defined GMX_DOUBLE

#define load_lj_pair_params(nbfp, type, aj, c6_SSE, c12_SSE)                \
    {                                                                       \
        gmx_mm_pr clj_SSE[UNROLLJ];                                         \
        int       p;                                                        \
                                                                            \
        for (p = 0; p < UNROLLJ; p++)                                       \
        {                                                                   \
            /* Here we load 4 aligned floats, but we need just 2 */         \
            clj_SSE[p] = gmx_load_pr(nbfp+type[aj+p]*NBFP_STRIDE);          \
        }                                                                   \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0], clj_SSE[1], clj_SSE[2], clj_SSE[3], c6_SSE, c12_SSE); \
    }

#endif
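/* The shuffle above assumes the nbfp layout used by these kernels:
 * element 0 of each NBFP_STRIDE-sized entry holds c6 and element 1
 * holds c12, so c6_SSE and c12_SSE each end up with the parameters
 * of UNROLLJ j-atoms.
 */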
#if GMX_NBNXN_SIMD_BITWIDTH == 256 && !defined GMX_DOUBLE

/* Put two 128-bit 4-float registers into one 256-bit 8-float register */
#define GMX_2_MM_TO_M256(in0, in1, out)                                     \
    {                                                                       \
        out = _mm256_insertf128_ps(_mm256_castps128_ps256(in0), in1, 1);    \
    }
#define load_lj_pair_params(nbfp, type, aj, c6_SSE, c12_SSE)                \
    {                                                                       \
        __m128 clj_SSE[UNROLLJ], c6t_SSE[2], c12t_SSE[2];                   \
        int    p;                                                           \
                                                                            \
        for (p = 0; p < UNROLLJ; p++)                                       \
        {                                                                   \
            /* Here we load 4 aligned floats, but we need just 2 */         \
            clj_SSE[p] = _mm_load_ps(nbfp+type[aj+p]*NBFP_STRIDE);          \
        }                                                                   \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0], clj_SSE[1], clj_SSE[2], clj_SSE[3], c6t_SSE[0], c12t_SSE[0]); \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[4], clj_SSE[5], clj_SSE[6], clj_SSE[7], c6t_SSE[1], c12t_SSE[1]); \
                                                                            \
        GMX_2_MM_TO_M256(c6t_SSE[0], c6t_SSE[1], c6_SSE);                   \
        GMX_2_MM_TO_M256(c12t_SSE[0], c12t_SSE[1], c12_SSE);                \
    }
#define load_lj_pair_params2(nbfp0, nbfp1, type, aj, c6_SSE, c12_SSE)       \
    {                                                                       \
        __m128 clj_SSE0[UNROLLJ], clj_SSE1[UNROLLJ], c6t_SSE[2], c12t_SSE[2]; \
        int    p;                                                           \
                                                                            \
        for (p = 0; p < UNROLLJ; p++)                                       \
        {                                                                   \
            /* Here we load 4 aligned floats, but we need just 2 */         \
            clj_SSE0[p] = _mm_load_ps(nbfp0+type[aj+p]*NBFP_STRIDE);        \
        }                                                                   \
        for (p = 0; p < UNROLLJ; p++)                                       \
        {                                                                   \
            /* Here we load 4 aligned floats, but we need just 2 */         \
            clj_SSE1[p] = _mm_load_ps(nbfp1+type[aj+p]*NBFP_STRIDE);        \
        }                                                                   \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE0[0], clj_SSE0[1], clj_SSE0[2], clj_SSE0[3], c6t_SSE[0], c12t_SSE[0]); \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE1[0], clj_SSE1[1], clj_SSE1[2], clj_SSE1[3], c6t_SSE[1], c12t_SSE[1]); \
                                                                            \
        GMX_2_MM_TO_M256(c6t_SSE[0], c6t_SSE[1], c6_SSE);                   \
        GMX_2_MM_TO_M256(c12t_SSE[0], c12t_SSE[1], c12_SSE);                \
    }

#endif
#if GMX_NBNXN_SIMD_BITWIDTH == 128 && defined GMX_DOUBLE

#define load_lj_pair_params(nbfp, type, aj, c6_SSE, c12_SSE)                \
    {                                                                       \
        gmx_mm_pr clj_SSE[UNROLLJ];                                         \
        int       p;                                                        \
                                                                            \
        for (p = 0; p < UNROLLJ; p++)                                       \
        {                                                                   \
            clj_SSE[p] = gmx_load_pr(nbfp+type[aj+p]*NBFP_STRIDE);          \
        }                                                                   \
        GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[0], clj_SSE[1], c6_SSE, c12_SSE);   \
    }

#endif
#if GMX_NBNXN_SIMD_BITWIDTH == 256 && defined GMX_DOUBLE

/* Note: GMX_2_M128D_TO_M256D is defined further down; since expansion is
 * deferred until the kernels use load_lj_pair_params, the order is fine.
 */
#define load_lj_pair_params(nbfp, type, aj, c6_SSE, c12_SSE)                \
    {                                                                       \
        __m128d clj_SSE[UNROLLJ], c6t_SSE[2], c12t_SSE[2];                  \
        int     p;                                                          \
                                                                            \
        for (p = 0; p < UNROLLJ; p++)                                       \
        {                                                                   \
            clj_SSE[p] = _mm_load_pd(nbfp+type[aj+p]*NBFP_STRIDE);          \
        }                                                                   \
        GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[0], clj_SSE[1], c6t_SSE[0], c12t_SSE[0]); \
        GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[2], clj_SSE[3], c6t_SSE[1], c12t_SSE[1]); \
        GMX_2_M128D_TO_M256D(c6t_SSE[0], c6t_SSE[1], c6_SSE);               \
        GMX_2_M128D_TO_M256D(c12t_SSE[0], c12t_SSE[1], c12_SSE);            \
    }

#endif
/* The load_table functions below are performance critical.
 * The routines issue UNROLLI*UNROLLJ _mm_load_ps calls.
 * As these all have latencies, scheduling is crucial.
 * The Intel compilers and CPUs seem to do a good job at this.
 * But AMD CPUs perform significantly worse with gcc than with icc.
 * Performance is improved a bit by using the extract function UNROLLJ times,
 * instead of doing an _mm_store_si128 for every i-particle.
 * This is only faster when we use FDV0 formatted tables, where we also need
 * to multiply the index by 4, which can be done by a SIMD bit shift.
 * With single precision AVX, 8 extracts are much slower than 1 store.
 * Because of this, the load_table_f macro always takes the ti parameter,
 * but it is only used with AVX.
 */
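/* Assumed FDV0 table layout (stride 4 per table point i):
 *   tab[4*i+0] = F[i]            force
 *   tab[4*i+1] = F[i+1] - F[i]   force difference, for linear interpolation
 *   tab[4*i+2] = V[i]            energy
 *   tab[4*i+3] = 0               padding
 * so the table index only needs to be multiplied by 4, i.e. left-shifted
 * by 2, before the loads below.
 */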
#if GMX_NBNXN_SIMD_BITWIDTH == 128 && !defined GMX_DOUBLE

#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE)       \
    {                                                                       \
        int    idx[4];                                                      \
        __m128 ctab_SSE[4];                                                 \
                                                                            \
        /* Table has 4 entries, left-shift index by 2 */                    \
        ti_SSE = _mm_slli_epi32(ti_SSE, 2);                                 \
        /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
        idx[0]      = gmx_mm_extract_epi32(ti_SSE, 0);                      \
        ctab_SSE[0] = _mm_load_ps(tab_coul_FDV0+idx[0]);                    \
        idx[1]      = gmx_mm_extract_epi32(ti_SSE, 1);                      \
        ctab_SSE[1] = _mm_load_ps(tab_coul_FDV0+idx[1]);                    \
        idx[2]      = gmx_mm_extract_epi32(ti_SSE, 2);                      \
        ctab_SSE[2] = _mm_load_ps(tab_coul_FDV0+idx[2]);                    \
        idx[3]      = gmx_mm_extract_epi32(ti_SSE, 3);                      \
        ctab_SSE[3] = _mm_load_ps(tab_coul_FDV0+idx[3]);                    \
                                                                            \
        /* Shuffle the force table entries to a convenient order */         \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0], ctab_SSE[1], ctab_SSE[2], ctab_SSE[3], ctab0_SSE, ctab1_SSE); \
    }
#define load_table_f_v(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
    {                                                                       \
        int    idx[4];                                                      \
        __m128 ctab_SSE[4];                                                 \
                                                                            \
        /* Table has 4 entries, left-shift index by 2 */                    \
        ti_SSE = _mm_slli_epi32(ti_SSE, 2);                                 \
        /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
        idx[0]      = gmx_mm_extract_epi32(ti_SSE, 0);                      \
        ctab_SSE[0] = _mm_load_ps(tab_coul_FDV0+idx[0]);                    \
        idx[1]      = gmx_mm_extract_epi32(ti_SSE, 1);                      \
        ctab_SSE[1] = _mm_load_ps(tab_coul_FDV0+idx[1]);                    \
        idx[2]      = gmx_mm_extract_epi32(ti_SSE, 2);                      \
        ctab_SSE[2] = _mm_load_ps(tab_coul_FDV0+idx[2]);                    \
        idx[3]      = gmx_mm_extract_epi32(ti_SSE, 3);                      \
        ctab_SSE[3] = _mm_load_ps(tab_coul_FDV0+idx[3]);                    \
                                                                            \
        /* Shuffle the force table entries to a convenient order */         \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0], ctab_SSE[1], ctab_SSE[2], ctab_SSE[3], ctab0_SSE, ctab1_SSE); \
        /* Shuffle the energy table entries to a convenient order */        \
        GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[0], ctab_SSE[1], ctab_SSE[2], ctab_SSE[3], ctabv_SSE); \
    }

#endif
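/* A minimal interpolation sketch (not compiled) of how the kernels are
 * expected to combine the loaded registers; frac_SSE and fsub_SSE are
 * hypothetical names, not taken from the kernels.
 */
#if 0
{
    __m128 ctab0_SSE, ctab1_SSE; /* loaded by load_table_f above */
    __m128 frac_SSE;             /* fractional distance within table bin i */
    __m128 fsub_SSE;

    /* F(r) ~= F[i] + frac*(F[i+1]-F[i]); ctab1_SSE already holds the
     * difference, so one multiply-add completes the interpolation.
     */
    fsub_SSE = _mm_add_ps(ctab0_SSE, _mm_mul_ps(frac_SSE, ctab1_SSE));
}
#endif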
#if GMX_NBNXN_SIMD_BITWIDTH == 256 && !defined GMX_DOUBLE

#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE)       \
    {                                                                       \
        __m128 ctab_SSE[8], ctabt_SSE[4];                                   \
        int    j;                                                           \
                                                                            \
        /* Bit shifting would be faster, but AVX doesn't support that */    \
        _mm256_store_si256((__m256i *)ti, ti_SSE);                          \
        for (j = 0; j < 8; j++)                                             \
        {                                                                   \
            ctab_SSE[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);               \
        }                                                                   \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0], ctab_SSE[1], ctab_SSE[2], ctab_SSE[3], ctabt_SSE[0], ctabt_SSE[2]); \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[4], ctab_SSE[5], ctab_SSE[6], ctab_SSE[7], ctabt_SSE[1], ctabt_SSE[3]); \
                                                                            \
        GMX_2_MM_TO_M256(ctabt_SSE[0], ctabt_SSE[1], ctab0_SSE);            \
        GMX_2_MM_TO_M256(ctabt_SSE[2], ctabt_SSE[3], ctab1_SSE);            \
    }
#define load_table_f_v(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
    {                                                                       \
        __m128 ctab_SSE[8], ctabt_SSE[4], ctabvt_SSE[2];                    \
        int    j;                                                           \
                                                                            \
        /* Bit shifting would be faster, but AVX doesn't support that */    \
        _mm256_store_si256((__m256i *)ti, ti_SSE);                          \
        for (j = 0; j < 8; j++)                                             \
        {                                                                   \
            ctab_SSE[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);               \
        }                                                                   \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0], ctab_SSE[1], ctab_SSE[2], ctab_SSE[3], ctabt_SSE[0], ctabt_SSE[2]); \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[4], ctab_SSE[5], ctab_SSE[6], ctab_SSE[7], ctabt_SSE[1], ctabt_SSE[3]); \
                                                                            \
        GMX_2_MM_TO_M256(ctabt_SSE[0], ctabt_SSE[1], ctab0_SSE);            \
        GMX_2_MM_TO_M256(ctabt_SSE[2], ctabt_SSE[3], ctab1_SSE);            \
                                                                            \
        GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[0], ctab_SSE[1], ctab_SSE[2], ctab_SSE[3], ctabvt_SSE[0]); \
        GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[4], ctab_SSE[5], ctab_SSE[6], ctab_SSE[7], ctabvt_SSE[1]); \
                                                                            \
        GMX_2_MM_TO_M256(ctabvt_SSE[0], ctabvt_SSE[1], ctabv_SSE);          \
    }

#endif
#if GMX_NBNXN_SIMD_BITWIDTH == 128 && defined GMX_DOUBLE

#define load_table_f(tab_coul_F, ti_SSE, ti, ctab0_SSE, ctab1_SSE)          \
    {                                                                       \
        int     idx[2];                                                     \
        __m128d ctab_SSE[2];                                                \
                                                                            \
        /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
        idx[0]      = gmx_mm_extract_epi32(ti_SSE, 0);                      \
        ctab_SSE[0] = _mm_loadu_pd(tab_coul_F+idx[0]);                      \
        idx[1]      = gmx_mm_extract_epi32(ti_SSE, 1);                      \
        ctab_SSE[1] = _mm_loadu_pd(tab_coul_F+idx[1]);                      \
                                                                            \
        /* Shuffle the force table entries to a convenient order */         \
        GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0], ctab_SSE[1], ctab0_SSE, ctab1_SSE); \
        /* The second force table entry should contain the difference */    \
        ctab1_SSE = _mm_sub_pd(ctab1_SSE, ctab0_SSE);                       \
    }
#define load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
    {                                                                       \
        int     idx[2];                                                     \
        __m128d ctab_SSE[4];                                                \
                                                                            \
        /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
        idx[0]      = gmx_mm_extract_epi32(ti_SSE, 0);                      \
        ctab_SSE[0] = _mm_loadu_pd(tab_coul_F+idx[0]);                      \
        idx[1]      = gmx_mm_extract_epi32(ti_SSE, 1);                      \
        ctab_SSE[1] = _mm_loadu_pd(tab_coul_F+idx[1]);                      \
                                                                            \
        /* Shuffle the force table entries to a convenient order */         \
        GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0], ctab_SSE[1], ctab0_SSE, ctab1_SSE); \
        /* The second force table entry should contain the difference */    \
        ctab1_SSE = _mm_sub_pd(ctab1_SSE, ctab0_SSE);                       \
                                                                            \
        ctab_SSE[2] = _mm_loadu_pd(tab_coul_V+idx[0]);                      \
        ctab_SSE[3] = _mm_loadu_pd(tab_coul_V+idx[1]);                      \
                                                                            \
        /* Shuffle the energy table entries to a single register */         \
        ctabv_SSE = _mm_shuffle_pd(ctab_SSE[2], ctab_SSE[3], _MM_SHUFFLE2(0, 0)); \
    }

#endif
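/* Note: in double precision there is no FDV0 layout; tab_coul_F stores
 * plain consecutive F values, so F[i] and F[i+1] come from one unaligned
 * load and the difference needed for interpolation is formed here instead
 * of being pre-stored.
 */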
#if GMX_NBNXN_SIMD_BITWIDTH == 256 && defined GMX_DOUBLE

/* Put two 128-bit 2-double registers into one 256-bit 4-double register */
#define GMX_2_M128D_TO_M256D(in0, in1, out)                                 \
    {                                                                       \
        out = _mm256_insertf128_pd(_mm256_castpd128_pd256(in0), in1, 1);    \
    }
#define load_table_f(tab_coul_F, ti_SSE, ti, ctab0_SSE, ctab1_SSE)          \
    {                                                                       \
        __m128d ctab_SSE[4], tr_SSE[4];                                     \
        int     j;                                                          \
                                                                            \
        _mm_store_si128((__m128i *)ti, ti_SSE);                             \
        for (j = 0; j < 4; j++)                                             \
        {                                                                   \
            ctab_SSE[j] = _mm_loadu_pd(tab_coul_F+ti[j]);                   \
        }                                                                   \
        /* Shuffle the force table entries to a convenient order */         \
        GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0], ctab_SSE[1], tr_SSE[0], tr_SSE[1]); \
        GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[2], ctab_SSE[3], tr_SSE[2], tr_SSE[3]); \
        GMX_2_M128D_TO_M256D(tr_SSE[0], tr_SSE[2], ctab0_SSE);              \
        GMX_2_M128D_TO_M256D(tr_SSE[1], tr_SSE[3], ctab1_SSE);              \
        /* The second force table entry should contain the difference */    \
        ctab1_SSE = _mm256_sub_pd(ctab1_SSE, ctab0_SSE);                    \
    }
#define load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
    {                                                                       \
        __m128d ctab_SSE[8], tr_SSE[4];                                     \
        int     j;                                                          \
                                                                            \
        _mm_store_si128((__m128i *)ti, ti_SSE);                             \
        for (j = 0; j < 4; j++)                                             \
        {                                                                   \
            ctab_SSE[j] = _mm_loadu_pd(tab_coul_F+ti[j]);                   \
        }                                                                   \
        /* Shuffle the force table entries to a convenient order */         \
        GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0], ctab_SSE[1], tr_SSE[0], tr_SSE[1]); \
        GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[2], ctab_SSE[3], tr_SSE[2], tr_SSE[3]); \
        GMX_2_M128D_TO_M256D(tr_SSE[0], tr_SSE[2], ctab0_SSE);              \
        GMX_2_M128D_TO_M256D(tr_SSE[1], tr_SSE[3], ctab1_SSE);              \
        /* The second force table entry should contain the difference */    \
        ctab1_SSE = _mm256_sub_pd(ctab1_SSE, ctab0_SSE);                    \
                                                                            \
        for (j = 0; j < 4; j++)                                             \
        {                                                                   \
            ctab_SSE[4+j] = _mm_loadu_pd(tab_coul_V+ti[j]);                 \
        }                                                                   \
        /* Shuffle the energy table entries to a single register */         \
        GMX_2_M128D_TO_M256D(_mm_shuffle_pd(ctab_SSE[4], ctab_SSE[5], _MM_SHUFFLE2(0, 0)), _mm_shuffle_pd(ctab_SSE[6], ctab_SSE[7], _MM_SHUFFLE2(0, 0)), ctabv_SSE); \
    }

#endif
/* Add the energies in one register to possibly multiple terms in the
 * energy array. This function is the same for SSE/AVX, single/double.
 */
static inline void add_ener_grp(gmx_mm_pr e_SSE, real *v, const int *offset_jj)
{
    int jj;

    /* We need to balance the number of store operations with
     * the rapidly increasing number of combinations of energy groups.
     * We add to a temporary buffer for 1 i-group vs 2 j-groups.
     */
    for (jj = 0; jj < (UNROLLJ/2); jj++)
    {
        gmx_mm_pr v_SSE;

        v_SSE = gmx_load_pr(v+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE);
        gmx_store_pr(v+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE, gmx_add_pr(v_SSE, e_SSE));
    }
}
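/* Worked example (hypothetical sizes, not from the kernels): with
 * UNROLLJ == 4 and GMX_SIMD_WIDTH_HERE == 4 the loop above runs twice,
 * adding e_SSE to v[offset_jj[0]..offset_jj[0]+3] and to
 * v[offset_jj[1]+4..offset_jj[1]+7].
 */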
#if defined GMX_X86_AVX_256 && GMX_SIMD_WIDTH_HERE == 8 && defined gmx_mm_hpr
/* As add_ener_grp above, but for two groups of UNROLLJ/2 stored in
 * a single SIMD register.
 */
static inline void add_ener_grp_halves(gmx_mm_pr e_SSE,
                                       real *v0, real *v1, const int *offset_jj)
{
    gmx_mm_hpr e_SSE0, e_SSE1;
    int        jj;

    e_SSE0 = _mm256_extractf128_ps(e_SSE, 0);
    e_SSE1 = _mm256_extractf128_ps(e_SSE, 1);

    for (jj = 0; jj < (UNROLLJ/2); jj++)
    {
        gmx_mm_hpr v_SSE;

        gmx_load_hpr(v_SSE, v0+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
        gmx_store_hpr(v0+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2, gmx_add_hpr(v_SSE, e_SSE0));
    }
    for (jj = 0; jj < (UNROLLJ/2); jj++)
    {
        gmx_mm_hpr v_SSE;

        gmx_load_hpr(v_SSE, v1+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
        gmx_store_hpr(v1+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2, gmx_add_hpr(v_SSE, e_SSE1));
    }
}
#endif
#endif /* GMX_X86_SSE2 */

#endif /* _nbnxn_kernel_sse_utils_h_ */