/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS Development Team
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _nbnxn_kernel_sse_utils_h_
#define _nbnxn_kernel_sse_utils_h_
/* This file contains all functions/macros for the SSE/AVX kernels
 * which have explicit dependencies on the j-size / SIMD-width, which
 * can be 2 (SSE-double), 4 (SSE-single,AVX-double) or 8 (AVX-single).
 * The functionality which depends on the j-cluster size is:
 *   LJ-parameter lookup
 *   force table lookup
 *   energy group pair energy storage
 */
/* Transpose 2 double precision registers */
#define GMX_MM_TRANSPOSE2_OP_PD(in0,in1,out0,out1) \
{ \
    out0 = _mm_unpacklo_pd(in0,in1); \
    out1 = _mm_unpackhi_pd(in0,in1); \
}
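/* Usage sketch for the macro above, compiled out; illustration only,
 * not used by the kernels. With in0 = {a0,a1} and in1 = {b0,b1}
 * (low lane first), the macro performs a 2x2 transpose:
 * out0 = {a0,b0} and out1 = {a1,b1}. All names below are placeholders.
 */
#if 0
{
    double  a0 = 1, a1 = 2, b0 = 3, b1 = 4; /* placeholder values */
    __m128d r0 = _mm_set_pd(a1,a0);         /* _mm_set_pd takes (high,low) */
    __m128d r1 = _mm_set_pd(b1,b0);
    __m128d t0,t1;

    GMX_MM_TRANSPOSE2_OP_PD(r0,r1,t0,t1);
    /* now t0 = {a0,b0} and t1 = {a1,b1} */
}
#endif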
#if defined GMX_MM128_HERE || !defined GMX_DOUBLE
/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
#define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0,in1,in2,in3,out0,out1) \
{ \
    __m128 _c01,_c23; \
    _c01 = _mm_movelh_ps(in0,in1); \
    _c23 = _mm_movelh_ps(in2,in3); \
    out0 = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(2,0,2,0)); \
    out1 = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(3,1,3,1)); \
}
#else
/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
#define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0,in1,in2,in3,out0,out1) \
{ \
    __m256d _c01,_c23; \
    _c01 = _mm256_shuffle_pd(in0,in1,_MM_SHUFFLE(1,0,1,0)); \
    _c23 = _mm256_shuffle_pd(in2,in3,_MM_SHUFFLE(1,0,1,0)); \
    out0 = _mm256_shuffle_pd(_c01,_c23,_MM_SHUFFLE(2,0,2,0)); \
    out1 = _mm256_shuffle_pd(_c01,_c23,_MM_SHUFFLE(3,1,3,1)); \
}
#endif
/* Collect element 2 of the 4 inputs to out */
#define GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(in0,in1,in2,in3,out) \
{ \
    __m128 _c01,_c23; \
    _c01 = _mm_shuffle_ps(in0,in1,_MM_SHUFFLE(3,2,3,2)); \
    _c23 = _mm_shuffle_ps(in2,in3,_MM_SHUFFLE(3,2,3,2)); \
    out  = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(2,0,2,0)); \
}
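/* Usage sketch for the two FIL shuffles, compiled out; illustration only.
 * They gather one element position from four registers; this is exactly
 * the pattern the load_table macros below use to separate the F, D and V
 * fields of four table entries. All names here are placeholders.
 */
#if 0
{
    __m128 row0,row1,row2,row3; /* four {F,D,V,0} table entries */
    __m128 F4,D4,V4;            /* per-field gathers            */

    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(row0,row1,row2,row3,F4,D4);
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(row0,row1,row2,row3,V4);
}
#endif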
#ifndef GMX_MM256_HERE
#ifndef GMX_DOUBLE
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4_PR(in0,in1,in2,in3,out) \
{ \
    _MM_TRANSPOSE4_PS(in0,in1,in2,in3); \
    in0 = _mm_add_ps(in0,in1); \
    in2 = _mm_add_ps(in2,in3); \
    out = _mm_add_ps(in0,in2); \
}
#else
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM2_PD(in0,in1,out) \
{ \
    GMX_MM_TRANSPOSE2_PD(in0,in1); \
    out = _mm_add_pd(in0,in1); \
}
#endif
#else
#ifndef GMX_DOUBLE
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4_PR(in0,in1,in2,in3,out) \
{ \
    in0 = _mm256_hadd_ps(in0,in1); \
    in2 = _mm256_hadd_ps(in2,in3); \
    in1 = _mm256_hadd_ps(in0,in2); \
    out = _mm_add_ps(_mm256_castps256_ps128(in1),_mm256_extractf128_ps(in1,1)); \
}
/* Sum the elements of the halves of each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4H_PR(in0,in2,out) \
{ \
    in0 = _mm256_hadd_ps(in0,_mm256_setzero_ps()); \
    in2 = _mm256_hadd_ps(in2,_mm256_setzero_ps()); \
    in0 = _mm256_hadd_ps(in0,in2); \
    in2 = _mm256_permute_ps(in0,0xB1); /* 0xB1 = 10110001b; binary literals are a GCC extension */ \
    out = _mm_add_ps(_mm256_castps256_ps128(in0),_mm256_extractf128_ps(in2,1)); \
}
#else
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4_PR(in0,in1,in2,in3,out) \
{ \
    in0 = _mm256_hadd_pd(in0,in1); \
    in2 = _mm256_hadd_pd(in2,in3); \
    out = _mm256_add_pd(_mm256_permute2f128_pd(in0,in2,0x20),_mm256_permute2f128_pd(in0,in2,0x31)); \
}
#endif
#endif
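/* Reference semantics of the GMX_MM_TRANSPOSE_SUM*_PR macros above,
 * compiled out; a scalar sketch for illustration only. Element i of out
 * is the sum over all SIMD lanes of input register i.
 */
#if 0
{
    float in[4][GMX_SIMD_WIDTH_HERE]; /* the 4 input registers */
    float out[4];                     /* the result lanes      */
    int   i,k;

    for(i=0; i<4; i++)
    {
        out[i] = 0;
        for(k=0; k<GMX_SIMD_WIDTH_HERE; k++)
        {
            out[i] += in[i][k];
        }
    }
}
#endif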
#ifdef GMX_MM128_HERE

static inline __m128
gmx_mm128_invsqrt_ps_single(__m128 x)
{
    const __m128 half  = _mm_set_ps(0.5,0.5,0.5,0.5);
    const __m128 three = _mm_set_ps(3.0,3.0,3.0,3.0);

    __m128 lu = _mm_rsqrt_ps(x);

    return _mm_mul_ps(half,_mm_mul_ps(_mm_sub_ps(three,_mm_mul_ps(_mm_mul_ps(lu,lu),x)),lu));
}
/* Do 2 double precision invsqrt operations.
 * Doing the SIMD rsqrt and the first Newton-Raphson iteration
 * in single precision gives full double precision accuracy.
 * The speed is more than twice that of two gmx_mm_invsqrt_pd calls.
 */
#define GMX_MM128_INVSQRT2_PD(in0,in1,out0,out1) \
{ \
    const __m128d half  = _mm_set1_pd(0.5); \
    const __m128d three = _mm_set1_pd(3.0); \
    __m128  s,ir; \
    __m128d lu0,lu1; \
    s    = _mm_movelh_ps(_mm_cvtpd_ps(in0),_mm_cvtpd_ps(in1)); \
    ir   = gmx_mm128_invsqrt_ps_single(s); \
    lu0  = _mm_cvtps_pd(ir); \
    lu1  = _mm_cvtps_pd(_mm_movehl_ps(ir,ir)); \
    out0 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu0,lu0),in0)),lu0)); \
    out1 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu1,lu1),in1)),lu1)); \
}

#define GMX_MM_INVSQRT2_PD GMX_MM128_INVSQRT2_PD

#endif
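/* Scalar sketch of the scheme above, compiled out; illustration only.
 * ref_invsqrt2 is a hypothetical name, <math.h> is assumed, and
 * 1.0f/sqrtf() stands in for the ~12-bit _mm_rsqrt_ps seed. Each
 * Newton-Raphson step y' = 0.5*y*(3 - x*y*y) roughly doubles the number
 * of correct bits: one step in single, then one in double precision.
 */
#if 0
static double ref_invsqrt2(double x)
{
    float  xs = (float)x;
    float  y  = 1.0f/sqrtf(xs);   /* seed, stands in for rsqrt    */
    double yd;

    y  = 0.5f*y*(3.0f - xs*y*y);  /* NR iteration in single       */
    yd = (double)y;
    yd = 0.5*yd*(3.0 - x*yd*yd);  /* final NR iteration in double */

    return yd;
}
#endif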
#ifdef GMX_MM256_HERE

static inline __m256
gmx_mm256_invsqrt_ps_single(__m256 x)
{
    const __m256 half  = _mm256_set_ps(0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5);
    const __m256 three = _mm256_set_ps(3.0,3.0,3.0,3.0,3.0,3.0,3.0,3.0);

    __m256 lu = _mm256_rsqrt_ps(x);

    return _mm256_mul_ps(half,_mm256_mul_ps(_mm256_sub_ps(three,_mm256_mul_ps(_mm256_mul_ps(lu,lu),x)),lu));
}
/* Do 4 double precision invsqrt operations.
 * Doing the SIMD rsqrt and the first Newton-Raphson iteration
 * in single precision gives full double precision accuracy.
 */
#define GMX_MM256_INVSQRT2_PD(in0,in1,out0,out1) \
{ \
    const __m256d half  = _mm256_set1_pd(0.5); \
    const __m256d three = _mm256_set1_pd(3.0); \
    __m256  s,ir; \
    __m256d lu0,lu1; \
    s    = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm256_cvtpd_ps(in0)),_mm256_cvtpd_ps(in1),1); \
    ir   = gmx_mm256_invsqrt_ps_single(s); \
    lu0  = _mm256_cvtps_pd(_mm256_castps256_ps128(ir)); \
    lu1  = _mm256_cvtps_pd(_mm256_extractf128_ps(ir,1)); \
    out0 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu0,lu0),in0)),lu0)); \
    out1 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu1,lu1),in1)),lu1)); \
}

#define GMX_MM_INVSQRT2_PD GMX_MM256_INVSQRT2_PD

#endif
/* Force and energy table load and interpolation routines */

#if defined GMX_MM128_HERE && !defined GMX_DOUBLE

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE) \
{ \
    gmx_mm_pr clj_SSE[UNROLLJ]; \
    int p; \
    for(p=0; p<UNROLLJ; p++) \
    { \
        /* Here we load 4 aligned floats, but we need just 2 */ \
        clj_SSE[p] = gmx_load_pr(nbfp+type[aj+p]*NBFP_STRIDE); \
    } \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0],clj_SSE[1],clj_SSE[2],clj_SSE[3],c6_SSE,c12_SSE); \
}

#endif
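/* Scalar equivalent of load_lj_pair_params, compiled out; illustration
 * only. It assumes the first two floats of each nbfp row are c6 and c12;
 * nbfp, type and aj stand for the macro parameters of the same names.
 */
#if 0
{
    real c6[UNROLLJ],c12[UNROLLJ];
    int  p;

    for(p=0; p<UNROLLJ; p++)
    {
        c6[p]  = nbfp[type[aj+p]*NBFP_STRIDE + 0];
        c12[p] = nbfp[type[aj+p]*NBFP_STRIDE + 1];
    }
}
#endif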
#if defined GMX_MM256_HERE && !defined GMX_DOUBLE

/* Put two 128-bit 4-float registers into one 256-bit 8-float register */
#define GMX_2_MM_TO_M256(in0,in1,out) \
{ \
    out = _mm256_insertf128_ps(_mm256_castps128_ps256(in0),in1,1); \
}
#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE) \
{ \
    __m128 clj_SSE[UNROLLJ],c6t_SSE[2],c12t_SSE[2]; \
    int p; \
    for(p=0; p<UNROLLJ; p++) \
    { \
        /* Here we load 4 aligned floats, but we need just 2 */ \
        clj_SSE[p] = _mm_load_ps(nbfp+type[aj+p]*NBFP_STRIDE); \
    } \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0],clj_SSE[1],clj_SSE[2],clj_SSE[3],c6t_SSE[0],c12t_SSE[0]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[4],clj_SSE[5],clj_SSE[6],clj_SSE[7],c6t_SSE[1],c12t_SSE[1]); \
    GMX_2_MM_TO_M256(c6t_SSE[0],c6t_SSE[1],c6_SSE); \
    GMX_2_MM_TO_M256(c12t_SSE[0],c12t_SSE[1],c12_SSE); \
}
#define load_lj_pair_params2(nbfp,type,aj,c6_SSE,c12_SSE) \
{ \
    __m128 clj_SSE[2*UNROLLJ],c6t_SSE[2],c12t_SSE[2]; \
    int p; \
    for(p=0; p<2*UNROLLJ; p++) \
    { \
        /* Here we load 4 aligned floats, but we need just 2 */ \
        clj_SSE[p] = _mm_load_ps(nbfp+type[aj+p]*NBFP_STRIDE); \
    } \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0],clj_SSE[1],clj_SSE[2],clj_SSE[3],c6t_SSE[0],c12t_SSE[0]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[4],clj_SSE[5],clj_SSE[6],clj_SSE[7],c6t_SSE[1],c12t_SSE[1]); \
    GMX_2_MM_TO_M256(c6t_SSE[0],c6t_SSE[1],c6_SSE); \
    GMX_2_MM_TO_M256(c12t_SSE[0],c12t_SSE[1],c12_SSE); \
}

#endif
#if defined GMX_MM128_HERE && defined GMX_DOUBLE

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE) \
{ \
    gmx_mm_pr clj_SSE[UNROLLJ]; \
    int p; \
    for(p=0; p<UNROLLJ; p++) \
    { \
        clj_SSE[p] = gmx_load_pr(nbfp+type[aj+p]*NBFP_STRIDE); \
    } \
    GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[0],clj_SSE[1],c6_SSE,c12_SSE); \
}

#endif
#if defined GMX_MM256_HERE && defined GMX_DOUBLE

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE) \
{ \
    __m128d clj_SSE[UNROLLJ],c6t_SSE[2],c12t_SSE[2]; \
    int p; \
    for(p=0; p<UNROLLJ; p++) \
    { \
        clj_SSE[p] = _mm_load_pd(nbfp+type[aj+p]*NBFP_STRIDE); \
    } \
    GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[0],clj_SSE[1],c6t_SSE[0],c12t_SSE[0]); \
    GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[2],clj_SSE[3],c6t_SSE[1],c12t_SSE[1]); \
    GMX_2_M128D_TO_M256D(c6t_SSE[0],c6t_SSE[1],c6_SSE); \
    GMX_2_M128D_TO_M256D(c12t_SSE[0],c12t_SSE[1],c12_SSE); \
}

#endif
/* The load_table functions below are performance critical.
 * The routines issue UNROLLI*UNROLLJ _mm_load_ps calls.
 * As these all have latencies, scheduling is crucial.
 * The Intel compilers and CPUs seem to do a good job at this.
 * But AMD CPUs perform significantly worse with gcc than with icc.
 * Performance is improved a bit by using the extract function UNROLLJ times,
 * instead of doing an _mm_store_si128 for every i-particle.
 * This is only faster when we use FDV0 formatted tables, where we also need
 * to multiply the index by 4, which can be done with a SIMD bit shift.
 * With single precision AVX, 8 extracts are much slower than 1 store.
 * Because of this, the load_table_f macro always takes the ti parameter,
 * but it is only used with AVX.
 */
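/* Sketch of the FDV0 table layout assumed below, compiled out;
 * illustration only, assuming the FDV0 convention of force,
 * force-difference, energy and zero padding: 4 floats per table point,
 * 16-byte aligned, so a single _mm_load_ps fetches the whole entry.
 */
#if 0
{
    float F,D,V,P;

    F = tab_coul_FDV0[4*i+0]; /* force at table point i          */
    D = tab_coul_FDV0[4*i+1]; /* difference F[i+1]-F[i]          */
    V = tab_coul_FDV0[4*i+2]; /* energy at table point i         */
    P = tab_coul_FDV0[4*i+3]; /* zero padding to a 16-byte entry */
}
#endif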
#if defined GMX_MM128_HERE && !defined GMX_DOUBLE

#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE) \
{ \
    int    idx[4]; \
    __m128 ctab_SSE[4]; \
    /* Each table point has 4 float entries: left-shift the index by 2 */ \
    ti_SSE = _mm_slli_epi32(ti_SSE,2); \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */ \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0); \
    ctab_SSE[0] = _mm_load_ps(tab_coul_FDV0+idx[0]); \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1); \
    ctab_SSE[1] = _mm_load_ps(tab_coul_FDV0+idx[1]); \
    idx[2] = gmx_mm_extract_epi32(ti_SSE,2); \
    ctab_SSE[2] = _mm_load_ps(tab_coul_FDV0+idx[2]); \
    idx[3] = gmx_mm_extract_epi32(ti_SSE,3); \
    ctab_SSE[3] = _mm_load_ps(tab_coul_FDV0+idx[3]); \
    /* Shuffle the force table entries to a convenient order */ \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctab0_SSE,ctab1_SSE); \
}
#define load_table_f_v(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{ \
    int    idx[4]; \
    __m128 ctab_SSE[4]; \
    /* Each table point has 4 float entries: left-shift the index by 2 */ \
    ti_SSE = _mm_slli_epi32(ti_SSE,2); \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */ \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0); \
    ctab_SSE[0] = _mm_load_ps(tab_coul_FDV0+idx[0]); \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1); \
    ctab_SSE[1] = _mm_load_ps(tab_coul_FDV0+idx[1]); \
    idx[2] = gmx_mm_extract_epi32(ti_SSE,2); \
    ctab_SSE[2] = _mm_load_ps(tab_coul_FDV0+idx[2]); \
    idx[3] = gmx_mm_extract_epi32(ti_SSE,3); \
    ctab_SSE[3] = _mm_load_ps(tab_coul_FDV0+idx[3]); \
    /* Shuffle the force table entries to a convenient order */ \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctab0_SSE,ctab1_SSE); \
    /* Shuffle the energy table entries to a single register */ \
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabv_SSE); \
}

#endif
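/* Scalar equivalent of the gather in load_table_f above, compiled out;
 * illustration only, with ctab0/ctab1 standing for the lanes of the
 * output registers of the same names.
 */
#if 0
{
    real ctab0[UNROLLJ],ctab1[UNROLLJ];
    int  j;

    for(j=0; j<UNROLLJ; j++)
    {
        ctab0[j] = tab_coul_FDV0[4*ti[j] + 0]; /* F               */
        ctab1[j] = tab_coul_FDV0[4*ti[j] + 1]; /* D = F[i+1]-F[i] */
    }
}
#endif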
#if defined GMX_MM256_HERE && !defined GMX_DOUBLE

#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE) \
{ \
    __m128 ctab_SSE[8],ctabt_SSE[4]; \
    int    j; \
    /* Bit shifting would be faster, but AVX lacks 256-bit integer shifts */ \
    _mm256_store_si256((__m256i *)ti,ti_SSE); \
    for(j=0; j<8; j++) \
    { \
        ctab_SSE[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4); \
    } \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabt_SSE[0],ctabt_SSE[2]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[4],ctab_SSE[5],ctab_SSE[6],ctab_SSE[7],ctabt_SSE[1],ctabt_SSE[3]); \
    GMX_2_MM_TO_M256(ctabt_SSE[0],ctabt_SSE[1],ctab0_SSE); \
    GMX_2_MM_TO_M256(ctabt_SSE[2],ctabt_SSE[3],ctab1_SSE); \
}
#define load_table_f_v(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{ \
    __m128 ctab_SSE[8],ctabt_SSE[4],ctabvt_SSE[2]; \
    int    j; \
    /* Bit shifting would be faster, but AVX lacks 256-bit integer shifts */ \
    _mm256_store_si256((__m256i *)ti,ti_SSE); \
    for(j=0; j<8; j++) \
    { \
        ctab_SSE[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4); \
    } \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabt_SSE[0],ctabt_SSE[2]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[4],ctab_SSE[5],ctab_SSE[6],ctab_SSE[7],ctabt_SSE[1],ctabt_SSE[3]); \
    GMX_2_MM_TO_M256(ctabt_SSE[0],ctabt_SSE[1],ctab0_SSE); \
    GMX_2_MM_TO_M256(ctabt_SSE[2],ctabt_SSE[3],ctab1_SSE); \
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabvt_SSE[0]); \
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[4],ctab_SSE[5],ctab_SSE[6],ctab_SSE[7],ctabvt_SSE[1]); \
    GMX_2_MM_TO_M256(ctabvt_SSE[0],ctabvt_SSE[1],ctabv_SSE); \
}

#endif
#if defined GMX_MM128_HERE && defined GMX_DOUBLE

#define load_table_f(tab_coul_F, ti_SSE, ti, ctab0_SSE, ctab1_SSE) \
{ \
    int     idx[2]; \
    __m128d ctab_SSE[2]; \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */ \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0); \
    ctab_SSE[0] = _mm_loadu_pd(tab_coul_F+idx[0]); \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1); \
    ctab_SSE[1] = _mm_loadu_pd(tab_coul_F+idx[1]); \
    /* Shuffle the force table entries to a convenient order */ \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],ctab0_SSE,ctab1_SSE); \
    /* The second force table entry should contain the difference */ \
    ctab1_SSE = _mm_sub_pd(ctab1_SSE,ctab0_SSE); \
}
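/* Why the subtraction above, compiled out; illustration only, with
 * scalar placeholder names. With eps the fractional position within a
 * table interval, the kernels can then interpolate approximately as
 *     F(r) ~= ctab0 + eps*ctab1 = F[i] + eps*(F[i+1] - F[i])
 * so ctab1 must hold the difference rather than F[i+1] itself.
 */
#if 0
{
    real F_i; /* interpolated force for one pair */

    F_i = ctab0 + eps*ctab1; /* = F[i] + eps*(F[i+1] - F[i]) */
}
#endif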
#define load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{ \
    int     idx[2]; \
    __m128d ctab_SSE[4]; \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */ \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0); \
    ctab_SSE[0] = _mm_loadu_pd(tab_coul_F+idx[0]); \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1); \
    ctab_SSE[1] = _mm_loadu_pd(tab_coul_F+idx[1]); \
    /* Shuffle the force table entries to a convenient order */ \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],ctab0_SSE,ctab1_SSE); \
    /* The second force table entry should contain the difference */ \
    ctab1_SSE = _mm_sub_pd(ctab1_SSE,ctab0_SSE); \
    ctab_SSE[2] = _mm_loadu_pd(tab_coul_V+idx[0]); \
    ctab_SSE[3] = _mm_loadu_pd(tab_coul_V+idx[1]); \
    /* Shuffle the energy table entries to a single register */ \
    ctabv_SSE = _mm_shuffle_pd(ctab_SSE[2],ctab_SSE[3],_MM_SHUFFLE2(0,0)); \
}

#endif
#if defined GMX_MM256_HERE && defined GMX_DOUBLE

/* Put two 128-bit 2-double registers into one 256-bit 4-double register */
#define GMX_2_M128D_TO_M256D(in0,in1,out) \
{ \
    out = _mm256_insertf128_pd(_mm256_castpd128_pd256(in0),in1,1); \
}
#define load_table_f(tab_coul_F, ti_SSE, ti, ctab0_SSE, ctab1_SSE) \
{ \
    __m128d ctab_SSE[4],tr_SSE[4]; \
    int     j; \
    _mm_store_si128((__m128i *)ti,ti_SSE); \
    for(j=0; j<4; j++) \
    { \
        ctab_SSE[j] = _mm_loadu_pd(tab_coul_F+ti[j]); \
    } \
    /* Shuffle the force table entries to a convenient order */ \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],tr_SSE[0],tr_SSE[1]); \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[2],ctab_SSE[3],tr_SSE[2],tr_SSE[3]); \
    GMX_2_M128D_TO_M256D(tr_SSE[0],tr_SSE[2],ctab0_SSE); \
    GMX_2_M128D_TO_M256D(tr_SSE[1],tr_SSE[3],ctab1_SSE); \
    /* The second force table entry should contain the difference */ \
    ctab1_SSE = _mm256_sub_pd(ctab1_SSE,ctab0_SSE); \
}
#define load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{ \
    __m128d ctab_SSE[8],tr_SSE[4]; \
    int     j; \
    _mm_store_si128((__m128i *)ti,ti_SSE); \
    for(j=0; j<4; j++) \
    { \
        ctab_SSE[j] = _mm_loadu_pd(tab_coul_F+ti[j]); \
    } \
    /* Shuffle the force table entries to a convenient order */ \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],tr_SSE[0],tr_SSE[1]); \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[2],ctab_SSE[3],tr_SSE[2],tr_SSE[3]); \
    GMX_2_M128D_TO_M256D(tr_SSE[0],tr_SSE[2],ctab0_SSE); \
    GMX_2_M128D_TO_M256D(tr_SSE[1],tr_SSE[3],ctab1_SSE); \
    /* The second force table entry should contain the difference */ \
    ctab1_SSE = _mm256_sub_pd(ctab1_SSE,ctab0_SSE); \
    for(j=0; j<4; j++) \
    { \
        ctab_SSE[4+j] = _mm_loadu_pd(tab_coul_V+ti[j]); \
    } \
    /* Shuffle the energy table entries to a single register */ \
    GMX_2_M128D_TO_M256D(_mm_shuffle_pd(ctab_SSE[4],ctab_SSE[5],_MM_SHUFFLE2(0,0)),_mm_shuffle_pd(ctab_SSE[6],ctab_SSE[7],_MM_SHUFFLE2(0,0)),ctabv_SSE); \
}

#endif
/* Add the energy register to possibly multiple terms in the energy array.
 * This function is the same for SSE/AVX single/double.
 */
static inline void add_ener_grp(gmx_mm_pr e_SSE,real *v,const int *offset_jj)
{
    int jj;

    /* We need to balance the number of store operations with
     * the rapidly increasing number of combinations of energy groups.
     * We add to a temporary buffer for 1 i-group vs 2 j-groups.
     */
    for(jj=0; jj<(UNROLLJ/2); jj++)
    {
        gmx_mm_pr v_SSE;

        v_SSE = gmx_load_pr(v+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE);
        gmx_store_pr(v+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE,gmx_add_pr(v_SSE,e_SSE));
    }
}
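/* Scalar picture of add_ener_grp, compiled out; illustration only. The
 * two halves of a j-cluster can belong to different energy groups, so
 * the whole energy register is accumulated once per half, into a buffer
 * slot selected by that half's group pair (offset_jj); the reduction
 * over SIMD lanes happens later when the buffers are reduced. Here e[]
 * stands for the lanes of e_SSE.
 */
#if 0
{
    real e[GMX_SIMD_WIDTH_HERE];
    int  jj,k;

    for(jj=0; jj<(UNROLLJ/2); jj++)
    {
        for(k=0; k<GMX_SIMD_WIDTH_HERE; k++)
        {
            v[offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE+k] += e[k];
        }
    }
}
#endif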
#if defined GMX_X86_AVX_256 && GMX_SIMD_WIDTH_HERE == 8
/* As add_ener_grp above, but for two groups of UNROLLJ/2 energies
 * stored in the two halves of a single SIMD register.
 */
static inline void add_ener_grp_halves(gmx_mm_pr e_SSE,
                                       real *v0,real *v1,const int *offset_jj)
{
    gmx_mm_hpr e_SSE0,e_SSE1;
    int jj;

    e_SSE0 = _mm256_extractf128_ps(e_SSE,0);
    e_SSE1 = _mm256_extractf128_ps(e_SSE,1);

    for(jj=0; jj<(UNROLLJ/2); jj++)
    {
        gmx_mm_hpr v_SSE;

        v_SSE = gmx_load_hpr(v0+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
        gmx_store_hpr(v0+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2,gmx_add_hpr(v_SSE,e_SSE0));
    }
    for(jj=0; jj<(UNROLLJ/2); jj++)
    {
        gmx_mm_hpr v_SSE;

        v_SSE = gmx_load_hpr(v1+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
        gmx_store_hpr(v1+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2,gmx_add_hpr(v_SSE,e_SSE1));
    }
}
#endif

#endif /* _nbnxn_kernel_sse_utils_h_ */