/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS Development Team
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _nbnxn_kernel_sse_utils_h_
#define _nbnxn_kernel_sse_utils_h_

/* This file contains all functions/macros for the SSE/AVX kernels
 * which have explicit dependencies on the j-size / SIMD-width, which
 * can be 2 (SSE-double), 4 (SSE-single,AVX-double) or 8 (AVX-single).
 * The functionality which depends on the j-cluster size is:
 *   LJ-parameter lookup
 *   force table lookup
 *   energy group pair energy storage
 */

/* Transpose 2 double precision registers */
#define GMX_MM_TRANSPOSE2_OP_PD(in0,in1,out0,out1)                      \
{                                                                       \
    out0 = _mm_unpacklo_pd(in0,in1);                                    \
    out1 = _mm_unpackhi_pd(in0,in1);                                    \
}
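/* Illustration (not used by the kernels): with in0 = (a0,a1) and
 * in1 = (b0,b1), the unpack pair yields out0 = (a0,b0) and out1 = (a1,b1).
 */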

#if defined GMX_MM128_HERE || !defined GMX_DOUBLE
/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
#define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0,in1,in2,in3,out0,out1)    \
{                                                                       \
    __m128 _c01,_c23;                                                   \
    _c01 = _mm_movelh_ps(in0,in1);                                      \
    _c23 = _mm_movelh_ps(in2,in3);                                      \
    out0 = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(2,0,2,0));              \
    out1 = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(3,1,3,1));              \
}
#else
/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
#define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0,in1,in2,in3,out0,out1)    \
{                                                                       \
    __m256d _c01,_c23;                                                  \
    _c01 = _mm256_shuffle_pd(in0,in1,_MM_SHUFFLE(1,0,1,0));             \
    _c23 = _mm256_shuffle_pd(in2,in3,_MM_SHUFFLE(1,0,1,0));             \
    out0 = _mm256_shuffle_pd(_c01,_c23,_MM_SHUFFLE(2,0,2,0));           \
    out1 = _mm256_shuffle_pd(_c01,_c23,_MM_SHUFFLE(3,1,3,1));           \
}
#endif

/* Collect element 2 of the 4 inputs to out */
#define GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(in0,in1,in2,in3,out)           \
{                                                                       \
    __m128 _c01,_c23;                                                   \
    _c01 = _mm_shuffle_ps(in0,in1,_MM_SHUFFLE(3,2,3,2));                \
    _c23 = _mm_shuffle_ps(in2,in3,_MM_SHUFFLE(3,2,3,2));                \
    out  = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(2,0,2,0));              \
}

#ifndef GMX_MM256_HERE
#ifndef GMX_DOUBLE
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4_PR(in0,in1,in2,in3,out)                   \
{                                                                       \
    _MM_TRANSPOSE4_PS(in0,in1,in2,in3);                                 \
    in0 = _mm_add_ps(in0,in1);                                          \
    in2 = _mm_add_ps(in2,in3);                                          \
    out  = _mm_add_ps(in0,in2);                                         \
}
#else
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM2_PD(in0,in1,out)                           \
{                                                                       \
    GMX_MM_TRANSPOSE2_PD(in0,in1);                                      \
    out  = _mm_add_pd(in0,in1);                                         \
}
#endif
#else
#ifndef GMX_DOUBLE
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4_PR(in0,in1,in2,in3,out)                   \
{                                                                       \
    in0 = _mm256_hadd_ps(in0,in1);                                      \
    in2 = _mm256_hadd_ps(in2,in3);                                      \
    in1 = _mm256_hadd_ps(in0,in2);                                      \
    out = _mm_add_ps(_mm256_castps256_ps128(in1),_mm256_extractf128_ps(in1,1)); \
}
/* Sum the elements of the halves of each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4H_PR(in0,in2,out)                          \
{                                                                       \
    in0 = _mm256_hadd_ps(in0,_mm256_setzero_ps());                      \
    in2 = _mm256_hadd_ps(in2,_mm256_setzero_ps());                      \
    in0 = _mm256_hadd_ps(in0,in2);                                      \
    /* 0xb1 = _MM_SHUFFLE(2,3,0,1): swap the elements of each pair */   \
    in2 = _mm256_permute_ps(in0,0xb1);                                  \
    out = _mm_add_ps(_mm256_castps256_ps128(in0),_mm256_extractf128_ps(in2,1)); \
}
#else
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4_PR(in0,in1,in2,in3,out)                   \
{                                                                       \
    in0 = _mm256_hadd_pd(in0,in1);                                      \
    in2 = _mm256_hadd_pd(in2,in3);                                      \
    out = _mm256_add_pd(_mm256_permute2f128_pd(in0,in2,0x20),_mm256_permute2f128_pd(in0,in2,0x31)); \
}
#endif
#endif

#ifdef GMX_MM128_HERE

static inline __m128
gmx_mm128_invsqrt_ps_single(__m128 x)
{
    const __m128 half  = _mm_set_ps(0.5,0.5,0.5,0.5);
    const __m128 three = _mm_set_ps(3.0,3.0,3.0,3.0);

    __m128 lu = _mm_rsqrt_ps(x);

    return _mm_mul_ps(half,_mm_mul_ps(_mm_sub_ps(three,_mm_mul_ps(_mm_mul_ps(lu,lu),x)),lu));
}
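/* The single NR step above implements lu' = 0.5*lu*(3 - x*lu*lu), refining
 * the hardware rsqrt estimate (about 12 bits) to roughly single precision.
 */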

/* Do 2 double precision invsqrt operations.
 * The SIMD rsqrt and the first Newton-Raphson iteration are done in
 * single precision; a second iteration, done below in double precision,
 * then gives close to full double precision accuracy.
 * The speed is more than double that of two gmx_mm_invsqrt_pd calls.
 */
#define GMX_MM128_INVSQRT2_PD(in0,in1,out0,out1)                        \
{                                                                       \
    const __m128d half  = _mm_set1_pd(0.5);                             \
    const __m128d three = _mm_set1_pd(3.0);                             \
    __m128  s,ir;                                                       \
    __m128d lu0,lu1;                                                    \
                                                                        \
    s    = _mm_movelh_ps(_mm_cvtpd_ps(in0),_mm_cvtpd_ps(in1));          \
    ir   = gmx_mm128_invsqrt_ps_single(s);                              \
    lu0  = _mm_cvtps_pd(ir);                                            \
    lu1  = _mm_cvtps_pd(_mm_movehl_ps(ir,ir));                          \
    out0 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu0,lu0),in0)),lu0)); \
    out1 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu1,lu1),in1)),lu1)); \
}

#define GMX_MM_INVSQRT2_PD GMX_MM128_INVSQRT2_PD
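
/* Minimal usage sketch with hypothetical variables (not kernel code):
 *
 *     __m128d rsq_a, rsq_b, rinv_a, rinv_b;
 *     ...
 *     GMX_MM_INVSQRT2_PD(rsq_a, rsq_b, rinv_a, rinv_b);
 *
 * computes rinv = 1/sqrt(rsq) element-wise for both double registers;
 * the AVX variant below does the same for two __m256d registers.
 */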

#endif

#ifdef GMX_MM256_HERE

static inline __m256
gmx_mm256_invsqrt_ps_single(__m256 x)
{
    const __m256 half  = _mm256_set_ps(0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5);
    const __m256 three = _mm256_set_ps(3.0,3.0,3.0,3.0,3.0,3.0,3.0,3.0);

    __m256 lu = _mm256_rsqrt_ps(x);

    return _mm256_mul_ps(half,_mm256_mul_ps(_mm256_sub_ps(three,_mm256_mul_ps(_mm256_mul_ps(lu,lu),x)),lu));
}

/* Do 4 double precision invsqrt operations.
 * The SIMD rsqrt and the first Newton-Raphson iteration are done in
 * single precision; a second iteration, done below in double precision,
 * then gives close to full double precision accuracy.
 */
#define GMX_MM256_INVSQRT2_PD(in0,in1,out0,out1)                        \
{                                                                       \
    const __m256d half  = _mm256_set1_pd(0.5);                          \
    const __m256d three = _mm256_set1_pd(3.0);                          \
    __m256  s,ir;                                                       \
    __m256d lu0,lu1;                                                    \
                                                                        \
    s    = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm256_cvtpd_ps(in0)),_mm256_cvtpd_ps(in1),1); \
    ir   = gmx_mm256_invsqrt_ps_single(s);                              \
    lu0  = _mm256_cvtps_pd(_mm256_castps256_ps128(ir));                 \
    lu1  = _mm256_cvtps_pd(_mm256_extractf128_ps(ir,1));                \
    out0 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu0,lu0),in0)),lu0)); \
    out1 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu1,lu1),in1)),lu1)); \
}

#define GMX_MM_INVSQRT2_PD GMX_MM256_INVSQRT2_PD

#endif

/* Force and energy table load and interpolation routines */

#if defined GMX_MM128_HERE && !defined GMX_DOUBLE

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE)                \
{                                                                       \
    gmx_mm_pr clj_SSE[UNROLLJ];                                         \
    int p;                                                              \
                                                                        \
    for(p=0; p<UNROLLJ; p++)                                            \
    {                                                                   \
        /* Here we load 4 aligned floats, but we need just 2 */         \
        clj_SSE[p] = gmx_load_pr(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0],clj_SSE[1],clj_SSE[2],clj_SSE[3],c6_SSE,c12_SSE); \
}
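/* After the shuffle, c6_SSE holds the c6 and c12_SSE the c12 parameter
 * for each of the 4 j-atoms, taken from elements 0 and 1 of the per-type
 * pairs loaded from nbfp.
 */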

#endif

#if defined GMX_MM256_HERE && !defined GMX_DOUBLE

/* Put two 128-bit 4-float registers into one 256-bit 8-float register */
#define GMX_2_MM_TO_M256(in0,in1,out)                                   \
{                                                                       \
    out = _mm256_insertf128_ps(_mm256_castps128_ps256(in0),in1,1);      \
}

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE)                \
{                                                                       \
    __m128 clj_SSE[UNROLLJ],c6t_SSE[2],c12t_SSE[2];                     \
    int p;                                                              \
                                                                        \
    for(p=0; p<UNROLLJ; p++)                                            \
    {                                                                   \
        /* Here we load 4 aligned floats, but we need just 2 */         \
        clj_SSE[p] = _mm_load_ps(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0],clj_SSE[1],clj_SSE[2],clj_SSE[3],c6t_SSE[0],c12t_SSE[0]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[4],clj_SSE[5],clj_SSE[6],clj_SSE[7],c6t_SSE[1],c12t_SSE[1]); \
                                                                        \
    GMX_2_MM_TO_M256(c6t_SSE[0],c6t_SSE[1],c6_SSE);                     \
    GMX_2_MM_TO_M256(c12t_SSE[0],c12t_SSE[1],c12_SSE);                  \
}

#define load_lj_pair_params2(nbfp,type,aj,c6_SSE,c12_SSE)               \
{                                                                       \
    __m128 clj_SSE[2*UNROLLJ],c6t_SSE[2],c12t_SSE[2];                   \
    int p;                                                              \
                                                                        \
    for(p=0; p<2*UNROLLJ; p++)                                          \
    {                                                                   \
        /* Here we load 4 aligned floats, but we need just 2 */         \
        clj_SSE[p] = _mm_load_ps(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0],clj_SSE[1],clj_SSE[2],clj_SSE[3],c6t_SSE[0],c12t_SSE[0]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[4],clj_SSE[5],clj_SSE[6],clj_SSE[7],c6t_SSE[1],c12t_SSE[1]); \
                                                                        \
    GMX_2_MM_TO_M256(c6t_SSE[0],c6t_SSE[1],c6_SSE);                     \
    GMX_2_MM_TO_M256(c12t_SSE[0],c12t_SSE[1],c12_SSE);                  \
}

#endif

#if defined GMX_MM128_HERE && defined GMX_DOUBLE

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE)                \
{                                                                       \
    gmx_mm_pr clj_SSE[UNROLLJ];                                         \
    int p;                                                              \
                                                                        \
    for(p=0; p<UNROLLJ; p++)                                            \
    {                                                                   \
        clj_SSE[p] = gmx_load_pr(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[0],clj_SSE[1],c6_SSE,c12_SSE);      \
}

#endif

#if defined GMX_MM256_HERE && defined GMX_DOUBLE

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE)                \
{                                                                       \
    __m128d clj_SSE[UNROLLJ],c6t_SSE[2],c12t_SSE[2];                    \
    int p;                                                              \
                                                                        \
    for(p=0; p<UNROLLJ; p++)                                            \
    {                                                                   \
        clj_SSE[p] = _mm_load_pd(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[0],clj_SSE[1],c6t_SSE[0],c12t_SSE[0]); \
    GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[2],clj_SSE[3],c6t_SSE[1],c12t_SSE[1]); \
    GMX_2_M128D_TO_M256D(c6t_SSE[0],c6t_SSE[1],c6_SSE);                 \
    GMX_2_M128D_TO_M256D(c12t_SSE[0],c12t_SSE[1],c12_SSE);              \
}

#endif


/* The load_table functions below are performance critical.
 * The routines issue UNROLLI*UNROLLJ _mm_load_ps calls.
 * As these all have latencies, scheduling is crucial.
 * The Intel compilers and CPUs seem to do a good job at this.
 * But AMD CPUs perform significantly worse with gcc than with icc.
 * Performance is improved a bit by using the extract function UNROLLJ times,
 * instead of doing an _mm_store_si128 for every i-particle.
 * This is only faster when we use FDV0 formatted tables, where we also need
 * to multiply the index by 4, which can be done by a SIMD bit shift.
 * With single precision AVX, 8 extracts are much slower than 1 store.
 * Because of this, the load_table_f macro always takes the ti parameter,
 * but it is only used with AVX.
 */

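/* Illustration of the FDV0 layout assumed above (a sketch, not a new format):
 * each table point i stores four consecutive floats, F(i), D(i)=F(i+1)-F(i),
 * V(i) and a zero padding element, so the data for point i starts at
 * tab_coul_FDV0 + 4*i; the multiply by 4 is the left-shift by 2 below.
 */
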
#if defined GMX_MM128_HERE && !defined GMX_DOUBLE

#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE)   \
{                                                                       \
    int    idx[4];                                                      \
    __m128 ctab_SSE[4];                                                 \
                                                                        \
    /* Table has 4 entries, left-shift index by 2 */                    \
    ti_SSE = _mm_slli_epi32(ti_SSE,2);                                  \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0);                            \
    ctab_SSE[0] = _mm_load_ps(tab_coul_FDV0+idx[0]);                    \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1);                            \
    ctab_SSE[1] = _mm_load_ps(tab_coul_FDV0+idx[1]);                    \
    idx[2] = gmx_mm_extract_epi32(ti_SSE,2);                            \
    ctab_SSE[2] = _mm_load_ps(tab_coul_FDV0+idx[2]);                    \
    idx[3] = gmx_mm_extract_epi32(ti_SSE,3);                            \
    ctab_SSE[3] = _mm_load_ps(tab_coul_FDV0+idx[3]);                    \
                                                                        \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctab0_SSE,ctab1_SSE); \
}
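/* After this, ctab0_SSE holds F and ctab1_SSE holds D (the force difference)
 * of the table point for each of the 4 j-atoms; the V entries are only
 * gathered in load_table_f_v below.
 */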

#define load_table_f_v(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{                                                                       \
    int    idx[4];                                                      \
    __m128 ctab_SSE[4];                                                 \
                                                                        \
    /* Table has 4 entries, left-shift index by 2 */                    \
    ti_SSE = _mm_slli_epi32(ti_SSE,2);                                  \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0);                            \
    ctab_SSE[0] = _mm_load_ps(tab_coul_FDV0+idx[0]);                    \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1);                            \
    ctab_SSE[1] = _mm_load_ps(tab_coul_FDV0+idx[1]);                    \
    idx[2] = gmx_mm_extract_epi32(ti_SSE,2);                            \
    ctab_SSE[2] = _mm_load_ps(tab_coul_FDV0+idx[2]);                    \
    idx[3] = gmx_mm_extract_epi32(ti_SSE,3);                            \
    ctab_SSE[3] = _mm_load_ps(tab_coul_FDV0+idx[3]);                    \
                                                                        \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctab0_SSE,ctab1_SSE); \
    /* Shuffle the energy table entries to a convenient order */        \
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabv_SSE); \
}

#endif

#if defined GMX_MM256_HERE && !defined GMX_DOUBLE

#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE)   \
{                                                                       \
    __m128 ctab_SSE[8],ctabt_SSE[4];                                    \
    int    j;                                                           \
                                                                        \
    /* Bit shifting would be faster, but AVX doesn't support that */    \
    _mm256_store_si256((__m256i *)ti,ti_SSE);                           \
    for(j=0; j<8; j++)                                                  \
    {                                                                   \
        ctab_SSE[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);               \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabt_SSE[0],ctabt_SSE[2]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[4],ctab_SSE[5],ctab_SSE[6],ctab_SSE[7],ctabt_SSE[1],ctabt_SSE[3]); \
                                                                        \
    GMX_2_MM_TO_M256(ctabt_SSE[0],ctabt_SSE[1],ctab0_SSE);              \
    GMX_2_MM_TO_M256(ctabt_SSE[2],ctabt_SSE[3],ctab1_SSE);              \
}

#define load_table_f_v(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{                                                                       \
    __m128 ctab_SSE[8],ctabt_SSE[4],ctabvt_SSE[2];                      \
    int    j;                                                           \
                                                                        \
    /* Bit shifting would be faster, but AVX doesn't support that */    \
    _mm256_store_si256((__m256i *)ti,ti_SSE);                           \
    for(j=0; j<8; j++)                                                  \
    {                                                                   \
        ctab_SSE[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);               \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabt_SSE[0],ctabt_SSE[2]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[4],ctab_SSE[5],ctab_SSE[6],ctab_SSE[7],ctabt_SSE[1],ctabt_SSE[3]); \
                                                                        \
    GMX_2_MM_TO_M256(ctabt_SSE[0],ctabt_SSE[1],ctab0_SSE);              \
    GMX_2_MM_TO_M256(ctabt_SSE[2],ctabt_SSE[3],ctab1_SSE);              \
                                                                        \
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabvt_SSE[0]); \
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[4],ctab_SSE[5],ctab_SSE[6],ctab_SSE[7],ctabvt_SSE[1]); \
                                                                        \
    GMX_2_MM_TO_M256(ctabvt_SSE[0],ctabvt_SSE[1],ctabv_SSE);            \
}

#endif

#if defined GMX_MM128_HERE && defined GMX_DOUBLE

#define load_table_f(tab_coul_F, ti_SSE, ti, ctab0_SSE, ctab1_SSE)      \
{                                                                       \
    int     idx[2];                                                     \
    __m128d ctab_SSE[2];                                                \
                                                                        \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0);                            \
    ctab_SSE[0] = _mm_loadu_pd(tab_coul_F+idx[0]);                      \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1);                            \
    ctab_SSE[1] = _mm_loadu_pd(tab_coul_F+idx[1]);                      \
                                                                        \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],ctab0_SSE,ctab1_SSE); \
    /* The second force table entry should contain the difference */    \
    ctab1_SSE = _mm_sub_pd(ctab1_SSE,ctab0_SSE);                        \
}
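/* i.e. ctab1_SSE now holds F(i+1)-F(i), which a caller can use as the
 * slope for linear interpolation within the table interval.
 */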

#define load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{                                                                       \
    int     idx[2];                                                     \
    __m128d ctab_SSE[4];                                                \
                                                                        \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0);                            \
    ctab_SSE[0] = _mm_loadu_pd(tab_coul_F+idx[0]);                      \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1);                            \
    ctab_SSE[1] = _mm_loadu_pd(tab_coul_F+idx[1]);                      \
                                                                        \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],ctab0_SSE,ctab1_SSE); \
    /* The second force table entry should contain the difference */    \
    ctab1_SSE = _mm_sub_pd(ctab1_SSE,ctab0_SSE);                        \
                                                                        \
    ctab_SSE[2] = _mm_loadu_pd(tab_coul_V+idx[0]);                      \
    ctab_SSE[3] = _mm_loadu_pd(tab_coul_V+idx[1]);                      \
                                                                        \
    /* Shuffle the energy table entries to a single register */         \
    ctabv_SSE = _mm_shuffle_pd(ctab_SSE[2],ctab_SSE[3],_MM_SHUFFLE2(0,0)); \
}

#endif

#if defined GMX_MM256_HERE && defined GMX_DOUBLE

/* Put two 128-bit 2-double registers into one 256-bit 4-double register */
#define GMX_2_M128D_TO_M256D(in0,in1,out)                               \
{                                                                       \
    out = _mm256_insertf128_pd(_mm256_castpd128_pd256(in0),in1,1);      \
}

#define load_table_f(tab_coul_F, ti_SSE, ti, ctab0_SSE, ctab1_SSE)      \
{                                                                       \
    __m128d ctab_SSE[4],tr_SSE[4];                                      \
    int     j;                                                          \
                                                                        \
    _mm_store_si128((__m128i *)ti,ti_SSE);                              \
    for(j=0; j<4; j++)                                                  \
    {                                                                   \
        ctab_SSE[j] = _mm_loadu_pd(tab_coul_F+ti[j]);                   \
    }                                                                   \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],tr_SSE[0],tr_SSE[1]); \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[2],ctab_SSE[3],tr_SSE[2],tr_SSE[3]); \
    GMX_2_M128D_TO_M256D(tr_SSE[0],tr_SSE[2],ctab0_SSE);                \
    GMX_2_M128D_TO_M256D(tr_SSE[1],tr_SSE[3],ctab1_SSE);                \
    /* The second force table entry should contain the difference */    \
    ctab1_SSE = _mm256_sub_pd(ctab1_SSE,ctab0_SSE);                     \
}

#define load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{                                                                       \
    __m128d ctab_SSE[8],tr_SSE[4];                                      \
    int     j;                                                          \
                                                                        \
    _mm_store_si128((__m128i *)ti,ti_SSE);                              \
    for(j=0; j<4; j++)                                                  \
    {                                                                   \
        ctab_SSE[j] = _mm_loadu_pd(tab_coul_F+ti[j]);                   \
    }                                                                   \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],tr_SSE[0],tr_SSE[1]); \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[2],ctab_SSE[3],tr_SSE[2],tr_SSE[3]); \
    GMX_2_M128D_TO_M256D(tr_SSE[0],tr_SSE[2],ctab0_SSE);                \
    GMX_2_M128D_TO_M256D(tr_SSE[1],tr_SSE[3],ctab1_SSE);                \
    /* The second force table entry should contain the difference */    \
    ctab1_SSE = _mm256_sub_pd(ctab1_SSE,ctab0_SSE);                     \
                                                                        \
    for(j=0; j<4; j++)                                                  \
    {                                                                   \
        ctab_SSE[4+j] = _mm_loadu_pd(tab_coul_V+ti[j]);                 \
    }                                                                   \
    /* Shuffle the energy table entries to a single register */         \
    GMX_2_M128D_TO_M256D(_mm_shuffle_pd(ctab_SSE[4],ctab_SSE[5],_MM_SHUFFLE2(0,0)),_mm_shuffle_pd(ctab_SSE[6],ctab_SSE[7],_MM_SHUFFLE2(0,0)),ctabv_SSE); \
}

#endif


/* Add energy register to possibly multiple terms in the energy array.
 * This function is the same for SSE/AVX single/double.
 */
static inline void add_ener_grp(gmx_mm_pr e_SSE,real *v,const int *offset_jj)
{
    int jj;

    /* We need to balance the number of store operations with
     * the rapidly increasing number of combinations of energy groups.
     * We add to a temporary buffer for 1 i-group vs 2 j-groups.
     */
    for(jj=0; jj<(UNROLLJ/2); jj++)
    {
        gmx_mm_pr v_SSE;

        v_SSE = gmx_load_pr(v+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE);
        gmx_store_pr(v+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE,gmx_add_pr(v_SSE,e_SSE));
    }
}
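
/* Hypothetical call site (illustration only, not the actual kernel code):
 * after accumulating the Coulomb energies of one i-atom in vcoul_SSE,
 * a kernel would do
 *
 *     add_ener_grp(vcoul_SSE, vctp[i], egp_jj);
 *
 * where vctp[i] points into the per-(i-group,j-group) energy buffer and
 * egp_jj holds the j-group dependent offsets; the names are illustrative.
 */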

#if defined GMX_X86_AVX_256 && GMX_SIMD_WIDTH_HERE == 8
/* As add_ener_grp above, but for two groups of UNROLLJ/2 stored in
 * a single SIMD register.
 */
static inline void add_ener_grp_halves(gmx_mm_pr e_SSE,
                                       real *v0,real *v1,const int *offset_jj)
{
    gmx_mm_hpr e_SSE0,e_SSE1;
    int jj;

    e_SSE0 = _mm256_extractf128_ps(e_SSE,0);
    e_SSE1 = _mm256_extractf128_ps(e_SSE,1);

    for(jj=0; jj<(UNROLLJ/2); jj++)
    {
        gmx_mm_hpr v_SSE;

        v_SSE = gmx_load_hpr(v0+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
        gmx_store_hpr(v0+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2,gmx_add_hpr(v_SSE,e_SSE0));
    }
    for(jj=0; jj<(UNROLLJ/2); jj++)
    {
        gmx_mm_hpr v_SSE;

        v_SSE = gmx_load_hpr(v1+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
        gmx_store_hpr(v1+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2,gmx_add_hpr(v_SSE,e_SSE1));
    }
}
#endif

#endif /* _nbnxn_kernel_sse_utils_h_ */