/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS Development Team
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _nbnxn_kernel_sse_utils_h_
#define _nbnxn_kernel_sse_utils_h_

/* This file contains all functions/macros for the SSE/AVX kernels
 * which have explicit dependencies on the j-size / SIMD-width, which
 * can be 2 (SSE-double), 4 (SSE-single, AVX-double) or 8 (AVX-single).
 * The functionality that depends on the j-cluster size is:
 *   LJ-parameter lookup
 *   force table lookup
 *   energy group pair energy storage
 */

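/* The including kernel source is expected to select the SIMD flavor before
 * including this header, presumably by defining GMX_MM128_HERE or
 * GMX_MM256_HERE (plus GMX_DOUBLE for double precision), since those are
 * the macros the conditional blocks below test.
 */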
#define GMX_MM_TRANSPOSE2_OP_PD(in0,in1,out0,out1)                      \
{                                                                       \
    out0 = _mm_shuffle_pd(in0,in1,_MM_SHUFFLE2(0,0));                   \
    out1 = _mm_shuffle_pd(in0,in1,_MM_SHUFFLE2(1,1));                   \
}
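/* Illustrative effect of the macro above: with in0 = {a0,a1} and
 * in1 = {b0,b1}, the outputs are out0 = {a0,b0} and out1 = {a1,b1}.
 */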

#if defined GMX_MM128_HERE || !defined GMX_DOUBLE
#define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0,in1,in2,in3,out0,out1)    \
{                                                                       \
    __m128 _c01,_c23;                                                   \
    _c01 = _mm_shuffle_ps(in0,in1,_MM_SHUFFLE(1,0,1,0));                \
    _c23 = _mm_shuffle_ps(in2,in3,_MM_SHUFFLE(1,0,1,0));                \
    out0 = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(2,0,2,0));              \
    out1 = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(3,1,3,1));              \
}
#else
#define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0,in1,in2,in3,out0,out1)    \
{                                                                       \
    __m256d _c01,_c23;                                                  \
    _c01 = _mm256_shuffle_pd(in0,in1,_MM_SHUFFLE(1,0,1,0));             \
    _c23 = _mm256_shuffle_pd(in2,in3,_MM_SHUFFLE(1,0,1,0));             \
    out0 = _mm256_shuffle_pd(_c01,_c23,_MM_SHUFFLE(2,0,2,0));           \
    out1 = _mm256_shuffle_pd(_c01,_c23,_MM_SHUFFLE(3,1,3,1));           \
}
#endif
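/* In the 128-bit single-precision variant above, out0 gathers element 0 and
 * out1 gathers element 1 of the four inputs (illustrative):
 * out0 = {in0[0],in1[0],in2[0],in3[0]}, out1 = {in0[1],in1[1],in2[1],in3[1]}.
 */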

#define GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(in0,in1,in2,in3,out)           \
{                                                                       \
    __m128 _c01,_c23;                                                   \
    _c01 = _mm_shuffle_ps(in0,in1,_MM_SHUFFLE(3,2,3,2));                \
    _c23 = _mm_shuffle_ps(in2,in3,_MM_SHUFFLE(3,2,3,2));                \
    out  = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(2,0,2,0));              \
}
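/* The macro above gathers element 2 of each input (illustrative):
 * out = {in0[2],in1[2],in2[2],in3[2]}.
 */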

#ifndef GMX_MM256_HERE
#ifndef GMX_DOUBLE
#define GMX_MM_TRANSPOSE_SUM4_PR(i_SSE0,i_SSE1,i_SSE2,i_SSE3,o_SSE)     \
{                                                                       \
    _MM_TRANSPOSE4_PS(i_SSE0,i_SSE1,i_SSE2,i_SSE3);                     \
    i_SSE0 = _mm_add_ps(i_SSE0,i_SSE1);                                 \
    i_SSE2 = _mm_add_ps(i_SSE2,i_SSE3);                                 \
    o_SSE  = _mm_add_ps(i_SSE0,i_SSE2);                                 \
}
#else
#define GMX_MM_TRANSPOSE_SUM2_PD(i_SSE0,i_SSE1,o_SSE)                   \
{                                                                       \
    GMX_MM_TRANSPOSE2_PD(i_SSE0,i_SSE1);                                \
    o_SSE  = _mm_add_pd(i_SSE0,i_SSE1);                                 \
}
#endif
#else
#ifndef GMX_DOUBLE
#define GMX_MM_TRANSPOSE_SUM4_PR(i_SSE0,i_SSE1,i_SSE2,i_SSE3,o_SSE)     \
{                                                                       \
    i_SSE0 = _mm256_hadd_ps(i_SSE0,i_SSE1);                             \
    i_SSE2 = _mm256_hadd_ps(i_SSE2,i_SSE3);                             \
    i_SSE1 = _mm256_hadd_ps(i_SSE0,i_SSE2);                             \
    o_SSE  = _mm_add_ps(_mm256_castps256_ps128(i_SSE1),_mm256_extractf128_ps(i_SSE1,1)); \
}
#define GMX_MM_TRANSPOSE_SUM4H_PR(i_SSE0,i_SSE2,o_SSE)                  \
{                                                                       \
    i_SSE0 = _mm256_hadd_ps(i_SSE0,_mm256_setzero_ps());                \
    i_SSE2 = _mm256_hadd_ps(i_SSE2,_mm256_setzero_ps());                \
    i_SSE0 = _mm256_hadd_ps(i_SSE0,i_SSE2);                             \
    i_SSE2 = _mm256_permute_ps(i_SSE0,0xb1); /* 0xb1 = 0b10110001: swap pair elements within each lane */ \
    o_SSE  = _mm_add_ps(_mm256_castps256_ps128(i_SSE0),_mm256_extractf128_ps(i_SSE2,1)); \
}
#else
#define GMX_MM_TRANSPOSE_SUM4_PR(i_SSE0,i_SSE1,i_SSE2,i_SSE3,o_SSE)     \
{                                                                       \
    i_SSE0 = _mm256_hadd_pd(i_SSE0,i_SSE1);                             \
    i_SSE2 = _mm256_hadd_pd(i_SSE2,i_SSE3);                             \
    o_SSE  = _mm256_add_pd(_mm256_permute2f128_pd(i_SSE0,i_SSE2,0x20),_mm256_permute2f128_pd(i_SSE0,i_SSE2,0x31)); \
}
#endif
#endif
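/* Summary of the transpose+sum macros above (illustrative): element k of the
 * output holds the full horizontal sum of input register k; the 4H variant
 * instead produces the sums of the two 128-bit halves of each of its two
 * inputs.
 */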

#ifdef GMX_MM128_HERE

static inline __m128
gmx_mm128_invsqrt_ps_single(__m128 x)
{
    const __m128 half  = _mm_set_ps(0.5,0.5,0.5,0.5);
    const __m128 three = _mm_set_ps(3.0,3.0,3.0,3.0);

    __m128 lu = _mm_rsqrt_ps(x);

    return _mm_mul_ps(half,_mm_mul_ps(_mm_sub_ps(three,_mm_mul_ps(_mm_mul_ps(lu,lu),x)),lu));
}

/* Do 2/4 double precision invsqrt operations.
 * Doing the SSE rsqrt and the first Newton-Raphson iteration
 * in single precision gives full double precision accuracy.
 * This is more than twice as fast as two gmx_mm_invsqrt_pd calls.
 */
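/* The refinement used above and in the macros below is one Newton-Raphson
 * step for y = 1/sqrt(x):  y' = 0.5*y*(3 - x*y*y).
 */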
#define GMX_MM128_INVSQRT2_PD(i_SSE0,i_SSE1,o_SSE0,o_SSE1)              \
{                                                                       \
    const __m128d half  = _mm_set1_pd(0.5);                             \
    const __m128d three = _mm_set1_pd(3.0);                             \
    __m128  s_SSE,ir_SSE;                                               \
    __m128d lu0,lu1;                                                    \
                                                                        \
    s_SSE  = _mm_movelh_ps(_mm_cvtpd_ps(i_SSE0),_mm_cvtpd_ps(i_SSE1));  \
    ir_SSE = gmx_mm128_invsqrt_ps_single(s_SSE);                        \
    lu0    = _mm_cvtps_pd(ir_SSE);                                      \
    lu1    = _mm_cvtps_pd(_mm_movehl_ps(ir_SSE,ir_SSE));                \
    o_SSE0 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu0,lu0),i_SSE0)),lu0)); \
    o_SSE1 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu1,lu1),i_SSE1)),lu1)); \
}

#define GMX_MM_INVSQRT2_PD GMX_MM128_INVSQRT2_PD

#endif

#ifdef GMX_MM256_HERE

static inline __m256
gmx_mm256_invsqrt_ps_single(__m256 x)
{
    const __m256 half  = _mm256_set_ps(0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5);
    const __m256 three = _mm256_set_ps(3.0,3.0,3.0,3.0,3.0,3.0,3.0,3.0);

    __m256 lu = _mm256_rsqrt_ps(x);

    return _mm256_mul_ps(half,_mm256_mul_ps(_mm256_sub_ps(three,_mm256_mul_ps(_mm256_mul_ps(lu,lu),x)),lu));
}

#define GMX_MM256_INVSQRT2_PD(i_SSE0,i_SSE1,o_SSE0,o_SSE1)              \
{                                                                       \
    const __m256d half  = _mm256_set1_pd(0.5);                          \
    const __m256d three = _mm256_set1_pd(3.0);                          \
    __m256  s_SSE,ir_SSE;                                               \
    __m256d lu0,lu1;                                                    \
                                                                        \
    s_SSE  = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm256_cvtpd_ps(i_SSE0)),_mm256_cvtpd_ps(i_SSE1),1); \
    ir_SSE = gmx_mm256_invsqrt_ps_single(s_SSE);                        \
    lu0    = _mm256_cvtps_pd(_mm256_castps256_ps128(ir_SSE));           \
    lu1    = _mm256_cvtps_pd(_mm256_extractf128_ps(ir_SSE,1));          \
    o_SSE0 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu0,lu0),i_SSE0)),lu0)); \
    o_SSE1 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu1,lu1),i_SSE1)),lu1)); \
}

#define GMX_MM_INVSQRT2_PD GMX_MM256_INVSQRT2_PD

#endif
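/* Illustrative use (variable names hypothetical): with the double-precision
 * flavor selected, two registers of squared distances are converted to
 * 1/sqrt in a single call:
 *
 *     GMX_MM_INVSQRT2_PD(rsq0_SSE, rsq1_SSE, rinv0_SSE, rinv1_SSE);
 */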

/* Force and energy table load and interpolation routines */

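/* Note on the LJ parameter loads below: nbfp appears to store the c6/c12
 * pair for each atom-type combination contiguously, padded to NBFP_STRIDE
 * reals per entry, so a full aligned entry is loaded and the two needed
 * parameters are shuffled out (see the in-macro comments).
 */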
#if defined GMX_MM128_HERE && !defined GMX_DOUBLE

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE)                \
{                                                                       \
    gmx_mm_pr clj_SSE[UNROLLJ];                                         \
    int p;                                                              \
                                                                        \
    for(p=0; p<UNROLLJ; p++)                                            \
    {                                                                   \
        /* Here we load 4 aligned floats, but we need just 2 */         \
        clj_SSE[p] = gmx_load_pr(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0],clj_SSE[1],clj_SSE[2],clj_SSE[3],c6_SSE,c12_SSE); \
}

#endif

#if defined GMX_MM256_HERE && !defined GMX_DOUBLE

/* Put two 128-bit 4-float registers into one 256-bit 8-float register */
#define GMX_2_MM_TO_M256(in0,in1,out)                                   \
{                                                                       \
    out = _mm256_insertf128_ps(_mm256_castps128_ps256(in0),in1,1);      \
}
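/* Illustrative effect: out = {in0[0..3], in1[0..3]} as one 8-float register. */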

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE)                \
{                                                                       \
    __m128 clj_SSE[UNROLLJ],c6t_SSE[2],c12t_SSE[2];                     \
    int p;                                                              \
                                                                        \
    for(p=0; p<UNROLLJ; p++)                                            \
    {                                                                   \
        /* Here we load 4 aligned floats, but we need just 2 */         \
        clj_SSE[p] = _mm_load_ps(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0],clj_SSE[1],clj_SSE[2],clj_SSE[3],c6t_SSE[0],c12t_SSE[0]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[4],clj_SSE[5],clj_SSE[6],clj_SSE[7],c6t_SSE[1],c12t_SSE[1]); \
                                                                        \
    GMX_2_MM_TO_M256(c6t_SSE[0],c6t_SSE[1],c6_SSE);                     \
    GMX_2_MM_TO_M256(c12t_SSE[0],c12t_SSE[1],c12_SSE);                  \
}

#define load_lj_pair_params2(nbfp,type,aj,c6_SSE,c12_SSE)               \
{                                                                       \
    __m128 clj_SSE[2*UNROLLJ],c6t_SSE[2],c12t_SSE[2];                   \
    int p;                                                              \
                                                                        \
    for(p=0; p<2*UNROLLJ; p++)                                          \
    {                                                                   \
        /* Here we load 4 aligned floats, but we need just 2 */         \
        clj_SSE[p] = _mm_load_ps(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0],clj_SSE[1],clj_SSE[2],clj_SSE[3],c6t_SSE[0],c12t_SSE[0]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[4],clj_SSE[5],clj_SSE[6],clj_SSE[7],c6t_SSE[1],c12t_SSE[1]); \
                                                                        \
    GMX_2_MM_TO_M256(c6t_SSE[0],c6t_SSE[1],c6_SSE);                     \
    GMX_2_MM_TO_M256(c12t_SSE[0],c12t_SSE[1],c12_SSE);                  \
}

#endif

#if defined GMX_MM128_HERE && defined GMX_DOUBLE

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE)                \
{                                                                       \
    gmx_mm_pr clj_SSE[UNROLLJ];                                         \
    int p;                                                              \
                                                                        \
    for(p=0; p<UNROLLJ; p++)                                            \
    {                                                                   \
        clj_SSE[p] = gmx_load_pr(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[0],clj_SSE[1],c6_SSE,c12_SSE);      \
}

#endif

#if defined GMX_MM256_HERE && defined GMX_DOUBLE

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE)                \
{                                                                       \
    __m128d clj_SSE[UNROLLJ],c6t_SSE[2],c12t_SSE[2];                    \
    int p;                                                              \
                                                                        \
    for(p=0; p<UNROLLJ; p++)                                            \
    {                                                                   \
        clj_SSE[p] = _mm_load_pd(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[0],clj_SSE[1],c6t_SSE[0],c12t_SSE[0]); \
    GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[2],clj_SSE[3],c6t_SSE[1],c12t_SSE[1]); \
    GMX_2_M128D_TO_M256D(c6t_SSE[0],c6t_SSE[1],c6_SSE);                 \
    GMX_2_M128D_TO_M256D(c12t_SSE[0],c12t_SSE[1],c12_SSE);              \
}

#endif


/* The load_table functions below are performance critical.
 * The routines issue UNROLLI*UNROLLJ _mm_load_ps calls.
 * As these all have latencies, scheduling is crucial.
 * The Intel compilers and CPUs seem to do a good job at this.
 * But AMD CPUs perform significantly worse with gcc than with icc.
 * Performance is improved a bit by using the extract function UNROLLJ times,
 * instead of doing an _mm_store_si128 for every i-particle.
 * With AVX this significantly deteriorates performance (8 extracts instead of 4).
 * Because of this, the load_table_f macro always takes the ti parameter,
 * but it is only used with AVX.
 */

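/* Table layout note (as the names suggest): in single precision tab_coul_FDV0
 * packs four floats per table point, presumably force, force difference,
 * energy and a zero pad, hence the index shift by 2 in the loaders below.
 * In double precision separate tab_coul_F and tab_coul_V tables are used and
 * the force difference is computed on the fly.
 */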
#if defined GMX_MM128_HERE && !defined GMX_DOUBLE

#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE)   \
{                                                                       \
    int    idx[4];                                                      \
    __m128 ctab_SSE[4];                                                 \
                                                                        \
    /* Table has 4 entries, left-shift index by 2 */                    \
    ti_SSE = _mm_slli_epi32(ti_SSE,2);                                  \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0);                            \
    ctab_SSE[0] = _mm_load_ps(tab_coul_FDV0+idx[0]);                    \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1);                            \
    ctab_SSE[1] = _mm_load_ps(tab_coul_FDV0+idx[1]);                    \
    idx[2] = gmx_mm_extract_epi32(ti_SSE,2);                            \
    ctab_SSE[2] = _mm_load_ps(tab_coul_FDV0+idx[2]);                    \
    idx[3] = gmx_mm_extract_epi32(ti_SSE,3);                            \
    ctab_SSE[3] = _mm_load_ps(tab_coul_FDV0+idx[3]);                    \
                                                                        \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctab0_SSE,ctab1_SSE); \
}

#define load_table_f_v(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{                                                                       \
    int    idx[4];                                                      \
    __m128 ctab_SSE[4];                                                 \
                                                                        \
    /* Table has 4 entries, left-shift index by 2 */                    \
    ti_SSE = _mm_slli_epi32(ti_SSE,2);                                  \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0);                            \
    ctab_SSE[0] = _mm_load_ps(tab_coul_FDV0+idx[0]);                    \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1);                            \
    ctab_SSE[1] = _mm_load_ps(tab_coul_FDV0+idx[1]);                    \
    idx[2] = gmx_mm_extract_epi32(ti_SSE,2);                            \
    ctab_SSE[2] = _mm_load_ps(tab_coul_FDV0+idx[2]);                    \
    idx[3] = gmx_mm_extract_epi32(ti_SSE,3);                            \
    ctab_SSE[3] = _mm_load_ps(tab_coul_FDV0+idx[3]);                    \
                                                                        \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctab0_SSE,ctab1_SSE); \
    /* Shuffle the energy table entries to a convenient order */        \
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabv_SSE); \
}

#endif

#if defined GMX_MM256_HERE && !defined GMX_DOUBLE

#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE)   \
{                                                                       \
    __m128 ctab_SSE[8],ctabt_SSE[4];                                    \
    int    j;                                                           \
                                                                        \
    /* Bit shifting would be faster, but AVX doesn't support that */    \
    _mm256_store_si256((__m256i *)ti,ti_SSE);                           \
    for(j=0; j<8; j++)                                                  \
    {                                                                   \
        ctab_SSE[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);               \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabt_SSE[0],ctabt_SSE[2]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[4],ctab_SSE[5],ctab_SSE[6],ctab_SSE[7],ctabt_SSE[1],ctabt_SSE[3]); \
                                                                        \
    GMX_2_MM_TO_M256(ctabt_SSE[0],ctabt_SSE[1],ctab0_SSE);              \
    GMX_2_MM_TO_M256(ctabt_SSE[2],ctabt_SSE[3],ctab1_SSE);              \
}

#define load_table_f_v(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{                                                                       \
    __m128 ctab_SSE[8],ctabt_SSE[4],ctabvt_SSE[2];                      \
    int    j;                                                           \
                                                                        \
    /* Bit shifting would be faster, but AVX doesn't support that */    \
    _mm256_store_si256((__m256i *)ti,ti_SSE);                           \
    for(j=0; j<8; j++)                                                  \
    {                                                                   \
        ctab_SSE[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);               \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabt_SSE[0],ctabt_SSE[2]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[4],ctab_SSE[5],ctab_SSE[6],ctab_SSE[7],ctabt_SSE[1],ctabt_SSE[3]); \
                                                                        \
    GMX_2_MM_TO_M256(ctabt_SSE[0],ctabt_SSE[1],ctab0_SSE);              \
    GMX_2_MM_TO_M256(ctabt_SSE[2],ctabt_SSE[3],ctab1_SSE);              \
                                                                        \
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabvt_SSE[0]); \
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[4],ctab_SSE[5],ctab_SSE[6],ctab_SSE[7],ctabvt_SSE[1]); \
                                                                        \
    GMX_2_MM_TO_M256(ctabvt_SSE[0],ctabvt_SSE[1],ctabv_SSE);            \
}

#endif

#if defined GMX_MM128_HERE && defined GMX_DOUBLE

#define load_table_f(tab_coul_F, ti_SSE, ti, ctab0_SSE, ctab1_SSE)      \
{                                                                       \
    int     idx[2];                                                     \
    __m128d ctab_SSE[2];                                                \
                                                                        \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0);                            \
    ctab_SSE[0] = _mm_loadu_pd(tab_coul_F+idx[0]);                      \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1);                            \
    ctab_SSE[1] = _mm_loadu_pd(tab_coul_F+idx[1]);                      \
                                                                        \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],ctab0_SSE,ctab1_SSE); \
    /* The second force table entry should contain the difference */    \
    ctab1_SSE = _mm_sub_pd(ctab1_SSE,ctab0_SSE);                        \
}

#define load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{                                                                       \
    int     idx[2];                                                     \
    __m128d ctab_SSE[4];                                                \
                                                                        \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0);                            \
    ctab_SSE[0] = _mm_loadu_pd(tab_coul_F+idx[0]);                      \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1);                            \
    ctab_SSE[1] = _mm_loadu_pd(tab_coul_F+idx[1]);                      \
                                                                        \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],ctab0_SSE,ctab1_SSE); \
    /* The second force table entry should contain the difference */    \
    ctab1_SSE = _mm_sub_pd(ctab1_SSE,ctab0_SSE);                        \
                                                                        \
    ctab_SSE[2] = _mm_loadu_pd(tab_coul_V+idx[0]);                      \
    ctab_SSE[3] = _mm_loadu_pd(tab_coul_V+idx[1]);                      \
                                                                        \
    /* Shuffle the energy table entries to a single register */         \
    ctabv_SSE = _mm_shuffle_pd(ctab_SSE[2],ctab_SSE[3],_MM_SHUFFLE2(0,0)); \
}

#endif

#if defined GMX_MM256_HERE && defined GMX_DOUBLE

/* Put two 128-bit 2-double registers into one 256-bit 4-double register */
#define GMX_2_M128D_TO_M256D(in0,in1,out)                               \
{                                                                       \
    out = _mm256_insertf128_pd(_mm256_castpd128_pd256(in0),in1,1);      \
}

#define load_table_f(tab_coul_F, ti_SSE, ti, ctab0_SSE, ctab1_SSE)      \
{                                                                       \
    __m128d ctab_SSE[4],tr_SSE[4];                                      \
    int     j;                                                          \
                                                                        \
    _mm_store_si128((__m128i *)ti,ti_SSE);                              \
    for(j=0; j<4; j++)                                                  \
    {                                                                   \
        ctab_SSE[j] = _mm_loadu_pd(tab_coul_F+ti[j]);                   \
    }                                                                   \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],tr_SSE[0],tr_SSE[1]); \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[2],ctab_SSE[3],tr_SSE[2],tr_SSE[3]); \
    GMX_2_M128D_TO_M256D(tr_SSE[0],tr_SSE[2],ctab0_SSE);                \
    GMX_2_M128D_TO_M256D(tr_SSE[1],tr_SSE[3],ctab1_SSE);                \
    /* The second force table entry should contain the difference */    \
    ctab1_SSE = _mm256_sub_pd(ctab1_SSE,ctab0_SSE);                     \
}

#define load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{                                                                       \
    __m128d ctab_SSE[8],tr_SSE[4];                                      \
    int     j;                                                          \
                                                                        \
    _mm_store_si128((__m128i *)ti,ti_SSE);                              \
    for(j=0; j<4; j++)                                                  \
    {                                                                   \
        ctab_SSE[j] = _mm_loadu_pd(tab_coul_F+ti[j]);                   \
    }                                                                   \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],tr_SSE[0],tr_SSE[1]); \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[2],ctab_SSE[3],tr_SSE[2],tr_SSE[3]); \
    GMX_2_M128D_TO_M256D(tr_SSE[0],tr_SSE[2],ctab0_SSE);                \
    GMX_2_M128D_TO_M256D(tr_SSE[1],tr_SSE[3],ctab1_SSE);                \
    /* The second force table entry should contain the difference */    \
    ctab1_SSE = _mm256_sub_pd(ctab1_SSE,ctab0_SSE);                     \
                                                                        \
    for(j=0; j<4; j++)                                                  \
    {                                                                   \
        ctab_SSE[4+j] = _mm_loadu_pd(tab_coul_V+ti[j]);                 \
    }                                                                   \
    /* Shuffle the energy table entries to a single register */         \
    GMX_2_M128D_TO_M256D(_mm_shuffle_pd(ctab_SSE[4],ctab_SSE[5],_MM_SHUFFLE2(0,0)),_mm_shuffle_pd(ctab_SSE[6],ctab_SSE[7],_MM_SHUFFLE2(0,0)),ctabv_SSE); \
}

#endif

/* Add an energy register to possibly multiple terms in the energy array.
 * This function is the same for SSE/AVX single/double.
 */
static inline void add_ener_grp(gmx_mm_pr e_SSE,real *v,const int *offset_jj)
{
    int jj;

    /* We need to balance the number of store operations with
     * the rapidly increasing number of combinations of energy groups.
     * We add to a temporary buffer for 1 i-group vs 2 j-groups.
     */
    for(jj=0; jj<(UNROLLJ/2); jj++)
    {
        gmx_mm_pr v_SSE;

        v_SSE = gmx_load_pr(v+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE);
        gmx_store_pr(v+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE,gmx_add_pr(v_SSE,e_SSE));
    }
}
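/* Illustrative call (variable names hypothetical): with offset_jj holding the
 * precomputed offsets of the i/j energy-group pairs into the group-pair
 * energy buffer v, a kernel might accumulate a Coulomb energy register as
 *     add_ener_grp(vcoul_SSE, v, offset_jj);
 */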

#if defined GMX_X86_AVX_256 && GMX_SIMD_WIDTH_HERE == 8
/* As add_ener_grp above, but for two sets of UNROLLJ/2 energies
 * stored in a single SIMD register.
 */
static inline void add_ener_grp_halves(gmx_mm_pr e_SSE,
                                       real *v0,real *v1,const int *offset_jj)
{
    gmx_mm_hpr e_SSE0,e_SSE1;
    int jj;

    e_SSE0 = _mm256_extractf128_ps(e_SSE,0);
    e_SSE1 = _mm256_extractf128_ps(e_SSE,1);

    for(jj=0; jj<(UNROLLJ/2); jj++)
    {
        gmx_mm_hpr v_SSE;

        v_SSE = gmx_load_hpr(v0+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
        gmx_store_hpr(v0+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2,gmx_add_hpr(v_SSE,e_SSE0));
    }
    for(jj=0; jj<(UNROLLJ/2); jj++)
    {
        gmx_mm_hpr v_SSE;

        v_SSE = gmx_load_hpr(v1+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
        gmx_store_hpr(v1+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2,gmx_add_hpr(v_SSE,e_SSE1));
    }
}
#endif

#endif /* _nbnxn_kernel_sse_utils_h_ */