Update copyright statements and change license to LGPL
[alexxy/gromacs.git] / src / mdlib / nbnxn_kernels / nbnxn_kernel_x86_simd_utils.h
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS Development Team
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _nbnxn_kernel_sse_utils_h_
#define _nbnxn_kernel_sse_utils_h_

/* This file contains all functions/macros for the SSE/AVX kernels
 * which have explicit dependencies on the j-size / SIMD-width, which
 * can be 2 (SSE-double), 4 (SSE-single, AVX-double) or 8 (AVX-single).
 * The functionality which depends on the j-cluster size is:
 *   LJ-parameter lookup
 *   force table lookup
 *   energy group pair energy storage
 */

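/* 2x2 transpose of two __m128d registers, out-of-place:
 * out0 collects the low doubles of in0/in1, out1 the high doubles;
 * the input registers are not modified.
 */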
#define GMX_MM_TRANSPOSE2_OP_PD(in0,in1,out0,out1)                      \
{                                                                       \
    out0 = _mm_shuffle_pd(in0,in1,_MM_SHUFFLE2(0,0));                   \
    out1 = _mm_shuffle_pd(in0,in1,_MM_SHUFFLE2(1,1));                   \
}

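/* Gather element 0 of in0..in3 into out0 and element 1 into out1,
 * i.e. a partial 4x4 transpose. Used below to collect c6/c12 pairs
 * and the first two fields of force-table entries from four separate
 * 4-float loads.
 */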
#if defined GMX_MM128_HERE || !defined GMX_DOUBLE
#define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0,in1,in2,in3,out0,out1)    \
{                                                                       \
    __m128 _c01,_c23;                                                   \
    _c01 = _mm_shuffle_ps(in0,in1,_MM_SHUFFLE(1,0,1,0));                \
    _c23 = _mm_shuffle_ps(in2,in3,_MM_SHUFFLE(1,0,1,0));                \
    out0 = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(2,0,2,0));              \
    out1 = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(3,1,3,1));              \
}
#else
#define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0,in1,in2,in3,out0,out1)    \
{                                                                       \
    __m256d _c01,_c23;                                                  \
    _c01 = _mm256_shuffle_pd(in0,in1,_MM_SHUFFLE(1,0,1,0));             \
    _c23 = _mm256_shuffle_pd(in2,in3,_MM_SHUFFLE(1,0,1,0));             \
    out0 = _mm256_shuffle_pd(_c01,_c23,_MM_SHUFFLE(2,0,2,0));           \
    out1 = _mm256_shuffle_pd(_c01,_c23,_MM_SHUFFLE(3,1,3,1));           \
}
#endif

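/* Gather element 2 of in0..in3 into a single output register,
 * i.e. extract the third field of four table entries. Used below to
 * collect the energy (V) field of the Coulomb table.
 */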
#define GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(in0,in1,in2,in3,out)           \
{                                                                       \
    __m128 _c01,_c23;                                                   \
    _c01 = _mm_shuffle_ps(in0,in1,_MM_SHUFFLE(3,2,3,2));                \
    _c23 = _mm_shuffle_ps(in2,in3,_MM_SHUFFLE(3,2,3,2));                \
    out  = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(2,0,2,0));              \
}

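/* Transpose-and-sum reductions: element k of o_SSE receives the sum over
 * all elements of input register k, e.g. when reducing per-SIMD-lane
 * accumulators such as the i-atom forces. The input registers are used
 * as scratch and are clobbered.
 */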
#ifndef GMX_MM256_HERE
#ifndef GMX_DOUBLE
#define GMX_MM_TRANSPOSE_SUM4_PR(i_SSE0,i_SSE1,i_SSE2,i_SSE3,o_SSE)     \
{                                                                       \
    _MM_TRANSPOSE4_PS(i_SSE0,i_SSE1,i_SSE2,i_SSE3);                     \
    i_SSE0 = _mm_add_ps(i_SSE0,i_SSE1);                                 \
    i_SSE2 = _mm_add_ps(i_SSE2,i_SSE3);                                 \
    o_SSE  = _mm_add_ps(i_SSE0,i_SSE2);                                 \
}
#else
#define GMX_MM_TRANSPOSE_SUM2_PD(i_SSE0,i_SSE1,o_SSE)                   \
{                                                                       \
    GMX_MM_TRANSPOSE2_PD(i_SSE0,i_SSE1);                                \
    o_SSE  = _mm_add_pd(i_SSE0,i_SSE1);                                 \
}
#endif
#else
#ifndef GMX_DOUBLE
#define GMX_MM_TRANSPOSE_SUM4_PR(i_SSE0,i_SSE1,i_SSE2,i_SSE3,o_SSE)     \
{                                                                       \
    i_SSE0 = _mm256_hadd_ps(i_SSE0,i_SSE1);                             \
    i_SSE2 = _mm256_hadd_ps(i_SSE2,i_SSE3);                             \
    i_SSE1 = _mm256_hadd_ps(i_SSE0,i_SSE2);                             \
    o_SSE  = _mm_add_ps(_mm256_castps256_ps128(i_SSE1),_mm256_extractf128_ps(i_SSE1,1)); \
}
#else
#define GMX_MM_TRANSPOSE_SUM4_PR(i_SSE0,i_SSE1,i_SSE2,i_SSE3,o_SSE)     \
{                                                                       \
    i_SSE0 = _mm256_hadd_pd(i_SSE0,i_SSE1);                             \
    i_SSE2 = _mm256_hadd_pd(i_SSE2,i_SSE3);                             \
    o_SSE  = _mm256_add_pd(_mm256_permute2f128_pd(i_SSE0,i_SSE2,0x20),_mm256_permute2f128_pd(i_SSE0,i_SSE2,0x31)); \
}
#endif
#endif

#ifdef GMX_MM128_HERE

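/* Approximate 1/sqrt(x) for 4 floats: the hardware rsqrt estimate refined
 * with one Newton-Raphson iteration, lu*(3 - x*lu*lu)/2.
 */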
static inline __m128
gmx_mm128_invsqrt_ps_single(__m128 x)
{
    const __m128 half  = _mm_set_ps(0.5,0.5,0.5,0.5);
    const __m128 three = _mm_set_ps(3.0,3.0,3.0,3.0);

    __m128 lu = _mm_rsqrt_ps(x);

    return _mm_mul_ps(half,_mm_mul_ps(_mm_sub_ps(three,_mm_mul_ps(_mm_mul_ps(lu,lu),x)),lu));
}

/* Do 2/4 double precision invsqrt operations.
 * Doing the SSE rsqrt and the first Newton Raphson iteration
 * in single precision gives full double precision accuracy.
 * The speed is more than twice as fast as two gmx_mm_invsqrt_pd calls.
 */
#define GMX_MM128_INVSQRT2_PD(i_SSE0,i_SSE1,o_SSE0,o_SSE1)              \
{                                                                       \
    const __m128d half  = _mm_set1_pd(0.5);                             \
    const __m128d three = _mm_set1_pd(3.0);                             \
    __m128  s_SSE,ir_SSE;                                               \
    __m128d lu0,lu1;                                                    \
                                                                        \
    s_SSE  = _mm_movelh_ps(_mm_cvtpd_ps(i_SSE0),_mm_cvtpd_ps(i_SSE1));  \
    ir_SSE = gmx_mm128_invsqrt_ps_single(s_SSE);                        \
    lu0    = _mm_cvtps_pd(ir_SSE);                                      \
    lu1    = _mm_cvtps_pd(_mm_movehl_ps(ir_SSE,ir_SSE));                \
    o_SSE0 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu0,lu0),i_SSE0)),lu0)); \
    o_SSE1 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu1,lu1),i_SSE1)),lu1)); \
}

#define GMX_MM_INVSQRT2_PD GMX_MM128_INVSQRT2_PD

#endif

#ifdef GMX_MM256_HERE

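/* Approximate 1/sqrt(x) for 8 floats: rsqrt estimate plus one
 * Newton-Raphson iteration, the 256-bit analogue of the function above.
 */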
static inline __m256
gmx_mm256_invsqrt_ps_single(__m256 x)
{
    const __m256 half  = _mm256_set_ps(0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5);
    const __m256 three = _mm256_set_ps(3.0,3.0,3.0,3.0,3.0,3.0,3.0,3.0);

    __m256 lu = _mm256_rsqrt_ps(x);

    return _mm256_mul_ps(half,_mm256_mul_ps(_mm256_sub_ps(three,_mm256_mul_ps(_mm256_mul_ps(lu,lu),x)),lu));
}

#define GMX_MM256_INVSQRT2_PD(i_SSE0,i_SSE1,o_SSE0,o_SSE1)              \
{                                                                       \
    const __m256d half  = _mm256_set1_pd(0.5);                          \
    const __m256d three = _mm256_set1_pd(3.0);                          \
    __m256  s_SSE,ir_SSE;                                               \
    __m256d lu0,lu1;                                                    \
                                                                        \
    s_SSE  = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm256_cvtpd_ps(i_SSE0)),_mm256_cvtpd_ps(i_SSE1),1); \
    ir_SSE = gmx_mm256_invsqrt_ps_single(s_SSE);                        \
    lu0    = _mm256_cvtps_pd(_mm256_castps256_ps128(ir_SSE));           \
    lu1    = _mm256_cvtps_pd(_mm256_extractf128_ps(ir_SSE,1));          \
    o_SSE0 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu0,lu0),i_SSE0)),lu0)); \
    o_SSE1 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu1,lu1),i_SSE1)),lu1)); \
}

#define GMX_MM_INVSQRT2_PD GMX_MM256_INVSQRT2_PD

#endif

/* Force and energy table load and interpolation routines */

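/* load_lj_pair_params loads the LJ c6/c12 parameters for UNROLLJ j-atoms:
 * one NBFP_STRIDE-sized entry is loaded per j-atom type and the first two
 * fields are shuffled into a c6 register and a c12 register with one
 * element per j-lane. A variant is defined per SIMD width and precision.
 */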
#if defined GMX_MM128_HERE && !defined GMX_DOUBLE

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE)                \
{                                                                       \
    gmx_mm_pr clj_SSE[UNROLLJ];                                         \
    int p;                                                              \
                                                                        \
    for(p=0; p<UNROLLJ; p++)                                            \
    {                                                                   \
        /* Here we load 4 aligned floats, but we need just 2 */         \
        clj_SSE[p] = gmx_load_pr(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0],clj_SSE[1],clj_SSE[2],clj_SSE[3],c6_SSE,c12_SSE); \
}

#endif

#if defined GMX_MM256_HERE && !defined GMX_DOUBLE

/* Put two 128-bit 4-float registers into one 256-bit 8-float register */
#define GMX_2_MM_TO_M256(in0,in1,out)                                   \
{                                                                       \
    out = _mm256_insertf128_ps(_mm256_castps128_ps256(in0),in1,1);      \
}

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE)                \
{                                                                       \
    __m128 clj_SSE[UNROLLJ],c6t_SSE[2],c12t_SSE[2];                     \
    int p;                                                              \
                                                                        \
    for(p=0; p<UNROLLJ; p++)                                            \
    {                                                                   \
        /* Here we load 4 aligned floats, but we need just 2 */         \
        clj_SSE[p] = _mm_load_ps(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0],clj_SSE[1],clj_SSE[2],clj_SSE[3],c6t_SSE[0],c12t_SSE[0]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[4],clj_SSE[5],clj_SSE[6],clj_SSE[7],c6t_SSE[1],c12t_SSE[1]); \
                                                                        \
    GMX_2_MM_TO_M256(c6t_SSE[0],c6t_SSE[1],c6_SSE);                     \
    GMX_2_MM_TO_M256(c12t_SSE[0],c12t_SSE[1],c12_SSE);                  \
}

#endif

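/* With SSE in double precision (UNROLLJ==2) a single 2x2 transpose of the
 * two loaded parameter entries yields the c6 and c12 registers directly.
 */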
#if defined GMX_MM128_HERE && defined GMX_DOUBLE

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE)                \
{                                                                       \
    gmx_mm_pr clj_SSE[UNROLLJ];                                         \
    int p;                                                              \
                                                                        \
    for(p=0; p<UNROLLJ; p++)                                            \
    {                                                                   \
        clj_SSE[p] = gmx_load_pr(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[0],clj_SSE[1],c6_SSE,c12_SSE);      \
}

#endif

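/* The AVX double precision version (UNROLLJ==4) does two 2x2 transposes and
 * merges the 128-bit halves with GMX_2_M128D_TO_M256D, which is defined
 * further down; that is fine, since macros are only expanded at the point
 * where load_lj_pair_params is actually used.
 */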
#if defined GMX_MM256_HERE && defined GMX_DOUBLE

#define load_lj_pair_params(nbfp,type,aj,c6_SSE,c12_SSE)                \
{                                                                       \
    __m128d clj_SSE[UNROLLJ],c6t_SSE[2],c12t_SSE[2];                    \
    int p;                                                              \
                                                                        \
    for(p=0; p<UNROLLJ; p++)                                            \
    {                                                                   \
        clj_SSE[p] = _mm_load_pd(nbfp+type[aj+p]*NBFP_STRIDE);          \
    }                                                                   \
    GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[0],clj_SSE[1],c6t_SSE[0],c12t_SSE[0]); \
    GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[2],clj_SSE[3],c6t_SSE[1],c12t_SSE[1]); \
    GMX_2_M128D_TO_M256D(c6t_SSE[0],c6t_SSE[1],c6_SSE);                 \
    GMX_2_M128D_TO_M256D(c12t_SSE[0],c12t_SSE[1],c12_SSE);              \
}

#endif


/* The load_table functions below are performance critical.
 * The routines issue UNROLLI*UNROLLJ _mm_load_ps calls.
 * As these all have latencies, scheduling is crucial.
 * The Intel compilers and CPUs seem to do a good job at this.
 * But AMD CPUs perform significantly worse with gcc than with icc.
 * Performance is improved a bit by using the extract function UNROLLJ times,
 * instead of doing an _mm_store_si128 for every i-particle.
 * With AVX, however, this significantly deteriorates performance
 * (8 extracts instead of 4). Because of this, the load_table_f macro
 * always takes the ti parameter, but it is only used with AVX.
 */

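/* Single precision Coulomb tables use the FDV0 layout: 4 floats per table
 * point, of which fields 0 and 1 feed the force interpolation and field 2
 * is the potential; the fourth float is presumably padding so that each
 * table point is one aligned 128-bit load.
 */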
#if defined GMX_MM128_HERE && !defined GMX_DOUBLE

#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE)   \
{                                                                       \
    int    idx[4];                                                      \
    __m128 ctab_SSE[4];                                                 \
                                                                        \
    /* Table has 4 entries, left-shift index by 2 */                    \
    ti_SSE = _mm_slli_epi32(ti_SSE,2);                                  \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0);                            \
    ctab_SSE[0] = _mm_load_ps(tab_coul_FDV0+idx[0]);                    \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1);                            \
    ctab_SSE[1] = _mm_load_ps(tab_coul_FDV0+idx[1]);                    \
    idx[2] = gmx_mm_extract_epi32(ti_SSE,2);                            \
    ctab_SSE[2] = _mm_load_ps(tab_coul_FDV0+idx[2]);                    \
    idx[3] = gmx_mm_extract_epi32(ti_SSE,3);                            \
    ctab_SSE[3] = _mm_load_ps(tab_coul_FDV0+idx[3]);                    \
                                                                        \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctab0_SSE,ctab1_SSE); \
}

#define load_table_f_v(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{                                                                       \
    int    idx[4];                                                      \
    __m128 ctab_SSE[4];                                                 \
                                                                        \
    /* Table has 4 entries, left-shift index by 2 */                    \
    ti_SSE = _mm_slli_epi32(ti_SSE,2);                                  \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0);                            \
    ctab_SSE[0] = _mm_load_ps(tab_coul_FDV0+idx[0]);                    \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1);                            \
    ctab_SSE[1] = _mm_load_ps(tab_coul_FDV0+idx[1]);                    \
    idx[2] = gmx_mm_extract_epi32(ti_SSE,2);                            \
    ctab_SSE[2] = _mm_load_ps(tab_coul_FDV0+idx[2]);                    \
    idx[3] = gmx_mm_extract_epi32(ti_SSE,3);                            \
    ctab_SSE[3] = _mm_load_ps(tab_coul_FDV0+idx[3]);                    \
                                                                        \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctab0_SSE,ctab1_SSE); \
    /* Shuffle the energy table entries to a convenient order */        \
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabv_SSE); \
}

#endif

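/* With 256-bit AVX there are 8 table indices per j-cluster and AVX(1) has
 * no 256-bit integer shift/extract, so the indices are written to the ti
 * buffer and 8 separate 128-bit loads are gathered and shuffled into place.
 */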
#if defined GMX_MM256_HERE && !defined GMX_DOUBLE

#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE)   \
{                                                                       \
    __m128 ctab_SSE[8],ctabt_SSE[4];                                    \
    int    j;                                                           \
                                                                        \
    /* Bit shifting would be faster, but AVX doesn't support that */    \
    _mm256_store_si256((__m256i *)ti,ti_SSE);                           \
    for(j=0; j<8; j++)                                                  \
    {                                                                   \
        ctab_SSE[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);               \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabt_SSE[0],ctabt_SSE[2]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[4],ctab_SSE[5],ctab_SSE[6],ctab_SSE[7],ctabt_SSE[1],ctabt_SSE[3]); \
                                                                        \
    GMX_2_MM_TO_M256(ctabt_SSE[0],ctabt_SSE[1],ctab0_SSE);              \
    GMX_2_MM_TO_M256(ctabt_SSE[2],ctabt_SSE[3],ctab1_SSE);              \
}

#define load_table_f_v(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{                                                                       \
    __m128 ctab_SSE[8],ctabt_SSE[4],ctabvt_SSE[2];                      \
    int    j;                                                           \
                                                                        \
    /* Bit shifting would be faster, but AVX doesn't support that */    \
    _mm256_store_si256((__m256i *)ti,ti_SSE);                           \
    for(j=0; j<8; j++)                                                  \
    {                                                                   \
        ctab_SSE[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);               \
    }                                                                   \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabt_SSE[0],ctabt_SSE[2]); \
    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[4],ctab_SSE[5],ctab_SSE[6],ctab_SSE[7],ctabt_SSE[1],ctabt_SSE[3]); \
                                                                        \
    GMX_2_MM_TO_M256(ctabt_SSE[0],ctabt_SSE[1],ctab0_SSE);              \
    GMX_2_MM_TO_M256(ctabt_SSE[2],ctabt_SSE[3],ctab1_SSE);              \
                                                                        \
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[0],ctab_SSE[1],ctab_SSE[2],ctab_SSE[3],ctabvt_SSE[0]); \
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[4],ctab_SSE[5],ctab_SSE[6],ctab_SSE[7],ctabvt_SSE[1]); \
                                                                        \
    GMX_2_MM_TO_M256(ctabvt_SSE[0],ctabvt_SSE[1],ctabv_SSE);            \
}

#endif

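/* In double precision the force (F) and potential (V) tables are separate
 * arrays with one value per table point. Two consecutive F values are
 * loaded per index and the second is converted to the difference
 * F[i+1]-F[i], which is what the table interpolation in the kernels uses.
 */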
#if defined GMX_MM128_HERE && defined GMX_DOUBLE

#define load_table_f(tab_coul_F, ti_SSE, ti, ctab0_SSE, ctab1_SSE)      \
{                                                                       \
    int     idx[2];                                                     \
    __m128d ctab_SSE[2];                                                \
                                                                        \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0);                            \
    ctab_SSE[0] = _mm_loadu_pd(tab_coul_F+idx[0]);                      \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1);                            \
    ctab_SSE[1] = _mm_loadu_pd(tab_coul_F+idx[1]);                      \
                                                                        \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],ctab0_SSE,ctab1_SSE); \
    /* The second force table entry should contain the difference */    \
    ctab1_SSE = _mm_sub_pd(ctab1_SSE,ctab0_SSE);                        \
}

#define load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{                                                                       \
    int     idx[2];                                                     \
    __m128d ctab_SSE[4];                                                \
                                                                        \
    /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
    idx[0] = gmx_mm_extract_epi32(ti_SSE,0);                            \
    ctab_SSE[0] = _mm_loadu_pd(tab_coul_F+idx[0]);                      \
    idx[1] = gmx_mm_extract_epi32(ti_SSE,1);                            \
    ctab_SSE[1] = _mm_loadu_pd(tab_coul_F+idx[1]);                      \
                                                                        \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],ctab0_SSE,ctab1_SSE); \
    /* The second force table entry should contain the difference */    \
    ctab1_SSE = _mm_sub_pd(ctab1_SSE,ctab0_SSE);                        \
                                                                        \
    ctab_SSE[2] = _mm_loadu_pd(tab_coul_V+idx[0]);                      \
    ctab_SSE[3] = _mm_loadu_pd(tab_coul_V+idx[1]);                      \
                                                                        \
    /* Shuffle the energy table entries to a single register */         \
    ctabv_SSE = _mm_shuffle_pd(ctab_SSE[2],ctab_SSE[3],_MM_SHUFFLE2(0,0)); \
}

#endif

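/* The AVX double precision versions handle 4 indices: they are written to
 * the ti buffer with a 128-bit store and used for four unaligned 2-double
 * loads from each table.
 */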
#if defined GMX_MM256_HERE && defined GMX_DOUBLE

/* Put two 128-bit 2-double registers into one 256-bit 4-double register */
#define GMX_2_M128D_TO_M256D(in0,in1,out)                               \
{                                                                       \
    out = _mm256_insertf128_pd(_mm256_castpd128_pd256(in0),in1,1);      \
}

#define load_table_f(tab_coul_F, ti_SSE, ti, ctab0_SSE, ctab1_SSE)      \
{                                                                       \
    __m128d ctab_SSE[4],tr_SSE[4];                                      \
    int     j;                                                          \
                                                                        \
    _mm_store_si128((__m128i *)ti,ti_SSE);                              \
    for(j=0; j<4; j++)                                                  \
    {                                                                   \
        ctab_SSE[j] = _mm_loadu_pd(tab_coul_F+ti[j]);                   \
    }                                                                   \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],tr_SSE[0],tr_SSE[1]); \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[2],ctab_SSE[3],tr_SSE[2],tr_SSE[3]); \
    GMX_2_M128D_TO_M256D(tr_SSE[0],tr_SSE[2],ctab0_SSE);                \
    GMX_2_M128D_TO_M256D(tr_SSE[1],tr_SSE[3],ctab1_SSE);                \
    /* The second force table entry should contain the difference */    \
    ctab1_SSE = _mm256_sub_pd(ctab1_SSE,ctab0_SSE);                     \
}

#define load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
{                                                                       \
    __m128d ctab_SSE[8],tr_SSE[4];                                      \
    int     j;                                                          \
                                                                        \
    _mm_store_si128((__m128i *)ti,ti_SSE);                              \
    for(j=0; j<4; j++)                                                  \
    {                                                                   \
        ctab_SSE[j] = _mm_loadu_pd(tab_coul_F+ti[j]);                   \
    }                                                                   \
    /* Shuffle the force table entries to a convenient order */         \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0],ctab_SSE[1],tr_SSE[0],tr_SSE[1]); \
    GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[2],ctab_SSE[3],tr_SSE[2],tr_SSE[3]); \
    GMX_2_M128D_TO_M256D(tr_SSE[0],tr_SSE[2],ctab0_SSE);                \
    GMX_2_M128D_TO_M256D(tr_SSE[1],tr_SSE[3],ctab1_SSE);                \
    /* The second force table entry should contain the difference */    \
    ctab1_SSE = _mm256_sub_pd(ctab1_SSE,ctab0_SSE);                     \
                                                                        \
    for(j=0; j<4; j++)                                                  \
    {                                                                   \
        ctab_SSE[4+j] = _mm_loadu_pd(tab_coul_V+ti[j]);                 \
    }                                                                   \
    /* Shuffle the energy table entries to a single register */         \
    GMX_2_M128D_TO_M256D(_mm_shuffle_pd(ctab_SSE[4],ctab_SSE[5],_MM_SHUFFLE2(0,0)),_mm_shuffle_pd(ctab_SSE[6],ctab_SSE[7],_MM_SHUFFLE2(0,0)),ctabv_SSE); \
}

#endif


/* Add energy register to possibly multiple terms in the energy array.
 * This function is the same for SSE/AVX single/double.
 */
static inline void add_ener_grp(gmx_mm_pr e_SSE,real *v,int *offset_jj)
{
    int jj;

    /* We need to balance the number of store operations with
     * the rapidly increasing number of combinations of energy groups.
     * We add to a temporary buffer for 1 i-group vs 2 j-groups.
     */
    for(jj=0; jj<(UNROLLJ/2); jj++)
    {
        gmx_mm_pr v_SSE;

        v_SSE = gmx_load_pr(v+offset_jj[jj]+jj*UNROLLJ);
        gmx_store_pr(v+offset_jj[jj]+jj*UNROLLJ,gmx_add_pr(v_SSE,e_SSE));
    }
}

#endif /* _nbnxn_kernel_sse_utils_h_ */