/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS Development Team
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _nbnxn_kernel_sse_utils_h_
#define _nbnxn_kernel_sse_utils_h_

/* This file contains all functions/macros for the SIMD kernels
 * which have explicit dependencies on the j-cluster size and/or SIMD-width.
 * The functionality which depends on the j-cluster size is:
 *   LJ-parameter lookup
 *   force table lookup
 *   energy group pair energy storage
 */

#ifdef GMX_X86_SSE2

/* Transpose 2 double precision registers */
#define GMX_MM_TRANSPOSE2_OP_PD(in0, in1, out0, out1)                      \
    {                                                                       \
        out0 = _mm_unpacklo_pd(in0, in1);                                    \
        out1 = _mm_unpackhi_pd(in0, in1);                                    \
    }

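#if 0   /* Illustrative only, not compiled: a minimal sketch of what the
         * transpose above produces.  With a = {a0, a1} and b = {b0, b1},
         * the outputs are {a0, b0} and {a1, b1}. */
static void gmx_mm_transpose2_op_pd_example(void)
{
    __m128d a = _mm_set_pd(1.0, 0.0);   /* lanes: {0.0, 1.0} */
    __m128d b = _mm_set_pd(3.0, 2.0);   /* lanes: {2.0, 3.0} */
    __m128d t0, t1;

    GMX_MM_TRANSPOSE2_OP_PD(a, b, t0, t1);
    /* t0 now holds {0.0, 2.0}, t1 holds {1.0, 3.0} */
}
#endif
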
#if GMX_NBNXN_SIMD_BITWIDTH == 128 || !defined GMX_DOUBLE
/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
#define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0, in1, in2, in3, out0, out1)    \
    {                                                                       \
        __m128 _c01, _c23;                                                   \
        _c01 = _mm_movelh_ps(in0, in1);                                      \
        _c23 = _mm_movelh_ps(in2, in3);                                      \
        out0 = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));              \
        out1 = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(3, 1, 3, 1));              \
    }
#else
/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
#define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0, in1, in2, in3, out0, out1)    \
    {                                                                       \
        __m256d _c01, _c23;                                                  \
        _c01 = _mm256_shuffle_pd(in0, in1, _MM_SHUFFLE(1, 0, 1, 0));             \
        _c23 = _mm256_shuffle_pd(in2, in3, _MM_SHUFFLE(1, 0, 1, 0));             \
        out0 = _mm256_shuffle_pd(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));           \
        out1 = _mm256_shuffle_pd(_c01, _c23, _MM_SHUFFLE(3, 1, 3, 1));           \
    }
#endif

/* Collect element 2 of the 4 inputs to out */
#define GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(in0, in1, in2, in3, out)           \
    {                                                                       \
        __m128 _c01, _c23;                                                   \
        _c01 = _mm_shuffle_ps(in0, in1, _MM_SHUFFLE(3, 2, 3, 2));                \
        _c23 = _mm_shuffle_ps(in2, in3, _MM_SHUFFLE(3, 2, 3, 2));                \
        out  = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));              \
    }

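#if 0   /* Illustrative only, not compiled: what the two gather macros above do
         * for __m128 inputs (the single-precision/128-bit branch).  Each macro
         * picks out one element position from every input register. */
static void gmx_mm_shuffle_fil_example(void)
{
    __m128 r0 = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);     /* {0, 1, 2, 3}     */
    __m128 r1 = _mm_set_ps(7.0f, 6.0f, 5.0f, 4.0f);     /* {4, 5, 6, 7}     */
    __m128 r2 = _mm_set_ps(11.0f, 10.0f, 9.0f, 8.0f);   /* {8, 9, 10, 11}   */
    __m128 r3 = _mm_set_ps(15.0f, 14.0f, 13.0f, 12.0f); /* {12, 13, 14, 15} */
    __m128 e0, e1, e2;

    GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(r0, r1, r2, r3, e0, e1);
    /* e0 = {0, 4, 8, 12}: element 0 of each input */
    /* e1 = {1, 5, 9, 13}: element 1 of each input */
    GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(r0, r1, r2, r3, e2);
    /* e2 = {2, 6, 10, 14}: element 2 of each input */
}
#endif
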
#if GMX_NBNXN_SIMD_BITWIDTH == 128
#ifndef GMX_DOUBLE
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4_PR(in0, in1, in2, in3, out)                   \
    {                                                                       \
        _MM_TRANSPOSE4_PS(in0, in1, in2, in3);                                 \
        in0  = _mm_add_ps(in0, in1);                                          \
        in2  = _mm_add_ps(in2, in3);                                          \
        out  = _mm_add_ps(in0, in2);                                         \
    }
#else
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM2_PD(in0, in1, out)                           \
    {                                                                       \
        GMX_MM_TRANSPOSE2_PD(in0, in1);                                      \
        out  = _mm_add_pd(in0, in1);                                         \
    }
#endif
#else
#ifndef GMX_DOUBLE
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4_PR(in0, in1, in2, in3, out)                   \
    {                                                                       \
        in0 = _mm256_hadd_ps(in0, in1);                                      \
        in2 = _mm256_hadd_ps(in2, in3);                                      \
        in1 = _mm256_hadd_ps(in0, in2);                                      \
        out = _mm_add_ps(_mm256_castps256_ps128(in1), _mm256_extractf128_ps(in1, 1)); \
    }
/* Sum the elements of the halves of each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4H_PR(in0, in2, out)                          \
    {                                                                       \
        in0 = _mm256_hadd_ps(in0, _mm256_setzero_ps());                      \
        in2 = _mm256_hadd_ps(in2, _mm256_setzero_ps());                      \
        in0 = _mm256_hadd_ps(in0, in2);                                      \
        in2 = _mm256_permute_ps(in0, _MM_SHUFFLE(2, 3, 0, 1));                  \
        out = _mm_add_ps(_mm256_castps256_ps128(in0), _mm256_extractf128_ps(in2, 1)); \
    }
#else
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4_PR(in0, in1, in2, in3, out)                   \
    {                                                                       \
        in0 = _mm256_hadd_pd(in0, in1);                                      \
        in2 = _mm256_hadd_pd(in2, in3);                                      \
        out = _mm256_add_pd(_mm256_permute2f128_pd(in0, in2, 0x20), _mm256_permute2f128_pd(in0, in2, 0x31)); \
    }
#endif
#endif

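#if 0   /* Illustrative only, not compiled: the transpose-sum macros reduce each
         * input register to a single number, i.e. lane i of out becomes the sum
         * of all elements of input i.  Shown here for the 128-bit single
         * precision variant. */
static void gmx_mm_transpose_sum_example(void)
{
    __m128 f0 = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);    /* sums to 10 */
    __m128 f1 = _mm_set1_ps(1.0f);                      /* sums to 4  */
    __m128 f2 = _mm_set1_ps(2.0f);                      /* sums to 8  */
    __m128 f3 = _mm_setzero_ps();                       /* sums to 0  */
    __m128 sums;

    GMX_MM_TRANSPOSE_SUM4_PR(f0, f1, f2, f3, sums);
    /* sums = {10, 4, 8, 0}; note that the macro overwrites its inputs */
}
#endif
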
#if GMX_NBNXN_SIMD_BITWIDTH == 128

static inline __m128
gmx_mm128_invsqrt_ps_single(__m128 x)
{
    const __m128 half  = _mm_set_ps(0.5, 0.5, 0.5, 0.5);
    const __m128 three = _mm_set_ps(3.0, 3.0, 3.0, 3.0);

    __m128       lu = _mm_rsqrt_ps(x);

    return _mm_mul_ps(half, _mm_mul_ps(_mm_sub_ps(three, _mm_mul_ps(_mm_mul_ps(lu, lu), x)), lu));
}

/* Do 2 double precision invsqrt operations.
 * Doing the SIMD rsqrt and the first Newton Raphson iteration
 * in single precision gives full double precision accuracy.
 * The speed is more than double that of two gmx_mm_invsqrt_pd calls.
 */
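/* Both refinement steps apply the standard Newton-Raphson update for
 * y = 1/sqrt(x), namely y' = 0.5*y*(3 - x*y*y), which is what the
 * half/three expressions above and below compute.
 */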
#define GMX_MM128_INVSQRT2_PD(in0, in1, out0, out1)                        \
    {                                                                       \
        const __m128d half  = _mm_set1_pd(0.5);                             \
        const __m128d three = _mm_set1_pd(3.0);                             \
        __m128        s, ir;                                                       \
        __m128d       lu0, lu1;                                                    \
                                                                        \
        s    = _mm_movelh_ps(_mm_cvtpd_ps(in0), _mm_cvtpd_ps(in1));          \
        ir   = gmx_mm128_invsqrt_ps_single(s);                              \
        lu0  = _mm_cvtps_pd(ir);                                            \
        lu1  = _mm_cvtps_pd(_mm_movehl_ps(ir, ir));                          \
        out0 = _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu0, lu0), in0)), lu0)); \
        out1 = _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu1, lu1), in1)), lu1)); \
    }

#define GMX_MM_INVSQRT2_PD GMX_MM128_INVSQRT2_PD

#endif

#if GMX_NBNXN_SIMD_BITWIDTH == 256

static inline __m256
gmx_mm256_invsqrt_ps_single(__m256 x)
{
    const __m256 half  = _mm256_set_ps(0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5);
    const __m256 three = _mm256_set_ps(3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0);

    __m256       lu = _mm256_rsqrt_ps(x);

    return _mm256_mul_ps(half, _mm256_mul_ps(_mm256_sub_ps(three, _mm256_mul_ps(_mm256_mul_ps(lu, lu), x)), lu));
}

/* Do 4 double precision invsqrt operations.
 * Doing the SIMD rsqrt and the first Newton Raphson iteration
 * in single precision gives full double precision accuracy.
 */
#define GMX_MM256_INVSQRT2_PD(in0, in1, out0, out1)                        \
    {                                                                       \
        const __m256d half  = _mm256_set1_pd(0.5);                          \
        const __m256d three = _mm256_set1_pd(3.0);                          \
        __m256        s, ir;                                                       \
        __m256d       lu0, lu1;                                                    \
                                                                        \
        s    = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm256_cvtpd_ps(in0)), _mm256_cvtpd_ps(in1), 1); \
        ir   = gmx_mm256_invsqrt_ps_single(s);                              \
        lu0  = _mm256_cvtps_pd(_mm256_castps256_ps128(ir));                 \
        lu1  = _mm256_cvtps_pd(_mm256_extractf128_ps(ir, 1));                \
        out0 = _mm256_mul_pd(half, _mm256_mul_pd(_mm256_sub_pd(three, _mm256_mul_pd(_mm256_mul_pd(lu0, lu0), in0)), lu0)); \
        out1 = _mm256_mul_pd(half, _mm256_mul_pd(_mm256_sub_pd(three, _mm256_mul_pd(_mm256_mul_pd(lu1, lu1), in1)), lu1)); \
    }

#define GMX_MM_INVSQRT2_PD GMX_MM256_INVSQRT2_PD

#endif

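#if 0   /* Illustrative only, not compiled: how a double-precision kernel could
         * use the macro above to get 1/sqrt(r^2) for two registers of squared
         * distances at once (shown for the 128-bit case, 2 doubles per register). */
static void gmx_mm_invsqrt2_pd_example(void)
{
    __m128d rsq0 = _mm_set_pd(4.0, 1.0);   /* squared distances {1, 4}  */
    __m128d rsq1 = _mm_set_pd(25.0, 9.0);  /* squared distances {9, 25} */
    __m128d rinv0, rinv1;

    GMX_MM_INVSQRT2_PD(rsq0, rsq1, rinv0, rinv1);
    /* rinv0 ~= {1.0, 0.5}, rinv1 ~= {1/3, 1/5}, refined to double precision */
}
#endif
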
/* Force and energy table load and interpolation routines */

#if GMX_NBNXN_SIMD_BITWIDTH == 128 && !defined GMX_DOUBLE

#define load_lj_pair_params(nbfp, type, aj, c6_SSE, c12_SSE)                \
    {                                                                       \
        gmx_mm_pr clj_SSE[UNROLLJ];                                         \
        int       p;                                                              \
                                                                        \
        for (p = 0; p < UNROLLJ; p++)                                            \
        {                                                                   \
            /* Here we load 4 aligned floats, but we need just 2 */         \
            clj_SSE[p] = gmx_load_pr(nbfp+type[aj+p]*NBFP_STRIDE);          \
        }                                                                   \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0], clj_SSE[1], clj_SSE[2], clj_SSE[3], c6_SSE, c12_SSE); \
    }

#endif
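
#if 0   /* Illustrative only, not compiled: the scalar equivalent of what the
         * load_lj_pair_params variants gather for one j-atom.  The nbfp pointer
         * is indexed with a stride of NBFP_STRIDE reals per j-atom type, and the
         * shuffles/transposes above and below pick out element 0 as c6 and
         * element 1 as c12. */
static void load_lj_pair_params_scalar_sketch(const real *nbfp, const int *type,
                                              int aj, int p,
                                              real *c6, real *c12)
{
    const real *params = nbfp + type[aj+p]*NBFP_STRIDE;

    *c6  = params[0];
    *c12 = params[1];
}
#endif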

#if GMX_NBNXN_SIMD_BITWIDTH == 256 && !defined GMX_DOUBLE

/* Put two 128-bit 4-float registers into one 256-bit 8-float register */
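/* e.g. {a0, a1, a2, a3} and {b0, b1, b2, b3} become {a0, a1, a2, a3, b0, b1, b2, b3} */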
#define GMX_2_MM_TO_M256(in0, in1, out)                                   \
    {                                                                       \
        out = _mm256_insertf128_ps(_mm256_castps128_ps256(in0), in1, 1);      \
    }

#define load_lj_pair_params(nbfp, type, aj, c6_SSE, c12_SSE)                \
    {                                                                       \
        __m128 clj_SSE[UNROLLJ], c6t_SSE[2], c12t_SSE[2];                     \
        int    p;                                                              \
                                                                        \
        for (p = 0; p < UNROLLJ; p++)                                            \
        {                                                                   \
            /* Here we load 4 aligned floats, but we need just 2 */         \
            clj_SSE[p] = _mm_load_ps(nbfp+type[aj+p]*NBFP_STRIDE);          \
        }                                                                   \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[0], clj_SSE[1], clj_SSE[2], clj_SSE[3], c6t_SSE[0], c12t_SSE[0]); \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE[4], clj_SSE[5], clj_SSE[6], clj_SSE[7], c6t_SSE[1], c12t_SSE[1]); \
                                                                        \
        GMX_2_MM_TO_M256(c6t_SSE[0], c6t_SSE[1], c6_SSE);                     \
        GMX_2_MM_TO_M256(c12t_SSE[0], c12t_SSE[1], c12_SSE);                  \
    }

#define load_lj_pair_params2(nbfp0, nbfp1, type, aj, c6_SSE, c12_SSE)        \
    {                                                                       \
        __m128 clj_SSE0[UNROLLJ], clj_SSE1[UNROLLJ], c6t_SSE[2], c12t_SSE[2];  \
        int    p;                                                              \
                                                                        \
        for (p = 0; p < UNROLLJ; p++)                                            \
        {                                                                   \
            /* Here we load 4 aligned floats, but we need just 2 */         \
            clj_SSE0[p] = _mm_load_ps(nbfp0+type[aj+p]*NBFP_STRIDE);        \
        }                                                                   \
        for (p = 0; p < UNROLLJ; p++)                                            \
        {                                                                   \
            /* Here we load 4 aligned floats, but we need just 2 */         \
            clj_SSE1[p] = _mm_load_ps(nbfp1+type[aj+p]*NBFP_STRIDE);        \
        }                                                                   \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE0[0], clj_SSE0[1], clj_SSE0[2], clj_SSE0[3], c6t_SSE[0], c12t_SSE[0]); \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(clj_SSE1[0], clj_SSE1[1], clj_SSE1[2], clj_SSE1[3], c6t_SSE[1], c12t_SSE[1]); \
                                                                        \
        GMX_2_MM_TO_M256(c6t_SSE[0], c6t_SSE[1], c6_SSE);                     \
        GMX_2_MM_TO_M256(c12t_SSE[0], c12t_SSE[1], c12_SSE);                  \
    }

#endif

#if GMX_NBNXN_SIMD_BITWIDTH == 128 && defined GMX_DOUBLE

#define load_lj_pair_params(nbfp, type, aj, c6_SSE, c12_SSE)                \
    {                                                                       \
        gmx_mm_pr clj_SSE[UNROLLJ];                                         \
        int       p;                                                              \
                                                                        \
        for (p = 0; p < UNROLLJ; p++)                                            \
        {                                                                   \
            clj_SSE[p] = gmx_load_pr(nbfp+type[aj+p]*NBFP_STRIDE);          \
        }                                                                   \
        GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[0], clj_SSE[1], c6_SSE, c12_SSE);      \
    }

#endif

#if GMX_NBNXN_SIMD_BITWIDTH == 256 && defined GMX_DOUBLE

#define load_lj_pair_params(nbfp, type, aj, c6_SSE, c12_SSE)                \
    {                                                                       \
        __m128d clj_SSE[UNROLLJ], c6t_SSE[2], c12t_SSE[2];                    \
        int     p;                                                              \
                                                                        \
        for (p = 0; p < UNROLLJ; p++)                                            \
        {                                                                   \
            clj_SSE[p] = _mm_load_pd(nbfp+type[aj+p]*NBFP_STRIDE);          \
        }                                                                   \
        GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[0], clj_SSE[1], c6t_SSE[0], c12t_SSE[0]); \
        GMX_MM_TRANSPOSE2_OP_PD(clj_SSE[2], clj_SSE[3], c6t_SSE[1], c12t_SSE[1]); \
        GMX_2_M128D_TO_M256D(c6t_SSE[0], c6t_SSE[1], c6_SSE);                 \
        GMX_2_M128D_TO_M256D(c12t_SSE[0], c12t_SSE[1], c12_SSE);              \
    }

#endif


/* The load_table functions below are performance critical.
 * The routines issue UNROLLI*UNROLLJ _mm_load_ps calls.
 * As these all have latencies, scheduling is crucial.
 * The Intel compilers and CPUs seem to do a good job at this.
 * But AMD CPUs perform significantly worse with gcc than with icc.
 * Performance is improved a bit by using the extract function UNROLLJ times,
 * instead of doing an _mm_store_si128 for every i-particle.
 * This is only faster when we use FDV0 formatted tables, where we also need
 * to multiply the index by 4, which can be done by a SIMD bit shift.
 * With single precision AVX, 8 extracts are much slower than 1 store.
 * Because of this, the load_table_f macro always takes the ti parameter,
 * but it is only used with AVX.
 */

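#if 0   /* Illustrative only, not compiled: a scalar sketch of the FDV0 gather
         * done by load_table_f/load_table_f_v below.  Each table point is a
         * 4-float record; judging from the shuffles below, element 0 is the
         * force F, element 1 the force difference D, element 2 the energy V
         * and element 3 padding.  The *4 indexing is what the SIMD code gets
         * from _mm_slli_epi32(ti_SSE, 2). */
static void load_table_fdv0_scalar_sketch(const float *tab_coul_FDV0, int i,
                                          float *ctab0, float *ctab1, float *ctabv)
{
    const float *rec = tab_coul_FDV0 + 4*i;

    *ctab0 = rec[0];   /* F */
    *ctab1 = rec[1];   /* D */
    *ctabv = rec[2];   /* V */
}
#endif
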
#if GMX_NBNXN_SIMD_BITWIDTH == 128 && !defined GMX_DOUBLE

#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE)   \
    {                                                                       \
        int    idx[4];                                                      \
        __m128 ctab_SSE[4];                                                 \
                                                                        \
        /* Table has 4 entries, left-shift index by 2 */                    \
        ti_SSE = _mm_slli_epi32(ti_SSE, 2);                                  \
        /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
        idx[0]      = gmx_mm_extract_epi32(ti_SSE, 0);                            \
        ctab_SSE[0] = _mm_load_ps(tab_coul_FDV0+idx[0]);                    \
        idx[1]      = gmx_mm_extract_epi32(ti_SSE, 1);                            \
        ctab_SSE[1] = _mm_load_ps(tab_coul_FDV0+idx[1]);                    \
        idx[2]      = gmx_mm_extract_epi32(ti_SSE, 2);                            \
        ctab_SSE[2] = _mm_load_ps(tab_coul_FDV0+idx[2]);                    \
        idx[3]      = gmx_mm_extract_epi32(ti_SSE, 3);                            \
        ctab_SSE[3] = _mm_load_ps(tab_coul_FDV0+idx[3]);                    \
                                                                        \
        /* Shuffle the force table entries to a convenient order */         \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0], ctab_SSE[1], ctab_SSE[2], ctab_SSE[3], ctab0_SSE, ctab1_SSE); \
    }

#define load_table_f_v(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
    {                                                                       \
        int    idx[4];                                                      \
        __m128 ctab_SSE[4];                                                 \
                                                                        \
        /* Table has 4 entries, left-shift index by 2 */                    \
        ti_SSE = _mm_slli_epi32(ti_SSE, 2);                                  \
        /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
        idx[0]      = gmx_mm_extract_epi32(ti_SSE, 0);                            \
        ctab_SSE[0] = _mm_load_ps(tab_coul_FDV0+idx[0]);                    \
        idx[1]      = gmx_mm_extract_epi32(ti_SSE, 1);                            \
        ctab_SSE[1] = _mm_load_ps(tab_coul_FDV0+idx[1]);                    \
        idx[2]      = gmx_mm_extract_epi32(ti_SSE, 2);                            \
        ctab_SSE[2] = _mm_load_ps(tab_coul_FDV0+idx[2]);                    \
        idx[3]      = gmx_mm_extract_epi32(ti_SSE, 3);                            \
        ctab_SSE[3] = _mm_load_ps(tab_coul_FDV0+idx[3]);                    \
                                                                        \
        /* Shuffle the force table entries to a convenient order */         \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0], ctab_SSE[1], ctab_SSE[2], ctab_SSE[3], ctab0_SSE, ctab1_SSE); \
        /* Shuffle the energy table entries to a convenient order */        \
        GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[0], ctab_SSE[1], ctab_SSE[2], ctab_SSE[3], ctabv_SSE); \
    }

#endif

#if GMX_NBNXN_SIMD_BITWIDTH == 256 && !defined GMX_DOUBLE

#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE)   \
    {                                                                       \
        __m128 ctab_SSE[8], ctabt_SSE[4];                                    \
        int    j;                                                           \
                                                                        \
        /* Bit shifting would be faster, but AVX doesn't support that */    \
        _mm256_store_si256((__m256i *)ti, ti_SSE);                           \
        for (j = 0; j < 8; j++)                                                  \
        {                                                                   \
            ctab_SSE[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);               \
        }                                                                   \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0], ctab_SSE[1], ctab_SSE[2], ctab_SSE[3], ctabt_SSE[0], ctabt_SSE[2]); \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[4], ctab_SSE[5], ctab_SSE[6], ctab_SSE[7], ctabt_SSE[1], ctabt_SSE[3]); \
                                                                        \
        GMX_2_MM_TO_M256(ctabt_SSE[0], ctabt_SSE[1], ctab0_SSE);              \
        GMX_2_MM_TO_M256(ctabt_SSE[2], ctabt_SSE[3], ctab1_SSE);              \
    }

#define load_table_f_v(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
    {                                                                       \
        __m128 ctab_SSE[8], ctabt_SSE[4], ctabvt_SSE[2];                      \
        int    j;                                                           \
                                                                        \
        /* Bit shifting would be faster, but AVX doesn't support that */    \
        _mm256_store_si256((__m256i *)ti, ti_SSE);                           \
        for (j = 0; j < 8; j++)                                                  \
        {                                                                   \
            ctab_SSE[j] = _mm_load_ps(tab_coul_FDV0+ti[j]*4);               \
        }                                                                   \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[0], ctab_SSE[1], ctab_SSE[2], ctab_SSE[3], ctabt_SSE[0], ctabt_SSE[2]); \
        GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(ctab_SSE[4], ctab_SSE[5], ctab_SSE[6], ctab_SSE[7], ctabt_SSE[1], ctabt_SSE[3]); \
                                                                        \
        GMX_2_MM_TO_M256(ctabt_SSE[0], ctabt_SSE[1], ctab0_SSE);              \
        GMX_2_MM_TO_M256(ctabt_SSE[2], ctabt_SSE[3], ctab1_SSE);              \
                                                                        \
        GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[0], ctab_SSE[1], ctab_SSE[2], ctab_SSE[3], ctabvt_SSE[0]); \
        GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(ctab_SSE[4], ctab_SSE[5], ctab_SSE[6], ctab_SSE[7], ctabvt_SSE[1]); \
                                                                        \
        GMX_2_MM_TO_M256(ctabvt_SSE[0], ctabvt_SSE[1], ctabv_SSE);            \
    }

#endif

#if GMX_NBNXN_SIMD_BITWIDTH == 128 && defined GMX_DOUBLE

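/* The double precision macros below use a plain force table (tab_coul_F)
 * indexed with unit stride: each unaligned 2-double load picks up F(i) and
 * F(i+1), and after the transpose the second register is turned into the
 * difference F(i+1) - F(i), the term that the FDV0 layout above stores
 * pre-computed.
 */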
#define load_table_f(tab_coul_F, ti_SSE, ti, ctab0_SSE, ctab1_SSE)      \
    {                                                                       \
        int     idx[2];                                                     \
        __m128d ctab_SSE[2];                                                \
                                                                        \
        /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
        idx[0]      = gmx_mm_extract_epi32(ti_SSE, 0);                            \
        ctab_SSE[0] = _mm_loadu_pd(tab_coul_F+idx[0]);                      \
        idx[1]      = gmx_mm_extract_epi32(ti_SSE, 1);                            \
        ctab_SSE[1] = _mm_loadu_pd(tab_coul_F+idx[1]);                      \
                                                                        \
        /* Shuffle the force table entries to a convenient order */         \
        GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0], ctab_SSE[1], ctab0_SSE, ctab1_SSE); \
        /* The second force table entry should contain the difference */    \
        ctab1_SSE = _mm_sub_pd(ctab1_SSE, ctab0_SSE);                        \
    }

#define load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
    {                                                                       \
        int     idx[2];                                                     \
        __m128d ctab_SSE[4];                                                \
                                                                        \
        /* Without SSE4.1 the extract macro needs an immediate: unroll */   \
        idx[0]      = gmx_mm_extract_epi32(ti_SSE, 0);                            \
        ctab_SSE[0] = _mm_loadu_pd(tab_coul_F+idx[0]);                      \
        idx[1]      = gmx_mm_extract_epi32(ti_SSE, 1);                            \
        ctab_SSE[1] = _mm_loadu_pd(tab_coul_F+idx[1]);                      \
                                                                        \
        /* Shuffle the force table entries to a convenient order */         \
        GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0], ctab_SSE[1], ctab0_SSE, ctab1_SSE); \
        /* The second force table entry should contain the difference */    \
        ctab1_SSE = _mm_sub_pd(ctab1_SSE, ctab0_SSE);                        \
                                                                        \
        ctab_SSE[2] = _mm_loadu_pd(tab_coul_V+idx[0]);                      \
        ctab_SSE[3] = _mm_loadu_pd(tab_coul_V+idx[1]);                      \
                                                                        \
        /* Shuffle the energy table entries to a single register */         \
        ctabv_SSE = _mm_shuffle_pd(ctab_SSE[2], ctab_SSE[3], _MM_SHUFFLE2(0, 0)); \
    }

#endif

#if GMX_NBNXN_SIMD_BITWIDTH == 256 && defined GMX_DOUBLE

/* Put two 128-bit 2-double registers into one 256-bit 4-double register */
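/* e.g. {a0, a1} and {b0, b1} become {a0, a1, b0, b1} */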
#define GMX_2_M128D_TO_M256D(in0, in1, out)                               \
    {                                                                       \
        out = _mm256_insertf128_pd(_mm256_castpd128_pd256(in0), in1, 1);      \
    }

#define load_table_f(tab_coul_F, ti_SSE, ti, ctab0_SSE, ctab1_SSE)      \
    {                                                                       \
        __m128d ctab_SSE[4], tr_SSE[4];                                      \
        int     j;                                                          \
                                                                        \
        _mm_store_si128((__m128i *)ti, ti_SSE);                              \
        for (j = 0; j < 4; j++)                                                  \
        {                                                                   \
            ctab_SSE[j] = _mm_loadu_pd(tab_coul_F+ti[j]);                   \
        }                                                                   \
        /* Shuffle the force table entries to a convenient order */         \
        GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0], ctab_SSE[1], tr_SSE[0], tr_SSE[1]); \
        GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[2], ctab_SSE[3], tr_SSE[2], tr_SSE[3]); \
        GMX_2_M128D_TO_M256D(tr_SSE[0], tr_SSE[2], ctab0_SSE);                \
        GMX_2_M128D_TO_M256D(tr_SSE[1], tr_SSE[3], ctab1_SSE);                \
        /* The second force table entry should contain the difference */    \
        ctab1_SSE = _mm256_sub_pd(ctab1_SSE, ctab0_SSE);                     \
    }

#define load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE, ti, ctab0_SSE, ctab1_SSE, ctabv_SSE) \
    {                                                                       \
        __m128d ctab_SSE[8], tr_SSE[4];                                      \
        int     j;                                                          \
                                                                        \
        _mm_store_si128((__m128i *)ti, ti_SSE);                              \
        for (j = 0; j < 4; j++)                                                  \
        {                                                                   \
            ctab_SSE[j] = _mm_loadu_pd(tab_coul_F+ti[j]);                   \
        }                                                                   \
        /* Shuffle the force table entries to a convenient order */         \
        GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[0], ctab_SSE[1], tr_SSE[0], tr_SSE[1]); \
        GMX_MM_TRANSPOSE2_OP_PD(ctab_SSE[2], ctab_SSE[3], tr_SSE[2], tr_SSE[3]); \
        GMX_2_M128D_TO_M256D(tr_SSE[0], tr_SSE[2], ctab0_SSE);                \
        GMX_2_M128D_TO_M256D(tr_SSE[1], tr_SSE[3], ctab1_SSE);                \
        /* The second force table entry should contain the difference */    \
        ctab1_SSE = _mm256_sub_pd(ctab1_SSE, ctab0_SSE);                     \
                                                                        \
        for (j = 0; j < 4; j++)                                                  \
        {                                                                   \
            ctab_SSE[4+j] = _mm_loadu_pd(tab_coul_V+ti[j]);                 \
        }                                                                   \
        /* Shuffle the energy table entries to a single register */         \
        GMX_2_M128D_TO_M256D(_mm_shuffle_pd(ctab_SSE[4], ctab_SSE[5], _MM_SHUFFLE2(0, 0)), _mm_shuffle_pd(ctab_SSE[6], ctab_SSE[7], _MM_SHUFFLE2(0, 0)), ctabv_SSE); \
    }

#endif


/* Add energy register to possibly multiple terms in the energy array.
 * This function is the same for SSE/AVX single/double.
 */
static inline void add_ener_grp(gmx_mm_pr e_SSE, real *v, const int *offset_jj)
{
    int jj;

    /* We need to balance the number of store operations with
     * the rapidly increasing number of combinations of energy groups.
     * We add to a temporary buffer for 1 i-group vs 2 j-groups.
     */
    for (jj = 0; jj < (UNROLLJ/2); jj++)
    {
        gmx_mm_pr v_SSE;

        v_SSE = gmx_load_pr(v+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE);
        gmx_store_pr(v+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE, gmx_add_pr(v_SSE, e_SSE));
    }
}

#if defined GMX_X86_AVX_256 && GMX_SIMD_WIDTH_HERE == 8 && defined gmx_mm_hpr
/* As add_ener_grp above, but for two groups of UNROLLJ/2 stored in
 * a single SIMD register.
 */
static inline void add_ener_grp_halves(gmx_mm_pr e_SSE,
                                       real *v0, real *v1, const int *offset_jj)
{
    gmx_mm_hpr e_SSE0, e_SSE1;
    int        jj;

    e_SSE0 = _mm256_extractf128_ps(e_SSE, 0);
    e_SSE1 = _mm256_extractf128_ps(e_SSE, 1);

    for (jj = 0; jj < (UNROLLJ/2); jj++)
    {
        gmx_mm_hpr v_SSE;

        gmx_load_hpr(v_SSE, v0+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
        gmx_store_hpr(v0+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2, gmx_add_hpr(v_SSE, e_SSE0));
    }
    for (jj = 0; jj < (UNROLLJ/2); jj++)
    {
        gmx_mm_hpr v_SSE;

        gmx_load_hpr(v_SSE, v1+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
        gmx_store_hpr(v1+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2, gmx_add_hpr(v_SSE, e_SSE1));
    }
}
#endif

#endif /* GMX_X86_SSE2 */

#endif /* _nbnxn_kernel_sse_utils_h_ */