/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#ifndef _kernelutil_x86_avx_128_fma_single_h_
#define _kernelutil_x86_avx_128_fma_single_h_

#include <immintrin.h>
#ifdef _MSC_VER
#    include <intrin.h>
#else
#    include <x86intrin.h>
#endif

#define gmx_mm_castsi128_ps   _mm_castsi128_ps
#define gmx_mm_extract_epi32  _mm_extract_epi32

/* Work around gcc bug with wrong type for mask formal parameter to maskload/maskstore */
#ifdef GMX_SIMD_X86_AVX_GCC_MASKLOAD_BUG
#    define gmx_mm_maskload_ps(mem, mask)        _mm_maskload_ps((mem), _mm_castsi128_ps(mask))
#    define gmx_mm_maskstore_ps(mem, mask, x)    _mm_maskstore_ps((mem), _mm_castsi128_ps(mask), (x))
#    define gmx_mm256_maskload_ps(mem, mask)     _mm256_maskload_ps((mem), _mm256_castsi256_ps(mask))
#    define gmx_mm256_maskstore_ps(mem, mask, x) _mm256_maskstore_ps((mem), _mm256_castsi256_ps(mask), (x))
#else
#    define gmx_mm_maskload_ps(mem, mask)        _mm_maskload_ps((mem), (mask))
#    define gmx_mm_maskstore_ps(mem, mask, x)    _mm_maskstore_ps((mem), (mask), (x))
#    define gmx_mm256_maskload_ps(mem, mask)     _mm256_maskload_ps((mem), (mask))
#    define gmx_mm256_maskstore_ps(mem, mask, x) _mm256_maskstore_ps((mem), (mask), (x))
#endif
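
/* Usage note (illustrative; mirrors gmx_mm_load_1rvec_4ptr_swizzle_ps below):
 * the mask argument is built as an integer vector, e.g.
 *
 *     __m128i mask = _mm_set_epi32(0, -1, -1, -1);
 *     __m128  xyz  = gmx_mm_maskload_ps(ptr, mask);   (loads ptr[0..2], zeroes lane 3)
 *
 * The two wrapper variants differ only in whether the mask is cast to a
 * float vector, which works around the gcc prototype bug noted above.
 */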

/* Normal sum of four xmm registers */
#define gmx_mm_sum4_ps(t0, t1, t2, t3)  _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3))
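
/* Return non-zero if any element of a is less than the corresponding
 * element of b (bitmask from _mm_movemask_ps over the comparison). */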
static gmx_inline int
gmx_mm_any_lt(__m128 a, __m128 b)
{
    return _mm_movemask_ps(_mm_cmplt_ps(a, b));
}
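
/* Per-lane squared distance rsq = dx*dx + dy*dy + dz*dz, computed with FMA
 * (_mm_macc_ps). Typical (illustrative) use in a kernel inner loop:
 *
 *     __m128 rsq = gmx_mm_calc_rsq_ps(dx, dy, dz);
 */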
static gmx_inline __m128
gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
{
    return _mm_macc_ps(dx, dx, _mm_macc_ps(dy, dy, _mm_mul_ps(dz, dz)));
}

/* Load a single value from 1-4 places, merge into xmm register */

static gmx_inline __m128
gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
                             const float * gmx_restrict ptrB,
                             const float * gmx_restrict ptrC,
                             const float * gmx_restrict ptrD)
{
    __m128 t1, t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA), _mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB), _mm_load_ss(ptrD));
    return _mm_unpacklo_ps(t1, t2);
}
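
/* Scatter the four lanes of xmm1 back to the single-float locations
 * ptrA..ptrD (the inverse of the swizzled load above). */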
static gmx_inline void
gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
                              float * gmx_restrict ptrB,
                              float * gmx_restrict ptrC,
                              float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 t2, t3, t4;

    t2 = _mm_permute_ps(xmm1, _MM_SHUFFLE(1, 1, 1, 1));
    t3 = _mm_permute_ps(xmm1, _MM_SHUFFLE(2, 2, 2, 2));
    t4 = _mm_permute_ps(xmm1, _MM_SHUFFLE(3, 3, 3, 3));
    _mm_store_ss(ptrA, xmm1);
    _mm_store_ss(ptrB, t2);
    _mm_store_ss(ptrC, t3);
    _mm_store_ss(ptrD, t4);
}

static gmx_inline void
gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
                                  float * gmx_restrict ptrB,
                                  float * gmx_restrict ptrC,
                                  float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 tmp;

    tmp = gmx_mm_load_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD);
    tmp = _mm_add_ps(tmp, xmm1);
    gmx_mm_store_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, tmp);
}
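
/* Gather four two-float parameter pairs (typically the Lennard-Jones c6/c12
 * coefficients) from p1..p4 and transpose them, so *c6 holds the four first
 * values and *c12 the four second values, one per lane. */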
static gmx_inline void
gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
                             const float * gmx_restrict p2,
                             const float * gmx_restrict p3,
                             const float * gmx_restrict p4,
                             __m128 * gmx_restrict c6, __m128 * gmx_restrict c12)
{
    __m128 t1, t2, t3, t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p1);
    t2   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p2);
    t3   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p3);
    t4   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p4);
    t1   = _mm_unpacklo_ps(t1, t3);
    t2   = _mm_unpacklo_ps(t2, t4);
    *c6  = _mm_unpacklo_ps(t1, t2);
    *c12 = _mm_unpackhi_ps(t1, t2);
}
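
/* Load a coordinate triplet, add the periodic shift vector to it, and
 * broadcast each resulting component to all four lanes. The 3- and 4-rvec
 * variants below do the same for three or four consecutive triplets
 * (e.g. the sites of a 3- or 4-point water model). */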
static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1,
                                         __m128 * gmx_restrict y1,
                                         __m128 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz);
    t3 = _mm_load_ss(xyz_shift+2);
    t4 = _mm_load_ss(xyz+2);
    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ss(t3, t4);

    *x1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
}

static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1, t2, t3, t4, t5, t6;
    __m128 tA, tB;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_load_ss(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_permute_ps(tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_permute_ps(tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_permute_ps(tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ss(t3, t6);

    *x1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_permute_ps(t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_permute_ps(t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_permute_ps(t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_permute_ps(t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
}

static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1, t2, t3, t4, t5, t6;
    __m128 tA, tB;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_loadu_ps(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_permute_ps(tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_permute_ps(tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_permute_ps(tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ps(t3, t6);

    *x1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_permute_ps(t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_permute_ps(t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_permute_ps(t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_permute_ps(t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
    *x4 = _mm_permute_ps(t3, _MM_SHUFFLE(1, 1, 1, 1));
    *y4 = _mm_permute_ps(t3, _MM_SHUFFLE(2, 2, 2, 2));
    *z4 = _mm_permute_ps(t3, _MM_SHUFFLE(3, 3, 3, 3));
}
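
/* The gmx_mm_load_{1,3,4}rvec_4ptr_swizzle_ps functions below gather the
 * coordinates of 1, 3 or 4 particles from four different pointers and
 * transpose them, so each returned register holds one component (x, y or z)
 * from all four sources - the layout the four-way SIMD kernels operate on. */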
static gmx_inline void
gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1)
{
    __m128  t1, t2, t3, t4;
    __m128i mask = _mm_set_epi32(0, -1, -1, -1);

    t1 = gmx_mm_maskload_ps(ptrA, mask);
    t2 = gmx_mm_maskload_ps(ptrB, mask);
    t3 = gmx_mm_maskload_ps(ptrC, mask);
    t4 = gmx_mm_maskload_ps(ptrD, mask);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
}

static gmx_inline void
gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadu_ps(ptrA);
    t2 = _mm_loadu_ps(ptrB);
    t3 = _mm_loadu_ps(ptrC);
    t4 = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1 = _mm_loadu_ps(ptrA+4);
    t2 = _mm_loadu_ps(ptrB+4);
    t3 = _mm_loadu_ps(ptrC+4);
    t4 = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1 = _mm_load_ss(ptrA+8);
    t2 = _mm_load_ss(ptrB+8);
    t3 = _mm_load_ss(ptrC+8);
    t4 = _mm_load_ss(ptrD+8);
    t1 = _mm_unpacklo_ps(t1, t3);
    t3 = _mm_unpacklo_ps(t2, t4);
    *z3 = _mm_unpacklo_ps(t1, t3);
}

static gmx_inline void
gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadu_ps(ptrA);
    t2 = _mm_loadu_ps(ptrB);
    t3 = _mm_loadu_ps(ptrC);
    t4 = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1 = _mm_loadu_ps(ptrA+4);
    t2 = _mm_loadu_ps(ptrB+4);
    t3 = _mm_loadu_ps(ptrC+4);
    t4 = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1 = _mm_loadu_ps(ptrA+8);
    t2 = _mm_loadu_ps(ptrB+8);
    t3 = _mm_loadu_ps(ptrC+8);
    t4 = _mm_loadu_ps(ptrD+8);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *z3 = t1;
    *x4 = t2;
    *y4 = t3;
    *z4 = t4;
}
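
/* Subtract one (x, y, z) force contribution per pointer from the three
 * consecutive floats (an rvec) stored at each of ptrA..ptrD; lane i of
 * x1/y1/z1 belongs to pointer i. */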
static gmx_inline void
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;

    t5  = _mm_unpacklo_ps(y1, z1);
    t6  = _mm_unpackhi_ps(y1, z1);
    t7  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(1, 0, 0, 0));
    t8  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(3, 2, 0, 1));
    t9  = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(1, 0, 0, 2));
    t10 = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(3, 2, 0, 3));
    t1  = _mm_load_ss(ptrA);
    t1  = _mm_loadh_pi(t1, (__m64 *)(ptrA+1));
    t1  = _mm_sub_ps(t1, t7);
    _mm_store_ss(ptrA, t1);
    _mm_storeh_pi((__m64 *)(ptrA+1), t1);
    t2  = _mm_load_ss(ptrB);
    t2  = _mm_loadh_pi(t2, (__m64 *)(ptrB+1));
    t2  = _mm_sub_ps(t2, t8);
    _mm_store_ss(ptrB, t2);
    _mm_storeh_pi((__m64 *)(ptrB+1), t2);
    t3  = _mm_load_ss(ptrC);
    t3  = _mm_loadh_pi(t3, (__m64 *)(ptrC+1));
    t3  = _mm_sub_ps(t3, t9);
    _mm_store_ss(ptrC, t3);
    _mm_storeh_pi((__m64 *)(ptrC+1), t3);
    t4  = _mm_load_ss(ptrD);
    t4  = _mm_loadh_pi(t4, (__m64 *)(ptrD+1));
    t4  = _mm_sub_ps(t4, t10);
    _mm_store_ss(ptrD, t4);
    _mm_storeh_pi((__m64 *)(ptrD+1), t4);
}
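
/* The 3- and 4-rvec decrement routines below exist in two forms: a macro for
 * 32-bit MSVC (which cannot handle more than three xmm/ymm parameters) and a
 * normal inline function for other compilers. The two forms are meant to
 * perform the identical sequence of operations. */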

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_3rvec_4ptr_swizzle_ps(ptrA, ptrB, ptrC, ptrD, \
                                               _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3) \
    { \
        __m128 _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10; \
        __m128 _t11, _t12, _t13, _t14, _t15, _t16, _t17, _t18, _t19; \
        __m128 _t20, _t21, _t22, _t23, _t24, _t25; \
        _t13 = _mm_unpackhi_ps(_x1, _y1); \
        _x1  = _mm_unpacklo_ps(_x1, _y1); \
        _t14 = _mm_unpackhi_ps(_z1, _x2); \
        _z1  = _mm_unpacklo_ps(_z1, _x2); \
        _t15 = _mm_unpackhi_ps(_y2, _z2); \
        _y2  = _mm_unpacklo_ps(_y2, _z2); \
        _t16 = _mm_unpackhi_ps(_x3, _y3); \
        _x3  = _mm_unpacklo_ps(_x3, _y3); \
        _t17 = _mm_permute_ps(_z3, _MM_SHUFFLE(0, 0, 0, 1)); \
        _t18 = _mm_movehl_ps(_z3, _z3); \
        _t19 = _mm_permute_ps(_t18, _MM_SHUFFLE(0, 0, 0, 1)); \
        _t20 = _mm_movelh_ps(_x1, _z1); \
        _t21 = _mm_movehl_ps(_z1, _x1); \
        _t22 = _mm_movelh_ps(_t13, _t14); \
        _t14 = _mm_movehl_ps(_t14, _t13); \
        _t23 = _mm_movelh_ps(_y2, _x3); \
        _t24 = _mm_movehl_ps(_x3, _y2); \
        _t25 = _mm_movelh_ps(_t15, _t16); \
        _t16 = _mm_movehl_ps(_t16, _t15); \
        _t1  = _mm_loadu_ps(ptrA); \
        _t2  = _mm_loadu_ps(ptrA+4); \
        _t3  = _mm_load_ss(ptrA+8); \
        _t1  = _mm_sub_ps(_t1, _t20); \
        _t2  = _mm_sub_ps(_t2, _t23); \
        _t3  = _mm_sub_ss(_t3, _z3); \
        _mm_storeu_ps(ptrA, _t1); \
        _mm_storeu_ps(ptrA+4, _t2); \
        _mm_store_ss(ptrA+8, _t3); \
        _t4  = _mm_loadu_ps(ptrB); \
        _t5  = _mm_loadu_ps(ptrB+4); \
        _t6  = _mm_load_ss(ptrB+8); \
        _t4  = _mm_sub_ps(_t4, _t21); \
        _t5  = _mm_sub_ps(_t5, _t24); \
        _t6  = _mm_sub_ss(_t6, _t17); \
        _mm_storeu_ps(ptrB, _t4); \
        _mm_storeu_ps(ptrB+4, _t5); \
        _mm_store_ss(ptrB+8, _t6); \
        _t7  = _mm_loadu_ps(ptrC); \
        _t8  = _mm_loadu_ps(ptrC+4); \
        _t9  = _mm_load_ss(ptrC+8); \
        _t7  = _mm_sub_ps(_t7, _t22); \
        _t8  = _mm_sub_ps(_t8, _t25); \
        _t9  = _mm_sub_ss(_t9, _t18); \
        _mm_storeu_ps(ptrC, _t7); \
        _mm_storeu_ps(ptrC+4, _t8); \
        _mm_store_ss(ptrC+8, _t9); \
        _t10 = _mm_loadu_ps(ptrD); \
        _t11 = _mm_loadu_ps(ptrD+4); \
        _t12 = _mm_load_ss(ptrD+8); \
        _t10 = _mm_sub_ps(_t10, _t14); \
        _t11 = _mm_sub_ps(_t11, _t16); \
        _t12 = _mm_sub_ss(_t12, _t19); \
        _mm_storeu_ps(ptrD, _t10); \
        _mm_storeu_ps(ptrD+4, _t11); \
        _mm_store_ss(ptrD+8, _t12); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    __m128 t11, t12, t13, t14, t15, t16, t17, t18, t19;
    __m128 t20, t21, t22, t23, t24, t25;

    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_permute_ps(z3, _MM_SHUFFLE(0, 0, 0, 1));
    t18 = _mm_movehl_ps(z3, z3);
    t19 = _mm_permute_ps(t18, _MM_SHUFFLE(0, 0, 0, 1));
    t20 = _mm_movelh_ps(x1, z1);
    t21 = _mm_movehl_ps(z1, x1);
    t22 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t23 = _mm_movelh_ps(y2, x3);
    t24 = _mm_movehl_ps(x3, y2);
    t25 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_load_ss(ptrA+8);
    t1  = _mm_sub_ps(t1, t20);
    t2  = _mm_sub_ps(t2, t23);
    t3  = _mm_sub_ss(t3, z3);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_store_ss(ptrA+8, t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_load_ss(ptrB+8);
    t4  = _mm_sub_ps(t4, t21);
    t5  = _mm_sub_ps(t5, t24);
    t6  = _mm_sub_ss(t6, t17);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_store_ss(ptrB+8, t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_load_ss(ptrC+8);
    t7  = _mm_sub_ps(t7, t22);
    t8  = _mm_sub_ps(t8, t25);
    t9  = _mm_sub_ss(t9, t18);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_store_ss(ptrC+8, t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_load_ss(ptrD+8);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ss(t12, t19);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_store_ss(ptrD+8, t12);
}
#endif

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_4rvec_4ptr_swizzle_ps(ptrA, ptrB, ptrC, ptrD, \
                                               _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3, _x4, _y4, _z4) \
    { \
        __m128 _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10, _t11; \
        __m128 _t12, _t13, _t14, _t15, _t16, _t17, _t18, _t19, _t20, _t21, _t22; \
        __m128 _t23, _t24; \
        _t13 = _mm_unpackhi_ps(_x1, _y1); \
        _x1  = _mm_unpacklo_ps(_x1, _y1); \
        _t14 = _mm_unpackhi_ps(_z1, _x2); \
        _z1  = _mm_unpacklo_ps(_z1, _x2); \
        _t15 = _mm_unpackhi_ps(_y2, _z2); \
        _y2  = _mm_unpacklo_ps(_y2, _z2); \
        _t16 = _mm_unpackhi_ps(_x3, _y3); \
        _x3  = _mm_unpacklo_ps(_x3, _y3); \
        _t17 = _mm_unpackhi_ps(_z3, _x4); \
        _z3  = _mm_unpacklo_ps(_z3, _x4); \
        _t18 = _mm_unpackhi_ps(_y4, _z4); \
        _y4  = _mm_unpacklo_ps(_y4, _z4); \
        _t19 = _mm_movelh_ps(_x1, _z1); \
        _z1  = _mm_movehl_ps(_z1, _x1); \
        _t20 = _mm_movelh_ps(_t13, _t14); \
        _t14 = _mm_movehl_ps(_t14, _t13); \
        _t21 = _mm_movelh_ps(_y2, _x3); \
        _x3  = _mm_movehl_ps(_x3, _y2); \
        _t22 = _mm_movelh_ps(_t15, _t16); \
        _t16 = _mm_movehl_ps(_t16, _t15); \
        _t23 = _mm_movelh_ps(_z3, _y4); \
        _y4  = _mm_movehl_ps(_y4, _z3); \
        _t24 = _mm_movelh_ps(_t17, _t18); \
        _t18 = _mm_movehl_ps(_t18, _t17); \
        _t1  = _mm_loadu_ps(ptrA); \
        _t2  = _mm_loadu_ps(ptrA+4); \
        _t3  = _mm_loadu_ps(ptrA+8); \
        _t1  = _mm_sub_ps(_t1, _t19); \
        _t2  = _mm_sub_ps(_t2, _t21); \
        _t3  = _mm_sub_ps(_t3, _t23); \
        _mm_storeu_ps(ptrA, _t1); \
        _mm_storeu_ps(ptrA+4, _t2); \
        _mm_storeu_ps(ptrA+8, _t3); \
        _t4  = _mm_loadu_ps(ptrB); \
        _t5  = _mm_loadu_ps(ptrB+4); \
        _t6  = _mm_loadu_ps(ptrB+8); \
        _t4  = _mm_sub_ps(_t4, _z1); \
        _t5  = _mm_sub_ps(_t5, _x3); \
        _t6  = _mm_sub_ps(_t6, _y4); \
        _mm_storeu_ps(ptrB, _t4); \
        _mm_storeu_ps(ptrB+4, _t5); \
        _mm_storeu_ps(ptrB+8, _t6); \
        _t7  = _mm_loadu_ps(ptrC); \
        _t8  = _mm_loadu_ps(ptrC+4); \
        _t9  = _mm_loadu_ps(ptrC+8); \
        _t7  = _mm_sub_ps(_t7, _t20); \
        _t8  = _mm_sub_ps(_t8, _t22); \
        _t9  = _mm_sub_ps(_t9, _t24); \
        _mm_storeu_ps(ptrC, _t7); \
        _mm_storeu_ps(ptrC+4, _t8); \
        _mm_storeu_ps(ptrC+8, _t9); \
        _t10 = _mm_loadu_ps(ptrD); \
        _t11 = _mm_loadu_ps(ptrD+4); \
        _t12 = _mm_loadu_ps(ptrD+8); \
        _t10 = _mm_sub_ps(_t10, _t14); \
        _t11 = _mm_sub_ps(_t11, _t16); \
        _t12 = _mm_sub_ps(_t12, _t18); \
        _mm_storeu_ps(ptrD, _t10); \
        _mm_storeu_ps(ptrD+4, _t11); \
        _mm_storeu_ps(ptrD+8, _t12); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3,
                                       __m128 x4, __m128 y4, __m128 z4)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11;
    __m128 t12, t13, t14, t15, t16, t17, t18, t19, t20, t21, t22;
    __m128 t23, t24;

    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_unpackhi_ps(z3, x4);
    z3  = _mm_unpacklo_ps(z3, x4);
    t18 = _mm_unpackhi_ps(y4, z4);
    y4  = _mm_unpacklo_ps(y4, z4);
    t19 = _mm_movelh_ps(x1, z1);
    z1  = _mm_movehl_ps(z1, x1);
    t20 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t21 = _mm_movelh_ps(y2, x3);
    x3  = _mm_movehl_ps(x3, y2);
    t22 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t23 = _mm_movelh_ps(z3, y4);
    y4  = _mm_movehl_ps(y4, z3);
    t24 = _mm_movelh_ps(t17, t18);
    t18 = _mm_movehl_ps(t18, t17);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_loadu_ps(ptrA+8);
    t1  = _mm_sub_ps(t1, t19);
    t2  = _mm_sub_ps(t2, t21);
    t3  = _mm_sub_ps(t3, t23);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_storeu_ps(ptrA+8, t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_loadu_ps(ptrB+8);
    t4  = _mm_sub_ps(t4, z1);
    t5  = _mm_sub_ps(t5, x3);
    t6  = _mm_sub_ps(t6, y4);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_storeu_ps(ptrB+8, t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_loadu_ps(ptrC+8);
    t7  = _mm_sub_ps(t7, t20);
    t8  = _mm_sub_ps(t8, t22);
    t9  = _mm_sub_ps(t9, t24);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_storeu_ps(ptrC+8, t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_loadu_ps(ptrD+8);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ps(t12, t18);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_storeu_ps(ptrD+8, t12);
}
#endif
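
/* Horizontally reduce the four accumulated lanes of fix1/fiy1/fiz1 and add
 * the resulting (x, y, z) force to both the force array fptr and the
 * shift-force array fshiftptr. */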
static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t2, t3;

    fix1 = _mm_hadd_ps(fix1, fix1);
    fiy1 = _mm_hadd_ps(fiy1, fiz1);

    fix1 = _mm_hadd_ps(fix1, fiy1); /* fiz1 fiy1 fix1 fix1 */

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2, (__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3, (__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2, fix1);
    t3 = _mm_add_ps(t3, fix1);

    _mm_store_ss(fptr, t2);
    _mm_storeh_pi((__m64 *)(fptr+1), t2);
    _mm_store_ss(fshiftptr, t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1), t3);
}
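
/* The 3- and 4-atom variants below reduce nine or twelve force accumulators
 * in the same way, updating nine or twelve consecutive floats at fptr plus
 * the three shift-force components at fshiftptr. As for the decrement
 * routines, 32-bit MSVC gets a macro version because of its limit on
 * xmm/ymm parameters. */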
#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_3atom_swizzle_ps(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, \
                                              fptr, fshiftptr) \
    { \
        __m128 _t1, _t2, _t3, _t4; \
        fix1 = _mm_hadd_ps(fix1, fiy1); \
        fiz1 = _mm_hadd_ps(fiz1, fix2); \
        fiy2 = _mm_hadd_ps(fiy2, fiz2); \
        fix3 = _mm_hadd_ps(fix3, fiy3); \
        fiz3 = _mm_hadd_ps(fiz3, fiz3); \
        fix1 = _mm_hadd_ps(fix1, fiz1); \
        fiy2 = _mm_hadd_ps(fiy2, fix3); \
        fiz3 = _mm_hadd_ps(fiz3, fiz3); \
        _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr))); \
        _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4))); \
        _mm_store_ss (fptr+8, _mm_add_ss(fiz3, _mm_load_ss(fptr+8))); \
        _t4 = _mm_load_ss(fshiftptr+2); \
        _t4 = _mm_loadh_pi(_t4, (__m64 *)(fshiftptr)); \
        _t1 = _mm_shuffle_ps(fiz3, fix1, _MM_SHUFFLE(1, 0, 0, 0)); \
        _t2 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(3, 2, 2, 2)); \
        _t3 = _mm_shuffle_ps(fiy2, fix1, _MM_SHUFFLE(3, 3, 0, 1)); \
        _t3 = _mm_permute_ps(_t3, _MM_SHUFFLE(1, 2, 0, 0)); \
        _t1 = _mm_add_ps(_t1, _t2); \
        _t3 = _mm_add_ps(_t3, _t4); \
        _t1 = _mm_add_ps(_t1, _t3); \
        _mm_store_ss(fshiftptr+2, _t1); \
        _mm_storeh_pi((__m64 *)(fshiftptr), _t1); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4;

    fix1 = _mm_hadd_ps(fix1, fiy1);
    fiz1 = _mm_hadd_ps(fiz1, fix2);
    fiy2 = _mm_hadd_ps(fiy2, fiz2);
    fix3 = _mm_hadd_ps(fix3, fiy3);
    fiz3 = _mm_hadd_ps(fiz3, fiz3);

    fix1 = _mm_hadd_ps(fix1, fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2, fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3, fiz3); /*  -    -    -   fiz3 */

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_store_ss (fptr+8, _mm_add_ss(fiz3, _mm_load_ss(fptr+8)));

    t4 = _mm_load_ss(fshiftptr+2);
    t4 = _mm_loadh_pi(t4, (__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fiz3, fix1, _MM_SHUFFLE(1, 0, 0, 0)); /* fiy1 fix1  -   fiz3 */
    t2 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(3, 2, 2, 2)); /* fiy3 fix3  -   fiz1 */
    t3 = _mm_shuffle_ps(fiy2, fix1, _MM_SHUFFLE(3, 3, 0, 1)); /* fix2 fix2 fiy2 fiz2 */
    t3 = _mm_permute_ps(t3, _MM_SHUFFLE(1, 2, 0, 0));         /* fiy2 fix2  -   fiz2 */

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3); /* y x - z */

    _mm_store_ss(fshiftptr+2, t1);
    _mm_storeh_pi((__m64 *)(fshiftptr), t1);
}
#endif

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_4atom_swizzle_ps(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, fix4, fiy4, fiz4, \
                                              fptr, fshiftptr) \
    { \
        __m128 _t1, _t2, _t3, _t4, _t5; \
        fix1 = _mm_hadd_ps(fix1, fiy1); \
        fiz1 = _mm_hadd_ps(fiz1, fix2); \
        fiy2 = _mm_hadd_ps(fiy2, fiz2); \
        fix3 = _mm_hadd_ps(fix3, fiy3); \
        fiz3 = _mm_hadd_ps(fiz3, fix4); \
        fiy4 = _mm_hadd_ps(fiy4, fiz4); \
        fix1 = _mm_hadd_ps(fix1, fiz1); \
        fiy2 = _mm_hadd_ps(fiy2, fix3); \
        fiz3 = _mm_hadd_ps(fiz3, fiy4); \
        _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr))); \
        _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4))); \
        _mm_storeu_ps(fptr+8, _mm_add_ps(fiz3, _mm_loadu_ps(fptr+8))); \
        _t5 = _mm_load_ss(fshiftptr+2); \
        _t5 = _mm_loadh_pi(_t5, (__m64 *)(fshiftptr)); \
        _t1 = _mm_permute_ps(fix1, _MM_SHUFFLE(1, 0, 2, 2)); \
        _t2 = _mm_permute_ps(fiy2, _MM_SHUFFLE(3, 2, 1, 1)); \
        _t3 = _mm_permute_ps(fiz3, _MM_SHUFFLE(2, 1, 0, 0)); \
        _t4 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(0, 0, 3, 3)); \
        _t4 = _mm_shuffle_ps(fiz3, _t4, _MM_SHUFFLE(2, 0, 3, 3)); \
        _t1 = _mm_add_ps(_t1, _t2); \
        _t3 = _mm_add_ps(_t3, _t4); \
        _t1 = _mm_add_ps(_t1, _t3); \
        _t5 = _mm_add_ps(_t5, _t1); \
        _mm_store_ss(fshiftptr+2, _t5); \
        _mm_storeh_pi((__m64 *)(fshiftptr), _t5); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4, t5;

    fix1 = _mm_hadd_ps(fix1, fiy1);
    fiz1 = _mm_hadd_ps(fiz1, fix2);
    fiy2 = _mm_hadd_ps(fiy2, fiz2);
    fix3 = _mm_hadd_ps(fix3, fiy3);
    fiz3 = _mm_hadd_ps(fiz3, fix4);
    fiy4 = _mm_hadd_ps(fiy4, fiz4);

    fix1 = _mm_hadd_ps(fix1, fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2, fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3, fiy4); /* fiz4 fiy4 fix4 fiz3 */

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_storeu_ps(fptr+8, _mm_add_ps(fiz3, _mm_loadu_ps(fptr+8)));

    t5 = _mm_load_ss(fshiftptr+2);
    t5 = _mm_loadh_pi(t5, (__m64 *)(fshiftptr));

    t1 = _mm_permute_ps(fix1, _MM_SHUFFLE(1, 0, 2, 2));
    t2 = _mm_permute_ps(fiy2, _MM_SHUFFLE(3, 2, 1, 1));
    t3 = _mm_permute_ps(fiz3, _MM_SHUFFLE(2, 1, 0, 0));
    t4 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(0, 0, 3, 3));
    t4 = _mm_shuffle_ps(fiz3, t4, _MM_SHUFFLE(2, 0, 3, 3));

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3);
    t5 = _mm_add_ps(t5, t1);

    _mm_store_ss(fshiftptr+2, t5);
    _mm_storeh_pi((__m64 *)(fshiftptr), t5);
}
#endif
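
/* Horizontal sums of the potential accumulators: gmx_mm_update_1pot_ps adds
 * the sum of all four lanes of pot1 to *ptrA; gmx_mm_update_2pot_ps does the
 * same for two independent accumulators (typically the Coulomb and VdW
 * energies) and two destinations. */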
static gmx_inline void
gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
    pot1 = _mm_hadd_ps(pot1, pot1);
    pot1 = _mm_hadd_ps(pot1, pot1);
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
}

static gmx_inline void
gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB)
{
    pot1 = _mm_hadd_ps(pot1, pot2);
    pot1 = _mm_hadd_ps(pot1, pot1);
    pot2 = _mm_permute_ps(pot1, _MM_SHUFFLE(0, 0, 0, 1));
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
    _mm_store_ss(ptrB, _mm_add_ss(pot2, _mm_load_ss(ptrB)));
}

#endif /* _kernelutil_x86_avx_128_fma_single_h_ */