/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _kernelutil_x86_avx_128_fma_single_h_
#define _kernelutil_x86_avx_128_fma_single_h_

#include <immintrin.h>
#ifdef _MSC_VER
#    include <intrin.h>
#else
#    include <x86intrin.h>
#endif

#define gmx_mm_castsi128_ps  _mm_castsi128_ps
#define gmx_mm_extract_epi32 _mm_extract_epi32

/* Work around gcc bug with wrong type for mask formal parameter to maskload/maskstore */
#ifdef GMX_SIMD_X86_AVX_GCC_MASKLOAD_BUG
#    define gmx_mm_maskload_ps(mem, mask)        _mm_maskload_ps((mem), _mm_castsi128_ps(mask))
#    define gmx_mm_maskstore_ps(mem, mask, x)    _mm_maskstore_ps((mem), _mm_castsi128_ps(mask), (x))
#    define gmx_mm256_maskload_ps(mem, mask)     _mm256_maskload_ps((mem), _mm256_castsi256_ps(mask))
#    define gmx_mm256_maskstore_ps(mem, mask, x) _mm256_maskstore_ps((mem), _mm256_castsi256_ps(mask), (x))
#else
#    define gmx_mm_maskload_ps(mem, mask)        _mm_maskload_ps((mem), (mask))
#    define gmx_mm_maskstore_ps(mem, mask, x)    _mm_maskstore_ps((mem), (mask), (x))
#    define gmx_mm256_maskload_ps(mem, mask)     _mm256_maskload_ps((mem), (mask))
#    define gmx_mm256_maskstore_ps(mem, mask, x) _mm256_maskstore_ps((mem), (mask), (x))
#endif

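/* The mask argument is an __m128i, e.g. _mm_set_epi32(0, -1, -1, -1) to load or store
 * only the three lowest floats; on compilers with the gcc maskload bug the macros cast
 * it to the __m128 type the buggy prototypes expect. See gmx_mm_load_1rvec_4ptr_swizzle_ps
 * below for the usage in this file.
 */
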
/* Normal sum of four xmm registers */
#define gmx_mm_sum4_ps(t0, t1, t2, t3) _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3))

static gmx_inline int gmx_simdcall
gmx_mm_any_lt(__m128 a, __m128 b)
{
    return _mm_movemask_ps(_mm_cmplt_ps(a, b));
}

static gmx_inline __m128 gmx_simdcall
gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
{
    return _mm_macc_ps(dx, dx, _mm_macc_ps(dy, dy, _mm_mul_ps(dz, dz)));
}

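/* Usage sketch (assumed caller pattern, not part of this file): _mm_macc_ps(a, b, c)
 * is the FMA4 fused multiply-add a*b + c, so the routine above returns
 * dx*dx + dy*dy + dz*dz for four particle pairs at once:
 *
 *     dx  = _mm_sub_ps(ix1, jx1);
 *     dy  = _mm_sub_ps(iy1, jy1);
 *     dz  = _mm_sub_ps(iz1, jz1);
 *     rsq = gmx_mm_calc_rsq_ps(dx, dy, dz);
 */
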
/* Load a single value from 1-4 places, merge into xmm register */

static gmx_inline __m128 gmx_simdcall
gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
                             const float * gmx_restrict ptrB,
                             const float * gmx_restrict ptrC,
                             const float * gmx_restrict ptrD)
{
    __m128 t1, t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA), _mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB), _mm_load_ss(ptrD));
    return _mm_unpacklo_ps(t1, t2);
}

static gmx_inline void gmx_simdcall
gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
                              float * gmx_restrict ptrB,
                              float * gmx_restrict ptrC,
                              float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 t2, t3, t4;

    t2 = _mm_permute_ps(xmm1, _MM_SHUFFLE(1, 1, 1, 1));
    t3 = _mm_permute_ps(xmm1, _MM_SHUFFLE(2, 2, 2, 2));
    t4 = _mm_permute_ps(xmm1, _MM_SHUFFLE(3, 3, 3, 3));
    _mm_store_ss(ptrA, xmm1);
    _mm_store_ss(ptrB, t2);
    _mm_store_ss(ptrC, t3);
    _mm_store_ss(ptrD, t4);
}

static gmx_inline void gmx_simdcall
gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
                                  float * gmx_restrict ptrB,
                                  float * gmx_restrict ptrC,
                                  float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 tmp;

    tmp = gmx_mm_load_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD);
    tmp = _mm_add_ps(tmp, xmm1);
    gmx_mm_store_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, tmp);
}

static gmx_inline void gmx_simdcall
gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
                             const float * gmx_restrict p2,
                             const float * gmx_restrict p3,
                             const float * gmx_restrict p4,
                             __m128 * gmx_restrict c6, __m128 * gmx_restrict c12)
{
    __m128 t1, t2, t3, t4;
    t1   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p1);
    t2   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p2);
    t3   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p3);
    t4   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p4);
    t1   = _mm_unpacklo_ps(t1, t3);
    t2   = _mm_unpacklo_ps(t2, t4);
    *c6  = _mm_unpacklo_ps(t1, t2);
    *c12 = _mm_unpackhi_ps(t1, t2);
}

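/* Descriptive note: each pN points at an interleaved {c6, c12} Lennard-Jones parameter
 * pair; the unpacks above transpose the four pairs so that *c6 holds the four c6 values
 * and *c12 the four c12 values, one per SIMD lane.
 */
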
static gmx_inline void gmx_simdcall
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1,
                                         __m128 * gmx_restrict y1,
                                         __m128 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz);
    t3 = _mm_load_ss(xyz_shift+2);
    t4 = _mm_load_ss(xyz+2);
    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ss(t3, t4);

    *x1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
}

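/* Descriptive note: this routine and the 3- and 4-rvec variants below add a periodic
 * shift vector to the coordinates of an i-particle group and broadcast each resulting
 * x/y/z component to all four SIMD lanes, ready for distance calculations against four
 * j-particles at a time.
 */
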
static gmx_inline void gmx_simdcall
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_load_ss(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_permute_ps(tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_permute_ps(tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_permute_ps(tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ss(t3, t6);

    *x1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_permute_ps(t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_permute_ps(t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_permute_ps(t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_permute_ps(t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
}

static gmx_inline void gmx_simdcall
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_loadu_ps(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_permute_ps(tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_permute_ps(tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_permute_ps(tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ps(t3, t6);

    *x1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_permute_ps(t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_permute_ps(t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_permute_ps(t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_permute_ps(t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
    *x4 = _mm_permute_ps(t3, _MM_SHUFFLE(1, 1, 1, 1));
    *y4 = _mm_permute_ps(t3, _MM_SHUFFLE(2, 2, 2, 2));
    *z4 = _mm_permute_ps(t3, _MM_SHUFFLE(3, 3, 3, 3));
}

static gmx_inline void gmx_simdcall
gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1)
{
    __m128  t1, t2, t3, t4;
    __m128i mask = _mm_set_epi32(0, -1, -1, -1);
    t1  = gmx_mm_maskload_ps(ptrA, mask);
    t2  = gmx_mm_maskload_ps(ptrB, mask);
    t3  = gmx_mm_maskload_ps(ptrC, mask);
    t4  = gmx_mm_maskload_ps(ptrD, mask);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
}

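/* Usage sketch (assumed caller pattern, not part of this file): gather the coordinates
 * of one atom from each of four neighbour-list entries, then form distances against
 * broadcast i-atom coordinates:
 *
 *     gmx_mm_load_1rvec_4ptr_swizzle_ps(x+j_coord_offsetA, x+j_coord_offsetB,
 *                                       x+j_coord_offsetC, x+j_coord_offsetD,
 *                                       &jx1, &jy1, &jz1);
 *     dx1 = _mm_sub_ps(ix1, jx1);
 */
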
static gmx_inline void gmx_simdcall
gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1, t2, t3, t4;
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrB);
    t3  = _mm_loadu_ps(ptrC);
    t4  = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1  = _mm_loadu_ps(ptrA+4);
    t2  = _mm_loadu_ps(ptrB+4);
    t3  = _mm_loadu_ps(ptrC+4);
    t4  = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1  = _mm_load_ss(ptrA+8);
    t2  = _mm_load_ss(ptrB+8);
    t3  = _mm_load_ss(ptrC+8);
    t4  = _mm_load_ss(ptrD+8);
    t1  = _mm_unpacklo_ps(t1, t3);
    t3  = _mm_unpacklo_ps(t2, t4);
    *z3 = _mm_unpacklo_ps(t1, t3);
}

static gmx_inline void gmx_simdcall
gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1, t2, t3, t4;
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrB);
    t3  = _mm_loadu_ps(ptrC);
    t4  = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1  = _mm_loadu_ps(ptrA+4);
    t2  = _mm_loadu_ps(ptrB+4);
    t3  = _mm_loadu_ps(ptrC+4);
    t4  = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1  = _mm_loadu_ps(ptrA+8);
    t2  = _mm_loadu_ps(ptrB+8);
    t3  = _mm_loadu_ps(ptrC+8);
    t4  = _mm_loadu_ps(ptrD+8);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *z3 = t1;
    *x4 = t2;
    *y4 = t3;
    *z4 = t4;
}

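/* Descriptive note: the two loaders above read 9 or 12 consecutive floats (3 or 4 rvecs)
 * from each of the four pointers and transpose them into per-coordinate SIMD registers.
 * The 3-rvec version fetches the ninth float with _mm_load_ss so it never reads past the
 * end of a 9-float entry; the 4-rvec version can use three full unaligned loads.
 */
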
static gmx_inline void gmx_simdcall
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
    t5  = _mm_unpacklo_ps(y1, z1);
    t6  = _mm_unpackhi_ps(y1, z1);
    t7  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(1, 0, 0, 0));
    t8  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(3, 2, 0, 1));
    t9  = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(1, 0, 0, 2));
    t10 = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(3, 2, 0, 3));
    t1  = _mm_load_ss(ptrA);
    t1  = _mm_loadh_pi(t1, (__m64 *)(ptrA+1));
    t1  = _mm_sub_ps(t1, t7);
    _mm_store_ss(ptrA, t1);
    _mm_storeh_pi((__m64 *)(ptrA+1), t1);
    t2  = _mm_load_ss(ptrB);
    t2  = _mm_loadh_pi(t2, (__m64 *)(ptrB+1));
    t2  = _mm_sub_ps(t2, t8);
    _mm_store_ss(ptrB, t2);
    _mm_storeh_pi((__m64 *)(ptrB+1), t2);
    t3  = _mm_load_ss(ptrC);
    t3  = _mm_loadh_pi(t3, (__m64 *)(ptrC+1));
    t3  = _mm_sub_ps(t3, t9);
    _mm_store_ss(ptrC, t3);
    _mm_storeh_pi((__m64 *)(ptrC+1), t3);
    t4  = _mm_load_ss(ptrD);
    t4  = _mm_loadh_pi(t4, (__m64 *)(ptrD+1));
    t4  = _mm_sub_ps(t4, t10);
    _mm_store_ss(ptrD, t4);
    _mm_storeh_pi((__m64 *)(ptrD+1), t4);
}

static gmx_inline void gmx_simdcall
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    __m128 t11, t12, t13, t14, t15, t16, t17, t18, t19;
    __m128 t20, t21, t22, t23, t24, t25;
    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_permute_ps(z3, _MM_SHUFFLE(0, 0, 0, 1));
    t18 = _mm_movehl_ps(z3, z3);
    t19 = _mm_permute_ps(t18, _MM_SHUFFLE(0, 0, 0, 1));
    t20 = _mm_movelh_ps(x1, z1);
    t21 = _mm_movehl_ps(z1, x1);
    t22 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t23 = _mm_movelh_ps(y2, x3);
    t24 = _mm_movehl_ps(x3, y2);
    t25 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_load_ss(ptrA+8);
    t1  = _mm_sub_ps(t1, t20);
    t2  = _mm_sub_ps(t2, t23);
    t3  = _mm_sub_ss(t3, z3);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_store_ss(ptrA+8, t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_load_ss(ptrB+8);
    t4  = _mm_sub_ps(t4, t21);
    t5  = _mm_sub_ps(t5, t24);
    t6  = _mm_sub_ss(t6, t17);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_store_ss(ptrB+8, t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_load_ss(ptrC+8);
    t7  = _mm_sub_ps(t7, t22);
    t8  = _mm_sub_ps(t8, t25);
    t9  = _mm_sub_ss(t9, t18);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_store_ss(ptrC+8, t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_load_ss(ptrD+8);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ss(t12, t19);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_store_ss(ptrD+8, t12);
}

static gmx_inline void gmx_simdcall
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3,
                                       __m128 x4, __m128 y4, __m128 z4)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11;
    __m128 t12, t13, t14, t15, t16, t17, t18, t19, t20, t21, t22;
    __m128 t23, t24;

    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_unpackhi_ps(z3, x4);
    z3  = _mm_unpacklo_ps(z3, x4);
    t18 = _mm_unpackhi_ps(y4, z4);
    y4  = _mm_unpacklo_ps(y4, z4);
    t19 = _mm_movelh_ps(x1, z1);
    z1  = _mm_movehl_ps(z1, x1);
    t20 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t21 = _mm_movelh_ps(y2, x3);
    x3  = _mm_movehl_ps(x3, y2);
    t22 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t23 = _mm_movelh_ps(z3, y4);
    y4  = _mm_movehl_ps(y4, z3);
    t24 = _mm_movelh_ps(t17, t18);
    t18 = _mm_movehl_ps(t18, t17);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_loadu_ps(ptrA+8);
    t1  = _mm_sub_ps(t1, t19);
    t2  = _mm_sub_ps(t2, t21);
    t3  = _mm_sub_ps(t3, t23);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_storeu_ps(ptrA+8, t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_loadu_ps(ptrB+8);
    t4  = _mm_sub_ps(t4, z1);
    t5  = _mm_sub_ps(t5, x3);
    t6  = _mm_sub_ps(t6, y4);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_storeu_ps(ptrB+8, t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_loadu_ps(ptrC+8);
    t7  = _mm_sub_ps(t7, t20);
    t8  = _mm_sub_ps(t8, t22);
    t9  = _mm_sub_ps(t9, t24);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_storeu_ps(ptrC+8, t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_loadu_ps(ptrD+8);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ps(t12, t18);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_storeu_ps(ptrD+8, t12);
}

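/* Descriptive note: the three decrement routines above subtract accumulated SIMD force
 * contributions for 1, 3 or 4 rvecs stored at each of four pointers; this is how
 * per-j-particle forces are written back to the force array in the nonbonded kernels.
 */
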
static gmx_inline void gmx_simdcall
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t2, t3;

    fix1 = _mm_hadd_ps(fix1, fix1);
    fiy1 = _mm_hadd_ps(fiy1, fiz1);

    fix1 = _mm_hadd_ps(fix1, fiy1); /* fiz1 fiy1 fix1 fix1 */

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2, (__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3, (__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2, fix1);
    t3 = _mm_add_ps(t3, fix1);

    _mm_store_ss(fptr, t2);
    _mm_storeh_pi((__m64 *)(fptr+1), t2);
    _mm_store_ss(fshiftptr, t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1), t3);
}

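/* Descriptive note: the two _mm_hadd_ps steps reduce the four partial forces in each of
 * fix1/fiy1/fiz1 to scalars packed as (x, x, y, z); that packed register is then added
 * both to the per-atom force at fptr and to the shift force at fshiftptr using an
 * ss + high-pi load/store pair so only three floats are touched.
 */
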
static gmx_inline void gmx_simdcall
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4;

    fix1 = _mm_hadd_ps(fix1, fiy1);
    fiz1 = _mm_hadd_ps(fiz1, fix2);
    fiy2 = _mm_hadd_ps(fiy2, fiz2);
    fix3 = _mm_hadd_ps(fix3, fiy3);
    fiz3 = _mm_hadd_ps(fiz3, fiz3);

    fix1 = _mm_hadd_ps(fix1, fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2, fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3, fiz3); /*  -    -    -   fiz3 */

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_store_ss (fptr+8, _mm_add_ss(fiz3, _mm_load_ss(fptr+8) ));

    t4 = _mm_load_ss(fshiftptr+2);
    t4 = _mm_loadh_pi(t4, (__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fiz3, fix1, _MM_SHUFFLE(1, 0, 0, 0)); /* fiy1 fix1  -   fiz3 */
    t2 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(3, 2, 2, 2)); /* fiy3 fix3  -   fiz1 */
    t3 = _mm_shuffle_ps(fiy2, fix1, _MM_SHUFFLE(3, 3, 0, 1)); /* fix2 fix2 fiy2 fiz2 */
    t3 = _mm_permute_ps(t3, _MM_SHUFFLE(1, 2, 0, 0));         /* fiy2 fix2  -   fiz2 */

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3); /* y x - z */

    _mm_store_ss(fshiftptr+2, t1);
    _mm_storeh_pi((__m64 *)(fshiftptr), t1);
}

static gmx_inline void gmx_simdcall
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4, t5;

    fix1 = _mm_hadd_ps(fix1, fiy1);
    fiz1 = _mm_hadd_ps(fiz1, fix2);
    fiy2 = _mm_hadd_ps(fiy2, fiz2);
    fix3 = _mm_hadd_ps(fix3, fiy3);
    fiz3 = _mm_hadd_ps(fiz3, fix4);
    fiy4 = _mm_hadd_ps(fiy4, fiz4);

    fix1 = _mm_hadd_ps(fix1, fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2, fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3, fiy4); /* fiz4 fiy4 fix4 fiz3 */

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_storeu_ps(fptr+8, _mm_add_ps(fiz3, _mm_loadu_ps(fptr+8)));

    t5 = _mm_load_ss(fshiftptr+2);
    t5 = _mm_loadh_pi(t5, (__m64 *)(fshiftptr));

    t1 = _mm_permute_ps(fix1, _MM_SHUFFLE(1, 0, 2, 2));
    t2 = _mm_permute_ps(fiy2, _MM_SHUFFLE(3, 2, 1, 1));
    t3 = _mm_permute_ps(fiz3, _MM_SHUFFLE(2, 1, 0, 0));
    t4 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(0, 0, 3, 3));
    t4 = _mm_shuffle_ps(fiz3, t4, _MM_SHUFFLE(2, 0, 3, 3));

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3);
    t5 = _mm_add_ps(t5, t1);

    _mm_store_ss(fshiftptr+2, t5);
    _mm_storeh_pi((__m64 *)(fshiftptr), t5);
}

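/* Descriptive note: the 3- and 4-atom variants hadd-reduce nine or twelve force
 * accumulators into three packed registers, add them to the consecutive floats at fptr,
 * and then sum the per-atom contributions into a single (x, y, z) increment for the
 * shift force at fshiftptr.
 */
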
static gmx_inline void gmx_simdcall
gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
    pot1 = _mm_hadd_ps(pot1, pot1);
    pot1 = _mm_hadd_ps(pot1, pot1);
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
}

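/* Usage sketch (assumed caller pattern, not part of this file): reduce a SIMD energy
 * accumulator across its four lanes and add the result to one output location, e.g.
 *
 *     gmx_mm_update_1pot_ps(velecsum, kernel_data->energygrp_elec + ggid);
 */
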
static gmx_inline void gmx_simdcall
gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB)
{
    pot1 = _mm_hadd_ps(pot1, pot2);
    pot1 = _mm_hadd_ps(pot1, pot1);
    pot2 = _mm_permute_ps(pot1, _MM_SHUFFLE(0, 0, 0, 1));
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
    _mm_store_ss(ptrB, _mm_add_ss(pot2, _mm_load_ss(ptrB)));
}

#endif /* _kernelutil_x86_avx_128_fma_single_h_ */