/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _kernelutil_x86_avx_128_fma_single_h_
#define _kernelutil_x86_avx_128_fma_single_h_

#include <math.h>

#include "gromacs/simd/general_x86_avx_128_fma.h"
/* Normal sum of four xmm registers */
#define gmx_mm_sum4_ps(t0, t1, t2, t3)  _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3))

static gmx_inline int
gmx_mm_any_lt(__m128 a, __m128 b)
{
    return _mm_movemask_ps(_mm_cmplt_ps(a, b));
}
static gmx_inline __m128
gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
{
    return _mm_macc_ps(dx, dx, _mm_macc_ps(dy, dy, _mm_mul_ps(dz, dz)));
}
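
/*
 * Illustrative use (not part of the original header; the register names are
 * hypothetical): with i- and j-particle coordinates already in SIMD
 * registers, the squared distance follows as
 *
 *     __m128 rsq = gmx_mm_calc_rsq_ps(_mm_sub_ps(ix, jx),
 *                                     _mm_sub_ps(iy, jy),
 *                                     _mm_sub_ps(iz, jz));
 *
 * _mm_macc_ps(a, b, c) is the FMA4 fused multiply-add a*b + c, so the whole
 * expression evaluates dx*dx + dy*dy + dz*dz with two fused operations.
 */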
/* Load a single value from 1-4 places, merge into xmm register */

static gmx_inline __m128
gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
                             const float * gmx_restrict ptrB,
                             const float * gmx_restrict ptrC,
                             const float * gmx_restrict ptrD)
{
    __m128 t1, t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA), _mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB), _mm_load_ss(ptrD));
    return _mm_unpacklo_ps(t1, t2);
}
static gmx_inline void
gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
                              float * gmx_restrict ptrB,
                              float * gmx_restrict ptrC,
                              float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 t2, t3, t4;

    t2 = _mm_permute_ps(xmm1, _MM_SHUFFLE(1, 1, 1, 1));
    t3 = _mm_permute_ps(xmm1, _MM_SHUFFLE(2, 2, 2, 2));
    t4 = _mm_permute_ps(xmm1, _MM_SHUFFLE(3, 3, 3, 3));
    _mm_store_ss(ptrA, xmm1);
    _mm_store_ss(ptrB, t2);
    _mm_store_ss(ptrC, t3);
    _mm_store_ss(ptrD, t4);
}
static gmx_inline void
gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
                                  float * gmx_restrict ptrB,
                                  float * gmx_restrict ptrC,
                                  float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 tmp;

    tmp = gmx_mm_load_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD);
    tmp = _mm_add_ps(tmp, xmm1);
    gmx_mm_store_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, tmp);
}
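
/*
 * Illustrative round trip (not part of the original header; the pointers are
 * hypothetical per-particle addresses): the three routines above gather one
 * float from each of four scattered locations, update them with a SIMD
 * contribution, and scatter the results back:
 *
 *     __m128 q = gmx_mm_load_4real_swizzle_ps(qA, qB, qC, qD);
 *     ...
 *     gmx_mm_increment_4real_swizzle_ps(fA, fB, fC, fD, fcontrib);
 */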
static gmx_inline void
gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
                             const float * gmx_restrict p2,
                             const float * gmx_restrict p3,
                             const float * gmx_restrict p4,
                             __m128 * gmx_restrict c6, __m128 * gmx_restrict c12)
{
    __m128 t1, t2, t3, t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p1);
    t2   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p2);
    t3   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p3);
    t4   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p4);
    t1   = _mm_unpacklo_ps(t1, t3);
    t2   = _mm_unpacklo_ps(t2, t4);
    *c6  = _mm_unpacklo_ps(t1, t2);
    *c12 = _mm_unpackhi_ps(t1, t2);
}
static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1,
                                         __m128 * gmx_restrict y1,
                                         __m128 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz);
    t3 = _mm_load_ss(xyz_shift+2);
    t4 = _mm_load_ss(xyz+2);
    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ss(t3, t4);

    *x1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
}
static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_load_ss(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_permute_ps(tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_permute_ps(tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_permute_ps(tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ss(t3, t6);

    *x1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_permute_ps(t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_permute_ps(t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_permute_ps(t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_permute_ps(t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
}
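
/*
 * Layout note (inferred from the loads above): xyz points at nine consecutive
 * floats x1 y1 z1 x2 y2 z2 x3 y3 z3 of a three-site particle (e.g. a
 * three-site water model), and xyz_shift at the three components of the
 * periodic shift vector. Each output register holds one shifted component
 * broadcast to all four SIMD lanes, ready to be combined with four
 * j-particles at a time.
 */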
static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_loadu_ps(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_permute_ps(tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_permute_ps(tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_permute_ps(tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ps(t3, t6);

    *x1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_permute_ps(t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_permute_ps(t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_permute_ps(t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_permute_ps(t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
    *x4 = _mm_permute_ps(t3, _MM_SHUFFLE(1, 1, 1, 1));
    *y4 = _mm_permute_ps(t3, _MM_SHUFFLE(2, 2, 2, 2));
    *z4 = _mm_permute_ps(t3, _MM_SHUFFLE(3, 3, 3, 3));
}
static gmx_inline void
gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1)
{
    __m128  t1, t2, t3, t4;
    __m128i mask = _mm_set_epi32(0, -1, -1, -1);
    t1 = gmx_mm_maskload_ps(ptrA, mask);
    t2 = gmx_mm_maskload_ps(ptrB, mask);
    t3 = gmx_mm_maskload_ps(ptrC, mask);
    t4 = gmx_mm_maskload_ps(ptrD, mask);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
}
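
/*
 * Pattern note: the masked loads above fetch x, y, z (the fourth lane is
 * masked off) from four array-of-structures pointers, and _MM_TRANSPOSE4_PS
 * converts them to structure-of-arrays form, so that *x1, *y1 and *z1 each
 * hold the corresponding coordinate of all four particles. The 3- and 4-rvec
 * loaders below repeat the same transpose for each group of four floats.
 */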
static gmx_inline void
gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1, t2, t3, t4;
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrB);
    t3  = _mm_loadu_ps(ptrC);
    t4  = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1  = _mm_loadu_ps(ptrA+4);
    t2  = _mm_loadu_ps(ptrB+4);
    t3  = _mm_loadu_ps(ptrC+4);
    t4  = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1  = _mm_load_ss(ptrA+8);
    t2  = _mm_load_ss(ptrB+8);
    t3  = _mm_load_ss(ptrC+8);
    t4  = _mm_load_ss(ptrD+8);
    t1  = _mm_unpacklo_ps(t1, t3);
    t3  = _mm_unpacklo_ps(t2, t4);
    *z3 = _mm_unpacklo_ps(t1, t3);
}
static gmx_inline void
gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1, t2, t3, t4;
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrB);
    t3  = _mm_loadu_ps(ptrC);
    t4  = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1  = _mm_loadu_ps(ptrA+4);
    t2  = _mm_loadu_ps(ptrB+4);
    t3  = _mm_loadu_ps(ptrC+4);
    t4  = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1  = _mm_loadu_ps(ptrA+8);
    t2  = _mm_loadu_ps(ptrB+8);
    t3  = _mm_loadu_ps(ptrC+8);
    t4  = _mm_loadu_ps(ptrD+8);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *z3 = t1;
    *x4 = t2;
    *y4 = t3;
    *z4 = t4;
}
static gmx_inline void
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
    t5  = _mm_unpacklo_ps(y1, z1);
    t6  = _mm_unpackhi_ps(y1, z1);
    t7  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(1, 0, 0, 0));
    t8  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(3, 2, 0, 1));
    t9  = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(1, 0, 0, 2));
    t10 = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(3, 2, 0, 3));
    t1  = _mm_load_ss(ptrA);
    t1  = _mm_loadh_pi(t1, (__m64 *)(ptrA+1));
    t1  = _mm_sub_ps(t1, t7);
    _mm_store_ss(ptrA, t1);
    _mm_storeh_pi((__m64 *)(ptrA+1), t1);
    t2  = _mm_load_ss(ptrB);
    t2  = _mm_loadh_pi(t2, (__m64 *)(ptrB+1));
    t2  = _mm_sub_ps(t2, t8);
    _mm_store_ss(ptrB, t2);
    _mm_storeh_pi((__m64 *)(ptrB+1), t2);
    t3  = _mm_load_ss(ptrC);
    t3  = _mm_loadh_pi(t3, (__m64 *)(ptrC+1));
    t3  = _mm_sub_ps(t3, t9);
    _mm_store_ss(ptrC, t3);
    _mm_storeh_pi((__m64 *)(ptrC+1), t3);
    t4  = _mm_load_ss(ptrD);
    t4  = _mm_loadh_pi(t4, (__m64 *)(ptrD+1));
    t4  = _mm_sub_ps(t4, t10);
    _mm_store_ss(ptrD, t4);
    _mm_storeh_pi((__m64 *)(ptrD+1), t4);
}
#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_3rvec_4ptr_swizzle_ps(ptrA, ptrB, ptrC, ptrD, \
                                               _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3) \
    { \
        __m128 _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10; \
        __m128 _t11, _t12, _t13, _t14, _t15, _t16, _t17, _t18, _t19; \
        __m128 _t20, _t21, _t22, _t23, _t24, _t25; \
        _t13 = _mm_unpackhi_ps(_x1, _y1); \
        _x1  = _mm_unpacklo_ps(_x1, _y1); \
        _t14 = _mm_unpackhi_ps(_z1, _x2); \
        _z1  = _mm_unpacklo_ps(_z1, _x2); \
        _t15 = _mm_unpackhi_ps(_y2, _z2); \
        _y2  = _mm_unpacklo_ps(_y2, _z2); \
        _t16 = _mm_unpackhi_ps(_x3, _y3); \
        _x3  = _mm_unpacklo_ps(_x3, _y3); \
        _t17 = _mm_permute_ps(_z3, _MM_SHUFFLE(0, 0, 0, 1)); \
        _t18 = _mm_movehl_ps(_z3, _z3); \
        _t19 = _mm_permute_ps(_t18, _MM_SHUFFLE(0, 0, 0, 1)); \
        _t20 = _mm_movelh_ps(_x1, _z1); \
        _t21 = _mm_movehl_ps(_z1, _x1); \
        _t22 = _mm_movelh_ps(_t13, _t14); \
        _t14 = _mm_movehl_ps(_t14, _t13); \
        _t23 = _mm_movelh_ps(_y2, _x3); \
        _t24 = _mm_movehl_ps(_x3, _y2); \
        _t25 = _mm_movelh_ps(_t15, _t16); \
        _t16 = _mm_movehl_ps(_t16, _t15); \
        _t1  = _mm_loadu_ps(ptrA); \
        _t2  = _mm_loadu_ps(ptrA+4); \
        _t3  = _mm_load_ss(ptrA+8); \
        _t1  = _mm_sub_ps(_t1, _t20); \
        _t2  = _mm_sub_ps(_t2, _t23); \
        _t3  = _mm_sub_ss(_t3, _z3); \
        _mm_storeu_ps(ptrA, _t1); \
        _mm_storeu_ps(ptrA+4, _t2); \
        _mm_store_ss(ptrA+8, _t3); \
        _t4  = _mm_loadu_ps(ptrB); \
        _t5  = _mm_loadu_ps(ptrB+4); \
        _t6  = _mm_load_ss(ptrB+8); \
        _t4  = _mm_sub_ps(_t4, _t21); \
        _t5  = _mm_sub_ps(_t5, _t24); \
        _t6  = _mm_sub_ss(_t6, _t17); \
        _mm_storeu_ps(ptrB, _t4); \
        _mm_storeu_ps(ptrB+4, _t5); \
        _mm_store_ss(ptrB+8, _t6); \
        _t7  = _mm_loadu_ps(ptrC); \
        _t8  = _mm_loadu_ps(ptrC+4); \
        _t9  = _mm_load_ss(ptrC+8); \
        _t7  = _mm_sub_ps(_t7, _t22); \
        _t8  = _mm_sub_ps(_t8, _t25); \
        _t9  = _mm_sub_ss(_t9, _t18); \
        _mm_storeu_ps(ptrC, _t7); \
        _mm_storeu_ps(ptrC+4, _t8); \
        _mm_store_ss(ptrC+8, _t9); \
        _t10 = _mm_loadu_ps(ptrD); \
        _t11 = _mm_loadu_ps(ptrD+4); \
        _t12 = _mm_load_ss(ptrD+8); \
        _t10 = _mm_sub_ps(_t10, _t14); \
        _t11 = _mm_sub_ps(_t11, _t16); \
        _t12 = _mm_sub_ss(_t12, _t19); \
        _mm_storeu_ps(ptrD, _t10); \
        _mm_storeu_ps(ptrD+4, _t11); \
        _mm_store_ss(ptrD+8, _t12); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    __m128 t11, t12, t13, t14, t15, t16, t17, t18, t19;
    __m128 t20, t21, t22, t23, t24, t25;
    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_permute_ps(z3, _MM_SHUFFLE(0, 0, 0, 1));
    t18 = _mm_movehl_ps(z3, z3);
    t19 = _mm_permute_ps(t18, _MM_SHUFFLE(0, 0, 0, 1));
    t20 = _mm_movelh_ps(x1, z1);
    t21 = _mm_movehl_ps(z1, x1);
    t22 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t23 = _mm_movelh_ps(y2, x3);
    t24 = _mm_movehl_ps(x3, y2);
    t25 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_load_ss(ptrA+8);
    t1  = _mm_sub_ps(t1, t20);
    t2  = _mm_sub_ps(t2, t23);
    t3  = _mm_sub_ss(t3, z3);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_store_ss(ptrA+8, t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_load_ss(ptrB+8);
    t4  = _mm_sub_ps(t4, t21);
    t5  = _mm_sub_ps(t5, t24);
    t6  = _mm_sub_ss(t6, t17);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_store_ss(ptrB+8, t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_load_ss(ptrC+8);
    t7  = _mm_sub_ps(t7, t22);
    t8  = _mm_sub_ps(t8, t25);
    t9  = _mm_sub_ss(t9, t18);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_store_ss(ptrC+8, t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_load_ss(ptrD+8);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ss(t12, t19);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_store_ss(ptrD+8, t12);
}
#endif
#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_4rvec_4ptr_swizzle_ps(ptrA, ptrB, ptrC, ptrD, \
                                               _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3, _x4, _y4, _z4) \
    { \
        __m128 _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10, _t11; \
        __m128 _t12, _t13, _t14, _t15, _t16, _t17, _t18, _t19, _t20, _t21, _t22; \
        __m128 _t23, _t24; \
        _t13 = _mm_unpackhi_ps(_x1, _y1); \
        _x1  = _mm_unpacklo_ps(_x1, _y1); \
        _t14 = _mm_unpackhi_ps(_z1, _x2); \
        _z1  = _mm_unpacklo_ps(_z1, _x2); \
        _t15 = _mm_unpackhi_ps(_y2, _z2); \
        _y2  = _mm_unpacklo_ps(_y2, _z2); \
        _t16 = _mm_unpackhi_ps(_x3, _y3); \
        _x3  = _mm_unpacklo_ps(_x3, _y3); \
        _t17 = _mm_unpackhi_ps(_z3, _x4); \
        _z3  = _mm_unpacklo_ps(_z3, _x4); \
        _t18 = _mm_unpackhi_ps(_y4, _z4); \
        _y4  = _mm_unpacklo_ps(_y4, _z4); \
        _t19 = _mm_movelh_ps(_x1, _z1); \
        _z1  = _mm_movehl_ps(_z1, _x1); \
        _t20 = _mm_movelh_ps(_t13, _t14); \
        _t14 = _mm_movehl_ps(_t14, _t13); \
        _t21 = _mm_movelh_ps(_y2, _x3); \
        _x3  = _mm_movehl_ps(_x3, _y2); \
        _t22 = _mm_movelh_ps(_t15, _t16); \
        _t16 = _mm_movehl_ps(_t16, _t15); \
        _t23 = _mm_movelh_ps(_z3, _y4); \
        _y4  = _mm_movehl_ps(_y4, _z3); \
        _t24 = _mm_movelh_ps(_t17, _t18); \
        _t18 = _mm_movehl_ps(_t18, _t17); \
        _t1  = _mm_loadu_ps(ptrA); \
        _t2  = _mm_loadu_ps(ptrA+4); \
        _t3  = _mm_loadu_ps(ptrA+8); \
        _t1  = _mm_sub_ps(_t1, _t19); \
        _t2  = _mm_sub_ps(_t2, _t21); \
        _t3  = _mm_sub_ps(_t3, _t23); \
        _mm_storeu_ps(ptrA, _t1); \
        _mm_storeu_ps(ptrA+4, _t2); \
        _mm_storeu_ps(ptrA+8, _t3); \
        _t4  = _mm_loadu_ps(ptrB); \
        _t5  = _mm_loadu_ps(ptrB+4); \
        _t6  = _mm_loadu_ps(ptrB+8); \
        _t4  = _mm_sub_ps(_t4, _z1); \
        _t5  = _mm_sub_ps(_t5, _x3); \
        _t6  = _mm_sub_ps(_t6, _y4); \
        _mm_storeu_ps(ptrB, _t4); \
        _mm_storeu_ps(ptrB+4, _t5); \
        _mm_storeu_ps(ptrB+8, _t6); \
        _t7  = _mm_loadu_ps(ptrC); \
        _t8  = _mm_loadu_ps(ptrC+4); \
        _t9  = _mm_loadu_ps(ptrC+8); \
        _t7  = _mm_sub_ps(_t7, _t20); \
        _t8  = _mm_sub_ps(_t8, _t22); \
        _t9  = _mm_sub_ps(_t9, _t24); \
        _mm_storeu_ps(ptrC, _t7); \
        _mm_storeu_ps(ptrC+4, _t8); \
        _mm_storeu_ps(ptrC+8, _t9); \
        _t10 = _mm_loadu_ps(ptrD); \
        _t11 = _mm_loadu_ps(ptrD+4); \
        _t12 = _mm_loadu_ps(ptrD+8); \
        _t10 = _mm_sub_ps(_t10, _t14); \
        _t11 = _mm_sub_ps(_t11, _t16); \
        _t12 = _mm_sub_ps(_t12, _t18); \
        _mm_storeu_ps(ptrD, _t10); \
        _mm_storeu_ps(ptrD+4, _t11); \
        _mm_storeu_ps(ptrD+8, _t12); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3,
                                       __m128 x4, __m128 y4, __m128 z4)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11;
    __m128 t12, t13, t14, t15, t16, t17, t18, t19, t20, t21, t22;
    __m128 t23, t24;
    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_unpackhi_ps(z3, x4);
    z3  = _mm_unpacklo_ps(z3, x4);
    t18 = _mm_unpackhi_ps(y4, z4);
    y4  = _mm_unpacklo_ps(y4, z4);
    t19 = _mm_movelh_ps(x1, z1);
    z1  = _mm_movehl_ps(z1, x1);
    t20 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t21 = _mm_movelh_ps(y2, x3);
    x3  = _mm_movehl_ps(x3, y2);
    t22 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t23 = _mm_movelh_ps(z3, y4);
    y4  = _mm_movehl_ps(y4, z3);
    t24 = _mm_movelh_ps(t17, t18);
    t18 = _mm_movehl_ps(t18, t17);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_loadu_ps(ptrA+8);
    t1  = _mm_sub_ps(t1, t19);
    t2  = _mm_sub_ps(t2, t21);
    t3  = _mm_sub_ps(t3, t23);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_storeu_ps(ptrA+8, t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_loadu_ps(ptrB+8);
    t4  = _mm_sub_ps(t4, z1);
    t5  = _mm_sub_ps(t5, x3);
    t6  = _mm_sub_ps(t6, y4);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_storeu_ps(ptrB+8, t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_loadu_ps(ptrC+8);
    t7  = _mm_sub_ps(t7, t20);
    t8  = _mm_sub_ps(t8, t22);
    t9  = _mm_sub_ps(t9, t24);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_storeu_ps(ptrC+8, t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_loadu_ps(ptrD+8);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ps(t12, t18);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_storeu_ps(ptrD+8, t12);
}
#endif
static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t2, t3;

    fix1 = _mm_hadd_ps(fix1, fix1);
    fiy1 = _mm_hadd_ps(fiy1, fiz1);

    fix1 = _mm_hadd_ps(fix1, fiy1); /* fiz1 fiy1 fix1 fix1 */

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2, (__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3, (__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2, fix1);
    t3 = _mm_add_ps(t3, fix1);

    _mm_store_ss(fptr, t2);
    _mm_storeh_pi((__m64 *)(fptr+1), t2);
    _mm_store_ss(fshiftptr, t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1), t3);
}
#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_3atom_swizzle_ps(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, \
                                              fptr, fshiftptr) \
    { \
        __m128 _t1, _t2, _t3, _t4; \
\
        fix1 = _mm_hadd_ps(fix1, fiy1); \
        fiz1 = _mm_hadd_ps(fiz1, fix2); \
        fiy2 = _mm_hadd_ps(fiy2, fiz2); \
        fix3 = _mm_hadd_ps(fix3, fiy3); \
        fiz3 = _mm_hadd_ps(fiz3, fiz3); \
        fix1 = _mm_hadd_ps(fix1, fiz1); \
        fiy2 = _mm_hadd_ps(fiy2, fix3); \
        fiz3 = _mm_hadd_ps(fiz3, fiz3); \
        _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  )); \
        _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4))); \
        _mm_store_ss (fptr+8, _mm_add_ss(fiz3, _mm_load_ss(fptr+8) )); \
        _t4 = _mm_load_ss(fshiftptr+2); \
        _t4 = _mm_loadh_pi(_t4, (__m64 *)(fshiftptr)); \
        _t1 = _mm_shuffle_ps(fiz3, fix1, _MM_SHUFFLE(1, 0, 0, 0)); \
        _t2 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(3, 2, 2, 2)); \
        _t3 = _mm_shuffle_ps(fiy2, fix1, _MM_SHUFFLE(3, 3, 0, 1)); \
        _t3 = _mm_permute_ps(_t3, _MM_SHUFFLE(1, 2, 0, 0)); \
        _t1 = _mm_add_ps(_t1, _t2); \
        _t3 = _mm_add_ps(_t3, _t4); \
        _t1 = _mm_add_ps(_t1, _t3); \
        _mm_store_ss(fshiftptr+2, _t1); \
        _mm_storeh_pi((__m64 *)(fshiftptr), _t1); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4;

    fix1 = _mm_hadd_ps(fix1, fiy1);
    fiz1 = _mm_hadd_ps(fiz1, fix2);
    fiy2 = _mm_hadd_ps(fiy2, fiz2);
    fix3 = _mm_hadd_ps(fix3, fiy3);
    fiz3 = _mm_hadd_ps(fiz3, fiz3);

    fix1 = _mm_hadd_ps(fix1, fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2, fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3, fiz3); /*  -    -    -   fiz3 */

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_store_ss (fptr+8, _mm_add_ss(fiz3, _mm_load_ss(fptr+8) ));

    t4 = _mm_load_ss(fshiftptr+2);
    t4 = _mm_loadh_pi(t4, (__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fiz3, fix1, _MM_SHUFFLE(1, 0, 0, 0)); /* fiy1 fix1  -   fiz3 */
    t2 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(3, 2, 2, 2)); /* fiy3 fix3  -   fiz1 */
    t3 = _mm_shuffle_ps(fiy2, fix1, _MM_SHUFFLE(3, 3, 0, 1)); /* fix2 fix2 fiy2 fiz2 */
    t3 = _mm_permute_ps(t3, _MM_SHUFFLE(1, 2, 0, 0));         /* fiy2 fix2  -   fiz2 */

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3); /* y x - z */

    _mm_store_ss(fshiftptr+2, t1);
    _mm_storeh_pi((__m64 *)(fshiftptr), t1);
}
#endif
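
/*
 * Illustrative kernel epilogue (not part of the original header; f, fshift
 * and the offsets are hypothetical): after the inner loop has accumulated
 * partial i-forces in each SIMD lane, reduce them across lanes and add them
 * to the force and shift-force arrays in memory:
 *
 *     gmx_mm_update_iforce_3atom_swizzle_ps(fix1, fiy1, fiz1,
 *                                           fix2, fiy2, fiz2,
 *                                           fix3, fiy3, fiz3,
 *                                           f + i_coord_offset,
 *                                           fshift + i_shift_offset);
 */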
#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_4atom_swizzle_ps(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, fix4, fiy4, fiz4, \
                                              fptr, fshiftptr) \
    { \
        __m128 _t1, _t2, _t3, _t4, _t5; \
\
        fix1 = _mm_hadd_ps(fix1, fiy1); \
        fiz1 = _mm_hadd_ps(fiz1, fix2); \
        fiy2 = _mm_hadd_ps(fiy2, fiz2); \
        fix3 = _mm_hadd_ps(fix3, fiy3); \
        fiz3 = _mm_hadd_ps(fiz3, fix4); \
        fiy4 = _mm_hadd_ps(fiy4, fiz4); \
        fix1 = _mm_hadd_ps(fix1, fiz1); \
        fiy2 = _mm_hadd_ps(fiy2, fix3); \
        fiz3 = _mm_hadd_ps(fiz3, fiy4); \
        _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  )); \
        _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4))); \
        _mm_storeu_ps(fptr+8, _mm_add_ps(fiz3, _mm_loadu_ps(fptr+8))); \
        _t5 = _mm_load_ss(fshiftptr+2); \
        _t5 = _mm_loadh_pi(_t5, (__m64 *)(fshiftptr)); \
        _t1 = _mm_permute_ps(fix1, _MM_SHUFFLE(1, 0, 2, 2)); \
        _t2 = _mm_permute_ps(fiy2, _MM_SHUFFLE(3, 2, 1, 1)); \
        _t3 = _mm_permute_ps(fiz3, _MM_SHUFFLE(2, 1, 0, 0)); \
        _t4 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(0, 0, 3, 3)); \
        _t4 = _mm_shuffle_ps(fiz3, _t4, _MM_SHUFFLE(2, 0, 3, 3)); \
        _t1 = _mm_add_ps(_t1, _t2); \
        _t3 = _mm_add_ps(_t3, _t4); \
        _t1 = _mm_add_ps(_t1, _t3); \
        _t5 = _mm_add_ps(_t5, _t1); \
        _mm_store_ss(fshiftptr+2, _t5); \
        _mm_storeh_pi((__m64 *)(fshiftptr), _t5); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4, t5;

    fix1 = _mm_hadd_ps(fix1, fiy1);
    fiz1 = _mm_hadd_ps(fiz1, fix2);
    fiy2 = _mm_hadd_ps(fiy2, fiz2);
    fix3 = _mm_hadd_ps(fix3, fiy3);
    fiz3 = _mm_hadd_ps(fiz3, fix4);
    fiy4 = _mm_hadd_ps(fiy4, fiz4);

    fix1 = _mm_hadd_ps(fix1, fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2, fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3, fiy4); /* fiz4 fiy4 fix4 fiz3 */

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_storeu_ps(fptr+8, _mm_add_ps(fiz3, _mm_loadu_ps(fptr+8)));

    t5 = _mm_load_ss(fshiftptr+2);
    t5 = _mm_loadh_pi(t5, (__m64 *)(fshiftptr));

    t1 = _mm_permute_ps(fix1, _MM_SHUFFLE(1, 0, 2, 2));
    t2 = _mm_permute_ps(fiy2, _MM_SHUFFLE(3, 2, 1, 1));
    t3 = _mm_permute_ps(fiz3, _MM_SHUFFLE(2, 1, 0, 0));
    t4 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(0, 0, 3, 3));
    t4 = _mm_shuffle_ps(fiz3, t4, _MM_SHUFFLE(2, 0, 3, 3));

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3);
    t5 = _mm_add_ps(t5, t1);

    _mm_store_ss(fshiftptr+2, t5);
    _mm_storeh_pi((__m64 *)(fshiftptr), t5);
}
#endif
static gmx_inline void
gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
    pot1 = _mm_hadd_ps(pot1, pot1);
    pot1 = _mm_hadd_ps(pot1, pot1);
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
}
static gmx_inline void
gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB)
{
    pot1 = _mm_hadd_ps(pot1, pot2);
    pot1 = _mm_hadd_ps(pot1, pot1);
    pot2 = _mm_permute_ps(pot1, _MM_SHUFFLE(0, 0, 0, 1));
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
    _mm_store_ss(ptrB, _mm_add_ss(pot2, _mm_load_ss(ptrB)));
}
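
/*
 * Illustrative use (not part of the original header; the accumulator and
 * destination names are hypothetical): reduce the four-lane Coulomb and VdW
 * energy accumulators and add them to their per-group totals:
 *
 *     gmx_mm_update_2pot_ps(velecsum, energygrp_elec + ggid,
 *                           vvdwsum,  energygrp_vdw  + ggid);
 */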
#endif /* _kernelutil_x86_avx_128_fma_single_h_ */