/*
 * This source code is part of
 *
 *                 G R O M A C S
 *
 * Copyright (c) 2011-2012, The GROMACS Development Team
 *
 * Gromacs is a library for molecular simulation and trajectory analysis,
 * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 * a full list of developers and information, check out http://www.gromacs.org
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU Lesser General Public License.
 *
 * In plain-speak: do not worry about classes/macros/templates either - only
 * changes to the library have to be LGPL, not an application linking with it.
 *
 * To help fund GROMACS development, we humbly ask that you cite
 * the papers people have written on it - you can find them on the website!
 */
#ifndef _kernelutil_x86_sse4_1_single_h_
#define _kernelutil_x86_sse4_1_single_h_

#include <math.h>

#include "gmx_x86_sse4_1.h"
/* Normal sum of four xmm registers */
#define gmx_mm_sum4_ps(t0, t1, t2, t3)    _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3))
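
/* Usage sketch (illustrative only; ve0..ve3 are hypothetical __m128
 * partial-energy accumulators computed elsewhere): reduce four partial
 * accumulators into one register before the final horizontal sum.
 *
 *     __m128 velecsum = gmx_mm_sum4_ps(ve0, ve1, ve2, ve3);
 */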
static gmx_inline __m128
gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
{
    return _mm_add_ps( _mm_add_ps( _mm_mul_ps(dx, dx), _mm_mul_ps(dy, dy) ), _mm_mul_ps(dz, dz) );
}
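
/* Example (a minimal sketch): rsq = dx*dx + dy*dy + dz*dz for four
 * atom pairs at once. ix/iy/iz and jx/jy/jz are hypothetical __m128
 * coordinate registers produced by the loaders below.
 *
 *     __m128 dx  = _mm_sub_ps(ix, jx);
 *     __m128 dy  = _mm_sub_ps(iy, jy);
 *     __m128 dz  = _mm_sub_ps(iz, jz);
 *     __m128 rsq = gmx_mm_calc_rsq_ps(dx, dy, dz);
 */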
static gmx_inline int
gmx_mm_any_lt(__m128 a, __m128 b)
{
    return _mm_movemask_ps(_mm_cmplt_ps(a, b));
}
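
/* Typical use (sketch): skip a group of four interactions when no
 * distance is below the cutoff. rcutoff2 is a hypothetical __m128
 * holding the squared cutoff in all four elements.
 *
 *     if (gmx_mm_any_lt(rsq, rcutoff2))
 *     {
 *         // evaluate the four interactions
 *     }
 */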
/* Load a single value from 1-4 places, merge into xmm register */

static gmx_inline __m128
gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
                             const float * gmx_restrict ptrB,
                             const float * gmx_restrict ptrC,
                             const float * gmx_restrict ptrD)
{
    __m128 t1, t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA), _mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB), _mm_load_ss(ptrD));
    return _mm_unpacklo_ps(t1, t2);
}
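
/* Sketch: gather the charges of four j-atoms into one register.
 * charge is a hypothetical float array and jnrA..jnrD hypothetical
 * integer atom indices.
 *
 *     __m128 jq = gmx_mm_load_4real_swizzle_ps(charge+jnrA, charge+jnrB,
 *                                              charge+jnrC, charge+jnrD);
 */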
static gmx_inline void
gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
                              float * gmx_restrict ptrB,
                              float * gmx_restrict ptrC,
                              float * gmx_restrict ptrD,
                              __m128 xmm1)
{
    __m128 t2, t3, t4;

    t3 = _mm_movehl_ps(_mm_setzero_ps(), xmm1);
    t2 = _mm_shuffle_ps(xmm1, xmm1, _MM_SHUFFLE(1, 1, 1, 1));
    t4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 1, 1, 1));
    _mm_store_ss(ptrA, xmm1);
    _mm_store_ss(ptrB, t2);
    _mm_store_ss(ptrC, t3);
    _mm_store_ss(ptrD, t4);
}
/* Similar to store, but increments value in memory */
static gmx_inline void
gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
                                  float * gmx_restrict ptrB,
                                  float * gmx_restrict ptrC,
                                  float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 tmp;

    tmp = gmx_mm_load_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD);
    tmp = _mm_add_ps(tmp, xmm1);
    gmx_mm_store_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, tmp);
}
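
/* Sketch: scatter-add the four lanes of an accumulator back to four
 * separate addresses (buf is a hypothetical float array, jnrA..jnrD
 * hypothetical indices):
 *
 *     gmx_mm_increment_4real_swizzle_ps(buf+jnrA, buf+jnrB,
 *                                       buf+jnrC, buf+jnrD, acc);
 */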
static gmx_inline void
gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
                             const float * gmx_restrict p2,
                             const float * gmx_restrict p3,
                             const float * gmx_restrict p4,
                             __m128 * gmx_restrict c6,
                             __m128 * gmx_restrict c12)
{
    __m128 t1, t2, t3, t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p1);    /* - - c12a c6a */
    t2   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p2);    /* - - c12b c6b */
    t3   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p3);    /* - - c12c c6c */
    t4   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p4);    /* - - c12d c6d */
    t1   = _mm_unpacklo_ps(t1, t2);
    t2   = _mm_unpacklo_ps(t3, t4);
    *c6  = _mm_movelh_ps(t1, t2);
    *c12 = _mm_movehl_ps(t2, t1);
}
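
/* Sketch: load the Lennard-Jones c6/c12 pair parameters for four
 * interactions. vdwparam is a hypothetical flat array storing
 * (c6,c12) pairs contiguously; tA..tD are hypothetical type indices.
 *
 *     __m128 c6, c12;
 *     gmx_mm_load_4pair_swizzle_ps(vdwparam+2*tA, vdwparam+2*tB,
 *                                  vdwparam+2*tC, vdwparam+2*tD,
 *                                  &c6, &c12);
 */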
static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1,
                                         __m128 * gmx_restrict y1,
                                         __m128 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz);
    t3 = _mm_load_ss(xyz_shift+2);
    t4 = _mm_load_ss(xyz+2);
    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ss(t3, t4);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
}
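
/* Sketch: broadcast the shifted coordinates of one i-atom to all four
 * SIMD lanes before the inner loop over j-atoms. shiftvec and x are
 * hypothetical flat float arrays; is3 and inr are hypothetical offsets.
 *
 *     __m128 ix1, iy1, iz1;
 *     gmx_mm_load_shift_and_1rvec_broadcast_ps(shiftvec+is3, x+3*inr,
 *                                              &ix1, &iy1, &iz1);
 */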
static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_load_ss(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ss(t3, t6);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
}
static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_loadu_ps(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ps(t3, t6);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
    *x4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 1, 1, 1));
    *y4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2, 2, 2, 2));
    *z4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3, 3, 3, 3));
}
static gmx_inline void
gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1,
                                  __m128 * gmx_restrict y1,
                                  __m128 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8;

    t1  = _mm_castpd_ps(_mm_load_sd((const double *)ptrA));
    t2  = _mm_castpd_ps(_mm_load_sd((const double *)ptrB));
    t3  = _mm_castpd_ps(_mm_load_sd((const double *)ptrC));
    t4  = _mm_castpd_ps(_mm_load_sd((const double *)ptrD));
    t5  = _mm_load_ss(ptrA+2);
    t6  = _mm_load_ss(ptrB+2);
    t7  = _mm_load_ss(ptrC+2);
    t8  = _mm_load_ss(ptrD+2);
    t1  = _mm_unpacklo_ps(t1, t2);
    t3  = _mm_unpacklo_ps(t3, t4);
    *x1 = _mm_movelh_ps(t1, t3);
    *y1 = _mm_movehl_ps(t3, t1);
    t5  = _mm_unpacklo_ps(t5, t6);
    t7  = _mm_unpacklo_ps(t7, t8);
    *z1 = _mm_movelh_ps(t5, t7);
}
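
/* Sketch: load the coordinates of four different j-atoms and
 * transpose them into x/y/z registers. x is a hypothetical flat
 * coordinate array and jnrA..jnrD hypothetical atom indices.
 *
 *     __m128 jx1, jy1, jz1;
 *     gmx_mm_load_1rvec_4ptr_swizzle_ps(x+3*jnrA, x+3*jnrB,
 *                                       x+3*jnrC, x+3*jnrD,
 *                                       &jx1, &jy1, &jz1);
 */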
static gmx_inline void
gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1, t2, t3, t4;

    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)ptrA ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)ptrB ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)ptrC ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)ptrD ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;

    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrA+4) ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrB+4) ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrC+4) ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrD+4) ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;

    t1  = _mm_load_ss(ptrA+8);
    t2  = _mm_load_ss(ptrB+8);
    t3  = _mm_load_ss(ptrC+8);
    t4  = _mm_load_ss(ptrD+8);
    t1  = _mm_unpacklo_ps(t1, t3);
    t3  = _mm_unpacklo_ps(t2, t4);
    *z3 = _mm_unpacklo_ps(t1, t3);
}
static gmx_inline void
gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1, t2, t3, t4;

    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrA) ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrB) ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrC) ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrD) ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;

    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrA+4) ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrB+4) ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrC+4) ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrD+4) ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;

    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrA+8) ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrB+8) ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrC+8) ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrD+8) ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *z3 = t1;
    *x4 = t2;
    *y4 = t3;
    *z4 = t4;
}
static gmx_inline void
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * ptrA,
                                       float * ptrB,
                                       float * ptrC,
                                       float * ptrD,
                                       __m128 x1, __m128 y1, __m128 z1)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;

    t5  = _mm_unpacklo_ps(y1, z1);
    t6  = _mm_unpackhi_ps(y1, z1);
    t7  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(1, 0, 0, 0));
    t8  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(3, 2, 0, 1));
    t9  = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(1, 0, 0, 2));
    t10 = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(3, 2, 0, 3));
    t1  = _mm_load_ss(ptrA);
    t1  = _mm_loadh_pi(t1, (__m64 *)(ptrA+1));
    t1  = _mm_sub_ps(t1, t7);
    _mm_store_ss(ptrA, t1);
    _mm_storeh_pi((__m64 *)(ptrA+1), t1);
    t2  = _mm_load_ss(ptrB);
    t2  = _mm_loadh_pi(t2, (__m64 *)(ptrB+1));
    t2  = _mm_sub_ps(t2, t8);
    _mm_store_ss(ptrB, t2);
    _mm_storeh_pi((__m64 *)(ptrB+1), t2);
    t3  = _mm_load_ss(ptrC);
    t3  = _mm_loadh_pi(t3, (__m64 *)(ptrC+1));
    t3  = _mm_sub_ps(t3, t9);
    _mm_store_ss(ptrC, t3);
    _mm_storeh_pi((__m64 *)(ptrC+1), t3);
    t4  = _mm_load_ss(ptrD);
    t4  = _mm_loadh_pi(t4, (__m64 *)(ptrD+1));
    t4  = _mm_sub_ps(t4, t10);
    _mm_store_ss(ptrD, t4);
    _mm_storeh_pi((__m64 *)(ptrD+1), t4);
}
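
/* Sketch: after computing the scalar force fscal for four pairs, the
 * j-atom forces are decremented in memory. f is a hypothetical flat
 * force array; tx/ty/tz are hypothetical registers holding fscal
 * times the distance components.
 *
 *     gmx_mm_decrement_1rvec_4ptr_swizzle_ps(f+3*jnrA, f+3*jnrB,
 *                                            f+3*jnrC, f+3*jnrD,
 *                                            tx, ty, tz);
 */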
#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_3rvec_4ptr_swizzle_ps(ptrA, ptrB, ptrC, ptrD, \
                                               _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3) \
    { \
        __m128 _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10; \
        __m128 _t11, _t12, _t13, _t14, _t15, _t16, _t17, _t18, _t19; \
        __m128 _t20, _t21, _t22, _t23, _t24, _t25; \
        _t13 = _mm_unpackhi_ps(_x1, _y1); \
        _x1  = _mm_unpacklo_ps(_x1, _y1); \
        _t14 = _mm_unpackhi_ps(_z1, _x2); \
        _z1  = _mm_unpacklo_ps(_z1, _x2); \
        _t15 = _mm_unpackhi_ps(_y2, _z2); \
        _y2  = _mm_unpacklo_ps(_y2, _z2); \
        _t16 = _mm_unpackhi_ps(_x3, _y3); \
        _x3  = _mm_unpacklo_ps(_x3, _y3); \
        _t17 = _mm_shuffle_ps(_z3, _z3, _MM_SHUFFLE(0, 0, 0, 1)); \
        _t18 = _mm_movehl_ps(_z3, _z3); \
        _t19 = _mm_shuffle_ps(_t18, _t18, _MM_SHUFFLE(0, 0, 0, 1)); \
        _t20 = _mm_movelh_ps(_x1, _z1); \
        _t21 = _mm_movehl_ps(_z1, _x1); \
        _t22 = _mm_movelh_ps(_t13, _t14); \
        _t14 = _mm_movehl_ps(_t14, _t13); \
        _t23 = _mm_movelh_ps(_y2, _x3); \
        _t24 = _mm_movehl_ps(_x3, _y2); \
        _t25 = _mm_movelh_ps(_t15, _t16); \
        _t16 = _mm_movehl_ps(_t16, _t15); \
        _t1  = _mm_loadu_ps(ptrA); \
        _t2  = _mm_loadu_ps(ptrA+4); \
        _t3  = _mm_load_ss(ptrA+8); \
        _t1  = _mm_sub_ps(_t1, _t20); \
        _t2  = _mm_sub_ps(_t2, _t23); \
        _t3  = _mm_sub_ss(_t3, _z3); \
        _mm_storeu_ps(ptrA, _t1); \
        _mm_storeu_ps(ptrA+4, _t2); \
        _mm_store_ss(ptrA+8, _t3); \
        _t4  = _mm_loadu_ps(ptrB); \
        _t5  = _mm_loadu_ps(ptrB+4); \
        _t6  = _mm_load_ss(ptrB+8); \
        _t4  = _mm_sub_ps(_t4, _t21); \
        _t5  = _mm_sub_ps(_t5, _t24); \
        _t6  = _mm_sub_ss(_t6, _t17); \
        _mm_storeu_ps(ptrB, _t4); \
        _mm_storeu_ps(ptrB+4, _t5); \
        _mm_store_ss(ptrB+8, _t6); \
        _t7  = _mm_loadu_ps(ptrC); \
        _t8  = _mm_loadu_ps(ptrC+4); \
        _t9  = _mm_load_ss(ptrC+8); \
        _t7  = _mm_sub_ps(_t7, _t22); \
        _t8  = _mm_sub_ps(_t8, _t25); \
        _t9  = _mm_sub_ss(_t9, _t18); \
        _mm_storeu_ps(ptrC, _t7); \
        _mm_storeu_ps(ptrC+4, _t8); \
        _mm_store_ss(ptrC+8, _t9); \
        _t10 = _mm_loadu_ps(ptrD); \
        _t11 = _mm_loadu_ps(ptrD+4); \
        _t12 = _mm_load_ss(ptrD+8); \
        _t10 = _mm_sub_ps(_t10, _t14); \
        _t11 = _mm_sub_ps(_t11, _t16); \
        _t12 = _mm_sub_ss(_t12, _t19); \
        _mm_storeu_ps(ptrD, _t10); \
        _mm_storeu_ps(ptrD+4, _t11); \
        _mm_store_ss(ptrD+8, _t12); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    __m128 t11, t12, t13, t14, t15, t16, t17, t18, t19;
    __m128 t20, t21, t22, t23, t24, t25;

    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_shuffle_ps(z3, z3, _MM_SHUFFLE(0, 0, 0, 1));
    t18 = _mm_movehl_ps(z3, z3);
    t19 = _mm_shuffle_ps(t18, t18, _MM_SHUFFLE(0, 0, 0, 1));
    t20 = _mm_movelh_ps(x1, z1);
    t21 = _mm_movehl_ps(z1, x1);
    t22 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t23 = _mm_movelh_ps(y2, x3);
    t24 = _mm_movehl_ps(x3, y2);
    t25 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_load_ss(ptrA+8);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_load_ss(ptrB+8);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_load_ss(ptrC+8);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_load_ss(ptrD+8);

    t1  = _mm_sub_ps(t1, t20);
    t2  = _mm_sub_ps(t2, t23);
    t3  = _mm_sub_ss(t3, z3);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_store_ss(ptrA+8, t3);
    t4  = _mm_sub_ps(t4, t21);
    t5  = _mm_sub_ps(t5, t24);
    t6  = _mm_sub_ss(t6, t17);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_store_ss(ptrB+8, t6);
    t7  = _mm_sub_ps(t7, t22);
    t8  = _mm_sub_ps(t8, t25);
    t9  = _mm_sub_ss(t9, t18);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_store_ss(ptrC+8, t9);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ss(t12, t19);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_store_ss(ptrD+8, t12);
}
#endif
#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_4rvec_4ptr_swizzle_ps(ptrA, ptrB, ptrC, ptrD, \
                                               _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3, _x4, _y4, _z4) \
    { \
        __m128 _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10, _t11; \
        __m128 _t12, _t13, _t14, _t15, _t16, _t17, _t18, _t19, _t20, _t21, _t22; \
        __m128 _t23, _t24; \
        \
        _t13 = _mm_unpackhi_ps(_x1, _y1); \
        _x1  = _mm_unpacklo_ps(_x1, _y1); \
        _t14 = _mm_unpackhi_ps(_z1, _x2); \
        _z1  = _mm_unpacklo_ps(_z1, _x2); \
        _t15 = _mm_unpackhi_ps(_y2, _z2); \
        _y2  = _mm_unpacklo_ps(_y2, _z2); \
        _t16 = _mm_unpackhi_ps(_x3, _y3); \
        _x3  = _mm_unpacklo_ps(_x3, _y3); \
        _t17 = _mm_unpackhi_ps(_z3, _x4); \
        _z3  = _mm_unpacklo_ps(_z3, _x4); \
        _t18 = _mm_unpackhi_ps(_y4, _z4); \
        _y4  = _mm_unpacklo_ps(_y4, _z4); \
        _t19 = _mm_movelh_ps(_x1, _z1); \
        _z1  = _mm_movehl_ps(_z1, _x1); \
        _t20 = _mm_movelh_ps(_t13, _t14); \
        _t14 = _mm_movehl_ps(_t14, _t13); \
        _t21 = _mm_movelh_ps(_y2, _x3); \
        _x3  = _mm_movehl_ps(_x3, _y2); \
        _t22 = _mm_movelh_ps(_t15, _t16); \
        _t16 = _mm_movehl_ps(_t16, _t15); \
        _t23 = _mm_movelh_ps(_z3, _y4); \
        _y4  = _mm_movehl_ps(_y4, _z3); \
        _t24 = _mm_movelh_ps(_t17, _t18); \
        _t18 = _mm_movehl_ps(_t18, _t17); \
        _t1  = _mm_loadu_ps(ptrA); \
        _t2  = _mm_loadu_ps(ptrA+4); \
        _t3  = _mm_loadu_ps(ptrA+8); \
        _t1  = _mm_sub_ps(_t1, _t19); \
        _t2  = _mm_sub_ps(_t2, _t21); \
        _t3  = _mm_sub_ps(_t3, _t23); \
        _mm_storeu_ps(ptrA, _t1); \
        _mm_storeu_ps(ptrA+4, _t2); \
        _mm_storeu_ps(ptrA+8, _t3); \
        _t4  = _mm_loadu_ps(ptrB); \
        _t5  = _mm_loadu_ps(ptrB+4); \
        _t6  = _mm_loadu_ps(ptrB+8); \
        _t4  = _mm_sub_ps(_t4, _z1); \
        _t5  = _mm_sub_ps(_t5, _x3); \
        _t6  = _mm_sub_ps(_t6, _y4); \
        _mm_storeu_ps(ptrB, _t4); \
        _mm_storeu_ps(ptrB+4, _t5); \
        _mm_storeu_ps(ptrB+8, _t6); \
        _t7  = _mm_loadu_ps(ptrC); \
        _t8  = _mm_loadu_ps(ptrC+4); \
        _t9  = _mm_loadu_ps(ptrC+8); \
        _t7  = _mm_sub_ps(_t7, _t20); \
        _t8  = _mm_sub_ps(_t8, _t22); \
        _t9  = _mm_sub_ps(_t9, _t24); \
        _mm_storeu_ps(ptrC, _t7); \
        _mm_storeu_ps(ptrC+4, _t8); \
        _mm_storeu_ps(ptrC+8, _t9); \
        _t10 = _mm_loadu_ps(ptrD); \
        _t11 = _mm_loadu_ps(ptrD+4); \
        _t12 = _mm_loadu_ps(ptrD+8); \
        _t10 = _mm_sub_ps(_t10, _t14); \
        _t11 = _mm_sub_ps(_t11, _t16); \
        _t12 = _mm_sub_ps(_t12, _t18); \
        _mm_storeu_ps(ptrD, _t10); \
        _mm_storeu_ps(ptrD+4, _t11); \
        _mm_storeu_ps(ptrD+8, _t12); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3,
                                       __m128 x4, __m128 y4, __m128 z4)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11;
    __m128 t12, t13, t14, t15, t16, t17, t18, t19, t20, t21, t22;
    __m128 t23, t24;

    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_unpackhi_ps(z3, x4);
    z3  = _mm_unpacklo_ps(z3, x4);
    t18 = _mm_unpackhi_ps(y4, z4);
    y4  = _mm_unpacklo_ps(y4, z4);
    t19 = _mm_movelh_ps(x1, z1);
    z1  = _mm_movehl_ps(z1, x1);
    t20 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t21 = _mm_movelh_ps(y2, x3);
    x3  = _mm_movehl_ps(x3, y2);
    t22 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t23 = _mm_movelh_ps(z3, y4);
    y4  = _mm_movehl_ps(y4, z3);
    t24 = _mm_movelh_ps(t17, t18);
    t18 = _mm_movehl_ps(t18, t17);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_loadu_ps(ptrA+8);
    t1  = _mm_sub_ps(t1, t19);
    t2  = _mm_sub_ps(t2, t21);
    t3  = _mm_sub_ps(t3, t23);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_storeu_ps(ptrA+8, t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_loadu_ps(ptrB+8);
    t4  = _mm_sub_ps(t4, z1);
    t5  = _mm_sub_ps(t5, x3);
    t6  = _mm_sub_ps(t6, y4);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_storeu_ps(ptrB+8, t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_loadu_ps(ptrC+8);
    t7  = _mm_sub_ps(t7, t20);
    t8  = _mm_sub_ps(t8, t22);
    t9  = _mm_sub_ps(t9, t24);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_storeu_ps(ptrC+8, t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_loadu_ps(ptrD+8);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ps(t12, t18);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_storeu_ps(ptrD+8, t12);
}
#endif
static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t2, t3;

    fix1 = _mm_hadd_ps(fix1, fix1);
    fiy1 = _mm_hadd_ps(fiy1, fiz1);
    fix1 = _mm_hadd_ps(fix1, fiy1); /* fiz1 fiy1 fix1 fix1 */

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2, (__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3, (__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2, fix1);
    t3 = _mm_add_ps(t3, fix1);

    _mm_store_ss(fptr, t2);
    _mm_storeh_pi((__m64 *)(fptr+1), t2);
    _mm_store_ss(fshiftptr, t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1), t3);
}
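
/* Sketch: at the end of an inner loop, reduce the four partial i-atom
 * force accumulators and add them to the force and shift-force
 * arrays. fix1/fiy1/fiz1 are accumulated over the loop; f, fshift,
 * inr, and is3 are hypothetical names for the surrounding kernel data.
 *
 *     gmx_mm_update_iforce_1atom_swizzle_ps(fix1, fiy1, fiz1,
 *                                           f+3*inr, fshift+is3);
 */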
#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_3atom_swizzle_ps(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, \
                                              fptr, fshiftptr) \
    { \
        __m128 _t1, _t2, _t3, _t4; \
        \
        fix1 = _mm_hadd_ps(fix1, fiy1); \
        fiz1 = _mm_hadd_ps(fiz1, fix2); \
        fiy2 = _mm_hadd_ps(fiy2, fiz2); \
        fix3 = _mm_hadd_ps(fix3, fiy3); \
        fiz3 = _mm_hadd_ps(fiz3, fiz3); \
        fix1 = _mm_hadd_ps(fix1, fiz1); \
        fiy2 = _mm_hadd_ps(fiy2, fix3); \
        fiz3 = _mm_hadd_ps(fiz3, fiz3); \
        _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr))); \
        _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4))); \
        _mm_store_ss (fptr+8, _mm_add_ss(fiz3, _mm_load_ss(fptr+8))); \
        _t4 = _mm_load_ss(fshiftptr+2); \
        _t4 = _mm_loadh_pi(_t4, (__m64 *)(fshiftptr)); \
        _t1 = _mm_shuffle_ps(fiz3, fix1, _MM_SHUFFLE(1, 0, 0, 0)); \
        _t2 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(3, 2, 2, 2)); \
        _t3 = _mm_shuffle_ps(fiy2, fix1, _MM_SHUFFLE(3, 3, 0, 1)); \
        _t3 = _mm_shuffle_ps(_t3, _t3, _MM_SHUFFLE(1, 2, 0, 0)); \
        _t1 = _mm_add_ps(_t1, _t2); \
        _t3 = _mm_add_ps(_t3, _t4); \
        _t1 = _mm_add_ps(_t1, _t3); \
        _mm_store_ss(fshiftptr+2, _t1); \
        _mm_storeh_pi((__m64 *)(fshiftptr), _t1); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4;

    fix1 = _mm_hadd_ps(fix1, fiy1);
    fiz1 = _mm_hadd_ps(fiz1, fix2);
    fiy2 = _mm_hadd_ps(fiy2, fiz2);
    fix3 = _mm_hadd_ps(fix3, fiy3);
    fiz3 = _mm_hadd_ps(fiz3, fiz3);

    fix1 = _mm_hadd_ps(fix1, fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2, fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3, fiz3); /* -    -    -    fiz3 */

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_store_ss (fptr+8, _mm_add_ss(fiz3, _mm_load_ss(fptr+8)));

    t4 = _mm_load_ss(fshiftptr+2);
    t4 = _mm_loadh_pi(t4, (__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fiz3, fix1, _MM_SHUFFLE(1, 0, 0, 0)); /* fiy1 fix1 -    fiz3 */
    t2 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(3, 2, 2, 2)); /* fiy3 fix3 -    fiz1 */
    t3 = _mm_shuffle_ps(fiy2, fix1, _MM_SHUFFLE(3, 3, 0, 1)); /* fix2 fix2 fiy2 fiz2 */
    t3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 2, 0, 0));     /* fiy2 fix2 -    fiz2 */

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3); /* y x - z */

    _mm_store_ss(fshiftptr+2, t1);
    _mm_storeh_pi((__m64 *)(fshiftptr), t1);
}
#endif
#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_4atom_swizzle_ps(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, fix4, fiy4, fiz4, \
                                              fptr, fshiftptr) \
    { \
        __m128 _t1, _t2, _t3, _t4, _t5; \
        \
        fix1 = _mm_hadd_ps(fix1, fiy1); \
        fiz1 = _mm_hadd_ps(fiz1, fix2); \
        fiy2 = _mm_hadd_ps(fiy2, fiz2); \
        fix3 = _mm_hadd_ps(fix3, fiy3); \
        fiz3 = _mm_hadd_ps(fiz3, fix4); \
        fiy4 = _mm_hadd_ps(fiy4, fiz4); \
        fix1 = _mm_hadd_ps(fix1, fiz1); \
        fiy2 = _mm_hadd_ps(fiy2, fix3); \
        fiz3 = _mm_hadd_ps(fiz3, fiy4); \
        _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr))); \
        _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4))); \
        _mm_storeu_ps(fptr+8, _mm_add_ps(fiz3, _mm_loadu_ps(fptr+8))); \
        _t5 = _mm_load_ss(fshiftptr+2); \
        _t5 = _mm_loadh_pi(_t5, (__m64 *)(fshiftptr)); \
        _t1 = _mm_shuffle_ps(fix1, fix1, _MM_SHUFFLE(1, 0, 2, 2)); \
        _t2 = _mm_shuffle_ps(fiy2, fiy2, _MM_SHUFFLE(3, 2, 1, 1)); \
        _t3 = _mm_shuffle_ps(fiz3, fiz3, _MM_SHUFFLE(2, 1, 0, 0)); \
        _t4 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(0, 0, 3, 3)); \
        _t4 = _mm_shuffle_ps(fiz3, _t4, _MM_SHUFFLE(2, 0, 3, 3)); \
        _t1 = _mm_add_ps(_t1, _t2); \
        _t3 = _mm_add_ps(_t3, _t4); \
        _t1 = _mm_add_ps(_t1, _t3); \
        _t5 = _mm_add_ps(_t5, _t1); \
        _mm_store_ss(fshiftptr+2, _t5); \
        _mm_storeh_pi((__m64 *)(fshiftptr), _t5); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4, t5;

    fix1 = _mm_hadd_ps(fix1, fiy1);
    fiz1 = _mm_hadd_ps(fiz1, fix2);
    fiy2 = _mm_hadd_ps(fiy2, fiz2);
    fix3 = _mm_hadd_ps(fix3, fiy3);
    fiz3 = _mm_hadd_ps(fiz3, fix4);
    fiy4 = _mm_hadd_ps(fiy4, fiz4);

    fix1 = _mm_hadd_ps(fix1, fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2, fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3, fiy4); /* fiz4 fiy4 fix4 fiz3 */

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_storeu_ps(fptr+8, _mm_add_ps(fiz3, _mm_loadu_ps(fptr+8)));

    t5 = _mm_load_ss(fshiftptr+2);
    t5 = _mm_loadh_pi(t5, (__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fix1, fix1, _MM_SHUFFLE(1, 0, 2, 2));
    t2 = _mm_shuffle_ps(fiy2, fiy2, _MM_SHUFFLE(3, 2, 1, 1));
    t3 = _mm_shuffle_ps(fiz3, fiz3, _MM_SHUFFLE(2, 1, 0, 0));
    t4 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(0, 0, 3, 3));
    t4 = _mm_shuffle_ps(fiz3, t4, _MM_SHUFFLE(2, 0, 3, 3));

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3);
    t5 = _mm_add_ps(t5, t1);

    _mm_store_ss(fshiftptr+2, t5);
    _mm_storeh_pi((__m64 *)(fshiftptr), t5);
}
#endif
static gmx_inline void
gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
    pot1 = _mm_add_ps(pot1, _mm_movehl_ps(_mm_setzero_ps(), pot1));
    pot1 = _mm_add_ps(pot1, _mm_shuffle_ps(pot1, pot1, _MM_SHUFFLE(0, 0, 0, 1)));
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
}
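
/* Sketch: horizontally sum a potential-energy accumulator and add the
 * result to an energy-group slot. velecsum, Vc, and ggid are
 * hypothetical names for kernel accumulators and output arrays.
 *
 *     gmx_mm_update_1pot_ps(velecsum, Vc+ggid);
 */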
static gmx_inline void
gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB)
{
    __m128 t1, t2;

    t1   = _mm_movehl_ps(pot2, pot1);
    t2   = _mm_movelh_ps(pot1, pot2);
    t1   = _mm_add_ps(t1, t2);
    t2   = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 1, 1));
    pot1 = _mm_add_ps(t1, t2);
    pot2 = _mm_movehl_ps(t2, pot1);
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
    _mm_store_ss(ptrB, _mm_add_ss(pot2, _mm_load_ss(ptrB)));
}
#endif /* _kernelutil_x86_sse4_1_single_h_ */