/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _kernelutil_x86_sse2_single_h_
#define _kernelutil_x86_sse2_single_h_
/* We require SSE2 now! */

#include "gromacs/simd/general_x86_sse2.h"


/* Normal sum of four xmm registers */
#define gmx_mm_sum4_ps(t0, t1, t2, t3)  _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3))

static gmx_inline __m128
gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
{
    return _mm_add_ps( _mm_add_ps( _mm_mul_ps(dx, dx), _mm_mul_ps(dy, dy) ), _mm_mul_ps(dz, dz) );
}

static gmx_inline int
gmx_mm_any_lt(__m128 a, __m128 b)
{
    return _mm_movemask_ps(_mm_cmplt_ps(a, b));
}

/* Load a single value from 1-4 places, merge into xmm register */
static gmx_inline __m128
gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
                             const float * gmx_restrict ptrB,
                             const float * gmx_restrict ptrC,
                             const float * gmx_restrict ptrD)
{
    __m128 t1, t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA), _mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB), _mm_load_ss(ptrD));
    return _mm_unpacklo_ps(t1, t2);
}
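
/* Usage sketch (illustrative only, not part of the kernels): gather one
 * per-atom scalar, e.g. a charge, for four neighbour atoms into a single
 * register. The array 'charge' and the indices jnrA..jnrD are hypothetical
 * names.
 *
 *     __m128 jq = gmx_mm_load_4real_swizzle_ps(charge+jnrA, charge+jnrB,
 *                                              charge+jnrC, charge+jnrD);
 */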

static gmx_inline void
gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
                              float * gmx_restrict ptrB,
                              float * gmx_restrict ptrC,
                              float * gmx_restrict ptrD,
                              __m128 xmm1)
{
    __m128 t2, t3, t4;

    t3 = _mm_movehl_ps(_mm_setzero_ps(), xmm1);
    t2 = _mm_shuffle_ps(xmm1, xmm1, _MM_SHUFFLE(1, 1, 1, 1));
    t4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 1, 1, 1));
    _mm_store_ss(ptrA, xmm1);
    _mm_store_ss(ptrB, t2);
    _mm_store_ss(ptrC, t3);
    _mm_store_ss(ptrD, t4);
}

/* Similar to store, but increments value in memory */
static gmx_inline void
gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
                                  float * gmx_restrict ptrB,
                                  float * gmx_restrict ptrC,
                                  float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 tmp;

    tmp = gmx_mm_load_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD);
    tmp = _mm_add_ps(tmp, xmm1);
    gmx_mm_store_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, tmp);
}
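
/* Usage sketch (illustrative only): add the four lanes of an accumulator to
 * four scattered single-float locations, one per atom. 'accum' and the
 * indices jnrA..jnrD are hypothetical names.
 *
 *     gmx_mm_increment_4real_swizzle_ps(accum+jnrA, accum+jnrB,
 *                                       accum+jnrC, accum+jnrD, contrib);
 */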

static gmx_inline void
gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
                             const float * gmx_restrict p2,
                             const float * gmx_restrict p3,
                             const float * gmx_restrict p4,
                             __m128 * gmx_restrict c6,
                             __m128 * gmx_restrict c12)
{
    __m128 t1, t2, t3, t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p1); /* - - c12a  c6a */
    t2   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p2); /* - - c12b  c6b */
    t3   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p3); /* - - c12c  c6c */
    t4   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p4); /* - - c12d  c6d */
    t1   = _mm_unpacklo_ps(t1, t2);
    t2   = _mm_unpacklo_ps(t3, t4);
    *c6  = _mm_movelh_ps(t1, t2);
    *c12 = _mm_movehl_ps(t2, t1);
}
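
/* Usage sketch (illustrative only): load the Lennard-Jones (c6,c12) pair for
 * four atom pairs from a table that stores the two parameters contiguously
 * per pair type. 'vdwparam' and the per-pair offsets offA..offD are
 * hypothetical names.
 *
 *     __m128 c6, c12;
 *     gmx_mm_load_4pair_swizzle_ps(vdwparam+offA, vdwparam+offB,
 *                                  vdwparam+offC, vdwparam+offD,
 *                                  &c6, &c12);
 */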

/* Routines to load 1-4 rvec from 4 places.
 * We mainly use these to load coordinates. The extra routines
 * are very efficient for the water-water loops, since we e.g.
 * know that a TIP4p water has 4 atoms, so we should load 12 floats+shuffle.
 * A usage sketch follows the first routine below.
 */

static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1,
                                         __m128 * gmx_restrict y1,
                                         __m128 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz);
    t3 = _mm_load_ss(xyz_shift+2);
    t4 = _mm_load_ss(xyz+2);
    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ss(t3, t4);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
}
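
/* Usage sketch (illustrative only): broadcast the PBC-shifted position of a
 * single i-atom into three registers before an interaction loop. The names
 * 'shiftvec', 'x', 'i_shift_offset' and 'i_coord_offset' are hypothetical.
 *
 *     __m128 ix1, iy1, iz1;
 *     gmx_mm_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,
 *                                              x+i_coord_offset,
 *                                              &ix1, &iy1, &iz1);
 */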

static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_load_ss(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ss(t3, t6);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
}

static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_loadu_ps(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ps(t3, t6);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
    *x4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 1, 1, 1));
    *y4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2, 2, 2, 2));
    *z4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3, 3, 3, 3));
}

static gmx_inline void
gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1,
                                  __m128 * gmx_restrict y1,
                                  __m128 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8;
    t1  = _mm_castpd_ps(_mm_load_sd((const double *)ptrA));
    t2  = _mm_castpd_ps(_mm_load_sd((const double *)ptrB));
    t3  = _mm_castpd_ps(_mm_load_sd((const double *)ptrC));
    t4  = _mm_castpd_ps(_mm_load_sd((const double *)ptrD));
    t5  = _mm_load_ss(ptrA+2);
    t6  = _mm_load_ss(ptrB+2);
    t7  = _mm_load_ss(ptrC+2);
    t8  = _mm_load_ss(ptrD+2);
    t1  = _mm_unpacklo_ps(t1, t2);
    t3  = _mm_unpacklo_ps(t3, t4);
    *x1 = _mm_movelh_ps(t1, t3);
    *y1 = _mm_movehl_ps(t3, t1);
    t5  = _mm_unpacklo_ps(t5, t6);
    t7  = _mm_unpacklo_ps(t7, t8);
    *z1 = _mm_movelh_ps(t5, t7);
}
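
/* Usage sketch (illustrative only): load the coordinates of four neighbour
 * atoms and transpose them into separate x/y/z registers. 'x' and the
 * offsets j_coord_offsetA..D are hypothetical names.
 *
 *     __m128 jx1, jy1, jz1;
 *     gmx_mm_load_1rvec_4ptr_swizzle_ps(x+j_coord_offsetA, x+j_coord_offsetB,
 *                                       x+j_coord_offsetC, x+j_coord_offsetD,
 *                                       &jx1, &jy1, &jz1);
 */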

static gmx_inline void
gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1, t2, t3, t4;
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrB);
    t3  = _mm_loadu_ps(ptrC);
    t4  = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1  = _mm_loadu_ps(ptrA+4);
    t2  = _mm_loadu_ps(ptrB+4);
    t3  = _mm_loadu_ps(ptrC+4);
    t4  = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1  = _mm_load_ss(ptrA+8);
    t2  = _mm_load_ss(ptrB+8);
    t3  = _mm_load_ss(ptrC+8);
    t4  = _mm_load_ss(ptrD+8);
    t1  = _mm_unpacklo_ps(t1, t3);
    t3  = _mm_unpacklo_ps(t2, t4);
    *z3 = _mm_unpacklo_ps(t1, t3);
}

static gmx_inline void
gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1, t2, t3, t4;
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrB);
    t3  = _mm_loadu_ps(ptrC);
    t4  = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1  = _mm_loadu_ps(ptrA+4);
    t2  = _mm_loadu_ps(ptrB+4);
    t3  = _mm_loadu_ps(ptrC+4);
    t4  = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1  = _mm_loadu_ps(ptrA+8);
    t2  = _mm_loadu_ps(ptrB+8);
    t3  = _mm_loadu_ps(ptrC+8);
    t4  = _mm_loadu_ps(ptrD+8);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *z3 = t1;
    *x4 = t2;
    *y4 = t3;
    *z4 = t4;
}

static gmx_inline void
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA,
                                       float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC,
                                       float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
    t5  = _mm_unpacklo_ps(y1, z1);
    t6  = _mm_unpackhi_ps(y1, z1);
    t7  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(1, 0, 0, 0));
    t8  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(3, 2, 0, 1));
    t9  = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(1, 0, 0, 2));
    t10 = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(3, 2, 0, 3));
    t1  = _mm_load_ss(ptrA);
    t1  = _mm_loadh_pi(t1, (__m64 *)(ptrA+1));
    t1  = _mm_sub_ps(t1, t7);
    _mm_store_ss(ptrA, t1);
    _mm_storeh_pi((__m64 *)(ptrA+1), t1);
    t2  = _mm_load_ss(ptrB);
    t2  = _mm_loadh_pi(t2, (__m64 *)(ptrB+1));
    t2  = _mm_sub_ps(t2, t8);
    _mm_store_ss(ptrB, t2);
    _mm_storeh_pi((__m64 *)(ptrB+1), t2);
    t3  = _mm_load_ss(ptrC);
    t3  = _mm_loadh_pi(t3, (__m64 *)(ptrC+1));
    t3  = _mm_sub_ps(t3, t9);
    _mm_store_ss(ptrC, t3);
    _mm_storeh_pi((__m64 *)(ptrC+1), t3);
    t4  = _mm_load_ss(ptrD);
    t4  = _mm_loadh_pi(t4, (__m64 *)(ptrD+1));
    t4  = _mm_sub_ps(t4, t10);
    _mm_store_ss(ptrD, t4);
    _mm_storeh_pi((__m64 *)(ptrD+1), t4);
}

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_3rvec_4ptr_swizzle_ps(ptrA, ptrB, ptrC, ptrD, \
                                               _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3) \
    { \
        __m128 _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10; \
        __m128 _t11, _t12, _t13, _t14, _t15, _t16, _t17, _t18, _t19; \
        __m128 _t20, _t21, _t22, _t23, _t24, _t25; \
        _t13 = _mm_unpackhi_ps(_x1, _y1); \
        _x1  = _mm_unpacklo_ps(_x1, _y1); \
        _t14 = _mm_unpackhi_ps(_z1, _x2); \
        _z1  = _mm_unpacklo_ps(_z1, _x2); \
        _t15 = _mm_unpackhi_ps(_y2, _z2); \
        _y2  = _mm_unpacklo_ps(_y2, _z2); \
        _t16 = _mm_unpackhi_ps(_x3, _y3); \
        _x3  = _mm_unpacklo_ps(_x3, _y3); \
        _t17 = _mm_shuffle_ps(_z3, _z3, _MM_SHUFFLE(0, 0, 0, 1)); \
        _t18 = _mm_movehl_ps(_z3, _z3); \
        _t19 = _mm_shuffle_ps(_t18, _t18, _MM_SHUFFLE(0, 0, 0, 1)); \
        _t20 = _mm_movelh_ps(_x1, _z1); \
        _t21 = _mm_movehl_ps(_z1, _x1); \
        _t22 = _mm_movelh_ps(_t13, _t14); \
        _t14 = _mm_movehl_ps(_t14, _t13); \
        _t23 = _mm_movelh_ps(_y2, _x3); \
        _t24 = _mm_movehl_ps(_x3, _y2); \
        _t25 = _mm_movelh_ps(_t15, _t16); \
        _t16 = _mm_movehl_ps(_t16, _t15); \
        _t1  = _mm_loadu_ps(ptrA); \
        _t2  = _mm_loadu_ps(ptrA+4); \
        _t3  = _mm_load_ss(ptrA+8); \
        _t1  = _mm_sub_ps(_t1, _t20); \
        _t2  = _mm_sub_ps(_t2, _t23); \
        _t3  = _mm_sub_ss(_t3, _z3); \
        _mm_storeu_ps(ptrA, _t1); \
        _mm_storeu_ps(ptrA+4, _t2); \
        _mm_store_ss(ptrA+8, _t3); \
        _t4  = _mm_loadu_ps(ptrB); \
        _t5  = _mm_loadu_ps(ptrB+4); \
        _t6  = _mm_load_ss(ptrB+8); \
        _t4  = _mm_sub_ps(_t4, _t21); \
        _t5  = _mm_sub_ps(_t5, _t24); \
        _t6  = _mm_sub_ss(_t6, _t17); \
        _mm_storeu_ps(ptrB, _t4); \
        _mm_storeu_ps(ptrB+4, _t5); \
        _mm_store_ss(ptrB+8, _t6); \
        _t7  = _mm_loadu_ps(ptrC); \
        _t8  = _mm_loadu_ps(ptrC+4); \
        _t9  = _mm_load_ss(ptrC+8); \
        _t7  = _mm_sub_ps(_t7, _t22); \
        _t8  = _mm_sub_ps(_t8, _t25); \
        _t9  = _mm_sub_ss(_t9, _t18); \
        _mm_storeu_ps(ptrC, _t7); \
        _mm_storeu_ps(ptrC+4, _t8); \
        _mm_store_ss(ptrC+8, _t9); \
        _t10 = _mm_loadu_ps(ptrD); \
        _t11 = _mm_loadu_ps(ptrD+4); \
        _t12 = _mm_load_ss(ptrD+8); \
        _t10 = _mm_sub_ps(_t10, _t14); \
        _t11 = _mm_sub_ps(_t11, _t16); \
        _t12 = _mm_sub_ss(_t12, _t19); \
        _mm_storeu_ps(ptrD, _t10); \
        _mm_storeu_ps(ptrD+4, _t11); \
        _mm_store_ss(ptrD+8, _t12); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    __m128 t11, t12, t13, t14, t15, t16, t17, t18, t19;
    __m128 t20, t21, t22, t23, t24, t25;

    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_shuffle_ps(z3, z3, _MM_SHUFFLE(0, 0, 0, 1));
    t18 = _mm_movehl_ps(z3, z3);
    t19 = _mm_shuffle_ps(t18, t18, _MM_SHUFFLE(0, 0, 0, 1));
    t20 = _mm_movelh_ps(x1, z1);
    t21 = _mm_movehl_ps(z1, x1);
    t22 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t23 = _mm_movelh_ps(y2, x3);
    t24 = _mm_movehl_ps(x3, y2);
    t25 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_load_ss(ptrA+8);
    t1  = _mm_sub_ps(t1, t20);
    t2  = _mm_sub_ps(t2, t23);
    t3  = _mm_sub_ss(t3, z3);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_store_ss(ptrA+8, t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_load_ss(ptrB+8);
    t4  = _mm_sub_ps(t4, t21);
    t5  = _mm_sub_ps(t5, t24);
    t6  = _mm_sub_ss(t6, t17);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_store_ss(ptrB+8, t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_load_ss(ptrC+8);
    t7  = _mm_sub_ps(t7, t22);
    t8  = _mm_sub_ps(t8, t25);
    t9  = _mm_sub_ss(t9, t18);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_store_ss(ptrC+8, t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_load_ss(ptrD+8);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ss(t12, t19);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_store_ss(ptrD+8, t12);
}
#endif

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_4rvec_4ptr_swizzle_ps(ptrA, ptrB, ptrC, ptrD, \
                                               _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3, _x4, _y4, _z4) \
    { \
        __m128 _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10, _t11; \
        __m128 _t12, _t13, _t14, _t15, _t16, _t17, _t18, _t19, _t20, _t21, _t22; \
        __m128 _t23, _t24; \
        _t13 = _mm_unpackhi_ps(_x1, _y1); \
        _x1  = _mm_unpacklo_ps(_x1, _y1); \
        _t14 = _mm_unpackhi_ps(_z1, _x2); \
        _z1  = _mm_unpacklo_ps(_z1, _x2); \
        _t15 = _mm_unpackhi_ps(_y2, _z2); \
        _y2  = _mm_unpacklo_ps(_y2, _z2); \
        _t16 = _mm_unpackhi_ps(_x3, _y3); \
        _x3  = _mm_unpacklo_ps(_x3, _y3); \
        _t17 = _mm_unpackhi_ps(_z3, _x4); \
        _z3  = _mm_unpacklo_ps(_z3, _x4); \
        _t18 = _mm_unpackhi_ps(_y4, _z4); \
        _y4  = _mm_unpacklo_ps(_y4, _z4); \
        _t19 = _mm_movelh_ps(_x1, _z1); \
        _z1  = _mm_movehl_ps(_z1, _x1); \
        _t20 = _mm_movelh_ps(_t13, _t14); \
        _t14 = _mm_movehl_ps(_t14, _t13); \
        _t21 = _mm_movelh_ps(_y2, _x3); \
        _x3  = _mm_movehl_ps(_x3, _y2); \
        _t22 = _mm_movelh_ps(_t15, _t16); \
        _t16 = _mm_movehl_ps(_t16, _t15); \
        _t23 = _mm_movelh_ps(_z3, _y4); \
        _y4  = _mm_movehl_ps(_y4, _z3); \
        _t24 = _mm_movelh_ps(_t17, _t18); \
        _t18 = _mm_movehl_ps(_t18, _t17); \
        _t1  = _mm_loadu_ps(ptrA); \
        _t2  = _mm_loadu_ps(ptrA+4); \
        _t3  = _mm_loadu_ps(ptrA+8); \
        _t1  = _mm_sub_ps(_t1, _t19); \
        _t2  = _mm_sub_ps(_t2, _t21); \
        _t3  = _mm_sub_ps(_t3, _t23); \
        _mm_storeu_ps(ptrA, _t1); \
        _mm_storeu_ps(ptrA+4, _t2); \
        _mm_storeu_ps(ptrA+8, _t3); \
        _t4  = _mm_loadu_ps(ptrB); \
        _t5  = _mm_loadu_ps(ptrB+4); \
        _t6  = _mm_loadu_ps(ptrB+8); \
        _t4  = _mm_sub_ps(_t4, _z1); \
        _t5  = _mm_sub_ps(_t5, _x3); \
        _t6  = _mm_sub_ps(_t6, _y4); \
        _mm_storeu_ps(ptrB, _t4); \
        _mm_storeu_ps(ptrB+4, _t5); \
        _mm_storeu_ps(ptrB+8, _t6); \
        _t7  = _mm_loadu_ps(ptrC); \
        _t8  = _mm_loadu_ps(ptrC+4); \
        _t9  = _mm_loadu_ps(ptrC+8); \
        _t7  = _mm_sub_ps(_t7, _t20); \
        _t8  = _mm_sub_ps(_t8, _t22); \
        _t9  = _mm_sub_ps(_t9, _t24); \
        _mm_storeu_ps(ptrC, _t7); \
        _mm_storeu_ps(ptrC+4, _t8); \
        _mm_storeu_ps(ptrC+8, _t9); \
        _t10 = _mm_loadu_ps(ptrD); \
        _t11 = _mm_loadu_ps(ptrD+4); \
        _t12 = _mm_loadu_ps(ptrD+8); \
        _t10 = _mm_sub_ps(_t10, _t14); \
        _t11 = _mm_sub_ps(_t11, _t16); \
        _t12 = _mm_sub_ps(_t12, _t18); \
        _mm_storeu_ps(ptrD, _t10); \
        _mm_storeu_ps(ptrD+4, _t11); \
        _mm_storeu_ps(ptrD+8, _t12); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3,
                                       __m128 x4, __m128 y4, __m128 z4)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11;
    __m128 t12, t13, t14, t15, t16, t17, t18, t19, t20, t21, t22;
    __m128 t23, t24;

    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_unpackhi_ps(z3, x4);
    z3  = _mm_unpacklo_ps(z3, x4);
    t18 = _mm_unpackhi_ps(y4, z4);
    y4  = _mm_unpacklo_ps(y4, z4);
    t19 = _mm_movelh_ps(x1, z1);
    z1  = _mm_movehl_ps(z1, x1);
    t20 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t21 = _mm_movelh_ps(y2, x3);
    x3  = _mm_movehl_ps(x3, y2);
    t22 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t23 = _mm_movelh_ps(z3, y4);
    y4  = _mm_movehl_ps(y4, z3);
    t24 = _mm_movelh_ps(t17, t18);
    t18 = _mm_movehl_ps(t18, t17);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_loadu_ps(ptrA+8);
    t1  = _mm_sub_ps(t1, t19);
    t2  = _mm_sub_ps(t2, t21);
    t3  = _mm_sub_ps(t3, t23);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_storeu_ps(ptrA+8, t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_loadu_ps(ptrB+8);
    t4  = _mm_sub_ps(t4, z1);
    t5  = _mm_sub_ps(t5, x3);
    t6  = _mm_sub_ps(t6, y4);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_storeu_ps(ptrB+8, t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_loadu_ps(ptrC+8);
    t7  = _mm_sub_ps(t7, t20);
    t8  = _mm_sub_ps(t8, t22);
    t9  = _mm_sub_ps(t9, t24);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_storeu_ps(ptrC+8, t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_loadu_ps(ptrD+8);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ps(t12, t18);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_storeu_ps(ptrD+8, t12);
}
#endif

static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3;

    /* transpose data */
    t1   = _mm_setzero_ps();
    _MM_TRANSPOSE4_PS(fix1, t1, fiy1, fiz1);
    fix1 = _mm_add_ps(_mm_add_ps(fix1, t1), _mm_add_ps(fiy1, fiz1));

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2, (__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3, (__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2, fix1);
    t3 = _mm_add_ps(t3, fix1);

    _mm_store_ss(fptr, t2);
    _mm_storeh_pi((__m64 *)(fptr+1), t2);
    _mm_store_ss(fshiftptr, t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1), t3);
}

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_3atom_swizzle_ps(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, \
                                              fptr, fshiftptr) \
    { \
        __m128 _t1, _t2, _t3, _t4; \
        _MM_TRANSPOSE4_PS(fix1, fiy1, fiz1, fix2); \
        _MM_TRANSPOSE4_PS(fiy2, fiz2, fix3, fiy3); \
        _t2  = _mm_movehl_ps(_mm_setzero_ps(), fiz3); \
        _t1  = _mm_shuffle_ps(fiz3, fiz3, _MM_SHUFFLE(0, 0, 0, 1)); \
        _t3  = _mm_shuffle_ps(_t2, _t2, _MM_SHUFFLE(0, 0, 0, 1)); \
        fix1 = _mm_add_ps(_mm_add_ps(fix1, fiy1), _mm_add_ps(fiz1, fix2)); \
        fiy2 = _mm_add_ps(_mm_add_ps(fiy2, fiz2), _mm_add_ps(fix3, fiy3)); \
        fiz3 = _mm_add_ss(_mm_add_ps(fiz3, _t1), _mm_add_ps(_t2, _t3)); \
        _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  )); \
        _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4))); \
        _mm_store_ss (fptr+8, _mm_add_ss(fiz3, _mm_load_ss(fptr+8) )); \
        _t4 = _mm_load_ss(fshiftptr+2); \
        _t4 = _mm_loadh_pi(_t4, (__m64 *)(fshiftptr)); \
        _t1 = _mm_shuffle_ps(fiz3, fix1, _MM_SHUFFLE(1, 0, 0, 0)); \
        _t2 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(3, 2, 2, 2)); \
        _t3 = _mm_shuffle_ps(fiy2, fix1, _MM_SHUFFLE(3, 3, 0, 1)); \
        _t3 = _mm_shuffle_ps(_t3, _t3, _MM_SHUFFLE(1, 2, 0, 0)); \
        _t1 = _mm_add_ps(_t1, _t2); \
        _t3 = _mm_add_ps(_t3, _t4); \
        _t1 = _mm_add_ps(_t1, _t3); \
        _mm_store_ss(fshiftptr+2, _t1); \
        _mm_storeh_pi((__m64 *)(fshiftptr), _t1); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4;

    /* transpose data */
    _MM_TRANSPOSE4_PS(fix1, fiy1, fiz1, fix2);
    _MM_TRANSPOSE4_PS(fiy2, fiz2, fix3, fiy3);
    t2 = _mm_movehl_ps(_mm_setzero_ps(), fiz3);
    t1 = _mm_shuffle_ps(fiz3, fiz3, _MM_SHUFFLE(0, 0, 0, 1));
    t3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(0, 0, 0, 1));

    fix1 = _mm_add_ps(_mm_add_ps(fix1, fiy1), _mm_add_ps(fiz1, fix2));
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2, fiz2), _mm_add_ps(fix3, fiy3));
    fiz3 = _mm_add_ss(_mm_add_ps(fiz3, t1), _mm_add_ps(t2, t3));

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_store_ss (fptr+8, _mm_add_ss(fiz3, _mm_load_ss(fptr+8) ));

    t4 = _mm_load_ss(fshiftptr+2);
    t4 = _mm_loadh_pi(t4, (__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fiz3, fix1, _MM_SHUFFLE(1, 0, 0, 0)); /* fiy1 fix1  -   fiz3 */
    t2 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(3, 2, 2, 2)); /* fiy3 fix3  -   fiz1 */
    t3 = _mm_shuffle_ps(fiy2, fix1, _MM_SHUFFLE(3, 3, 0, 1)); /* fix2 fix2 fiy2 fiz2 */
    t3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 2, 0, 0));     /* fiy2 fix2  -   fiz2 */

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3); /* y x - z */

    _mm_store_ss(fshiftptr+2, t1);
    _mm_storeh_pi((__m64 *)(fshiftptr), t1);
}
#endif

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_4atom_swizzle_ps(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, fix4, fiy4, fiz4, \
                                              fptr, fshiftptr) \
    { \
        __m128 _t1, _t2, _t3, _t4, _t5; \
        _MM_TRANSPOSE4_PS(fix1, fiy1, fiz1, fix2); \
        _MM_TRANSPOSE4_PS(fiy2, fiz2, fix3, fiy3); \
        _MM_TRANSPOSE4_PS(fiz3, fix4, fiy4, fiz4); \
        fix1 = _mm_add_ps(_mm_add_ps(fix1, fiy1), _mm_add_ps(fiz1, fix2)); \
        fiy2 = _mm_add_ps(_mm_add_ps(fiy2, fiz2), _mm_add_ps(fix3, fiy3)); \
        fiz3 = _mm_add_ps(_mm_add_ps(fiz3, fix4), _mm_add_ps(fiy4, fiz4)); \
        _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  )); \
        _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4))); \
        _mm_storeu_ps(fptr+8, _mm_add_ps(fiz3, _mm_loadu_ps(fptr+8))); \
        _t5 = _mm_load_ss(fshiftptr+2); \
        _t5 = _mm_loadh_pi(_t5, (__m64 *)(fshiftptr)); \
        _t1 = _mm_shuffle_ps(fix1, fix1, _MM_SHUFFLE(1, 0, 2, 2)); \
        _t2 = _mm_shuffle_ps(fiy2, fiy2, _MM_SHUFFLE(3, 2, 1, 1)); \
        _t3 = _mm_shuffle_ps(fiz3, fiz3, _MM_SHUFFLE(2, 1, 0, 0)); \
        _t4 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(0, 0, 3, 3)); \
        _t4 = _mm_shuffle_ps(fiz3, _t4, _MM_SHUFFLE(2, 0, 3, 3)); \
        _t1 = _mm_add_ps(_t1, _t2); \
        _t3 = _mm_add_ps(_t3, _t4); \
        _t1 = _mm_add_ps(_t1, _t3); \
        _t5 = _mm_add_ps(_t5, _t1); \
        _mm_store_ss(fshiftptr+2, _t5); \
        _mm_storeh_pi((__m64 *)(fshiftptr), _t5); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4, t5;

    /* transpose data */
    _MM_TRANSPOSE4_PS(fix1, fiy1, fiz1, fix2);
    _MM_TRANSPOSE4_PS(fiy2, fiz2, fix3, fiy3);
    _MM_TRANSPOSE4_PS(fiz3, fix4, fiy4, fiz4);

    fix1 = _mm_add_ps(_mm_add_ps(fix1, fiy1), _mm_add_ps(fiz1, fix2));
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2, fiz2), _mm_add_ps(fix3, fiy3));
    fiz3 = _mm_add_ps(_mm_add_ps(fiz3, fix4), _mm_add_ps(fiy4, fiz4));

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_storeu_ps(fptr+8, _mm_add_ps(fiz3, _mm_loadu_ps(fptr+8)));

    t5 = _mm_load_ss(fshiftptr+2);
    t5 = _mm_loadh_pi(t5, (__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fix1, fix1, _MM_SHUFFLE(1, 0, 2, 2));
    t2 = _mm_shuffle_ps(fiy2, fiy2, _MM_SHUFFLE(3, 2, 1, 1));
    t3 = _mm_shuffle_ps(fiz3, fiz3, _MM_SHUFFLE(2, 1, 0, 0));
    t4 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(0, 0, 3, 3));
    t4 = _mm_shuffle_ps(fiz3, t4, _MM_SHUFFLE(2, 0, 3, 3));

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3);
    t5 = _mm_add_ps(t5, t1);

    _mm_store_ss(fshiftptr+2, t5);
    _mm_storeh_pi((__m64 *)(fshiftptr), t5);
}
#endif

static gmx_inline void
gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
    pot1 = _mm_add_ps(pot1, _mm_movehl_ps(_mm_setzero_ps(), pot1));
    pot1 = _mm_add_ps(pot1, _mm_shuffle_ps(pot1, pot1, _MM_SHUFFLE(0, 0, 0, 1)));
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
}
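
/* Usage sketch (illustrative only): at the end of an outer-loop iteration,
 * reduce the four-lane potential accumulator and add the result to the energy
 * of the current energy group. 'velecsum', 'energygrp_elec' and 'ggid' are
 * hypothetical names.
 *
 *     gmx_mm_update_1pot_ps(velecsum, energygrp_elec+ggid);
 */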

static gmx_inline void
gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB)
{
    __m128 t1, t2;
    t1   = _mm_movehl_ps(pot2, pot1);
    t2   = _mm_movelh_ps(pot1, pot2);
    t1   = _mm_add_ps(t1, t2);
    t2   = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 1, 1));
    pot1 = _mm_add_ps(t1, t2);
    pot2 = _mm_movehl_ps(t2, pot1);
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
    _mm_store_ss(ptrB, _mm_add_ss(pot2, _mm_load_ss(ptrB)));
}

#endif /* _kernelutil_x86_sse2_single_h_ */