/*
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 * Copyright (c) 2011-2012, The GROMACS Development Team
 *
 * Gromacs is a library for molecular simulation and trajectory analysis,
 * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 * a full list of developers and information, check out http://www.gromacs.org
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) any
 * later version.
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU Lesser General Public License.
 *
 * In plain-speak: do not worry about classes/macros/templates either - only
 * changes to the library have to be LGPL, not an application linking with it.
 *
 * To help fund GROMACS development, we humbly ask that you cite
 * the papers people have written on it - you can find them on the website!
 */
#ifndef _kernelutil_x86_sse2_single_h_
#define _kernelutil_x86_sse2_single_h_

/* We require SSE2 now! */

#include <math.h>

#include "gmx_x86_sse2.h"


/* Normal sum of four xmm registers */
#define gmx_mm_sum4_ps(t0,t1,t2,t3)  _mm_add_ps(_mm_add_ps(t0,t1),_mm_add_ps(t2,t3))
static gmx_inline __m128
gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
{
    return _mm_add_ps( _mm_add_ps( _mm_mul_ps(dx,dx), _mm_mul_ps(dy,dy) ), _mm_mul_ps(dz,dz) );
}
static gmx_inline int
gmx_mm_any_lt(__m128 a, __m128 b)
{
    return _mm_movemask_ps(_mm_cmplt_ps(a,b));
}
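
/* Usage sketch (illustrative only; rsq and rcutoff2 are hypothetical local
 * registers): a typical cutoff check skips the rest of an iteration unless at
 * least one of the four packed distances is inside the cutoff.
 *
 *     if (!gmx_mm_any_lt(rsq, rcutoff2))
 *     {
 *         continue;
 *     }
 */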

/* Load a single value from 1-4 places, merge into xmm register */

static gmx_inline __m128
gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
                             const float * gmx_restrict ptrB,
                             const float * gmx_restrict ptrC,
                             const float * gmx_restrict ptrD)
{
    __m128 t1,t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA),_mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB),_mm_load_ss(ptrD));
    return _mm_unpacklo_ps(t1,t2);
}
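
/* Usage sketch (illustrative; the charge array and j-indices are hypothetical):
 * gather one float per particle, e.g. the charges of four j-particles, into a
 * single register.
 *
 *     __m128 jq = gmx_mm_load_4real_swizzle_ps(charge+jnrA, charge+jnrB,
 *                                              charge+jnrC, charge+jnrD);
 */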

static gmx_inline void
gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
                              float * gmx_restrict ptrB,
                              float * gmx_restrict ptrC,
                              float * gmx_restrict ptrD,
                              __m128 xmm1)
{
    __m128 t2,t3,t4;

    t3 = _mm_movehl_ps(_mm_setzero_ps(),xmm1);
    t2 = _mm_shuffle_ps(xmm1,xmm1,_MM_SHUFFLE(1,1,1,1));
    t4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
    _mm_store_ss(ptrA,xmm1);
    _mm_store_ss(ptrB,t2);
    _mm_store_ss(ptrC,t3);
    _mm_store_ss(ptrD,t4);
}

/* Similar to store, but increments value in memory */
static gmx_inline void
gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
                                  float * gmx_restrict ptrB,
                                  float * gmx_restrict ptrC,
                                  float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 tmp;

    tmp = gmx_mm_load_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD);
    tmp = _mm_add_ps(tmp,xmm1);
    gmx_mm_store_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD,tmp);
}

static gmx_inline void
gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
                             const float * gmx_restrict p2,
                             const float * gmx_restrict p3,
                             const float * gmx_restrict p4,
                             __m128 * gmx_restrict c6,
                             __m128 * gmx_restrict c12)
{
    __m128 t1,t2,t3,t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p1);   /* - - c12a c6a */
    t2   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p2);   /* - - c12b c6b */
    t3   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p3);   /* - - c12c c6c */
    t4   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p4);   /* - - c12d c6d */
    t1   = _mm_unpacklo_ps(t1,t2);
    t2   = _mm_unpacklo_ps(t3,t4);
    *c6  = _mm_movelh_ps(t1,t2);
    *c12 = _mm_movehl_ps(t2,t1);
}
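
/* Usage sketch (illustrative; vdwparam and the type indices are hypothetical):
 * gather the interleaved c6/c12 Lennard-Jones pairs of four j-particles into
 * separate c6 and c12 registers.
 *
 *     __m128 c6, c12;
 *     gmx_mm_load_4pair_swizzle_ps(vdwparam+2*tjA, vdwparam+2*tjB,
 *                                  vdwparam+2*tjC, vdwparam+2*tjD,
 *                                  &c6, &c12);
 */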

/* Routines to load 1-4 rvec from 4 places.
 * We mainly use these to load coordinates. The extra routines
 * are very efficient for the water-water loops, since we e.g.
 * know that a TIP4p water has 4 atoms, so we should load 12 floats+shuffle.
 */

static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1,
                                         __m128 * gmx_restrict y1,
                                         __m128 * gmx_restrict z1)
{
    __m128 t1,t2,t3,t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
    t2 = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz);
    t3 = _mm_load_ss(xyz_shift+2);
    t4 = _mm_load_ss(xyz+2);
    t1 = _mm_add_ps(t1,t2);
    t3 = _mm_add_ss(t3,t4);

    *x1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
}

static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 tA,tB;
    __m128 t1,t2,t3,t4,t5,t6;

    tA = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_load_ss(xyz+8);

    tA = _mm_movelh_ps(tA,tB);
    t4 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
    t5 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
    t6 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));

    t1 = _mm_add_ps(t1,t4);
    t2 = _mm_add_ps(t2,t5);
    t3 = _mm_add_ss(t3,t6);

    *x1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
    *x2 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
    *y2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
    *z2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
    *x3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
    *y3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
    *z3 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
}
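
/* Usage sketch (illustrative; shiftvec, x and the offsets are hypothetical):
 * add the periodic shift to the three atoms of an i-molecule (e.g. a 3-site
 * water) and broadcast each component across a full register.
 *
 *     __m128 ix1,iy1,iz1,ix2,iy2,iz2,ix3,iy3,iz3;
 *     gmx_mm_load_shift_and_3rvec_broadcast_ps(shiftvec+3*ishift, x+3*inr,
 *                                              &ix1,&iy1,&iz1,
 *                                              &ix2,&iy2,&iz2,
 *                                              &ix3,&iy3,&iz3);
 */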

static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 tA,tB;
    __m128 t1,t2,t3,t4,t5,t6;

    tA = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_loadu_ps(xyz+8);

    tA = _mm_movelh_ps(tA,tB);
    t4 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
    t5 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
    t6 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));

    t1 = _mm_add_ps(t1,t4);
    t2 = _mm_add_ps(t2,t5);
    t3 = _mm_add_ps(t3,t6);

    *x1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
    *x2 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
    *y2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
    *z2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
    *x3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
    *y3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
    *z3 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
    *x4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
    *y4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(2,2,2,2));
    *z4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(3,3,3,3));
}

static gmx_inline void
gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1,
                                  __m128 * gmx_restrict y1,
                                  __m128 * gmx_restrict z1)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8;

    t1  = _mm_castpd_ps(_mm_load_sd((const double *)ptrA));
    t2  = _mm_castpd_ps(_mm_load_sd((const double *)ptrB));
    t3  = _mm_castpd_ps(_mm_load_sd((const double *)ptrC));
    t4  = _mm_castpd_ps(_mm_load_sd((const double *)ptrD));
    t5  = _mm_load_ss(ptrA+2);
    t6  = _mm_load_ss(ptrB+2);
    t7  = _mm_load_ss(ptrC+2);
    t8  = _mm_load_ss(ptrD+2);
    t1  = _mm_unpacklo_ps(t1,t2);
    t3  = _mm_unpacklo_ps(t3,t4);
    *x1 = _mm_movelh_ps(t1,t3);
    *y1 = _mm_movehl_ps(t3,t1);
    t5  = _mm_unpacklo_ps(t5,t6);
    t7  = _mm_unpacklo_ps(t7,t8);
    *z1 = _mm_movelh_ps(t5,t7);
}

static gmx_inline void
gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1,t2,t3,t4;

    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrB);
    t3  = _mm_loadu_ps(ptrC);
    t4  = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1  = _mm_loadu_ps(ptrA+4);
    t2  = _mm_loadu_ps(ptrB+4);
    t3  = _mm_loadu_ps(ptrC+4);
    t4  = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1  = _mm_load_ss(ptrA+8);
    t2  = _mm_load_ss(ptrB+8);
    t3  = _mm_load_ss(ptrC+8);
    t4  = _mm_load_ss(ptrD+8);
    t1  = _mm_unpacklo_ps(t1,t3);
    t3  = _mm_unpacklo_ps(t2,t4);
    *z3 = _mm_unpacklo_ps(t1,t3);
}
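
/* Usage sketch (illustrative; x, the j-indices and the broadcast i-coordinates
 * ix1/iy1/iz1 are hypothetical): load four 3-atom j-molecules and form the
 * squared atom1-atom1 distances with gmx_mm_calc_rsq_ps().
 *
 *     __m128 jx1,jy1,jz1,jx2,jy2,jz2,jx3,jy3,jz3;
 *     __m128 rsq11;
 *
 *     gmx_mm_load_3rvec_4ptr_swizzle_ps(x+3*jnrA, x+3*jnrB, x+3*jnrC, x+3*jnrD,
 *                                       &jx1,&jy1,&jz1, &jx2,&jy2,&jz2,
 *                                       &jx3,&jy3,&jz3);
 *     rsq11 = gmx_mm_calc_rsq_ps(_mm_sub_ps(ix1,jx1),
 *                                _mm_sub_ps(iy1,jy1),
 *                                _mm_sub_ps(iz1,jz1));
 */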

static gmx_inline void
gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1,t2,t3,t4;

    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrB);
    t3  = _mm_loadu_ps(ptrC);
    t4  = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1  = _mm_loadu_ps(ptrA+4);
    t2  = _mm_loadu_ps(ptrB+4);
    t3  = _mm_loadu_ps(ptrC+4);
    t4  = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1  = _mm_loadu_ps(ptrA+8);
    t2  = _mm_loadu_ps(ptrB+8);
    t3  = _mm_loadu_ps(ptrC+8);
    t4  = _mm_loadu_ps(ptrD+8);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *z3 = t1;
    *x4 = t2;
    *y4 = t3;
    *z4 = t4;
}

static gmx_inline void
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA,
                                       float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC,
                                       float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12;

    t5  = _mm_unpacklo_ps(y1,z1);
    t6  = _mm_unpackhi_ps(y1,z1);
    t7  = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(1,0,0,0));
    t8  = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(3,2,0,1));
    t9  = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(1,0,0,2));
    t10 = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(3,2,0,3));
    t1  = _mm_load_ss(ptrA);
    t1  = _mm_loadh_pi(t1,(__m64 *)(ptrA+1));
    t1  = _mm_sub_ps(t1,t7);
    _mm_store_ss(ptrA,t1);
    _mm_storeh_pi((__m64 *)(ptrA+1),t1);
    t2  = _mm_load_ss(ptrB);
    t2  = _mm_loadh_pi(t2,(__m64 *)(ptrB+1));
    t2  = _mm_sub_ps(t2,t8);
    _mm_store_ss(ptrB,t2);
    _mm_storeh_pi((__m64 *)(ptrB+1),t2);
    t3  = _mm_load_ss(ptrC);
    t3  = _mm_loadh_pi(t3,(__m64 *)(ptrC+1));
    t3  = _mm_sub_ps(t3,t9);
    _mm_store_ss(ptrC,t3);
    _mm_storeh_pi((__m64 *)(ptrC+1),t3);
    t4  = _mm_load_ss(ptrD);
    t4  = _mm_loadh_pi(t4,(__m64 *)(ptrD+1));
    t4  = _mm_sub_ps(t4,t10);
    _mm_store_ss(ptrD,t4);
    _mm_storeh_pi((__m64 *)(ptrD+1),t4);
}
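
/* Usage sketch (illustrative; f and the accumulated force registers are
 * hypothetical): after summing the j-forces of four single-atom j-particles in
 * fjx/fjy/fjz, subtract them from the force array in one swizzled update.
 *
 *     gmx_mm_decrement_1rvec_4ptr_swizzle_ps(f+3*jnrA, f+3*jnrB,
 *                                            f+3*jnrC, f+3*jnrD,
 *                                            fjx, fjy, fjz);
 */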

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_3rvec_4ptr_swizzle_ps(ptrA,ptrB,ptrC,ptrD, \
                                               _x1,_y1,_z1,_x2,_y2,_z2,_x3,_y3,_z3) \
{\
    __m128 _t1,_t2,_t3,_t4,_t5,_t6,_t7,_t8,_t9,_t10;\
    __m128 _t11,_t12,_t13,_t14,_t15,_t16,_t17,_t18,_t19;\
    __m128 _t20,_t21,_t22,_t23,_t24,_t25;\
    _t13 = _mm_unpackhi_ps(_x1,_y1);\
    _x1  = _mm_unpacklo_ps(_x1,_y1);\
    _t14 = _mm_unpackhi_ps(_z1,_x2);\
    _z1  = _mm_unpacklo_ps(_z1,_x2);\
    _t15 = _mm_unpackhi_ps(_y2,_z2);\
    _y2  = _mm_unpacklo_ps(_y2,_z2);\
    _t16 = _mm_unpackhi_ps(_x3,_y3);\
    _x3  = _mm_unpacklo_ps(_x3,_y3);\
    _t17 = _mm_shuffle_ps(_z3,_z3,_MM_SHUFFLE(0,0,0,1));\
    _t18 = _mm_movehl_ps(_z3,_z3);\
    _t19 = _mm_shuffle_ps(_t18,_t18,_MM_SHUFFLE(0,0,0,1));\
    _t20 = _mm_movelh_ps(_x1,_z1);\
    _t21 = _mm_movehl_ps(_z1,_x1);\
    _t22 = _mm_movelh_ps(_t13,_t14);\
    _t14 = _mm_movehl_ps(_t14,_t13);\
    _t23 = _mm_movelh_ps(_y2,_x3);\
    _t24 = _mm_movehl_ps(_x3,_y2);\
    _t25 = _mm_movelh_ps(_t15,_t16);\
    _t16 = _mm_movehl_ps(_t16,_t15);\
    _t1  = _mm_loadu_ps(ptrA);\
    _t2  = _mm_loadu_ps(ptrA+4);\
    _t3  = _mm_load_ss(ptrA+8);\
    _t1  = _mm_sub_ps(_t1,_t20);\
    _t2  = _mm_sub_ps(_t2,_t23);\
    _t3  = _mm_sub_ss(_t3,_z3);\
    _mm_storeu_ps(ptrA,_t1);\
    _mm_storeu_ps(ptrA+4,_t2);\
    _mm_store_ss(ptrA+8,_t3);\
    _t4  = _mm_loadu_ps(ptrB);\
    _t5  = _mm_loadu_ps(ptrB+4);\
    _t6  = _mm_load_ss(ptrB+8);\
    _t4  = _mm_sub_ps(_t4,_t21);\
    _t5  = _mm_sub_ps(_t5,_t24);\
    _t6  = _mm_sub_ss(_t6,_t17);\
    _mm_storeu_ps(ptrB,_t4);\
    _mm_storeu_ps(ptrB+4,_t5);\
    _mm_store_ss(ptrB+8,_t6);\
    _t7  = _mm_loadu_ps(ptrC);\
    _t8  = _mm_loadu_ps(ptrC+4);\
    _t9  = _mm_load_ss(ptrC+8);\
    _t7  = _mm_sub_ps(_t7,_t22);\
    _t8  = _mm_sub_ps(_t8,_t25);\
    _t9  = _mm_sub_ss(_t9,_t18);\
    _mm_storeu_ps(ptrC,_t7);\
    _mm_storeu_ps(ptrC+4,_t8);\
    _mm_store_ss(ptrC+8,_t9);\
    _t10 = _mm_loadu_ps(ptrD);\
    _t11 = _mm_loadu_ps(ptrD+4);\
    _t12 = _mm_load_ss(ptrD+8);\
    _t10 = _mm_sub_ps(_t10,_t14);\
    _t11 = _mm_sub_ps(_t11,_t16);\
    _t12 = _mm_sub_ss(_t12,_t19);\
    _mm_storeu_ps(ptrD,_t10);\
    _mm_storeu_ps(ptrD+4,_t11);\
    _mm_store_ss(ptrD+8,_t12);\
}
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
    __m128 t11,t12,t13,t14,t15,t16,t17,t18,t19;
    __m128 t20,t21,t22,t23,t24,t25;

    t13 = _mm_unpackhi_ps(x1,y1);
    x1  = _mm_unpacklo_ps(x1,y1);
    t14 = _mm_unpackhi_ps(z1,x2);
    z1  = _mm_unpacklo_ps(z1,x2);
    t15 = _mm_unpackhi_ps(y2,z2);
    y2  = _mm_unpacklo_ps(y2,z2);
    t16 = _mm_unpackhi_ps(x3,y3);
    x3  = _mm_unpacklo_ps(x3,y3);
    t17 = _mm_shuffle_ps(z3,z3,_MM_SHUFFLE(0,0,0,1));
    t18 = _mm_movehl_ps(z3,z3);
    t19 = _mm_shuffle_ps(t18,t18,_MM_SHUFFLE(0,0,0,1));
    t20 = _mm_movelh_ps(x1,z1);
    t21 = _mm_movehl_ps(z1,x1);
    t22 = _mm_movelh_ps(t13,t14);
    t14 = _mm_movehl_ps(t14,t13);
    t23 = _mm_movelh_ps(y2,x3);
    t24 = _mm_movehl_ps(x3,y2);
    t25 = _mm_movelh_ps(t15,t16);
    t16 = _mm_movehl_ps(t16,t15);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_load_ss(ptrA+8);
    t1  = _mm_sub_ps(t1,t20);
    t2  = _mm_sub_ps(t2,t23);
    t3  = _mm_sub_ss(t3,z3);
    _mm_storeu_ps(ptrA,t1);
    _mm_storeu_ps(ptrA+4,t2);
    _mm_store_ss(ptrA+8,t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_load_ss(ptrB+8);
    t4  = _mm_sub_ps(t4,t21);
    t5  = _mm_sub_ps(t5,t24);
    t6  = _mm_sub_ss(t6,t17);
    _mm_storeu_ps(ptrB,t4);
    _mm_storeu_ps(ptrB+4,t5);
    _mm_store_ss(ptrB+8,t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_load_ss(ptrC+8);
    t7  = _mm_sub_ps(t7,t22);
    t8  = _mm_sub_ps(t8,t25);
    t9  = _mm_sub_ss(t9,t18);
    _mm_storeu_ps(ptrC,t7);
    _mm_storeu_ps(ptrC+4,t8);
    _mm_store_ss(ptrC+8,t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_load_ss(ptrD+8);
    t10 = _mm_sub_ps(t10,t14);
    t11 = _mm_sub_ps(t11,t16);
    t12 = _mm_sub_ss(t12,t19);
    _mm_storeu_ps(ptrD,t10);
    _mm_storeu_ps(ptrD+4,t11);
    _mm_store_ss(ptrD+8,t12);
}
#endif

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_4rvec_4ptr_swizzle_ps(ptrA,ptrB,ptrC,ptrD, \
                                               _x1,_y1,_z1,_x2,_y2,_z2,_x3,_y3,_z3,_x4,_y4,_z4) \
{\
    __m128 _t1,_t2,_t3,_t4,_t5,_t6,_t7,_t8,_t9,_t10,_t11;\
    __m128 _t12,_t13,_t14,_t15,_t16,_t17,_t18,_t19,_t20,_t21,_t22;\
    __m128 _t23,_t24;\
    _t13 = _mm_unpackhi_ps(_x1,_y1);\
    _x1  = _mm_unpacklo_ps(_x1,_y1);\
    _t14 = _mm_unpackhi_ps(_z1,_x2);\
    _z1  = _mm_unpacklo_ps(_z1,_x2);\
    _t15 = _mm_unpackhi_ps(_y2,_z2);\
    _y2  = _mm_unpacklo_ps(_y2,_z2);\
    _t16 = _mm_unpackhi_ps(_x3,_y3);\
    _x3  = _mm_unpacklo_ps(_x3,_y3);\
    _t17 = _mm_unpackhi_ps(_z3,_x4);\
    _z3  = _mm_unpacklo_ps(_z3,_x4);\
    _t18 = _mm_unpackhi_ps(_y4,_z4);\
    _y4  = _mm_unpacklo_ps(_y4,_z4);\
    _t19 = _mm_movelh_ps(_x1,_z1);\
    _z1  = _mm_movehl_ps(_z1,_x1);\
    _t20 = _mm_movelh_ps(_t13,_t14);\
    _t14 = _mm_movehl_ps(_t14,_t13);\
    _t21 = _mm_movelh_ps(_y2,_x3);\
    _x3  = _mm_movehl_ps(_x3,_y2);\
    _t22 = _mm_movelh_ps(_t15,_t16);\
    _t16 = _mm_movehl_ps(_t16,_t15);\
    _t23 = _mm_movelh_ps(_z3,_y4);\
    _y4  = _mm_movehl_ps(_y4,_z3);\
    _t24 = _mm_movelh_ps(_t17,_t18);\
    _t18 = _mm_movehl_ps(_t18,_t17);\
    _t1  = _mm_loadu_ps(ptrA);\
    _t2  = _mm_loadu_ps(ptrA+4);\
    _t3  = _mm_loadu_ps(ptrA+8);\
    _t1  = _mm_sub_ps(_t1,_t19);\
    _t2  = _mm_sub_ps(_t2,_t21);\
    _t3  = _mm_sub_ps(_t3,_t23);\
    _mm_storeu_ps(ptrA,_t1);\
    _mm_storeu_ps(ptrA+4,_t2);\
    _mm_storeu_ps(ptrA+8,_t3);\
    _t4  = _mm_loadu_ps(ptrB);\
    _t5  = _mm_loadu_ps(ptrB+4);\
    _t6  = _mm_loadu_ps(ptrB+8);\
    _t4  = _mm_sub_ps(_t4,_z1);\
    _t5  = _mm_sub_ps(_t5,_x3);\
    _t6  = _mm_sub_ps(_t6,_y4);\
    _mm_storeu_ps(ptrB,_t4);\
    _mm_storeu_ps(ptrB+4,_t5);\
    _mm_storeu_ps(ptrB+8,_t6);\
    _t7  = _mm_loadu_ps(ptrC);\
    _t8  = _mm_loadu_ps(ptrC+4);\
    _t9  = _mm_loadu_ps(ptrC+8);\
    _t7  = _mm_sub_ps(_t7,_t20);\
    _t8  = _mm_sub_ps(_t8,_t22);\
    _t9  = _mm_sub_ps(_t9,_t24);\
    _mm_storeu_ps(ptrC,_t7);\
    _mm_storeu_ps(ptrC+4,_t8);\
    _mm_storeu_ps(ptrC+8,_t9);\
    _t10 = _mm_loadu_ps(ptrD);\
    _t11 = _mm_loadu_ps(ptrD+4);\
    _t12 = _mm_loadu_ps(ptrD+8);\
    _t10 = _mm_sub_ps(_t10,_t14);\
    _t11 = _mm_sub_ps(_t11,_t16);\
    _t12 = _mm_sub_ps(_t12,_t18);\
    _mm_storeu_ps(ptrD,_t10);\
    _mm_storeu_ps(ptrD+4,_t11);\
    _mm_storeu_ps(ptrD+8,_t12);\
}
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3,
                                       __m128 x4, __m128 y4, __m128 z4)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11;
    __m128 t12,t13,t14,t15,t16,t17,t18,t19,t20,t21,t22;
    __m128 t23,t24;

    t13 = _mm_unpackhi_ps(x1,y1);
    x1  = _mm_unpacklo_ps(x1,y1);
    t14 = _mm_unpackhi_ps(z1,x2);
    z1  = _mm_unpacklo_ps(z1,x2);
    t15 = _mm_unpackhi_ps(y2,z2);
    y2  = _mm_unpacklo_ps(y2,z2);
    t16 = _mm_unpackhi_ps(x3,y3);
    x3  = _mm_unpacklo_ps(x3,y3);
    t17 = _mm_unpackhi_ps(z3,x4);
    z3  = _mm_unpacklo_ps(z3,x4);
    t18 = _mm_unpackhi_ps(y4,z4);
    y4  = _mm_unpacklo_ps(y4,z4);
    t19 = _mm_movelh_ps(x1,z1);
    z1  = _mm_movehl_ps(z1,x1);
    t20 = _mm_movelh_ps(t13,t14);
    t14 = _mm_movehl_ps(t14,t13);
    t21 = _mm_movelh_ps(y2,x3);
    x3  = _mm_movehl_ps(x3,y2);
    t22 = _mm_movelh_ps(t15,t16);
    t16 = _mm_movehl_ps(t16,t15);
    t23 = _mm_movelh_ps(z3,y4);
    y4  = _mm_movehl_ps(y4,z3);
    t24 = _mm_movelh_ps(t17,t18);
    t18 = _mm_movehl_ps(t18,t17);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_loadu_ps(ptrA+8);
    t1  = _mm_sub_ps(t1,t19);
    t2  = _mm_sub_ps(t2,t21);
    t3  = _mm_sub_ps(t3,t23);
    _mm_storeu_ps(ptrA,t1);
    _mm_storeu_ps(ptrA+4,t2);
    _mm_storeu_ps(ptrA+8,t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_loadu_ps(ptrB+8);
    t4  = _mm_sub_ps(t4,z1);
    t5  = _mm_sub_ps(t5,x3);
    t6  = _mm_sub_ps(t6,y4);
    _mm_storeu_ps(ptrB,t4);
    _mm_storeu_ps(ptrB+4,t5);
    _mm_storeu_ps(ptrB+8,t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_loadu_ps(ptrC+8);
    t7  = _mm_sub_ps(t7,t20);
    t8  = _mm_sub_ps(t8,t22);
    t9  = _mm_sub_ps(t9,t24);
    _mm_storeu_ps(ptrC,t7);
    _mm_storeu_ps(ptrC+4,t8);
    _mm_storeu_ps(ptrC+8,t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_loadu_ps(ptrD+8);
    t10 = _mm_sub_ps(t10,t14);
    t11 = _mm_sub_ps(t11,t16);
    t12 = _mm_sub_ps(t12,t18);
    _mm_storeu_ps(ptrD,t10);
    _mm_storeu_ps(ptrD+4,t11);
    _mm_storeu_ps(ptrD+8,t12);
}
#endif

static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3;

    /* transpose data */
    t1 = _mm_setzero_ps();
    _MM_TRANSPOSE4_PS(fix1,t1,fiy1,fiz1);
    fix1 = _mm_add_ps(_mm_add_ps(fix1,t1), _mm_add_ps(fiy1,fiz1));

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2,(__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3,(__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2,fix1);
    t3 = _mm_add_ps(t3,fix1);

    _mm_store_ss(fptr,t2);
    _mm_storeh_pi((__m64 *)(fptr+1),t2);
    _mm_store_ss(fshiftptr,t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1),t3);
}

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3, \
                                              fptr,fshiftptr) \
{\
    __m128 _t1,_t2,_t3,_t4;\
    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);\
    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);\
    _t2 = _mm_movehl_ps(_mm_setzero_ps(),fiz3);\
    _t1 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(0,0,0,1));\
    _t3 = _mm_shuffle_ps(_t2,_t2,_MM_SHUFFLE(0,0,0,1));\
    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));\
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));\
    fiz3 = _mm_add_ss(_mm_add_ps(fiz3,_t1) , _mm_add_ps(_t2,_t3));\
    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));\
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));\
    _mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));\
    _t4 = _mm_load_ss(fshiftptr+2);\
    _t4 = _mm_loadh_pi(_t4,(__m64 *)(fshiftptr));\
    _t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));\
    _t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));\
    _t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));\
    _t3 = _mm_shuffle_ps(_t3 ,_t3 ,_MM_SHUFFLE(1,2,0,0));\
    _t1 = _mm_add_ps(_t1,_t2);\
    _t3 = _mm_add_ps(_t3,_t4);\
    _t1 = _mm_add_ps(_t1,_t3);\
    _mm_store_ss(fshiftptr+2,_t1);\
    _mm_storeh_pi((__m64 *)(fshiftptr),_t1);\
}
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3,t4;

    /* transpose data */
    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);
    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);
    t2 = _mm_movehl_ps(_mm_setzero_ps(),fiz3);
    t1 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(0,0,0,1));
    t3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,1));

    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));
    fiz3 = _mm_add_ss(_mm_add_ps(fiz3,t1) , _mm_add_ps(t2,t3));

    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
    _mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));

    t4 = _mm_load_ss(fshiftptr+2);
    t4 = _mm_loadh_pi(t4,(__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));   /* fiy1 fix1  -   fiz3 */
    t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));   /* fiy3 fix3  -   fiz1 */
    t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));   /* fix2 fix2 fiy2 fiz2 */
    t3 = _mm_shuffle_ps(t3  ,t3  ,_MM_SHUFFLE(1,2,0,0));   /* fiy2 fix2  -   fiz2 */

    t1 = _mm_add_ps(t1,t2);
    t3 = _mm_add_ps(t3,t4);
    t1 = _mm_add_ps(t1,t3);   /* y x - z */

    _mm_store_ss(fshiftptr+2,t1);
    _mm_storeh_pi((__m64 *)(fshiftptr),t1);
}
#endif
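
/* Usage sketch (illustrative; the accumulators, f and fshift are hypothetical):
 * at the end of the inner loop, reduce the accumulated i-forces of a 3-atom
 * i-molecule into the force array and the shift-force array.
 *
 *     gmx_mm_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,
 *                                           fix2,fiy2,fiz2,
 *                                           fix3,fiy3,fiz3,
 *                                           f+3*inr, fshift+3*ishift);
 */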

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_4atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,fix4,fiy4,fiz4, \
                                              fptr,fshiftptr) \
{\
    __m128 _t1,_t2,_t3,_t4,_t5;\
    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);\
    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);\
    _MM_TRANSPOSE4_PS(fiz3,fix4,fiy4,fiz4);\
    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));\
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));\
    fiz3 = _mm_add_ps(_mm_add_ps(fiz3,fix4), _mm_add_ps(fiy4,fiz4));\
    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));\
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));\
    _mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));\
    _t5 = _mm_load_ss(fshiftptr+2);\
    _t5 = _mm_loadh_pi(_t5,(__m64 *)(fshiftptr));\
    _t1 = _mm_shuffle_ps(fix1,fix1,_MM_SHUFFLE(1,0,2,2));\
    _t2 = _mm_shuffle_ps(fiy2,fiy2,_MM_SHUFFLE(3,2,1,1));\
    _t3 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(2,1,0,0));\
    _t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));\
    _t4 = _mm_shuffle_ps(fiz3,_t4 ,_MM_SHUFFLE(2,0,3,3));\
    _t1 = _mm_add_ps(_t1,_t2);\
    _t3 = _mm_add_ps(_t3,_t4);\
    _t1 = _mm_add_ps(_t1,_t3);\
    _t5 = _mm_add_ps(_t5,_t1);\
    _mm_store_ss(fshiftptr+2,_t5);\
    _mm_storeh_pi((__m64 *)(fshiftptr),_t5);\
}
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3,t4,t5;

    /* transpose data */
    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);
    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);
    _MM_TRANSPOSE4_PS(fiz3,fix4,fiy4,fiz4);

    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));
    fiz3 = _mm_add_ps(_mm_add_ps(fiz3,fix4), _mm_add_ps(fiy4,fiz4));

    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
    _mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));

    t5 = _mm_load_ss(fshiftptr+2);
    t5 = _mm_loadh_pi(t5,(__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fix1,fix1,_MM_SHUFFLE(1,0,2,2));
    t2 = _mm_shuffle_ps(fiy2,fiy2,_MM_SHUFFLE(3,2,1,1));
    t3 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(2,1,0,0));
    t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));
    t4 = _mm_shuffle_ps(fiz3,t4 ,_MM_SHUFFLE(2,0,3,3));

    t1 = _mm_add_ps(t1,t2);
    t3 = _mm_add_ps(t3,t4);
    t1 = _mm_add_ps(t1,t3);
    t5 = _mm_add_ps(t5,t1);

    _mm_store_ss(fshiftptr+2,t5);
    _mm_storeh_pi((__m64 *)(fshiftptr),t5);
}
#endif

static gmx_inline void
gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
    pot1 = _mm_add_ps(pot1,_mm_movehl_ps(_mm_setzero_ps(),pot1));
    pot1 = _mm_add_ps(pot1,_mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(0,0,0,1)));
    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
}

static gmx_inline void
gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB)
{
    __m128 t1,t2;

    t1   = _mm_movehl_ps(pot2,pot1);
    t2   = _mm_movelh_ps(pot1,pot2);
    t1   = _mm_add_ps(t1,t2);
    t2   = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,1,1));
    pot1 = _mm_add_ps(t1,t2);
    pot2 = _mm_movehl_ps(t2,pot1);
    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
    _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
}
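
/* Usage sketch (illustrative; the energy accumulators and destination pointers
 * are hypothetical): reduce the packed Coulomb and Lennard-Jones energy
 * accumulators and add each total to its energy-group entry.
 *
 *     gmx_mm_update_2pot_ps(velecsum, elecenergygrp+ggid,
 *                           vvdwsum,  vdwenergygrp+ggid);
 */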

#endif /* _kernelutil_x86_sse2_single_h_ */