/*
 * This source code is part of GROMACS.
 *
 * Copyright (c) 2011-2012, The GROMACS Development Team
 *
 * GROMACS is a library for molecular simulation and trajectory analysis,
 * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 * a full list of developers and information, check out http://www.gromacs.org
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU Lesser General Public License.
 *
 * In plain-speak: do not worry about classes/macros/templates either - only
 * changes to the library have to be LGPL, not an application linking with it.
 *
 * To help fund GROMACS development, we humbly ask that you cite
 * the papers people have written on it - you can find them on the website!
 */

#ifndef _kernelutil_x86_sse4_1_single_h_
#define _kernelutil_x86_sse4_1_single_h_

#include "gmx_x86_sse4_1.h"

/* Normal sum of four xmm registers */
#define gmx_mm_sum4_ps(t0,t1,t2,t3)  _mm_add_ps(_mm_add_ps(t0,t1),_mm_add_ps(t2,t3))

static gmx_inline __m128
gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
{
    return _mm_add_ps( _mm_add_ps( _mm_mul_ps(dx,dx), _mm_mul_ps(dy,dy) ), _mm_mul_ps(dz,dz) );
}

static gmx_inline int
gmx_mm_any_lt(__m128 a, __m128 b)
{
    return _mm_movemask_ps(_mm_cmplt_ps(a,b));
}

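/*
 * Usage sketch (illustrative only; dx, dy, dz and rcut2 are hypothetical
 * kernel variables, not part of this header): a typical inner loop computes
 * four squared distances at once and skips the whole quad of pairs when none
 * of them is inside the cutoff.
 *
 *     __m128 rsq = gmx_mm_calc_rsq_ps(dx,dy,dz);
 *     if (gmx_mm_any_lt(rsq,rcut2))
 *     {
 *         // evaluate the interactions for this quad
 *     }
 */
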
/* Load a single value from 1-4 places, merge into xmm register */

static gmx_inline __m128
gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
                             const float * gmx_restrict ptrB,
                             const float * gmx_restrict ptrC,
                             const float * gmx_restrict ptrD)
{
    __m128 t1,t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA),_mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB),_mm_load_ss(ptrD));
    return _mm_unpacklo_ps(t1,t2);
}

static gmx_inline void
gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
                              float * gmx_restrict ptrB,
                              float * gmx_restrict ptrC,
                              float * gmx_restrict ptrD,
                              __m128 xmm1)
{
    __m128 t2,t3,t4;

    t3 = _mm_movehl_ps(_mm_setzero_ps(),xmm1);
    t2 = _mm_shuffle_ps(xmm1,xmm1,_MM_SHUFFLE(1,1,1,1));
    t4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
    _mm_store_ss(ptrA,xmm1);
    _mm_store_ss(ptrB,t2);
    _mm_store_ss(ptrC,t3);
    _mm_store_ss(ptrD,t4);
}

/* Similar to store, but increments the value in memory */
static gmx_inline void
gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
                                  float * gmx_restrict ptrB,
                                  float * gmx_restrict ptrC,
                                  float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 tmp;

    tmp = gmx_mm_load_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD);
    tmp = _mm_add_ps(tmp,xmm1);
    gmx_mm_store_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD,tmp);
}

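/*
 * Usage sketch (illustrative; f and the indices idxA..idxD are hypothetical):
 * scatter-add one SIMD register of per-atom scalar contributions back to four
 * unrelated memory locations in a single call.
 *
 *     gmx_mm_increment_4real_swizzle_ps(f+idxA, f+idxB, f+idxC, f+idxD,
 *                                       contrib);
 */
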
static gmx_inline void
gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
                             const float * gmx_restrict p2,
                             const float * gmx_restrict p3,
                             const float * gmx_restrict p4,
                             __m128 * gmx_restrict c6,
                             __m128 * gmx_restrict c12)
{
    __m128 t1,t2,t3,t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(),(const __m64 *)p1);   /* - - c12a c6a */
    t2   = _mm_loadl_pi(_mm_setzero_ps(),(const __m64 *)p2);   /* - - c12b c6b */
    t3   = _mm_loadl_pi(_mm_setzero_ps(),(const __m64 *)p3);   /* - - c12c c6c */
    t4   = _mm_loadl_pi(_mm_setzero_ps(),(const __m64 *)p4);   /* - - c12d c6d */
    t1   = _mm_unpacklo_ps(t1,t2);
    t2   = _mm_unpacklo_ps(t3,t4);
    *c6  = _mm_movelh_ps(t1,t2);
    *c12 = _mm_movehl_ps(t2,t1);
}

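/*
 * Usage sketch (illustrative; vdwparam and the type offsets are hypothetical):
 * each pointer addresses an interleaved (c6,c12) pair in a Lennard-Jones
 * parameter table, and the call returns the four c6 and four c12 values
 * transposed into two SIMD registers.
 *
 *     __m128 c6,c12;
 *     gmx_mm_load_4pair_swizzle_ps(vdwparam+2*typeA, vdwparam+2*typeB,
 *                                  vdwparam+2*typeC, vdwparam+2*typeD,
 *                                  &c6, &c12);
 */
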
static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1,
                                         __m128 * gmx_restrict y1,
                                         __m128 * gmx_restrict z1)
{
    __m128 t1,t2,t3,t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(),(const __m64 *)xyz_shift);
    t2 = _mm_loadl_pi(_mm_setzero_ps(),(const __m64 *)xyz);
    t3 = _mm_load_ss(xyz_shift+2);
    t4 = _mm_load_ss(xyz+2);
    t1 = _mm_add_ps(t1,t2);
    t3 = _mm_add_ss(t3,t4);

    *x1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
}

static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 tA,tB;
    __m128 t1,t2,t3,t4,t5,t6;

    tA = _mm_loadl_pi(_mm_setzero_ps(),(const __m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_load_ss(xyz+8);

    tA = _mm_movelh_ps(tA,tB);
    t4 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
    t5 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
    t6 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));

    t1 = _mm_add_ps(t1,t4);
    t2 = _mm_add_ps(t2,t5);
    t3 = _mm_add_ss(t3,t6);

    *x1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
    *x2 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
    *y2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
    *z2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
    *x3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
    *y3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
    *z3 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
}

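/*
 * Usage sketch (illustrative; shiftvec, x and the offsets are hypothetical):
 * load the three atoms of an i-molecule (e.g. water) once per outer-loop
 * iteration, fold in the periodic shift, and broadcast every coordinate to
 * all four SIMD lanes.
 *
 *     gmx_mm_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,
 *                                              x+i_coord_offset,
 *                                              &ix1,&iy1,&iz1,
 *                                              &ix2,&iy2,&iz2,
 *                                              &ix3,&iy3,&iz3);
 */
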
static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 tA,tB;
    __m128 t1,t2,t3,t4,t5,t6;

    tA = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_loadu_ps(xyz+8);

    tA = _mm_movelh_ps(tA,tB);
    t4 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
    t5 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
    t6 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));

    t1 = _mm_add_ps(t1,t4);
    t2 = _mm_add_ps(t2,t5);
    t3 = _mm_add_ps(t3,t6);

    *x1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
    *x2 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
    *y2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
    *z2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
    *x3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
    *y3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
    *z3 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
    *x4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
    *y4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(2,2,2,2));
    *z4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(3,3,3,3));
}

static gmx_inline void
gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1,
                                  __m128 * gmx_restrict y1,
                                  __m128 * gmx_restrict z1)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8;

    t1 = _mm_castpd_ps(_mm_load_sd((const double *)ptrA));
    t2 = _mm_castpd_ps(_mm_load_sd((const double *)ptrB));
    t3 = _mm_castpd_ps(_mm_load_sd((const double *)ptrC));
    t4 = _mm_castpd_ps(_mm_load_sd((const double *)ptrD));
    t5 = _mm_load_ss(ptrA+2);
    t6 = _mm_load_ss(ptrB+2);
    t7 = _mm_load_ss(ptrC+2);
    t8 = _mm_load_ss(ptrD+2);
    t1 = _mm_unpacklo_ps(t1,t2);
    t3 = _mm_unpacklo_ps(t3,t4);
    *x1 = _mm_movelh_ps(t1,t3);
    *y1 = _mm_movehl_ps(t3,t1);
    t5 = _mm_unpacklo_ps(t5,t6);
    t7 = _mm_unpacklo_ps(t7,t8);
    *z1 = _mm_movelh_ps(t5,t7);
}

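/*
 * Usage sketch (illustrative; x and the j-atom indices jnrA..jnrD are
 * hypothetical): gather the coordinates of four j-atoms from their rvec
 * storage into structure-of-arrays registers, ready for gmx_mm_calc_rsq_ps().
 *
 *     __m128 jx1,jy1,jz1;
 *     gmx_mm_load_1rvec_4ptr_swizzle_ps(x+3*jnrA, x+3*jnrB, x+3*jnrC, x+3*jnrD,
 *                                       &jx1, &jy1, &jz1);
 */
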
static gmx_inline void
gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1,t2,t3,t4;

    t1 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)ptrA ) );
    t2 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)ptrB ) );
    t3 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)ptrC ) );
    t4 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)ptrD ) );
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrA+4) ) );
    t2 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrB+4) ) );
    t3 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrC+4) ) );
    t4 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrD+4) ) );
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1 = _mm_load_ss(ptrA+8);
    t2 = _mm_load_ss(ptrB+8);
    t3 = _mm_load_ss(ptrC+8);
    t4 = _mm_load_ss(ptrD+8);
    t1 = _mm_unpacklo_ps(t1,t3);
    t3 = _mm_unpacklo_ps(t2,t4);
    *z3 = _mm_unpacklo_ps(t1,t3);
}

static gmx_inline void
gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1,t2,t3,t4;

    t1 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrA) ) );
    t2 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrB) ) );
    t3 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrC) ) );
    t4 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrD) ) );
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrA+4) ) );
    t2 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrB+4) ) );
    t3 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrC+4) ) );
    t4 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrD+4) ) );
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrA+8) ) );
    t2 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrB+8) ) );
    t3 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrC+8) ) );
    t4 = gmx_mm_castsi128_ps( _mm_lddqu_si128( (const __m128i *)(ptrD+8) ) );
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *z3 = t1;
    *x4 = t2;
    *y4 = t3;
    *z4 = t4;
}

static gmx_inline void
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA,
                                       float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC,
                                       float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;

    t5  = _mm_unpacklo_ps(y1,z1);
    t6  = _mm_unpackhi_ps(y1,z1);
    t7  = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(1,0,0,0));
    t8  = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(3,2,0,1));
    t9  = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(1,0,0,2));
    t10 = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(3,2,0,3));
    t1  = _mm_load_ss(ptrA);
    t1  = _mm_loadh_pi(t1,(__m64 *)(ptrA+1));
    t1  = _mm_sub_ps(t1,t7);
    _mm_store_ss(ptrA,t1);
    _mm_storeh_pi((__m64 *)(ptrA+1),t1);
    t2  = _mm_load_ss(ptrB);
    t2  = _mm_loadh_pi(t2,(__m64 *)(ptrB+1));
    t2  = _mm_sub_ps(t2,t8);
    _mm_store_ss(ptrB,t2);
    _mm_storeh_pi((__m64 *)(ptrB+1),t2);
    t3  = _mm_load_ss(ptrC);
    t3  = _mm_loadh_pi(t3,(__m64 *)(ptrC+1));
    t3  = _mm_sub_ps(t3,t9);
    _mm_store_ss(ptrC,t3);
    _mm_storeh_pi((__m64 *)(ptrC+1),t3);
    t4  = _mm_load_ss(ptrD);
    t4  = _mm_loadh_pi(t4,(__m64 *)(ptrD+1));
    t4  = _mm_sub_ps(t4,t10);
    _mm_store_ss(ptrD,t4);
    _mm_storeh_pi((__m64 *)(ptrD+1),t4);
}

static gmx_inline void
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
    __m128 t11,t12,t13,t14,t15,t16,t17,t18,t19;
    __m128 t20,t21,t22,t23,t24,t25;

    t13 = _mm_unpackhi_ps(x1,y1);
    x1  = _mm_unpacklo_ps(x1,y1);
    t14 = _mm_unpackhi_ps(z1,x2);
    z1  = _mm_unpacklo_ps(z1,x2);
    t15 = _mm_unpackhi_ps(y2,z2);
    y2  = _mm_unpacklo_ps(y2,z2);
    t16 = _mm_unpackhi_ps(x3,y3);
    x3  = _mm_unpacklo_ps(x3,y3);
    t17 = _mm_shuffle_ps(z3,z3,_MM_SHUFFLE(0,0,0,1));
    t18 = _mm_movehl_ps(z3,z3);
    t19 = _mm_shuffle_ps(t18,t18,_MM_SHUFFLE(0,0,0,1));
    t20 = _mm_movelh_ps(x1,z1);
    t21 = _mm_movehl_ps(z1,x1);
    t22 = _mm_movelh_ps(t13,t14);
    t14 = _mm_movehl_ps(t14,t13);
    t23 = _mm_movelh_ps(y2,x3);
    t24 = _mm_movehl_ps(x3,y2);
    t25 = _mm_movelh_ps(t15,t16);
    t16 = _mm_movehl_ps(t16,t15);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_load_ss(ptrA+8);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_load_ss(ptrB+8);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_load_ss(ptrC+8);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_load_ss(ptrD+8);

    t1  = _mm_sub_ps(t1,t20);
    t2  = _mm_sub_ps(t2,t23);
    t3  = _mm_sub_ss(t3,z3);
    _mm_storeu_ps(ptrA,t1);
    _mm_storeu_ps(ptrA+4,t2);
    _mm_store_ss(ptrA+8,t3);
    t4  = _mm_sub_ps(t4,t21);
    t5  = _mm_sub_ps(t5,t24);
    t6  = _mm_sub_ss(t6,t17);
    _mm_storeu_ps(ptrB,t4);
    _mm_storeu_ps(ptrB+4,t5);
    _mm_store_ss(ptrB+8,t6);
    t7  = _mm_sub_ps(t7,t22);
    t8  = _mm_sub_ps(t8,t25);
    t9  = _mm_sub_ss(t9,t18);
    _mm_storeu_ps(ptrC,t7);
    _mm_storeu_ps(ptrC+4,t8);
    _mm_store_ss(ptrC+8,t9);
    t10 = _mm_sub_ps(t10,t14);
    t11 = _mm_sub_ps(t11,t16);
    t12 = _mm_sub_ss(t12,t19);
    _mm_storeu_ps(ptrD,t10);
    _mm_storeu_ps(ptrD+4,t11);
    _mm_store_ss(ptrD+8,t12);
}

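/*
 * Usage sketch (illustrative; f is a hypothetical force array and jnrA..jnrD
 * the four j-molecule offsets): after the kernel has accumulated the forces
 * on the three atoms of four j-molecules in SIMD registers, subtract them
 * from memory in one call.
 *
 *     gmx_mm_decrement_3rvec_4ptr_swizzle_ps(f+3*jnrA, f+3*jnrB,
 *                                            f+3*jnrC, f+3*jnrD,
 *                                            fjx1,fjy1,fjz1,
 *                                            fjx2,fjy2,fjz2,
 *                                            fjx3,fjy3,fjz3);
 */
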
static gmx_inline void
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3,
                                       __m128 x4, __m128 y4, __m128 z4)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11;
    __m128 t12,t13,t14,t15,t16,t17,t18,t19,t20,t21,t22;
    __m128 t23,t24;

    t13 = _mm_unpackhi_ps(x1,y1);
    x1  = _mm_unpacklo_ps(x1,y1);
    t14 = _mm_unpackhi_ps(z1,x2);
    z1  = _mm_unpacklo_ps(z1,x2);
    t15 = _mm_unpackhi_ps(y2,z2);
    y2  = _mm_unpacklo_ps(y2,z2);
    t16 = _mm_unpackhi_ps(x3,y3);
    x3  = _mm_unpacklo_ps(x3,y3);
    t17 = _mm_unpackhi_ps(z3,x4);
    z3  = _mm_unpacklo_ps(z3,x4);
    t18 = _mm_unpackhi_ps(y4,z4);
    y4  = _mm_unpacklo_ps(y4,z4);
    t19 = _mm_movelh_ps(x1,z1);
    z1  = _mm_movehl_ps(z1,x1);
    t20 = _mm_movelh_ps(t13,t14);
    t14 = _mm_movehl_ps(t14,t13);
    t21 = _mm_movelh_ps(y2,x3);
    x3  = _mm_movehl_ps(x3,y2);
    t22 = _mm_movelh_ps(t15,t16);
    t16 = _mm_movehl_ps(t16,t15);
    t23 = _mm_movelh_ps(z3,y4);
    y4  = _mm_movehl_ps(y4,z3);
    t24 = _mm_movelh_ps(t17,t18);
    t18 = _mm_movehl_ps(t18,t17);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_loadu_ps(ptrA+8);
    t1  = _mm_sub_ps(t1,t19);
    t2  = _mm_sub_ps(t2,t21);
    t3  = _mm_sub_ps(t3,t23);
    _mm_storeu_ps(ptrA,t1);
    _mm_storeu_ps(ptrA+4,t2);
    _mm_storeu_ps(ptrA+8,t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_loadu_ps(ptrB+8);
    t4  = _mm_sub_ps(t4,z1);
    t5  = _mm_sub_ps(t5,x3);
    t6  = _mm_sub_ps(t6,y4);
    _mm_storeu_ps(ptrB,t4);
    _mm_storeu_ps(ptrB+4,t5);
    _mm_storeu_ps(ptrB+8,t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_loadu_ps(ptrC+8);
    t7  = _mm_sub_ps(t7,t20);
    t8  = _mm_sub_ps(t8,t22);
    t9  = _mm_sub_ps(t9,t24);
    _mm_storeu_ps(ptrC,t7);
    _mm_storeu_ps(ptrC+4,t8);
    _mm_storeu_ps(ptrC+8,t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_loadu_ps(ptrD+8);
    t10 = _mm_sub_ps(t10,t14);
    t11 = _mm_sub_ps(t11,t16);
    t12 = _mm_sub_ps(t12,t18);
    _mm_storeu_ps(ptrD,t10);
    _mm_storeu_ps(ptrD+4,t11);
    _mm_storeu_ps(ptrD+8,t12);
}

static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t2,t3;

    fix1 = _mm_hadd_ps(fix1,fix1);
    fiy1 = _mm_hadd_ps(fiy1,fiz1);

    fix1 = _mm_hadd_ps(fix1,fiy1); /* fiz1 fiy1 fix1 fix1 */

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2,(__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3,(__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2,fix1);
    t3 = _mm_add_ps(t3,fix1);

    _mm_store_ss(fptr,t2);
    _mm_storeh_pi((__m64 *)(fptr+1),t2);
    _mm_store_ss(fshiftptr,t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1),t3);
}

static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3,t4;

    fix1 = _mm_hadd_ps(fix1,fiy1);
    fiz1 = _mm_hadd_ps(fiz1,fix2);
    fiy2 = _mm_hadd_ps(fiy2,fiz2);
    fix3 = _mm_hadd_ps(fix3,fiy3);
    fiz3 = _mm_hadd_ps(fiz3,fiz3);

    fix1 = _mm_hadd_ps(fix1,fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2,fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3,fiz3); /* -    -    -    fiz3 */

    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
    _mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));

    t4 = _mm_load_ss(fshiftptr+2);
    t4 = _mm_loadh_pi(t4,(__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));   /* fiy1 fix1  -   fiz3 */
    t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));   /* fiy3 fix3  -   fiz1 */
    t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));   /* fix2 fix2 fiy2 fiz2 */
    t3 = _mm_shuffle_ps(t3  ,t3  ,_MM_SHUFFLE(1,2,0,0));   /* fiy2 fix2  -   fiz2 */

    t1 = _mm_add_ps(t1,t2);
    t3 = _mm_add_ps(t3,t4);
    t1 = _mm_add_ps(t1,t3);   /* y x - z */

    _mm_store_ss(fshiftptr+2,t1);
    _mm_storeh_pi((__m64 *)(fshiftptr),t1);
}

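/*
 * Usage sketch (illustrative; fix1..fiz3 are the hypothetical per-lane
 * i-force accumulators of a three-atom (e.g. water) kernel, f and fshift the
 * force and shift-force arrays): reduce the nine SIMD accumulators across
 * lanes and add the totals to memory in one call.
 *
 *     gmx_mm_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,
 *                                           fix2,fiy2,fiz2,
 *                                           fix3,fiy3,fiz3,
 *                                           f+i_coord_offset,
 *                                           fshift+i_shift_offset);
 */
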
static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3,t4,t5;

    fix1 = _mm_hadd_ps(fix1,fiy1);
    fiz1 = _mm_hadd_ps(fiz1,fix2);
    fiy2 = _mm_hadd_ps(fiy2,fiz2);
    fix3 = _mm_hadd_ps(fix3,fiy3);
    fiz3 = _mm_hadd_ps(fiz3,fix4);
    fiy4 = _mm_hadd_ps(fiy4,fiz4);

    fix1 = _mm_hadd_ps(fix1,fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2,fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3,fiy4); /* fiz4 fiy4 fix4 fiz3 */

    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
    _mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));

    t5 = _mm_load_ss(fshiftptr+2);
    t5 = _mm_loadh_pi(t5,(__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fix1,fix1,_MM_SHUFFLE(1,0,2,2));
    t2 = _mm_shuffle_ps(fiy2,fiy2,_MM_SHUFFLE(3,2,1,1));
    t3 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(2,1,0,0));
    t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));
    t4 = _mm_shuffle_ps(fiz3,t4  ,_MM_SHUFFLE(2,0,3,3));

    t1 = _mm_add_ps(t1,t2);
    t3 = _mm_add_ps(t3,t4);
    t1 = _mm_add_ps(t1,t3);
    t5 = _mm_add_ps(t5,t1);

    _mm_store_ss(fshiftptr+2,t5);
    _mm_storeh_pi((__m64 *)(fshiftptr),t5);
}

static gmx_inline void
gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
    pot1 = _mm_add_ps(pot1,_mm_movehl_ps(_mm_setzero_ps(),pot1));
    pot1 = _mm_add_ps(pot1,_mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(0,0,0,1)));
    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
}

static gmx_inline void
gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB)
{
    __m128 t1,t2;

    t1   = _mm_movehl_ps(pot2,pot1);
    t2   = _mm_movelh_ps(pot1,pot2);
    t1   = _mm_add_ps(t1,t2);
    t2   = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,1,1));
    pot1 = _mm_add_ps(t1,t2);
    pot2 = _mm_movehl_ps(t2,pot1);
    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
    _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
}

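/*
 * Usage sketch (illustrative; vctot and vvdwtot are hypothetical SIMD energy
 * accumulators, venergy and vvdwenergy the per-group output buffers, ggid the
 * energy-group index): at the end of a kernel, reduce the Coulomb and van der
 * Waals energies across lanes and add both totals to memory in one call.
 *
 *     gmx_mm_update_2pot_ps(vctot,   venergy+ggid,
 *                           vvdwtot, vvdwenergy+ggid);
 */
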
static gmx_inline void
gmx_mm_update_4pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB,
                      __m128 pot3, float * gmx_restrict ptrC,
                      __m128 pot4, float * gmx_restrict ptrD)
{
    _MM_TRANSPOSE4_PS(pot1,pot2,pot3,pot4);
    pot1 = _mm_add_ps(_mm_add_ps(pot1,pot2),_mm_add_ps(pot3,pot4));
    pot2 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(1,1,1,1));
    pot3 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(2,2,2,2));
    pot4 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(3,3,3,3));
    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
    _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
    _mm_store_ss(ptrC,_mm_add_ss(pot3,_mm_load_ss(ptrC)));
    _mm_store_ss(ptrD,_mm_add_ss(pot4,_mm_load_ss(ptrD)));
}

#endif /* _kernelutil_x86_sse4_1_single_h_ */