/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2011-2012, The GROMACS Development Team
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _kernelutil_x86_sse2_single_h_
#define _kernelutil_x86_sse2_single_h_

/* We require SSE2 now! */

#include <emmintrin.h>     /* SSE2 intrinsics (_mm_* calls used below) */

#include "gmx_x86_sse2.h"

/* Normal sum of four xmm registers */
#define gmx_mm_sum4_ps(t0,t1,t2,t3)  _mm_add_ps(_mm_add_ps(t0,t1),_mm_add_ps(t2,t3))

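/* Illustrative use (sketch only; the register names fA-fD are assumptions,
 * not taken from this file): with four partial contributions already in xmm
 * registers, the macro pairs the additions so the two inner _mm_add_ps
 * operations are independent of each other:
 *
 *     __m128 ftot = gmx_mm_sum4_ps(fA,fB,fC,fD);    i.e. (fA+fB)+(fC+fD), lanewise
 */
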
static gmx_inline __m128
gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
{
    return _mm_add_ps( _mm_add_ps( _mm_mul_ps(dx,dx), _mm_mul_ps(dy,dy) ), _mm_mul_ps(dz,dz) );
}

static gmx_inline int
gmx_mm_any_lt(__m128 a, __m128 b)
{
    return _mm_movemask_ps(_mm_cmplt_ps(a,b));
}

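/* Sketch of how the two helpers above are typically combined (variable names
 * are hypothetical): compute four squared distances at once and skip the
 * quadruplet if none of them is inside the cutoff:
 *
 *     __m128 rsq = gmx_mm_calc_rsq_ps(dx,dy,dz);
 *     if (gmx_mm_any_lt(rsq,rcutoff2))
 *     {
 *         // at least one lane has rsq < rcutoff2: evaluate the interaction
 *     }
 */
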
/* Load a single value from 1-4 places, merge into xmm register */

static gmx_inline __m128
gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
                             const float * gmx_restrict ptrB,
                             const float * gmx_restrict ptrC,
                             const float * gmx_restrict ptrD)
{
    __m128 t1,t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA),_mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB),_mm_load_ss(ptrD));
    return _mm_unpacklo_ps(t1,t2);
}

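/* The two unpack steps interleave A/C and B/D and then merge them, so the
 * return value holds {*ptrA,*ptrB,*ptrC,*ptrD} in lanes 0-3. A hypothetical
 * call (array and index names are assumptions) gathering four charges:
 *
 *     __m128 jq = gmx_mm_load_4real_swizzle_ps(q+jnrA,q+jnrB,q+jnrC,q+jnrD);
 */
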
static gmx_inline void
gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
                              float * gmx_restrict ptrB,
                              float * gmx_restrict ptrC,
                              float * gmx_restrict ptrD,
                              __m128 xmm1)
{
    __m128 t2,t3,t4;

    t3 = _mm_movehl_ps(_mm_setzero_ps(),xmm1);
    t2 = _mm_shuffle_ps(xmm1,xmm1,_MM_SHUFFLE(1,1,1,1));
    t4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
    _mm_store_ss(ptrA,xmm1);
    _mm_store_ss(ptrB,t2);
    _mm_store_ss(ptrC,t3);
    _mm_store_ss(ptrD,t4);
}

/* Similar to store, but increments the value in memory */
static gmx_inline void
gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
                                  float * gmx_restrict ptrB,
                                  float * gmx_restrict ptrC,
                                  float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 tmp;

    tmp = gmx_mm_load_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD);
    tmp = _mm_add_ps(tmp,xmm1);
    gmx_mm_store_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD,tmp);
}

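/* This is a straightforward read-modify-write: gather the four current values,
 * add the corresponding lanes of xmm1, and scatter the sums back to the same
 * addresses. Illustrative call (pointer names are assumptions, not from this
 * file):
 *
 *     gmx_mm_increment_4real_swizzle_ps(vA,vB,vC,vD,vinc);
 */
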
static gmx_inline void
gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
                             const float * gmx_restrict p2,
                             const float * gmx_restrict p3,
                             const float * gmx_restrict p4,
                             __m128 * gmx_restrict c6,
                             __m128 * gmx_restrict c12)
{
    __m128 t1,t2,t3,t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p1);    /* - - c12a  c6a */
    t2   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p2);    /* - - c12b  c6b */
    t3   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p3);    /* - - c12c  c6c */
    t4   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p4);    /* - - c12d  c6d */
    t1   = _mm_unpacklo_ps(t1,t2);
    t2   = _mm_unpacklo_ps(t3,t4);
    *c6  = _mm_movelh_ps(t1,t2);
    *c12 = _mm_movehl_ps(t2,t1);
}

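/* After the swizzle, *c6 holds {c6a,c6b,c6c,c6d} and *c12 holds
 * {c12a,c12b,c12c,c12d}: the Lennard-Jones pair parameters for the four
 * interactions in struct-of-arrays form, ready for the kernel arithmetic.
 */
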
/* Routines to load 1-4 rvec from 4 places.
 * We mainly use these to load coordinates. The extra routines
 * are very efficient for the water-water loops, since we e.g.
 * know that a TIP4P water has 4 atoms, so we should load 12 floats+shuffle.
 */

static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1,
                                         __m128 * gmx_restrict y1,
                                         __m128 * gmx_restrict z1)
{
    __m128 t1,t2,t3,t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
    t2   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz);
    t3   = _mm_load_ss(xyz_shift+2);
    t4   = _mm_load_ss(xyz+2);
    t1   = _mm_add_ps(t1,t2);
    t3   = _mm_add_ss(t3,t4);

    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
}

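/* The periodic-image shift vector is added to the single i-particle coordinate
 * first; each resulting component is then broadcast to all four lanes of its
 * output register, so one shifted i atom can be paired against four j atoms in
 * the SIMD inner loop.
 */
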
static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 tA,tB;
    __m128 t1,t2,t3,t4,t5,t6;

    tA   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
    tB   = _mm_load_ss(xyz_shift+2);

    t1   = _mm_loadu_ps(xyz);
    t2   = _mm_loadu_ps(xyz+4);
    t3   = _mm_load_ss(xyz+8);

    tA   = _mm_movelh_ps(tA,tB);
    t4   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
    t5   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
    t6   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));

    t1   = _mm_add_ps(t1,t4);
    t2   = _mm_add_ps(t2,t5);
    t3   = _mm_add_ss(t3,t6);

    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
    *x2  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
    *y2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
    *z2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
    *x3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
    *y3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
    *z3  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
}

static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 tA,tB;
    __m128 t1,t2,t3,t4,t5,t6;

    tA   = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
    tB   = _mm_load_ss(xyz_shift+2);

    t1   = _mm_loadu_ps(xyz);
    t2   = _mm_loadu_ps(xyz+4);
    t3   = _mm_loadu_ps(xyz+8);

    tA   = _mm_movelh_ps(tA,tB);
    t4   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
    t5   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
    t6   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));

    t1   = _mm_add_ps(t1,t4);
    t2   = _mm_add_ps(t2,t5);
    t3   = _mm_add_ps(t3,t6);

    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
    *x2  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
    *y2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
    *z2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
    *x3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
    *y3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
    *z3  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
    *x4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
    *y4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(2,2,2,2));
    *z4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(3,3,3,3));
}

static gmx_inline void
gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1,
                                  __m128 * gmx_restrict y1,
                                  __m128 * gmx_restrict z1)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8;

    t1   = _mm_castpd_ps(_mm_load_sd((const double *)ptrA));
    t2   = _mm_castpd_ps(_mm_load_sd((const double *)ptrB));
    t3   = _mm_castpd_ps(_mm_load_sd((const double *)ptrC));
    t4   = _mm_castpd_ps(_mm_load_sd((const double *)ptrD));
    t5   = _mm_load_ss(ptrA+2);
    t6   = _mm_load_ss(ptrB+2);
    t7   = _mm_load_ss(ptrC+2);
    t8   = _mm_load_ss(ptrD+2);
    t1   = _mm_unpacklo_ps(t1,t2);
    t3   = _mm_unpacklo_ps(t3,t4);
    *x1  = _mm_movelh_ps(t1,t3);
    *y1  = _mm_movehl_ps(t3,t1);
    t5   = _mm_unpacklo_ps(t5,t6);
    t7   = _mm_unpacklo_ps(t7,t8);
    *z1  = _mm_movelh_ps(t5,t7);
}

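/* The loads above gather one xyz triplet from each of four pointers and
 * transpose them from per-atom (array-of-structures) layout to one register
 * per component: *x1 = {xA,xB,xC,xD}, *y1 = {yA,yB,yC,yD}, *z1 = {zA,zB,zC,zD}.
 */
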
static gmx_inline void
gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1,t2,t3,t4;

    t1   = _mm_loadu_ps(ptrA);
    t2   = _mm_loadu_ps(ptrB);
    t3   = _mm_loadu_ps(ptrC);
    t4   = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *x1  = t1;
    *y1  = t2;
    *z1  = t3;
    *x2  = t4;
    t1   = _mm_loadu_ps(ptrA+4);
    t2   = _mm_loadu_ps(ptrB+4);
    t3   = _mm_loadu_ps(ptrC+4);
    t4   = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *y2  = t1;
    *z2  = t2;
    *x3  = t3;
    *y3  = t4;
    t1   = _mm_load_ss(ptrA+8);
    t2   = _mm_load_ss(ptrB+8);
    t3   = _mm_load_ss(ptrC+8);
    t4   = _mm_load_ss(ptrD+8);
    t1   = _mm_unpacklo_ps(t1,t3);
    t3   = _mm_unpacklo_ps(t2,t4);
    *z3  = _mm_unpacklo_ps(t1,t3);
}

static gmx_inline void
gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1,t2,t3,t4;

    t1   = _mm_loadu_ps(ptrA);
    t2   = _mm_loadu_ps(ptrB);
    t3   = _mm_loadu_ps(ptrC);
    t4   = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *x1  = t1;
    *y1  = t2;
    *z1  = t3;
    *x2  = t4;
    t1   = _mm_loadu_ps(ptrA+4);
    t2   = _mm_loadu_ps(ptrB+4);
    t3   = _mm_loadu_ps(ptrC+4);
    t4   = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *y2  = t1;
    *z2  = t2;
    *x3  = t3;
    *y3  = t4;
    t1   = _mm_loadu_ps(ptrA+8);
    t2   = _mm_loadu_ps(ptrB+8);
    t3   = _mm_loadu_ps(ptrC+8);
    t4   = _mm_loadu_ps(ptrD+8);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *z3  = t1;
    *x4  = t2;
    *y4  = t3;
    *z4  = t4;
}

static gmx_inline void
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA,
                                       float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC,
                                       float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12;

    t5  = _mm_unpacklo_ps(y1,z1);
    t6  = _mm_unpackhi_ps(y1,z1);
    t7  = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(1,0,0,0));
    t8  = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(3,2,0,1));
    t9  = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(1,0,0,2));
    t10 = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(3,2,0,3));
    t1  = _mm_load_ss(ptrA);
    t1  = _mm_loadh_pi(t1,(__m64 *)(ptrA+1));
    t1  = _mm_sub_ps(t1,t7);
    _mm_store_ss(ptrA,t1);
    _mm_storeh_pi((__m64 *)(ptrA+1),t1);
    t2  = _mm_load_ss(ptrB);
    t2  = _mm_loadh_pi(t2,(__m64 *)(ptrB+1));
    t2  = _mm_sub_ps(t2,t8);
    _mm_store_ss(ptrB,t2);
    _mm_storeh_pi((__m64 *)(ptrB+1),t2);
    t3  = _mm_load_ss(ptrC);
    t3  = _mm_loadh_pi(t3,(__m64 *)(ptrC+1));
    t3  = _mm_sub_ps(t3,t9);
    _mm_store_ss(ptrC,t3);
    _mm_storeh_pi((__m64 *)(ptrC+1),t3);
    t4  = _mm_load_ss(ptrD);
    t4  = _mm_loadh_pi(t4,(__m64 *)(ptrD+1));
    t4  = _mm_sub_ps(t4,t10);
    _mm_store_ss(ptrD,t4);
    _mm_storeh_pi((__m64 *)(ptrD+1),t4);
}

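/* The decrement routines here and below are the write-back counterpart of the
 * loads above: they shuffle the per-component force registers back to per-atom
 * order and subtract them from the four memory locations. This is how the
 * j-particle forces are typically updated after a SIMD inner loop (the
 * "typically" is an assumption about usage, not something stated in this file).
 */
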
#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_3rvec_4ptr_swizzle_ps(ptrA,ptrB,ptrC,ptrD, \
                                               _x1,_y1,_z1,_x2,_y2,_z2,_x3,_y3,_z3) \
{\
    __m128 _t1,_t2,_t3,_t4,_t5,_t6,_t7,_t8,_t9,_t10;\
    __m128 _t11,_t12,_t13,_t14,_t15,_t16,_t17,_t18,_t19;\
    __m128 _t20,_t21,_t22,_t23,_t24,_t25;\
    _t13 = _mm_unpackhi_ps(_x1,_y1);\
    _x1  = _mm_unpacklo_ps(_x1,_y1);\
    _t14 = _mm_unpackhi_ps(_z1,_x2);\
    _z1  = _mm_unpacklo_ps(_z1,_x2);\
    _t15 = _mm_unpackhi_ps(_y2,_z2);\
    _y2  = _mm_unpacklo_ps(_y2,_z2);\
    _t16 = _mm_unpackhi_ps(_x3,_y3);\
    _x3  = _mm_unpacklo_ps(_x3,_y3);\
    _t17 = _mm_shuffle_ps(_z3,_z3,_MM_SHUFFLE(0,0,0,1));\
    _t18 = _mm_movehl_ps(_z3,_z3);\
    _t19 = _mm_shuffle_ps(_t18,_t18,_MM_SHUFFLE(0,0,0,1));\
    _t20 = _mm_movelh_ps(_x1,_z1);\
    _t21 = _mm_movehl_ps(_z1,_x1);\
    _t22 = _mm_movelh_ps(_t13,_t14);\
    _t14 = _mm_movehl_ps(_t14,_t13);\
    _t23 = _mm_movelh_ps(_y2,_x3);\
    _t24 = _mm_movehl_ps(_x3,_y2);\
    _t25 = _mm_movelh_ps(_t15,_t16);\
    _t16 = _mm_movehl_ps(_t16,_t15);\
    _t1  = _mm_loadu_ps(ptrA);\
    _t2  = _mm_loadu_ps(ptrA+4);\
    _t3  = _mm_load_ss(ptrA+8);\
    _t1  = _mm_sub_ps(_t1,_t20);\
    _t2  = _mm_sub_ps(_t2,_t23);\
    _t3  = _mm_sub_ss(_t3,_z3);\
    _mm_storeu_ps(ptrA,_t1);\
    _mm_storeu_ps(ptrA+4,_t2);\
    _mm_store_ss(ptrA+8,_t3);\
    _t4  = _mm_loadu_ps(ptrB);\
    _t5  = _mm_loadu_ps(ptrB+4);\
    _t6  = _mm_load_ss(ptrB+8);\
    _t4  = _mm_sub_ps(_t4,_t21);\
    _t5  = _mm_sub_ps(_t5,_t24);\
    _t6  = _mm_sub_ss(_t6,_t17);\
    _mm_storeu_ps(ptrB,_t4);\
    _mm_storeu_ps(ptrB+4,_t5);\
    _mm_store_ss(ptrB+8,_t6);\
    _t7  = _mm_loadu_ps(ptrC);\
    _t8  = _mm_loadu_ps(ptrC+4);\
    _t9  = _mm_load_ss(ptrC+8);\
    _t7  = _mm_sub_ps(_t7,_t22);\
    _t8  = _mm_sub_ps(_t8,_t25);\
    _t9  = _mm_sub_ss(_t9,_t18);\
    _mm_storeu_ps(ptrC,_t7);\
    _mm_storeu_ps(ptrC+4,_t8);\
    _mm_store_ss(ptrC+8,_t9);\
    _t10 = _mm_loadu_ps(ptrD);\
    _t11 = _mm_loadu_ps(ptrD+4);\
    _t12 = _mm_load_ss(ptrD+8);\
    _t10 = _mm_sub_ps(_t10,_t14);\
    _t11 = _mm_sub_ps(_t11,_t16);\
    _t12 = _mm_sub_ss(_t12,_t19);\
    _mm_storeu_ps(ptrD,_t10);\
    _mm_storeu_ps(ptrD+4,_t11);\
    _mm_store_ss(ptrD+8,_t12);\
}
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
    __m128 t11,t12,t13,t14,t15,t16,t17,t18,t19;
    __m128 t20,t21,t22,t23,t24,t25;

    t13 = _mm_unpackhi_ps(x1,y1);
    x1  = _mm_unpacklo_ps(x1,y1);
    t14 = _mm_unpackhi_ps(z1,x2);
    z1  = _mm_unpacklo_ps(z1,x2);
    t15 = _mm_unpackhi_ps(y2,z2);
    y2  = _mm_unpacklo_ps(y2,z2);
    t16 = _mm_unpackhi_ps(x3,y3);
    x3  = _mm_unpacklo_ps(x3,y3);
    t17 = _mm_shuffle_ps(z3,z3,_MM_SHUFFLE(0,0,0,1));
    t18 = _mm_movehl_ps(z3,z3);
    t19 = _mm_shuffle_ps(t18,t18,_MM_SHUFFLE(0,0,0,1));
    t20 = _mm_movelh_ps(x1,z1);
    t21 = _mm_movehl_ps(z1,x1);
    t22 = _mm_movelh_ps(t13,t14);
    t14 = _mm_movehl_ps(t14,t13);
    t23 = _mm_movelh_ps(y2,x3);
    t24 = _mm_movehl_ps(x3,y2);
    t25 = _mm_movelh_ps(t15,t16);
    t16 = _mm_movehl_ps(t16,t15);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_load_ss(ptrA+8);
    t1  = _mm_sub_ps(t1,t20);
    t2  = _mm_sub_ps(t2,t23);
    t3  = _mm_sub_ss(t3,z3);
    _mm_storeu_ps(ptrA,t1);
    _mm_storeu_ps(ptrA+4,t2);
    _mm_store_ss(ptrA+8,t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_load_ss(ptrB+8);
    t4  = _mm_sub_ps(t4,t21);
    t5  = _mm_sub_ps(t5,t24);
    t6  = _mm_sub_ss(t6,t17);
    _mm_storeu_ps(ptrB,t4);
    _mm_storeu_ps(ptrB+4,t5);
    _mm_store_ss(ptrB+8,t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_load_ss(ptrC+8);
    t7  = _mm_sub_ps(t7,t22);
    t8  = _mm_sub_ps(t8,t25);
    t9  = _mm_sub_ss(t9,t18);
    _mm_storeu_ps(ptrC,t7);
    _mm_storeu_ps(ptrC+4,t8);
    _mm_store_ss(ptrC+8,t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_load_ss(ptrD+8);
    t10 = _mm_sub_ps(t10,t14);
    t11 = _mm_sub_ps(t11,t16);
    t12 = _mm_sub_ss(t12,t19);
    _mm_storeu_ps(ptrD,t10);
    _mm_storeu_ps(ptrD+4,t11);
    _mm_store_ss(ptrD+8,t12);
}
#endif

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_4rvec_4ptr_swizzle_ps(ptrA,ptrB,ptrC,ptrD, \
                                               _x1,_y1,_z1,_x2,_y2,_z2,_x3,_y3,_z3,_x4,_y4,_z4) \
{\
    __m128 _t1,_t2,_t3,_t4,_t5,_t6,_t7,_t8,_t9,_t10,_t11;\
    __m128 _t12,_t13,_t14,_t15,_t16,_t17,_t18,_t19,_t20,_t21,_t22;\
    __m128 _t23,_t24;\
    _t13 = _mm_unpackhi_ps(_x1,_y1);\
    _x1  = _mm_unpacklo_ps(_x1,_y1);\
    _t14 = _mm_unpackhi_ps(_z1,_x2);\
    _z1  = _mm_unpacklo_ps(_z1,_x2);\
    _t15 = _mm_unpackhi_ps(_y2,_z2);\
    _y2  = _mm_unpacklo_ps(_y2,_z2);\
    _t16 = _mm_unpackhi_ps(_x3,_y3);\
    _x3  = _mm_unpacklo_ps(_x3,_y3);\
    _t17 = _mm_unpackhi_ps(_z3,_x4);\
    _z3  = _mm_unpacklo_ps(_z3,_x4);\
    _t18 = _mm_unpackhi_ps(_y4,_z4);\
    _y4  = _mm_unpacklo_ps(_y4,_z4);\
    _t19 = _mm_movelh_ps(_x1,_z1);\
    _z1  = _mm_movehl_ps(_z1,_x1);\
    _t20 = _mm_movelh_ps(_t13,_t14);\
    _t14 = _mm_movehl_ps(_t14,_t13);\
    _t21 = _mm_movelh_ps(_y2,_x3);\
    _x3  = _mm_movehl_ps(_x3,_y2);\
    _t22 = _mm_movelh_ps(_t15,_t16);\
    _t16 = _mm_movehl_ps(_t16,_t15);\
    _t23 = _mm_movelh_ps(_z3,_y4);\
    _y4  = _mm_movehl_ps(_y4,_z3);\
    _t24 = _mm_movelh_ps(_t17,_t18);\
    _t18 = _mm_movehl_ps(_t18,_t17);\
    _t1  = _mm_loadu_ps(ptrA);\
    _t2  = _mm_loadu_ps(ptrA+4);\
    _t3  = _mm_loadu_ps(ptrA+8);\
    _t1  = _mm_sub_ps(_t1,_t19);\
    _t2  = _mm_sub_ps(_t2,_t21);\
    _t3  = _mm_sub_ps(_t3,_t23);\
    _mm_storeu_ps(ptrA,_t1);\
    _mm_storeu_ps(ptrA+4,_t2);\
    _mm_storeu_ps(ptrA+8,_t3);\
    _t4  = _mm_loadu_ps(ptrB);\
    _t5  = _mm_loadu_ps(ptrB+4);\
    _t6  = _mm_loadu_ps(ptrB+8);\
    _t4  = _mm_sub_ps(_t4,_z1);\
    _t5  = _mm_sub_ps(_t5,_x3);\
    _t6  = _mm_sub_ps(_t6,_y4);\
    _mm_storeu_ps(ptrB,_t4);\
    _mm_storeu_ps(ptrB+4,_t5);\
    _mm_storeu_ps(ptrB+8,_t6);\
    _t7  = _mm_loadu_ps(ptrC);\
    _t8  = _mm_loadu_ps(ptrC+4);\
    _t9  = _mm_loadu_ps(ptrC+8);\
    _t7  = _mm_sub_ps(_t7,_t20);\
    _t8  = _mm_sub_ps(_t8,_t22);\
    _t9  = _mm_sub_ps(_t9,_t24);\
    _mm_storeu_ps(ptrC,_t7);\
    _mm_storeu_ps(ptrC+4,_t8);\
    _mm_storeu_ps(ptrC+8,_t9);\
    _t10 = _mm_loadu_ps(ptrD);\
    _t11 = _mm_loadu_ps(ptrD+4);\
    _t12 = _mm_loadu_ps(ptrD+8);\
    _t10 = _mm_sub_ps(_t10,_t14);\
    _t11 = _mm_sub_ps(_t11,_t16);\
    _t12 = _mm_sub_ps(_t12,_t18);\
    _mm_storeu_ps(ptrD,_t10);\
    _mm_storeu_ps(ptrD+4,_t11);\
    _mm_storeu_ps(ptrD+8,_t12);\
}
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3,
                                       __m128 x4, __m128 y4, __m128 z4)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11;
    __m128 t12,t13,t14,t15,t16,t17,t18,t19,t20,t21,t22;
    __m128 t23,t24;

    t13 = _mm_unpackhi_ps(x1,y1);
    x1  = _mm_unpacklo_ps(x1,y1);
    t14 = _mm_unpackhi_ps(z1,x2);
    z1  = _mm_unpacklo_ps(z1,x2);
    t15 = _mm_unpackhi_ps(y2,z2);
    y2  = _mm_unpacklo_ps(y2,z2);
    t16 = _mm_unpackhi_ps(x3,y3);
    x3  = _mm_unpacklo_ps(x3,y3);
    t17 = _mm_unpackhi_ps(z3,x4);
    z3  = _mm_unpacklo_ps(z3,x4);
    t18 = _mm_unpackhi_ps(y4,z4);
    y4  = _mm_unpacklo_ps(y4,z4);
    t19 = _mm_movelh_ps(x1,z1);
    z1  = _mm_movehl_ps(z1,x1);
    t20 = _mm_movelh_ps(t13,t14);
    t14 = _mm_movehl_ps(t14,t13);
    t21 = _mm_movelh_ps(y2,x3);
    x3  = _mm_movehl_ps(x3,y2);
    t22 = _mm_movelh_ps(t15,t16);
    t16 = _mm_movehl_ps(t16,t15);
    t23 = _mm_movelh_ps(z3,y4);
    y4  = _mm_movehl_ps(y4,z3);
    t24 = _mm_movelh_ps(t17,t18);
    t18 = _mm_movehl_ps(t18,t17);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_loadu_ps(ptrA+8);
    t1  = _mm_sub_ps(t1,t19);
    t2  = _mm_sub_ps(t2,t21);
    t3  = _mm_sub_ps(t3,t23);
    _mm_storeu_ps(ptrA,t1);
    _mm_storeu_ps(ptrA+4,t2);
    _mm_storeu_ps(ptrA+8,t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_loadu_ps(ptrB+8);
    t4  = _mm_sub_ps(t4,z1);
    t5  = _mm_sub_ps(t5,x3);
    t6  = _mm_sub_ps(t6,y4);
    _mm_storeu_ps(ptrB,t4);
    _mm_storeu_ps(ptrB+4,t5);
    _mm_storeu_ps(ptrB+8,t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_loadu_ps(ptrC+8);
    t7  = _mm_sub_ps(t7,t20);
    t8  = _mm_sub_ps(t8,t22);
    t9  = _mm_sub_ps(t9,t24);
    _mm_storeu_ps(ptrC,t7);
    _mm_storeu_ps(ptrC+4,t8);
    _mm_storeu_ps(ptrC+8,t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_loadu_ps(ptrD+8);
    t10 = _mm_sub_ps(t10,t14);
    t11 = _mm_sub_ps(t11,t16);
    t12 = _mm_sub_ps(t12,t18);
    _mm_storeu_ps(ptrD,t10);
    _mm_storeu_ps(ptrD+4,t11);
    _mm_storeu_ps(ptrD+8,t12);
}
#endif

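/* The update_iforce routines below perform the opposite reduction for the i
 * particle: each input register holds four partial force contributions in its
 * lanes, which are summed and then added both to the force array fptr and to
 * the shift-force accumulator fshiftptr used for the virial.
 */
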
static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3;

    /* transpose data; t1 only pads the transpose, its reduced lane is never stored */
    t1   = _mm_setzero_ps();
    _MM_TRANSPOSE4_PS(fix1,t1,fiy1,fiz1);
    fix1 = _mm_add_ps(_mm_add_ps(fix1,t1), _mm_add_ps(fiy1,fiz1));
    /* fix1 now holds { fx, -, fy, fz } in lanes 0-3 */

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2,(__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3,(__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2,fix1);
    t3 = _mm_add_ps(t3,fix1);

    _mm_store_ss(fptr,t2);
    _mm_storeh_pi((__m64 *)(fptr+1),t2);
    _mm_store_ss(fshiftptr,t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1),t3);
}

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3, \
                                              fptr,fshiftptr) \
{\
    __m128 _t1,_t2,_t3,_t4;\
    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);\
    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);\
    _t2   = _mm_movehl_ps(_mm_setzero_ps(),fiz3);\
    _t1   = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(0,0,0,1));\
    _t3   = _mm_shuffle_ps(_t2,_t2,_MM_SHUFFLE(0,0,0,1));\
    fix1  = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));\
    fiy2  = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));\
    fiz3  = _mm_add_ss(_mm_add_ps(fiz3,_t1) , _mm_add_ps(_t2,_t3));\
    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));\
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));\
    _mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));\
    _t4 = _mm_load_ss(fshiftptr+2);\
    _t4 = _mm_loadh_pi(_t4,(__m64 *)(fshiftptr));\
    _t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));\
    _t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));\
    _t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));\
    _t3 = _mm_shuffle_ps(_t3 ,_t3 ,_MM_SHUFFLE(1,2,0,0));\
    _t1 = _mm_add_ps(_t1,_t2);\
    _t3 = _mm_add_ps(_t3,_t4);\
    _t1 = _mm_add_ps(_t1,_t3);\
    _mm_store_ss(fshiftptr+2,_t1);\
    _mm_storeh_pi((__m64 *)(fshiftptr),_t1);\
}
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3,t4;

    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);
    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);
    t2   = _mm_movehl_ps(_mm_setzero_ps(),fiz3);
    t1   = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(0,0,0,1));
    t3   = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,1));

    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));
    fiz3 = _mm_add_ss(_mm_add_ps(fiz3,t1) , _mm_add_ps(t2,t3));

    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
    _mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));

    t4 = _mm_load_ss(fshiftptr+2);
    t4 = _mm_loadh_pi(t4,(__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));   /* fiy1 fix1  -   fiz3 */
    t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));   /* fiy3 fix3  -   fiz1 */
    t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));   /* fix2 fix2 fiy2 fiz2 */
    t3 = _mm_shuffle_ps(t3  ,t3  ,_MM_SHUFFLE(1,2,0,0));   /* fiy2 fix2  -   fiz2 */

    t1 = _mm_add_ps(t1,t2);
    t3 = _mm_add_ps(t3,t4);
    t1 = _mm_add_ps(t1,t3);   /* y x - z */

    _mm_store_ss(fshiftptr+2,t1);
    _mm_storeh_pi((__m64 *)(fshiftptr),t1);
}
#endif

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_4atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,fix4,fiy4,fiz4, \
                                              fptr,fshiftptr) \
{\
    __m128 _t1,_t2,_t3,_t4,_t5;\
    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);\
    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);\
    _MM_TRANSPOSE4_PS(fiz3,fix4,fiy4,fiz4);\
    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));\
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));\
    fiz3 = _mm_add_ps(_mm_add_ps(fiz3,fix4), _mm_add_ps(fiy4,fiz4));\
    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));\
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));\
    _mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));\
    _t5 = _mm_load_ss(fshiftptr+2);\
    _t5 = _mm_loadh_pi(_t5,(__m64 *)(fshiftptr));\
    _t1 = _mm_shuffle_ps(fix1,fix1,_MM_SHUFFLE(1,0,2,2));\
    _t2 = _mm_shuffle_ps(fiy2,fiy2,_MM_SHUFFLE(3,2,1,1));\
    _t3 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(2,1,0,0));\
    _t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));\
    _t4 = _mm_shuffle_ps(fiz3,_t4 ,_MM_SHUFFLE(2,0,3,3));\
    _t1 = _mm_add_ps(_t1,_t2);\
    _t3 = _mm_add_ps(_t3,_t4);\
    _t1 = _mm_add_ps(_t1,_t3);\
    _t5 = _mm_add_ps(_t5,_t1);\
    _mm_store_ss(fshiftptr+2,_t5);\
    _mm_storeh_pi((__m64 *)(fshiftptr),_t5);\
}
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3,t4,t5;

    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);
    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);
    _MM_TRANSPOSE4_PS(fiz3,fix4,fiy4,fiz4);

    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));
    fiz3 = _mm_add_ps(_mm_add_ps(fiz3,fix4), _mm_add_ps(fiy4,fiz4));

    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
    _mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));

    t5 = _mm_load_ss(fshiftptr+2);
    t5 = _mm_loadh_pi(t5,(__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fix1,fix1,_MM_SHUFFLE(1,0,2,2));
    t2 = _mm_shuffle_ps(fiy2,fiy2,_MM_SHUFFLE(3,2,1,1));
    t3 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(2,1,0,0));
    t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));
    t4 = _mm_shuffle_ps(fiz3,t4  ,_MM_SHUFFLE(2,0,3,3));

    t1 = _mm_add_ps(t1,t2);
    t3 = _mm_add_ps(t3,t4);
    t1 = _mm_add_ps(t1,t3);
    t5 = _mm_add_ps(t5,t1);

    _mm_store_ss(fshiftptr+2,t5);
    _mm_storeh_pi((__m64 *)(fshiftptr),t5);
}
#endif

static gmx_inline void
gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
    pot1 = _mm_add_ps(pot1,_mm_movehl_ps(_mm_setzero_ps(),pot1));
    pot1 = _mm_add_ps(pot1,_mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(0,0,0,1)));
    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
}

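/* The reduction above folds the four lanes of pot1 down to a single sum (high
 * half added to the low half, then lane 1 added to lane 0) before the scalar
 * add to *ptrA. Illustrative call (accumulator and buffer names are
 * assumptions, not from this file):
 *
 *     gmx_mm_update_1pot_ps(velecsum, energybuf+group_index);
 */
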
static gmx_inline void
gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB)
{
    __m128 t1,t2;

    t1   = _mm_movehl_ps(pot2,pot1);
    t2   = _mm_movelh_ps(pot1,pot2);
    t1   = _mm_add_ps(t1,t2);
    t2   = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,1,1));
    pot1 = _mm_add_ps(t1,t2);
    pot2 = _mm_movehl_ps(t2,pot1);
    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
    _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
}

#endif /* _kernelutil_x86_sse2_single_h_ */