/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _kernelutil_x86_sse2_single_h_
#define _kernelutil_x86_sse2_single_h_

/* We require SSE2 now! */

#include <math.h>

#include <emmintrin.h> /* SSE2 intrinsics; assumed here, the build may instead provide it via a wrapper header */

#define gmx_mm_castsi128_ps _mm_castsi128_ps

#define gmx_mm_extract_epi32(x, imm) _mm_cvtsi128_si32(_mm_srli_si128((x), 4 * (imm)))
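
/* gmx_mm_extract_epi32() emulates the SSE4.1 _mm_extract_epi32 intrinsic with
 * plain SSE2: the 128-bit lane is shifted right by 4*imm bytes so that element
 * imm lands in the low dword, which _mm_cvtsi128_si32 then moves to a normal
 * integer register. Illustrative sketch (hypothetical values, not part of this
 * header):
 *
 *     __m128i v = _mm_set_epi32(40, 30, 20, 10);
 *     int     e = gmx_mm_extract_epi32(v, 2);   (e is now 30)
 */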

/* Normal sum of four xmm registers */
#define gmx_mm_sum4_ps(t0, t1, t2, t3) _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3))

static gmx_inline __m128 gmx_simdcall
gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
{
    return _mm_add_ps( _mm_add_ps( _mm_mul_ps(dx, dx), _mm_mul_ps(dy, dy) ), _mm_mul_ps(dz, dz) );
}
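
/* gmx_mm_calc_rsq_ps() returns dx*dx + dy*dy + dz*dz for four interaction
 * pairs at once; with dx/dy/dz holding i-j coordinate differences this is the
 * squared distance used throughout the kernels. Sketch (hypothetical inputs):
 *
 *     __m128 rsq = gmx_mm_calc_rsq_ps(dx, dy, dz);
 */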

static gmx_inline int gmx_simdcall
gmx_mm_any_lt(__m128 a, __m128 b)
{
    return _mm_movemask_ps(_mm_cmplt_ps(a, b));
}
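
/* gmx_mm_any_lt() returns a nonzero bitmask if any of the four elements of a
 * compares less-than the corresponding element of b, e.g. as a quick test
 * whether any pair is inside the cutoff. Sketch (rcut2 is a hypothetical
 * broadcast of the squared cutoff):
 *
 *     if (gmx_mm_any_lt(rsq, rcut2))
 *     {
 *         (evaluate the interactions for this quadruplet)
 *     }
 */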

/* Load a single value from 1-4 places, merge into xmm register */

static gmx_inline __m128 gmx_simdcall
gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
                             const float * gmx_restrict ptrB,
                             const float * gmx_restrict ptrC,
                             const float * gmx_restrict ptrD)
{
    __m128 t1, t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA), _mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB), _mm_load_ss(ptrD));
    return _mm_unpacklo_ps(t1, t2);
}
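
/* The returned register holds the four scalars in pointer order, element 0
 * from ptrA through element 3 from ptrD, so a round trip through
 * gmx_mm_store_4real_swizzle_ps() below leaves memory unchanged.
 */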

static gmx_inline void gmx_simdcall
gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
                              float * gmx_restrict ptrB,
                              float * gmx_restrict ptrC,
                              float * gmx_restrict ptrD,
                              __m128 xmm1)
{
    __m128 t2, t3, t4;

    t3 = _mm_movehl_ps(_mm_setzero_ps(), xmm1);
    t2 = _mm_shuffle_ps(xmm1, xmm1, _MM_SHUFFLE(1, 1, 1, 1));
    t4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 1, 1, 1));
    _mm_store_ss(ptrA, xmm1);
    _mm_store_ss(ptrB, t2);
    _mm_store_ss(ptrC, t3);
    _mm_store_ss(ptrD, t4);
}

/* Similar to store, but increments value in memory */
static gmx_inline void gmx_simdcall
gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
                                  float * gmx_restrict ptrB,
                                  float * gmx_restrict ptrC,
                                  float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 tmp;

    tmp = gmx_mm_load_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD);
    tmp = _mm_add_ps(tmp, xmm1);
    gmx_mm_store_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, tmp);
}
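
/* SSE2 has no scatter/gather instructions, so the increment is an explicit
 * gather, vector add, and scatter. It is a read-modify-write update, so the
 * four addresses must be distinct; the gmx_restrict qualifiers encode this.
 */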

static gmx_inline void gmx_simdcall
gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
                             const float * gmx_restrict p2,
                             const float * gmx_restrict p3,
                             const float * gmx_restrict p4,
                             __m128 * gmx_restrict c6,
                             __m128 * gmx_restrict c12)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p1); /* - - c12a c6a */
    t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p2); /* - - c12b c6b */
    t3 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p3); /* - - c12c c6c */
    t4 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p4); /* - - c12d c6d */
    t1 = _mm_unpacklo_ps(t1, t2);
    t2 = _mm_unpacklo_ps(t3, t4);
    *c6 = _mm_movelh_ps(t1, t2);
    *c12 = _mm_movehl_ps(t2, t1);
}
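
/* gmx_mm_load_4pair_swizzle_ps() gathers four (c6,c12) Lennard-Jones
 * parameter pairs, stored contiguously per pair as shown in the lane comments
 * above, and transposes them into one register of c6 values and one of c12
 * values ready for vectorized use.
 */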

/* Routines to load 1-4 rvec from 4 places.
 * We mainly use these to load coordinates. The extra routines
 * are very efficient for the water-water loops, since we know
 * that e.g. a TIP4P water has 4 atoms, so we should load 12 floats and shuffle.
 */

static gmx_inline void gmx_simdcall
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1,
                                         __m128 * gmx_restrict y1,
                                         __m128 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz);
    t3 = _mm_load_ss(xyz_shift+2);
    t4 = _mm_load_ss(xyz+2);
    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ss(t3, t4);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
}
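
/* The shift-and-broadcast loaders add the periodic shift vector to the i-atom
 * coordinates once, then broadcast each shifted component to all four lanes so
 * it can be subtracted directly from the coordinates of four j-atoms at a time.
 */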

static gmx_inline void gmx_simdcall
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_load_ss(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ss(t3, t6);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
}
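
/* The three shuffles of tA above replicate the shift (sx,sy,sz) into the
 * patterns (sx,sy,sz,sx), (sy,sz,sx,sy) and (sz,sx,sy,sz), matching the nine
 * packed coordinates x1 y1 z1 x2 y2 z2 x3 y3 z3 of a three-atom molecule.
 */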

static gmx_inline void gmx_simdcall
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_loadu_ps(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ps(t3, t6);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
    *x4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 1, 1, 1));
    *y4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2, 2, 2, 2));
    *z4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3, 3, 3, 3));
}

static gmx_inline void gmx_simdcall
gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1,
                                  __m128 * gmx_restrict y1,
                                  __m128 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8;
    t1 = _mm_castpd_ps(_mm_load_sd((const double *)ptrA));
    t2 = _mm_castpd_ps(_mm_load_sd((const double *)ptrB));
    t3 = _mm_castpd_ps(_mm_load_sd((const double *)ptrC));
    t4 = _mm_castpd_ps(_mm_load_sd((const double *)ptrD));
    t5 = _mm_load_ss(ptrA+2);
    t6 = _mm_load_ss(ptrB+2);
    t7 = _mm_load_ss(ptrC+2);
    t8 = _mm_load_ss(ptrD+2);
    t1 = _mm_unpacklo_ps(t1, t2);
    t3 = _mm_unpacklo_ps(t3, t4);
    *x1 = _mm_movelh_ps(t1, t3);
    *y1 = _mm_movehl_ps(t3, t1);
    t5 = _mm_unpacklo_ps(t5, t6);
    t7 = _mm_unpacklo_ps(t7, t8);
    *z1 = _mm_movelh_ps(t5, t7);
}
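
/* gmx_mm_load_1rvec_4ptr_swizzle_ps() gathers x/y/z of one atom from each of
 * four pointers and transposes them to SoA form: *x1 = (xA,xB,xC,xD), and
 * likewise for *y1 and *z1.
 */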

static gmx_inline void gmx_simdcall
gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1, t2, t3, t4;
    t1 = _mm_loadu_ps(ptrA);
    t2 = _mm_loadu_ps(ptrB);
    t3 = _mm_loadu_ps(ptrC);
    t4 = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1 = _mm_loadu_ps(ptrA+4);
    t2 = _mm_loadu_ps(ptrB+4);
    t3 = _mm_loadu_ps(ptrC+4);
    t4 = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1 = _mm_load_ss(ptrA+8);
    t2 = _mm_load_ss(ptrB+8);
    t3 = _mm_load_ss(ptrC+8);
    t4 = _mm_load_ss(ptrD+8);
    t1 = _mm_unpacklo_ps(t1, t3);
    t3 = _mm_unpacklo_ps(t2, t4);
    *z3 = _mm_unpacklo_ps(t1, t3);
}

static gmx_inline void gmx_simdcall
gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1, t2, t3, t4;
    t1 = _mm_loadu_ps(ptrA);
    t2 = _mm_loadu_ps(ptrB);
    t3 = _mm_loadu_ps(ptrC);
    t4 = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1 = _mm_loadu_ps(ptrA+4);
    t2 = _mm_loadu_ps(ptrB+4);
    t3 = _mm_loadu_ps(ptrC+4);
    t4 = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1 = _mm_loadu_ps(ptrA+8);
    t2 = _mm_loadu_ps(ptrB+8);
    t3 = _mm_loadu_ps(ptrC+8);
    t4 = _mm_loadu_ps(ptrD+8);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *z3 = t1;
    *x4 = t2;
    *y4 = t3;
    *z4 = t4;
}
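
/* The 3- and 4-rvec gathers above use one 4x4 transpose per group of four
 * floats; for a four-atom molecule that is exactly twelve loads and three
 * transposes, with every output register landing directly in its SoA slot.
 */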

static gmx_inline void gmx_simdcall
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA,
                                       float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC,
                                       float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    t5 = _mm_unpacklo_ps(y1, z1);
    t6 = _mm_unpackhi_ps(y1, z1);
    t7 = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(1, 0, 0, 0));
    t8 = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(3, 2, 0, 1));
    t9 = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(1, 0, 0, 2));
    t10 = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(3, 2, 0, 3));
    t1 = _mm_load_ss(ptrA);
    t1 = _mm_loadh_pi(t1, (__m64 *)(ptrA+1));
    t1 = _mm_sub_ps(t1, t7);
    _mm_store_ss(ptrA, t1);
    _mm_storeh_pi((__m64 *)(ptrA+1), t1);
    t2 = _mm_load_ss(ptrB);
    t2 = _mm_loadh_pi(t2, (__m64 *)(ptrB+1));
    t2 = _mm_sub_ps(t2, t8);
    _mm_store_ss(ptrB, t2);
    _mm_storeh_pi((__m64 *)(ptrB+1), t2);
    t3 = _mm_load_ss(ptrC);
    t3 = _mm_loadh_pi(t3, (__m64 *)(ptrC+1));
    t3 = _mm_sub_ps(t3, t9);
    _mm_store_ss(ptrC, t3);
    _mm_storeh_pi((__m64 *)(ptrC+1), t3);
    t4 = _mm_load_ss(ptrD);
    t4 = _mm_loadh_pi(t4, (__m64 *)(ptrD+1));
    t4 = _mm_sub_ps(t4, t10);
    _mm_store_ss(ptrD, t4);
    _mm_storeh_pi((__m64 *)(ptrD+1), t4);
}
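
/* The force decrements are read-modify-write updates. For a single rvec the
 * x value is handled with _mm_load_ss/_mm_store_ss and y,z with the high-half
 * _mm_loadh_pi/_mm_storeh_pi pair, so only three floats are touched per
 * pointer and no access runs past the end of the force array.
 */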

static gmx_inline void gmx_simdcall
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    __m128 t11, t12, t13, t14, t15, t16, t17, t18, t19;
    __m128 t20, t21, t22, t23, t24, t25;

    t13 = _mm_unpackhi_ps(x1, y1);
    x1 = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1 = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2 = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3 = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_shuffle_ps(z3, z3, _MM_SHUFFLE(0, 0, 0, 1));
    t18 = _mm_movehl_ps(z3, z3);
    t19 = _mm_shuffle_ps(t18, t18, _MM_SHUFFLE(0, 0, 0, 1));
    t20 = _mm_movelh_ps(x1, z1);
    t21 = _mm_movehl_ps(z1, x1);
    t22 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t23 = _mm_movelh_ps(y2, x3);
    t24 = _mm_movehl_ps(x3, y2);
    t25 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t1 = _mm_loadu_ps(ptrA);
    t2 = _mm_loadu_ps(ptrA+4);
    t3 = _mm_load_ss(ptrA+8);
    t1 = _mm_sub_ps(t1, t20);
    t2 = _mm_sub_ps(t2, t23);
    t3 = _mm_sub_ss(t3, z3);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_store_ss(ptrA+8, t3);
    t4 = _mm_loadu_ps(ptrB);
    t5 = _mm_loadu_ps(ptrB+4);
    t6 = _mm_load_ss(ptrB+8);
    t4 = _mm_sub_ps(t4, t21);
    t5 = _mm_sub_ps(t5, t24);
    t6 = _mm_sub_ss(t6, t17);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_store_ss(ptrB+8, t6);
    t7 = _mm_loadu_ps(ptrC);
    t8 = _mm_loadu_ps(ptrC+4);
    t9 = _mm_load_ss(ptrC+8);
    t7 = _mm_sub_ps(t7, t22);
    t8 = _mm_sub_ps(t8, t25);
    t9 = _mm_sub_ss(t9, t18);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_store_ss(ptrC+8, t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_load_ss(ptrD+8);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ss(t12, t19);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_store_ss(ptrD+8, t12);
}

static gmx_inline void gmx_simdcall
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3,
                                       __m128 x4, __m128 y4, __m128 z4)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11;
    __m128 t12, t13, t14, t15, t16, t17, t18, t19, t20, t21, t22;
    __m128 t23, t24;

    t13 = _mm_unpackhi_ps(x1, y1);
    x1 = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1 = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2 = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3 = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_unpackhi_ps(z3, x4);
    z3 = _mm_unpacklo_ps(z3, x4);
    t18 = _mm_unpackhi_ps(y4, z4);
    y4 = _mm_unpacklo_ps(y4, z4);
    t19 = _mm_movelh_ps(x1, z1);
    z1 = _mm_movehl_ps(z1, x1);
    t20 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t21 = _mm_movelh_ps(y2, x3);
    x3 = _mm_movehl_ps(x3, y2);
    t22 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t23 = _mm_movelh_ps(z3, y4);
    y4 = _mm_movehl_ps(y4, z3);
    t24 = _mm_movelh_ps(t17, t18);
    t18 = _mm_movehl_ps(t18, t17);
    t1 = _mm_loadu_ps(ptrA);
    t2 = _mm_loadu_ps(ptrA+4);
    t3 = _mm_loadu_ps(ptrA+8);
    t1 = _mm_sub_ps(t1, t19);
    t2 = _mm_sub_ps(t2, t21);
    t3 = _mm_sub_ps(t3, t23);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_storeu_ps(ptrA+8, t3);
    t4 = _mm_loadu_ps(ptrB);
    t5 = _mm_loadu_ps(ptrB+4);
    t6 = _mm_loadu_ps(ptrB+8);
    t4 = _mm_sub_ps(t4, z1);
    t5 = _mm_sub_ps(t5, x3);
    t6 = _mm_sub_ps(t6, y4);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_storeu_ps(ptrB+8, t6);
    t7 = _mm_loadu_ps(ptrC);
    t8 = _mm_loadu_ps(ptrC+4);
    t9 = _mm_loadu_ps(ptrC+8);
    t7 = _mm_sub_ps(t7, t20);
    t8 = _mm_sub_ps(t8, t22);
    t9 = _mm_sub_ps(t9, t24);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_storeu_ps(ptrC+8, t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_loadu_ps(ptrD+8);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ps(t12, t18);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_storeu_ps(ptrD+8, t12);
}

static gmx_inline void gmx_simdcall
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3;

    /* transpose data */
    t1 = _mm_setzero_ps();
    _MM_TRANSPOSE4_PS(fix1, t1, fiy1, fiz1);
    fix1 = _mm_add_ps(_mm_add_ps(fix1, t1), _mm_add_ps(fiy1, fiz1));

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2, (__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3, (__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2, fix1);
    t3 = _mm_add_ps(t3, fix1);

    _mm_store_ss(fptr, t2);
    _mm_storeh_pi((__m64 *)(fptr+1), t2);
    _mm_store_ss(fshiftptr, t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1), t3);
}
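
/* gmx_mm_update_iforce_1atom_swizzle_ps() transposes the four partial force
 * accumulators together with a zero row, so fix1 ends up holding
 * (sum_x, 0, sum_y, sum_z). That register is then added to both the force
 * array and the shift-force array with the same ss/high-pi load-store pattern
 * used by the decrement routines above.
 */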

static gmx_inline void gmx_simdcall
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4;

    /* transpose data */
    _MM_TRANSPOSE4_PS(fix1, fiy1, fiz1, fix2);
    _MM_TRANSPOSE4_PS(fiy2, fiz2, fix3, fiy3);
    t2 = _mm_movehl_ps(_mm_setzero_ps(), fiz3);
    t1 = _mm_shuffle_ps(fiz3, fiz3, _MM_SHUFFLE(0, 0, 0, 1));
    t3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(0, 0, 0, 1));

    fix1 = _mm_add_ps(_mm_add_ps(fix1, fiy1), _mm_add_ps(fiz1, fix2));
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2, fiz2), _mm_add_ps(fix3, fiy3));
    fiz3 = _mm_add_ss(_mm_add_ps(fiz3, t1), _mm_add_ps(t2, t3));

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_store_ss (fptr+8, _mm_add_ss(fiz3, _mm_load_ss(fptr+8) ));

    t4 = _mm_load_ss(fshiftptr+2);
    t4 = _mm_loadh_pi(t4, (__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fiz3, fix1, _MM_SHUFFLE(1, 0, 0, 0)); /* fiy1 fix1  -  fiz3 */
    t2 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(3, 2, 2, 2)); /* fiy3 fix3  -  fiz1 */
    t3 = _mm_shuffle_ps(fiy2, fix1, _MM_SHUFFLE(3, 3, 0, 1)); /* fix2 fix2 fiy2 fiz2 */
    t3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 2, 0, 0));     /* fiy2 fix2  -  fiz2 */

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3); /* y x - z */

    _mm_store_ss(fshiftptr+2, t1);
    _mm_storeh_pi((__m64 *)(fshiftptr), t1);
}

static gmx_inline void gmx_simdcall
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4, t5;

    /* transpose data */
    _MM_TRANSPOSE4_PS(fix1, fiy1, fiz1, fix2);
    _MM_TRANSPOSE4_PS(fiy2, fiz2, fix3, fiy3);
    _MM_TRANSPOSE4_PS(fiz3, fix4, fiy4, fiz4);

    fix1 = _mm_add_ps(_mm_add_ps(fix1, fiy1), _mm_add_ps(fiz1, fix2));
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2, fiz2), _mm_add_ps(fix3, fiy3));
    fiz3 = _mm_add_ps(_mm_add_ps(fiz3, fix4), _mm_add_ps(fiy4, fiz4));

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_storeu_ps(fptr+8, _mm_add_ps(fiz3, _mm_loadu_ps(fptr+8)));

    t5 = _mm_load_ss(fshiftptr+2);
    t5 = _mm_loadh_pi(t5, (__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fix1, fix1, _MM_SHUFFLE(1, 0, 2, 2));
    t2 = _mm_shuffle_ps(fiy2, fiy2, _MM_SHUFFLE(3, 2, 1, 1));
    t3 = _mm_shuffle_ps(fiz3, fiz3, _MM_SHUFFLE(2, 1, 0, 0));
    t4 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(0, 0, 3, 3));
    t4 = _mm_shuffle_ps(fiz3, t4, _MM_SHUFFLE(2, 0, 3, 3));

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3);
    t5 = _mm_add_ps(t5, t1);

    _mm_store_ss(fshiftptr+2, t5);
    _mm_storeh_pi((__m64 *)(fshiftptr), t5);
}

static gmx_inline void gmx_simdcall
gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
    pot1 = _mm_add_ps(pot1, _mm_movehl_ps(_mm_setzero_ps(), pot1));
    pot1 = _mm_add_ps(pot1, _mm_shuffle_ps(pot1, pot1, _MM_SHUFFLE(0, 0, 0, 1)));
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
}
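
/* gmx_mm_update_1pot_ps() is a horizontal reduction: for pot1 = (a,b,c,d) the
 * first add folds in (c,d), the second folds in element 1, leaving a+b+c+d in
 * element 0, which is then accumulated into *ptrA. Usage sketch with
 * hypothetical names for a kernel's electrostatic energy accumulator:
 *
 *     gmx_mm_update_1pot_ps(velecsum, energygrp_elec);
 */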

static gmx_inline void gmx_simdcall
gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB)
{
    __m128 t1, t2;

    t1 = _mm_movehl_ps(pot2, pot1);
    t2 = _mm_movelh_ps(pot1, pot2);
    t1 = _mm_add_ps(t1, t2);
    t2 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 1, 1));
    pot1 = _mm_add_ps(t1, t2);
    pot2 = _mm_movehl_ps(t2, pot1);
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
    _mm_store_ss(ptrB, _mm_add_ss(pot2, _mm_load_ss(ptrB)));
}
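
/* gmx_mm_update_2pot_ps() reduces two potentials at once, e.g. Coulomb and
 * Lennard-Jones energy accumulators, by interleaving the two horizontal sums
 * so that each total lands in element 0 of its own register before the scalar
 * accumulate into *ptrA and *ptrB.
 */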

#endif /* _kernelutil_x86_sse2_single_h_ */