/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _kernelutil_x86_sse4_1_single_h_
#define _kernelutil_x86_sse4_1_single_h_

#include <smmintrin.h> /* SSE4.1 intrinsics (also pulls in earlier SSE levels) */

#define gmx_mm_castsi128_ps  _mm_castsi128_ps
#define gmx_mm_extract_epi32 _mm_extract_epi32

/* Normal sum of four xmm registers */
#define gmx_mm_sum4_ps(t0, t1, t2, t3) _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3))

static gmx_inline __m128 gmx_simdcall
gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
{
    return _mm_add_ps( _mm_add_ps( _mm_mul_ps(dx, dx), _mm_mul_ps(dy, dy) ), _mm_mul_ps(dz, dz) );
}

static gmx_inline int gmx_simdcall
gmx_mm_any_lt(__m128 a, __m128 b)
{
    return _mm_movemask_ps(_mm_cmplt_ps(a, b));
}

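/* Illustrative sketch only (not part of the kernel API): the two helpers
 * above are typically combined for a cutoff test on four interactions at
 * once. The function name and the rcut2 parameter (squared cutoff
 * broadcast to all four lanes) are hypothetical.
 */
static gmx_inline int gmx_simdcall
gmx_mm_example_any_within_cutoff(__m128 dx, __m128 dy, __m128 dz, __m128 rcut2)
{
    __m128 rsq = gmx_mm_calc_rsq_ps(dx, dy, dz);

    /* Nonzero if at least one of the four distances is inside the cutoff */
    return gmx_mm_any_lt(rsq, rcut2);
}
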
/* Load a single value from 1-4 places, merge into xmm register */

static gmx_inline __m128 gmx_simdcall
gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
                             const float * gmx_restrict ptrB,
                             const float * gmx_restrict ptrC,
                             const float * gmx_restrict ptrD)
{
    __m128 t1, t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA), _mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB), _mm_load_ss(ptrD));
    return _mm_unpacklo_ps(t1, t2);
}

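/* Note for the routine above: element i of the returned register holds
 * *ptrA, *ptrB, *ptrC and *ptrD for i = 0, 1, 2, 3 respectively.
 */
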
static gmx_inline void gmx_simdcall
gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
                              float * gmx_restrict ptrB,
                              float * gmx_restrict ptrC,
                              float * gmx_restrict ptrD,
                              __m128 xmm1)
{
    __m128 t2, t3, t4;

    t3 = _mm_movehl_ps(_mm_setzero_ps(), xmm1);
    t2 = _mm_shuffle_ps(xmm1, xmm1, _MM_SHUFFLE(1, 1, 1, 1));
    t4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 1, 1, 1));
    _mm_store_ss(ptrA, xmm1);
    _mm_store_ss(ptrB, t2);
    _mm_store_ss(ptrC, t3);
    _mm_store_ss(ptrD, t4);
}

/* Similar to store, but increments the value in memory */
static gmx_inline void gmx_simdcall
gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
                                  float * gmx_restrict ptrB,
                                  float * gmx_restrict ptrC,
                                  float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 tmp;

    tmp = gmx_mm_load_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD);
    tmp = _mm_add_ps(tmp, xmm1);
    gmx_mm_store_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, tmp);
}

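/* Usage note: after computing four per-interaction values in one
 * register, a call like
 *
 *     gmx_mm_increment_4real_swizzle_ps(eA, eB, eC, eD, energies);
 *
 * scatter-adds one lane to each address (the names eA..eD and energies
 * are hypothetical). The load/add/store sequence is not atomic, so all
 * four pointers must be owned by the calling thread.
 */
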
static gmx_inline void gmx_simdcall
gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
                             const float * gmx_restrict p2,
                             const float * gmx_restrict p3,
                             const float * gmx_restrict p4,
                             __m128 * gmx_restrict c6,
                             __m128 * gmx_restrict c12)
{
    __m128 t1, t2, t3, t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p1); /* - - c12a c6a */
    t2   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p2); /* - - c12b c6b */
    t3   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p3); /* - - c12c c6c */
    t4   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p4); /* - - c12d c6d */
    t1   = _mm_unpacklo_ps(t1, t2);
    t2   = _mm_unpacklo_ps(t3, t4);
    *c6  = _mm_movelh_ps(t1, t2);
    *c12 = _mm_movehl_ps(t2, t1);
}

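/* As the lane comments above indicate, p1..p4 each point to a {c6, c12}
 * Lennard-Jones parameter pair; the four pairs are transposed so that
 * *c6 and *c12 each hold four like coefficients ready for SIMD use.
 */
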
static gmx_inline void gmx_simdcall
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1,
                                         __m128 * gmx_restrict y1,
                                         __m128 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz);
    t3 = _mm_load_ss(xyz_shift+2);
    t4 = _mm_load_ss(xyz+2);
    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ss(t3, t4);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
}

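/* The routine above adds the shift vector to a single particle position
 * and broadcasts the result to all four lanes, ready to be combined with
 * four j-particle coordinates from the 4ptr loaders further below.
 */
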
static gmx_inline void gmx_simdcall
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_load_ss(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ss(t3, t6);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
}

static gmx_inline void gmx_simdcall
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_loadu_ps(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ps(t3, t6);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
    *x4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 1, 1, 1));
    *y4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2, 2, 2, 2));
    *z4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3, 3, 3, 3));
}

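/* Same pattern as the three-atom version above, extended to a four-atom
 * molecule (e.g. a four-site water model): twelve shifted coordinates
 * are broadcast into twelve registers.
 */
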
static gmx_inline void gmx_simdcall
gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1,
                                  __m128 * gmx_restrict y1,
                                  __m128 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8;

    t1  = _mm_castpd_ps(_mm_load_sd((const double *)ptrA));
    t2  = _mm_castpd_ps(_mm_load_sd((const double *)ptrB));
    t3  = _mm_castpd_ps(_mm_load_sd((const double *)ptrC));
    t4  = _mm_castpd_ps(_mm_load_sd((const double *)ptrD));
    t5  = _mm_load_ss(ptrA+2);
    t6  = _mm_load_ss(ptrB+2);
    t7  = _mm_load_ss(ptrC+2);
    t8  = _mm_load_ss(ptrD+2);
    t1  = _mm_unpacklo_ps(t1, t2);
    t3  = _mm_unpacklo_ps(t3, t4);
    *x1 = _mm_movelh_ps(t1, t3);
    *y1 = _mm_movehl_ps(t3, t1);
    t5  = _mm_unpacklo_ps(t5, t6);
    t7  = _mm_unpacklo_ps(t7, t8);
    *z1 = _mm_movelh_ps(t5, t7);
}

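/* Equivalent to loading four xyz triplets and transposing: lane i of
 * *x1, *y1 and *z1 holds the coordinate from ptrA..ptrD respectively.
 */
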
static gmx_inline void gmx_simdcall
gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1, t2, t3, t4;

    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)ptrA ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)ptrB ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)ptrC ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)ptrD ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrA+4) ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrB+4) ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrC+4) ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrD+4) ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1  = _mm_load_ss(ptrA+8);
    t2  = _mm_load_ss(ptrB+8);
    t3  = _mm_load_ss(ptrC+8);
    t4  = _mm_load_ss(ptrD+8);
    t1  = _mm_unpacklo_ps(t1, t3);
    t3  = _mm_unpacklo_ps(t2, t4);
    *z3 = _mm_unpacklo_ps(t1, t3);
}

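/* The unaligned integer loads (lddqu) combined with _MM_TRANSPOSE4_PS
 * convert four array-of-structures xyz streams into structure-of-arrays
 * registers; the ninth float of each stream (z of the third atom) does
 * not fill a whole register and is gathered with scalar loads instead.
 */
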
static gmx_inline void gmx_simdcall
gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1, t2, t3, t4;

    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrA) ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrB) ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrC) ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrD) ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrA+4) ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrB+4) ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrC+4) ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrD+4) ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrA+8) ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrB+8) ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrC+8) ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrD+8) ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *z3 = t1;
    *x4 = t2;
    *y4 = t3;
    *z4 = t4;
}

static gmx_inline void gmx_simdcall
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * ptrA,
                                       float * ptrB,
                                       float * ptrC,
                                       float * ptrD,
                                       __m128 x1, __m128 y1, __m128 z1)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;

    t5  = _mm_unpacklo_ps(y1, z1);
    t6  = _mm_unpackhi_ps(y1, z1);
    t7  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(1, 0, 0, 0));
    t8  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(3, 2, 0, 1));
    t9  = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(1, 0, 0, 2));
    t10 = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(3, 2, 0, 3));
    t1  = _mm_load_ss(ptrA);
    t1  = _mm_loadh_pi(t1, (__m64 *)(ptrA+1));
    t1  = _mm_sub_ps(t1, t7);
    _mm_store_ss(ptrA, t1);
    _mm_storeh_pi((__m64 *)(ptrA+1), t1);
    t2  = _mm_load_ss(ptrB);
    t2  = _mm_loadh_pi(t2, (__m64 *)(ptrB+1));
    t2  = _mm_sub_ps(t2, t8);
    _mm_store_ss(ptrB, t2);
    _mm_storeh_pi((__m64 *)(ptrB+1), t2);
    t3  = _mm_load_ss(ptrC);
    t3  = _mm_loadh_pi(t3, (__m64 *)(ptrC+1));
    t3  = _mm_sub_ps(t3, t9);
    _mm_store_ss(ptrC, t3);
    _mm_storeh_pi((__m64 *)(ptrC+1), t3);
    t4  = _mm_load_ss(ptrD);
    t4  = _mm_loadh_pi(t4, (__m64 *)(ptrD+1));
    t4  = _mm_sub_ps(t4, t10);
    _mm_store_ss(ptrD, t4);
    _mm_storeh_pi((__m64 *)(ptrD+1), t4);
}

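/* The load_ss/loadh_pi and store_ss/storeh_pi pairs above touch exactly
 * three floats per pointer (offsets 0-2), so nothing beyond each rvec is
 * read or written when subtracting the per-lane forces.
 */
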
static gmx_inline void gmx_simdcall
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    __m128 t11, t12, t13, t14, t15, t16, t17, t18, t19;
    __m128 t20, t21, t22, t23, t24, t25;

    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_shuffle_ps(z3, z3, _MM_SHUFFLE(0, 0, 0, 1));
    t18 = _mm_movehl_ps(z3, z3);
    t19 = _mm_shuffle_ps(t18, t18, _MM_SHUFFLE(0, 0, 0, 1));
    t20 = _mm_movelh_ps(x1, z1);
    t21 = _mm_movehl_ps(z1, x1);
    t22 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t23 = _mm_movelh_ps(y2, x3);
    t24 = _mm_movehl_ps(x3, y2);
    t25 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_load_ss(ptrA+8);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_load_ss(ptrB+8);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_load_ss(ptrC+8);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_load_ss(ptrD+8);

    t1  = _mm_sub_ps(t1, t20);
    t2  = _mm_sub_ps(t2, t23);
    t3  = _mm_sub_ss(t3, z3);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_store_ss(ptrA+8, t3);
    t4  = _mm_sub_ps(t4, t21);
    t5  = _mm_sub_ps(t5, t24);
    t6  = _mm_sub_ss(t6, t17);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_store_ss(ptrB+8, t6);
    t7  = _mm_sub_ps(t7, t22);
    t8  = _mm_sub_ps(t8, t25);
    t9  = _mm_sub_ss(t9, t18);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_store_ss(ptrC+8, t9);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ss(t12, t19);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_store_ss(ptrD+8, t12);
}

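/* The shuffle prologue above transposes the nine structure-of-arrays
 * force components back to array-of-structures order, so the nine floats
 * belonging to each pointer can be updated with two 4-wide subtracts and
 * one scalar subtract.
 */
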
static gmx_inline void gmx_simdcall
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3,
                                       __m128 x4, __m128 y4, __m128 z4)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11;
    __m128 t12, t13, t14, t15, t16, t17, t18, t19, t20, t21, t22;
    __m128 t23, t24;

    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_unpackhi_ps(z3, x4);
    z3  = _mm_unpacklo_ps(z3, x4);
    t18 = _mm_unpackhi_ps(y4, z4);
    y4  = _mm_unpacklo_ps(y4, z4);
    t19 = _mm_movelh_ps(x1, z1);
    z1  = _mm_movehl_ps(z1, x1);
    t20 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t21 = _mm_movelh_ps(y2, x3);
    x3  = _mm_movehl_ps(x3, y2);
    t22 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t23 = _mm_movelh_ps(z3, y4);
    y4  = _mm_movehl_ps(y4, z3);
    t24 = _mm_movelh_ps(t17, t18);
    t18 = _mm_movehl_ps(t18, t17);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_loadu_ps(ptrA+8);
    t1  = _mm_sub_ps(t1, t19);
    t2  = _mm_sub_ps(t2, t21);
    t3  = _mm_sub_ps(t3, t23);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_storeu_ps(ptrA+8, t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_loadu_ps(ptrB+8);
    t4  = _mm_sub_ps(t4, z1);
    t5  = _mm_sub_ps(t5, x3);
    t6  = _mm_sub_ps(t6, y4);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_storeu_ps(ptrB+8, t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_loadu_ps(ptrC+8);
    t7  = _mm_sub_ps(t7, t20);
    t8  = _mm_sub_ps(t8, t22);
    t9  = _mm_sub_ps(t9, t24);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_storeu_ps(ptrC+8, t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_loadu_ps(ptrD+8);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ps(t12, t18);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_storeu_ps(ptrD+8, t12);
}

static gmx_inline void gmx_simdcall
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t2, t3;

    fix1 = _mm_hadd_ps(fix1, fix1);
    fiy1 = _mm_hadd_ps(fiy1, fiz1);

    fix1 = _mm_hadd_ps(fix1, fiy1); /* fiz1 fiy1 fix1 fix1 */

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2, (__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3, (__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2, fix1);
    t3 = _mm_add_ps(t3, fix1);

    _mm_store_ss(fptr, t2);
    _mm_storeh_pi((__m64 *)(fptr+1), t2);
    _mm_store_ss(fshiftptr, t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1), t3);
}

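/* The hadd steps above reduce each force component across the four
 * lanes, leaving fix in element 0 and fiy/fiz in elements 2-3, which
 * matches the load_ss/loadh_pi layout used for fptr and fshiftptr.
 */
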
static gmx_inline void gmx_simdcall
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4;

    fix1 = _mm_hadd_ps(fix1, fiy1);
    fiz1 = _mm_hadd_ps(fiz1, fix2);
    fiy2 = _mm_hadd_ps(fiy2, fiz2);
    fix3 = _mm_hadd_ps(fix3, fiy3);
    fiz3 = _mm_hadd_ps(fiz3, fiz3);

    fix1 = _mm_hadd_ps(fix1, fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2, fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3, fiz3); /* -    -    -    fiz3 */

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_store_ss (fptr+8, _mm_add_ss(fiz3, _mm_load_ss(fptr+8)));

    t4 = _mm_load_ss(fshiftptr+2);
    t4 = _mm_loadh_pi(t4, (__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fiz3, fix1, _MM_SHUFFLE(1, 0, 0, 0)); /* fiy1 fix1  -   fiz3 */
    t2 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(3, 2, 2, 2)); /* fiy3 fix3  -   fiz1 */
    t3 = _mm_shuffle_ps(fiy2, fix1, _MM_SHUFFLE(3, 3, 0, 1)); /* fix2 fix2 fiy2 fiz2 */
    t3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 2, 0, 0));     /* fiy2 fix2  -   fiz2 */

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3); /* y x - z */

    _mm_store_ss(fshiftptr+2, t1);
    _mm_storeh_pi((__m64 *)(fshiftptr), t1);
}

static gmx_inline void gmx_simdcall
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4, t5;

    fix1 = _mm_hadd_ps(fix1, fiy1);
    fiz1 = _mm_hadd_ps(fiz1, fix2);
    fiy2 = _mm_hadd_ps(fiy2, fiz2);
    fix3 = _mm_hadd_ps(fix3, fiy3);
    fiz3 = _mm_hadd_ps(fiz3, fix4);
    fiy4 = _mm_hadd_ps(fiy4, fiz4);

    fix1 = _mm_hadd_ps(fix1, fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2, fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3, fiy4); /* fiz4 fiy4 fix4 fiz3 */

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_storeu_ps(fptr+8, _mm_add_ps(fiz3, _mm_loadu_ps(fptr+8)));

    t5 = _mm_load_ss(fshiftptr+2);
    t5 = _mm_loadh_pi(t5, (__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fix1, fix1, _MM_SHUFFLE(1, 0, 2, 2));
    t2 = _mm_shuffle_ps(fiy2, fiy2, _MM_SHUFFLE(3, 2, 1, 1));
    t3 = _mm_shuffle_ps(fiz3, fiz3, _MM_SHUFFLE(2, 1, 0, 0));
    t4 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(0, 0, 3, 3));
    t4 = _mm_shuffle_ps(fiz3, t4, _MM_SHUFFLE(2, 0, 3, 3));

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3);
    t5 = _mm_add_ps(t5, t1);

    _mm_store_ss(fshiftptr+2, t5);
    _mm_storeh_pi((__m64 *)(fshiftptr), t5);
}

static gmx_inline void gmx_simdcall
gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
    pot1 = _mm_add_ps(pot1, _mm_movehl_ps(_mm_setzero_ps(), pot1));
    pot1 = _mm_add_ps(pot1, _mm_shuffle_ps(pot1, pot1, _MM_SHUFFLE(0, 0, 0, 1)));
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
}

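/* Standard horizontal sum: the upper half is folded onto the lower half,
 * the two remaining elements are added, and the scalar total is
 * accumulated into *ptrA.
 */
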
static gmx_inline void gmx_simdcall
gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB)
{
    __m128 t1, t2;

    t1   = _mm_movehl_ps(pot2, pot1);
    t2   = _mm_movelh_ps(pot1, pot2);
    t1   = _mm_add_ps(t1, t2);
    t2   = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 1, 1));
    pot1 = _mm_add_ps(t1, t2);
    pot2 = _mm_movehl_ps(t2, pot1);
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
    _mm_store_ss(ptrB, _mm_add_ss(pot2, _mm_load_ss(ptrB)));
}

#endif /* _kernelutil_x86_sse4_1_single_h_ */