/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _kernelutil_x86_sse4_1_single_h_
#define _kernelutil_x86_sse4_1_single_h_

#include <smmintrin.h>
#define gmx_mm_castsi128_ps   _mm_castsi128_ps
#define gmx_mm_extract_epi32  _mm_extract_epi32
/* Element-wise sum of four xmm registers */
#define gmx_mm_sum4_ps(t0, t1, t2, t3) _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3))
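/* Example: if t0..t3 hold partial energies accumulated in separate registers,
 * gmx_mm_sum4_ps(t0, t1, t2, t3) combines them element-wise with a two-level
 * tree of adds instead of three dependent additions.
 */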
static gmx_inline __m128 gmx_simdcall
gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
{
    return _mm_add_ps( _mm_add_ps( _mm_mul_ps(dx, dx), _mm_mul_ps(dy, dy) ), _mm_mul_ps(dz, dz) );
}
static gmx_inline int gmx_simdcall
gmx_mm_any_lt(__m128 a, __m128 b)
{
    return _mm_movemask_ps(_mm_cmplt_ps(a, b));
}
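/* Illustrative helper (not part of the original kernels): returns nonzero
 * when at least one of the four squared distances built from dx/dy/dz lies
 * inside the squared cutoff rcut2, combining the two helpers above.
 */
static gmx_inline int gmx_simdcall
gmx_mm_example_any_within_cutoff(__m128 dx, __m128 dy, __m128 dz, __m128 rcut2)
{
    return gmx_mm_any_lt(gmx_mm_calc_rsq_ps(dx, dy, dz), rcut2);
}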
/* Load a single value from each of 1-4 places, merge into one xmm register */

static gmx_inline __m128 gmx_simdcall
gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
                             const float * gmx_restrict ptrB,
                             const float * gmx_restrict ptrC,
                             const float * gmx_restrict ptrD)
{
    __m128 t1, t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA), _mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB), _mm_load_ss(ptrD));
    return _mm_unpacklo_ps(t1, t2);
}
static gmx_inline void gmx_simdcall
gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
                              float * gmx_restrict ptrB,
                              float * gmx_restrict ptrC,
                              float * gmx_restrict ptrD,
                              __m128 xmm1)
{
    __m128 t2, t3, t4;

    t3 = _mm_movehl_ps(_mm_setzero_ps(), xmm1);
    t2 = _mm_shuffle_ps(xmm1, xmm1, _MM_SHUFFLE(1, 1, 1, 1));
    t4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 1, 1, 1));
    _mm_store_ss(ptrA, xmm1);
    _mm_store_ss(ptrB, t2);
    _mm_store_ss(ptrC, t3);
    _mm_store_ss(ptrD, t4);
}
/* Similar to store, but increments the value in memory */
static gmx_inline void gmx_simdcall
gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
                                  float * gmx_restrict ptrB,
                                  float * gmx_restrict ptrC,
                                  float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 tmp;

    tmp = gmx_mm_load_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD);
    tmp = _mm_add_ps(tmp, xmm1);
    gmx_mm_store_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, tmp);
}
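/* Illustrative helper (hypothetical, for exposition only): scales the four
 * scalars at ptrA..ptrD in one register using the swizzled load/store pair
 * above.
 */
static gmx_inline void gmx_simdcall
gmx_mm_example_scale_4real(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                           float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                           float scale)
{
    __m128 v = gmx_mm_load_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD);
    gmx_mm_store_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, _mm_mul_ps(v, _mm_set1_ps(scale)));
}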
static gmx_inline void gmx_simdcall
gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
                             const float * gmx_restrict p2,
                             const float * gmx_restrict p3,
                             const float * gmx_restrict p4,
                             __m128 * gmx_restrict c6,
                             __m128 * gmx_restrict c12)
{
    __m128 t1, t2, t3, t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p1); /* - - c12a c6a */
    t2   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p2); /* - - c12b c6b */
    t3   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p3); /* - - c12c c6c */
    t4   = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p4); /* - - c12d c6d */
    t1   = _mm_unpacklo_ps(t1, t2);
    t2   = _mm_unpacklo_ps(t3, t4);
    *c6  = _mm_movelh_ps(t1, t2);
    *c12 = _mm_movehl_ps(t2, t1);
}
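/* The unpack/move sequence above is a 4x2 transpose: four interleaved
 * (c6, c12) parameter pairs from memory come out as one register holding the
 * four c6 values and one holding the four c12 values.
 */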
static gmx_inline void gmx_simdcall
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1,
                                         __m128 * gmx_restrict y1,
                                         __m128 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz);
    t3 = _mm_load_ss(xyz_shift+2);
    t4 = _mm_load_ss(xyz+2);
    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ss(t3, t4);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
}
static gmx_inline void gmx_simdcall
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_load_ss(xyz+8);

    /* Rotate the (shx, shy, shz) shift so it lines up with each xyz triplet */
    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ss(t3, t6);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
}
static gmx_inline void gmx_simdcall
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6;

    tA = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_loadu_ps(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_shuffle_ps(tA, tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ps(t3, t6);

    *x1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0));
    *y1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1));
    *z1 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2));
    *x2 = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 3, 3));
    *y2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(0, 0, 0, 0));
    *z2 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(1, 1, 1, 1));
    *x3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2, 2, 2, 2));
    *y3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3, 3, 3, 3));
    *z3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 0, 0));
    *x4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 1, 1, 1));
    *y4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2, 2, 2, 2));
    *z4 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3, 3, 3, 3));
}
static gmx_inline void gmx_simdcall
gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1,
                                  __m128 * gmx_restrict y1,
                                  __m128 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8;
    t1  = _mm_castpd_ps(_mm_load_sd((const double *)ptrA));
    t2  = _mm_castpd_ps(_mm_load_sd((const double *)ptrB));
    t3  = _mm_castpd_ps(_mm_load_sd((const double *)ptrC));
    t4  = _mm_castpd_ps(_mm_load_sd((const double *)ptrD));
    t5  = _mm_load_ss(ptrA+2);
    t6  = _mm_load_ss(ptrB+2);
    t7  = _mm_load_ss(ptrC+2);
    t8  = _mm_load_ss(ptrD+2);
    t1  = _mm_unpacklo_ps(t1, t2);
    t3  = _mm_unpacklo_ps(t3, t4);
    *x1 = _mm_movelh_ps(t1, t3);
    *y1 = _mm_movehl_ps(t3, t1);
    t5  = _mm_unpacklo_ps(t5, t6);
    t7  = _mm_unpacklo_ps(t7, t8);
    *z1 = _mm_movelh_ps(t5, t7);
}
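/* Illustrative helper (hypothetical): squared distances between one
 * shift-corrected i-site and four j-sites, combining the broadcast and
 * swizzle loaders above with gmx_mm_calc_rsq_ps.
 */
static gmx_inline __m128 gmx_simdcall
gmx_mm_example_rsq_1i_4j(const float * gmx_restrict shift,
                         const float * gmx_restrict xi,
                         const float * gmx_restrict xjA, const float * gmx_restrict xjB,
                         const float * gmx_restrict xjC, const float * gmx_restrict xjD)
{
    __m128 ix, iy, iz, jx, jy, jz;

    gmx_mm_load_shift_and_1rvec_broadcast_ps(shift, xi, &ix, &iy, &iz);
    gmx_mm_load_1rvec_4ptr_swizzle_ps(xjA, xjB, xjC, xjD, &jx, &jy, &jz);
    return gmx_mm_calc_rsq_ps(_mm_sub_ps(ix, jx), _mm_sub_ps(iy, jy),
                              _mm_sub_ps(iz, jz));
}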
static gmx_inline void gmx_simdcall
gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1, t2, t3, t4;
    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)ptrA ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)ptrB ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)ptrC ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)ptrD ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrA+4) ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrB+4) ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrC+4) ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrD+4) ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1  = _mm_load_ss(ptrA+8);
    t2  = _mm_load_ss(ptrB+8);
    t3  = _mm_load_ss(ptrC+8);
    t4  = _mm_load_ss(ptrD+8);
    t1  = _mm_unpacklo_ps(t1, t3);
    t3  = _mm_unpacklo_ps(t2, t4);
    *z3 = _mm_unpacklo_ps(t1, t3);
}
static gmx_inline void gmx_simdcall
gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1, t2, t3, t4;
    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrA) ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrB) ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrC) ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrD) ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrA+4) ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrB+4) ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrC+4) ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrD+4) ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = t1;
    *z2 = t2;
    *x3 = t3;
    *y3 = t4;
    t1  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrA+8) ) );
    t2  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrB+8) ) );
    t3  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrC+8) ) );
    t4  = gmx_mm_castsi128_ps( _mm_lddqu_si128( (__m128i *)(ptrD+8) ) );
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *z3 = t1;
    *x4 = t2;
    *y4 = t3;
    *z4 = t4;
}
static gmx_inline void gmx_simdcall
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA,
                                       float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC,
                                       float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    t5  = _mm_unpacklo_ps(y1, z1);
    t6  = _mm_unpackhi_ps(y1, z1);
    t7  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(1, 0, 0, 0));
    t8  = _mm_shuffle_ps(x1, t5, _MM_SHUFFLE(3, 2, 0, 1));
    t9  = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(1, 0, 0, 2));
    t10 = _mm_shuffle_ps(x1, t6, _MM_SHUFFLE(3, 2, 0, 3));
    t1  = _mm_load_ss(ptrA);
    t1  = _mm_loadh_pi(t1, (__m64 *)(ptrA+1));
    t1  = _mm_sub_ps(t1, t7);
    _mm_store_ss(ptrA, t1);
    _mm_storeh_pi((__m64 *)(ptrA+1), t1);
    t2  = _mm_load_ss(ptrB);
    t2  = _mm_loadh_pi(t2, (__m64 *)(ptrB+1));
    t2  = _mm_sub_ps(t2, t8);
    _mm_store_ss(ptrB, t2);
    _mm_storeh_pi((__m64 *)(ptrB+1), t2);
    t3  = _mm_load_ss(ptrC);
    t3  = _mm_loadh_pi(t3, (__m64 *)(ptrC+1));
    t3  = _mm_sub_ps(t3, t9);
    _mm_store_ss(ptrC, t3);
    _mm_storeh_pi((__m64 *)(ptrC+1), t3);
    t4  = _mm_load_ss(ptrD);
    t4  = _mm_loadh_pi(t4, (__m64 *)(ptrD+1));
    t4  = _mm_sub_ps(t4, t10);
    _mm_store_ss(ptrD, t4);
    _mm_storeh_pi((__m64 *)(ptrD+1), t4);
}
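/* Note the subtraction in the functions above and below: by Newton's third
 * law the force on each j-particle is minus the contribution accumulated for
 * the i-particle, so the j-forces are decremented in memory.
 */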
static gmx_inline void gmx_simdcall
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    __m128 t11, t12, t13, t14, t15, t16, t17, t18, t19;
    __m128 t20, t21, t22, t23, t24, t25;

    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_shuffle_ps(z3, z3, _MM_SHUFFLE(0, 0, 0, 1));
    t18 = _mm_movehl_ps(z3, z3);
    t19 = _mm_shuffle_ps(t18, t18, _MM_SHUFFLE(0, 0, 0, 1));
    t20 = _mm_movelh_ps(x1, z1);
    t21 = _mm_movehl_ps(z1, x1);
    t22 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t23 = _mm_movelh_ps(y2, x3);
    t24 = _mm_movehl_ps(x3, y2);
    t25 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_load_ss(ptrA+8);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_load_ss(ptrB+8);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_load_ss(ptrC+8);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_load_ss(ptrD+8);

    t1  = _mm_sub_ps(t1, t20);
    t2  = _mm_sub_ps(t2, t23);
    t3  = _mm_sub_ss(t3, z3);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_store_ss(ptrA+8, t3);
    t4  = _mm_sub_ps(t4, t21);
    t5  = _mm_sub_ps(t5, t24);
    t6  = _mm_sub_ss(t6, t17);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_store_ss(ptrB+8, t6);
    t7  = _mm_sub_ps(t7, t22);
    t8  = _mm_sub_ps(t8, t25);
    t9  = _mm_sub_ss(t9, t18);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_store_ss(ptrC+8, t9);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ss(t12, t19);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_store_ss(ptrD+8, t12);
}
static gmx_inline void gmx_simdcall
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3,
                                       __m128 x4, __m128 y4, __m128 z4)
{
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11;
    __m128 t12, t13, t14, t15, t16, t17, t18, t19, t20, t21, t22;
    __m128 t23, t24;

    t13 = _mm_unpackhi_ps(x1, y1);
    x1  = _mm_unpacklo_ps(x1, y1);
    t14 = _mm_unpackhi_ps(z1, x2);
    z1  = _mm_unpacklo_ps(z1, x2);
    t15 = _mm_unpackhi_ps(y2, z2);
    y2  = _mm_unpacklo_ps(y2, z2);
    t16 = _mm_unpackhi_ps(x3, y3);
    x3  = _mm_unpacklo_ps(x3, y3);
    t17 = _mm_unpackhi_ps(z3, x4);
    z3  = _mm_unpacklo_ps(z3, x4);
    t18 = _mm_unpackhi_ps(y4, z4);
    y4  = _mm_unpacklo_ps(y4, z4);
    t19 = _mm_movelh_ps(x1, z1);
    z1  = _mm_movehl_ps(z1, x1);
    t20 = _mm_movelh_ps(t13, t14);
    t14 = _mm_movehl_ps(t14, t13);
    t21 = _mm_movelh_ps(y2, x3);
    x3  = _mm_movehl_ps(x3, y2);
    t22 = _mm_movelh_ps(t15, t16);
    t16 = _mm_movehl_ps(t16, t15);
    t23 = _mm_movelh_ps(z3, y4);
    y4  = _mm_movehl_ps(y4, z3);
    t24 = _mm_movelh_ps(t17, t18);
    t18 = _mm_movehl_ps(t18, t17);
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrA+4);
    t3  = _mm_loadu_ps(ptrA+8);
    t1  = _mm_sub_ps(t1, t19);
    t2  = _mm_sub_ps(t2, t21);
    t3  = _mm_sub_ps(t3, t23);
    _mm_storeu_ps(ptrA, t1);
    _mm_storeu_ps(ptrA+4, t2);
    _mm_storeu_ps(ptrA+8, t3);
    t4  = _mm_loadu_ps(ptrB);
    t5  = _mm_loadu_ps(ptrB+4);
    t6  = _mm_loadu_ps(ptrB+8);
    t4  = _mm_sub_ps(t4, z1);
    t5  = _mm_sub_ps(t5, x3);
    t6  = _mm_sub_ps(t6, y4);
    _mm_storeu_ps(ptrB, t4);
    _mm_storeu_ps(ptrB+4, t5);
    _mm_storeu_ps(ptrB+8, t6);
    t7  = _mm_loadu_ps(ptrC);
    t8  = _mm_loadu_ps(ptrC+4);
    t9  = _mm_loadu_ps(ptrC+8);
    t7  = _mm_sub_ps(t7, t20);
    t8  = _mm_sub_ps(t8, t22);
    t9  = _mm_sub_ps(t9, t24);
    _mm_storeu_ps(ptrC, t7);
    _mm_storeu_ps(ptrC+4, t8);
    _mm_storeu_ps(ptrC+8, t9);
    t10 = _mm_loadu_ps(ptrD);
    t11 = _mm_loadu_ps(ptrD+4);
    t12 = _mm_loadu_ps(ptrD+8);
    t10 = _mm_sub_ps(t10, t14);
    t11 = _mm_sub_ps(t11, t16);
    t12 = _mm_sub_ps(t12, t18);
    _mm_storeu_ps(ptrD, t10);
    _mm_storeu_ps(ptrD+4, t11);
    _mm_storeu_ps(ptrD+8, t12);
}
static gmx_inline void gmx_simdcall
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t2, t3;

    fix1 = _mm_hadd_ps(fix1, fix1);
    fiy1 = _mm_hadd_ps(fiy1, fiz1);
    fix1 = _mm_hadd_ps(fix1, fiy1); /* fiz1 fiy1 fix1 fix1 */

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2, (__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3, (__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2, fix1);
    t3 = _mm_add_ps(t3, fix1);

    _mm_store_ss(fptr, t2);
    _mm_storeh_pi((__m64 *)(fptr+1), t2);
    _mm_store_ss(fshiftptr, t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1), t3);
}
static gmx_inline void gmx_simdcall
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4;

    fix1 = _mm_hadd_ps(fix1, fiy1);
    fiz1 = _mm_hadd_ps(fiz1, fix2);
    fiy2 = _mm_hadd_ps(fiy2, fiz2);
    fix3 = _mm_hadd_ps(fix3, fiy3);
    fiz3 = _mm_hadd_ps(fiz3, fiz3);

    fix1 = _mm_hadd_ps(fix1, fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2, fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3, fiz3); /* -    -    -    fiz3 */

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_store_ss (fptr+8, _mm_add_ss(fiz3, _mm_load_ss(fptr+8) ));

    t4 = _mm_load_ss(fshiftptr+2);
    t4 = _mm_loadh_pi(t4, (__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fiz3, fix1, _MM_SHUFFLE(1, 0, 0, 0)); /* fiy1 fix1  -   fiz3 */
    t2 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(3, 2, 2, 2)); /* fiy3 fix3  -   fiz1 */
    t3 = _mm_shuffle_ps(fiy2, fix1, _MM_SHUFFLE(3, 3, 0, 1)); /* fix2 fix2 fiy2 fiz2 */
    t3 = _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(1, 2, 0, 0));     /* fiy2 fix2  -   fiz2 */

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3); /* y x - z */

    _mm_store_ss(fshiftptr+2, t1);
    _mm_storeh_pi((__m64 *)(fshiftptr), t1);
}
static gmx_inline void gmx_simdcall
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3, t4, t5;

    fix1 = _mm_hadd_ps(fix1, fiy1);
    fiz1 = _mm_hadd_ps(fiz1, fix2);
    fiy2 = _mm_hadd_ps(fiy2, fiz2);
    fix3 = _mm_hadd_ps(fix3, fiy3);
    fiz3 = _mm_hadd_ps(fiz3, fix4);
    fiy4 = _mm_hadd_ps(fiy4, fiz4);

    fix1 = _mm_hadd_ps(fix1, fiz1); /* fix2 fiz1 fiy1 fix1 */
    fiy2 = _mm_hadd_ps(fiy2, fix3); /* fiy3 fix3 fiz2 fiy2 */
    fiz3 = _mm_hadd_ps(fiz3, fiy4); /* fiz4 fiy4 fix4 fiz3 */

    _mm_storeu_ps(fptr,   _mm_add_ps(fix1, _mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4, _mm_add_ps(fiy2, _mm_loadu_ps(fptr+4)));
    _mm_storeu_ps(fptr+8, _mm_add_ps(fiz3, _mm_loadu_ps(fptr+8)));

    t5 = _mm_load_ss(fshiftptr+2);
    t5 = _mm_loadh_pi(t5, (__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fix1, fix1, _MM_SHUFFLE(1, 0, 2, 2));
    t2 = _mm_shuffle_ps(fiy2, fiy2, _MM_SHUFFLE(3, 2, 1, 1));
    t3 = _mm_shuffle_ps(fiz3, fiz3, _MM_SHUFFLE(2, 1, 0, 0));
    t4 = _mm_shuffle_ps(fix1, fiy2, _MM_SHUFFLE(0, 0, 3, 3));
    t4 = _mm_shuffle_ps(fiz3, t4, _MM_SHUFFLE(2, 0, 3, 3));

    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ps(t3, t4);
    t1 = _mm_add_ps(t1, t3);
    t5 = _mm_add_ps(t5, t1);

    _mm_store_ss(fshiftptr+2, t5);
    _mm_storeh_pi((__m64 *)(fshiftptr), t5);
}
static gmx_inline void gmx_simdcall
gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
    pot1 = _mm_add_ps(pot1, _mm_movehl_ps(_mm_setzero_ps(), pot1));
    pot1 = _mm_add_ps(pot1, _mm_shuffle_ps(pot1, pot1, _MM_SHUFFLE(0, 0, 0, 1)));
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
}
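/* The movehl/shuffle pair above is a horizontal reduction: after it, element
 * 0 of pot1 holds the sum of all four lanes, which the scalar add/store then
 * accumulates into *ptrA.
 */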
static gmx_inline void gmx_simdcall
gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB)
{
    __m128 t1, t2;

    t1   = _mm_movehl_ps(pot2, pot1);
    t2   = _mm_movelh_ps(pot1, pot2);
    t1   = _mm_add_ps(t1, t2);
    t2   = _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 1, 1));
    pot1 = _mm_add_ps(t1, t2);
    pot2 = _mm_movehl_ps(t2, pot1);
    _mm_store_ss(ptrA, _mm_add_ss(pot1, _mm_load_ss(ptrA)));
    _mm_store_ss(ptrB, _mm_add_ss(pot2, _mm_load_ss(ptrB)));
}
/* Negative zero supplies the sign-bit mask used to compute |y| below. The
 * bit-pattern form keeps aggressive floating-point optimization from
 * discarding the sign bit, but it relies on GNU statement expressions, which
 * MSVC lacks. Note: the exact preprocessor guard was lost from this copy and
 * is reconstructed here on that assumption.
 */
#if !defined(_MSC_VER)
#    define SSE41_FLOAT_NEGZERO ({ const union { int fi; float f; } _gmx_fzero = {-2147483648}; _gmx_fzero.f; })
#else
#    define SSE41_FLOAT_NEGZERO (-0.0f)
#endif
static gmx_inline __m128 gmx_simdcall
sse41_set_exponent_f(__m128 x)
{
    const __m128i expbias = _mm_set1_epi32(127);
    __m128i       iexp    = _mm_cvtps_epi32(x);

    iexp = _mm_slli_epi32(_mm_add_epi32(iexp, expbias), 23);
    return _mm_castsi128_ps(iexp);
}
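/* Worked example: for an element of x equal to 3.2f, _mm_cvtps_epi32 rounds
 * to 3, the bias gives 3 + 127 = 130, and 130 << 23 is the IEEE-754 bit
 * pattern of 2^3 = 8.0f.
 */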
static gmx_inline __m128 gmx_simdcall
sse41_exp_f(__m128 x)
{
    const __m128 argscale     = _mm_set1_ps(1.44269504088896341f);
    /* Lower bound: Disallow numbers that would lead to an IEEE fp exponent reaching +-127. */
    const __m128 arglimit     = _mm_set1_ps(126.0f);
    const __m128 invargscale0 = _mm_set1_ps(-0.693145751953125f);
    const __m128 invargscale1 = _mm_set1_ps(-1.428606765330187045e-06f);
    const __m128 CC4          = _mm_set1_ps(0.00136324646882712841033936f);
    const __m128 CC3          = _mm_set1_ps(0.00836596917361021041870117f);
    const __m128 CC2          = _mm_set1_ps(0.0416710823774337768554688f);
    const __m128 CC1          = _mm_set1_ps(0.166665524244308471679688f);
    const __m128 CC0          = _mm_set1_ps(0.499999850988388061523438f);
    const __m128 one          = _mm_set1_ps(1.0f);
    __m128       fexppart;
    __m128       intpart;
    __m128       valuemask;
    __m128       y, p;

    /* exp(x) = 2^i * exp(x - i*ln(2)) with i = round(x/ln(2)): the 2^i scale
     * comes from set_exponent, the remainder from the polynomial below.
     */
    y         = _mm_mul_ps(x, argscale);
    fexppart  = sse41_set_exponent_f(y);
    intpart   = _mm_cvtepi32_ps(_mm_cvtps_epi32(y));
    valuemask = _mm_cmple_ps(_mm_andnot_ps(_mm_set1_ps(SSE41_FLOAT_NEGZERO), y), arglimit);
    fexppart  = _mm_and_ps(fexppart, valuemask);

    /* Extended precision arithmetics: subtract i*ln(2) in two pieces */
    x = _mm_add_ps(_mm_mul_ps(invargscale0, intpart), x);
    x = _mm_add_ps(_mm_mul_ps(invargscale1, intpart), x);

    p = _mm_add_ps(_mm_mul_ps(CC4, x), CC3);
    p = _mm_add_ps(_mm_mul_ps(p, x), CC2);
    p = _mm_add_ps(_mm_mul_ps(p, x), CC1);
    p = _mm_add_ps(_mm_mul_ps(p, x), CC0);
    p = _mm_add_ps(_mm_mul_ps(_mm_mul_ps(x, x), p), x);
    p = _mm_add_ps(p, one);
    x = _mm_mul_ps(p, fexppart);
    return x;
}
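/* Minimal usage sketch (hypothetical driver, not from the original file):
 * evaluates exp() for four packed floats through the routine above.
 */
static gmx_inline void gmx_simdcall
sse41_example_exp4(const float * gmx_restrict in, float * gmx_restrict out)
{
    _mm_storeu_ps(out, sse41_exp_f(_mm_loadu_ps(in)));
}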
static gmx_inline __m128 gmx_simdcall
sse41_invsqrt_f(__m128 x)
{
    __m128 lu = _mm_rsqrt_ps(x);

    /* One Newton-Raphson step, lu' = 0.5*lu*(3 - lu*lu*x), roughly doubles
     * the ~11 bits of accuracy of the hardware rsqrt estimate.
     */
    return _mm_mul_ps(_mm_set1_ps(0.5f), _mm_mul_ps(_mm_sub_ps(_mm_set1_ps(3.0f), _mm_mul_ps(_mm_mul_ps(lu, lu), x)), lu));
}
static gmx_inline __m128 gmx_simdcall
sse41_inv_f(__m128 x)
{
    __m128 lu = _mm_rcp_ps(x);

    /* One Newton-Raphson step, lu' = lu*(2 - lu*x), refines the ~11-bit
     * hardware reciprocal estimate.
     */
    return _mm_mul_ps(lu, _mm_sub_ps(_mm_set1_ps(2.0f), _mm_mul_ps(lu, x)));
}
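/* Illustrative sketch (not from the original file): how the pieces above
 * combine in the inner loop of a Lennard-Jones kernel. For
 * V = c12/r^12 - c6/r^6, the scalar force F/r = (12*c12/r^12 - 6*c6/r^6)/r^2,
 * built here from the iterated reciprocal square root.
 */
static gmx_inline __m128 gmx_simdcall
sse41_example_lj_fscal(__m128 rsq, __m128 c6, __m128 c12)
{
    __m128 rinv   = sse41_invsqrt_f(rsq);
    __m128 rinvsq = _mm_mul_ps(rinv, rinv);
    __m128 rinv6  = _mm_mul_ps(_mm_mul_ps(rinvsq, rinvsq), rinvsq);

    return _mm_mul_ps(_mm_sub_ps(_mm_mul_ps(_mm_set1_ps(12.0f), _mm_mul_ps(c12, _mm_mul_ps(rinv6, rinv6))),
                                 _mm_mul_ps(_mm_set1_ps(6.0f),  _mm_mul_ps(c6, rinv6))),
                      rinvsq);
}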
#endif /* _kernelutil_x86_sse4_1_single_h_ */