/*
 * This source code is part of
 *
 *                 G R O M A C S
 *
 * Copyright (c) 2011-2012, The GROMACS Development Team
 *
 * Gromacs is a library for molecular simulation and trajectory analysis,
 * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 * a full list of developers and information, check out http://www.gromacs.org
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction.  Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU Lesser General Public License.
 *
 * In plain-speak: do not worry about classes/macros/templates either - only
 * changes to the library have to be LGPL, not an application linking with it.
 *
 * To help fund GROMACS development, we humbly ask that you cite
 * the papers people have written on it - you can find them on the website!
 */
#ifndef _kernelutil_x86_avx_128_fma_double_h_
#define _kernelutil_x86_avx_128_fma_double_h_

#include "gmx_x86_avx_128_fma.h"

static gmx_inline int
gmx_mm_any_lt(__m128d a, __m128d b)
{
    return _mm_movemask_pd(_mm_cmplt_pd(a,b));
}

static gmx_inline __m128d
gmx_mm_calc_rsq_pd(__m128d dx, __m128d dy, __m128d dz)
{
    return _mm_macc_pd(dx,dx,_mm_macc_pd(dy,dy,_mm_mul_pd(dz,dz)));
}

/* Normal sum of four xmm registers */
#define gmx_mm_sum4_pd(t0,t1,t2,t3)  _mm_add_pd(_mm_add_pd(t0,t1),_mm_add_pd(t2,t3))

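/*
 * Usage sketch (illustrative only; ix/iy/iz, jx/jy/jz and the four partial
 * accumulators vA..vD are hypothetical names, not part of this header):
 *
 *     __m128d dx   = _mm_sub_pd(ix,jx);
 *     __m128d dy   = _mm_sub_pd(iy,jy);
 *     __m128d dz   = _mm_sub_pd(iz,jz);
 *     __m128d rsq  = gmx_mm_calc_rsq_pd(dx,dy,dz);  // dx*dx+dy*dy+dz*dz via FMA
 *     __m128d vtot = gmx_mm_sum4_pd(vA,vB,vC,vD);   // elementwise sum of 4 registers
 */
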
/* Load a double value from 1-2 places, merge into xmm register */

static gmx_inline __m128d
gmx_mm_load_2real_swizzle_pd(const double * gmx_restrict ptrA,
                             const double * gmx_restrict ptrB)
{
    return _mm_unpacklo_pd(_mm_load_sd(ptrA),_mm_load_sd(ptrB));
}

static gmx_inline __m128d
gmx_mm_load_1real_pd(const double * gmx_restrict ptrA)
{
    return _mm_load_sd(ptrA);
}

static gmx_inline void
gmx_mm_store_2real_swizzle_pd(double * gmx_restrict ptrA,
                              double * gmx_restrict ptrB,
                              __m128d xmm1)
{
    __m128d t2;

    t2 = _mm_unpackhi_pd(xmm1,xmm1);
    _mm_store_sd(ptrA,xmm1);
    _mm_store_sd(ptrB,t2);
}

static gmx_inline void
gmx_mm_store_1real_pd(double * gmx_restrict ptrA, __m128d xmm1)
{
    _mm_store_sd(ptrA,xmm1);
}

/* Similar to store, but increments the value in memory */

static gmx_inline void
gmx_mm_increment_2real_swizzle_pd(double * gmx_restrict ptrA,
                                  double * gmx_restrict ptrB, __m128d xmm1)
{
    __m128d t1;

    t1   = _mm_unpackhi_pd(xmm1,xmm1);
    xmm1 = _mm_add_sd(xmm1,_mm_load_sd(ptrA));
    t1   = _mm_add_sd(t1,_mm_load_sd(ptrB));
    _mm_store_sd(ptrA,xmm1);
    _mm_store_sd(ptrB,t1);
}

static gmx_inline void
gmx_mm_increment_1real_pd(double * gmx_restrict ptrA, __m128d xmm1)
{
    __m128d tmp;

    tmp = gmx_mm_load_1real_pd(ptrA);
    tmp = _mm_add_sd(tmp,xmm1);
    gmx_mm_store_1real_pd(ptrA,tmp);
}

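/*
 * Usage sketch (illustrative only; ptrA/ptrB/scale are hypothetical names):
 * load one value from each of two particles, scale both in a single
 * register, and scatter-increment the results back to memory:
 *
 *     __m128d q = gmx_mm_load_2real_swizzle_pd(ptrA,ptrB); // {*ptrA, *ptrB}
 *     q = _mm_mul_pd(q,scale);
 *     gmx_mm_increment_2real_swizzle_pd(ptrA,ptrB,q);      // *ptrA += q0, *ptrB += q1
 */
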
static gmx_inline void
gmx_mm_load_2pair_swizzle_pd(const double * gmx_restrict p1,
                             const double * gmx_restrict p2,
                             __m128d * gmx_restrict c6,
                             __m128d * gmx_restrict c12)
{
    __m128d t1,t2;

    /* Each pointer refers to a consecutive {c6,c12} pair; the unaligned
     * loads mean the array does not have to be 16-byte aligned.
     */
    t1   = _mm_loadu_pd(p1);
    t2   = _mm_loadu_pd(p2);
    *c6  = _mm_unpacklo_pd(t1,t2);
    *c12 = _mm_unpackhi_pd(t1,t2);
}

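/*
 * Layout sketch (illustrative): gathering two parameter pairs performs an
 * on-the-fly 2x2 transpose via unpacklo/unpackhi:
 *
 *     p1 -> {c6_A, c12_A},   p2 -> {c6_B, c12_B}
 *     *c6  = {c6_A, c6_B},  *c12 = {c12_A, c12_B}
 */
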
static gmx_inline void
gmx_mm_load_1pair_swizzle_pd(const double * gmx_restrict p1,
                             __m128d * gmx_restrict c6,
                             __m128d * gmx_restrict c12)
{
    *c6  = _mm_load_sd(p1);
    *c12 = _mm_load_sd(p1+1);
}

static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1,
                                         __m128d * gmx_restrict y1,
                                         __m128d * gmx_restrict z1)
{
    __m128d mem_xy,mem_z,mem_sxy,mem_sz;

    mem_xy  = _mm_loadu_pd(xyz);
    mem_z   = _mm_load_sd(xyz+2);
    mem_sxy = _mm_loadu_pd(xyz_shift);
    mem_sz  = _mm_load_sd(xyz_shift+2);

    mem_xy  = _mm_add_pd(mem_xy,mem_sxy);
    mem_z   = _mm_add_pd(mem_z,mem_sz);

    *x1 = _mm_shuffle_pd(mem_xy,mem_xy,_MM_SHUFFLE2(0,0));
    *y1 = _mm_shuffle_pd(mem_xy,mem_xy,_MM_SHUFFLE2(1,1));
    *z1 = _mm_shuffle_pd(mem_z,mem_z,_MM_SHUFFLE2(0,0));
}

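/*
 * Usage sketch (illustrative): xyz points at one rvec {x,y,z} and xyz_shift
 * at the periodic shift vector for this neighbor-list entry. On return,
 * each of *x1, *y1, *z1 holds the shifted coordinate broadcast to both
 * lanes, ready for pairwise arithmetic against two j-particles at once.
 * The 3rvec/4rvec variants below apply the same pattern to water-like
 * groups of three or four atoms stored contiguously.
 */
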
static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                         __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                         __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    __m128d t1,t2,t3,t4,t5,sxy,sz,szx,syz;

    t1  = _mm_loadu_pd(xyz);
    t2  = _mm_loadu_pd(xyz+2);
    t3  = _mm_loadu_pd(xyz+4);
    t4  = _mm_loadu_pd(xyz+6);
    t5  = _mm_load_sd(xyz+8);

    sxy = _mm_loadu_pd(xyz_shift);
    sz  = _mm_load_sd(xyz_shift+2);
    szx = _mm_shuffle_pd(sz,sxy,_MM_SHUFFLE2(0,0));
    syz = _mm_shuffle_pd(sxy,sz,_MM_SHUFFLE2(0,1));

    t1  = _mm_add_pd(t1,sxy);
    t2  = _mm_add_pd(t2,szx);
    t3  = _mm_add_pd(t3,syz);
    t4  = _mm_add_pd(t4,sxy);
    t5  = _mm_add_sd(t5,sz);

    *x1 = _mm_shuffle_pd(t1,t1,_MM_SHUFFLE2(0,0));
    *y1 = _mm_shuffle_pd(t1,t1,_MM_SHUFFLE2(1,1));
    *z1 = _mm_shuffle_pd(t2,t2,_MM_SHUFFLE2(0,0));
    *x2 = _mm_shuffle_pd(t2,t2,_MM_SHUFFLE2(1,1));
    *y2 = _mm_shuffle_pd(t3,t3,_MM_SHUFFLE2(0,0));
    *z2 = _mm_shuffle_pd(t3,t3,_MM_SHUFFLE2(1,1));
    *x3 = _mm_shuffle_pd(t4,t4,_MM_SHUFFLE2(0,0));
    *y3 = _mm_shuffle_pd(t4,t4,_MM_SHUFFLE2(1,1));
    *z3 = _mm_shuffle_pd(t5,t5,_MM_SHUFFLE2(0,0));
}

static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                         __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                         __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                         __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    __m128d t1,t2,t3,t4,t5,t6,sxy,sz,szx,syz;

    t1  = _mm_loadu_pd(xyz);
    t2  = _mm_loadu_pd(xyz+2);
    t3  = _mm_loadu_pd(xyz+4);
    t4  = _mm_loadu_pd(xyz+6);
    t5  = _mm_loadu_pd(xyz+8);
    t6  = _mm_loadu_pd(xyz+10);

    sxy = _mm_loadu_pd(xyz_shift);
    sz  = _mm_load_sd(xyz_shift+2);
    szx = _mm_shuffle_pd(sz,sxy,_MM_SHUFFLE2(0,0));
    syz = _mm_shuffle_pd(sxy,sz,_MM_SHUFFLE2(0,1));

    t1  = _mm_add_pd(t1,sxy);
    t2  = _mm_add_pd(t2,szx);
    t3  = _mm_add_pd(t3,syz);
    t4  = _mm_add_pd(t4,sxy);
    t5  = _mm_add_pd(t5,szx);
    t6  = _mm_add_pd(t6,syz);

    *x1 = _mm_shuffle_pd(t1,t1,_MM_SHUFFLE2(0,0));
    *y1 = _mm_shuffle_pd(t1,t1,_MM_SHUFFLE2(1,1));
    *z1 = _mm_shuffle_pd(t2,t2,_MM_SHUFFLE2(0,0));
    *x2 = _mm_shuffle_pd(t2,t2,_MM_SHUFFLE2(1,1));
    *y2 = _mm_shuffle_pd(t3,t3,_MM_SHUFFLE2(0,0));
    *z2 = _mm_shuffle_pd(t3,t3,_MM_SHUFFLE2(1,1));
    *x3 = _mm_shuffle_pd(t4,t4,_MM_SHUFFLE2(0,0));
    *y3 = _mm_shuffle_pd(t4,t4,_MM_SHUFFLE2(1,1));
    *z3 = _mm_shuffle_pd(t5,t5,_MM_SHUFFLE2(0,0));
    *x4 = _mm_shuffle_pd(t5,t5,_MM_SHUFFLE2(1,1));
    *y4 = _mm_shuffle_pd(t6,t6,_MM_SHUFFLE2(0,0));
    *z4 = _mm_shuffle_pd(t6,t6,_MM_SHUFFLE2(1,1));
}

static gmx_inline void
gmx_mm_load_1rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x, __m128d * gmx_restrict y, __m128d * gmx_restrict z)
{
    *x = _mm_load_sd(p1);
    *y = _mm_load_sd(p1+1);
    *z = _mm_load_sd(p1+2);
}

255 static gmx_inline void
256 gmx_mm_load_3rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
257 __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
258 __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
259 __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
261 *x1 = _mm_load_sd(p1);
262 *y1 = _mm_load_sd(p1+1);
263 *z1 = _mm_load_sd(p1+2);
264 *x2 = _mm_load_sd(p1+3);
265 *y2 = _mm_load_sd(p1+4);
266 *z2 = _mm_load_sd(p1+5);
267 *x3 = _mm_load_sd(p1+6);
268 *y3 = _mm_load_sd(p1+7);
269 *z3 = _mm_load_sd(p1+8);
static gmx_inline void
gmx_mm_load_4rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                  __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    *x1 = _mm_load_sd(p1);
    *y1 = _mm_load_sd(p1+1);
    *z1 = _mm_load_sd(p1+2);
    *x2 = _mm_load_sd(p1+3);
    *y2 = _mm_load_sd(p1+4);
    *z2 = _mm_load_sd(p1+5);
    *x3 = _mm_load_sd(p1+6);
    *y3 = _mm_load_sd(p1+7);
    *z3 = _mm_load_sd(p1+8);
    *x4 = _mm_load_sd(p1+9);
    *y4 = _mm_load_sd(p1+10);
    *z4 = _mm_load_sd(p1+11);
}

static gmx_inline void
gmx_mm_load_1rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA,
                                  const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1)
{
    __m128d t1,t2,t3,t4;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_loadu_pd(ptrB);
    t3 = _mm_load_sd(ptrA+2);
    t4 = _mm_load_sd(ptrB+2);

    GMX_MM_TRANSPOSE2_PD(t1,t2);
    *x1 = t1;
    *y1 = t2;
    *z1 = _mm_unpacklo_pd(t3,t4);
}

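/*
 * GMX_MM_TRANSPOSE2_PD(t1,t2) (provided by the included x86 headers)
 * performs an in-place 2x2 transpose: {xA,yA},{xB,yB} becomes
 * {xA,xB},{yA,yB}. Here it converts two per-particle coordinate pairs
 * into the per-component registers the kernels operate on.
 */
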
static gmx_inline void
gmx_mm_load_3rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA, const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    __m128d t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;

    t1  = _mm_loadu_pd(ptrA);
    t2  = _mm_loadu_pd(ptrB);
    t3  = _mm_loadu_pd(ptrA+2);
    t4  = _mm_loadu_pd(ptrB+2);
    t5  = _mm_loadu_pd(ptrA+4);
    t6  = _mm_loadu_pd(ptrB+4);
    t7  = _mm_loadu_pd(ptrA+6);
    t8  = _mm_loadu_pd(ptrB+6);
    t9  = _mm_load_sd(ptrA+8);
    t10 = _mm_load_sd(ptrB+8);

    GMX_MM_TRANSPOSE2_PD(t1,t2);
    GMX_MM_TRANSPOSE2_PD(t3,t4);
    GMX_MM_TRANSPOSE2_PD(t5,t6);
    GMX_MM_TRANSPOSE2_PD(t7,t8);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    *y2 = t5;
    *z2 = t6;
    *x3 = t7;
    *y3 = t8;
    *z3 = _mm_unpacklo_pd(t9,t10);
}

static gmx_inline void
gmx_mm_load_4rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA, const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                  __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    __m128d t1,t2,t3,t4,t5,t6;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_loadu_pd(ptrB);
    t3 = _mm_loadu_pd(ptrA+2);
    t4 = _mm_loadu_pd(ptrB+2);
    t5 = _mm_loadu_pd(ptrA+4);
    t6 = _mm_loadu_pd(ptrB+4);
    GMX_MM_TRANSPOSE2_PD(t1,t2);
    GMX_MM_TRANSPOSE2_PD(t3,t4);
    GMX_MM_TRANSPOSE2_PD(t5,t6);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    *y2 = t5;
    *z2 = t6;

    t1 = _mm_loadu_pd(ptrA+6);
    t2 = _mm_loadu_pd(ptrB+6);
    t3 = _mm_loadu_pd(ptrA+8);
    t4 = _mm_loadu_pd(ptrB+8);
    t5 = _mm_loadu_pd(ptrA+10);
    t6 = _mm_loadu_pd(ptrB+10);
    GMX_MM_TRANSPOSE2_PD(t1,t2);
    GMX_MM_TRANSPOSE2_PD(t3,t4);
    GMX_MM_TRANSPOSE2_PD(t5,t6);
    *x3 = t1;
    *y3 = t2;
    *z3 = t3;
    *x4 = t4;
    *y4 = t5;
    *z4 = t6;
}

/* Routines to decrement an rvec in memory, typically used for j-particle force updates */

static gmx_inline void
gmx_mm_decrement_1rvec_1ptr_noswizzle_pd(double * gmx_restrict ptrA,
                                         __m128d xy, __m128d z)
{
    __m128d t1,t2;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_load_sd(ptrA+2);

    t1 = _mm_sub_pd(t1,xy);
    t2 = _mm_sub_sd(t2,z);

    _mm_storeu_pd(ptrA,t1);
    _mm_store_sd(ptrA+2,t2);
}

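/*
 * Usage sketch (illustrative only; f, j3, fj_xy and fj_z are hypothetical
 * names): subtract the accumulated forces on one j particle from the force
 * array, with {x,y} packed in one register and z in the low lane of another:
 *
 *     gmx_mm_decrement_1rvec_1ptr_noswizzle_pd(f+j3, fj_xy, fj_z);
 *
 * The noswizzle variants expect data already in memory order, so no
 * transpose is needed before the subtraction.
 */
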
static gmx_inline void
gmx_mm_decrement_3rvec_1ptr_noswizzle_pd(double * gmx_restrict ptrA,
                                         __m128d xy1, __m128d z1,
                                         __m128d xy2, __m128d z2,
                                         __m128d xy3, __m128d z3)
{
    __m128d t1,t2;
    __m128d tA,tB,tC,tD,tE;

    tA = _mm_loadu_pd(ptrA);
    tB = _mm_loadu_pd(ptrA+2);
    tC = _mm_loadu_pd(ptrA+4);
    tD = _mm_loadu_pd(ptrA+6);
    tE = _mm_load_sd(ptrA+8);

    /* xy1: y1 x1 */
    t1 = _mm_shuffle_pd(z1,xy2,_MM_SHUFFLE2(0,0)); /* x2 z1 */
    t2 = _mm_shuffle_pd(xy2,z2,_MM_SHUFFLE2(0,1)); /* z2 y2 */
    /* xy3: y3 x3 */

    tA = _mm_sub_pd(tA,xy1);
    tB = _mm_sub_pd(tB,t1);
    tC = _mm_sub_pd(tC,t2);
    tD = _mm_sub_pd(tD,xy3);
    tE = _mm_sub_sd(tE,z3);

    _mm_storeu_pd(ptrA,tA);
    _mm_storeu_pd(ptrA+2,tB);
    _mm_storeu_pd(ptrA+4,tC);
    _mm_storeu_pd(ptrA+6,tD);
    _mm_store_sd(ptrA+8,tE);
}

static gmx_inline void
gmx_mm_decrement_4rvec_1ptr_noswizzle_pd(double * gmx_restrict ptrA,
                                         __m128d xy1, __m128d z1,
                                         __m128d xy2, __m128d z2,
                                         __m128d xy3, __m128d z3,
                                         __m128d xy4, __m128d z4)
{
    __m128d t1,t2,t3,t4;
    __m128d tA,tB,tC,tD,tE,tF;

    tA = _mm_loadu_pd(ptrA);
    tB = _mm_loadu_pd(ptrA+2);
    tC = _mm_loadu_pd(ptrA+4);
    tD = _mm_loadu_pd(ptrA+6);
    tE = _mm_loadu_pd(ptrA+8);
    tF = _mm_loadu_pd(ptrA+10);

    /* xy1: y1 x1 */
    t1 = _mm_shuffle_pd(z1,xy2,_MM_SHUFFLE2(0,0)); /* x2 z1 */
    t2 = _mm_shuffle_pd(xy2,z2,_MM_SHUFFLE2(0,1)); /* z2 y2 */
    /* xy3: y3 x3 */
    t3 = _mm_shuffle_pd(z3,xy4,_MM_SHUFFLE2(0,0)); /* x4 z3 */
    t4 = _mm_shuffle_pd(xy4,z4,_MM_SHUFFLE2(0,1)); /* z4 y4 */

    tA = _mm_sub_pd(tA,xy1);
    tB = _mm_sub_pd(tB,t1);
    tC = _mm_sub_pd(tC,t2);
    tD = _mm_sub_pd(tD,xy3);
    tE = _mm_sub_pd(tE,t3);
    tF = _mm_sub_pd(tF,t4);

    _mm_storeu_pd(ptrA,tA);
    _mm_storeu_pd(ptrA+2,tB);
    _mm_storeu_pd(ptrA+4,tC);
    _mm_storeu_pd(ptrA+6,tD);
    _mm_storeu_pd(ptrA+8,tE);
    _mm_storeu_pd(ptrA+10,tF);
}

static gmx_inline void
gmx_mm_decrement_1rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1)
{
    __m128d t1,t2,t3;

    t1 = _mm_load_sd(ptrA);
    t2 = _mm_load_sd(ptrA+1);
    t3 = _mm_load_sd(ptrA+2);

    t1 = _mm_sub_sd(t1,x1);
    t2 = _mm_sub_sd(t2,y1);
    t3 = _mm_sub_sd(t3,z1);
    _mm_store_sd(ptrA,t1);
    _mm_store_sd(ptrA+1,t2);
    _mm_store_sd(ptrA+2,t3);
}

static gmx_inline void
gmx_mm_decrement_3rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3)
{
    __m128d t1,t2,t3,t4,t5;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_loadu_pd(ptrA+2);
    t3 = _mm_loadu_pd(ptrA+4);
    t4 = _mm_loadu_pd(ptrA+6);
    t5 = _mm_load_sd(ptrA+8);

    x1 = _mm_unpacklo_pd(x1,y1);
    z1 = _mm_unpacklo_pd(z1,x2);
    y2 = _mm_unpacklo_pd(y2,z2);
    x3 = _mm_unpacklo_pd(x3,y3);
    /* nothing to be done for z3 */

    t1 = _mm_sub_pd(t1,x1);
    t2 = _mm_sub_pd(t2,z1);
    t3 = _mm_sub_pd(t3,y2);
    t4 = _mm_sub_pd(t4,x3);
    t5 = _mm_sub_sd(t5,z3);
    _mm_storeu_pd(ptrA,t1);
    _mm_storeu_pd(ptrA+2,t2);
    _mm_storeu_pd(ptrA+4,t3);
    _mm_storeu_pd(ptrA+6,t4);
    _mm_store_sd(ptrA+8,t5);
}

static gmx_inline void
gmx_mm_decrement_4rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3,
                                       __m128d x4, __m128d y4, __m128d z4)
{
    __m128d t1,t2,t3,t4,t5,t6;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_loadu_pd(ptrA+2);
    t3 = _mm_loadu_pd(ptrA+4);
    t4 = _mm_loadu_pd(ptrA+6);
    t5 = _mm_loadu_pd(ptrA+8);
    t6 = _mm_loadu_pd(ptrA+10);

    x1 = _mm_unpacklo_pd(x1,y1);
    z1 = _mm_unpacklo_pd(z1,x2);
    y2 = _mm_unpacklo_pd(y2,z2);
    x3 = _mm_unpacklo_pd(x3,y3);
    z3 = _mm_unpacklo_pd(z3,x4);
    y4 = _mm_unpacklo_pd(y4,z4);

    _mm_storeu_pd(ptrA,    _mm_sub_pd( t1,x1 ));
    _mm_storeu_pd(ptrA+2,  _mm_sub_pd( t2,z1 ));
    _mm_storeu_pd(ptrA+4,  _mm_sub_pd( t3,y2 ));
    _mm_storeu_pd(ptrA+6,  _mm_sub_pd( t4,x3 ));
    _mm_storeu_pd(ptrA+8,  _mm_sub_pd( t5,z3 ));
    _mm_storeu_pd(ptrA+10, _mm_sub_pd( t6,y4 ));
}

static gmx_inline void
gmx_mm_decrement_1rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1)
{
    __m128d t1,t2,t3,t4,t5,t6,t7;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_load_sd(ptrA+2);
    t3 = _mm_loadu_pd(ptrB);
    t4 = _mm_load_sd(ptrB+2);

    t5 = _mm_unpacklo_pd(x1,y1);
    t6 = _mm_unpackhi_pd(x1,y1);
    t7 = _mm_unpackhi_pd(z1,z1);

    t1 = _mm_sub_pd(t1,t5);
    t2 = _mm_sub_sd(t2,z1);

    t3 = _mm_sub_pd(t3,t6);
    t4 = _mm_sub_sd(t4,t7);

    _mm_storeu_pd(ptrA,t1);
    _mm_store_sd(ptrA+2,t2);
    _mm_storeu_pd(ptrB,t3);
    _mm_store_sd(ptrB+2,t4);
}

static gmx_inline void
gmx_mm_decrement_3rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3)
{
    __m128d t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
    __m128d tA,tB,tC,tD,tE,tF,tG,tH,tI;

    t1  = _mm_loadu_pd(ptrA);
    t2  = _mm_loadu_pd(ptrA+2);
    t3  = _mm_loadu_pd(ptrA+4);
    t4  = _mm_loadu_pd(ptrA+6);
    t5  = _mm_load_sd(ptrA+8);
    t6  = _mm_loadu_pd(ptrB);
    t7  = _mm_loadu_pd(ptrB+2);
    t8  = _mm_loadu_pd(ptrB+4);
    t9  = _mm_loadu_pd(ptrB+6);
    t10 = _mm_load_sd(ptrB+8);

    tA = _mm_unpacklo_pd(x1,y1);
    tB = _mm_unpackhi_pd(x1,y1);
    tC = _mm_unpacklo_pd(z1,x2);
    tD = _mm_unpackhi_pd(z1,x2);
    tE = _mm_unpacklo_pd(y2,z2);
    tF = _mm_unpackhi_pd(y2,z2);
    tG = _mm_unpacklo_pd(x3,y3);
    tH = _mm_unpackhi_pd(x3,y3);
    tI = _mm_unpackhi_pd(z3,z3);

    t1  = _mm_sub_pd(t1,tA);
    t2  = _mm_sub_pd(t2,tC);
    t3  = _mm_sub_pd(t3,tE);
    t4  = _mm_sub_pd(t4,tG);
    t5  = _mm_sub_sd(t5,z3);

    t6  = _mm_sub_pd(t6,tB);
    t7  = _mm_sub_pd(t7,tD);
    t8  = _mm_sub_pd(t8,tF);
    t9  = _mm_sub_pd(t9,tH);
    t10 = _mm_sub_sd(t10,tI);

    _mm_storeu_pd(ptrA,t1);
    _mm_storeu_pd(ptrA+2,t2);
    _mm_storeu_pd(ptrA+4,t3);
    _mm_storeu_pd(ptrA+6,t4);
    _mm_store_sd(ptrA+8,t5);
    _mm_storeu_pd(ptrB,t6);
    _mm_storeu_pd(ptrB+2,t7);
    _mm_storeu_pd(ptrB+4,t8);
    _mm_storeu_pd(ptrB+6,t9);
    _mm_store_sd(ptrB+8,t10);
}

static gmx_inline void
gmx_mm_decrement_4rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3,
                                       __m128d x4, __m128d y4, __m128d z4)
{
    __m128d t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12;
    __m128d tA,tB,tC,tD,tE,tF,tG,tH,tI,tJ,tK,tL;

    t1  = _mm_loadu_pd(ptrA);
    t2  = _mm_loadu_pd(ptrA+2);
    t3  = _mm_loadu_pd(ptrA+4);
    t4  = _mm_loadu_pd(ptrA+6);
    t5  = _mm_loadu_pd(ptrA+8);
    t6  = _mm_loadu_pd(ptrA+10);
    t7  = _mm_loadu_pd(ptrB);
    t8  = _mm_loadu_pd(ptrB+2);
    t9  = _mm_loadu_pd(ptrB+4);
    t10 = _mm_loadu_pd(ptrB+6);
    t11 = _mm_loadu_pd(ptrB+8);
    t12 = _mm_loadu_pd(ptrB+10);

    tA = _mm_unpacklo_pd(x1,y1);
    tB = _mm_unpackhi_pd(x1,y1);
    tC = _mm_unpacklo_pd(z1,x2);
    tD = _mm_unpackhi_pd(z1,x2);
    tE = _mm_unpacklo_pd(y2,z2);
    tF = _mm_unpackhi_pd(y2,z2);
    tG = _mm_unpacklo_pd(x3,y3);
    tH = _mm_unpackhi_pd(x3,y3);
    tI = _mm_unpacklo_pd(z3,x4);
    tJ = _mm_unpackhi_pd(z3,x4);
    tK = _mm_unpacklo_pd(y4,z4);
    tL = _mm_unpackhi_pd(y4,z4);

    t1  = _mm_sub_pd(t1,tA);
    t2  = _mm_sub_pd(t2,tC);
    t3  = _mm_sub_pd(t3,tE);
    t4  = _mm_sub_pd(t4,tG);
    t5  = _mm_sub_pd(t5,tI);
    t6  = _mm_sub_pd(t6,tK);

    t7  = _mm_sub_pd(t7,tB);
    t8  = _mm_sub_pd(t8,tD);
    t9  = _mm_sub_pd(t9,tF);
    t10 = _mm_sub_pd(t10,tH);
    t11 = _mm_sub_pd(t11,tJ);
    t12 = _mm_sub_pd(t12,tL);

    _mm_storeu_pd(ptrA,   t1);
    _mm_storeu_pd(ptrA+2, t2);
    _mm_storeu_pd(ptrA+4, t3);
    _mm_storeu_pd(ptrA+6, t4);
    _mm_storeu_pd(ptrA+8, t5);
    _mm_storeu_pd(ptrA+10,t6);
    _mm_storeu_pd(ptrB,   t7);
    _mm_storeu_pd(ptrB+2, t8);
    _mm_storeu_pd(ptrB+4, t9);
    _mm_storeu_pd(ptrB+6, t10);
    _mm_storeu_pd(ptrB+8, t11);
    _mm_storeu_pd(ptrB+10,t12);
}

static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    fix1 = _mm_hadd_pd(fix1,fiy1);
    fiz1 = _mm_hadd_pd(fiz1,fiz1);

    _mm_storeu_pd( fptr,   _mm_add_pd( _mm_loadu_pd(fptr),   fix1 ));
    _mm_store_sd(  fptr+2, _mm_add_sd( _mm_load_sd(fptr+2),  fiz1 ));

    _mm_storeu_pd( fshiftptr,   _mm_add_pd( _mm_loadu_pd(fshiftptr),  fix1 ));
    _mm_store_sd(  fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}

static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      __m128d fix2, __m128d fiy2, __m128d fiz2,
                                      __m128d fix3, __m128d fiy3, __m128d fiz3,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    __m128d t1,t2;

    fix1 = _mm_hadd_pd(fix1,fiy1);
    fiz1 = _mm_hadd_pd(fiz1,fix2);
    fiy2 = _mm_hadd_pd(fiy2,fiz2);
    fix3 = _mm_hadd_pd(fix3,fiy3);
    fiz3 = _mm_hadd_pd(fiz3,fiz3);

    _mm_storeu_pd( fptr,   _mm_add_pd( _mm_loadu_pd(fptr),   fix1 ));
    _mm_storeu_pd( fptr+2, _mm_add_pd( _mm_loadu_pd(fptr+2), fiz1 ));
    _mm_storeu_pd( fptr+4, _mm_add_pd( _mm_loadu_pd(fptr+4), fiy2 ));
    _mm_storeu_pd( fptr+6, _mm_add_pd( _mm_loadu_pd(fptr+6), fix3 ));
    _mm_store_sd(  fptr+8, _mm_add_sd( _mm_load_sd(fptr+8),  fiz3 ));

    fix1 = _mm_add_pd(fix1,fix3);
    t1   = _mm_shuffle_pd(fiz1,fiy2,_MM_SHUFFLE2(0,1));
    fix1 = _mm_add_pd(fix1,t1); /* x and y sums */

    t2   = _mm_shuffle_pd(fiy2,fiy2,_MM_SHUFFLE2(1,1));
    fiz1 = _mm_add_sd(fiz1,fiz3);
    fiz1 = _mm_add_sd(fiz1,t2); /* z sum */

    _mm_storeu_pd( fshiftptr,   _mm_add_pd( _mm_loadu_pd(fshiftptr),  fix1 ));
    _mm_store_sd(  fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}

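/*
 * Reduction sketch (illustrative): each fi* register holds one force
 * component accumulated over the two j-particle lanes. Since
 * _mm_hadd_pd(a,b) = {a0+a1, b0+b1}, the five hadds above collapse the
 * nine accumulators directly into memory order
 * {x1,y1} {z1,x2} {y2,z2} {x3,y3} {z3,-}, and the same registers are
 * then reshuffled once more to form the x/y/z shift-force sums.
 */
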
static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      __m128d fix2, __m128d fiy2, __m128d fiz2,
                                      __m128d fix3, __m128d fiy3, __m128d fiz3,
                                      __m128d fix4, __m128d fiy4, __m128d fiz4,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    __m128d t1,t2;

    fix1 = _mm_hadd_pd(fix1,fiy1);
    fiz1 = _mm_hadd_pd(fiz1,fix2);
    fiy2 = _mm_hadd_pd(fiy2,fiz2);
    fix3 = _mm_hadd_pd(fix3,fiy3);
    fiz3 = _mm_hadd_pd(fiz3,fix4);
    fiy4 = _mm_hadd_pd(fiy4,fiz4);

    _mm_storeu_pd( fptr,    _mm_add_pd( _mm_loadu_pd(fptr),    fix1 ));
    _mm_storeu_pd( fptr+2,  _mm_add_pd( _mm_loadu_pd(fptr+2),  fiz1 ));
    _mm_storeu_pd( fptr+4,  _mm_add_pd( _mm_loadu_pd(fptr+4),  fiy2 ));
    _mm_storeu_pd( fptr+6,  _mm_add_pd( _mm_loadu_pd(fptr+6),  fix3 ));
    _mm_storeu_pd( fptr+8,  _mm_add_pd( _mm_loadu_pd(fptr+8),  fiz3 ));
    _mm_storeu_pd( fptr+10, _mm_add_pd( _mm_loadu_pd(fptr+10), fiy4 ));

    t1   = _mm_shuffle_pd(fiz1,fiy2,_MM_SHUFFLE2(0,1));
    fix1 = _mm_add_pd(fix1,t1);
    t2   = _mm_shuffle_pd(fiz3,fiy4,_MM_SHUFFLE2(0,1));
    fix3 = _mm_add_pd(fix3,t2);
    fix1 = _mm_add_pd(fix1,fix3); /* x and y sums */

    fiz1 = _mm_add_sd(fiz1, _mm_unpackhi_pd(fiy2,fiy2));
    fiz3 = _mm_add_sd(fiz3, _mm_unpackhi_pd(fiy4,fiy4));
    fiz1 = _mm_add_sd(fiz1,fiz3); /* z sum */

    _mm_storeu_pd( fshiftptr,   _mm_add_pd( _mm_loadu_pd(fshiftptr),  fix1 ));
    _mm_store_sd(  fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}

static gmx_inline void
gmx_mm_update_1pot_pd(__m128d pot1, double * gmx_restrict ptrA)
{
    pot1 = _mm_hadd_pd(pot1,pot1);
    _mm_store_sd(ptrA,_mm_add_sd(pot1,_mm_load_sd(ptrA)));
}

static gmx_inline void
gmx_mm_update_2pot_pd(__m128d pot1, double * gmx_restrict ptrA,
                      __m128d pot2, double * gmx_restrict ptrB)
{
    pot1 = _mm_hadd_pd(pot1,pot2);
    pot2 = _mm_unpackhi_pd(pot1,pot1);

    _mm_store_sd(ptrA,_mm_add_sd(pot1,_mm_load_sd(ptrA)));
    _mm_store_sd(ptrB,_mm_add_sd(pot2,_mm_load_sd(ptrB)));
}

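/*
 * Usage sketch (illustrative only; velecsum/vvdwsum and the destination
 * pointers p_elec/p_vdw are hypothetical names): at the end of an inner
 * loop, horizontally sum two two-lane potential accumulators and add them
 * to their scalar energy buffers in one call:
 *
 *     gmx_mm_update_2pot_pd(velecsum, p_elec, vvdwsum, p_vdw);
 */
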
#endif /* _kernelutil_x86_avx_128_fma_double_h_ */