/*
 * This source code is part of GROMACS.
 *
 * Copyright (c) 2011-2012, The GROMACS Development Team
 *
 * Gromacs is a library for molecular simulation and trajectory analysis,
 * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 * a full list of developers and information, check out http://www.gromacs.org
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU Lesser General Public License.
 *
 * In plain-speak: do not worry about classes/macros/templates either - only
 * changes to the library have to be LGPL, not an application linking with it.
 *
 * To help fund GROMACS development, we humbly ask that you cite
 * the papers people have written on it - you can find them on the website!
 */
#ifndef _kernelutil_x86_avx_128_fma_double_h_
#define _kernelutil_x86_avx_128_fma_double_h_

#include "gmx_x86_avx_128_fma.h"

static gmx_inline int
gmx_mm_any_lt(__m128d a, __m128d b)
{
    return _mm_movemask_pd(_mm_cmplt_pd(a, b));
}

static gmx_inline __m128d
gmx_mm_calc_rsq_pd(__m128d dx, __m128d dy, __m128d dz)
{
    return _mm_macc_pd(dx, dx, _mm_macc_pd(dy, dy, _mm_mul_pd(dz, dz)));
}

/* Normal sum of four xmm registers */
#define gmx_mm_sum4_pd(t0, t1, t2, t3) _mm_add_pd(_mm_add_pd(t0, t1), _mm_add_pd(t2, t3))

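/*
 * Illustrative sketch (not part of the original GROMACS API): combine the two
 * helpers above to compute the squared i-j distance for two j-particles at a
 * time and test it against a squared cutoff. All names in this example are
 * hypothetical.
 */
static gmx_inline int
gmx_mm_example_within_cutoff(__m128d ix, __m128d iy, __m128d iz,
                             __m128d jx, __m128d jy, __m128d jz,
                             double rcutoff2)
{
    __m128d dx, dy, dz, rsq;

    dx  = _mm_sub_pd(ix, jx);
    dy  = _mm_sub_pd(iy, jy);
    dz  = _mm_sub_pd(iz, jz);
    rsq = gmx_mm_calc_rsq_pd(dx, dy, dz);

    /* Nonzero if at least one of the two j-particles is inside the cutoff */
    return gmx_mm_any_lt(rsq, _mm_set1_pd(rcutoff2));
}
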
/* Load a double value from 1-2 places, merge into xmm register */

static gmx_inline __m128d
gmx_mm_load_2real_swizzle_pd(const double * gmx_restrict ptrA,
                             const double * gmx_restrict ptrB)
{
    return _mm_unpacklo_pd(_mm_load_sd(ptrA), _mm_load_sd(ptrB));
}

static gmx_inline __m128d
gmx_mm_load_1real_pd(const double * gmx_restrict ptrA)
{
    return _mm_load_sd(ptrA);
}

static gmx_inline void
gmx_mm_store_2real_swizzle_pd(double * gmx_restrict ptrA,
                              double * gmx_restrict ptrB,
                              __m128d xmm1)
{
    __m128d t2;

    t2 = _mm_unpackhi_pd(xmm1, xmm1);
    _mm_store_sd(ptrA, xmm1);
    _mm_store_sd(ptrB, t2);
}

static gmx_inline void
gmx_mm_store_1real_pd(double * gmx_restrict ptrA, __m128d xmm1)
{
    _mm_store_sd(ptrA, xmm1);
}

/* Similar to store, but increments the value in memory */
static gmx_inline void
gmx_mm_increment_2real_swizzle_pd(double * gmx_restrict ptrA,
                                  double * gmx_restrict ptrB, __m128d xmm1)
{
    __m128d t1;

    t1   = _mm_unpackhi_pd(xmm1, xmm1);
    xmm1 = _mm_add_sd(xmm1, _mm_load_sd(ptrA));
    t1   = _mm_add_sd(t1, _mm_load_sd(ptrB));
    _mm_store_sd(ptrA, xmm1);
    _mm_store_sd(ptrB, t1);
}

static gmx_inline void
gmx_mm_increment_1real_pd(double * gmx_restrict ptrA, __m128d xmm1)
{
    __m128d tmp;

    tmp = gmx_mm_load_1real_pd(ptrA);
    tmp = _mm_add_sd(tmp, xmm1);
    gmx_mm_store_1real_pd(ptrA, tmp);
}

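/*
 * Illustrative sketch (not part of the original GROMACS API): scale two
 * values living at unrelated addresses using the swizzled load/store helpers
 * above. The function name and parameters are hypothetical.
 */
static gmx_inline void
gmx_mm_example_scale_2real(double * gmx_restrict ptrA,
                           double * gmx_restrict ptrB,
                           double scale)
{
    __m128d v;

    v = gmx_mm_load_2real_swizzle_pd(ptrA, ptrB);  /* v = {*ptrA, *ptrB} */
    v = _mm_mul_pd(v, _mm_set1_pd(scale));         /* scale both lanes   */
    gmx_mm_store_2real_swizzle_pd(ptrA, ptrB, v);  /* write lanes back   */
}
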
static gmx_inline void
gmx_mm_load_2pair_swizzle_pd(const double * gmx_restrict p1,
                             const double * gmx_restrict p2,
                             __m128d * gmx_restrict c6,
                             __m128d * gmx_restrict c12)
{
    __m128d t1, t2;

    /* The c6/c12 array should be aligned */
    t1   = _mm_loadu_pd(p1);
    t2   = _mm_loadu_pd(p2);
    *c6  = _mm_unpacklo_pd(t1, t2);
    *c12 = _mm_unpackhi_pd(t1, t2);
}

static gmx_inline void
gmx_mm_load_1pair_swizzle_pd(const double * gmx_restrict p1,
                             __m128d * gmx_restrict c6,
                             __m128d * gmx_restrict c12)
{
    *c6  = _mm_load_sd(p1);
    *c12 = _mm_load_sd(p1+1);
}

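/*
 * Illustrative sketch (not part of the original GROMACS API): gather the
 * {c6, c12} Lennard-Jones parameter pairs for two j-particles into SIMD
 * registers. The array layout and all names here are hypothetical.
 */
static gmx_inline void
gmx_mm_example_load_lj_params(const double * gmx_restrict vdwparam,
                              int offsetA, int offsetB,
                              __m128d * gmx_restrict c6,
                              __m128d * gmx_restrict c12)
{
    /* Each particle type stores c6 followed by c12 at its offset */
    gmx_mm_load_2pair_swizzle_pd(vdwparam+offsetA, vdwparam+offsetB, c6, c12);
}
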
static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1,
                                         __m128d * gmx_restrict y1,
                                         __m128d * gmx_restrict z1)
{
    __m128d mem_xy, mem_z, mem_sxy, mem_sz;

    mem_xy  = _mm_loadu_pd(xyz);
    mem_z   = _mm_load_sd(xyz+2);
    mem_sxy = _mm_loadu_pd(xyz_shift);
    mem_sz  = _mm_load_sd(xyz_shift+2);

    mem_xy = _mm_add_pd(mem_xy, mem_sxy);
    mem_z  = _mm_add_pd(mem_z, mem_sz);

    *x1 = _mm_shuffle_pd(mem_xy, mem_xy, _MM_SHUFFLE2(0, 0));
    *y1 = _mm_shuffle_pd(mem_xy, mem_xy, _MM_SHUFFLE2(1, 1));
    *z1 = _mm_shuffle_pd(mem_z, mem_z, _MM_SHUFFLE2(0, 0));
}

static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                         __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                         __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    __m128d t1, t2, t3, t4, t5, sxy, sz, szx, syz;

    t1 = _mm_loadu_pd(xyz);
    t2 = _mm_loadu_pd(xyz+2);
    t3 = _mm_loadu_pd(xyz+4);
    t4 = _mm_loadu_pd(xyz+6);
    t5 = _mm_load_sd(xyz+8);

    sxy = _mm_loadu_pd(xyz_shift);
    sz  = _mm_load_sd(xyz_shift+2);
    szx = _mm_shuffle_pd(sz, sxy, _MM_SHUFFLE2(0, 0));
    syz = _mm_shuffle_pd(sxy, sz, _MM_SHUFFLE2(0, 1));

    t1 = _mm_add_pd(t1, sxy);
    t2 = _mm_add_pd(t2, szx);
    t3 = _mm_add_pd(t3, syz);
    t4 = _mm_add_pd(t4, sxy);
    t5 = _mm_add_sd(t5, sz);

    *x1 = _mm_shuffle_pd(t1, t1, _MM_SHUFFLE2(0, 0));
    *y1 = _mm_shuffle_pd(t1, t1, _MM_SHUFFLE2(1, 1));
    *z1 = _mm_shuffle_pd(t2, t2, _MM_SHUFFLE2(0, 0));
    *x2 = _mm_shuffle_pd(t2, t2, _MM_SHUFFLE2(1, 1));
    *y2 = _mm_shuffle_pd(t3, t3, _MM_SHUFFLE2(0, 0));
    *z2 = _mm_shuffle_pd(t3, t3, _MM_SHUFFLE2(1, 1));
    *x3 = _mm_shuffle_pd(t4, t4, _MM_SHUFFLE2(0, 0));
    *y3 = _mm_shuffle_pd(t4, t4, _MM_SHUFFLE2(1, 1));
    *z3 = _mm_shuffle_pd(t5, t5, _MM_SHUFFLE2(0, 0));
}

static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                         __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                         __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                         __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    __m128d t1, t2, t3, t4, t5, t6, sxy, sz, szx, syz;

    t1 = _mm_loadu_pd(xyz);
    t2 = _mm_loadu_pd(xyz+2);
    t3 = _mm_loadu_pd(xyz+4);
    t4 = _mm_loadu_pd(xyz+6);
    t5 = _mm_loadu_pd(xyz+8);
    t6 = _mm_loadu_pd(xyz+10);

    sxy = _mm_loadu_pd(xyz_shift);
    sz  = _mm_load_sd(xyz_shift+2);
    szx = _mm_shuffle_pd(sz, sxy, _MM_SHUFFLE2(0, 0));
    syz = _mm_shuffle_pd(sxy, sz, _MM_SHUFFLE2(0, 1));

    t1 = _mm_add_pd(t1, sxy);
    t2 = _mm_add_pd(t2, szx);
    t3 = _mm_add_pd(t3, syz);
    t4 = _mm_add_pd(t4, sxy);
    t5 = _mm_add_pd(t5, szx);
    t6 = _mm_add_pd(t6, syz);

    *x1 = _mm_shuffle_pd(t1, t1, _MM_SHUFFLE2(0, 0));
    *y1 = _mm_shuffle_pd(t1, t1, _MM_SHUFFLE2(1, 1));
    *z1 = _mm_shuffle_pd(t2, t2, _MM_SHUFFLE2(0, 0));
    *x2 = _mm_shuffle_pd(t2, t2, _MM_SHUFFLE2(1, 1));
    *y2 = _mm_shuffle_pd(t3, t3, _MM_SHUFFLE2(0, 0));
    *z2 = _mm_shuffle_pd(t3, t3, _MM_SHUFFLE2(1, 1));
    *x3 = _mm_shuffle_pd(t4, t4, _MM_SHUFFLE2(0, 0));
    *y3 = _mm_shuffle_pd(t4, t4, _MM_SHUFFLE2(1, 1));
    *z3 = _mm_shuffle_pd(t5, t5, _MM_SHUFFLE2(0, 0));
    *x4 = _mm_shuffle_pd(t5, t5, _MM_SHUFFLE2(1, 1));
    *y4 = _mm_shuffle_pd(t6, t6, _MM_SHUFFLE2(0, 0));
    *z4 = _mm_shuffle_pd(t6, t6, _MM_SHUFFLE2(1, 1));
}

static gmx_inline void
gmx_mm_load_1rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x, __m128d * gmx_restrict y, __m128d * gmx_restrict z)
{
    *x = _mm_load_sd(p1);
    *y = _mm_load_sd(p1+1);
    *z = _mm_load_sd(p1+2);
}

static gmx_inline void
gmx_mm_load_3rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    *x1 = _mm_load_sd(p1);
    *y1 = _mm_load_sd(p1+1);
    *z1 = _mm_load_sd(p1+2);
    *x2 = _mm_load_sd(p1+3);
    *y2 = _mm_load_sd(p1+4);
    *z2 = _mm_load_sd(p1+5);
    *x3 = _mm_load_sd(p1+6);
    *y3 = _mm_load_sd(p1+7);
    *z3 = _mm_load_sd(p1+8);
}

static gmx_inline void
gmx_mm_load_4rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                  __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    *x1 = _mm_load_sd(p1);
    *y1 = _mm_load_sd(p1+1);
    *z1 = _mm_load_sd(p1+2);
    *x2 = _mm_load_sd(p1+3);
    *y2 = _mm_load_sd(p1+4);
    *z2 = _mm_load_sd(p1+5);
    *x3 = _mm_load_sd(p1+6);
    *y3 = _mm_load_sd(p1+7);
    *z3 = _mm_load_sd(p1+8);
    *x4 = _mm_load_sd(p1+9);
    *y4 = _mm_load_sd(p1+10);
    *z4 = _mm_load_sd(p1+11);
}

static gmx_inline void
gmx_mm_load_1rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA,
                                  const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1)
{
    __m128d t1, t2, t3, t4;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_loadu_pd(ptrB);
    t3 = _mm_load_sd(ptrA+2);
    t4 = _mm_load_sd(ptrB+2);
    GMX_MM_TRANSPOSE2_PD(t1, t2);
    *x1 = t1;
    *y1 = t2;
    *z1 = _mm_unpacklo_pd(t3, t4);
}

static gmx_inline void
gmx_mm_load_3rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA, const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    __m128d t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;

    t1  = _mm_loadu_pd(ptrA);
    t2  = _mm_loadu_pd(ptrB);
    t3  = _mm_loadu_pd(ptrA+2);
    t4  = _mm_loadu_pd(ptrB+2);
    t5  = _mm_loadu_pd(ptrA+4);
    t6  = _mm_loadu_pd(ptrB+4);
    t7  = _mm_loadu_pd(ptrA+6);
    t8  = _mm_loadu_pd(ptrB+6);
    t9  = _mm_load_sd(ptrA+8);
    t10 = _mm_load_sd(ptrB+8);
    GMX_MM_TRANSPOSE2_PD(t1, t2);
    GMX_MM_TRANSPOSE2_PD(t3, t4);
    GMX_MM_TRANSPOSE2_PD(t5, t6);
    GMX_MM_TRANSPOSE2_PD(t7, t8);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    *y2 = t5;
    *z2 = t6;
    *x3 = t7;
    *y3 = t8;
    *z3 = _mm_unpacklo_pd(t9, t10);
}

static gmx_inline void
gmx_mm_load_4rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA, const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                  __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    __m128d t1, t2, t3, t4, t5, t6;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_loadu_pd(ptrB);
    t3 = _mm_loadu_pd(ptrA+2);
    t4 = _mm_loadu_pd(ptrB+2);
    t5 = _mm_loadu_pd(ptrA+4);
    t6 = _mm_loadu_pd(ptrB+4);
    GMX_MM_TRANSPOSE2_PD(t1, t2);
    GMX_MM_TRANSPOSE2_PD(t3, t4);
    GMX_MM_TRANSPOSE2_PD(t5, t6);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
    *x2 = t4;
    *y2 = t5;
    *z2 = t6;
    t1 = _mm_loadu_pd(ptrA+6);
    t2 = _mm_loadu_pd(ptrB+6);
    t3 = _mm_loadu_pd(ptrA+8);
    t4 = _mm_loadu_pd(ptrB+8);
    t5 = _mm_loadu_pd(ptrA+10);
    t6 = _mm_loadu_pd(ptrB+10);
    GMX_MM_TRANSPOSE2_PD(t1, t2);
    GMX_MM_TRANSPOSE2_PD(t3, t4);
    GMX_MM_TRANSPOSE2_PD(t5, t6);
    *x3 = t1;
    *y3 = t2;
    *z3 = t3;
    *x4 = t4;
    *y4 = t5;
    *z4 = t6;
}

/* Routines to decrement rvec in memory, typically used for j particle force updates */
static gmx_inline void
gmx_mm_decrement_1rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1)
{
    __m128d t1, t2, t3;

    t1 = _mm_load_sd(ptrA);
    t2 = _mm_load_sd(ptrA+1);
    t3 = _mm_load_sd(ptrA+2);

    t1 = _mm_sub_sd(t1, x1);
    t2 = _mm_sub_sd(t2, y1);
    t3 = _mm_sub_sd(t3, z1);
    _mm_store_sd(ptrA, t1);
    _mm_store_sd(ptrA+1, t2);
    _mm_store_sd(ptrA+2, t3);
}

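/*
 * Illustrative sketch (not part of the original GROMACS API): apply the force
 * from one pair interaction to a single j-particle (only the low SIMD lane of
 * each register is used by the 1ptr helper above). All names are hypothetical.
 */
static gmx_inline void
gmx_mm_example_update_jforce(double * gmx_restrict fj,
                             __m128d fscal,
                             __m128d dx, __m128d dy, __m128d dz)
{
    __m128d fx, fy, fz;

    /* Force components along the connection vector */
    fx = _mm_mul_pd(fscal, dx);
    fy = _mm_mul_pd(fscal, dy);
    fz = _mm_mul_pd(fscal, dz);

    /* Newton's third law: the j-particle receives minus the force on i */
    gmx_mm_decrement_1rvec_1ptr_swizzle_pd(fj, fx, fy, fz);
}
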
#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_3rvec_1ptr_swizzle_pd(ptrA, _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3) \
    { \
        __m128d _t1, _t2, _t3, _t4, _t5; \
        _t1 = _mm_loadu_pd(ptrA); \
        _t2 = _mm_loadu_pd(ptrA+2); \
        _t3 = _mm_loadu_pd(ptrA+4); \
        _t4 = _mm_loadu_pd(ptrA+6); \
        _t5 = _mm_load_sd(ptrA+8); \
        _x1 = _mm_unpacklo_pd(_x1, _y1); \
        _z1 = _mm_unpacklo_pd(_z1, _x2); \
        _y2 = _mm_unpacklo_pd(_y2, _z2); \
        _x3 = _mm_unpacklo_pd(_x3, _y3); \
        _t1 = _mm_sub_pd(_t1, _x1); \
        _t2 = _mm_sub_pd(_t2, _z1); \
        _t3 = _mm_sub_pd(_t3, _y2); \
        _t4 = _mm_sub_pd(_t4, _x3); \
        _t5 = _mm_sub_sd(_t5, _z3); \
        _mm_storeu_pd(ptrA, _t1); \
        _mm_storeu_pd(ptrA+2, _t2); \
        _mm_storeu_pd(ptrA+4, _t3); \
        _mm_storeu_pd(ptrA+6, _t4); \
        _mm_store_sd(ptrA+8, _t5); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_3rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3)
{
    __m128d t1, t2, t3, t4, t5;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_loadu_pd(ptrA+2);
    t3 = _mm_loadu_pd(ptrA+4);
    t4 = _mm_loadu_pd(ptrA+6);
    t5 = _mm_load_sd(ptrA+8);

    x1 = _mm_unpacklo_pd(x1, y1);
    z1 = _mm_unpacklo_pd(z1, x2);
    y2 = _mm_unpacklo_pd(y2, z2);
    x3 = _mm_unpacklo_pd(x3, y3);
    /* nothing to be done for z3 */

    t1 = _mm_sub_pd(t1, x1);
    t2 = _mm_sub_pd(t2, z1);
    t3 = _mm_sub_pd(t3, y2);
    t4 = _mm_sub_pd(t4, x3);
    t5 = _mm_sub_sd(t5, z3);
    _mm_storeu_pd(ptrA, t1);
    _mm_storeu_pd(ptrA+2, t2);
    _mm_storeu_pd(ptrA+4, t3);
    _mm_storeu_pd(ptrA+6, t4);
    _mm_store_sd(ptrA+8, t5);
}
#endif

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_4rvec_1ptr_swizzle_pd(ptrA, _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3, _x4, _y4, _z4) \
    { \
        __m128d _t1, _t2, _t3, _t4, _t5, _t6; \
        _t1 = _mm_loadu_pd(ptrA); \
        _t2 = _mm_loadu_pd(ptrA+2); \
        _t3 = _mm_loadu_pd(ptrA+4); \
        _t4 = _mm_loadu_pd(ptrA+6); \
        _t5 = _mm_loadu_pd(ptrA+8); \
        _t6 = _mm_loadu_pd(ptrA+10); \
        _x1 = _mm_unpacklo_pd(_x1, _y1); \
        _z1 = _mm_unpacklo_pd(_z1, _x2); \
        _y2 = _mm_unpacklo_pd(_y2, _z2); \
        _x3 = _mm_unpacklo_pd(_x3, _y3); \
        _z3 = _mm_unpacklo_pd(_z3, _x4); \
        _y4 = _mm_unpacklo_pd(_y4, _z4); \
        _mm_storeu_pd(ptrA, _mm_sub_pd( _t1, _x1 )); \
        _mm_storeu_pd(ptrA+2, _mm_sub_pd( _t2, _z1 )); \
        _mm_storeu_pd(ptrA+4, _mm_sub_pd( _t3, _y2 )); \
        _mm_storeu_pd(ptrA+6, _mm_sub_pd( _t4, _x3 )); \
        _mm_storeu_pd(ptrA+8, _mm_sub_pd( _t5, _z3 )); \
        _mm_storeu_pd(ptrA+10, _mm_sub_pd( _t6, _y4 )); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_4rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3,
                                       __m128d x4, __m128d y4, __m128d z4)
{
    __m128d t1, t2, t3, t4, t5, t6;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_loadu_pd(ptrA+2);
    t3 = _mm_loadu_pd(ptrA+4);
    t4 = _mm_loadu_pd(ptrA+6);
    t5 = _mm_loadu_pd(ptrA+8);
    t6 = _mm_loadu_pd(ptrA+10);

    x1 = _mm_unpacklo_pd(x1, y1);
    z1 = _mm_unpacklo_pd(z1, x2);
    y2 = _mm_unpacklo_pd(y2, z2);
    x3 = _mm_unpacklo_pd(x3, y3);
    z3 = _mm_unpacklo_pd(z3, x4);
    y4 = _mm_unpacklo_pd(y4, z4);

    _mm_storeu_pd(ptrA, _mm_sub_pd( t1, x1 ));
    _mm_storeu_pd(ptrA+2, _mm_sub_pd( t2, z1 ));
    _mm_storeu_pd(ptrA+4, _mm_sub_pd( t3, y2 ));
    _mm_storeu_pd(ptrA+6, _mm_sub_pd( t4, x3 ));
    _mm_storeu_pd(ptrA+8, _mm_sub_pd( t5, z3 ));
    _mm_storeu_pd(ptrA+10, _mm_sub_pd( t6, y4 ));
}
#endif

static gmx_inline void
gmx_mm_decrement_1rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1)
{
    __m128d t1, t2, t3, t4, t5, t6, t7;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_load_sd(ptrA+2);
    t3 = _mm_loadu_pd(ptrB);
    t4 = _mm_load_sd(ptrB+2);

    t5 = _mm_unpacklo_pd(x1, y1);
    t6 = _mm_unpackhi_pd(x1, y1);
    t7 = _mm_unpackhi_pd(z1, z1);

    t1 = _mm_sub_pd(t1, t5);
    t2 = _mm_sub_sd(t2, z1);

    t3 = _mm_sub_pd(t3, t6);
    t4 = _mm_sub_sd(t4, t7);

    _mm_storeu_pd(ptrA, t1);
    _mm_store_sd(ptrA+2, t2);
    _mm_storeu_pd(ptrB, t3);
    _mm_store_sd(ptrB+2, t4);
}

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_3rvec_2ptr_swizzle_pd(ptrA, ptrB, _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3) \
    { \
        __m128d _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10; \
        __m128d _tA, _tB, _tC, _tD, _tE, _tF, _tG, _tH, _tI; \
        _t1  = _mm_loadu_pd(ptrA); \
        _t2  = _mm_loadu_pd(ptrA+2); \
        _t3  = _mm_loadu_pd(ptrA+4); \
        _t4  = _mm_loadu_pd(ptrA+6); \
        _t5  = _mm_load_sd(ptrA+8); \
        _t6  = _mm_loadu_pd(ptrB); \
        _t7  = _mm_loadu_pd(ptrB+2); \
        _t8  = _mm_loadu_pd(ptrB+4); \
        _t9  = _mm_loadu_pd(ptrB+6); \
        _t10 = _mm_load_sd(ptrB+8); \
        _tA  = _mm_unpacklo_pd(_x1, _y1); \
        _tB  = _mm_unpackhi_pd(_x1, _y1); \
        _tC  = _mm_unpacklo_pd(_z1, _x2); \
        _tD  = _mm_unpackhi_pd(_z1, _x2); \
        _tE  = _mm_unpacklo_pd(_y2, _z2); \
        _tF  = _mm_unpackhi_pd(_y2, _z2); \
        _tG  = _mm_unpacklo_pd(_x3, _y3); \
        _tH  = _mm_unpackhi_pd(_x3, _y3); \
        _tI  = _mm_unpackhi_pd(_z3, _z3); \
        _t1  = _mm_sub_pd(_t1, _tA); \
        _t2  = _mm_sub_pd(_t2, _tC); \
        _t3  = _mm_sub_pd(_t3, _tE); \
        _t4  = _mm_sub_pd(_t4, _tG); \
        _t5  = _mm_sub_sd(_t5, _z3); \
        _t6  = _mm_sub_pd(_t6, _tB); \
        _t7  = _mm_sub_pd(_t7, _tD); \
        _t8  = _mm_sub_pd(_t8, _tF); \
        _t9  = _mm_sub_pd(_t9, _tH); \
        _t10 = _mm_sub_sd(_t10, _tI); \
        _mm_storeu_pd(ptrA, _t1); \
        _mm_storeu_pd(ptrA+2, _t2); \
        _mm_storeu_pd(ptrA+4, _t3); \
        _mm_storeu_pd(ptrA+6, _t4); \
        _mm_store_sd(ptrA+8, _t5); \
        _mm_storeu_pd(ptrB, _t6); \
        _mm_storeu_pd(ptrB+2, _t7); \
        _mm_storeu_pd(ptrB+4, _t8); \
        _mm_storeu_pd(ptrB+6, _t9); \
        _mm_store_sd(ptrB+8, _t10); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_3rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3)
{
    __m128d t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    __m128d tA, tB, tC, tD, tE, tF, tG, tH, tI;

    t1  = _mm_loadu_pd(ptrA);
    t2  = _mm_loadu_pd(ptrA+2);
    t3  = _mm_loadu_pd(ptrA+4);
    t4  = _mm_loadu_pd(ptrA+6);
    t5  = _mm_load_sd(ptrA+8);
    t6  = _mm_loadu_pd(ptrB);
    t7  = _mm_loadu_pd(ptrB+2);
    t8  = _mm_loadu_pd(ptrB+4);
    t9  = _mm_loadu_pd(ptrB+6);
    t10 = _mm_load_sd(ptrB+8);

    tA = _mm_unpacklo_pd(x1, y1);
    tB = _mm_unpackhi_pd(x1, y1);
    tC = _mm_unpacklo_pd(z1, x2);
    tD = _mm_unpackhi_pd(z1, x2);
    tE = _mm_unpacklo_pd(y2, z2);
    tF = _mm_unpackhi_pd(y2, z2);
    tG = _mm_unpacklo_pd(x3, y3);
    tH = _mm_unpackhi_pd(x3, y3);
    tI = _mm_unpackhi_pd(z3, z3);

    t1  = _mm_sub_pd(t1, tA);
    t2  = _mm_sub_pd(t2, tC);
    t3  = _mm_sub_pd(t3, tE);
    t4  = _mm_sub_pd(t4, tG);
    t5  = _mm_sub_sd(t5, z3);

    t6  = _mm_sub_pd(t6, tB);
    t7  = _mm_sub_pd(t7, tD);
    t8  = _mm_sub_pd(t8, tF);
    t9  = _mm_sub_pd(t9, tH);
    t10 = _mm_sub_sd(t10, tI);

    _mm_storeu_pd(ptrA, t1);
    _mm_storeu_pd(ptrA+2, t2);
    _mm_storeu_pd(ptrA+4, t3);
    _mm_storeu_pd(ptrA+6, t4);
    _mm_store_sd(ptrA+8, t5);
    _mm_storeu_pd(ptrB, t6);
    _mm_storeu_pd(ptrB+2, t7);
    _mm_storeu_pd(ptrB+4, t8);
    _mm_storeu_pd(ptrB+6, t9);
    _mm_store_sd(ptrB+8, t10);
}
#endif

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_4rvec_2ptr_swizzle_pd(ptrA, ptrB, _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3, _x4, _y4, _z4) \
    { \
        __m128d _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10, _t11, _t12; \
        __m128d _tA, _tB, _tC, _tD, _tE, _tF, _tG, _tH, _tI, _tJ, _tK, _tL; \
        _t1  = _mm_loadu_pd(ptrA); \
        _t2  = _mm_loadu_pd(ptrA+2); \
        _t3  = _mm_loadu_pd(ptrA+4); \
        _t4  = _mm_loadu_pd(ptrA+6); \
        _t5  = _mm_loadu_pd(ptrA+8); \
        _t6  = _mm_loadu_pd(ptrA+10); \
        _t7  = _mm_loadu_pd(ptrB); \
        _t8  = _mm_loadu_pd(ptrB+2); \
        _t9  = _mm_loadu_pd(ptrB+4); \
        _t10 = _mm_loadu_pd(ptrB+6); \
        _t11 = _mm_loadu_pd(ptrB+8); \
        _t12 = _mm_loadu_pd(ptrB+10); \
        _tA  = _mm_unpacklo_pd(_x1, _y1); \
        _tB  = _mm_unpackhi_pd(_x1, _y1); \
        _tC  = _mm_unpacklo_pd(_z1, _x2); \
        _tD  = _mm_unpackhi_pd(_z1, _x2); \
        _tE  = _mm_unpacklo_pd(_y2, _z2); \
        _tF  = _mm_unpackhi_pd(_y2, _z2); \
        _tG  = _mm_unpacklo_pd(_x3, _y3); \
        _tH  = _mm_unpackhi_pd(_x3, _y3); \
        _tI  = _mm_unpacklo_pd(_z3, _x4); \
        _tJ  = _mm_unpackhi_pd(_z3, _x4); \
        _tK  = _mm_unpacklo_pd(_y4, _z4); \
        _tL  = _mm_unpackhi_pd(_y4, _z4); \
        _t1  = _mm_sub_pd(_t1, _tA); \
        _t2  = _mm_sub_pd(_t2, _tC); \
        _t3  = _mm_sub_pd(_t3, _tE); \
        _t4  = _mm_sub_pd(_t4, _tG); \
        _t5  = _mm_sub_pd(_t5, _tI); \
        _t6  = _mm_sub_pd(_t6, _tK); \
        _t7  = _mm_sub_pd(_t7, _tB); \
        _t8  = _mm_sub_pd(_t8, _tD); \
        _t9  = _mm_sub_pd(_t9, _tF); \
        _t10 = _mm_sub_pd(_t10, _tH); \
        _t11 = _mm_sub_pd(_t11, _tJ); \
        _t12 = _mm_sub_pd(_t12, _tL); \
        _mm_storeu_pd(ptrA, _t1); \
        _mm_storeu_pd(ptrA+2, _t2); \
        _mm_storeu_pd(ptrA+4, _t3); \
        _mm_storeu_pd(ptrA+6, _t4); \
        _mm_storeu_pd(ptrA+8, _t5); \
        _mm_storeu_pd(ptrA+10, _t6); \
        _mm_storeu_pd(ptrB, _t7); \
        _mm_storeu_pd(ptrB+2, _t8); \
        _mm_storeu_pd(ptrB+4, _t9); \
        _mm_storeu_pd(ptrB+6, _t10); \
        _mm_storeu_pd(ptrB+8, _t11); \
        _mm_storeu_pd(ptrB+10, _t12); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_4rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3,
                                       __m128d x4, __m128d y4, __m128d z4)
{
    __m128d t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
    __m128d tA, tB, tC, tD, tE, tF, tG, tH, tI, tJ, tK, tL;

    t1  = _mm_loadu_pd(ptrA);
    t2  = _mm_loadu_pd(ptrA+2);
    t3  = _mm_loadu_pd(ptrA+4);
    t4  = _mm_loadu_pd(ptrA+6);
    t5  = _mm_loadu_pd(ptrA+8);
    t6  = _mm_loadu_pd(ptrA+10);
    t7  = _mm_loadu_pd(ptrB);
    t8  = _mm_loadu_pd(ptrB+2);
    t9  = _mm_loadu_pd(ptrB+4);
    t10 = _mm_loadu_pd(ptrB+6);
    t11 = _mm_loadu_pd(ptrB+8);
    t12 = _mm_loadu_pd(ptrB+10);

    tA = _mm_unpacklo_pd(x1, y1);
    tB = _mm_unpackhi_pd(x1, y1);
    tC = _mm_unpacklo_pd(z1, x2);
    tD = _mm_unpackhi_pd(z1, x2);
    tE = _mm_unpacklo_pd(y2, z2);
    tF = _mm_unpackhi_pd(y2, z2);
    tG = _mm_unpacklo_pd(x3, y3);
    tH = _mm_unpackhi_pd(x3, y3);
    tI = _mm_unpacklo_pd(z3, x4);
    tJ = _mm_unpackhi_pd(z3, x4);
    tK = _mm_unpacklo_pd(y4, z4);
    tL = _mm_unpackhi_pd(y4, z4);

    t1  = _mm_sub_pd(t1, tA);
    t2  = _mm_sub_pd(t2, tC);
    t3  = _mm_sub_pd(t3, tE);
    t4  = _mm_sub_pd(t4, tG);
    t5  = _mm_sub_pd(t5, tI);
    t6  = _mm_sub_pd(t6, tK);

    t7  = _mm_sub_pd(t7, tB);
    t8  = _mm_sub_pd(t8, tD);
    t9  = _mm_sub_pd(t9, tF);
    t10 = _mm_sub_pd(t10, tH);
    t11 = _mm_sub_pd(t11, tJ);
    t12 = _mm_sub_pd(t12, tL);

    _mm_storeu_pd(ptrA, t1);
    _mm_storeu_pd(ptrA+2, t2);
    _mm_storeu_pd(ptrA+4, t3);
    _mm_storeu_pd(ptrA+6, t4);
    _mm_storeu_pd(ptrA+8, t5);
    _mm_storeu_pd(ptrA+10, t6);
    _mm_storeu_pd(ptrB, t7);
    _mm_storeu_pd(ptrB+2, t8);
    _mm_storeu_pd(ptrB+4, t9);
    _mm_storeu_pd(ptrB+6, t10);
    _mm_storeu_pd(ptrB+8, t11);
    _mm_storeu_pd(ptrB+10, t12);
}
#endif

static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    fix1 = _mm_hadd_pd(fix1, fiy1);
    fiz1 = _mm_hadd_pd(fiz1, fiz1);

    _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr), fix1 ));
    _mm_store_sd( fptr+2, _mm_add_sd( _mm_load_sd(fptr+2), fiz1 ));

    _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 ));
    _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}

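/*
 * Illustrative sketch (not part of the original GROMACS API): fold i-particle
 * force accumulators into the force and shift-force arrays after the j-loop.
 * The accumulation shown here is trivial and all names are hypothetical.
 */
static gmx_inline void
gmx_mm_example_finish_iforce(__m128d fjx, __m128d fjy, __m128d fjz,
                             double * gmx_restrict f,
                             double * gmx_restrict fshift)
{
    __m128d fix = _mm_setzero_pd();
    __m128d fiy = _mm_setzero_pd();
    __m128d fiz = _mm_setzero_pd();

    /* In a real kernel these adds would run once per pair of j-particles */
    fix = _mm_add_pd(fix, fjx);
    fiy = _mm_add_pd(fiy, fjy);
    fiz = _mm_add_pd(fiz, fjz);

    /* Reduce both SIMD lanes and add the totals to f[] and fshift[] */
    gmx_mm_update_iforce_1atom_swizzle_pd(fix, fiy, fiz, f, fshift);
}
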
#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_3atom_swizzle_pd(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, \
                                              fptr, fshiftptr) \
    { \
        __m128d _t1, _t2; \
        fix1 = _mm_hadd_pd(fix1, fiy1); \
        fiz1 = _mm_hadd_pd(fiz1, fix2); \
        fiy2 = _mm_hadd_pd(fiy2, fiz2); \
        fix3 = _mm_hadd_pd(fix3, fiy3); \
        fiz3 = _mm_hadd_pd(fiz3, fiz3); \
        _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr), fix1 )); \
        _mm_storeu_pd( fptr+2, _mm_add_pd( _mm_loadu_pd(fptr+2), fiz1 )); \
        _mm_storeu_pd( fptr+4, _mm_add_pd( _mm_loadu_pd(fptr+4), fiy2 )); \
        _mm_storeu_pd( fptr+6, _mm_add_pd( _mm_loadu_pd(fptr+6), fix3 )); \
        _mm_store_sd( fptr+8, _mm_add_sd( _mm_load_sd(fptr+8), fiz3 )); \
        fix1 = _mm_add_pd(fix1, fix3); \
        _t1 = _mm_shuffle_pd(fiz1, fiy2, _MM_SHUFFLE2(0, 1)); \
        fix1 = _mm_add_pd(fix1, _t1); \
        _t2 = _mm_shuffle_pd(fiy2, fiy2, _MM_SHUFFLE2(1, 1)); \
        fiz1 = _mm_add_sd(fiz1, fiz3); \
        fiz1 = _mm_add_sd(fiz1, _t2); \
        _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 )); \
        _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 )); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      __m128d fix2, __m128d fiy2, __m128d fiz2,
                                      __m128d fix3, __m128d fiy3, __m128d fiz3,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    __m128d t1, t2;

    fix1 = _mm_hadd_pd(fix1, fiy1);
    fiz1 = _mm_hadd_pd(fiz1, fix2);
    fiy2 = _mm_hadd_pd(fiy2, fiz2);
    fix3 = _mm_hadd_pd(fix3, fiy3);
    fiz3 = _mm_hadd_pd(fiz3, fiz3);

    _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr), fix1 ));
    _mm_storeu_pd( fptr+2, _mm_add_pd( _mm_loadu_pd(fptr+2), fiz1 ));
    _mm_storeu_pd( fptr+4, _mm_add_pd( _mm_loadu_pd(fptr+4), fiy2 ));
    _mm_storeu_pd( fptr+6, _mm_add_pd( _mm_loadu_pd(fptr+6), fix3 ));
    _mm_store_sd( fptr+8, _mm_add_sd( _mm_load_sd(fptr+8), fiz3 ));

    fix1 = _mm_add_pd(fix1, fix3);
    t1   = _mm_shuffle_pd(fiz1, fiy2, _MM_SHUFFLE2(0, 1));
    fix1 = _mm_add_pd(fix1, t1); /* x and y sums */

    t2   = _mm_shuffle_pd(fiy2, fiy2, _MM_SHUFFLE2(1, 1));
    fiz1 = _mm_add_sd(fiz1, fiz3);
    fiz1 = _mm_add_sd(fiz1, t2); /* z sum */

    _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 ));
    _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}
#endif

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_4atom_swizzle_pd(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, fix4, fiy4, fiz4, \
                                              fptr, fshiftptr) \
    { \
        __m128d _t1, _t2; \
        fix1 = _mm_hadd_pd(fix1, fiy1); \
        fiz1 = _mm_hadd_pd(fiz1, fix2); \
        fiy2 = _mm_hadd_pd(fiy2, fiz2); \
        fix3 = _mm_hadd_pd(fix3, fiy3); \
        fiz3 = _mm_hadd_pd(fiz3, fix4); \
        fiy4 = _mm_hadd_pd(fiy4, fiz4); \
        _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr), fix1 )); \
        _mm_storeu_pd( fptr+2, _mm_add_pd( _mm_loadu_pd(fptr+2), fiz1 )); \
        _mm_storeu_pd( fptr+4, _mm_add_pd( _mm_loadu_pd(fptr+4), fiy2 )); \
        _mm_storeu_pd( fptr+6, _mm_add_pd( _mm_loadu_pd(fptr+6), fix3 )); \
        _mm_storeu_pd( fptr+8, _mm_add_pd( _mm_loadu_pd(fptr+8), fiz3 )); \
        _mm_storeu_pd( fptr+10, _mm_add_pd( _mm_loadu_pd(fptr+10), fiy4 )); \
        _t1 = _mm_shuffle_pd(fiz1, fiy2, _MM_SHUFFLE2(0, 1)); \
        fix1 = _mm_add_pd(fix1, _t1); \
        _t2 = _mm_shuffle_pd(fiz3, fiy4, _MM_SHUFFLE2(0, 1)); \
        fix3 = _mm_add_pd(fix3, _t2); \
        fix1 = _mm_add_pd(fix1, fix3); \
        fiz1 = _mm_add_sd(fiz1, _mm_unpackhi_pd(fiy2, fiy2)); \
        fiz3 = _mm_add_sd(fiz3, _mm_unpackhi_pd(fiy4, fiy4)); \
        fiz1 = _mm_add_sd(fiz1, fiz3); \
        _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 )); \
        _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 )); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      __m128d fix2, __m128d fiy2, __m128d fiz2,
                                      __m128d fix3, __m128d fiy3, __m128d fiz3,
                                      __m128d fix4, __m128d fiy4, __m128d fiz4,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    __m128d t1, t2;

    fix1 = _mm_hadd_pd(fix1, fiy1);
    fiz1 = _mm_hadd_pd(fiz1, fix2);
    fiy2 = _mm_hadd_pd(fiy2, fiz2);
    fix3 = _mm_hadd_pd(fix3, fiy3);
    fiz3 = _mm_hadd_pd(fiz3, fix4);
    fiy4 = _mm_hadd_pd(fiy4, fiz4);

    _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr), fix1 ));
    _mm_storeu_pd( fptr+2, _mm_add_pd( _mm_loadu_pd(fptr+2), fiz1 ));
    _mm_storeu_pd( fptr+4, _mm_add_pd( _mm_loadu_pd(fptr+4), fiy2 ));
    _mm_storeu_pd( fptr+6, _mm_add_pd( _mm_loadu_pd(fptr+6), fix3 ));
    _mm_storeu_pd( fptr+8, _mm_add_pd( _mm_loadu_pd(fptr+8), fiz3 ));
    _mm_storeu_pd( fptr+10, _mm_add_pd( _mm_loadu_pd(fptr+10), fiy4 ));

    t1   = _mm_shuffle_pd(fiz1, fiy2, _MM_SHUFFLE2(0, 1));
    fix1 = _mm_add_pd(fix1, t1);
    t2   = _mm_shuffle_pd(fiz3, fiy4, _MM_SHUFFLE2(0, 1));
    fix3 = _mm_add_pd(fix3, t2);
    fix1 = _mm_add_pd(fix1, fix3); /* x and y sums */

    fiz1 = _mm_add_sd(fiz1, _mm_unpackhi_pd(fiy2, fiy2));
    fiz3 = _mm_add_sd(fiz3, _mm_unpackhi_pd(fiy4, fiy4));
    fiz1 = _mm_add_sd(fiz1, fiz3); /* z sum */

    _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 ));
    _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}
#endif

static gmx_inline void
gmx_mm_update_1pot_pd(__m128d pot1, double * gmx_restrict ptrA)
{
    pot1 = _mm_hadd_pd(pot1, pot1);
    _mm_store_sd(ptrA, _mm_add_sd(pot1, _mm_load_sd(ptrA)));
}

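/*
 * Illustrative sketch (not part of the original GROMACS API): accumulate a
 * stream of per-pair energies two at a time and reduce the total into a
 * single energy slot with the helper above. All names are hypothetical and
 * npairs is assumed to be even.
 */
static gmx_inline void
gmx_mm_example_reduce_energy(const double * gmx_restrict pair_energies,
                             int npairs,
                             double * gmx_restrict venergy)
{
    __m128d vsum = _mm_setzero_pd();
    int     i;

    for (i = 0; i < npairs; i += 2)
    {
        vsum = _mm_add_pd(vsum, _mm_loadu_pd(pair_energies+i));
    }

    /* Horizontal add of the two lanes, then add the result to *venergy */
    gmx_mm_update_1pot_pd(vsum, venergy);
}
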
static gmx_inline void
gmx_mm_update_2pot_pd(__m128d pot1, double * gmx_restrict ptrA,
                      __m128d pot2, double * gmx_restrict ptrB)
{
    pot1 = _mm_hadd_pd(pot1, pot2);
    pot2 = _mm_unpackhi_pd(pot1, pot1);

    _mm_store_sd(ptrA, _mm_add_sd(pot1, _mm_load_sd(ptrA)));
    _mm_store_sd(ptrB, _mm_add_sd(pot2, _mm_load_sd(ptrB)));
}

#endif /* _kernelutil_x86_avx_128_fma_double_h_ */