/*
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 * Copyright (c) 2011-2012, The GROMACS Development Team
 *
 * Gromacs is a library for molecular simulation and trajectory analysis,
 * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 * a full list of developers and information, check out http://www.gromacs.org
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) any
 * later version.
 * As a special exception, you may use this file as part of a free software
 * library without restriction.  Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU Lesser General Public License.
 *
 * In plain-speak: do not worry about classes/macros/templates either - only
 * changes to the library have to be LGPL, not an application linking with it.
 *
 * To help fund GROMACS development, we humbly ask that you cite
 * the papers people have written on it - you can find them on the website!
 */
#ifndef _kernelutil_x86_avx_128_fma_double_h_
#define _kernelutil_x86_avx_128_fma_double_h_

#include "gmx_x86_avx_128_fma.h"


static int
gmx_mm_any_lt(__m128d a, __m128d b)
{
    return _mm_movemask_pd(_mm_cmplt_pd(a,b));
}


static gmx_inline __m128d
gmx_mm_calc_rsq_pd(__m128d dx, __m128d dy, __m128d dz)
{
    return _mm_macc_pd(dx,dx,_mm_macc_pd(dy,dy,_mm_mul_pd(dz,dz)));
}
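
/* Illustrative sketch (not part of the original kernels): given broadcast
 * i-particle coordinates and the coordinates of two j particles, one per SIMD
 * lane, the squared distances for both interactions follow directly from the
 * FMA helper above. All variable names here are hypothetical.
 */
static gmx_inline __m128d
gmx_mm_example_rsq_pd(__m128d ix, __m128d iy, __m128d iz,
                      __m128d jx, __m128d jy, __m128d jz)
{
    __m128d dx,dy,dz;

    /* Component-wise distance vectors for the two i-j pairs */
    dx = _mm_sub_pd(ix,jx);
    dy = _mm_sub_pd(iy,jy);
    dz = _mm_sub_pd(iz,jz);

    /* dx*dx + dy*dy + dz*dz, using the FMA-based helper */
    return gmx_mm_calc_rsq_pd(dx,dy,dz);
}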

/* Normal sum of four xmm registers */
#define gmx_mm_sum4_pd(t0,t1,t2,t3)  _mm_add_pd(_mm_add_pd(t0,t1),_mm_add_pd(t2,t3))


/* Load a double value from 1-2 places, merge into xmm register */


static __m128d
gmx_mm_load_2real_swizzle_pd(const double * gmx_restrict ptrA,
                             const double * gmx_restrict ptrB)
{
    return _mm_unpacklo_pd(_mm_load_sd(ptrA),_mm_load_sd(ptrB));
}

static __m128d
gmx_mm_load_1real_pd(const double * gmx_restrict ptrA)
{
    return _mm_load_sd(ptrA);
}


static void
gmx_mm_store_2real_swizzle_pd(double * gmx_restrict ptrA,
                              double * gmx_restrict ptrB,
                              __m128d xmm1)
{
    __m128d t2;

    t2       = _mm_unpackhi_pd(xmm1,xmm1);
    _mm_store_sd(ptrA,xmm1);
    _mm_store_sd(ptrB,t2);
}

static void
gmx_mm_store_1real_pd(double * gmx_restrict ptrA, __m128d xmm1)
{
    _mm_store_sd(ptrA,xmm1);
}


/* Similar to store, but increments value in memory */
static void
gmx_mm_increment_2real_swizzle_pd(double * gmx_restrict ptrA,
                                  double * gmx_restrict ptrB, __m128d xmm1)
{
    __m128d t1;

    t1   = _mm_unpackhi_pd(xmm1,xmm1);
    xmm1 = _mm_add_sd(xmm1,_mm_load_sd(ptrA));
    t1   = _mm_add_sd(t1,_mm_load_sd(ptrB));
    _mm_store_sd(ptrA,xmm1);
    _mm_store_sd(ptrB,t1);
}

static void
gmx_mm_increment_1real_pd(double * gmx_restrict ptrA, __m128d xmm1)
{
    __m128d tmp;

    tmp = gmx_mm_load_1real_pd(ptrA);
    tmp = _mm_add_sd(tmp,xmm1);
    gmx_mm_store_1real_pd(ptrA,tmp);
}
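
/* Illustrative sketch (not part of the original kernels): load the charges of
 * two j particles into one register, multiply by a broadcast i-particle
 * charge, and accumulate the two lane results back to memory with the
 * increment helper. The names charge, jnrA, jnrB, outA and outB are
 * hypothetical.
 */
static gmx_inline void
gmx_mm_example_charge_product_pd(const double * gmx_restrict charge,
                                 int jnrA, int jnrB,
                                 __m128d iq,
                                 double * gmx_restrict outA,
                                 double * gmx_restrict outB)
{
    __m128d jq,qq;

    /* Low lane: charge[jnrA], high lane: charge[jnrB] */
    jq = gmx_mm_load_2real_swizzle_pd(charge+jnrA,charge+jnrB);
    qq = _mm_mul_pd(iq,jq);

    /* Add the two lane products to the two output locations */
    gmx_mm_increment_2real_swizzle_pd(outA,outB,qq);
}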



static gmx_inline void
gmx_mm_load_2pair_swizzle_pd(const double * gmx_restrict p1,
                             const double * gmx_restrict p2,
                             __m128d * gmx_restrict c6,
                             __m128d * gmx_restrict c12)
{
    __m128d t1,t2;

    /* c6/c12 are stored as interleaved pairs; unaligned loads, so no alignment is required */
    t1   = _mm_loadu_pd(p1);
    t2   = _mm_loadu_pd(p2);
    *c6  = _mm_unpacklo_pd(t1,t2);
    *c12 = _mm_unpackhi_pd(t1,t2);
}

static gmx_inline void
gmx_mm_load_1pair_swizzle_pd(const double * gmx_restrict p1,
                             __m128d * gmx_restrict c6,
                             __m128d * gmx_restrict c12)
{
    *c6     = _mm_load_sd(p1);
    *c12    = _mm_load_sd(p1+1);
}
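
/* Illustrative sketch (not part of the original kernels): fetch C6/C12
 * Lennard-Jones parameters for two i-j pairs from an interleaved parameter
 * table and evaluate V = C12/r^12 - C6/r^6 for both pairs from 1/r^2. The
 * table layout and the names vdwparam, offsetA and offsetB are hypothetical.
 */
static gmx_inline __m128d
gmx_mm_example_lj_pot_pd(const double * gmx_restrict vdwparam,
                         int offsetA, int offsetB,
                         __m128d rinvsq)
{
    __m128d c6,c12,rinvsix,vvdw6,vvdw12;

    gmx_mm_load_2pair_swizzle_pd(vdwparam+offsetA,vdwparam+offsetB,&c6,&c12);

    rinvsix = _mm_mul_pd(_mm_mul_pd(rinvsq,rinvsq),rinvsq);
    vvdw6   = _mm_mul_pd(c6,rinvsix);
    vvdw12  = _mm_mul_pd(c12,_mm_mul_pd(rinvsix,rinvsix));

    /* Repulsion minus dispersion, one value per i-j pair */
    return _mm_sub_pd(vvdw12,vvdw6);
}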


static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1,
                                         __m128d * gmx_restrict y1,
                                         __m128d * gmx_restrict z1)
{
    __m128d mem_xy,mem_z,mem_sxy,mem_sz;

    mem_xy  = _mm_loadu_pd(xyz);
    mem_z   = _mm_load_sd(xyz+2);
    mem_sxy = _mm_loadu_pd(xyz_shift);
    mem_sz  = _mm_load_sd(xyz_shift+2);

    mem_xy  = _mm_add_pd(mem_xy,mem_sxy);
    mem_z   = _mm_add_pd(mem_z,mem_sz);

    *x1  = _mm_shuffle_pd(mem_xy,mem_xy,_MM_SHUFFLE2(0,0));
    *y1  = _mm_shuffle_pd(mem_xy,mem_xy,_MM_SHUFFLE2(1,1));
    *z1  = _mm_shuffle_pd(mem_z,mem_z,_MM_SHUFFLE2(0,0));
}


static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                         __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                         __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    __m128d t1,t2,t3,t4,t5,sxy,sz,szx,syz;

    t1  = _mm_loadu_pd(xyz);
    t2  = _mm_loadu_pd(xyz+2);
    t3  = _mm_loadu_pd(xyz+4);
    t4  = _mm_loadu_pd(xyz+6);
    t5  = _mm_load_sd(xyz+8);

    sxy = _mm_loadu_pd(xyz_shift);
    sz  = _mm_load_sd(xyz_shift+2);
    szx = _mm_shuffle_pd(sz,sxy,_MM_SHUFFLE2(0,0));
    syz = _mm_shuffle_pd(sxy,sz,_MM_SHUFFLE2(0,1));

    t1  = _mm_add_pd(t1,sxy);
    t2  = _mm_add_pd(t2,szx);
    t3  = _mm_add_pd(t3,syz);
    t4  = _mm_add_pd(t4,sxy);
    t5  = _mm_add_sd(t5,sz);

    *x1  = _mm_shuffle_pd(t1,t1,_MM_SHUFFLE2(0,0));
    *y1  = _mm_shuffle_pd(t1,t1,_MM_SHUFFLE2(1,1));
    *z1  = _mm_shuffle_pd(t2,t2,_MM_SHUFFLE2(0,0));
    *x2  = _mm_shuffle_pd(t2,t2,_MM_SHUFFLE2(1,1));
    *y2  = _mm_shuffle_pd(t3,t3,_MM_SHUFFLE2(0,0));
    *z2  = _mm_shuffle_pd(t3,t3,_MM_SHUFFLE2(1,1));
    *x3  = _mm_shuffle_pd(t4,t4,_MM_SHUFFLE2(0,0));
    *y3  = _mm_shuffle_pd(t4,t4,_MM_SHUFFLE2(1,1));
    *z3  = _mm_shuffle_pd(t5,t5,_MM_SHUFFLE2(0,0));
}


static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                         __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                         __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                         __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    __m128d t1,t2,t3,t4,t5,t6,sxy,sz,szx,syz;

    t1  = _mm_loadu_pd(xyz);
    t2  = _mm_loadu_pd(xyz+2);
    t3  = _mm_loadu_pd(xyz+4);
    t4  = _mm_loadu_pd(xyz+6);
    t5  = _mm_loadu_pd(xyz+8);
    t6  = _mm_loadu_pd(xyz+10);

    sxy = _mm_loadu_pd(xyz_shift);
    sz  = _mm_load_sd(xyz_shift+2);
    szx = _mm_shuffle_pd(sz,sxy,_MM_SHUFFLE2(0,0));
    syz = _mm_shuffle_pd(sxy,sz,_MM_SHUFFLE2(0,1));

    t1  = _mm_add_pd(t1,sxy);
    t2  = _mm_add_pd(t2,szx);
    t3  = _mm_add_pd(t3,syz);
    t4  = _mm_add_pd(t4,sxy);
    t5  = _mm_add_pd(t5,szx);
    t6  = _mm_add_pd(t6,syz);

    *x1  = _mm_shuffle_pd(t1,t1,_MM_SHUFFLE2(0,0));
    *y1  = _mm_shuffle_pd(t1,t1,_MM_SHUFFLE2(1,1));
    *z1  = _mm_shuffle_pd(t2,t2,_MM_SHUFFLE2(0,0));
    *x2  = _mm_shuffle_pd(t2,t2,_MM_SHUFFLE2(1,1));
    *y2  = _mm_shuffle_pd(t3,t3,_MM_SHUFFLE2(0,0));
    *z2  = _mm_shuffle_pd(t3,t3,_MM_SHUFFLE2(1,1));
    *x3  = _mm_shuffle_pd(t4,t4,_MM_SHUFFLE2(0,0));
    *y3  = _mm_shuffle_pd(t4,t4,_MM_SHUFFLE2(1,1));
    *z3  = _mm_shuffle_pd(t5,t5,_MM_SHUFFLE2(0,0));
    *x4  = _mm_shuffle_pd(t5,t5,_MM_SHUFFLE2(1,1));
    *y4  = _mm_shuffle_pd(t6,t6,_MM_SHUFFLE2(0,0));
    *z4  = _mm_shuffle_pd(t6,t6,_MM_SHUFFLE2(1,1));
}



static gmx_inline void
gmx_mm_load_1rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x, __m128d * gmx_restrict y, __m128d * gmx_restrict z)
{
    *x            = _mm_load_sd(p1);
    *y            = _mm_load_sd(p1+1);
    *z            = _mm_load_sd(p1+2);
}

static gmx_inline void
gmx_mm_load_3rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    *x1            = _mm_load_sd(p1);
    *y1            = _mm_load_sd(p1+1);
    *z1            = _mm_load_sd(p1+2);
    *x2            = _mm_load_sd(p1+3);
    *y2            = _mm_load_sd(p1+4);
    *z2            = _mm_load_sd(p1+5);
    *x3            = _mm_load_sd(p1+6);
    *y3            = _mm_load_sd(p1+7);
    *z3            = _mm_load_sd(p1+8);
}

static gmx_inline void
gmx_mm_load_4rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                  __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    *x1            = _mm_load_sd(p1);
    *y1            = _mm_load_sd(p1+1);
    *z1            = _mm_load_sd(p1+2);
    *x2            = _mm_load_sd(p1+3);
    *y2            = _mm_load_sd(p1+4);
    *z2            = _mm_load_sd(p1+5);
    *x3            = _mm_load_sd(p1+6);
    *y3            = _mm_load_sd(p1+7);
    *z3            = _mm_load_sd(p1+8);
    *x4            = _mm_load_sd(p1+9);
    *y4            = _mm_load_sd(p1+10);
    *z4            = _mm_load_sd(p1+11);
}


static gmx_inline void
gmx_mm_load_1rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA,
                                  const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1)
{
    __m128d t1,t2,t3,t4;
    t1           = _mm_loadu_pd(ptrA);
    t2           = _mm_loadu_pd(ptrB);
    t3           = _mm_load_sd(ptrA+2);
    t4           = _mm_load_sd(ptrB+2);
    GMX_MM_TRANSPOSE2_PD(t1,t2);
    *x1          = t1;
    *y1          = t2;
    *z1          = _mm_unpacklo_pd(t3,t4);
}
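
/* Illustrative sketch (not part of the original kernels): load the coordinates
 * of two j particles (one per SIMD lane), form the distance vectors to a
 * broadcast i particle, and return the two squared distances. The names xjA
 * and xjB are hypothetical.
 */
static gmx_inline __m128d
gmx_mm_example_pair_rsq_pd(const double * gmx_restrict xjA,
                           const double * gmx_restrict xjB,
                           __m128d ix, __m128d iy, __m128d iz)
{
    __m128d jx,jy,jz;

    gmx_mm_load_1rvec_2ptr_swizzle_pd(xjA,xjB,&jx,&jy,&jz);

    return gmx_mm_calc_rsq_pd(_mm_sub_pd(ix,jx),
                              _mm_sub_pd(iy,jy),
                              _mm_sub_pd(iz,jz));
}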

static gmx_inline void
gmx_mm_load_3rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA, const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    __m128d t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
    t1           = _mm_loadu_pd(ptrA);
    t2           = _mm_loadu_pd(ptrB);
    t3           = _mm_loadu_pd(ptrA+2);
    t4           = _mm_loadu_pd(ptrB+2);
    t5           = _mm_loadu_pd(ptrA+4);
    t6           = _mm_loadu_pd(ptrB+4);
    t7           = _mm_loadu_pd(ptrA+6);
    t8           = _mm_loadu_pd(ptrB+6);
    t9           = _mm_load_sd(ptrA+8);
    t10          = _mm_load_sd(ptrB+8);
    GMX_MM_TRANSPOSE2_PD(t1,t2);
    GMX_MM_TRANSPOSE2_PD(t3,t4);
    GMX_MM_TRANSPOSE2_PD(t5,t6);
    GMX_MM_TRANSPOSE2_PD(t7,t8);
    *x1          = t1;
    *y1          = t2;
    *z1          = t3;
    *x2          = t4;
    *y2          = t5;
    *z2          = t6;
    *x3          = t7;
    *y3          = t8;
    *z3          = _mm_unpacklo_pd(t9,t10);
}


static gmx_inline void
gmx_mm_load_4rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA, const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                  __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    __m128d t1,t2,t3,t4,t5,t6;
    t1           = _mm_loadu_pd(ptrA);
    t2           = _mm_loadu_pd(ptrB);
    t3           = _mm_loadu_pd(ptrA+2);
    t4           = _mm_loadu_pd(ptrB+2);
    t5           = _mm_loadu_pd(ptrA+4);
    t6           = _mm_loadu_pd(ptrB+4);
    GMX_MM_TRANSPOSE2_PD(t1,t2);
    GMX_MM_TRANSPOSE2_PD(t3,t4);
    GMX_MM_TRANSPOSE2_PD(t5,t6);
    *x1          = t1;
    *y1          = t2;
    *z1          = t3;
    *x2          = t4;
    *y2          = t5;
    *z2          = t6;
    t1           = _mm_loadu_pd(ptrA+6);
    t2           = _mm_loadu_pd(ptrB+6);
    t3           = _mm_loadu_pd(ptrA+8);
    t4           = _mm_loadu_pd(ptrB+8);
    t5           = _mm_loadu_pd(ptrA+10);
    t6           = _mm_loadu_pd(ptrB+10);
    GMX_MM_TRANSPOSE2_PD(t1,t2);
    GMX_MM_TRANSPOSE2_PD(t3,t4);
    GMX_MM_TRANSPOSE2_PD(t5,t6);
    *x3          = t1;
    *y3          = t2;
    *z3          = t3;
    *x4          = t4;
    *y4          = t5;
    *z4          = t6;
}


/* Routines to decrement rvec in memory, typically used for j particle force updates */
static void
gmx_mm_decrement_1rvec_1ptr_noswizzle_pd(double * gmx_restrict ptrA,
                                         __m128d xy, __m128d z)
{
    __m128d t1,t2;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_load_sd(ptrA+2);

    t1 = _mm_sub_pd(t1,xy);
    t2 = _mm_sub_sd(t2,z);

    _mm_storeu_pd(ptrA,t1);
    _mm_store_sd(ptrA+2,t2);
}
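
/* Illustrative sketch (not part of the original kernels): subtract the force
 * components of a single j particle from the force array, packing x and y
 * into the xy-pair layout expected by the noswizzle routine above. The names
 * f and j_coord_offset are hypothetical.
 */
static gmx_inline void
gmx_mm_example_subtract_jforce_pd(double * gmx_restrict f,
                                  int j_coord_offset,
                                  __m128d fjx, __m128d fjy, __m128d fjz)
{
    /* Pack the x and y components (low elements) into one xy pair */
    __m128d fjxy = _mm_unpacklo_pd(fjx,fjy);

    gmx_mm_decrement_1rvec_1ptr_noswizzle_pd(f+j_coord_offset,fjxy,fjz);
}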


static void
gmx_mm_decrement_3rvec_1ptr_noswizzle_pd(double * gmx_restrict ptrA,
                                         __m128d xy1, __m128d z1,
                                         __m128d xy2, __m128d z2,
                                         __m128d xy3, __m128d z3)
{
    __m128d t1,t2;
    __m128d tA,tB,tC,tD,tE;

    tA   = _mm_loadu_pd(ptrA);
    tB   = _mm_loadu_pd(ptrA+2);
    tC   = _mm_loadu_pd(ptrA+4);
    tD   = _mm_loadu_pd(ptrA+6);
    tE   = _mm_load_sd(ptrA+8);

    /* xy1: y1 x1 */
    t1   = _mm_shuffle_pd(z1,xy2,_MM_SHUFFLE2(0,1)); /* x2 z1 */
    t2   = _mm_shuffle_pd(xy2,z2,_MM_SHUFFLE2(0,1)); /* z2 y2 */
    /* xy3: y3 x3 */

    tA   = _mm_sub_pd(tA,xy1);
    tB   = _mm_sub_pd(tB,t1);
    tC   = _mm_sub_pd(tC,t2);
    tD   = _mm_sub_pd(tD,xy3);
    tE   = _mm_sub_sd(tE,z3);

    _mm_storeu_pd(ptrA,tA);
    _mm_storeu_pd(ptrA+2,tB);
    _mm_storeu_pd(ptrA+4,tC);
    _mm_storeu_pd(ptrA+6,tD);
    _mm_store_sd(ptrA+8,tE);
}

static void
gmx_mm_decrement_4rvec_1ptr_noswizzle_pd(double * gmx_restrict ptrA,
                                         __m128d xy1, __m128d z1,
                                         __m128d xy2, __m128d z2,
                                         __m128d xy3, __m128d z3,
                                         __m128d xy4, __m128d z4)
{
    __m128d t1,t2,t3,t4;
    __m128d tA,tB,tC,tD,tE,tF;

    tA   = _mm_loadu_pd(ptrA);
    tB   = _mm_loadu_pd(ptrA+2);
    tC   = _mm_loadu_pd(ptrA+4);
    tD   = _mm_loadu_pd(ptrA+6);
    tE   = _mm_loadu_pd(ptrA+8);
    tF   = _mm_loadu_pd(ptrA+10);

    /* xy1: y1 x1 */
    t1   = _mm_shuffle_pd(z1,xy2,_MM_SHUFFLE2(0,0)); /* x2 z1 */
    t2   = _mm_shuffle_pd(xy2,z2,_MM_SHUFFLE2(0,1)); /* z2 y2 */
    /* xy3: y3 x3 */
    t3   = _mm_shuffle_pd(z3,xy4,_MM_SHUFFLE2(0,0)); /* x4 z3 */
    t4   = _mm_shuffle_pd(xy4,z4,_MM_SHUFFLE2(0,1)); /* z4 y4 */

    tA   = _mm_sub_pd(tA,xy1);
    tB   = _mm_sub_pd(tB,t1);
    tC   = _mm_sub_pd(tC,t2);
    tD   = _mm_sub_pd(tD,xy3);
    tE   = _mm_sub_pd(tE,t3);
    tF   = _mm_sub_pd(tF,t4);

    _mm_storeu_pd(ptrA,tA);
    _mm_storeu_pd(ptrA+2,tB);
    _mm_storeu_pd(ptrA+4,tC);
    _mm_storeu_pd(ptrA+6,tD);
    _mm_storeu_pd(ptrA+8,tE);
    _mm_storeu_pd(ptrA+10,tF);
}


static void
gmx_mm_decrement_1rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1)
{
    __m128d t1,t2,t3;

    t1           = _mm_load_sd(ptrA);
    t2           = _mm_load_sd(ptrA+1);
    t3           = _mm_load_sd(ptrA+2);

    t1           = _mm_sub_sd(t1,x1);
    t2           = _mm_sub_sd(t2,y1);
    t3           = _mm_sub_sd(t3,z1);
    _mm_store_sd(ptrA,t1);
    _mm_store_sd(ptrA+1,t2);
    _mm_store_sd(ptrA+2,t3);
}


static void
gmx_mm_decrement_3rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3)
{
    __m128d t1,t2,t3,t4,t5;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_loadu_pd(ptrA+2);
    t3          = _mm_loadu_pd(ptrA+4);
    t4          = _mm_loadu_pd(ptrA+6);
    t5          = _mm_load_sd(ptrA+8);

    x1          = _mm_unpacklo_pd(x1,y1);
    z1          = _mm_unpacklo_pd(z1,x2);
    y2          = _mm_unpacklo_pd(y2,z2);
    x3          = _mm_unpacklo_pd(x3,y3);
    /* nothing to be done for z3 */

    t1          = _mm_sub_pd(t1,x1);
    t2          = _mm_sub_pd(t2,z1);
    t3          = _mm_sub_pd(t3,y2);
    t4          = _mm_sub_pd(t4,x3);
    t5          = _mm_sub_sd(t5,z3);
    _mm_storeu_pd(ptrA,t1);
    _mm_storeu_pd(ptrA+2,t2);
    _mm_storeu_pd(ptrA+4,t3);
    _mm_storeu_pd(ptrA+6,t4);
    _mm_store_sd(ptrA+8,t5);
}


static void
gmx_mm_decrement_4rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3,
                                       __m128d x4, __m128d y4, __m128d z4)
{
    __m128d t1,t2,t3,t4,t5,t6;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_loadu_pd(ptrA+2);
    t3          = _mm_loadu_pd(ptrA+4);
    t4          = _mm_loadu_pd(ptrA+6);
    t5          = _mm_loadu_pd(ptrA+8);
    t6          = _mm_loadu_pd(ptrA+10);

    x1          = _mm_unpacklo_pd(x1,y1);
    z1          = _mm_unpacklo_pd(z1,x2);
    y2          = _mm_unpacklo_pd(y2,z2);
    x3          = _mm_unpacklo_pd(x3,y3);
    z3          = _mm_unpacklo_pd(z3,x4);
    y4          = _mm_unpacklo_pd(y4,z4);

    _mm_storeu_pd(ptrA,    _mm_sub_pd( t1,x1 ));
    _mm_storeu_pd(ptrA+2,  _mm_sub_pd( t2,z1 ));
    _mm_storeu_pd(ptrA+4,  _mm_sub_pd( t3,y2 ));
    _mm_storeu_pd(ptrA+6,  _mm_sub_pd( t4,x3 ));
    _mm_storeu_pd(ptrA+8,  _mm_sub_pd( t5,z3 ));
    _mm_storeu_pd(ptrA+10, _mm_sub_pd( t6,y4 ));
}

static void
gmx_mm_decrement_1rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1)
{
    __m128d t1,t2,t3,t4,t5,t6,t7;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_load_sd(ptrA+2);
    t3          = _mm_loadu_pd(ptrB);
    t4          = _mm_load_sd(ptrB+2);

    t5          = _mm_unpacklo_pd(x1,y1);
    t6          = _mm_unpackhi_pd(x1,y1);
    t7          = _mm_unpackhi_pd(z1,z1);

    t1          = _mm_sub_pd(t1,t5);
    t2          = _mm_sub_sd(t2,z1);

    t3          = _mm_sub_pd(t3,t6);
    t4          = _mm_sub_sd(t4,t7);

    _mm_storeu_pd(ptrA,t1);
    _mm_store_sd(ptrA+2,t2);
    _mm_storeu_pd(ptrB,t3);
    _mm_store_sd(ptrB+2,t4);
}


static void
gmx_mm_decrement_3rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3)
{
    __m128d t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
    __m128d tA,tB,tC,tD,tE,tF,tG,tH,tI;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_loadu_pd(ptrA+2);
    t3          = _mm_loadu_pd(ptrA+4);
    t4          = _mm_loadu_pd(ptrA+6);
    t5          = _mm_load_sd(ptrA+8);
    t6          = _mm_loadu_pd(ptrB);
    t7          = _mm_loadu_pd(ptrB+2);
    t8          = _mm_loadu_pd(ptrB+4);
    t9          = _mm_loadu_pd(ptrB+6);
    t10         = _mm_load_sd(ptrB+8);

    tA          = _mm_unpacklo_pd(x1,y1);
    tB          = _mm_unpackhi_pd(x1,y1);
    tC          = _mm_unpacklo_pd(z1,x2);
    tD          = _mm_unpackhi_pd(z1,x2);
    tE          = _mm_unpacklo_pd(y2,z2);
    tF          = _mm_unpackhi_pd(y2,z2);
    tG          = _mm_unpacklo_pd(x3,y3);
    tH          = _mm_unpackhi_pd(x3,y3);
    tI          = _mm_unpackhi_pd(z3,z3);

    t1          = _mm_sub_pd(t1,tA);
    t2          = _mm_sub_pd(t2,tC);
    t3          = _mm_sub_pd(t3,tE);
    t4          = _mm_sub_pd(t4,tG);
    t5          = _mm_sub_sd(t5,z3);

    t6          = _mm_sub_pd(t6,tB);
    t7          = _mm_sub_pd(t7,tD);
    t8          = _mm_sub_pd(t8,tF);
    t9          = _mm_sub_pd(t9,tH);
    t10         = _mm_sub_sd(t10,tI);

    _mm_storeu_pd(ptrA,t1);
    _mm_storeu_pd(ptrA+2,t2);
    _mm_storeu_pd(ptrA+4,t3);
    _mm_storeu_pd(ptrA+6,t4);
    _mm_store_sd(ptrA+8,t5);
    _mm_storeu_pd(ptrB,t6);
    _mm_storeu_pd(ptrB+2,t7);
    _mm_storeu_pd(ptrB+4,t8);
    _mm_storeu_pd(ptrB+6,t9);
    _mm_store_sd(ptrB+8,t10);
}


static void
gmx_mm_decrement_4rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3,
                                       __m128d x4, __m128d y4, __m128d z4)
{
    __m128d t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12;
    __m128d tA,tB,tC,tD,tE,tF,tG,tH,tI,tJ,tK,tL;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_loadu_pd(ptrA+2);
    t3          = _mm_loadu_pd(ptrA+4);
    t4          = _mm_loadu_pd(ptrA+6);
    t5          = _mm_loadu_pd(ptrA+8);
    t6          = _mm_loadu_pd(ptrA+10);
    t7          = _mm_loadu_pd(ptrB);
    t8          = _mm_loadu_pd(ptrB+2);
    t9          = _mm_loadu_pd(ptrB+4);
    t10         = _mm_loadu_pd(ptrB+6);
    t11         = _mm_loadu_pd(ptrB+8);
    t12         = _mm_loadu_pd(ptrB+10);

    tA          = _mm_unpacklo_pd(x1,y1);
    tB          = _mm_unpackhi_pd(x1,y1);
    tC          = _mm_unpacklo_pd(z1,x2);
    tD          = _mm_unpackhi_pd(z1,x2);
    tE          = _mm_unpacklo_pd(y2,z2);
    tF          = _mm_unpackhi_pd(y2,z2);
    tG          = _mm_unpacklo_pd(x3,y3);
    tH          = _mm_unpackhi_pd(x3,y3);
    tI          = _mm_unpacklo_pd(z3,x4);
    tJ          = _mm_unpackhi_pd(z3,x4);
    tK          = _mm_unpacklo_pd(y4,z4);
    tL          = _mm_unpackhi_pd(y4,z4);

    t1          = _mm_sub_pd(t1,tA);
    t2          = _mm_sub_pd(t2,tC);
    t3          = _mm_sub_pd(t3,tE);
    t4          = _mm_sub_pd(t4,tG);
    t5          = _mm_sub_pd(t5,tI);
    t6          = _mm_sub_pd(t6,tK);

    t7          = _mm_sub_pd(t7,tB);
    t8          = _mm_sub_pd(t8,tD);
    t9          = _mm_sub_pd(t9,tF);
    t10         = _mm_sub_pd(t10,tH);
    t11         = _mm_sub_pd(t11,tJ);
    t12         = _mm_sub_pd(t12,tL);

    _mm_storeu_pd(ptrA,  t1);
    _mm_storeu_pd(ptrA+2,t2);
    _mm_storeu_pd(ptrA+4,t3);
    _mm_storeu_pd(ptrA+6,t4);
    _mm_storeu_pd(ptrA+8,t5);
    _mm_storeu_pd(ptrA+10,t6);
    _mm_storeu_pd(ptrB,  t7);
    _mm_storeu_pd(ptrB+2,t8);
    _mm_storeu_pd(ptrB+4,t9);
    _mm_storeu_pd(ptrB+6,t10);
    _mm_storeu_pd(ptrB+8,t11);
    _mm_storeu_pd(ptrB+10,t12);
}



static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    fix1 = _mm_hadd_pd(fix1,fiy1);
    fiz1 = _mm_hadd_pd(fiz1,fiz1);

    _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr), fix1 ));
    _mm_store_sd( fptr+2, _mm_add_sd( _mm_load_sd(fptr+2), fiz1 ));

    _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 ));
    _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}
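
/* Illustrative sketch (not part of the original kernels): after the inner
 * j loop, each lane of fix/fiy/fiz holds a partial i-particle force; this
 * reduces them across lanes and adds the result to both the force and the
 * shift-force arrays. The names f, fshift, i_coord_offset and i_shift_offset
 * are hypothetical.
 */
static gmx_inline void
gmx_mm_example_reduce_iforce_pd(__m128d fix, __m128d fiy, __m128d fiz,
                                double * gmx_restrict f, int i_coord_offset,
                                double * gmx_restrict fshift, int i_shift_offset)
{
    gmx_mm_update_iforce_1atom_swizzle_pd(fix,fiy,fiz,
                                          f+i_coord_offset,
                                          fshift+i_shift_offset);
}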

static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      __m128d fix2, __m128d fiy2, __m128d fiz2,
                                      __m128d fix3, __m128d fiy3, __m128d fiz3,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    __m128d t1,t2;

    fix1 = _mm_hadd_pd(fix1,fiy1);
    fiz1 = _mm_hadd_pd(fiz1,fix2);
    fiy2 = _mm_hadd_pd(fiy2,fiz2);
    fix3 = _mm_hadd_pd(fix3,fiy3);
    fiz3 = _mm_hadd_pd(fiz3,fiz3);

    _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr), fix1 ));
    _mm_storeu_pd( fptr+2, _mm_add_pd( _mm_loadu_pd(fptr+2), fiz1 ));
    _mm_storeu_pd( fptr+4, _mm_add_pd( _mm_loadu_pd(fptr+4), fiy2 ));
    _mm_storeu_pd( fptr+6, _mm_add_pd( _mm_loadu_pd(fptr+6), fix3 ));
    _mm_store_sd( fptr+8, _mm_add_sd( _mm_load_sd(fptr+8), fiz3 ));

    fix1 = _mm_add_pd(fix1,fix3);
    t1   = _mm_shuffle_pd(fiz1,fiy2,_MM_SHUFFLE2(0,1));
    fix1 = _mm_add_pd(fix1,t1); /* x and y sums */

    t2   = _mm_shuffle_pd(fiy2,fiy2,_MM_SHUFFLE2(1,1));
    fiz1 = _mm_add_sd(fiz1,fiz3);
    fiz1 = _mm_add_sd(fiz1,t2); /* z sum */

    _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 ));
    _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}


static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      __m128d fix2, __m128d fiy2, __m128d fiz2,
                                      __m128d fix3, __m128d fiy3, __m128d fiz3,
                                      __m128d fix4, __m128d fiy4, __m128d fiz4,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    __m128d t1,t2;

    fix1 = _mm_hadd_pd(fix1,fiy1);
    fiz1 = _mm_hadd_pd(fiz1,fix2);
    fiy2 = _mm_hadd_pd(fiy2,fiz2);
    fix3 = _mm_hadd_pd(fix3,fiy3);
    fiz3 = _mm_hadd_pd(fiz3,fix4);
    fiy4 = _mm_hadd_pd(fiy4,fiz4);

    _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr),       fix1 ));
    _mm_storeu_pd( fptr+2, _mm_add_pd( _mm_loadu_pd(fptr+2),   fiz1 ));
    _mm_storeu_pd( fptr+4, _mm_add_pd( _mm_loadu_pd(fptr+4),   fiy2 ));
    _mm_storeu_pd( fptr+6, _mm_add_pd( _mm_loadu_pd(fptr+6),   fix3 ));
    _mm_storeu_pd( fptr+8, _mm_add_pd( _mm_loadu_pd(fptr+8),   fiz3 ));
    _mm_storeu_pd( fptr+10, _mm_add_pd( _mm_loadu_pd(fptr+10), fiy4 ));

    t1 = _mm_shuffle_pd(fiz1,fiy2,_MM_SHUFFLE2(0,1));
    fix1 = _mm_add_pd(fix1,t1);
    t2 = _mm_shuffle_pd(fiz3,fiy4,_MM_SHUFFLE2(0,1));
    fix3 = _mm_add_pd(fix3,t2);
    fix1 = _mm_add_pd(fix1,fix3); /* x and y sums */

    fiz1 = _mm_add_sd(fiz1, _mm_unpackhi_pd(fiy2,fiy2));
    fiz3 = _mm_add_sd(fiz3, _mm_unpackhi_pd(fiy4,fiy4));
    fiz1 = _mm_add_sd(fiz1,fiz3); /* z sum */

    _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 ));
    _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}



static gmx_inline void
gmx_mm_update_1pot_pd(__m128d pot1, double * gmx_restrict ptrA)
{
    pot1 = _mm_hadd_pd(pot1,pot1);
    _mm_store_sd(ptrA,_mm_add_sd(pot1,_mm_load_sd(ptrA)));
}

static gmx_inline void
gmx_mm_update_2pot_pd(__m128d pot1, double * gmx_restrict ptrA,
                      __m128d pot2, double * gmx_restrict ptrB)
{
    pot1 = _mm_hadd_pd(pot1,pot2);
    pot2 = _mm_unpackhi_pd(pot1,pot1);

    _mm_store_sd(ptrA,_mm_add_sd(pot1,_mm_load_sd(ptrA)));
    _mm_store_sd(ptrB,_mm_add_sd(pot2,_mm_load_sd(ptrB)));
}
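
/* Illustrative sketch (not part of the original kernels): reduce the SIMD
 * accumulators for the Coulomb and Lennard-Jones energies of one energy-group
 * pair and add them to the output buffers. The names velecgrp, vvdwgrp and
 * ggid are hypothetical.
 */
static gmx_inline void
gmx_mm_example_update_energies_pd(__m128d velecsum, __m128d vvdwsum,
                                  double * gmx_restrict velecgrp,
                                  double * gmx_restrict vvdwgrp,
                                  int ggid)
{
    gmx_mm_update_1pot_pd(velecsum,velecgrp+ggid);
    gmx_mm_update_1pot_pd(vvdwsum,vvdwgrp+ggid);
}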


#endif /* _kernelutil_x86_avx_128_fma_double_h_ */