/*
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 * Copyright (c) 2011-2012, The GROMACS Development Team
 *
 * Gromacs is a library for molecular simulation and trajectory analysis,
 * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 * a full list of developers and information, check out http://www.gromacs.org
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) any
 * later version.
 * As a special exception, you may use this file as part of a free software
 * library without restriction.  Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU Lesser General Public License.
 *
 * In plain-speak: do not worry about classes/macros/templates either - only
 * changes to the library have to be LGPL, not an application linking with it.
 *
 * To help fund GROMACS development, we humbly ask that you cite
 * the papers people have written on it - you can find them on the website!
 */
#ifndef _kernelutil_x86_sse2_double_h_
#define _kernelutil_x86_sse2_double_h_

#include <math.h>
#include <stdio.h>

#include "gmx_x86_sse2.h"

/* Normal sum of four xmm (double-precision SSE) registers */
#define gmx_mm_sum4_pd(t0,t1,t2,t3)  _mm_add_pd(_mm_add_pd(t0,t1),_mm_add_pd(t2,t3))

/* Returns nonzero if any element of a is less than the corresponding element of b */
static int
gmx_mm_any_lt(__m128d a, __m128d b)
{
    return _mm_movemask_pd(_mm_cmplt_pd(a,b));
}

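/* Squared distance from the three Cartesian difference components,
 * evaluated for both SIMD lanes at once.
 *
 * Illustrative sketch only (dx, dy, dz and rcutoff2 are hypothetical
 * variables, not taken from the kernels):
 *
 *     rsq = gmx_mm_calc_rsq_pd(dx,dy,dz);
 *     if (gmx_mm_any_lt(rsq,rcutoff2))
 *     {
 *         ... at least one of the two pairs is within the cutoff ...
 *     }
 */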
static gmx_inline __m128d
gmx_mm_calc_rsq_pd(__m128d dx, __m128d dy, __m128d dz)
{
    return _mm_add_pd( _mm_add_pd( _mm_mul_pd(dx,dx), _mm_mul_pd(dy,dy) ), _mm_mul_pd(dz,dz) );
}


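/* In the two-pointer load/store/increment routines below, the two SIMD lanes
 * correspond to the two memory locations: lane 0 (the low double) maps to
 * ptrA and lane 1 (the high double) maps to ptrB.
 */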
/* Load a single double from each of one or two pointers and merge into one xmm register */
static gmx_inline __m128d
gmx_mm_load_2real_swizzle_pd(const double * gmx_restrict ptrA,
                             const double * gmx_restrict ptrB)
{
    return _mm_unpacklo_pd(_mm_load_sd(ptrA),_mm_load_sd(ptrB));
}

static gmx_inline __m128d
gmx_mm_load_1real_pd(const double * gmx_restrict ptrA)
{
    return _mm_load_sd(ptrA);
}


static gmx_inline void
gmx_mm_store_2real_swizzle_pd(double * gmx_restrict ptrA,
                              double * gmx_restrict ptrB,
                              __m128d xmm1)
{
    __m128d t2;

    t2       = _mm_unpackhi_pd(xmm1,xmm1);
    _mm_store_sd(ptrA,xmm1);
    _mm_store_sd(ptrB,t2);
}

static gmx_inline void
gmx_mm_store_1real_pd(double * gmx_restrict ptrA, __m128d xmm1)
{
    _mm_store_sd(ptrA,xmm1);
}


/* Similar to store, but increments the value already in memory */
static gmx_inline void
gmx_mm_increment_2real_swizzle_pd(double * gmx_restrict ptrA,
                                  double * gmx_restrict ptrB, __m128d xmm1)
{
    __m128d t1;

    t1   = _mm_unpackhi_pd(xmm1,xmm1);
    xmm1 = _mm_add_sd(xmm1,_mm_load_sd(ptrA));
    t1   = _mm_add_sd(t1,_mm_load_sd(ptrB));
    _mm_store_sd(ptrA,xmm1);
    _mm_store_sd(ptrB,t1);
}

static gmx_inline void
gmx_mm_increment_1real_pd(double * gmx_restrict ptrA, __m128d xmm1)
{
    __m128d tmp;

    tmp = gmx_mm_load_1real_pd(ptrA);
    tmp = _mm_add_sd(tmp,xmm1);
    gmx_mm_store_1real_pd(ptrA,tmp);
}


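/* Load the (c6,c12) Lennard-Jones parameter pair for two atoms and transpose,
 * so that *c6 ends up as {c6A,c6B} and *c12 as {c12A,c12B}.
 */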
static gmx_inline void
gmx_mm_load_2pair_swizzle_pd(const double * gmx_restrict p1,
                             const double * gmx_restrict p2,
                             __m128d * gmx_restrict c6,
                             __m128d * gmx_restrict c12)
{
    __m128d t1,t2;

    t1   = _mm_loadu_pd(p1);
    t2   = _mm_loadu_pd(p2);
    *c6  = _mm_unpacklo_pd(t1,t2);
    *c12 = _mm_unpackhi_pd(t1,t2);
}

static gmx_inline void
gmx_mm_load_1pair_swizzle_pd(const double * gmx_restrict p1,
                             __m128d * gmx_restrict c6,
                             __m128d * gmx_restrict c12)
{
    *c6     = _mm_load_sd(p1);
    *c12    = _mm_load_sd(p1+1);
}


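/* Load one coordinate triplet and the corresponding shift vector, add them,
 * and broadcast each shifted component into both SIMD lanes. This is typically
 * used for i-particle coordinates that interact with two j particles at a time.
 */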
static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1,
                                         __m128d * gmx_restrict y1,
                                         __m128d * gmx_restrict z1)
{
    __m128d mem_xy,mem_z,mem_sxy,mem_sz;

    mem_xy  = _mm_loadu_pd(xyz);
    mem_z   = _mm_load_sd(xyz+2);
    mem_sxy = _mm_loadu_pd(xyz_shift);
    mem_sz  = _mm_load_sd(xyz_shift+2);

    mem_xy  = _mm_add_pd(mem_xy,mem_sxy);
    mem_z   = _mm_add_pd(mem_z,mem_sz);

    *x1  = _mm_shuffle_pd(mem_xy,mem_xy,_MM_SHUFFLE2(0,0));
    *y1  = _mm_shuffle_pd(mem_xy,mem_xy,_MM_SHUFFLE2(1,1));
    *z1  = _mm_shuffle_pd(mem_z,mem_z,_MM_SHUFFLE2(0,0));
}


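/* Same operation for three interaction sites (e.g. a three-site water).
 * The nine coordinates are loaded as four xmm pairs plus one scalar; because
 * the component alignment of consecutive pairs alternates as (x,y), (z,x),
 * (y,z), the shift vector is pre-shuffled into sxy, szx and syz before adding.
 */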
static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                         __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                         __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    __m128d t1,t2,t3,t4,t5,sxy,sz,szx,syz;

    t1  = _mm_loadu_pd(xyz);
    t2  = _mm_loadu_pd(xyz+2);
    t3  = _mm_loadu_pd(xyz+4);
    t4  = _mm_loadu_pd(xyz+6);
    t5  = _mm_load_sd(xyz+8);

    sxy = _mm_loadu_pd(xyz_shift);
    sz  = _mm_load_sd(xyz_shift+2);
    szx = _mm_shuffle_pd(sz,sxy,_MM_SHUFFLE2(0,0));
    syz = _mm_shuffle_pd(sxy,sz,_MM_SHUFFLE2(0,1));

    t1  = _mm_add_pd(t1,sxy);
    t2  = _mm_add_pd(t2,szx);
    t3  = _mm_add_pd(t3,syz);
    t4  = _mm_add_pd(t4,sxy);
    t5  = _mm_add_sd(t5,sz);

    *x1  = _mm_shuffle_pd(t1,t1,_MM_SHUFFLE2(0,0));
    *y1  = _mm_shuffle_pd(t1,t1,_MM_SHUFFLE2(1,1));
    *z1  = _mm_shuffle_pd(t2,t2,_MM_SHUFFLE2(0,0));
    *x2  = _mm_shuffle_pd(t2,t2,_MM_SHUFFLE2(1,1));
    *y2  = _mm_shuffle_pd(t3,t3,_MM_SHUFFLE2(0,0));
    *z2  = _mm_shuffle_pd(t3,t3,_MM_SHUFFLE2(1,1));
    *x3  = _mm_shuffle_pd(t4,t4,_MM_SHUFFLE2(0,0));
    *y3  = _mm_shuffle_pd(t4,t4,_MM_SHUFFLE2(1,1));
    *z3  = _mm_shuffle_pd(t5,t5,_MM_SHUFFLE2(0,0));
}


static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                         __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                         __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                         __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    __m128d t1,t2,t3,t4,t5,t6,sxy,sz,szx,syz;

    t1  = _mm_loadu_pd(xyz);
    t2  = _mm_loadu_pd(xyz+2);
    t3  = _mm_loadu_pd(xyz+4);
    t4  = _mm_loadu_pd(xyz+6);
    t5  = _mm_loadu_pd(xyz+8);
    t6  = _mm_loadu_pd(xyz+10);

    sxy = _mm_loadu_pd(xyz_shift);
    sz  = _mm_load_sd(xyz_shift+2);
    szx = _mm_shuffle_pd(sz,sxy,_MM_SHUFFLE2(0,0));
    syz = _mm_shuffle_pd(sxy,sz,_MM_SHUFFLE2(0,1));

    t1  = _mm_add_pd(t1,sxy);
    t2  = _mm_add_pd(t2,szx);
    t3  = _mm_add_pd(t3,syz);
    t4  = _mm_add_pd(t4,sxy);
    t5  = _mm_add_pd(t5,szx);
    t6  = _mm_add_pd(t6,syz);

    *x1  = _mm_shuffle_pd(t1,t1,_MM_SHUFFLE2(0,0));
    *y1  = _mm_shuffle_pd(t1,t1,_MM_SHUFFLE2(1,1));
    *z1  = _mm_shuffle_pd(t2,t2,_MM_SHUFFLE2(0,0));
    *x2  = _mm_shuffle_pd(t2,t2,_MM_SHUFFLE2(1,1));
    *y2  = _mm_shuffle_pd(t3,t3,_MM_SHUFFLE2(0,0));
    *z2  = _mm_shuffle_pd(t3,t3,_MM_SHUFFLE2(1,1));
    *x3  = _mm_shuffle_pd(t4,t4,_MM_SHUFFLE2(0,0));
    *y3  = _mm_shuffle_pd(t4,t4,_MM_SHUFFLE2(1,1));
    *z3  = _mm_shuffle_pd(t5,t5,_MM_SHUFFLE2(0,0));
    *x4  = _mm_shuffle_pd(t5,t5,_MM_SHUFFLE2(1,1));
    *y4  = _mm_shuffle_pd(t6,t6,_MM_SHUFFLE2(0,0));
    *z4  = _mm_shuffle_pd(t6,t6,_MM_SHUFFLE2(1,1));
}


static gmx_inline void
gmx_mm_load_1rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x, __m128d * gmx_restrict y, __m128d * gmx_restrict z)
{
    *x            = _mm_load_sd(p1);
    *y            = _mm_load_sd(p1+1);
    *z            = _mm_load_sd(p1+2);
}

static gmx_inline void
gmx_mm_load_3rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    *x1            = _mm_load_sd(p1);
    *y1            = _mm_load_sd(p1+1);
    *z1            = _mm_load_sd(p1+2);
    *x2            = _mm_load_sd(p1+3);
    *y2            = _mm_load_sd(p1+4);
    *z2            = _mm_load_sd(p1+5);
    *x3            = _mm_load_sd(p1+6);
    *y3            = _mm_load_sd(p1+7);
    *z3            = _mm_load_sd(p1+8);
}

static gmx_inline void
gmx_mm_load_4rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                  __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    *x1            = _mm_load_sd(p1);
    *y1            = _mm_load_sd(p1+1);
    *z1            = _mm_load_sd(p1+2);
    *x2            = _mm_load_sd(p1+3);
    *y2            = _mm_load_sd(p1+4);
    *z2            = _mm_load_sd(p1+5);
    *x3            = _mm_load_sd(p1+6);
    *y3            = _mm_load_sd(p1+7);
    *z3            = _mm_load_sd(p1+8);
    *x4            = _mm_load_sd(p1+9);
    *y4            = _mm_load_sd(p1+10);
    *z4            = _mm_load_sd(p1+11);
}


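/* Load one coordinate triplet from each of two pointers and transpose into
 * SIMD layout, i.e. *x1 = {xA,xB}, *y1 = {yA,yB}, *z1 = {zA,zB}.
 */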
static gmx_inline void
gmx_mm_load_1rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA,
                                  const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1)
{
    __m128d t1,t2,t3,t4;
    t1           = _mm_loadu_pd(ptrA);
    t2           = _mm_loadu_pd(ptrB);
    t3           = _mm_load_sd(ptrA+2);
    t4           = _mm_load_sd(ptrB+2);
    GMX_MM_TRANSPOSE2_PD(t1,t2);
    *x1          = t1;
    *y1          = t2;
    *z1          = _mm_unpacklo_pd(t3,t4);
}


static gmx_inline void
gmx_mm_load_3rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA, const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    __m128d t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
    t1           = _mm_loadu_pd(ptrA);
    t2           = _mm_loadu_pd(ptrB);
    t3           = _mm_loadu_pd(ptrA+2);
    t4           = _mm_loadu_pd(ptrB+2);
    t5           = _mm_loadu_pd(ptrA+4);
    t6           = _mm_loadu_pd(ptrB+4);
    t7           = _mm_loadu_pd(ptrA+6);
    t8           = _mm_loadu_pd(ptrB+6);
    t9           = _mm_load_sd(ptrA+8);
    t10          = _mm_load_sd(ptrB+8);
    GMX_MM_TRANSPOSE2_PD(t1,t2);
    GMX_MM_TRANSPOSE2_PD(t3,t4);
    GMX_MM_TRANSPOSE2_PD(t5,t6);
    GMX_MM_TRANSPOSE2_PD(t7,t8);
    *x1          = t1;
    *y1          = t2;
    *z1          = t3;
    *x2          = t4;
    *y2          = t5;
    *z2          = t6;
    *x3          = t7;
    *y3          = t8;
    *z3          = _mm_unpacklo_pd(t9,t10);
}


static gmx_inline void
gmx_mm_load_4rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA, const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                  __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    __m128d t1,t2,t3,t4,t5,t6;
    t1           = _mm_loadu_pd(ptrA);
    t2           = _mm_loadu_pd(ptrB);
    t3           = _mm_loadu_pd(ptrA+2);
    t4           = _mm_loadu_pd(ptrB+2);
    t5           = _mm_loadu_pd(ptrA+4);
    t6           = _mm_loadu_pd(ptrB+4);
    GMX_MM_TRANSPOSE2_PD(t1,t2);
    GMX_MM_TRANSPOSE2_PD(t3,t4);
    GMX_MM_TRANSPOSE2_PD(t5,t6);
    *x1          = t1;
    *y1          = t2;
    *z1          = t3;
    *x2          = t4;
    *y2          = t5;
    *z2          = t6;
    t1           = _mm_loadu_pd(ptrA+6);
    t2           = _mm_loadu_pd(ptrB+6);
    t3           = _mm_loadu_pd(ptrA+8);
    t4           = _mm_loadu_pd(ptrB+8);
    t5           = _mm_loadu_pd(ptrA+10);
    t6           = _mm_loadu_pd(ptrB+10);
    GMX_MM_TRANSPOSE2_PD(t1,t2);
    GMX_MM_TRANSPOSE2_PD(t3,t4);
    GMX_MM_TRANSPOSE2_PD(t5,t6);
    *x3          = t1;
    *y3          = t2;
    *z3          = t3;
    *x4          = t4;
    *y4          = t5;
    *z4          = t6;
}


/* Routines to decrement an rvec in memory, typically used for j particle force updates */
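/* The "noswizzle" variants take the forces already packed in memory order
 * (an xy pair plus a separate z), while the "swizzle" variants take separate
 * x/y/z SIMD registers and shuffle them back to memory order before the
 * subtraction.
 */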
static gmx_inline void
gmx_mm_decrement_1rvec_1ptr_noswizzle_pd(double * gmx_restrict ptrA,
                                         __m128d xy, __m128d z)
{
    __m128d t1,t2;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_load_sd(ptrA+2);

    t1 = _mm_sub_pd(t1,xy);
    t2 = _mm_sub_sd(t2,z);

    _mm_storeu_pd(ptrA,t1);
    _mm_store_sd(ptrA+2,t2);
}

static gmx_inline void
gmx_mm_decrement_3rvec_1ptr_noswizzle_pd(double * gmx_restrict ptrA,
                                         __m128d xy1, __m128d z1,
                                         __m128d xy2, __m128d z2,
                                         __m128d xy3, __m128d z3)
{
    __m128d t1,t2;
    __m128d tA,tB,tC,tD,tE;

    tA   = _mm_loadu_pd(ptrA);
    tB   = _mm_loadu_pd(ptrA+2);
    tC   = _mm_loadu_pd(ptrA+4);
    tD   = _mm_loadu_pd(ptrA+6);
    tE   = _mm_load_sd(ptrA+8);

    /* xy1: y1 x1 */
    t1   = _mm_shuffle_pd(z1,xy2,_MM_SHUFFLE2(0,1)); /* x2 z1 */
    t2   = _mm_shuffle_pd(xy2,z2,_MM_SHUFFLE2(0,1)); /* z2 y2 */
    /* xy3: y3 x3 */

    tA   = _mm_sub_pd(tA,xy1);
    tB   = _mm_sub_pd(tB,t1);
    tC   = _mm_sub_pd(tC,t2);
    tD   = _mm_sub_pd(tD,xy3);
    tE   = _mm_sub_sd(tE,z3);

    _mm_storeu_pd(ptrA,tA);
    _mm_storeu_pd(ptrA+2,tB);
    _mm_storeu_pd(ptrA+4,tC);
    _mm_storeu_pd(ptrA+6,tD);
    _mm_store_sd(ptrA+8,tE);
}

static gmx_inline void
gmx_mm_decrement_4rvec_1ptr_noswizzle_pd(double * gmx_restrict ptrA,
                                         __m128d xy1, __m128d z1,
                                         __m128d xy2, __m128d z2,
                                         __m128d xy3, __m128d z3,
                                         __m128d xy4, __m128d z4)
{
    __m128d t1,t2,t3,t4;
    __m128d tA,tB,tC,tD,tE,tF;

    tA   = _mm_loadu_pd(ptrA);
    tB   = _mm_loadu_pd(ptrA+2);
    tC   = _mm_loadu_pd(ptrA+4);
    tD   = _mm_loadu_pd(ptrA+6);
    tE   = _mm_loadu_pd(ptrA+8);
    tF   = _mm_loadu_pd(ptrA+10);

    /* xy1: y1 x1 */
    t1   = _mm_shuffle_pd(z1,xy2,_MM_SHUFFLE2(0,0)); /* x2 z1 */
    t2   = _mm_shuffle_pd(xy2,z2,_MM_SHUFFLE2(0,1)); /* z2 y2 */
    /* xy3: y3 x3 */
    t3   = _mm_shuffle_pd(z3,xy4,_MM_SHUFFLE2(0,0)); /* x4 z3 */
    t4   = _mm_shuffle_pd(xy4,z4,_MM_SHUFFLE2(0,1)); /* z4 y4 */

    tA   = _mm_sub_pd(tA,xy1);
    tB   = _mm_sub_pd(tB,t1);
    tC   = _mm_sub_pd(tC,t2);
    tD   = _mm_sub_pd(tD,xy3);
    tE   = _mm_sub_pd(tE,t3);
    tF   = _mm_sub_pd(tF,t4);

    _mm_storeu_pd(ptrA,tA);
    _mm_storeu_pd(ptrA+2,tB);
    _mm_storeu_pd(ptrA+4,tC);
    _mm_storeu_pd(ptrA+6,tD);
    _mm_storeu_pd(ptrA+8,tE);
    _mm_storeu_pd(ptrA+10,tF);
}

static gmx_inline void
gmx_mm_decrement_1rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1)
{
    __m128d t1,t2,t3;

    t1           = _mm_load_sd(ptrA);
    t2           = _mm_load_sd(ptrA+1);
    t3           = _mm_load_sd(ptrA+2);

    t1           = _mm_sub_sd(t1,x1);
    t2           = _mm_sub_sd(t2,y1);
    t3           = _mm_sub_sd(t3,z1);
    _mm_store_sd(ptrA,t1);
    _mm_store_sd(ptrA+1,t2);
    _mm_store_sd(ptrA+2,t3);
}


static gmx_inline void
gmx_mm_decrement_3rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3)
{
    __m128d t1,t2,t3,t4,t5;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_loadu_pd(ptrA+2);
    t3          = _mm_loadu_pd(ptrA+4);
    t4          = _mm_loadu_pd(ptrA+6);
    t5          = _mm_load_sd(ptrA+8);

    x1          = _mm_unpacklo_pd(x1,y1);
    z1          = _mm_unpacklo_pd(z1,x2);
    y2          = _mm_unpacklo_pd(y2,z2);
    x3          = _mm_unpacklo_pd(x3,y3);
    /* nothing to be done for z3 */

    t1          = _mm_sub_pd(t1,x1);
    t2          = _mm_sub_pd(t2,z1);
    t3          = _mm_sub_pd(t3,y2);
    t4          = _mm_sub_pd(t4,x3);
    t5          = _mm_sub_sd(t5,z3);
    _mm_storeu_pd(ptrA,t1);
    _mm_storeu_pd(ptrA+2,t2);
    _mm_storeu_pd(ptrA+4,t3);
    _mm_storeu_pd(ptrA+6,t4);
    _mm_store_sd(ptrA+8,t5);
}


static gmx_inline void
gmx_mm_decrement_4rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3,
                                       __m128d x4, __m128d y4, __m128d z4)
{
    __m128d t1,t2,t3,t4,t5,t6;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_loadu_pd(ptrA+2);
    t3          = _mm_loadu_pd(ptrA+4);
    t4          = _mm_loadu_pd(ptrA+6);
    t5          = _mm_loadu_pd(ptrA+8);
    t6          = _mm_loadu_pd(ptrA+10);

    x1          = _mm_unpacklo_pd(x1,y1);
    z1          = _mm_unpacklo_pd(z1,x2);
    y2          = _mm_unpacklo_pd(y2,z2);
    x3          = _mm_unpacklo_pd(x3,y3);
    z3          = _mm_unpacklo_pd(z3,x4);
    y4          = _mm_unpacklo_pd(y4,z4);

    _mm_storeu_pd(ptrA,    _mm_sub_pd( t1,x1 ));
    _mm_storeu_pd(ptrA+2,  _mm_sub_pd( t2,z1 ));
    _mm_storeu_pd(ptrA+4,  _mm_sub_pd( t3,y2 ));
    _mm_storeu_pd(ptrA+6,  _mm_sub_pd( t4,x3 ));
    _mm_storeu_pd(ptrA+8,  _mm_sub_pd( t5,z3 ));
    _mm_storeu_pd(ptrA+10, _mm_sub_pd( t6,y4 ));
}

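/* Two-pointer versions: lane 0 of each force component belongs to ptrA and
 * lane 1 to ptrB, so the components are unpacked per lane before the two
 * memory images are updated.
 */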
static gmx_inline void
gmx_mm_decrement_1rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1)
{
    __m128d t1,t2,t3,t4,t5,t6,t7;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_load_sd(ptrA+2);
    t3          = _mm_loadu_pd(ptrB);
    t4          = _mm_load_sd(ptrB+2);

    t5          = _mm_unpacklo_pd(x1,y1);
    t6          = _mm_unpackhi_pd(x1,y1);
    t7          = _mm_unpackhi_pd(z1,z1);

    t1          = _mm_sub_pd(t1,t5);
    t2          = _mm_sub_sd(t2,z1);

    t3          = _mm_sub_pd(t3,t6);
    t4          = _mm_sub_sd(t4,t7);

    _mm_storeu_pd(ptrA,t1);
    _mm_store_sd(ptrA+2,t2);
    _mm_storeu_pd(ptrB,t3);
    _mm_store_sd(ptrB+2,t4);
}

static gmx_inline void
gmx_mm_decrement_3rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3)
{
    __m128d t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
    __m128d tA,tB,tC,tD,tE,tF,tG,tH,tI;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_loadu_pd(ptrA+2);
    t3          = _mm_loadu_pd(ptrA+4);
    t4          = _mm_loadu_pd(ptrA+6);
    t5          = _mm_load_sd(ptrA+8);
    t6          = _mm_loadu_pd(ptrB);
    t7          = _mm_loadu_pd(ptrB+2);
    t8          = _mm_loadu_pd(ptrB+4);
    t9          = _mm_loadu_pd(ptrB+6);
    t10         = _mm_load_sd(ptrB+8);

    tA          = _mm_unpacklo_pd(x1,y1);
    tB          = _mm_unpackhi_pd(x1,y1);
    tC          = _mm_unpacklo_pd(z1,x2);
    tD          = _mm_unpackhi_pd(z1,x2);
    tE          = _mm_unpacklo_pd(y2,z2);
    tF          = _mm_unpackhi_pd(y2,z2);
    tG          = _mm_unpacklo_pd(x3,y3);
    tH          = _mm_unpackhi_pd(x3,y3);
    tI          = _mm_unpackhi_pd(z3,z3);

    t1          = _mm_sub_pd(t1,tA);
    t2          = _mm_sub_pd(t2,tC);
    t3          = _mm_sub_pd(t3,tE);
    t4          = _mm_sub_pd(t4,tG);
    t5          = _mm_sub_sd(t5,z3);

    t6          = _mm_sub_pd(t6,tB);
    t7          = _mm_sub_pd(t7,tD);
    t8          = _mm_sub_pd(t8,tF);
    t9          = _mm_sub_pd(t9,tH);
    t10         = _mm_sub_sd(t10,tI);

    _mm_storeu_pd(ptrA,t1);
    _mm_storeu_pd(ptrA+2,t2);
    _mm_storeu_pd(ptrA+4,t3);
    _mm_storeu_pd(ptrA+6,t4);
    _mm_store_sd(ptrA+8,t5);
    _mm_storeu_pd(ptrB,t6);
    _mm_storeu_pd(ptrB+2,t7);
    _mm_storeu_pd(ptrB+4,t8);
    _mm_storeu_pd(ptrB+6,t9);
    _mm_store_sd(ptrB+8,t10);
}


static gmx_inline void
gmx_mm_decrement_4rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3,
                                       __m128d x4, __m128d y4, __m128d z4)
{
    __m128d t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12;
    __m128d tA,tB,tC,tD,tE,tF,tG,tH,tI,tJ,tK,tL;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_loadu_pd(ptrA+2);
    t3          = _mm_loadu_pd(ptrA+4);
    t4          = _mm_loadu_pd(ptrA+6);
    t5          = _mm_loadu_pd(ptrA+8);
    t6          = _mm_loadu_pd(ptrA+10);
    t7          = _mm_loadu_pd(ptrB);
    t8          = _mm_loadu_pd(ptrB+2);
    t9          = _mm_loadu_pd(ptrB+4);
    t10         = _mm_loadu_pd(ptrB+6);
    t11         = _mm_loadu_pd(ptrB+8);
    t12         = _mm_loadu_pd(ptrB+10);

    tA          = _mm_unpacklo_pd(x1,y1);
    tB          = _mm_unpackhi_pd(x1,y1);
    tC          = _mm_unpacklo_pd(z1,x2);
    tD          = _mm_unpackhi_pd(z1,x2);
    tE          = _mm_unpacklo_pd(y2,z2);
    tF          = _mm_unpackhi_pd(y2,z2);
    tG          = _mm_unpacklo_pd(x3,y3);
    tH          = _mm_unpackhi_pd(x3,y3);
    tI          = _mm_unpacklo_pd(z3,x4);
    tJ          = _mm_unpackhi_pd(z3,x4);
    tK          = _mm_unpacklo_pd(y4,z4);
    tL          = _mm_unpackhi_pd(y4,z4);

    t1          = _mm_sub_pd(t1,tA);
    t2          = _mm_sub_pd(t2,tC);
    t3          = _mm_sub_pd(t3,tE);
    t4          = _mm_sub_pd(t4,tG);
    t5          = _mm_sub_pd(t5,tI);
    t6          = _mm_sub_pd(t6,tK);

    t7          = _mm_sub_pd(t7,tB);
    t8          = _mm_sub_pd(t8,tD);
    t9          = _mm_sub_pd(t9,tF);
    t10         = _mm_sub_pd(t10,tH);
    t11         = _mm_sub_pd(t11,tJ);
    t12         = _mm_sub_pd(t12,tL);

    _mm_storeu_pd(ptrA,  t1);
    _mm_storeu_pd(ptrA+2,t2);
    _mm_storeu_pd(ptrA+4,t3);
    _mm_storeu_pd(ptrA+6,t4);
    _mm_storeu_pd(ptrA+8,t5);
    _mm_storeu_pd(ptrA+10,t6);
    _mm_storeu_pd(ptrB,  t7);
    _mm_storeu_pd(ptrB+2,t8);
    _mm_storeu_pd(ptrB+4,t9);
    _mm_storeu_pd(ptrB+6,t10);
    _mm_storeu_pd(ptrB+8,t11);
    _mm_storeu_pd(ptrB+10,t12);
}


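/* i-particle force update routines: transpose the per-lane force accumulators,
 * sum the two SIMD lanes, add the per-atom results to the force array fptr,
 * and accumulate the total over all atoms into the shift force fshiftptr.
 */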
static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    __m128d t1;

    /* transpose data */
    t1 = fix1;
    fix1 = _mm_unpacklo_pd(fix1,fiy1); /* y0 x0 */
    fiy1 = _mm_unpackhi_pd(t1,fiy1);   /* y1 x1 */

    fix1 = _mm_add_pd(fix1,fiy1);
    fiz1 = _mm_add_sd( fiz1, _mm_unpackhi_pd(fiz1,fiz1 ));

    _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr), fix1 ));
    _mm_store_sd( fptr+2, _mm_add_sd( _mm_load_sd(fptr+2), fiz1 ));

    _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 ));
    _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}

static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      __m128d fix2, __m128d fiy2, __m128d fiz2,
                                      __m128d fix3, __m128d fiy3, __m128d fiz3,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    __m128d t1,t2;

    /* transpose data */
    GMX_MM_TRANSPOSE2_PD(fix1,fiy1);
    GMX_MM_TRANSPOSE2_PD(fiz1,fix2);
    GMX_MM_TRANSPOSE2_PD(fiy2,fiz2);
    t1 = fix3;
    fix3 = _mm_unpacklo_pd(fix3,fiy3); /* y0 x0 */
    fiy3 = _mm_unpackhi_pd(t1,fiy3);   /* y1 x1 */

    fix1 = _mm_add_pd(fix1,fiy1);
    fiz1 = _mm_add_pd(fiz1,fix2);
    fiy2 = _mm_add_pd(fiy2,fiz2);

    fix3 = _mm_add_pd(fix3,fiy3);
    fiz3 = _mm_add_sd( fiz3, _mm_unpackhi_pd(fiz3,fiz3));

    _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr), fix1 ));
    _mm_storeu_pd( fptr+2, _mm_add_pd( _mm_loadu_pd(fptr+2), fiz1 ));
    _mm_storeu_pd( fptr+4, _mm_add_pd( _mm_loadu_pd(fptr+4), fiy2 ));
    _mm_storeu_pd( fptr+6, _mm_add_pd( _mm_loadu_pd(fptr+6), fix3 ));
    _mm_store_sd( fptr+8, _mm_add_sd( _mm_load_sd(fptr+8), fiz3 ));

    fix1 = _mm_add_pd(fix1,fix3);
    t1   = _mm_shuffle_pd(fiz1,fiy2,_MM_SHUFFLE2(0,1));
    fix1 = _mm_add_pd(fix1,t1); /* x and y sums */

    t2   = _mm_shuffle_pd(fiy2,fiy2,_MM_SHUFFLE2(1,1));
    fiz1 = _mm_add_sd(fiz1,fiz3);
    fiz1 = _mm_add_sd(fiz1,t2); /* z sum */

    _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 ));
    _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}


static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      __m128d fix2, __m128d fiy2, __m128d fiz2,
                                      __m128d fix3, __m128d fiy3, __m128d fiz3,
                                      __m128d fix4, __m128d fiy4, __m128d fiz4,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    __m128d t1,t2;

    /* transpose data */
    GMX_MM_TRANSPOSE2_PD(fix1,fiy1);
    GMX_MM_TRANSPOSE2_PD(fiz1,fix2);
    GMX_MM_TRANSPOSE2_PD(fiy2,fiz2);
    GMX_MM_TRANSPOSE2_PD(fix3,fiy3);
    GMX_MM_TRANSPOSE2_PD(fiz3,fix4);
    GMX_MM_TRANSPOSE2_PD(fiy4,fiz4);

    fix1 = _mm_add_pd(fix1,fiy1);
    fiz1 = _mm_add_pd(fiz1,fix2);
    fiy2 = _mm_add_pd(fiy2,fiz2);
    fix3 = _mm_add_pd(fix3,fiy3);
    fiz3 = _mm_add_pd(fiz3,fix4);
    fiy4 = _mm_add_pd(fiy4,fiz4);

    _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr),       fix1 ));
    _mm_storeu_pd( fptr+2, _mm_add_pd( _mm_loadu_pd(fptr+2),   fiz1 ));
    _mm_storeu_pd( fptr+4, _mm_add_pd( _mm_loadu_pd(fptr+4),   fiy2 ));
    _mm_storeu_pd( fptr+6, _mm_add_pd( _mm_loadu_pd(fptr+6),   fix3 ));
    _mm_storeu_pd( fptr+8, _mm_add_pd( _mm_loadu_pd(fptr+8),   fiz3 ));
    _mm_storeu_pd( fptr+10, _mm_add_pd( _mm_loadu_pd(fptr+10), fiy4 ));

    t1 = _mm_shuffle_pd(fiz1,fiy2,_MM_SHUFFLE2(0,1));
    fix1 = _mm_add_pd(fix1,t1);
    t2 = _mm_shuffle_pd(fiz3,fiy4,_MM_SHUFFLE2(0,1));
    fix3 = _mm_add_pd(fix3,t2);
    fix1 = _mm_add_pd(fix1,fix3); /* x and y sums */

    fiz1 = _mm_add_sd(fiz1, _mm_unpackhi_pd(fiy2,fiy2));
    fiz3 = _mm_add_sd(fiz3, _mm_unpackhi_pd(fiy4,fiy4));
    fiz1 = _mm_add_sd(fiz1,fiz3); /* z sum */

    _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 ));
    _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}


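/* Potential/energy update routines: horizontally add the two lanes of the
 * accumulator and add the scalar result to the value already in memory.
 * The two-pointer variant transposes first, so each output pointer receives
 * the lane sum of its own accumulator.
 */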
static gmx_inline void
gmx_mm_update_1pot_pd(__m128d pot1, double * gmx_restrict ptrA)
{
    pot1 = _mm_add_pd(pot1, _mm_unpackhi_pd(pot1,pot1));
    _mm_store_sd(ptrA,_mm_add_sd(pot1,_mm_load_sd(ptrA)));
}

static gmx_inline void
gmx_mm_update_2pot_pd(__m128d pot1, double * gmx_restrict ptrA,
                      __m128d pot2, double * gmx_restrict ptrB)
{
    GMX_MM_TRANSPOSE2_PD(pot1,pot2);
    pot1 = _mm_add_pd(pot1,pot2);
    pot2 = _mm_unpackhi_pd(pot1,pot1);

    _mm_store_sd(ptrA,_mm_add_sd(pot1,_mm_load_sd(ptrA)));
    _mm_store_sd(ptrB,_mm_add_sd(pot2,_mm_load_sd(ptrB)));
}


#endif /* _kernelutil_x86_sse2_double_h_ */