/*
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 * Copyright (c) 2011-2012, The GROMACS Development Team
 *
 * Gromacs is a library for molecular simulation and trajectory analysis,
 * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 * a full list of developers and information, check out http://www.gromacs.org
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) any
 * later version.
 * As a special exception, you may use this file as part of a free software
 * library without restriction.  Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU Lesser General Public License.
 *
 * In plain-speak: do not worry about classes/macros/templates either - only
 * changes to the library have to be LGPL, not an application linking with it.
 *
 * To help fund GROMACS development, we humbly ask that you cite
 * the papers people have written on it - you can find them on the website!
 */
#ifndef _kernelutil_x86_sse2_single_h_
#define _kernelutil_x86_sse2_single_h_

/* We require SSE2 now! */

#include <math.h>

#include "gmx_x86_sse2.h"


/* Normal sum of four xmm registers */
#define gmx_mm_sum4_ps(t0,t1,t2,t3)  _mm_add_ps(_mm_add_ps(t0,t1),_mm_add_ps(t2,t3))

static gmx_inline __m128
gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
{
    return _mm_add_ps( _mm_add_ps( _mm_mul_ps(dx,dx), _mm_mul_ps(dy,dy) ), _mm_mul_ps(dz,dz) );
}

static int
gmx_mm_any_lt(__m128 a, __m128 b)
{
    return _mm_movemask_ps(_mm_cmplt_ps(a,b));
}
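
/* Illustrative usage sketch (not part of the original header): a typical
 * cutoff check for one i particle against four j particles, combining the
 * two helpers above. The coordinate registers and the rcutoff value are
 * hypothetical names; real kernels keep all of these in registers already.
 */
static gmx_inline int
gmx_mm_example_any_within_cutoff(__m128 ix, __m128 iy, __m128 iz,
                                 __m128 jx, __m128 jy, __m128 jz,
                                 float rcutoff)
{
    __m128 dx,dy,dz,rsq,rcutoff2;

    dx       = _mm_sub_ps(ix,jx);
    dy       = _mm_sub_ps(iy,jy);
    dz       = _mm_sub_ps(iz,jz);
    rsq      = gmx_mm_calc_rsq_ps(dx,dy,dz);
    rcutoff2 = _mm_set1_ps(rcutoff*rcutoff);

    /* Nonzero if any of the four interactions is inside the cutoff */
    return gmx_mm_any_lt(rsq,rcutoff2);
}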

/* Load a single value from each of four memory locations and merge them into
 * one xmm register; store and increment counterparts follow below.
 */

static __m128
gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
                             const float * gmx_restrict ptrB,
                             const float * gmx_restrict ptrC,
                             const float * gmx_restrict ptrD)
{
    __m128 t1,t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA),_mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB),_mm_load_ss(ptrD));
    return _mm_unpacklo_ps(t1,t2);
}
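
/* Illustrative sketch (not part of the original header): gathering the
 * charges of four neighbor particles into one register. The charge array
 * and the jnrA..jnrD indices are hypothetical names.
 */
static gmx_inline __m128
gmx_mm_example_gather_charges(const float * gmx_restrict charge,
                              int jnrA, int jnrB, int jnrC, int jnrD)
{
    return gmx_mm_load_4real_swizzle_ps(charge+jnrA,charge+jnrB,
                                        charge+jnrC,charge+jnrD);
}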

static void
gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
                              float * gmx_restrict ptrB,
                              float * gmx_restrict ptrC,
                              float * gmx_restrict ptrD,
                              __m128 xmm1)
{
    __m128 t2,t3,t4;

    t3       = _mm_movehl_ps(_mm_setzero_ps(),xmm1);
    t2       = _mm_shuffle_ps(xmm1,xmm1,_MM_SHUFFLE(1,1,1,1));
    t4       = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
    _mm_store_ss(ptrA,xmm1);
    _mm_store_ss(ptrB,t2);
    _mm_store_ss(ptrC,t3);
    _mm_store_ss(ptrD,t4);
}

/* Like the store above, but adds the values to those already in memory */
static void
gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
                                  float * gmx_restrict ptrB,
                                  float * gmx_restrict ptrC,
                                  float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 tmp;

    tmp = gmx_mm_load_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD);
    tmp = _mm_add_ps(tmp,xmm1);
    gmx_mm_store_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD,tmp);
}


static void
gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
                             const float * gmx_restrict p2,
                             const float * gmx_restrict p3,
                             const float * gmx_restrict p4,
                             __m128 * gmx_restrict c6,
                             __m128 * gmx_restrict c12)
{
    __m128 t1,t2,t3,t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p1);   /* - - c12a  c6a */
    t2   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p2);   /* - - c12b  c6b */
    t3   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p3);   /* - - c12c  c6c */
    t4   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p4);   /* - - c12d  c6d */
    t1   = _mm_unpacklo_ps(t1,t2);
    t2   = _mm_unpacklo_ps(t3,t4);
    *c6  = _mm_movelh_ps(t1,t2);
    *c12 = _mm_movehl_ps(t2,t1);
}
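
/* Illustrative sketch (not part of the original header): gathering four
 * (c6,c12) Lennard-Jones parameter pairs. The vdwparam array, assumed to
 * store two consecutive floats per parameter index, and the offsets
 * vdwA..vdwD are hypothetical names.
 */
static gmx_inline void
gmx_mm_example_gather_lj_params(const float * gmx_restrict vdwparam,
                                int vdwA, int vdwB, int vdwC, int vdwD,
                                __m128 * gmx_restrict c6, __m128 * gmx_restrict c12)
{
    gmx_mm_load_4pair_swizzle_ps(vdwparam+vdwA,vdwparam+vdwB,
                                 vdwparam+vdwC,vdwparam+vdwD,c6,c12);
}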

/* Routines to load 1-4 rvecs from four memory locations each.
 * We mainly use these to load coordinates. The dedicated multi-atom routines
 * are very efficient for the water-water loops, since we know e.g. that a
 * TIP4P water has four atoms, so we can load 12 consecutive floats and shuffle.
 * Illustrative usage sketches follow some of the routines below.
 */


static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1,
                                         __m128 * gmx_restrict y1,
                                         __m128 * gmx_restrict z1)
{
    __m128 t1,t2,t3,t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
    t2   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz);
    t3   = _mm_load_ss(xyz_shift+2);
    t4   = _mm_load_ss(xyz+2);
    t1   = _mm_add_ps(t1,t2);
    t3   = _mm_add_ss(t3,t4);

    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
}


static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 tA,tB;
    __m128 t1,t2,t3,t4,t5,t6;

    tA   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
    tB   = _mm_load_ss(xyz_shift+2);

    t1   = _mm_loadu_ps(xyz);
    t2   = _mm_loadu_ps(xyz+4);
    t3   = _mm_load_ss(xyz+8);

    tA   = _mm_movelh_ps(tA,tB);
    t4   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
    t5   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
    t6   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));

    t1   = _mm_add_ps(t1,t4);
    t2   = _mm_add_ps(t2,t5);
    t3   = _mm_add_ss(t3,t6);

    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
    *x2  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
    *y2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
    *z2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
    *x3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
    *y3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
    *z3  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
}


static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 tA,tB;
    __m128 t1,t2,t3,t4,t5,t6;

    tA   = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
    tB   = _mm_load_ss(xyz_shift+2);

    t1   = _mm_loadu_ps(xyz);
    t2   = _mm_loadu_ps(xyz+4);
    t3   = _mm_loadu_ps(xyz+8);

    tA   = _mm_movelh_ps(tA,tB);
    t4   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
    t5   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
    t6   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));

    t1   = _mm_add_ps(t1,t4);
    t2   = _mm_add_ps(t2,t5);
    t3   = _mm_add_ps(t3,t6);

    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
    *x2  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
    *y2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
    *z2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
    *x3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
    *y3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
    *z3  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
    *x4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
    *y4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(2,2,2,2));
    *z4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(3,3,3,3));
}
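
/* Illustrative sketch (not part of the original header): loading and
 * broadcasting the PBC-shifted coordinates of a four-site water (e.g.
 * TIP4P-like) i molecule. The shiftvec and x arrays (flat, three floats
 * per vector) and the two offsets are hypothetical names.
 */
static gmx_inline void
gmx_mm_example_load_shifted_water4(const float * gmx_restrict shiftvec,
                                   const float * gmx_restrict x,
                                   int i_shift_offset, int i_coord_offset,
                                   __m128 *ix1, __m128 *iy1, __m128 *iz1,
                                   __m128 *ix2, __m128 *iy2, __m128 *iz2,
                                   __m128 *ix3, __m128 *iy3, __m128 *iz3,
                                   __m128 *ix4, __m128 *iy4, __m128 *iz4)
{
    gmx_mm_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                             ix1,iy1,iz1,ix2,iy2,iz2,
                                             ix3,iy3,iz3,ix4,iy4,iz4);
}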


static void
gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 *      gmx_restrict x1,
                                  __m128 *      gmx_restrict y1,
                                  __m128 *      gmx_restrict z1)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8;
    t1   = _mm_castpd_ps(_mm_load_sd((const double *)ptrA));
    t2   = _mm_castpd_ps(_mm_load_sd((const double *)ptrB));
    t3   = _mm_castpd_ps(_mm_load_sd((const double *)ptrC));
    t4   = _mm_castpd_ps(_mm_load_sd((const double *)ptrD));
    t5   = _mm_load_ss(ptrA+2);
    t6   = _mm_load_ss(ptrB+2);
    t7   = _mm_load_ss(ptrC+2);
    t8   = _mm_load_ss(ptrD+2);
    t1   = _mm_unpacklo_ps(t1,t2);
    t3   = _mm_unpacklo_ps(t3,t4);
    *x1  = _mm_movelh_ps(t1,t3);
    *y1  = _mm_movehl_ps(t3,t1);
    t5   = _mm_unpacklo_ps(t5,t6);
    t7   = _mm_unpacklo_ps(t7,t8);
    *z1  = _mm_movelh_ps(t5,t7);
}
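
/* Illustrative sketch (not part of the original header): loading the single
 * interaction site of four j particles from a flat coordinate array with
 * three floats per particle. The x array and the jnrA..jnrD indices are
 * hypothetical names.
 */
static gmx_inline void
gmx_mm_example_load_jcoordinates(const float * gmx_restrict x,
                                 int jnrA, int jnrB, int jnrC, int jnrD,
                                 __m128 * gmx_restrict jx, __m128 * gmx_restrict jy,
                                 __m128 * gmx_restrict jz)
{
    gmx_mm_load_1rvec_4ptr_swizzle_ps(x+3*jnrA,x+3*jnrB,x+3*jnrC,x+3*jnrD,jx,jy,jz);
}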


static void
gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1,t2,t3,t4;
    t1            = _mm_loadu_ps(ptrA);
    t2            = _mm_loadu_ps(ptrB);
    t3            = _mm_loadu_ps(ptrC);
    t4            = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *x1           = t1;
    *y1           = t2;
    *z1           = t3;
    *x2           = t4;
    t1            = _mm_loadu_ps(ptrA+4);
    t2            = _mm_loadu_ps(ptrB+4);
    t3            = _mm_loadu_ps(ptrC+4);
    t4            = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *y2           = t1;
    *z2           = t2;
    *x3           = t3;
    *y3           = t4;
    t1            = _mm_load_ss(ptrA+8);
    t2            = _mm_load_ss(ptrB+8);
    t3            = _mm_load_ss(ptrC+8);
    t4            = _mm_load_ss(ptrD+8);
    t1            = _mm_unpacklo_ps(t1,t3);
    t3            = _mm_unpacklo_ps(t2,t4);
    *z3           = _mm_unpacklo_ps(t1,t3);
}


static void
gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1,t2,t3,t4;
    t1            = _mm_loadu_ps(ptrA);
    t2            = _mm_loadu_ps(ptrB);
    t3            = _mm_loadu_ps(ptrC);
    t4            = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *x1           = t1;
    *y1           = t2;
    *z1           = t3;
    *x2           = t4;
    t1            = _mm_loadu_ps(ptrA+4);
    t2            = _mm_loadu_ps(ptrB+4);
    t3            = _mm_loadu_ps(ptrC+4);
    t4            = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *y2           = t1;
    *z2           = t2;
    *x3           = t3;
    *y3           = t4;
    t1            = _mm_loadu_ps(ptrA+8);
    t2            = _mm_loadu_ps(ptrB+8);
    t3            = _mm_loadu_ps(ptrC+8);
    t4            = _mm_loadu_ps(ptrD+8);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *z3           = t1;
    *x4           = t2;
    *y4           = t3;
    *z4           = t4;
}


static void
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA,
                                       float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC,
                                       float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
    t5          = _mm_unpacklo_ps(y1,z1);
    t6          = _mm_unpackhi_ps(y1,z1);
    t7          = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(1,0,0,0));
    t8          = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(3,2,0,1));
    t9          = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(1,0,0,2));
    t10         = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(3,2,0,3));
    t1          = _mm_load_ss(ptrA);
    t1          = _mm_loadh_pi(t1,(__m64 *)(ptrA+1));
    t1          = _mm_sub_ps(t1,t7);
    _mm_store_ss(ptrA,t1);
    _mm_storeh_pi((__m64 *)(ptrA+1),t1);
    t2          = _mm_load_ss(ptrB);
    t2          = _mm_loadh_pi(t2,(__m64 *)(ptrB+1));
    t2          = _mm_sub_ps(t2,t8);
    _mm_store_ss(ptrB,t2);
    _mm_storeh_pi((__m64 *)(ptrB+1),t2);
    t3          = _mm_load_ss(ptrC);
    t3          = _mm_loadh_pi(t3,(__m64 *)(ptrC+1));
    t3          = _mm_sub_ps(t3,t9);
    _mm_store_ss(ptrC,t3);
    _mm_storeh_pi((__m64 *)(ptrC+1),t3);
    t4          = _mm_load_ss(ptrD);
    t4          = _mm_loadh_pi(t4,(__m64 *)(ptrD+1));
    t4          = _mm_sub_ps(t4,t10);
    _mm_store_ss(ptrD,t4);
    _mm_storeh_pi((__m64 *)(ptrD+1),t4);
}
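
/* Illustrative sketch (not part of the original header): subtracting the pair
 * forces (scalar force times distance vector) from the force entries of four
 * j particles. The f array with three floats per particle, the jnrA..jnrD
 * indices and the fscal register are hypothetical names.
 */
static gmx_inline void
gmx_mm_example_decrement_jforces(float * gmx_restrict f,
                                 int jnrA, int jnrB, int jnrC, int jnrD,
                                 __m128 fscal, __m128 dx, __m128 dy, __m128 dz)
{
    __m128 tx,ty,tz;

    tx = _mm_mul_ps(fscal,dx);
    ty = _mm_mul_ps(fscal,dy);
    tz = _mm_mul_ps(fscal,dz);
    gmx_mm_decrement_1rvec_4ptr_swizzle_ps(f+3*jnrA,f+3*jnrB,f+3*jnrC,f+3*jnrD,tx,ty,tz);
}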



static void
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
    __m128 t11,t12,t13,t14,t15,t16,t17,t18,t19;
    __m128 t20,t21,t22,t23,t24,t25;

    t13         = _mm_unpackhi_ps(x1,y1);
    x1          = _mm_unpacklo_ps(x1,y1);
    t14         = _mm_unpackhi_ps(z1,x2);
    z1          = _mm_unpacklo_ps(z1,x2);
    t15         = _mm_unpackhi_ps(y2,z2);
    y2          = _mm_unpacklo_ps(y2,z2);
    t16         = _mm_unpackhi_ps(x3,y3);
    x3          = _mm_unpacklo_ps(x3,y3);
    t17         = _mm_shuffle_ps(z3,z3,_MM_SHUFFLE(0,0,0,1));
    t18         = _mm_movehl_ps(z3,z3);
    t19         = _mm_shuffle_ps(t18,t18,_MM_SHUFFLE(0,0,0,1));
    t20         = _mm_movelh_ps(x1,z1);
    t21         = _mm_movehl_ps(z1,x1);
    t22         = _mm_movelh_ps(t13,t14);
    t14         = _mm_movehl_ps(t14,t13);
    t23         = _mm_movelh_ps(y2,x3);
    t24         = _mm_movehl_ps(x3,y2);
    t25         = _mm_movelh_ps(t15,t16);
    t16         = _mm_movehl_ps(t16,t15);
    t1          = _mm_loadu_ps(ptrA);
    t2          = _mm_loadu_ps(ptrA+4);
    t3          = _mm_load_ss(ptrA+8);
    t1          = _mm_sub_ps(t1,t20);
    t2          = _mm_sub_ps(t2,t23);
    t3          = _mm_sub_ss(t3,z3);
    _mm_storeu_ps(ptrA,t1);
    _mm_storeu_ps(ptrA+4,t2);
    _mm_store_ss(ptrA+8,t3);
    t4          = _mm_loadu_ps(ptrB);
    t5          = _mm_loadu_ps(ptrB+4);
    t6          = _mm_load_ss(ptrB+8);
    t4          = _mm_sub_ps(t4,t21);
    t5          = _mm_sub_ps(t5,t24);
    t6          = _mm_sub_ss(t6,t17);
    _mm_storeu_ps(ptrB,t4);
    _mm_storeu_ps(ptrB+4,t5);
    _mm_store_ss(ptrB+8,t6);
    t7          = _mm_loadu_ps(ptrC);
    t8          = _mm_loadu_ps(ptrC+4);
    t9          = _mm_load_ss(ptrC+8);
    t7          = _mm_sub_ps(t7,t22);
    t8          = _mm_sub_ps(t8,t25);
    t9          = _mm_sub_ss(t9,t18);
    _mm_storeu_ps(ptrC,t7);
    _mm_storeu_ps(ptrC+4,t8);
    _mm_store_ss(ptrC+8,t9);
    t10         = _mm_loadu_ps(ptrD);
    t11         = _mm_loadu_ps(ptrD+4);
    t12         = _mm_load_ss(ptrD+8);
    t10         = _mm_sub_ps(t10,t14);
    t11         = _mm_sub_ps(t11,t16);
    t12         = _mm_sub_ss(t12,t19);
    _mm_storeu_ps(ptrD,t10);
    _mm_storeu_ps(ptrD+4,t11);
    _mm_store_ss(ptrD+8,t12);
}


static void
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3,
                                       __m128 x4, __m128 y4, __m128 z4)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11;
    __m128 t12,t13,t14,t15,t16,t17,t18,t19,t20,t21,t22;
    __m128 t23,t24;
    t13         = _mm_unpackhi_ps(x1,y1);
    x1          = _mm_unpacklo_ps(x1,y1);
    t14         = _mm_unpackhi_ps(z1,x2);
    z1          = _mm_unpacklo_ps(z1,x2);
    t15         = _mm_unpackhi_ps(y2,z2);
    y2          = _mm_unpacklo_ps(y2,z2);
    t16         = _mm_unpackhi_ps(x3,y3);
    x3          = _mm_unpacklo_ps(x3,y3);
    t17         = _mm_unpackhi_ps(z3,x4);
    z3          = _mm_unpacklo_ps(z3,x4);
    t18         = _mm_unpackhi_ps(y4,z4);
    y4          = _mm_unpacklo_ps(y4,z4);
    t19         = _mm_movelh_ps(x1,z1);
    z1          = _mm_movehl_ps(z1,x1);
    t20         = _mm_movelh_ps(t13,t14);
    t14         = _mm_movehl_ps(t14,t13);
    t21         = _mm_movelh_ps(y2,x3);
    x3          = _mm_movehl_ps(x3,y2);
    t22         = _mm_movelh_ps(t15,t16);
    t16         = _mm_movehl_ps(t16,t15);
    t23         = _mm_movelh_ps(z3,y4);
    y4          = _mm_movehl_ps(y4,z3);
    t24         = _mm_movelh_ps(t17,t18);
    t18         = _mm_movehl_ps(t18,t17);
    t1          = _mm_loadu_ps(ptrA);
    t2          = _mm_loadu_ps(ptrA+4);
    t3          = _mm_loadu_ps(ptrA+8);
    t1          = _mm_sub_ps(t1,t19);
    t2          = _mm_sub_ps(t2,t21);
    t3          = _mm_sub_ps(t3,t23);
    _mm_storeu_ps(ptrA,t1);
    _mm_storeu_ps(ptrA+4,t2);
    _mm_storeu_ps(ptrA+8,t3);
    t4          = _mm_loadu_ps(ptrB);
    t5          = _mm_loadu_ps(ptrB+4);
    t6          = _mm_loadu_ps(ptrB+8);
    t4          = _mm_sub_ps(t4,z1);
    t5          = _mm_sub_ps(t5,x3);
    t6          = _mm_sub_ps(t6,y4);
    _mm_storeu_ps(ptrB,t4);
    _mm_storeu_ps(ptrB+4,t5);
    _mm_storeu_ps(ptrB+8,t6);
    t7          = _mm_loadu_ps(ptrC);
    t8          = _mm_loadu_ps(ptrC+4);
    t9          = _mm_loadu_ps(ptrC+8);
    t7          = _mm_sub_ps(t7,t20);
    t8          = _mm_sub_ps(t8,t22);
    t9          = _mm_sub_ps(t9,t24);
    _mm_storeu_ps(ptrC,t7);
    _mm_storeu_ps(ptrC+4,t8);
    _mm_storeu_ps(ptrC+8,t9);
    t10         = _mm_loadu_ps(ptrD);
    t11         = _mm_loadu_ps(ptrD+4);
    t12         = _mm_loadu_ps(ptrD+8);
    t10         = _mm_sub_ps(t10,t14);
    t11         = _mm_sub_ps(t11,t16);
    t12         = _mm_sub_ps(t12,t18);
    _mm_storeu_ps(ptrD,t10);
    _mm_storeu_ps(ptrD+4,t11);
    _mm_storeu_ps(ptrD+8,t12);
}



static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3;

    /* transpose data */
    t1 = fix1;
    _MM_TRANSPOSE4_PS(fix1,t1,fiy1,fiz1);
    fix1 = _mm_add_ps(_mm_add_ps(fix1,t1), _mm_add_ps(fiy1,fiz1));

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2,(__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3,(__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2,fix1);
    t3 = _mm_add_ps(t3,fix1);

    _mm_store_ss(fptr,t2);
    _mm_storeh_pi((__m64 *)(fptr+1),t2);
    _mm_store_ss(fshiftptr,t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1),t3);
}
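
/* Illustrative sketch (not part of the original header): after the inner loop
 * has accumulated partial i-particle forces in fix1/fiy1/fiz1 (one partial sum
 * per SSE lane), reduce the lanes and add the result to the force and
 * shift-force arrays. The flat f/fshift arrays (three floats per entry) and
 * the inr/ishift indices are hypothetical names.
 */
static gmx_inline void
gmx_mm_example_update_iforce(__m128 fix1, __m128 fiy1, __m128 fiz1,
                             float * gmx_restrict f, int inr,
                             float * gmx_restrict fshift, int ishift)
{
    gmx_mm_update_iforce_1atom_swizzle_ps(fix1,fiy1,fiz1,f+3*inr,fshift+3*ishift);
}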

static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3,t4;

    /* transpose data */
    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);
    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);
    t2   = _mm_movehl_ps(_mm_setzero_ps(),fiz3);
    t1   = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(0,0,0,1));
    t3   = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,1));

    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));
    fiz3 = _mm_add_ss(_mm_add_ps(fiz3,t1)  , _mm_add_ps(t2,t3));

    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
    _mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));

    t4 = _mm_load_ss(fshiftptr+2);
    t4 = _mm_loadh_pi(t4,(__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));   /* fiy1 fix1  -   fiz3 */
    t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));   /* fiy3 fix3  -   fiz1 */
    t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));   /* fix2 fix2 fiy2 fiz2 */
    t3 = _mm_shuffle_ps(t3  ,t3  ,_MM_SHUFFLE(1,2,0,0));   /* fiy2 fix2  -   fiz2 */

    t1 = _mm_add_ps(t1,t2);
    t3 = _mm_add_ps(t3,t4);
    t1 = _mm_add_ps(t1,t3); /* y x - z */

    _mm_store_ss(fshiftptr+2,t1);
    _mm_storeh_pi((__m64 *)(fshiftptr),t1);
}


static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3,t4,t5;

    /* transpose data */
    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);
    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);
    _MM_TRANSPOSE4_PS(fiz3,fix4,fiy4,fiz4);

    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));
    fiz3 = _mm_add_ps(_mm_add_ps(fiz3,fix4), _mm_add_ps(fiy4,fiz4));

    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
    _mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));

    t5 = _mm_load_ss(fshiftptr+2);
    t5 = _mm_loadh_pi(t5,(__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fix1,fix1,_MM_SHUFFLE(1,0,2,2));
    t2 = _mm_shuffle_ps(fiy2,fiy2,_MM_SHUFFLE(3,2,1,1));
    t3 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(2,1,0,0));
    t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));
    t4 = _mm_shuffle_ps(fiz3,t4  ,_MM_SHUFFLE(2,0,3,3));

    t1 = _mm_add_ps(t1,t2);
    t3 = _mm_add_ps(t3,t4);
    t1 = _mm_add_ps(t1,t3);
    t5 = _mm_add_ps(t5,t1);

    _mm_store_ss(fshiftptr+2,t5);
    _mm_storeh_pi((__m64 *)(fshiftptr),t5);
}



static void
gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
    pot1 = _mm_add_ps(pot1,_mm_movehl_ps(_mm_setzero_ps(),pot1));
    pot1 = _mm_add_ps(pot1,_mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(0,0,0,1)));
    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
}

static void
gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB)
{
    __m128 t1,t2;
    t1   = _mm_movehl_ps(pot2,pot1);
    t2   = _mm_movelh_ps(pot1,pot2);
    t1   = _mm_add_ps(t1,t2);
    t2   = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,1,1));
    pot1 = _mm_add_ps(t1,t2);
    pot2 = _mm_movehl_ps(t2,pot1);
    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
    _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
}
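
/* Illustrative sketch (not part of the original header): at the end of an
 * outer-loop iteration the accumulated Coulomb and Lennard-Jones energies are
 * reduced and added to their energy-group slots. The velecsum/vvdwsum
 * accumulators, the two energy arrays and the ggid group index are
 * hypothetical names.
 */
static gmx_inline void
gmx_mm_example_update_energies(__m128 velecsum, __m128 vvdwsum,
                               float * gmx_restrict Vc, float * gmx_restrict Vvdw,
                               int ggid)
{
    gmx_mm_update_2pot_ps(velecsum,Vc+ggid,vvdwsum,Vvdw+ggid);
}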


static void
gmx_mm_update_4pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB,
                      __m128 pot3, float * gmx_restrict ptrC,
                      __m128 pot4, float * gmx_restrict ptrD)
{
    _MM_TRANSPOSE4_PS(pot1,pot2,pot3,pot4);
    pot1 = _mm_add_ps(_mm_add_ps(pot1,pot2),_mm_add_ps(pot3,pot4));
    pot2 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(1,1,1,1));
    pot3 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(2,2,2,2));
    pot4 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(3,3,3,3));
    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
    _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
    _mm_store_ss(ptrC,_mm_add_ss(pot3,_mm_load_ss(ptrC)));
    _mm_store_ss(ptrD,_mm_add_ss(pot4,_mm_load_ss(ptrD)));
}


#endif /* _kernelutil_x86_sse2_single_h_ */