Merge release-4-6 into master
[alexxy/gromacs.git] / src/gromacs/gmxlib/nonbonded/nb_kernel_sse2_single/kernelutil_x86_sse2_single.h
index e0b324f582de5960fbdd5e4d624954808e9edde2..278312182055bc47f5848cedc9de2c41b7c46191 100644
@@ -31,7 +31,7 @@
 
 /* We require SSE2 now! */
 
-#include <math.h> 
+#include <math.h>
 
 #include "gmx_x86_sse2.h"
 
@@ -128,20 +128,20 @@ gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
 
 static gmx_inline void
 gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
-                                         const float * gmx_restrict xyz,
-                                         __m128 * gmx_restrict x1,
-                                         __m128 * gmx_restrict y1,
-                                         __m128 * gmx_restrict z1)
+        const float * gmx_restrict xyz,
+        __m128 * gmx_restrict x1,
+        __m128 * gmx_restrict y1,
+        __m128 * gmx_restrict z1)
 {
     __m128 t1,t2,t3,t4;
-    
+
     t1   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
     t2   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz);
     t3   = _mm_load_ss(xyz_shift+2);
     t4   = _mm_load_ss(xyz+2);
     t1   = _mm_add_ps(t1,t2);
     t3   = _mm_add_ss(t3,t4);
-    
+
     *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
     *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
     *z1  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
@@ -150,30 +150,30 @@ gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
 
 static gmx_inline void
 gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
-                                         const float * gmx_restrict xyz,
-                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
-                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
-                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
+        const float * gmx_restrict xyz,
+        __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
+        __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
+        __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
 {
     __m128 tA,tB;
     __m128 t1,t2,t3,t4,t5,t6;
-    
+
     tA   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
     tB   = _mm_load_ss(xyz_shift+2);
-    
+
     t1   = _mm_loadu_ps(xyz);
     t2   = _mm_loadu_ps(xyz+4);
     t3   = _mm_load_ss(xyz+8);
-    
+
     tA   = _mm_movelh_ps(tA,tB);
     t4   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
     t5   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
     t6   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));
-    
+
     t1   = _mm_add_ps(t1,t4);
     t2   = _mm_add_ps(t2,t5);
     t3   = _mm_add_ss(t3,t6);
-    
+
     *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
     *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
     *z1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
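
For readers less familiar with SSE shuffles, the following scalar sketch (an annotation added here, not part of the commit) shows what the shift-and-broadcast loaders compute: the periodic shift vector is added to each atom coordinate read from xyz, and every resulting component is broadcast into all four SIMD lanes. The 1-rvec variant above and the 4-rvec variant below differ only in the number of atoms handled. All names in the sketch are illustrative.

/* Scalar reference sketch (illustration only, not the SSE2 implementation).
 * xyz holds three atoms as x1 y1 z1 x2 y2 z2 x3 y3 z3; each shifted
 * component is replicated into all four lanes of its output vector.      */
static void
load_shift_and_3rvec_broadcast_scalar(const float xyz_shift[3], const float xyz[9],
                                      float x1[4], float y1[4], float z1[4],
                                      float x2[4], float y2[4], float z2[4],
                                      float x3[4], float y3[4], float z3[4])
{
    float out[9];
    int   a, l;

    for (a = 0; a < 3; a++)
    {
        out[3*a+0] = xyz[3*a+0] + xyz_shift[0];
        out[3*a+1] = xyz[3*a+1] + xyz_shift[1];
        out[3*a+2] = xyz[3*a+2] + xyz_shift[2];
    }
    for (l = 0; l < 4; l++)
    {
        x1[l] = out[0]; y1[l] = out[1]; z1[l] = out[2];
        x2[l] = out[3]; y2[l] = out[4]; z2[l] = out[5];
        x3[l] = out[6]; y3[l] = out[7]; z3[l] = out[8];
    }
}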
@@ -188,31 +188,31 @@ gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
 
 static gmx_inline void
 gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
-                                         const float * gmx_restrict xyz,
-                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
-                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
-                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
-                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
+        const float * gmx_restrict xyz,
+        __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
+        __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
+        __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
+        __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
 {
     __m128 tA,tB;
     __m128 t1,t2,t3,t4,t5,t6;
-    
+
     tA   = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
     tB   = _mm_load_ss(xyz_shift+2);
-    
+
     t1   = _mm_loadu_ps(xyz);
     t2   = _mm_loadu_ps(xyz+4);
     t3   = _mm_loadu_ps(xyz+8);
-    
+
     tA   = _mm_movelh_ps(tA,tB);
     t4   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
     t5   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
     t6   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));
-    
+
     t1   = _mm_add_ps(t1,t4);
     t2   = _mm_add_ps(t2,t5);
     t3   = _mm_add_ps(t3,t6);
-    
+
     *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
     *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
     *z1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
@@ -263,7 +263,7 @@ gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                   const float * gmx_restrict ptrD,
                                   __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                   __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
-                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3) 
+                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
 {
     __m128 t1,t2,t3,t4;
     t1            = _mm_loadu_ps(ptrA);
@@ -302,7 +302,7 @@ gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                   __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                   __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                   __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
-                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4) 
+                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
 {
     __m128 t1,t2,t3,t4;
     t1            = _mm_loadu_ps(ptrA);
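
The two *_4ptr_swizzle loaders touched above perform the opposite gather: they read three or four rvecs from each of four j-atom pointers and transpose them so that lane l of every output vector originates from pointer l. A scalar sketch (illustration added here, not part of the commit; names are illustrative):

/* Scalar reference sketch (illustration only): gather three rvecs from each
 * of four pointers; the 4-rvec variant reads twelve floats per pointer.   */
static void
load_3rvec_4ptr_scalar(const float *ptrA, const float *ptrB,
                       const float *ptrC, const float *ptrD,
                       float x1[4], float y1[4], float z1[4],
                       float x2[4], float y2[4], float z2[4],
                       float x3[4], float y3[4], float z3[4])
{
    const float *p[4];
    int          l;

    p[0] = ptrA; p[1] = ptrB; p[2] = ptrC; p[3] = ptrD;
    for (l = 0; l < 4; l++)
    {
        x1[l] = p[l][0]; y1[l] = p[l][1]; z1[l] = p[l][2];
        x2[l] = p[l][3]; y2[l] = p[l][4]; z2[l] = p[l][5];
        x3[l] = p[l][6]; y3[l] = p[l][7]; z3[l] = p[l][8];
    }
}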
@@ -373,12 +373,78 @@ gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA,
 
 
 
+#if defined (_MSC_VER) && defined(_M_IX86)
+/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
+#define gmx_mm_decrement_3rvec_4ptr_swizzle_ps(ptrA,ptrB,ptrC,ptrD, \
+_x1,_y1,_z1,_x2,_y2,_z2,_x3,_y3,_z3) \
+{\
+__m128 _t1,_t2,_t3,_t4,_t5,_t6,_t7,_t8,_t9,_t10;\
+__m128 _t11,_t12,_t13,_t14,_t15,_t16,_t17,_t18,_t19;\
+__m128 _t20,_t21,_t22,_t23,_t24,_t25;\
+_t13         = _mm_unpackhi_ps(_x1,_y1);\
+_x1          = _mm_unpacklo_ps(_x1,_y1);\
+_t14         = _mm_unpackhi_ps(_z1,_x2);\
+_z1          = _mm_unpacklo_ps(_z1,_x2);\
+_t15         = _mm_unpackhi_ps(_y2,_z2);\
+_y2          = _mm_unpacklo_ps(_y2,_z2);\
+_t16         = _mm_unpackhi_ps(_x3,_y3);\
+_x3          = _mm_unpacklo_ps(_x3,_y3);\
+_t17         = _mm_shuffle_ps(_z3,_z3,_MM_SHUFFLE(0,0,0,1));\
+_t18         = _mm_movehl_ps(_z3,_z3);\
+_t19         = _mm_shuffle_ps(_t18,_t18,_MM_SHUFFLE(0,0,0,1));\
+_t20         = _mm_movelh_ps(_x1,_z1);\
+_t21         = _mm_movehl_ps(_z1,_x1);\
+_t22         = _mm_movelh_ps(_t13,_t14);\
+_t14         = _mm_movehl_ps(_t14,_t13);\
+_t23         = _mm_movelh_ps(_y2,_x3);\
+_t24         = _mm_movehl_ps(_x3,_y2);\
+_t25         = _mm_movelh_ps(_t15,_t16);\
+_t16         = _mm_movehl_ps(_t16,_t15);\
+_t1          = _mm_loadu_ps(ptrA);\
+_t2          = _mm_loadu_ps(ptrA+4);\
+_t3          = _mm_load_ss(ptrA+8);\
+_t1          = _mm_sub_ps(_t1,_t20);\
+_t2          = _mm_sub_ps(_t2,_t23);\
+_t3          = _mm_sub_ss(_t3,_z3);\
+_mm_storeu_ps(ptrA,_t1);\
+_mm_storeu_ps(ptrA+4,_t2);\
+_mm_store_ss(ptrA+8,_t3);\
+_t4          = _mm_loadu_ps(ptrB);\
+_t5          = _mm_loadu_ps(ptrB+4);\
+_t6          = _mm_load_ss(ptrB+8);\
+_t4          = _mm_sub_ps(_t4,_t21);\
+_t5          = _mm_sub_ps(_t5,_t24);\
+_t6          = _mm_sub_ss(_t6,_t17);\
+_mm_storeu_ps(ptrB,_t4);\
+_mm_storeu_ps(ptrB+4,_t5);\
+_mm_store_ss(ptrB+8,_t6);\
+_t7          = _mm_loadu_ps(ptrC);\
+_t8          = _mm_loadu_ps(ptrC+4);\
+_t9          = _mm_load_ss(ptrC+8);\
+_t7          = _mm_sub_ps(_t7,_t22);\
+_t8          = _mm_sub_ps(_t8,_t25);\
+_t9          = _mm_sub_ss(_t9,_t18);\
+_mm_storeu_ps(ptrC,_t7);\
+_mm_storeu_ps(ptrC+4,_t8);\
+_mm_store_ss(ptrC+8,_t9);\
+_t10         = _mm_loadu_ps(ptrD);\
+_t11         = _mm_loadu_ps(ptrD+4);\
+_t12         = _mm_load_ss(ptrD+8);\
+_t10         = _mm_sub_ps(_t10,_t14);\
+_t11         = _mm_sub_ps(_t11,_t16);\
+_t12         = _mm_sub_ss(_t12,_t19);\
+_mm_storeu_ps(ptrD,_t10);\
+_mm_storeu_ps(ptrD+4,_t11);\
+_mm_store_ss(ptrD+8,_t12);\
+}
+#else
+/* Real function for sane compilers */
 static void
 gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                        float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                        __m128 x1, __m128 y1, __m128 z1,
                                        __m128 x2, __m128 y2, __m128 z2,
-                                       __m128 x3, __m128 y3, __m128 z3) 
+                                       __m128 x3, __m128 y3, __m128 z3)
 {
     __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
     __m128 t11,t12,t13,t14,t15,t16,t17,t18,t19;
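
The new #if branch and the function below it implement the same operation; the macro form is needed only because 32-bit MSVC refuses functions taking more than three __m128 arguments by value (presumably because aligned by-value parameters beyond the first three XMM registers cannot be placed on the x86 stack). Since the swizzle sequence is hard to follow, here is a scalar sketch (added for illustration, not part of the commit) of what gmx_mm_decrement_3rvec_4ptr_swizzle_ps computes: lane 0..3 of every __m128 argument belongs to ptrA..ptrD, and the nine force components are subtracted from the nine consecutive floats at each pointer. The 4-rvec variant further down is analogous with twelve components; names in the sketch are illustrative.

/* Scalar reference sketch (illustration only): lane l of each input belongs
 * to pointer l; memory order is x1 y1 z1 x2 y2 z2 x3 y3 z3.               */
static void
decrement_3rvec_4ptr_scalar(float *ptr[4],
                            const float x1[4], const float y1[4], const float z1[4],
                            const float x2[4], const float y2[4], const float z2[4],
                            const float x3[4], const float y3[4], const float z3[4])
{
    int l;

    for (l = 0; l < 4; l++)
    {
        ptr[l][0] -= x1[l]; ptr[l][1] -= y1[l]; ptr[l][2] -= z1[l];
        ptr[l][3] -= x2[l]; ptr[l][4] -= y2[l]; ptr[l][5] -= z2[l];
        ptr[l][6] -= x3[l]; ptr[l][7] -= y3[l]; ptr[l][8] -= z3[l];
    }
}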
@@ -440,15 +506,87 @@ gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_re
     _mm_storeu_ps(ptrD+4,t11);
     _mm_store_ss(ptrD+8,t12);
 }
-
-
+#endif
+
+
+#if defined (_MSC_VER) && defined(_M_IX86)
+/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
+#define gmx_mm_decrement_4rvec_4ptr_swizzle_ps(ptrA,ptrB,ptrC,ptrD, \
+_x1,_y1,_z1,_x2,_y2,_z2,_x3,_y3,_z3,_x4,_y4,_z4) \
+{\
+__m128 _t1,_t2,_t3,_t4,_t5,_t6,_t7,_t8,_t9,_t10,_t11;\
+__m128 _t12,_t13,_t14,_t15,_t16,_t17,_t18,_t19,_t20,_t21,_t22;\
+__m128 _t23,_t24;\
+_t13         = _mm_unpackhi_ps(_x1,_y1);\
+_x1          = _mm_unpacklo_ps(_x1,_y1);\
+_t14         = _mm_unpackhi_ps(_z1,_x2);\
+_z1          = _mm_unpacklo_ps(_z1,_x2);\
+_t15         = _mm_unpackhi_ps(_y2,_z2);\
+_y2          = _mm_unpacklo_ps(_y2,_z2);\
+_t16         = _mm_unpackhi_ps(_x3,_y3);\
+_x3          = _mm_unpacklo_ps(_x3,_y3);\
+_t17         = _mm_unpackhi_ps(_z3,_x4);\
+_z3          = _mm_unpacklo_ps(_z3,_x4);\
+_t18         = _mm_unpackhi_ps(_y4,_z4);\
+_y4          = _mm_unpacklo_ps(_y4,_z4);\
+_t19         = _mm_movelh_ps(_x1,_z1);\
+_z1          = _mm_movehl_ps(_z1,_x1);\
+_t20         = _mm_movelh_ps(_t13,_t14);\
+_t14         = _mm_movehl_ps(_t14,_t13);\
+_t21         = _mm_movelh_ps(_y2,_x3);\
+_x3          = _mm_movehl_ps(_x3,_y2);\
+_t22         = _mm_movelh_ps(_t15,_t16);\
+_t16         = _mm_movehl_ps(_t16,_t15);\
+_t23         = _mm_movelh_ps(_z3,_y4);\
+_y4          = _mm_movehl_ps(_y4,_z3);\
+_t24         = _mm_movelh_ps(_t17,_t18);\
+_t18         = _mm_movehl_ps(_t18,_t17);\
+_t1          = _mm_loadu_ps(ptrA);\
+_t2          = _mm_loadu_ps(ptrA+4);\
+_t3          = _mm_loadu_ps(ptrA+8);\
+_t1          = _mm_sub_ps(_t1,_t19);\
+_t2          = _mm_sub_ps(_t2,_t21);\
+_t3          = _mm_sub_ps(_t3,_t23);\
+_mm_storeu_ps(ptrA,_t1);\
+_mm_storeu_ps(ptrA+4,_t2);\
+_mm_storeu_ps(ptrA+8,_t3);\
+_t4          = _mm_loadu_ps(ptrB);\
+_t5          = _mm_loadu_ps(ptrB+4);\
+_t6          = _mm_loadu_ps(ptrB+8);\
+_t4          = _mm_sub_ps(_t4,_z1);\
+_t5          = _mm_sub_ps(_t5,_x3);\
+_t6          = _mm_sub_ps(_t6,_y4);\
+_mm_storeu_ps(ptrB,_t4);\
+_mm_storeu_ps(ptrB+4,_t5);\
+_mm_storeu_ps(ptrB+8,_t6);\
+_t7          = _mm_loadu_ps(ptrC);\
+_t8          = _mm_loadu_ps(ptrC+4);\
+_t9          = _mm_loadu_ps(ptrC+8);\
+_t7          = _mm_sub_ps(_t7,_t20);\
+_t8          = _mm_sub_ps(_t8,_t22);\
+_t9          = _mm_sub_ps(_t9,_t24);\
+_mm_storeu_ps(ptrC,_t7);\
+_mm_storeu_ps(ptrC+4,_t8);\
+_mm_storeu_ps(ptrC+8,_t9);\
+_t10         = _mm_loadu_ps(ptrD);\
+_t11         = _mm_loadu_ps(ptrD+4);\
+_t12         = _mm_loadu_ps(ptrD+8);\
+_t10         = _mm_sub_ps(_t10,_t14);\
+_t11         = _mm_sub_ps(_t11,_t16);\
+_t12         = _mm_sub_ps(_t12,_t18);\
+_mm_storeu_ps(ptrD,_t10);\
+_mm_storeu_ps(ptrD+4,_t11);\
+_mm_storeu_ps(ptrD+8,_t12);\
+}
+#else
+/* Real function for sane compilers */
 static void
 gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                        float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                        __m128 x1, __m128 y1, __m128 z1,
                                        __m128 x2, __m128 y2, __m128 z2,
                                        __m128 x3, __m128 y3, __m128 z3,
-                                       __m128 x4, __m128 y4, __m128 z4) 
+                                       __m128 x4, __m128 y4, __m128 z4)
 {
     __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11;
     __m128 t12,t13,t14,t15,t16,t17,t18,t19,t20,t21,t22;
@@ -514,7 +652,7 @@ gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_re
     _mm_storeu_ps(ptrD+4,t11);
     _mm_storeu_ps(ptrD+8,t12);
 }
-
+#endif
 
 
 static gmx_inline void
@@ -543,6 +681,38 @@ gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
     _mm_storeh_pi((__m64 *)(fshiftptr+1),t3);
 }
 
+#if defined (_MSC_VER) && defined(_M_IX86)
+/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
+#define gmx_mm_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3, \
+                                              fptr,fshiftptr) \
+{\
+    __m128 _t1,_t2,_t3,_t4;\
+\
+    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);\
+    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);\
+    _t2   = _mm_movehl_ps(_mm_setzero_ps(),fiz3);\
+    _t1   = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(0,0,0,1));\
+    _t3   = _mm_shuffle_ps(_t2,_t2,_MM_SHUFFLE(0,0,0,1));\
+    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));\
+    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));\
+    fiz3 = _mm_add_ss(_mm_add_ps(fiz3,_t1)  , _mm_add_ps(_t2,_t3));\
+    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));\
+    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));\
+    _mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));\
+    _t4 = _mm_load_ss(fshiftptr+2);\
+    _t4 = _mm_loadh_pi(_t4,(__m64 *)(fshiftptr));\
+    _t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));\
+    _t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));\
+    _t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));\
+    _t3 = _mm_shuffle_ps(_t3  ,_t3  ,_MM_SHUFFLE(1,2,0,0));\
+    _t1 = _mm_add_ps(_t1,_t2);\
+    _t3 = _mm_add_ps(_t3,_t4);\
+    _t1 = _mm_add_ps(_t1,_t3);\
+    _mm_store_ss(fshiftptr+2,_t1);\
+    _mm_storeh_pi((__m64 *)(fshiftptr),_t1);\
+}
+#else
+/* Real function for sane compilers */
 static gmx_inline void
 gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                       __m128 fix2, __m128 fiy2, __m128 fiz2,
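
As a further reading aid (not part of the commit), a scalar sketch of the i-force update: each fi* vector carries four partial sums for one force component of one i-atom; the helper adds the lane sum of every component to the force array at fptr and adds the per-component total over the three atoms to the shift-force accumulator at fshiftptr (used for the virial). The 4-atom variant below does the same for twelve components; names are illustrative.

/* Scalar reference sketch (illustration only): f[c][l] is lane l of force
 * component c, with component order x1 y1 z1 x2 y2 z2 x3 y3 z3.          */
static void
update_iforce_3atom_scalar(const float f[9][4], float fptr[9], float fshiftptr[3])
{
    int c, l;

    for (c = 0; c < 9; c++)
    {
        float sum = 0.0f;

        for (l = 0; l < 4; l++)
        {
            sum += f[c][l];
        }
        fptr[c]          += sum;
        fshiftptr[c % 3] += sum;
    }
}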
@@ -582,8 +752,39 @@ gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
     _mm_store_ss(fshiftptr+2,t1);
     _mm_storeh_pi((__m64 *)(fshiftptr),t1);
 }
-
-
+#endif
+
+#if defined (_MSC_VER) && defined(_M_IX86)
+/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
+#define gmx_mm_update_iforce_4atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,fix4,fiy4,fiz4, \
+                                              fptr,fshiftptr) \
+{\
+    __m128 _t1,_t2,_t3,_t4,_t5;\
+    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);\
+    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);\
+    _MM_TRANSPOSE4_PS(fiz3,fix4,fiy4,fiz4);\
+    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));\
+    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));\
+    fiz3 = _mm_add_ps(_mm_add_ps(fiz3,fix4), _mm_add_ps(fiy4,fiz4));\
+    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));\
+    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));\
+    _mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));\
+    _t5 = _mm_load_ss(fshiftptr+2);\
+    _t5 = _mm_loadh_pi(_t5,(__m64 *)(fshiftptr));\
+    _t1 = _mm_shuffle_ps(fix1,fix1,_MM_SHUFFLE(1,0,2,2));\
+    _t2 = _mm_shuffle_ps(fiy2,fiy2,_MM_SHUFFLE(3,2,1,1));\
+    _t3 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(2,1,0,0));\
+    _t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));\
+    _t4 = _mm_shuffle_ps(fiz3,_t4  ,_MM_SHUFFLE(2,0,3,3));\
+    _t1 = _mm_add_ps(_t1,_t2);\
+    _t3 = _mm_add_ps(_t3,_t4);\
+    _t1 = _mm_add_ps(_t1,_t3);\
+    _t5 = _mm_add_ps(_t5,_t1);\
+    _mm_store_ss(fshiftptr+2,_t5);\
+    _mm_storeh_pi((__m64 *)(fshiftptr),_t5);\
+}
+#else
+/* Real function for sane compilers */
 static gmx_inline void
 gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                       __m128 fix2, __m128 fiy2, __m128 fiz2,
@@ -624,7 +825,7 @@ gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
     _mm_store_ss(fshiftptr+2,t5);
     _mm_storeh_pi((__m64 *)(fshiftptr),t5);
 }
-
+#endif
 
 
 static void
@@ -651,22 +852,4 @@ gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
 }
 
 
-static void
-gmx_mm_update_4pot_ps(__m128 pot1, float * gmx_restrict ptrA,
-                      __m128 pot2, float * gmx_restrict ptrB,
-                      __m128 pot3, float * gmx_restrict ptrC,
-                      __m128 pot4, float * gmx_restrict ptrD)
-{
-    _MM_TRANSPOSE4_PS(pot1,pot2,pot3,pot4);
-    pot1 = _mm_add_ps(_mm_add_ps(pot1,pot2),_mm_add_ps(pot3,pot4));
-    pot2 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(1,1,1,1));
-    pot3 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(2,2,2,2));
-    pot4 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(3,3,3,3));
-    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
-    _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
-    _mm_store_ss(ptrC,_mm_add_ss(pot3,_mm_load_ss(ptrC)));
-    _mm_store_ss(ptrD,_mm_add_ss(pot4,_mm_load_ss(ptrD)));
-}
-
-
 #endif /* _kernelutil_x86_sse2_single_h_ */
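
A closing remark on the deleted helper (annotation, not part of the commit): gmx_mm_update_4pot_ps lane-summed four potential accumulators and added each result to its scalar target; gmx_mm_update_2pot_ps, whose tail is visible in the hunk above, is assumed to do the same for two accumulators, in line with the other update helpers in this file. A scalar sketch of that surviving helper:

/* Scalar reference sketch (illustration only): add the lane sum of each
 * accumulator to its target, mirroring what the SSE2 shuffles achieve.  */
static void
update_2pot_scalar(const float pot1[4], float *ptrA,
                   const float pot2[4], float *ptrB)
{
    *ptrA += pot1[0] + pot1[1] + pot1[2] + pot1[3];
    *ptrB += pot2[0] + pot2[1] + pot2[2] + pot2[3];
}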