static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
- const float * gmx_restrict xyz,
- __m128 * gmx_restrict x1,
- __m128 * gmx_restrict y1,
- __m128 * gmx_restrict z1)
+                                         const float * gmx_restrict xyz,
+                                         __m128 * gmx_restrict x1,
+                                         __m128 * gmx_restrict y1,
+                                         __m128 * gmx_restrict z1)
{
__m128 t1,t2,t3,t4;
static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
- const float * gmx_restrict xyz,
- __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
- __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
- __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
+                                         const float * gmx_restrict xyz,
+                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
+                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
+                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
__m128 tA,tB;
__m128 t1,t2,t3,t4,t5,t6;
static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
- const float * gmx_restrict xyz,
- __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
- __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
- __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
- __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
+                                         const float * gmx_restrict xyz,
+                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
+                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
+                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
+                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
__m128 tA,tB;
__m128 t1,t2,t3,t4,t5,t6;
{
__m128 t1,t2,t3,t4;
__m128i mask = _mm_set_epi32(0,-1,-1,-1);
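    /* The mask enables only the three low lanes, so the maskloads below read
     * exactly x,y,z of each rvec; the fourth lane is zeroed rather than loaded. */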
- t1 = _mm_maskload_ps(ptrA,mask);
- t2 = _mm_maskload_ps(ptrB,mask);
- t3 = _mm_maskload_ps(ptrC,mask);
- t4 = _mm_maskload_ps(ptrD,mask);
+ t1 = gmx_mm_maskload_ps(ptrA,mask);
+ t2 = gmx_mm_maskload_ps(ptrB,mask);
+ t3 = gmx_mm_maskload_ps(ptrC,mask);
+ t4 = gmx_mm_maskload_ps(ptrD,mask);
_MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *x1 = t1;
    *y1 = t2;
    *z1 = t3;
}
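
/* gmx_mm_maskload_ps is assumed to be a thin compatibility wrapper around
 * _mm_maskload_ps that hides a compiler disagreement about whether the AVX
 * maskload intrinsic takes its mask argument as __m128i or __m128. A minimal
 * sketch of such a wrapper, assuming a hypothetical feature macro
 * GMX_MASKLOAD_TAKES_M128 (the real definition lives in the shared GROMACS
 * x86 headers, not in this file):
 */
#ifdef GMX_MASKLOAD_TAKES_M128
#    define gmx_mm_maskload_ps(mem, mask) _mm_maskload_ps((mem), _mm_castsi128_ps(mask))
#else
#    define gmx_mm_maskload_ps(mem, mask) _mm_maskload_ps((mem), (mask))
#endif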
+#if defined (_MSC_VER) && defined(_M_IX86)
+/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
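+/* Background (assumed): 32-bit MSVC passes at most three __m128 arguments
+ * in XMM registers; a fourth would have to go on the 4-byte-aligned x86
+ * stack, which cannot satisfy the 16-byte alignment of __m128, so the
+ * compiler rejects the declaration with error C2719:
+ *
+ *     void f(__m128 a, __m128 b, __m128 c, __m128 d);   <- C2719 on x86
+ *
+ * A macro passes nothing by value, so it sidesteps the limit while leaving
+ * call sites unchanged.
+ */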
+#define gmx_mm_decrement_3rvec_4ptr_swizzle_ps(ptrA,ptrB,ptrC,ptrD, \
+ _x1,_y1,_z1,_x2,_y2,_z2,_x3,_y3,_z3) \
+{\
+ __m128 _t1,_t2,_t3,_t4,_t5,_t6,_t7,_t8,_t9,_t10;\
+ __m128 _t11,_t12,_t13,_t14,_t15,_t16,_t17,_t18,_t19;\
+ __m128 _t20,_t21,_t22,_t23,_t24,_t25;\
+ _t13 = _mm_unpackhi_ps(_x1,_y1);\
+ _x1 = _mm_unpacklo_ps(_x1,_y1);\
+ _t14 = _mm_unpackhi_ps(_z1,_x2);\
+ _z1 = _mm_unpacklo_ps(_z1,_x2);\
+ _t15 = _mm_unpackhi_ps(_y2,_z2);\
+ _y2 = _mm_unpacklo_ps(_y2,_z2);\
+ _t16 = _mm_unpackhi_ps(_x3,_y3);\
+ _x3 = _mm_unpacklo_ps(_x3,_y3);\
+ _t17 = _mm_permute_ps(_z3,_MM_SHUFFLE(0,0,0,1));\
+ _t18 = _mm_movehl_ps(_z3,_z3);\
+ _t19 = _mm_permute_ps(_t18,_MM_SHUFFLE(0,0,0,1));\
+ _t20 = _mm_movelh_ps(_x1,_z1);\
+ _t21 = _mm_movehl_ps(_z1,_x1);\
+ _t22 = _mm_movelh_ps(_t13,_t14);\
+ _t14 = _mm_movehl_ps(_t14,_t13);\
+ _t23 = _mm_movelh_ps(_y2,_x3);\
+ _t24 = _mm_movehl_ps(_x3,_y2);\
+ _t25 = _mm_movelh_ps(_t15,_t16);\
+ _t16 = _mm_movehl_ps(_t16,_t15);\
+ _t1 = _mm_loadu_ps(ptrA);\
+ _t2 = _mm_loadu_ps(ptrA+4);\
+ _t3 = _mm_load_ss(ptrA+8);\
+ _t1 = _mm_sub_ps(_t1,_t20);\
+ _t2 = _mm_sub_ps(_t2,_t23);\
+ _t3 = _mm_sub_ss(_t3,_z3);\
+ _mm_storeu_ps(ptrA,_t1);\
+ _mm_storeu_ps(ptrA+4,_t2);\
+ _mm_store_ss(ptrA+8,_t3);\
+ _t4 = _mm_loadu_ps(ptrB);\
+ _t5 = _mm_loadu_ps(ptrB+4);\
+ _t6 = _mm_load_ss(ptrB+8);\
+ _t4 = _mm_sub_ps(_t4,_t21);\
+ _t5 = _mm_sub_ps(_t5,_t24);\
+ _t6 = _mm_sub_ss(_t6,_t17);\
+ _mm_storeu_ps(ptrB,_t4);\
+ _mm_storeu_ps(ptrB+4,_t5);\
+ _mm_store_ss(ptrB+8,_t6);\
+ _t7 = _mm_loadu_ps(ptrC);\
+ _t8 = _mm_loadu_ps(ptrC+4);\
+ _t9 = _mm_load_ss(ptrC+8);\
+ _t7 = _mm_sub_ps(_t7,_t22);\
+ _t8 = _mm_sub_ps(_t8,_t25);\
+ _t9 = _mm_sub_ss(_t9,_t18);\
+ _mm_storeu_ps(ptrC,_t7);\
+ _mm_storeu_ps(ptrC+4,_t8);\
+ _mm_store_ss(ptrC+8,_t9);\
+ _t10 = _mm_loadu_ps(ptrD);\
+ _t11 = _mm_loadu_ps(ptrD+4);\
+ _t12 = _mm_load_ss(ptrD+8);\
+ _t10 = _mm_sub_ps(_t10,_t14);\
+ _t11 = _mm_sub_ps(_t11,_t16);\
+ _t12 = _mm_sub_ss(_t12,_t19);\
+ _mm_storeu_ps(ptrD,_t10);\
+ _mm_storeu_ps(ptrD+4,_t11);\
+ _mm_store_ss(ptrD+8,_t12);\
+}
+#else
+/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
float * gmx_restrict ptrC, float * gmx_restrict ptrD,
_mm_storeu_ps(ptrD+4,t11);
_mm_store_ss(ptrD+8,t12);
}
-
-
+#endif
+
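+/* Either branch exposes the same call syntax, so callers need not know
+ * whether they get the macro or the inline function. A hypothetical call
+ * site (names illustrative only) would look like:
+ *
+ *     gmx_mm_decrement_3rvec_4ptr_swizzle_ps(f+j3A, f+j3B, f+j3C, f+j3D,
+ *                                            fjx1, fjy1, fjz1,
+ *                                            fjx2, fjy2, fjz2,
+ *                                            fjx3, fjy3, fjz3);
+ */
+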
+#if defined (_MSC_VER) && defined(_M_IX86)
+/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
+#define gmx_mm_decrement_4rvec_4ptr_swizzle_ps(ptrA,ptrB,ptrC,ptrD, \
+ _x1,_y1,_z1,_x2,_y2,_z2,_x3,_y3,_z3,_x4,_y4,_z4) \
+{\
+ __m128 _t1,_t2,_t3,_t4,_t5,_t6,_t7,_t8,_t9,_t10,_t11;\
+ __m128 _t12,_t13,_t14,_t15,_t16,_t17,_t18,_t19,_t20,_t21,_t22;\
+ __m128 _t23,_t24;\
+ _t13 = _mm_unpackhi_ps(_x1,_y1);\
+ _x1 = _mm_unpacklo_ps(_x1,_y1);\
+ _t14 = _mm_unpackhi_ps(_z1,_x2);\
+ _z1 = _mm_unpacklo_ps(_z1,_x2);\
+ _t15 = _mm_unpackhi_ps(_y2,_z2);\
+ _y2 = _mm_unpacklo_ps(_y2,_z2);\
+ _t16 = _mm_unpackhi_ps(_x3,_y3);\
+ _x3 = _mm_unpacklo_ps(_x3,_y3);\
+ _t17 = _mm_unpackhi_ps(_z3,_x4);\
+ _z3 = _mm_unpacklo_ps(_z3,_x4);\
+ _t18 = _mm_unpackhi_ps(_y4,_z4);\
+ _y4 = _mm_unpacklo_ps(_y4,_z4);\
+ _t19 = _mm_movelh_ps(_x1,_z1);\
+ _z1 = _mm_movehl_ps(_z1,_x1);\
+ _t20 = _mm_movelh_ps(_t13,_t14);\
+ _t14 = _mm_movehl_ps(_t14,_t13);\
+ _t21 = _mm_movelh_ps(_y2,_x3);\
+ _x3 = _mm_movehl_ps(_x3,_y2);\
+ _t22 = _mm_movelh_ps(_t15,_t16);\
+ _t16 = _mm_movehl_ps(_t16,_t15);\
+ _t23 = _mm_movelh_ps(_z3,_y4);\
+ _y4 = _mm_movehl_ps(_y4,_z3);\
+ _t24 = _mm_movelh_ps(_t17,_t18);\
+ _t18 = _mm_movehl_ps(_t18,_t17);\
+ _t1 = _mm_loadu_ps(ptrA);\
+ _t2 = _mm_loadu_ps(ptrA+4);\
+ _t3 = _mm_loadu_ps(ptrA+8);\
+ _t1 = _mm_sub_ps(_t1,_t19);\
+ _t2 = _mm_sub_ps(_t2,_t21);\
+ _t3 = _mm_sub_ps(_t3,_t23);\
+ _mm_storeu_ps(ptrA,_t1);\
+ _mm_storeu_ps(ptrA+4,_t2);\
+ _mm_storeu_ps(ptrA+8,_t3);\
+ _t4 = _mm_loadu_ps(ptrB);\
+ _t5 = _mm_loadu_ps(ptrB+4);\
+ _t6 = _mm_loadu_ps(ptrB+8);\
+ _t4 = _mm_sub_ps(_t4,_z1);\
+ _t5 = _mm_sub_ps(_t5,_x3);\
+ _t6 = _mm_sub_ps(_t6,_y4);\
+ _mm_storeu_ps(ptrB,_t4);\
+ _mm_storeu_ps(ptrB+4,_t5);\
+ _mm_storeu_ps(ptrB+8,_t6);\
+ _t7 = _mm_loadu_ps(ptrC);\
+ _t8 = _mm_loadu_ps(ptrC+4);\
+ _t9 = _mm_loadu_ps(ptrC+8);\
+ _t7 = _mm_sub_ps(_t7,_t20);\
+ _t8 = _mm_sub_ps(_t8,_t22);\
+ _t9 = _mm_sub_ps(_t9,_t24);\
+ _mm_storeu_ps(ptrC,_t7);\
+ _mm_storeu_ps(ptrC+4,_t8);\
+ _mm_storeu_ps(ptrC+8,_t9);\
+ _t10 = _mm_loadu_ps(ptrD);\
+ _t11 = _mm_loadu_ps(ptrD+4);\
+ _t12 = _mm_loadu_ps(ptrD+8);\
+ _t10 = _mm_sub_ps(_t10,_t14);\
+ _t11 = _mm_sub_ps(_t11,_t16);\
+ _t12 = _mm_sub_ps(_t12,_t18);\
+ _mm_storeu_ps(ptrD,_t10);\
+ _mm_storeu_ps(ptrD+4,_t11);\
+ _mm_storeu_ps(ptrD+8,_t12);\
+}
+#else
+/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
float * gmx_restrict ptrC, float * gmx_restrict ptrD,
_mm_storeu_ps(ptrD+4,t11);
_mm_storeu_ps(ptrD+8,t12);
}
-
+#endif
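+
+/* Both decrement variants assume the interleaved rvec layout
+ * x1 y1 z1 x2 y2 z2 ... at each pointer: the 3rvec version updates 9
+ * floats per pointer (two unaligned 4-float vectors plus a scalar tail),
+ * while the 4rvec version updates 12 floats (three full unaligned
+ * vectors, so no scalar tail is needed).
+ */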
static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
_mm_storeh_pi((__m64 *)(fshiftptr+1),t3);
}
+#if defined (_MSC_VER) && defined(_M_IX86)
+/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
+#define gmx_mm_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3, \
+ fptr,fshiftptr) \
+{\
+ __m128 _t1,_t2,_t3,_t4;\
+\
+ fix1 = _mm_hadd_ps(fix1,fiy1);\
+ fiz1 = _mm_hadd_ps(fiz1,fix2);\
+ fiy2 = _mm_hadd_ps(fiy2,fiz2);\
+ fix3 = _mm_hadd_ps(fix3,fiy3);\
+ fiz3 = _mm_hadd_ps(fiz3,fiz3);\
+ fix1 = _mm_hadd_ps(fix1,fiz1);\
+ fiy2 = _mm_hadd_ps(fiy2,fix3);\
+ fiz3 = _mm_hadd_ps(fiz3,fiz3);\
+ _mm_storeu_ps(fptr, _mm_add_ps(fix1,_mm_loadu_ps(fptr) ));\
+ _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));\
+ _mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));\
+ _t4 = _mm_load_ss(fshiftptr+2);\
+ _t4 = _mm_loadh_pi(_t4,(__m64 *)(fshiftptr));\
+ _t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));\
+ _t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));\
+ _t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));\
+ _t3 = _mm_permute_ps(_t3 ,_MM_SHUFFLE(1,2,0,0));\
+ _t1 = _mm_add_ps(_t1,_t2);\
+ _t3 = _mm_add_ps(_t3,_t4);\
+ _t1 = _mm_add_ps(_t1,_t3);\
+ _mm_store_ss(fshiftptr+2,_t1);\
+ _mm_storeh_pi((__m64 *)(fshiftptr),_t1);\
+}
+#else
+/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
__m128 fix2, __m128 fiy2, __m128 fiz2,
_mm_store_ss(fshiftptr+2,t1);
_mm_storeh_pi((__m64 *)(fshiftptr),t1);
}
-
-
+#endif
+
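+/* How the 3-atom reduction above works: each of the nine inputs holds four
+ * partial sums, one per j-particle. _mm_hadd_ps(a,b) yields
+ * {a0+a1, a2+a3, b0+b1, b2+b3}, so two rounds of hadd reduce every input
+ * to a single total and pack the results as
+ *     fix1 = { Sx1, Sy1, Sz1, Sx2 }
+ *     fiy2 = { Sy2, Sz2, Sx3, Sy3 }
+ *     fiz3 = { Sz3 in every lane }
+ * which is exactly the order of the fptr[0..8] accumulation stores.
+ */
+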
+#if defined (_MSC_VER) && defined(_M_IX86)
+/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
+#define gmx_mm_update_iforce_4atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,fix4,fiy4,fiz4, \
+ fptr,fshiftptr) \
+{\
+ __m128 _t1,_t2,_t3,_t4,_t5;\
+\
+ fix1 = _mm_hadd_ps(fix1,fiy1);\
+ fiz1 = _mm_hadd_ps(fiz1,fix2);\
+ fiy2 = _mm_hadd_ps(fiy2,fiz2);\
+ fix3 = _mm_hadd_ps(fix3,fiy3);\
+ fiz3 = _mm_hadd_ps(fiz3,fix4);\
+ fiy4 = _mm_hadd_ps(fiy4,fiz4);\
+ fix1 = _mm_hadd_ps(fix1,fiz1);\
+ fiy2 = _mm_hadd_ps(fiy2,fix3);\
+ fiz3 = _mm_hadd_ps(fiz3,fiy4);\
+ _mm_storeu_ps(fptr, _mm_add_ps(fix1,_mm_loadu_ps(fptr) ));\
+ _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));\
+ _mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));\
+ _t5 = _mm_load_ss(fshiftptr+2);\
+ _t5 = _mm_loadh_pi(_t5,(__m64 *)(fshiftptr));\
+ _t1 = _mm_permute_ps(fix1,_MM_SHUFFLE(1,0,2,2));\
+ _t2 = _mm_permute_ps(fiy2,_MM_SHUFFLE(3,2,1,1));\
+ _t3 = _mm_permute_ps(fiz3,_MM_SHUFFLE(2,1,0,0));\
+ _t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));\
+ _t4 = _mm_shuffle_ps(fiz3,_t4 ,_MM_SHUFFLE(2,0,3,3));\
+ _t1 = _mm_add_ps(_t1,_t2);\
+ _t3 = _mm_add_ps(_t3,_t4);\
+ _t1 = _mm_add_ps(_t1,_t3);\
+ _t5 = _mm_add_ps(_t5,_t1);\
+ _mm_store_ss(fshiftptr+2,_t5);\
+ _mm_storeh_pi((__m64 *)(fshiftptr),_t5);\
+}
+#else
+/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
__m128 fix2, __m128 fiy2, __m128 fiz2,
_mm_store_ss(fshiftptr+2,t5);
_mm_storeh_pi((__m64 *)(fshiftptr),t5);
}
-
+#endif
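+
+/* The 4-atom update packs its twelve totals into exactly three full
+ * vectors (fptr, fptr+4, fptr+8), so unlike the 3-atom case it needs no
+ * scalar tail store; the final shuffles gather the x, y and z totals that
+ * are accumulated into fshiftptr[0..2].
+ */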
static gmx_inline void
}
-static gmx_inline void
-gmx_mm_update_4pot_ps(__m128 pot1, float * gmx_restrict ptrA,
- __m128 pot2, float * gmx_restrict ptrB,
- __m128 pot3, float * gmx_restrict ptrC,
- __m128 pot4, float * gmx_restrict ptrD)
-{
- _MM_TRANSPOSE4_PS(pot1,pot2,pot3,pot4);
- pot1 = _mm_add_ps(_mm_add_ps(pot1,pot2),_mm_add_ps(pot3,pot4));
- pot2 = _mm_permute_ps(pot1,_MM_SHUFFLE(1,1,1,1));
- pot3 = _mm_permute_ps(pot1,_MM_SHUFFLE(2,2,2,2));
- pot4 = _mm_permute_ps(pot1,_MM_SHUFFLE(3,3,3,3));
- _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
- _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
- _mm_store_ss(ptrC,_mm_add_ss(pot3,_mm_load_ss(ptrC)));
- _mm_store_ss(ptrD,_mm_add_ss(pot4,_mm_load_ss(ptrD)));
-}
-
-
#endif /* _kernelutil_x86_avx_128_fma_single_h_ */