*/
-static void
-gmx_mm_load_1rvec_broadcast_ps(float *ptrA, __m128 *x, __m128 *y, __m128 *z)
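+/* Load a single rvec from xyz, add the three-component shift vector from
+ * xyz_shift, and broadcast each shifted component into all elements of
+ * *x1, *y1 and *z1.
+ */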
+static gmx_inline void
+gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
+                                         const float * gmx_restrict xyz,
+                                         __m128 * gmx_restrict x1,
+                                         __m128 * gmx_restrict y1,
+                                         __m128 * gmx_restrict z1)
{
- __m128 t1;
-
- t1 = _mm_loadu_ps(ptrA);
-
- *x = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
- *y = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
- *z = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
+ __m128 t1,t2,t3,t4;
+
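+ /* x/y are fetched as one 64-bit pair and z separately, for both the
+  * shift and the coordinates; the shift is added before broadcasting.
+  */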
+ t1 = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
+ t2 = _mm_castpd_ps(_mm_load_sd((const double *)xyz));
+ t3 = _mm_load_ss(xyz_shift+2);
+ t4 = _mm_load_ss(xyz+2);
+ t1 = _mm_add_ps(t1,t2);
+ t3 = _mm_add_ss(t3,t4);
+
+ *x1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
+ *y1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
+ *z1 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
}
-static void
-gmx_mm_load_3rvec_broadcast_ps(float *ptrA,
- __m128 *x1, __m128 *y1, __m128 *z1,
- __m128 *x2, __m128 *y2, __m128 *z2,
- __m128 *x3, __m128 *y3, __m128 *z3)
-{
- __m128 t1,t2,t3;
-
- t1 = _mm_loadu_ps(ptrA);
- t2 = _mm_loadu_ps(ptrA+4);
- *x1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
- *y1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
- *z1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
- *x2 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
- *y2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
- *z2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
- *x3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
- *y3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
-
- t3 = _mm_load_ss(ptrA+8);
- *z3 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
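+/* Load three consecutive rvecs from xyz, add the same three-component shift
+ * from xyz_shift to each of them, and broadcast all nine shifted components
+ * into separate registers.
+ */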
+static gmx_inline void
+gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
+                                         const float * gmx_restrict xyz,
+                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
+                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
+                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
+{
+ __m128 tA,tB;
+ __m128 t1,t2,t3,t4,t5,t6;
+
+ tA = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
+ tB = _mm_load_ss(xyz_shift+2);
+
+ t1 = _mm_loadu_ps(xyz);
+ t2 = _mm_loadu_ps(xyz+4);
+ t3 = _mm_load_ss(xyz+8);
+
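+ /* Replicate the shift so it lines up with the packed coordinate layout:
+  * t4=(sx,sy,sz,sx), t5=(sy,sz,sx,sy), t6=(sz,sx,sy,sz), lowest element first.
+  */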
+ tA = _mm_movelh_ps(tA,tB);
+ t4 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
+ t5 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
+ t6 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));
+
+ t1 = _mm_add_ps(t1,t4);
+ t2 = _mm_add_ps(t2,t5);
+ t3 = _mm_add_ss(t3,t6);
+
+ *x1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
+ *y1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
+ *z1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
+ *x2 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
+ *y2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
+ *z2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
+ *x3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
+ *y3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
+ *z3 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
}
-static void
-gmx_mm_load_4rvec_broadcast_ps(float *ptrA,
- __m128 *x1, __m128 *y1, __m128 *z1,
- __m128 *x2, __m128 *y2, __m128 *z2,
- __m128 *x3, __m128 *y3, __m128 *z3,
- __m128 *x4, __m128 *y4, __m128 *z4)
-{
- __m128 t1,t2,t3;
- __m128 tA;
-
- t1 = _mm_loadu_ps(ptrA);
- t2 = _mm_loadu_ps(ptrA+4);
- t3 = _mm_loadu_ps(ptrA+8);
-
- *x1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
- *y1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
- *z1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
- *x2 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
- *y2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
- *z2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
- *x3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
- *y3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
- *z3 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
- *x4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
- *y4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(2,2,2,2));
- *z4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(3,3,3,3));
+
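+/* Load four consecutive rvecs from xyz, add the same three-component shift
+ * from xyz_shift to each of them, and broadcast all twelve shifted components
+ * into separate registers.
+ */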
+static gmx_inline void
+gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
+                                         const float * gmx_restrict xyz,
+                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
+                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
+                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
+                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
+{
+ __m128 tA,tB;
+ __m128 t1,t2,t3,t4,t5,t6;
+
+ tA = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
+ tB = _mm_load_ss(xyz_shift+2);
+
+ t1 = _mm_loadu_ps(xyz);
+ t2 = _mm_loadu_ps(xyz+4);
+ t3 = _mm_loadu_ps(xyz+8);
+
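+ /* Same shift replication as in the 3-rvec case; t3 holds (z3,x4,y4,z4), so
+  * the full-width add with t6=(sz,sx,sy,sz) shifts those four components.
+  */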
+ tA = _mm_movelh_ps(tA,tB);
+ t4 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
+ t5 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
+ t6 = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));
+
+ t1 = _mm_add_ps(t1,t4);
+ t2 = _mm_add_ps(t2,t5);
+ t3 = _mm_add_ps(t3,t6);
+
+ *x1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
+ *y1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
+ *z1 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
+ *x2 = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
+ *y2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
+ *z2 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
+ *x3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
+ *y3 = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
+ *z3 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
+ *x4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
+ *y4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(2,2,2,2));
+ *z4 = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(3,3,3,3));
}
static void
-gmx_mm_load_2rvec_4ptr_swizzle_ps(float *ptrA, float *ptrB, float *ptrC, float *ptrD,
- __m128 *x1, __m128 *y1, __m128 *z1,
- __m128 *x2, __m128 *y2, __m128 *z2)
-{
- __m128 t1,t2,t3,t4;
- t1 = _mm_loadu_ps(ptrA);
- t2 = _mm_loadu_ps(ptrB);
- t3 = _mm_loadu_ps(ptrC);
- t4 = _mm_loadu_ps(ptrD);
- _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
- *x1 = t1;
- *y1 = t2;
- *z1 = t3;
- *x2 = t4;
- t1 = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)(ptrA+4));
- t2 = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)(ptrB+4));
- t3 = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)(ptrC+4));
- t4 = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)(ptrD+4));
- t1 = _mm_unpacklo_ps(t1,t3);
- t2 = _mm_unpacklo_ps(t2,t4);
- *y2 = _mm_unpacklo_ps(t1,t2);
- *z2 = _mm_unpackhi_ps(t1,t2);
-}
-
-
-static void
-gmx_mm_load_3rvec_4ptr_swizzle_ps(float *ptrA, float *ptrB, float *ptrC, float *ptrD,
- __m128 *x1, __m128 *y1, __m128 *z1,
- __m128 *x2, __m128 *y2, __m128 *z2,
- __m128 *x3, __m128 *y3, __m128 *z3)
+gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
+                                  const float * gmx_restrict ptrB,
+                                  const float * gmx_restrict ptrC,
+                                  const float * gmx_restrict ptrD,
+                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
+                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
+                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
__m128 t1,t2,t3,t4;
t1 = _mm_loadu_ps(ptrA);
static void
-gmx_mm_load_4rvec_4ptr_swizzle_ps(float *ptrA, float *ptrB, float *ptrC, float *ptrD,
- __m128 *x1, __m128 *y1, __m128 *z1,
- __m128 *x2, __m128 *y2, __m128 *z2,
- __m128 *x3, __m128 *y3, __m128 *z3,
- __m128 *x4, __m128 *y4, __m128 *z4)
+gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
+                                  const float * gmx_restrict ptrB,
+                                  const float * gmx_restrict ptrC,
+                                  const float * gmx_restrict ptrD,
+                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
+                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
+                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
+                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
__m128 t1,t2,t3,t4;
t1 = _mm_loadu_ps(ptrA);
}
-/* Routines to increment rvec in memory, typically use for j particle force updates */
-static void
-gmx_mm_increment_1rvec_1ptr_noswizzle_ps(float *ptrA, __m128 xyz)
-{
- __m128 mask = gmx_mm_castsi128_ps( _mm_set_epi32(0,-1,-1,-1) );
- __m128 t1;
-
- t1 = _mm_loadu_ps(ptrA);
- xyz = _mm_and_ps(mask,xyz);
- t1 = _mm_add_ps(t1,xyz);
- _mm_storeu_ps(ptrA,t1);
-}
-
-
-static void
-gmx_mm_increment_3rvec_1ptr_noswizzle_ps(float *ptrA,
- __m128 xyz1, __m128 xyz2, __m128 xyz3)
-{
- __m128 t1,t2,t3,t4;
- __m128 tA,tB,tC;
-
- tA = _mm_loadu_ps(ptrA);
- tB = _mm_loadu_ps(ptrA+4);
- tC = _mm_load_ss(ptrA+8);
-
- t1 = _mm_shuffle_ps(xyz2,xyz2,_MM_SHUFFLE(0,0,2,1)); /* x2 - z2 y2 */
- t2 = _mm_shuffle_ps(xyz3,xyz3,_MM_SHUFFLE(1,0,0,2)); /* y3 x3 - z3 */
-
- t3 = _mm_shuffle_ps(t1,xyz1,_MM_SHUFFLE(2,2,3,3)); /* z1 z1 x2 x2 */
- t3 = _mm_shuffle_ps(xyz1,t3,_MM_SHUFFLE(0,2,1,0)); /* x2 z1 y1 x1 */
-
- t4 = _mm_shuffle_ps(t1,t2,_MM_SHUFFLE(3,2,1,0)); /* y3 x3 z2 y2 */
-
- tA = _mm_add_ps(tA,t3);
- tB = _mm_add_ps(tB,t4);
- tC = _mm_add_ss(tC,t2);
-
- _mm_storeu_ps(ptrA,tA);
- _mm_storeu_ps(ptrA+4,tB);
- _mm_store_ss(ptrA+8,tC);
-}
-
-static void
-gmx_mm_increment_4rvec_1ptr_noswizzle_ps(float *ptrA,
- __m128 xyz1, __m128 xyz2, __m128 xyz3, __m128 xyz4)
-{
- __m128 t1,t2,t3,t4,t5;
- __m128 tA,tB,tC;
-
- tA = _mm_loadu_ps(ptrA);
- tB = _mm_loadu_ps(ptrA+4);
- tC = _mm_loadu_ps(ptrA+8);
-
- t1 = _mm_shuffle_ps(xyz2,xyz2,_MM_SHUFFLE(0,0,2,1)); /* x2 - z2 y2 */
- t2 = _mm_shuffle_ps(xyz3,xyz3,_MM_SHUFFLE(1,0,0,2)); /* y3 x3 - z3 */
-
- t3 = _mm_shuffle_ps(t1,xyz1,_MM_SHUFFLE(2,2,3,3)); /* z1 z1 x2 x2 */
- t3 = _mm_shuffle_ps(xyz1,t3,_MM_SHUFFLE(0,2,1,0)); /* x2 z1 y1 x1 */
-
- t4 = _mm_shuffle_ps(t1,t2,_MM_SHUFFLE(3,2,1,0)); /* y3 x3 z2 y2 */
- t5 = _mm_shuffle_ps(xyz4,xyz4,_MM_SHUFFLE(2,1,0,0)); /* z4 y4 x4 - */
-
- t2 = _mm_shuffle_ps(t2,t5,_MM_SHUFFLE(1,1,0,0)); /* x4 x4 z3 z3 */
- t5 = _mm_shuffle_ps(t2,t5,_MM_SHUFFLE(3,2,2,0)); /* z4 y4 x4 z3 */
-
- tA = _mm_add_ps(tA,t3);
- tB = _mm_add_ps(tB,t4);
- tC = _mm_add_ps(tC,t5);
-
- _mm_storeu_ps(ptrA,tA);
- _mm_storeu_ps(ptrA+4,tB);
- _mm_storeu_ps(ptrA+8,tC);
-
-}
-
-
-
-static void
-gmx_mm_increment_1rvec_4ptr_swizzle_ps(float *ptrA, float *ptrB, float *ptrC,float *ptrD,
- __m128 x1, __m128 y1, __m128 z1)
-{
- __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12;
- t5 = _mm_unpacklo_ps(y1,z1);
- t6 = _mm_unpackhi_ps(y1,z1);
- t7 = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(1,0,0,0));
- t8 = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(3,2,0,1));
- t9 = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(1,0,0,2));
- t10 = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(3,2,0,3));
- t1 = _mm_load_ss(ptrA);
- t1 = _mm_loadh_pi(t1,(__m64 *)(ptrA+1));
- t1 = _mm_add_ps(t1,t7);
- _mm_store_ss(ptrA,t1);
- _mm_storeh_pi((__m64 *)(ptrA+1),t1);
- t2 = _mm_load_ss(ptrB);
- t2 = _mm_loadh_pi(t2,(__m64 *)(ptrB+1));
- t2 = _mm_add_ps(t2,t8);
- _mm_store_ss(ptrB,t2);
- _mm_storeh_pi((__m64 *)(ptrB+1),t2);
- t3 = _mm_load_ss(ptrC);
- t3 = _mm_loadh_pi(t3,(__m64 *)(ptrC+1));
- t3 = _mm_add_ps(t3,t9);
- _mm_store_ss(ptrC,t3);
- _mm_storeh_pi((__m64 *)(ptrC+1),t3);
- t4 = _mm_load_ss(ptrD);
- t4 = _mm_loadh_pi(t4,(__m64 *)(ptrD+1));
- t4 = _mm_add_ps(t4,t10);
- _mm_store_ss(ptrD,t4);
- _mm_storeh_pi((__m64 *)(ptrD+1),t4);
-}
-
-
-
-
-static void
-gmx_mm_increment_3rvec_4ptr_swizzle_ps(float *ptrA, float *ptrB, float *ptrC, float *ptrD,
- __m128 x1, __m128 y1, __m128 z1,
- __m128 x2, __m128 y2, __m128 z2,
- __m128 x3, __m128 y3, __m128 z3)
-{
- __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
- __m128 t11,t12,t13,t14,t15,t16,t17,t18,t19;
- __m128 t20,t21,t22,t23,t24,t25;
-
- t13 = _mm_unpackhi_ps(x1,y1);
- x1 = _mm_unpacklo_ps(x1,y1);
- t14 = _mm_unpackhi_ps(z1,x2);
- z1 = _mm_unpacklo_ps(z1,x2);
- t15 = _mm_unpackhi_ps(y2,z2);
- y2 = _mm_unpacklo_ps(y2,z2);
- t16 = _mm_unpackhi_ps(x3,y3);
- x3 = _mm_unpacklo_ps(x3,y3);
- t17 = _mm_shuffle_ps(z3,z3,_MM_SHUFFLE(0,0,0,1));
- t18 = _mm_movehl_ps(z3,z3);
- t19 = _mm_shuffle_ps(t18,t18,_MM_SHUFFLE(0,0,0,1));
- t20 = _mm_movelh_ps(x1,z1);
- t21 = _mm_movehl_ps(z1,x1);
- t22 = _mm_movelh_ps(t13,t14);
- t14 = _mm_movehl_ps(t14,t13);
- t23 = _mm_movelh_ps(y2,x3);
- t24 = _mm_movehl_ps(x3,y2);
- t25 = _mm_movelh_ps(t15,t16);
- t16 = _mm_movehl_ps(t16,t15);
- t1 = _mm_loadu_ps(ptrA);
- t2 = _mm_loadu_ps(ptrA+4);
- t3 = _mm_load_ss(ptrA+8);
- t1 = _mm_add_ps(t1,t20);
- t2 = _mm_add_ps(t2,t23);
- t3 = _mm_add_ss(t3,z3);
- _mm_storeu_ps(ptrA,t1);
- _mm_storeu_ps(ptrA+4,t2);
- _mm_store_ss(ptrA+8,t3);
- t4 = _mm_loadu_ps(ptrB);
- t5 = _mm_loadu_ps(ptrB+4);
- t6 = _mm_load_ss(ptrB+8);
- t4 = _mm_add_ps(t4,t21);
- t5 = _mm_add_ps(t5,t24);
- t6 = _mm_add_ss(t6,t17);
- _mm_storeu_ps(ptrB,t4);
- _mm_storeu_ps(ptrB+4,t5);
- _mm_store_ss(ptrB+8,t6);
- t7 = _mm_loadu_ps(ptrC);
- t8 = _mm_loadu_ps(ptrC+4);
- t9 = _mm_load_ss(ptrC+8);
- t7 = _mm_add_ps(t7,t22);
- t8 = _mm_add_ps(t8,t25);
- t9 = _mm_add_ss(t9,t18);
- _mm_storeu_ps(ptrC,t7);
- _mm_storeu_ps(ptrC+4,t8);
- _mm_store_ss(ptrC+8,t9);
- t10 = _mm_loadu_ps(ptrD);
- t11 = _mm_loadu_ps(ptrD+4);
- t12 = _mm_load_ss(ptrD+8);
- t10 = _mm_add_ps(t10,t14);
- t11 = _mm_add_ps(t11,t16);
- t12 = _mm_add_ss(t12,t19);
- _mm_storeu_ps(ptrD,t10);
- _mm_storeu_ps(ptrD+4,t11);
- _mm_store_ss(ptrD+8,t12);
-}
-
-
-static void
-gmx_mm_increment_4rvec_4ptr_swizzle_ps(float *ptrA, float *ptrB, float *ptrC, float *ptrD,
- __m128 x1, __m128 y1, __m128 z1,
- __m128 x2, __m128 y2, __m128 z2,
- __m128 x3, __m128 y3, __m128 z3,
- __m128 x4, __m128 y4, __m128 z4)
-{
- __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11;
- __m128 t12,t13,t14,t15,t16,t17,t18,t19,t20,t21,t22;
- __m128 t23,t24;
- t13 = _mm_unpackhi_ps(x1,y1);
- x1 = _mm_unpacklo_ps(x1,y1);
- t14 = _mm_unpackhi_ps(z1,x2);
- z1 = _mm_unpacklo_ps(z1,x2);
- t15 = _mm_unpackhi_ps(y2,z2);
- y2 = _mm_unpacklo_ps(y2,z2);
- t16 = _mm_unpackhi_ps(x3,y3);
- x3 = _mm_unpacklo_ps(x3,y3);
- t17 = _mm_unpackhi_ps(z3,x4);
- z3 = _mm_unpacklo_ps(z3,x4);
- t18 = _mm_unpackhi_ps(y4,z4);
- y4 = _mm_unpacklo_ps(y4,z4);
- t19 = _mm_movelh_ps(x1,z1);
- z1 = _mm_movehl_ps(z1,x1);
- t20 = _mm_movelh_ps(t13,t14);
- t14 = _mm_movehl_ps(t14,t13);
- t21 = _mm_movelh_ps(y2,x3);
- x3 = _mm_movehl_ps(x3,y2);
- t22 = _mm_movelh_ps(t15,t16);
- t16 = _mm_movehl_ps(t16,t15);
- t23 = _mm_movelh_ps(z3,y4);
- y4 = _mm_movehl_ps(y4,z3);
- t24 = _mm_movelh_ps(t17,t18);
- t18 = _mm_movehl_ps(t18,t17);
- t1 = _mm_loadu_ps(ptrA);
- t2 = _mm_loadu_ps(ptrA+4);
- t3 = _mm_loadu_ps(ptrA+8);
- t1 = _mm_add_ps(t1,t19);
- t2 = _mm_add_ps(t2,t21);
- t3 = _mm_add_ps(t3,t23);
- _mm_storeu_ps(ptrA,t1);
- _mm_storeu_ps(ptrA+4,t2);
- _mm_storeu_ps(ptrA+8,t3);
- t4 = _mm_loadu_ps(ptrB);
- t5 = _mm_loadu_ps(ptrB+4);
- t6 = _mm_loadu_ps(ptrB+8);
- t4 = _mm_add_ps(t4,z1);
- t5 = _mm_add_ps(t5,x3);
- t6 = _mm_add_ps(t6,y4);
- _mm_storeu_ps(ptrB,t4);
- _mm_storeu_ps(ptrB+4,t5);
- _mm_storeu_ps(ptrB+8,t6);
- t7 = _mm_loadu_ps(ptrC);
- t8 = _mm_loadu_ps(ptrC+4);
- t9 = _mm_loadu_ps(ptrC+8);
- t7 = _mm_add_ps(t7,t20);
- t8 = _mm_add_ps(t8,t22);
- t9 = _mm_add_ps(t9,t24);
- _mm_storeu_ps(ptrC,t7);
- _mm_storeu_ps(ptrC+4,t8);
- _mm_storeu_ps(ptrC+8,t9);
- t10 = _mm_loadu_ps(ptrD);
- t11 = _mm_loadu_ps(ptrD+4);
- t12 = _mm_loadu_ps(ptrD+8);
- t10 = _mm_add_ps(t10,t14);
- t11 = _mm_add_ps(t11,t16);
- t12 = _mm_add_ps(t12,t18);
- _mm_storeu_ps(ptrD,t10);
- _mm_storeu_ps(ptrD+4,t11);
- _mm_storeu_ps(ptrD+8,t12);
-}
-
-
-/* Routines to decrement rvec in memory */
-static void
-gmx_mm_decrement_1rvec_1ptr_noswizzle_ps(float *ptrA, __m128 xyz)
-{
- __m128 mask = gmx_mm_castsi128_ps( _mm_set_epi32(0,-1,-1,-1) );
- __m128 t1;
-
- t1 = _mm_loadu_ps(ptrA);
- xyz = _mm_and_ps(mask,xyz);
- t1 = _mm_sub_ps(t1,xyz);
- _mm_storeu_ps(ptrA,t1);
-}
-
-
-static void
-gmx_mm_decrement_3rvec_1ptr_noswizzle_ps(float *ptrA,
- __m128 xyz1, __m128 xyz2, __m128 xyz3)
-{
- __m128 t1,t2,t3,t4;
- __m128 tA,tB,tC;
-
- tA = _mm_loadu_ps(ptrA);
- tB = _mm_loadu_ps(ptrA+4);
- tC = _mm_load_ss(ptrA+8);
-
- t1 = _mm_shuffle_ps(xyz2,xyz2,_MM_SHUFFLE(0,0,2,1)); /* x2 - z2 y2 */
- t2 = _mm_shuffle_ps(xyz3,xyz3,_MM_SHUFFLE(1,0,0,2)); /* y3 x3 - z3 */
-
- t3 = _mm_shuffle_ps(t1,xyz1,_MM_SHUFFLE(2,2,3,3)); /* z1 z1 x2 x2 */
- t3 = _mm_shuffle_ps(xyz1,t3,_MM_SHUFFLE(0,2,1,0)); /* x2 z1 y1 x1 */
-
- t4 = _mm_shuffle_ps(t1,t2,_MM_SHUFFLE(3,2,1,0)); /* y3 x3 z2 y2 */
-
- tA = _mm_sub_ps(tA,t3);
- tB = _mm_sub_ps(tB,t4);
- tC = _mm_sub_ss(tC,t2);
-
- _mm_storeu_ps(ptrA,tA);
- _mm_storeu_ps(ptrA+4,tB);
- _mm_store_ss(ptrA+8,tC);
-}
-
-static void
-gmx_mm_decrement_4rvec_1ptr_noswizzle_ps(float *ptrA,
- __m128 xyz1, __m128 xyz2, __m128 xyz3, __m128 xyz4)
-{
- __m128 t1,t2,t3,t4,t5;
- __m128 tA,tB,tC;
-
- tA = _mm_loadu_ps(ptrA);
- tB = _mm_loadu_ps(ptrA+4);
- tC = _mm_loadu_ps(ptrA+8);
-
- t1 = _mm_shuffle_ps(xyz2,xyz2,_MM_SHUFFLE(0,0,2,1)); /* x2 - z2 y2 */
- t2 = _mm_shuffle_ps(xyz3,xyz3,_MM_SHUFFLE(1,0,0,2)); /* y3 x3 - z3 */
-
- t3 = _mm_shuffle_ps(t1,xyz1,_MM_SHUFFLE(2,2,3,3)); /* z1 z1 x2 x2 */
- t3 = _mm_shuffle_ps(xyz1,t3,_MM_SHUFFLE(0,2,1,0)); /* x2 z1 y1 x1 */
-
- t4 = _mm_shuffle_ps(t1,t2,_MM_SHUFFLE(3,2,1,0)); /* y3 x3 z2 y2 */
-
- t5 = _mm_shuffle_ps(xyz4,xyz4,_MM_SHUFFLE(2,1,0,0)); /* z4 y4 x4 - */
- t2 = _mm_shuffle_ps(t2,t5,_MM_SHUFFLE(1,1,0,0)); /* x4 x4 z3 z3 */
- t5 = _mm_shuffle_ps(t2,t5,_MM_SHUFFLE(3,2,2,0)); /* z4 y4 x4 z3 */
-
- tA = _mm_sub_ps(tA,t3);
- tB = _mm_sub_ps(tB,t4);
- tC = _mm_sub_ps(tC,t5);
-
- _mm_storeu_ps(ptrA,tA);
- _mm_storeu_ps(ptrA+4,tB);
- _mm_storeu_ps(ptrA+8,tC);
-
-}
-
-
static void
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA,
float * gmx_restrict ptrB,
static void
-gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float *ptrA, float *ptrB, float *ptrC, float *ptrD,
+gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
+ float * gmx_restrict ptrC, float * gmx_restrict ptrD,
__m128 x1, __m128 y1, __m128 z1,
__m128 x2, __m128 y2, __m128 z2,
__m128 x3, __m128 y3, __m128 z3)
static void
-gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float *ptrA, float *ptrB, float *ptrC, float *ptrD,
+gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
+ float * gmx_restrict ptrC, float * gmx_restrict ptrD,
__m128 x1, __m128 y1, __m128 z1,
__m128 x2, __m128 y2, __m128 z2,
__m128 x3, __m128 y3, __m128 z3,
static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
- float *fptr,
- float *fshiftptr)
+ float * gmx_restrict fptr,
+ float * gmx_restrict fshiftptr)
{
__m128 t1,t2,t3;
static gmx_inline void
gmx_mm_update_iforce_2atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
__m128 fix2, __m128 fiy2, __m128 fiz2,
- float *fptr,
- float *fshiftptr)
+ float * gmx_restrict fptr,
+ float * gmx_restrict fshiftptr)
{
__m128 t1,t2,t4;
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
__m128 fix2, __m128 fiy2, __m128 fiz2,
__m128 fix3, __m128 fiy3, __m128 fiz3,
- float *fptr,
- float *fshiftptr)
+ float * gmx_restrict fptr,
+ float * gmx_restrict fshiftptr)
{
__m128 t1,t2,t3,t4;
__m128 fix2, __m128 fiy2, __m128 fiz2,
__m128 fix3, __m128 fiy3, __m128 fiz3,
__m128 fix4, __m128 fiy4, __m128 fiz4,
- float *fptr,
- float *fshiftptr)
+ float * gmx_restrict fptr,
+ float * gmx_restrict fshiftptr)
{
__m128 t1,t2,t3,t4,t5;
static void
-gmx_mm_update_1pot_ps(__m128 pot1, float *ptrA)
+gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
pot1 = _mm_add_ps(pot1,_mm_movehl_ps(_mm_setzero_ps(),pot1));
pot1 = _mm_add_ps(pot1,_mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(0,0,0,1)));
}
static void
-gmx_mm_update_2pot_ps(__m128 pot1, float *ptrA,
- __m128 pot2, float *ptrB)
+gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
+ __m128 pot2, float * gmx_restrict ptrB)
{
__m128 t1,t2;
t1 = _mm_movehl_ps(pot2,pot1);
static void
-gmx_mm_update_4pot_ps(__m128 pot1, float *ptrA,
- __m128 pot2, float *ptrB,
- __m128 pot3, float *ptrC,
- __m128 pot4, float *ptrD)
+gmx_mm_update_4pot_ps(__m128 pot1, float * gmx_restrict ptrA,
+ __m128 pot2, float * gmx_restrict ptrB,
+ __m128 pot3, float * gmx_restrict ptrC,
+ __m128 pot4, float * gmx_restrict ptrD)
{
_MM_TRANSPOSE4_PS(pot1,pot2,pot3,pot4);
pot1 = _mm_add_ps(_mm_add_ps(pot1,pot2),_mm_add_ps(pot3,pot4));