fixed nbnxn AVX-256 Ewald table pointer alignment
author    Berk Hess <hess@kth.se>
Thu, 3 Jan 2013 12:24:03 +0000 (13:24 +0100)
committer    Gerrit Code Review <gerrit@gerrit.gromacs.org>
Mon, 7 Jan 2013 20:43:13 +0000 (21:43 +0100)
The misaligned table index pointers could lead to access violations,
but until now this has only been observed with 32-bit MSVC builds.
Fixes #1097

Also added more documentation to nbnxn_kernel_simd_utils.h,
renamed macro variables and made a minor table optimization
by replacing shuffle intrinsics with movelh or unpack.

Change-Id: I9df2131cbabb0e6cb86f7224a5fd238acb2e7681

src/mdlib/nbnxn_kernels/nbnxn_kernel_simd_2xnn_outer.h
src/mdlib/nbnxn_kernels/nbnxn_kernel_simd_4xn_outer.h
src/mdlib/nbnxn_kernels/nbnxn_kernel_simd_utils.h
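
The fix itself is the stack-buffer alignment idiom used for the table index
scratch arrays below: over-allocate the int array by SIMD-width minus one
entries, then mask the address down to a multiple of
GMX_SIMD_WIDTH_HERE*sizeof(int) bytes, because a full-width aligned integer
store is done into that buffer. Building the mask from UNROLLJ and
sizeof(real), as the old code did, could give both a buffer that is too small
and an alignment that is too weak for AVX-256. A minimal sketch of the idiom
(illustration only; the helper function is invented, only the buffer layout
and mask follow the patch):

    #include <stddef.h>

    /* Sketch of the alignment idiom (function name invented).  Returns a
     * pointer into ti_array that is aligned to simd_width*sizeof(int) bytes
     * and has room for simd_width ints, provided ti_array holds at least
     * 2*simd_width-1 ints. */
    static int *align_ti_sketch(int *ti_array, int simd_width)
    {
        size_t addr = (size_t)(ti_array + simd_width - 1);

        /* Round down to a multiple of simd_width*sizeof(int) bytes */
        return (int *)(addr & ~((size_t)(simd_width*sizeof(int)) - 1));
    }

With AVX-256 single precision (simd_width = 8) this yields a 32-byte aligned
pointer, matching the alignment required by an aligned 256-bit store of the
eight table indices.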

index 78242d73cec848e792ceb3b70ec804a98caa7351..f656e4d6dd05eba0c8023d140866291090052cc0 100644 (file)
@@ -211,8 +211,8 @@ NBK_FUNC_NAME(nbnxn_kernel_simd_2xnn,energrp)
     const real *tab_coul_V;
 #endif
 #ifdef GMX_MM256_HERE
-    int        ti0_array[2*UNROLLJ-1],*ti0;
-    int        ti2_array[2*UNROLLJ-1],*ti2;
+    int        ti0_array[2*GMX_SIMD_WIDTH_HERE-1],*ti0;
+    int        ti2_array[2*GMX_SIMD_WIDTH_HERE-1],*ti2;
 #endif
 #ifdef CALC_ENERGIES
     gmx_mm_pr  mhalfsp_SSE;
@@ -292,9 +292,9 @@ NBK_FUNC_NAME(nbnxn_kernel_simd_2xnn,energrp)
 
 #ifdef CALC_COUL_TAB
 #ifdef GMX_MM256_HERE
-    /* Generate aligned table pointers */
-    ti0 = (int *)(((size_t)(ti0_array+UNROLLJ-1)) & (~((size_t)(UNROLLJ*sizeof(real)-1))));
-    ti2 = (int *)(((size_t)(ti2_array+UNROLLJ-1)) & (~((size_t)(UNROLLJ*sizeof(real)-1))));
+    /* Generate aligned table index pointers */
+    ti0 = (int *)(((size_t)(ti0_array+GMX_SIMD_WIDTH_HERE-1)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int)-1))));
+    ti2 = (int *)(((size_t)(ti2_array+GMX_SIMD_WIDTH_HERE-1)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int)-1))));
 #endif
 
     invtsp_SSE  = gmx_set1_pr(ic->tabq_scale);
index 1545d40380c8d48fcc21b16b96477343ef881dfe..ee6e0051f1b7e42bc02118e58ecce16ac444a902 100644 (file)
@@ -291,10 +291,10 @@ NBK_FUNC_NAME(nbnxn_kernel_simd_4xn,energrp)
     const real *tab_coul_V;
 #endif
 #ifdef GMX_MM256_HERE
-    int        ti0_array[2*UNROLLJ-1],*ti0;
-    int        ti1_array[2*UNROLLJ-1],*ti1;
-    int        ti2_array[2*UNROLLJ-1],*ti2;
-    int        ti3_array[2*UNROLLJ-1],*ti3;
+    int        ti0_array[2*GMX_SIMD_WIDTH_HERE-1],*ti0;
+    int        ti1_array[2*GMX_SIMD_WIDTH_HERE-1],*ti1;
+    int        ti2_array[2*GMX_SIMD_WIDTH_HERE-1],*ti2;
+    int        ti3_array[2*GMX_SIMD_WIDTH_HERE-1],*ti3;
 #endif
 #ifdef CALC_ENERGIES
     gmx_mm_pr  mhalfsp_SSE;
@@ -378,11 +378,11 @@ NBK_FUNC_NAME(nbnxn_kernel_simd_4xn,energrp)
 
 #ifdef CALC_COUL_TAB
 #ifdef GMX_MM256_HERE
-    /* Generate aligned table pointers */
-    ti0 = (int *)(((size_t)(ti0_array+UNROLLJ-1)) & (~((size_t)(UNROLLJ*sizeof(real)-1))));
-    ti1 = (int *)(((size_t)(ti1_array+UNROLLJ-1)) & (~((size_t)(UNROLLJ*sizeof(real)-1))));
-    ti2 = (int *)(((size_t)(ti2_array+UNROLLJ-1)) & (~((size_t)(UNROLLJ*sizeof(real)-1))));
-    ti3 = (int *)(((size_t)(ti3_array+UNROLLJ-1)) & (~((size_t)(UNROLLJ*sizeof(real)-1))));
+    /* Generate aligned table index pointers */
+    ti0 = (int *)(((size_t)(ti0_array+GMX_SIMD_WIDTH_HERE-1)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int)-1))));
+    ti1 = (int *)(((size_t)(ti1_array+GMX_SIMD_WIDTH_HERE-1)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int)-1))));
+    ti2 = (int *)(((size_t)(ti2_array+GMX_SIMD_WIDTH_HERE-1)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int)-1))));
+    ti3 = (int *)(((size_t)(ti3_array+GMX_SIMD_WIDTH_HERE-1)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int)-1))));
 #endif
 
     invtsp_SSE  = gmx_set1_pr(ic->tabq_scale);
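
The shuffle-to-movelh/unpack replacements in nbnxn_kernel_simd_utils.h below
are drop-in equivalents for these particular shuffle patterns:
_mm_unpacklo_pd(a,b) and _mm_unpackhi_pd(a,b) give [a0,b0] and [a1,b1],
exactly what _MM_SHUFFLE2(0,0) and _MM_SHUFFLE2(1,1) selected, and
_mm_movelh_ps(a,b) gives [a0,a1,b0,b1], the same as _MM_SHUFFLE(1,0,1,0).
A throwaway check of the double-precision case (illustration only, not part
of the patch):

    #include <assert.h>
    #include <emmintrin.h>

    /* Verify that unpacklo/unpackhi reproduce the shuffles they replace. */
    static void check_unpack_equals_shuffle(void)
    {
        __m128d a = _mm_set_pd(2.0, 1.0);   /* a = [1, 2] */
        __m128d b = _mm_set_pd(4.0, 3.0);   /* b = [3, 4] */
        double  u[2], s[2];

        _mm_storeu_pd(u, _mm_unpacklo_pd(a, b));                    /* [1, 3] */
        _mm_storeu_pd(s, _mm_shuffle_pd(a, b, _MM_SHUFFLE2(0, 0))); /* [1, 3] */
        assert(u[0] == s[0] && u[1] == s[1]);

        _mm_storeu_pd(u, _mm_unpackhi_pd(a, b));                    /* [2, 4] */
        _mm_storeu_pd(s, _mm_shuffle_pd(a, b, _MM_SHUFFLE2(1, 1))); /* [2, 4] */
        assert(u[0] == s[0] && u[1] == s[1]);
    }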
index 45ab2aedcc9205f3d7d86d41cb3fbef186d09ef2..63a2e6a95eca23d52fcbb66d32b701cdb67e6717 100644 (file)
  *   energy group pair energy storage
  */
 
+/* Transpose 2 double precision registers */
 #define GMX_MM_TRANSPOSE2_OP_PD(in0,in1,out0,out1)                      \
 {                                                                       \
-    out0 = _mm_shuffle_pd(in0,in1,_MM_SHUFFLE2(0,0));                   \
-    out1 = _mm_shuffle_pd(in0,in1,_MM_SHUFFLE2(1,1));                   \
+    out0 = _mm_unpacklo_pd(in0,in1);                                    \
+    out1 = _mm_unpackhi_pd(in0,in1);                                    \
 }
 
 #if defined GMX_MM128_HERE || !defined GMX_DOUBLE
+/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
 #define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0,in1,in2,in3,out0,out1)    \
 {                                                                       \
     __m128 _c01,_c23;                                                   \
-    _c01 = _mm_shuffle_ps(in0,in1,_MM_SHUFFLE(1,0,1,0));                \
-    _c23 = _mm_shuffle_ps(in2,in3,_MM_SHUFFLE(1,0,1,0));                \
+    _c01 = _mm_movelh_ps(in0,in1);                                      \
+    _c23 = _mm_movelh_ps(in2,in3);                                      \
     out0 = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(2,0,2,0));              \
     out1 = _mm_shuffle_ps(_c01,_c23,_MM_SHUFFLE(3,1,3,1));              \
 }
 #else
+/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
 #define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0,in1,in2,in3,out0,out1)    \
 {                                                                       \
     __m256d _c01,_c23;                                                  \
@@ -72,6 +75,7 @@
 }
 #endif
 
+/* Collect element 2 of the 4 inputs to out */
 #define GMX_MM_SHUFFLE_4_PS_FIL2_TO_1_PS(in0,in1,in2,in3,out)           \
 {                                                                       \
     __m128 _c01,_c23;                                                   \
 
 #ifndef GMX_MM256_HERE
 #ifndef GMX_DOUBLE
-#define GMX_MM_TRANSPOSE_SUM4_PR(i_SSE0,i_SSE1,i_SSE2,i_SSE3,o_SSE)     \
+/* Sum the elements within each input register and store the sums in out */
+#define GMX_MM_TRANSPOSE_SUM4_PR(in0,in1,in2,in3,out)                   \
 {                                                                       \
-    _MM_TRANSPOSE4_PS(i_SSE0,i_SSE1,i_SSE2,i_SSE3);                     \
-    i_SSE0 = _mm_add_ps(i_SSE0,i_SSE1);                                 \
-    i_SSE2 = _mm_add_ps(i_SSE2,i_SSE3);                                 \
-    o_SSE  = _mm_add_ps(i_SSE0,i_SSE2);                                 \
+    _MM_TRANSPOSE4_PS(in0,in1,in2,in3);                                 \
+    in0 = _mm_add_ps(in0,in1);                                          \
+    in2 = _mm_add_ps(in2,in3);                                          \
+    out  = _mm_add_ps(in0,in2);                                         \
 }
 #else
-#define GMX_MM_TRANSPOSE_SUM2_PD(i_SSE0,i_SSE1,o_SSE)                   \
+/* Sum the elements within each input register and store the sums in out */
+#define GMX_MM_TRANSPOSE_SUM2_PD(in0,in1,out)                           \
 {                                                                       \
-    GMX_MM_TRANSPOSE2_PD(i_SSE0,i_SSE1);                                \
-    o_SSE  = _mm_add_pd(i_SSE0,i_SSE1);                                 \
+    GMX_MM_TRANSPOSE2_PD(in0,in1);                                      \
+    out  = _mm_add_pd(in0,in1);                                         \
 }
 #endif
 #else
 #ifndef GMX_DOUBLE
-#define GMX_MM_TRANSPOSE_SUM4_PR(i_SSE0,i_SSE1,i_SSE2,i_SSE3,o_SSE)     \
+/* Sum the elements within each input register and store the sums in out */
+#define GMX_MM_TRANSPOSE_SUM4_PR(in0,in1,in2,in3,out)                   \
 {                                                                       \
-    i_SSE0 = _mm256_hadd_ps(i_SSE0,i_SSE1);                             \
-    i_SSE2 = _mm256_hadd_ps(i_SSE2,i_SSE3);                             \
-    i_SSE1 = _mm256_hadd_ps(i_SSE0,i_SSE2);                             \
-    o_SSE  = _mm_add_ps(_mm256_castps256_ps128(i_SSE1),_mm256_extractf128_ps(i_SSE1,1)); \
+    in0 = _mm256_hadd_ps(in0,in1);                                      \
+    in2 = _mm256_hadd_ps(in2,in3);                                      \
+    in1 = _mm256_hadd_ps(in0,in2);                                      \
+    out = _mm_add_ps(_mm256_castps256_ps128(in1),_mm256_extractf128_ps(in1,1)); \
 }
-#define GMX_MM_TRANSPOSE_SUM4H_PR(i_SSE0,i_SSE2,o_SSE)                  \
+/* Sum the elements of each half of the input registers and store sums in out */
+#define GMX_MM_TRANSPOSE_SUM4H_PR(in0,in2,out)                          \
 {                                                                       \
-    i_SSE0 = _mm256_hadd_ps(i_SSE0,_mm256_setzero_ps());                \
-    i_SSE2 = _mm256_hadd_ps(i_SSE2,_mm256_setzero_ps());                \
-    i_SSE0 = _mm256_hadd_ps(i_SSE0,i_SSE2);                             \
-    i_SSE2 = _mm256_permute_ps(i_SSE0,0b10110001);                      \
-    o_SSE  = _mm_add_ps(_mm256_castps256_ps128(i_SSE0),_mm256_extractf128_ps(i_SSE2,1)); \
+    in0 = _mm256_hadd_ps(in0,_mm256_setzero_ps());                      \
+    in2 = _mm256_hadd_ps(in2,_mm256_setzero_ps());                      \
+    in0 = _mm256_hadd_ps(in0,in2);                                      \
+    in2 = _mm256_permute_ps(in0,0b10110001);                            \
+    out = _mm_add_ps(_mm256_castps256_ps128(in0),_mm256_extractf128_ps(in2,1)); \
 }
 #else
-#define GMX_MM_TRANSPOSE_SUM4_PR(i_SSE0,i_SSE1,i_SSE2,i_SSE3,o_SSE)     \
+/* Sum the elements within each input register and store the sums in out */
+#define GMX_MM_TRANSPOSE_SUM4_PR(in0,in1,in2,in3,out)                   \
 {                                                                       \
-    i_SSE0 = _mm256_hadd_pd(i_SSE0,i_SSE1);                             \
-    i_SSE2 = _mm256_hadd_pd(i_SSE2,i_SSE3);                             \
-    o_SSE  = _mm256_add_pd(_mm256_permute2f128_pd(i_SSE0,i_SSE2,0x20),_mm256_permute2f128_pd(i_SSE0,i_SSE2,0x31)); \
+    in0 = _mm256_hadd_pd(in0,in1);                                      \
+    in2 = _mm256_hadd_pd(in2,in3);                                      \
+    out = _mm256_add_pd(_mm256_permute2f128_pd(in0,in2,0x20),_mm256_permute2f128_pd(in0,in2,0x31)); \
 }
 #endif
 #endif
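
For reference, what the renamed transpose-sum macros compute, independent of
the SSE/AVX flavor: element k of out is the horizontal sum of input register
k (or, for the 4H variant, of one half of an input register). A scalar
equivalent for the 8-wide single-precision case (illustration only, not part
of the patch):

    /* Scalar reference for GMX_MM_TRANSPOSE_SUM4_PR with 8-wide single
     * precision registers: out[k] = sum of the 8 elements of in[k]. */
    static void transpose_sum4_ref(const float in[4][8], float out[4])
    {
        int k, j;

        for (k = 0; k < 4; k++)
        {
            out[k] = 0.0f;
            for (j = 0; j < 8; j++)
            {
                out[k] += in[k][j];
            }
        }
    }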
@@ -136,24 +145,24 @@ gmx_mm128_invsqrt_ps_single(__m128 x)
     return _mm_mul_ps(half,_mm_mul_ps(_mm_sub_ps(three,_mm_mul_ps(_mm_mul_ps(lu,lu),x)),lu));
 }
 
-/* Do 2/4 double precision invsqrt operations.
- * Doing the SSE rsqrt and the first Newton Raphson iteration
+/* Do 2 double precision invsqrt operations.
+ * Doing the SIMD rsqrt and the first Newton Raphson iteration
  * in single precision gives full double precision accuracy.
- * The speed is more than twice as fast as two gmx_mm_invsqrt_pd calls.
+ * The speed is more than double that of two gmx_mm_invsqrt_pd calls.
  */
-#define GMX_MM128_INVSQRT2_PD(i_SSE0,i_SSE1,o_SSE0,o_SSE1)              \
+#define GMX_MM128_INVSQRT2_PD(in0,in1,out0,out1)                        \
 {                                                                       \
     const __m128d half  = _mm_set1_pd(0.5);                             \
     const __m128d three = _mm_set1_pd(3.0);                             \
-    __m128  s_SSE,ir_SSE;                                               \
+    __m128  s,ir;                                                       \
     __m128d lu0,lu1;                                                    \
                                                                         \
-    s_SSE  = _mm_movelh_ps(_mm_cvtpd_ps(i_SSE0),_mm_cvtpd_ps(i_SSE1));  \
-    ir_SSE = gmx_mm128_invsqrt_ps_single(s_SSE);                        \
-    lu0    = _mm_cvtps_pd(ir_SSE);                                      \
-    lu1    = _mm_cvtps_pd(_mm_movehl_ps(ir_SSE,ir_SSE));                \
-    o_SSE0 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu0,lu0),i_SSE0)),lu0)); \
-    o_SSE1 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu1,lu1),i_SSE1)),lu1)); \
+    s    = _mm_movelh_ps(_mm_cvtpd_ps(in0),_mm_cvtpd_ps(in1));          \
+    ir   = gmx_mm128_invsqrt_ps_single(s);                              \
+    lu0  = _mm_cvtps_pd(ir);                                            \
+    lu1  = _mm_cvtps_pd(_mm_movehl_ps(ir,ir));                          \
+    out0 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu0,lu0),in0)),lu0)); \
+    out1 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu1,lu1),in1)),lu1)); \
 }
 
 #define GMX_MM_INVSQRT2_PD GMX_MM128_INVSQRT2_PD
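
The comment added above GMX_MM128_INVSQRT2_PD describes the usual rsqrt
refinement: start from the hardware single-precision estimate and apply
Newton-Raphson steps y' = 0.5*y*(3 - x*y*y), one in single precision (inside
gmx_mm128_invsqrt_ps_single) and one more in double precision. A
free-standing sketch with the same structure (illustration only; the function
name is invented):

    #include <emmintrin.h>

    /* Sketch: 1/sqrt(x) for two doubles via the single-precision rsqrt
     * estimate plus one Newton-Raphson step in single and one in double
     * precision.  The two unused upper float lanes are ignored. */
    static __m128d invsqrt2_pd_sketch(__m128d x)
    {
        const __m128d half  = _mm_set1_pd(0.5);
        const __m128d three = _mm_set1_pd(3.0);
        __m128  xs, y;
        __m128d yd;

        xs = _mm_cvtpd_ps(x);                      /* narrow to single      */
        y  = _mm_rsqrt_ps(xs);                     /* ~12-bit estimate      */
        y  = _mm_mul_ps(_mm_set1_ps(0.5f),         /* NR step in single     */
                        _mm_mul_ps(y, _mm_sub_ps(_mm_set1_ps(3.0f),
                                                 _mm_mul_ps(xs, _mm_mul_ps(y, y)))));
        yd = _mm_cvtps_pd(y);                      /* widen back to double  */
        return _mm_mul_pd(half,                    /* NR step in double     */
                          _mm_mul_pd(yd, _mm_sub_pd(three,
                                                    _mm_mul_pd(x, _mm_mul_pd(yd, yd)))));
    }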
@@ -173,19 +182,23 @@ gmx_mm256_invsqrt_ps_single(__m256 x)
     return _mm256_mul_ps(half,_mm256_mul_ps(_mm256_sub_ps(three,_mm256_mul_ps(_mm256_mul_ps(lu,lu),x)),lu));
 }
 
-#define GMX_MM256_INVSQRT2_PD(i_SSE0,i_SSE1,o_SSE0,o_SSE1)              \
+/* Do 4 double precision invsqrt operations.
+ * Doing the SIMD rsqrt and the first Newton Raphson iteration
+ * in single precision gives full double precision accuracy.
+ */
+#define GMX_MM256_INVSQRT2_PD(in0,in1,out0,out1)                        \
 {                                                                       \
     const __m256d half  = _mm256_set1_pd(0.5);                          \
     const __m256d three = _mm256_set1_pd(3.0);                          \
-    __m256  s_SSE,ir_SSE;                                               \
+    __m256  s,ir;                                                       \
     __m256d lu0,lu1;                                                    \
                                                                         \
-    s_SSE  = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm256_cvtpd_ps(i_SSE0)),_mm256_cvtpd_ps(i_SSE1),1); \
-    ir_SSE = gmx_mm256_invsqrt_ps_single(s_SSE);                        \
-    lu0    = _mm256_cvtps_pd(_mm256_castps256_ps128(ir_SSE));           \
-    lu1    = _mm256_cvtps_pd(_mm256_extractf128_ps(ir_SSE,1));          \
-    o_SSE0 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu0,lu0),i_SSE0)),lu0)); \
-    o_SSE1 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu1,lu1),i_SSE1)),lu1)); \
+    s    = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm256_cvtpd_ps(in0)),_mm256_cvtpd_ps(in1),1); \
+    ir   = gmx_mm256_invsqrt_ps_single(s);                              \
+    lu0  = _mm256_cvtps_pd(_mm256_castps256_ps128(ir));                 \
+    lu1  = _mm256_cvtps_pd(_mm256_extractf128_ps(ir,1));                \
+    out0 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu0,lu0),in0)),lu0)); \
+    out1 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu1,lu1),in1)),lu1)); \
 }
 
 #define GMX_MM_INVSQRT2_PD GMX_MM256_INVSQRT2_PD
@@ -236,12 +249,12 @@ gmx_mm256_invsqrt_ps_single(__m256 x)
     GMX_2_MM_TO_M256(c12t_SSE[0],c12t_SSE[1],c12_SSE);                  \
 }
 
-#define load_lj_pair_params2(nbfp,type,aj,c6_SSE,c12_SSE)                \
+#define load_lj_pair_params2(nbfp,type,aj,c6_SSE,c12_SSE)               \
 {                                                                       \
-    __m128 clj_SSE[2*UNROLLJ],c6t_SSE[2],c12t_SSE[2];                     \
+    __m128 clj_SSE[2*UNROLLJ],c6t_SSE[2],c12t_SSE[2];                   \
     int p;                                                              \
                                                                         \
-    for(p=0; p<2*UNROLLJ; p++)                                            \
+    for(p=0; p<2*UNROLLJ; p++)                                          \
     {                                                                   \
         /* Here we load 4 aligned floats, but we need just 2 */         \
         clj_SSE[p] = _mm_load_ps(nbfp+type[aj+p]*NBFP_STRIDE);          \
@@ -298,7 +311,9 @@ gmx_mm256_invsqrt_ps_single(__m256 x)
  * But AMD CPUs perform significantly worse with gcc than with icc.
  * Performance is improved a bit by using the extract function UNROLLJ times,
  * instead of doing an _mm_store_si128 for every i-particle.
- * With AVX this significantly deteriorates performance (8 extracts iso 4).
+ * This is only faster when we use FDV0 formatted tables, where we also need
+ * to multiply the index by 4, which can be done by a SIMD bit shift.
+ * With single precision AVX, 8 extracts are much slower than 1 store.
  * Because of this, the load_table_f macro always takes the ti parameter,
  * but it is only used with AVX.
  */
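
Tying this back to the alignment fix above: with AVX the full-width register
of table indices is written to the aligned ti scratch array in a single
store, and the table entries are then gathered with scalar loads. A
hypothetical, self-contained sketch of that access pattern (all names
invented; the real load_table_f macro also handles the FDV0 layout and index
scaling described above):

    #include <immintrin.h>

    /* Sketch: store eight table indices to a 32-byte aligned scratch array
     * (this is why the ti pointers must be SIMD-width*sizeof(int) aligned),
     * then gather the tabulated values with scalar loads. */
    static void gather_table_sketch(__m256i ti_S, const float *tab,
                                    int *ti, float vals[8])
    {
        int j;

        _mm256_store_si256((__m256i *)ti, ti_S);   /* aligned 256-bit store */
        for (j = 0; j < 8; j++)
        {
            vals[j] = tab[ti[j]];                  /* scalar table lookups  */
        }
    }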