#undef GMX_SIMD_WIDTH_HERE
-#undef gmx_epi32
-
/* float/double SIMD register type */
#undef gmx_mm_pr
+/* integer SIMD register type, only used in the tabulated PME kernels */
+#undef gmx_epi32
+
#undef gmx_load_pr
#undef gmx_load1_pr
#undef gmx_set1_pr
#undef gmx_setzero_pr
#undef gmx_store_pr
-/* Only used for debugging */
-#undef gmx_storeu_pr
#undef gmx_add_pr
#undef gmx_sub_pr
#undef gmx_mul_pr
#undef gmx_max_pr
#undef gmx_cmplt_pr
+/* gmx_blendzero_pr(real a, boolean b) does: (b ? a : 0) */
+#undef gmx_blendzero_pr
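+/* Example from the kernels below: zero 1/r for pairs beyond the cut-off:
+ *   rinv_S0 = gmx_blendzero_pr(rinv_S0, wco_S0);
+ * where wco_S0 is the within-cut-off boolean.
+ */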
+/* Logical operations on SIMD booleans */
#undef gmx_and_pr
#undef gmx_or_pr
#undef gmx_andnot_pr
-/* Only used to speed up the nbnxn tabulated PME kernels */
+/* Not required, only used to speed up the nbnxn tabulated PME kernels */
+#undef GMX_HAVE_SIMD_FLOOR
#undef gmx_floor_pr
-/* Only used with x86 when blendv is faster than comparison */
+/* Not required, only used when blendv is faster than comparison */
+#undef GMX_HAVE_SIMD_BLENDV
#undef gmx_blendv_pr
+/* Not required: gmx_anytrue_pr(x) returns whether any of the booleans in x is True.
+ * If this is not present, define GMX_SIMD_IS_TRUE(real x),
+ * which should return x==True, where True is the boolean value SIMD comparisons produce.
+ */
+#undef GMX_HAVE_SIMD_ANYTRUE
+#undef gmx_anytrue_pr
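+/* A hypothetical usage sketch: skip a cluster pair when no distance
+ * is within the cut-off:
+ *   if (gmx_anytrue_pr(gmx_or_pr(wco_S0, wco_S2))) { ...interactions... }
+ */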
+
+/* Integer set and cast are only used for nbnxn exclusion masks */
+#undef gmx_set1_epi32
+#undef gmx_castsi_pr
+/* For topology exclusion pair checking we need: ((a & b) ? True : False),
+ * i.e. a boolean that tells whether the bit-wise and of a and b is non-zero.
+ * When integer SIMD operations are present, we use gmx_checkbitmask_epi32(a, b);
+ * otherwise we do all operations, except for the set1, in reals.
+ */
+#undef gmx_load_si
+/* If the same bit is set in both input masks, return all bits 1, otherwise 0 */
+#undef gmx_checkbitmask_epi32
+/* As gmx_checkbitmask_epi32, but operates on reals. In double precision two
+ * identical 32-bit masks are set in one double and one or both can be used.
+ */
+#undef gmx_checkbitmask_pr
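+/* Usage sketch, as in the kernels below: broadcast the 32-bit exclusion
+ * field and test one bit per SIMD lane against precomputed lane masks:
+ *   mask_pr_S = gmx_castsi_pr(gmx_set1_epi32(l_cj[cjind].excl));
+ *   int_S0    = gmx_checkbitmask_pr(mask_pr_S, mask_S0);
+ */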
-#undef gmx_movemask_pr
-
-/* Integer casts are only used for nbnxn x86 exclusion masks */
-#undef gmx_mm_castsi128_pr
-#undef gmx_mm_castsi256_pr
-
-/* Conversions only used for nbnxn x86 exclusion masks and PME table lookup */
+/* Conversions only used for PME table lookup */
#undef gmx_cvttpr_epi32
#undef gmx_cvtepi32_pr
#undef gmx_pmecorrV_pr
-/* Half SIMD-width types and operations only for nbnxn 2xnn search+kernels */
-#undef gmx_mm_hpr
-
-#undef gmx_load_hpr
-#undef gmx_load1_hpr
-#undef gmx_store_hpr
-#undef gmx_add_hpr
-#undef gmx_sub_hpr
-
-#undef gmx_sum4_hpr
-
-#undef gmx_2hpr_to_pr
-
-
-/* By defining GMX_MM128_HERE or GMX_MM256_HERE before including this file
- * the same intrinsics, with defines, can be compiled for either 128 or 256
- * bit wide SSE or AVX instructions.
- * The gmx_ prefix is replaced by _mm_ or _mm256_ (SSE or AVX).
+/* The same SIMD macros can be translated to SIMD intrinsics, and compiled
+ * into instructions, for different SIMD widths and float precisions.
+ * On x86, the gmx_ prefix is replaced by _mm_ or _mm256_ (SSE or AVX).
* The _pr suffix is replaced by _ps or _pd (single or double precision).
* Note that compiler settings will decide if 128-bit intrinsics will
* be translated into SSE or AVX instructions.
*/
-#if !defined GMX_MM128_HERE && !defined GMX_MM256_HERE
-#error "You should define GMX_MM128_HERE or GMX_MM256_HERE"
-#endif
-#if defined GMX_MM128_HERE && defined GMX_MM256_HERE
-#error "You should not define both GMX_MM128_HERE and GMX_MM256_HERE"
+/* Generic macros for obtaining a SIMD aligned pointer from pointer x */
+#undef gmx_simd_align_real
+#undef gmx_simd_align_int
+
+
+#ifdef GMX_USE_HALF_WIDTH_SIMD_HERE
+#if defined GMX_X86_AVX_256
+/* We have half SIMD width support, continue */
+#else
+#error "half SIMD width intrinsics are not supported"
+#endif
#endif
#ifdef GMX_X86_SSE2
-#ifdef GMX_MM128_HERE
-
-#define gmx_epi32 __m128i
+#if !defined GMX_X86_AVX_256 || defined GMX_USE_HALF_WIDTH_SIMD_HERE
#ifndef GMX_DOUBLE
#define gmx_mm_pr __m128
+#define gmx_epi32 __m128i
+
#define gmx_load_pr _mm_load_ps
#define gmx_load1_pr _mm_load1_ps
#define gmx_set1_pr _mm_set1_ps
#define gmx_setzero_pr _mm_setzero_ps
#define gmx_store_pr _mm_store_ps
-#define gmx_storeu_pr _mm_storeu_ps
#define gmx_add_pr _mm_add_ps
#define gmx_sub_pr _mm_sub_ps
#define gmx_mul_pr _mm_mul_ps
#define gmx_max_pr _mm_max_ps
#define gmx_cmplt_pr _mm_cmplt_ps
+#define gmx_blendzero_pr _mm_and_ps
#define gmx_and_pr _mm_and_ps
#define gmx_or_pr _mm_or_ps
#define gmx_andnot_pr _mm_andnot_ps
+#ifdef GMX_X86_SSE4_1
+#define GMX_HAVE_SIMD_FLOOR
#define gmx_floor_pr _mm_floor_ps
+#define GMX_HAVE_SIMD_BLENDV
#define gmx_blendv_pr _mm_blendv_ps
+#endif
-#define gmx_movemask_pr _mm_movemask_ps
+#define GMX_HAVE_SIMD_ANYTRUE
+#define gmx_anytrue_pr _mm_movemask_ps
-#define gmx_mm_castsi128_pr gmx_mm_castsi128_ps
+#define gmx_set1_epi32 _mm_set1_epi32
+#define gmx_castsi_pr gmx_mm_castsi128_ps
+#define gmx_load_si(i) _mm_load_si128((__m128i *) (i))
+#define gmx_checkbitmask_epi32(m0, m1) _mm_cmpeq_epi32(_mm_andnot_si128(m0, m1), _mm_setzero_si128())
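+/* Note: _mm_andnot_si128(m0, m1) computes ~m0 & m1, which is zero exactly
+ * when all bits of m1 are also set in m0, so the cmpeq against zero
+ * returns all-ones (True) for lanes whose mask bit is present in m0.
+ */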
#define gmx_cvttpr_epi32 _mm_cvttps_epi32
#define gmx_cvtepi32_pr _mm_cvtepi32_ps
#define gmx_mm_pr __m128d
+#define gmx_epi32 __m128i
+
#define gmx_load_pr _mm_load_pd
#define gmx_load1_pr _mm_load1_pd
#define gmx_set1_pr _mm_set1_pd
#define gmx_setzero_pr _mm_setzero_pd
#define gmx_store_pr _mm_store_pd
-#define gmx_storeu_pr _mm_storeu_pd
#define gmx_add_pr _mm_add_pd
#define gmx_sub_pr _mm_sub_pd
#define gmx_mul_pr _mm_mul_pd
#define gmx_max_pr _mm_max_pd
#define gmx_cmplt_pr _mm_cmplt_pd
+#define gmx_blendzero_pr _mm_and_pd
#define gmx_and_pr _mm_and_pd
#define gmx_or_pr _mm_or_pd
#define gmx_andnot_pr _mm_andnot_pd
+#ifdef GMX_X86_SSE4_1
+#define GMX_HAVE_SIMD_FLOOR
#define gmx_floor_pr _mm_floor_pd
+#define GMX_HAVE_SIMD_BLENDV
#define gmx_blendv_pr _mm_blendv_pd
+#endif
-#define gmx_movemask_pr _mm_movemask_pd
+#define GMX_HAVE_SIMD_ANYTRUE
+#define gmx_anytrue_pr _mm_movemask_pd
-#define gmx_mm_castsi128_pr gmx_mm_castsi128_pd
+#define gmx_set1_epi32 _mm_set1_epi32
+#define gmx_castsi_pr gmx_mm_castsi128_pd
+#define gmx_load_si(i) _mm_load_si128((__m128i *) (i))
+#define gmx_checkbitmask_epi32(m0, m1) _mm_cmpeq_epi32(_mm_andnot_si128(m0, m1), _mm_setzero_si128())
#define gmx_cvttpr_epi32 _mm_cvttpd_epi32
#define gmx_cvtepi32_pr _mm_cvtepi32_pd
#endif /* ifndef GMX_DOUBLE */
-#endif /* GMX_MM128_HERE */
-
-#ifdef GMX_MM256_HERE
-
-#define gmx_epi32 __m256i
+#else
+/* We have GMX_X86_AVX_256 and not GMX_USE_HALF_WIDTH_SIMD_HERE,
+ * so we use 256-bit SIMD.
+ */
#ifndef GMX_DOUBLE
#define gmx_mm_pr __m256
+#define gmx_epi32 __m256i
+
#define gmx_load_pr _mm256_load_ps
#define gmx_load1_pr(x) _mm256_set1_ps((x)[0])
#define gmx_set1_pr _mm256_set1_ps
#define gmx_setzero_pr _mm256_setzero_ps
#define gmx_store_pr _mm256_store_ps
-#define gmx_storeu_pr _mm256_storeu_ps
#define gmx_add_pr _mm256_add_ps
#define gmx_sub_pr _mm256_sub_ps
#define gmx_mul_pr _mm256_mul_ps
#define gmx_max_pr _mm256_max_ps
-/* Not-equal (ordered, non-signaling) */
-#define gmx_cmpneq_pr(x, y) _mm256_cmp_ps(x, y, 0x0c)
-/* Less-than (ordered, non-signaling) */
+/* Less-than (we use ordered, non-signaling, but that's not required) */
#define gmx_cmplt_pr(x, y) _mm256_cmp_ps(x, y, 0x11)
+#define gmx_blendzero_pr _mm256_and_ps
#define gmx_and_pr _mm256_and_ps
#define gmx_or_pr _mm256_or_ps
#define gmx_andnot_pr _mm256_andnot_ps
+#define GMX_HAVE_SIMD_FLOOR
#define gmx_floor_pr _mm256_floor_ps
+#define GMX_HAVE_SIMD_BLENDV
#define gmx_blendv_pr _mm256_blendv_ps
-#define gmx_movemask_pr _mm256_movemask_ps
+#define GMX_HAVE_SIMD_ANYTRUE
+#define gmx_anytrue_pr _mm256_movemask_ps
-#define gmx_mm_castsi256_pr _mm256_castsi256_ps
+#define gmx_set1_epi32 _mm256_set1_epi32
+#define gmx_castsi_pr _mm256_castsi256_ps
+/* With <= 16 bits used the cast and conversion should not be required,
+ * since only mantissa bits are set and that would give a non-zero float,
+ * but with the Intel compiler this does not work correctly.
+ */
+#define gmx_checkbitmask_pr(m0, m1) _mm256_cmp_ps(_mm256_cvtepi32_ps(_mm256_castps_si256(_mm256_and_ps(m0, m1))), _mm256_setzero_ps(), 0x0c)
#define gmx_cvttpr_epi32 _mm256_cvttps_epi32
#define gmx_pmecorrF_pr gmx_mm256_pmecorrF_ps
#define gmx_pmecorrV_pr gmx_mm256_pmecorrV_ps
-#define gmx_loaddh_pr gmx_mm256_load4_ps
-
-/* Half SIMD-width type */
-#define gmx_mm_hpr __m128
-
-/* Half SIMD-width macros */
-#define gmx_load_hpr _mm_load_ps
-#define gmx_load1_hpr(x) _mm_set1_ps((x)[0])
-#define gmx_store_hpr _mm_store_ps
-#define gmx_add_hpr _mm_add_ps
-#define gmx_sub_hpr _mm_sub_ps
-
-#define gmx_sum4_hpr gmx_mm256_sum4h_m128
-
-/* Conversion between half and full SIMD-width */
-#define gmx_2hpr_to_pr gmx_mm256_set_m128
-
#else
#include "gmx_x86_simd_double.h"
#define gmx_mm_pr __m256d
+/* We use 128-bit integer registers because of missing 256-bit operations */
+#define gmx_epi32 __m128i
+
#define gmx_load_pr _mm256_load_pd
#define gmx_load1_pr(x) _mm256_set1_pd((x)[0])
#define gmx_set1_pr _mm256_set1_pd
#define gmx_setzero_pr _mm256_setzero_pd
#define gmx_store_pr _mm256_store_pd
-#define gmx_storeu_pr _mm256_storeu_pd
#define gmx_add_pr _mm256_add_pd
#define gmx_sub_pr _mm256_sub_pd
#define gmx_mul_pr _mm256_mul_pd
#define gmx_max_pr _mm256_max_pd
-/* Not-equal (ordered, non-signaling) */
-#define gmx_cmpneq_pr(x, y) _mm256_cmp_pd(x, y, 0x0c)
-/* Less-than (ordered, non-signaling) */
+/* Less-than (we use ordered, non-signaling, but that's not required) */
#define gmx_cmplt_pr(x, y) _mm256_cmp_pd(x, y, 0x11)
+#define gmx_blendzero_pr _mm256_and_pd
#define gmx_and_pr _mm256_and_pd
#define gmx_or_pr _mm256_or_pd
#define gmx_andnot_pr _mm256_andnot_pd
+#define GMX_HAVE_SIMD_FLOOR
#define gmx_floor_pr _mm256_floor_pd
+#define GMX_HAVE_SIMD_BLENDV
#define gmx_blendv_pr _mm256_blendv_pd
-#define gmx_movemask_pr _mm256_movemask_pd
+#define GMX_HAVE_SIMD_ANYTRUE
+#define gmx_anytrue_pr _mm256_movemask_pd
-#define gmx_mm_castsi256_pr _mm256_castsi256_pd
+#define gmx_set1_epi32 _mm256_set1_epi32
+#define gmx_castsi_pr _mm256_castsi256_pd
+/* With <= 16 bits used the cast and conversion should not be required,
+ * since only mantissa bits are set and that would give a non-zero float,
+ * but with the Intel compiler this does not work correctly.
+ * Because AVX does not have int->double conversion, we convert via float.
+ */
+#define gmx_checkbitmask_pr(m0, m1) _mm256_cmp_pd(_mm256_castps_pd(_mm256_cvtepi32_ps(_mm256_castpd_si256(_mm256_and_pd(m0, m1)))), _mm256_setzero_pd(), 0x0c)
#define gmx_cvttpr_epi32 _mm256_cvttpd_epi32
#define gmx_pmecorrF_pr gmx_mm256_pmecorrF_pd
#define gmx_pmecorrV_pr gmx_mm256_pmecorrV_pd
-#endif
+#endif /* GMX_DOUBLE */
-#endif /* GMX_MM256_HERE */
+#endif /* 128- or 256-bit x86 SIMD */
#endif /* GMX_X86_SSE2 */
+
+
+/* Generic macros to extract a SIMD aligned pointer from a pointer x.
+ * The array behind x should have at least GMX_SIMD_WIDTH_HERE elements more
+ * than you intend to use, since aligning can advance the pointer by up to
+ * GMX_SIMD_WIDTH_HERE elements.
+ */
+
+#define gmx_simd_align_real(x) (real *)(((size_t)((x)+GMX_SIMD_WIDTH_HERE)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(real)-1))))
+
+#define gmx_simd_align_int(x) (int *)(((size_t)((x)+GMX_SIMD_WIDTH_HERE)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int )-1))))
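+
+/* Usage sketch, as in the kernels below: over-allocate a plain array,
+ * then align the pointer before using SIMD loads/stores:
+ *   int ti0_array[2*GMX_SIMD_WIDTH_HERE], *ti0;
+ *   ti0 = gmx_simd_align_int(ti0_array);
+ */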
/* Use SIMD accelerated nbnxn search and kernels */
#define GMX_NBNXN_SIMD
-#ifdef GMX_X86_AVX_256
-/* Note that setting this to 128 will also work with AVX-256, but slower */
+/* Uncomment the next line to use (slower) 128-bit SIMD with AVX-256 */
+/* #define GMX_NBNXN_HALF_WIDTH_SIMD */
+
+#if defined GMX_X86_AVX_256 && !defined GMX_NBNXN_HALF_WIDTH_SIMD
#define GMX_NBNXN_SIMD_BITWIDTH 256
#else
#define GMX_NBNXN_SIMD_BITWIDTH 128
real *x; /* x and possibly q, size natoms*xstride */
real *simd_4xn_diag; /* indices to set the SIMD 4xN diagonal masks */
real *simd_2xnn_diag; /* indices to set the SIMD 2x(N+N)diagonal masks */
+ unsigned *simd_excl_mask; /* exclusion masks for SIMD topology exclusions */
int nout; /* The number of force arrays */
nbnxn_atomdata_output_t *out; /* Output data structures */
int nalloc; /* Allocation size of all arrays (for x/f *x/fstride) */
* In the kernel we can subtract 1 to generate the subsequent mask.
*/
const int simd_width = GMX_NBNXN_SIMD_BITWIDTH/(sizeof(real)*8);
- int simd_4xn_diag_size, j;
+ int simd_4xn_diag_size, real_excl, simd_excl_size, j, s;
simd_4xn_diag_size = max(NBNXN_CPU_CLUSTER_I_SIZE, simd_width);
snew_aligned(nbat->simd_4xn_diag, simd_4xn_diag_size, NBNXN_MEM_ALIGN);
/* The next half of the SIMD width is for i + 1 */
nbat->simd_2xnn_diag[simd_width/2+j] = j - 1 - 0.5;
}
+
+ /* We always use 32-bit integer exclusion masks. When we use
+ * double precision, we fit two integers in a double SIMD register.
+ */
+ real_excl = sizeof(real)/sizeof(*nbat->simd_excl_mask);
+ /* Set bits for use with both 4xN and 2x(N+N) kernels */
+    simd_excl_size = NBNXN_CPU_CLUSTER_I_SIZE*simd_width;
+ snew_aligned(nbat->simd_excl_mask, simd_excl_size*real_excl, NBNXN_MEM_ALIGN);
+ for (j = 0; j < simd_excl_size; j++)
+ {
+        /* Set the consecutive bits for masking pair exclusions.
+         * For double precision a single-bit mask per real would be enough,
+         * but setting two identical 32-bit masks avoids endianness issues.
+         */
+        for (s = 0; s < real_excl; s++)
+        {
+            nbat->simd_excl_mask[j*real_excl + s] = (1U << j);
+        }
+ }
+ }
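+    /* Resulting layout: entry j (one real, i.e. real_excl unsigneds)
+     * holds only bit j, so a SIMD real load of consecutive entries gives
+     * the single-bit lane masks used by gmx_checkbitmask_pr/_epi32.
+     */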
}
#endif
#endif
/* Without exclusions and energies we only need to mask the cut-off,
- * this can be faster with blendv (only available with SSE4.1 and later).
+ * which can be faster with blendv.
*/
-#if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_X86_SSE4_1 && !defined COUNT_PAIRS
+#if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_HAVE_SIMD_BLENDV && !defined COUNT_PAIRS
/* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
* With gcc this is slower, except for RF on Sandy Bridge.
* Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
#ifdef CHECK_EXCLS
/* Interaction (non-exclusion) mask of all 1's or 0's */
- gmx_mm_pr int_SSE0;
- gmx_mm_pr int_SSE2;
+ gmx_mm_pr int_S0;
+ gmx_mm_pr int_S2;
#endif
- gmx_mm_pr jxSSE, jySSE, jzSSE;
- gmx_mm_pr dx_SSE0, dy_SSE0, dz_SSE0;
- gmx_mm_pr dx_SSE2, dy_SSE2, dz_SSE2;
- gmx_mm_pr tx_SSE0, ty_SSE0, tz_SSE0;
- gmx_mm_pr tx_SSE2, ty_SSE2, tz_SSE2;
- gmx_mm_pr rsq_SSE0, rinv_SSE0, rinvsq_SSE0;
- gmx_mm_pr rsq_SSE2, rinv_SSE2, rinvsq_SSE2;
+ gmx_mm_pr jx_S, jy_S, jz_S;
+ gmx_mm_pr dx_S0, dy_S0, dz_S0;
+ gmx_mm_pr dx_S2, dy_S2, dz_S2;
+ gmx_mm_pr tx_S0, ty_S0, tz_S0;
+ gmx_mm_pr tx_S2, ty_S2, tz_S2;
+ gmx_mm_pr rsq_S0, rinv_S0, rinvsq_S0;
+ gmx_mm_pr rsq_S2, rinv_S2, rinvsq_S2;
#ifndef CUTOFF_BLENDV
/* wco: within cut-off, mask of all 1's or 0's */
- gmx_mm_pr wco_SSE0;
- gmx_mm_pr wco_SSE2;
+ gmx_mm_pr wco_S0;
+ gmx_mm_pr wco_S2;
#endif
#ifdef VDW_CUTOFF_CHECK
- gmx_mm_pr wco_vdw_SSE0;
+ gmx_mm_pr wco_vdw_S0;
#ifndef HALF_LJ
- gmx_mm_pr wco_vdw_SSE2;
+ gmx_mm_pr wco_vdw_S2;
#endif
#endif
#ifdef CALC_COULOMB
#ifdef CHECK_EXCLS
/* 1/r masked with the interaction mask */
- gmx_mm_pr rinv_ex_SSE0;
- gmx_mm_pr rinv_ex_SSE2;
+ gmx_mm_pr rinv_ex_S0;
+ gmx_mm_pr rinv_ex_S2;
#endif
- gmx_mm_pr jq_SSE;
- gmx_mm_pr qq_SSE0;
- gmx_mm_pr qq_SSE2;
+ gmx_mm_pr jq_S;
+ gmx_mm_pr qq_S0;
+ gmx_mm_pr qq_S2;
#ifdef CALC_COUL_TAB
/* The force (PME mesh force) we need to subtract from 1/r^2 */
- gmx_mm_pr fsub_SSE0;
- gmx_mm_pr fsub_SSE2;
+ gmx_mm_pr fsub_S0;
+ gmx_mm_pr fsub_S2;
#endif
#ifdef CALC_COUL_EWALD
- gmx_mm_pr brsq_SSE0, brsq_SSE2;
- gmx_mm_pr ewcorr_SSE0, ewcorr_SSE2;
+ gmx_mm_pr brsq_S0, brsq_S2;
+ gmx_mm_pr ewcorr_S0, ewcorr_S2;
#endif
/* frcoul = (1/r - fsub)*r */
- gmx_mm_pr frcoul_SSE0;
- gmx_mm_pr frcoul_SSE2;
+ gmx_mm_pr frcoul_S0;
+ gmx_mm_pr frcoul_S2;
#ifdef CALC_COUL_TAB
/* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
- gmx_mm_pr r_SSE0, rs_SSE0, rf_SSE0, frac_SSE0;
- gmx_mm_pr r_SSE2, rs_SSE2, rf_SSE2, frac_SSE2;
+ gmx_mm_pr r_S0, rs_S0, rf_S0, frac_S0;
+ gmx_mm_pr r_S2, rs_S2, rf_S2, frac_S2;
/* Table index: rs truncated to an int */
-#if !(defined GMX_MM256_HERE && defined GMX_DOUBLE)
- gmx_epi32 ti_SSE0, ti_SSE2;
-#else
- __m128i ti_SSE0, ti_SSE2;
-#endif
+ gmx_epi32 ti_S0, ti_S2;
/* Linear force table values */
- gmx_mm_pr ctab0_SSE0, ctab1_SSE0;
- gmx_mm_pr ctab0_SSE2, ctab1_SSE2;
+ gmx_mm_pr ctab0_S0, ctab1_S0;
+ gmx_mm_pr ctab0_S2, ctab1_S2;
#ifdef CALC_ENERGIES
/* Quadratic energy table value */
- gmx_mm_pr ctabv_SSE0;
- gmx_mm_pr ctabv_SSE2;
+ gmx_mm_pr ctabv_S0;
+ gmx_mm_pr ctabv_S2;
#endif
#endif
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
/* The potential (PME mesh) we need to subtract from 1/r */
- gmx_mm_pr vc_sub_SSE0;
- gmx_mm_pr vc_sub_SSE2;
+ gmx_mm_pr vc_sub_S0;
+ gmx_mm_pr vc_sub_S2;
#endif
#ifdef CALC_ENERGIES
/* Electrostatic potential */
- gmx_mm_pr vcoul_SSE0;
- gmx_mm_pr vcoul_SSE2;
+ gmx_mm_pr vcoul_S0;
+ gmx_mm_pr vcoul_S2;
#endif
#endif
/* The force times 1/r */
- gmx_mm_pr fscal_SSE0;
- gmx_mm_pr fscal_SSE2;
+ gmx_mm_pr fscal_S0;
+ gmx_mm_pr fscal_S2;
#ifdef CALC_LJ
#ifdef LJ_COMB_LB
/* LJ sigma_j/2 and sqrt(epsilon_j) */
- gmx_mm_pr hsig_j_SSE, seps_j_SSE;
+ gmx_mm_pr hsig_j_S, seps_j_S;
/* LJ sigma_ij and epsilon_ij */
- gmx_mm_pr sig_SSE0, eps_SSE0;
+ gmx_mm_pr sig_S0, eps_S0;
#ifndef HALF_LJ
- gmx_mm_pr sig_SSE2, eps_SSE2;
+ gmx_mm_pr sig_S2, eps_S2;
#endif
#ifdef CALC_ENERGIES
- gmx_mm_pr sig2_SSE0, sig6_SSE0;
+ gmx_mm_pr sig2_S0, sig6_S0;
#ifndef HALF_LJ
- gmx_mm_pr sig2_SSE2, sig6_SSE2;
+ gmx_mm_pr sig2_S2, sig6_S2;
#endif
#endif /* LJ_COMB_LB */
#endif /* CALC_LJ */
#ifdef LJ_COMB_GEOM
- gmx_mm_pr c6s_j_SSE, c12s_j_SSE;
+ gmx_mm_pr c6s_j_S, c12s_j_S;
#endif
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
#ifndef FIX_LJ_C
/* LJ C6 and C12 parameters, used with geometric comb. rule */
- gmx_mm_pr c6_SSE0, c12_SSE0;
+ gmx_mm_pr c6_S0, c12_S0;
#ifndef HALF_LJ
- gmx_mm_pr c6_SSE2, c12_SSE2;
+ gmx_mm_pr c6_S2, c12_S2;
#endif
#endif
/* Intermediate variables for LJ calculation */
#ifndef LJ_COMB_LB
- gmx_mm_pr rinvsix_SSE0;
+ gmx_mm_pr rinvsix_S0;
#ifndef HALF_LJ
- gmx_mm_pr rinvsix_SSE2;
+ gmx_mm_pr rinvsix_S2;
#endif
#endif
#ifdef LJ_COMB_LB
- gmx_mm_pr sir_SSE0, sir2_SSE0, sir6_SSE0;
+ gmx_mm_pr sir_S0, sir2_S0, sir6_S0;
#ifndef HALF_LJ
- gmx_mm_pr sir_SSE2, sir2_SSE2, sir6_SSE2;
+ gmx_mm_pr sir_S2, sir2_S2, sir6_S2;
#endif
#endif
- gmx_mm_pr FrLJ6_SSE0, FrLJ12_SSE0;
+ gmx_mm_pr FrLJ6_S0, FrLJ12_S0;
#ifndef HALF_LJ
- gmx_mm_pr FrLJ6_SSE2, FrLJ12_SSE2;
+ gmx_mm_pr FrLJ6_S2, FrLJ12_S2;
#endif
#ifdef CALC_ENERGIES
- gmx_mm_pr VLJ6_SSE0, VLJ12_SSE0, VLJ_SSE0;
+ gmx_mm_pr VLJ6_S0, VLJ12_S0, VLJ_S0;
#ifndef HALF_LJ
- gmx_mm_pr VLJ6_SSE2, VLJ12_SSE2, VLJ_SSE2;
+ gmx_mm_pr VLJ6_S2, VLJ12_S2, VLJ_S2;
#endif
#endif
#endif /* CALC_LJ */
+ gmx_mm_hpr fjx_S, fjy_S, fjz_S;
+
/* j-cluster index */
cj = l_cj[cjind].cj;
#ifdef CHECK_EXCLS
{
/* Load integer interaction mask */
- /* With AVX there are no integer operations, so cast to real */
- gmx_mm_pr mask_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl));
- /* Intel Compiler version 12.1.3 20120130 is buggy: use cast.
- * With gcc we don't need the cast, but it's faster.
- */
-#define cast_cvt(x) _mm256_cvtepi32_ps(_mm256_castps_si256(x))
- int_SSE0 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr, mask0)), zero_SSE);
- int_SSE2 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr, mask2)), zero_SSE);
-#undef cast_cvt
+ gmx_mm_pr mask_pr_S = gmx_castsi_pr(gmx_set1_epi32(l_cj[cjind].excl));
+
+ int_S0 = gmx_checkbitmask_pr(mask_pr_S, mask_S0);
+ int_S2 = gmx_checkbitmask_pr(mask_pr_S, mask_S2);
}
#endif
+
/* load j atom coordinates */
- jxSSE = gmx_loaddh_pr(x+ajx);
- jySSE = gmx_loaddh_pr(x+ajy);
- jzSSE = gmx_loaddh_pr(x+ajz);
+ gmx_loaddh_pr(jx_S, x+ajx);
+ gmx_loaddh_pr(jy_S, x+ajy);
+ gmx_loaddh_pr(jz_S, x+ajz);
/* Calculate distance */
- dx_SSE0 = gmx_sub_pr(ix_SSE0, jxSSE);
- dy_SSE0 = gmx_sub_pr(iy_SSE0, jySSE);
- dz_SSE0 = gmx_sub_pr(iz_SSE0, jzSSE);
- dx_SSE2 = gmx_sub_pr(ix_SSE2, jxSSE);
- dy_SSE2 = gmx_sub_pr(iy_SSE2, jySSE);
- dz_SSE2 = gmx_sub_pr(iz_SSE2, jzSSE);
+ dx_S0 = gmx_sub_pr(ix_S0, jx_S);
+ dy_S0 = gmx_sub_pr(iy_S0, jy_S);
+ dz_S0 = gmx_sub_pr(iz_S0, jz_S);
+ dx_S2 = gmx_sub_pr(ix_S2, jx_S);
+ dy_S2 = gmx_sub_pr(iy_S2, jy_S);
+ dz_S2 = gmx_sub_pr(iz_S2, jz_S);
/* rsq = dx*dx+dy*dy+dz*dz */
- rsq_SSE0 = gmx_calc_rsq_pr(dx_SSE0, dy_SSE0, dz_SSE0);
- rsq_SSE2 = gmx_calc_rsq_pr(dx_SSE2, dy_SSE2, dz_SSE2);
+ rsq_S0 = gmx_calc_rsq_pr(dx_S0, dy_S0, dz_S0);
+ rsq_S2 = gmx_calc_rsq_pr(dx_S2, dy_S2, dz_S2);
#ifndef CUTOFF_BLENDV
- wco_SSE0 = gmx_cmplt_pr(rsq_SSE0, rc2_SSE);
- wco_SSE2 = gmx_cmplt_pr(rsq_SSE2, rc2_SSE);
+ wco_S0 = gmx_cmplt_pr(rsq_S0, rc2_S);
+ wco_S2 = gmx_cmplt_pr(rsq_S2, rc2_S);
#endif
#ifdef CHECK_EXCLS
#if UNROLLJ == UNROLLI
if (cj == ci_sh)
{
- wco_SSE0 = gmx_and_pr(wco_SSE0, diag_SSE0);
- wco_SSE2 = gmx_and_pr(wco_SSE2, diag_SSE2);
+ wco_S0 = gmx_and_pr(wco_S0, diag_S0);
+ wco_S2 = gmx_and_pr(wco_S2, diag_S2);
}
#else
-#error "only UNROLLJ == UNROLLI currently supported in the joined kernels"
+#if UNROLLJ == 2*UNROLLI
+ if (cj*2 == ci_sh)
+ {
+ wco_S0 = gmx_and_pr(wco_S0, diag0_S0);
+ wco_S2 = gmx_and_pr(wco_S2, diag0_S2);
+ }
+ else if (cj*2 + 1 == ci_sh)
+ {
+ wco_S0 = gmx_and_pr(wco_S0, diag1_S0);
+ wco_S2 = gmx_and_pr(wco_S2, diag1_S2);
+ }
+#else
+#error "only UNROLLJ == UNROLLI*(1 or 2) currently supported in 2xnn kernels"
+#endif
#endif
#else /* EXCL_FORCES */
- /* Remove all excluded atom pairs from the list */
- wco_SSE0 = gmx_and_pr(wco_SSE0, int_SSE0);
- wco_SSE2 = gmx_and_pr(wco_SSE2, int_SSE2);
+ /* No exclusion forces: remove all excluded atom pairs from the list */
+ wco_S0 = gmx_and_pr(wco_S0, int_S0);
+ wco_S2 = gmx_and_pr(wco_S2, int_S2);
#endif
#endif
#ifdef COUNT_PAIRS
{
int i, j;
- real tmp[UNROLLJ];
- for (i = 0; i < UNROLLI; i++)
+ real tmpa[2*GMX_SIMD_WIDTH_HERE], *tmp;
+ tmp = gmx_simd_align_real(tmpa);
+ for (i = 0; i < UNROLLI; i+=2)
{
- gmx_storeu_pr(tmp, i == 0 ? wco_SSE0 : (i == 1 ? wco_SSE1 : (i == 2 ? wco_SSE2 : wco_SSE3)));
- for (j = 0; j < UNROLLJ; j++)
+ gmx_store_pr(tmp, i == 0 ? wco_S0 : wco_S2);
+ for (j = 0; j < 2*UNROLLJ; j++)
{
if (!(tmp[j] == 0))
{
#ifdef CHECK_EXCLS
/* For excluded pairs add a small number to avoid r^-6 = NaN */
- rsq_SSE0 = gmx_add_pr(rsq_SSE0, gmx_andnot_pr(int_SSE0, avoid_sing_SSE));
- rsq_SSE2 = gmx_add_pr(rsq_SSE2, gmx_andnot_pr(int_SSE2, avoid_sing_SSE));
+ rsq_S0 = gmx_add_pr(rsq_S0, gmx_andnot_pr(int_S0, avoid_sing_S));
+ rsq_S2 = gmx_add_pr(rsq_S2, gmx_andnot_pr(int_S2, avoid_sing_S));
#endif
/* Calculate 1/r */
- rinv_SSE0 = gmx_invsqrt_pr(rsq_SSE0);
- rinv_SSE2 = gmx_invsqrt_pr(rsq_SSE2);
+ rinv_S0 = gmx_invsqrt_pr(rsq_S0);
+ rinv_S2 = gmx_invsqrt_pr(rsq_S2);
#ifdef CALC_COULOMB
/* Load parameters for j atom */
- jq_SSE = gmx_loaddh_pr(q+aj);
- qq_SSE0 = gmx_mul_pr(iq_SSE0, jq_SSE);
- qq_SSE2 = gmx_mul_pr(iq_SSE2, jq_SSE);
+ gmx_loaddh_pr(jq_S, q+aj);
+ qq_S0 = gmx_mul_pr(iq_S0, jq_S);
+ qq_S2 = gmx_mul_pr(iq_S2, jq_S);
#endif
#ifdef CALC_LJ
#if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
- load_lj_pair_params2(nbfp0, nbfp1, type, aj, c6_SSE0, c12_SSE0);
+ load_lj_pair_params2(nbfp0, nbfp1, type, aj, c6_S0, c12_S0);
#ifndef HALF_LJ
- load_lj_pair_params2(nbfp2, nbfp3, type, aj, c6_SSE2, c12_SSE2);
+ load_lj_pair_params2(nbfp2, nbfp3, type, aj, c6_S2, c12_S2);
#endif
#endif /* not defined any LJ rule */
#ifdef LJ_COMB_GEOM
- c6s_j_SSE = gmx_loaddh_pr(ljc+aj2+0);
- c12s_j_SSE = gmx_loaddh_pr(ljc+aj2+STRIDE);
- c6_SSE0 = gmx_mul_pr(c6s_SSE0, c6s_j_SSE );
+ gmx_loaddh_pr(c6s_j_S, ljc+aj2+0);
+ gmx_loaddh_pr(c12s_j_S, ljc+aj2+STRIDE);
+ c6_S0 = gmx_mul_pr(c6s_S0, c6s_j_S );
#ifndef HALF_LJ
- c6_SSE2 = gmx_mul_pr(c6s_SSE2, c6s_j_SSE );
+ c6_S2 = gmx_mul_pr(c6s_S2, c6s_j_S );
#endif
- c12_SSE0 = gmx_mul_pr(c12s_SSE0, c12s_j_SSE);
+ c12_S0 = gmx_mul_pr(c12s_S0, c12s_j_S);
#ifndef HALF_LJ
- c12_SSE2 = gmx_mul_pr(c12s_SSE2, c12s_j_SSE);
+ c12_S2 = gmx_mul_pr(c12s_S2, c12s_j_S);
#endif
#endif /* LJ_COMB_GEOM */
#ifdef LJ_COMB_LB
- hsig_j_SSE = gmx_loaddh_pr(ljc+aj2+0);
- seps_j_SSE = gmx_loaddh_pr(ljc+aj2+STRIDE);
+ gmx_loaddh_pr(hsig_j_S, ljc+aj2+0);
+ gmx_loaddh_pr(seps_j_S, ljc+aj2+STRIDE);
- sig_SSE0 = gmx_add_pr(hsig_i_SSE0, hsig_j_SSE);
- eps_SSE0 = gmx_mul_pr(seps_i_SSE0, seps_j_SSE);
+ sig_S0 = gmx_add_pr(hsig_i_S0, hsig_j_S);
+ eps_S0 = gmx_mul_pr(seps_i_S0, seps_j_S);
#ifndef HALF_LJ
- sig_SSE2 = gmx_add_pr(hsig_i_SSE2, hsig_j_SSE);
- eps_SSE2 = gmx_mul_pr(seps_i_SSE2, seps_j_SSE);
+ sig_S2 = gmx_add_pr(hsig_i_S2, hsig_j_S);
+ eps_S2 = gmx_mul_pr(seps_i_S2, seps_j_S);
#endif
#endif /* LJ_COMB_LB */
#endif /* CALC_LJ */
#ifndef CUTOFF_BLENDV
- rinv_SSE0 = gmx_and_pr(rinv_SSE0, wco_SSE0);
- rinv_SSE2 = gmx_and_pr(rinv_SSE2, wco_SSE2);
+ rinv_S0 = gmx_blendzero_pr(rinv_S0, wco_S0);
+ rinv_S2 = gmx_blendzero_pr(rinv_S2, wco_S2);
#else
/* We only need to mask for the cut-off: blendv is faster */
- rinv_SSE0 = gmx_blendv_pr(rinv_SSE0, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE0));
- rinv_SSE2 = gmx_blendv_pr(rinv_SSE2, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE2));
+ rinv_S0 = gmx_blendv_pr(rinv_S0, zero_S, gmx_sub_pr(rc2_S, rsq_S0));
+ rinv_S2 = gmx_blendv_pr(rinv_S2, zero_S, gmx_sub_pr(rc2_S, rsq_S2));
#endif
- rinvsq_SSE0 = gmx_mul_pr(rinv_SSE0, rinv_SSE0);
- rinvsq_SSE2 = gmx_mul_pr(rinv_SSE2, rinv_SSE2);
+ rinvsq_S0 = gmx_mul_pr(rinv_S0, rinv_S0);
+ rinvsq_S2 = gmx_mul_pr(rinv_S2, rinv_S2);
#ifdef CALC_COULOMB
/* Note that here we calculate force*r, not the usual force/r.
#ifdef EXCL_FORCES
/* Only add 1/r for non-excluded atom pairs */
- rinv_ex_SSE0 = gmx_and_pr(rinv_SSE0, int_SSE0);
- rinv_ex_SSE2 = gmx_and_pr(rinv_SSE2, int_SSE2);
+ rinv_ex_S0 = gmx_blendzero_pr(rinv_S0, int_S0);
+ rinv_ex_S2 = gmx_blendzero_pr(rinv_S2, int_S2);
#else
/* No exclusion forces, we always need 1/r */
-#define rinv_ex_SSE0 rinv_SSE0
-#define rinv_ex_SSE2 rinv_SSE2
+#define rinv_ex_S0 rinv_S0
+#define rinv_ex_S2 rinv_S2
#endif
#ifdef CALC_COUL_RF
/* Electrostatic interactions */
- frcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_add_pr(rinv_ex_SSE0, gmx_mul_pr(rsq_SSE0, mrc_3_SSE)));
- frcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_add_pr(rinv_ex_SSE2, gmx_mul_pr(rsq_SSE2, mrc_3_SSE)));
+ frcoul_S0 = gmx_mul_pr(qq_S0, gmx_add_pr(rinv_ex_S0, gmx_mul_pr(rsq_S0, mrc_3_S)));
+ frcoul_S2 = gmx_mul_pr(qq_S2, gmx_add_pr(rinv_ex_S2, gmx_mul_pr(rsq_S2, mrc_3_S)));
#ifdef CALC_ENERGIES
- vcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_add_pr(rinv_ex_SSE0, gmx_add_pr(gmx_mul_pr(rsq_SSE0, hrc_3_SSE), moh_rc_SSE)));
- vcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_add_pr(rinv_ex_SSE2, gmx_add_pr(gmx_mul_pr(rsq_SSE2, hrc_3_SSE), moh_rc_SSE)));
+ vcoul_S0 = gmx_mul_pr(qq_S0, gmx_add_pr(rinv_ex_S0, gmx_add_pr(gmx_mul_pr(rsq_S0, hrc_3_S), moh_rc_S)));
+ vcoul_S2 = gmx_mul_pr(qq_S2, gmx_add_pr(rinv_ex_S2, gmx_add_pr(gmx_mul_pr(rsq_S2, hrc_3_S), moh_rc_S)));
#endif
#endif
* as large distances can cause an overflow in gmx_pmecorrF/V.
*/
#ifndef CUTOFF_BLENDV
- brsq_SSE0 = gmx_mul_pr(beta2_SSE, gmx_and_pr(rsq_SSE0, wco_SSE0));
- brsq_SSE2 = gmx_mul_pr(beta2_SSE, gmx_and_pr(rsq_SSE2, wco_SSE2));
+ brsq_S0 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S0, wco_S0));
+ brsq_S2 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S2, wco_S2));
#else
/* Strangely, putting mul on a separate line is slower (icc 13) */
- brsq_SSE0 = gmx_mul_pr(beta2_SSE, gmx_blendv_pr(rsq_SSE0, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE0)));
- brsq_SSE2 = gmx_mul_pr(beta2_SSE, gmx_blendv_pr(rsq_SSE2, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE2)));
+ brsq_S0 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S0, zero_S, gmx_sub_pr(rc2_S, rsq_S0)));
+ brsq_S2 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S2, zero_S, gmx_sub_pr(rc2_S, rsq_S2)));
#endif
- ewcorr_SSE0 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE0), beta_SSE);
- ewcorr_SSE2 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE2), beta_SSE);
- frcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_add_pr(rinv_ex_SSE0, gmx_mul_pr(ewcorr_SSE0, brsq_SSE0)));
- frcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_add_pr(rinv_ex_SSE2, gmx_mul_pr(ewcorr_SSE2, brsq_SSE2)));
+ ewcorr_S0 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S0), beta_S);
+ ewcorr_S2 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S2), beta_S);
+ frcoul_S0 = gmx_mul_pr(qq_S0, gmx_add_pr(rinv_ex_S0, gmx_mul_pr(ewcorr_S0, brsq_S0)));
+ frcoul_S2 = gmx_mul_pr(qq_S2, gmx_add_pr(rinv_ex_S2, gmx_mul_pr(ewcorr_S2, brsq_S2)));
#ifdef CALC_ENERGIES
- vc_sub_SSE0 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE0), beta_SSE);
- vc_sub_SSE2 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE2), beta_SSE);
+ vc_sub_S0 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S0), beta_S);
+ vc_sub_S2 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S2), beta_S);
#endif
#endif /* CALC_COUL_EWALD */
#ifdef CALC_COUL_TAB
/* Electrostatic interactions */
- r_SSE0 = gmx_mul_pr(rsq_SSE0, rinv_SSE0);
- r_SSE2 = gmx_mul_pr(rsq_SSE2, rinv_SSE2);
+ r_S0 = gmx_mul_pr(rsq_S0, rinv_S0);
+ r_S2 = gmx_mul_pr(rsq_S2, rinv_S2);
/* Convert r to scaled table units */
- rs_SSE0 = gmx_mul_pr(r_SSE0, invtsp_SSE);
- rs_SSE2 = gmx_mul_pr(r_SSE2, invtsp_SSE);
+ rs_S0 = gmx_mul_pr(r_S0, invtsp_S);
+ rs_S2 = gmx_mul_pr(r_S2, invtsp_S);
/* Truncate scaled r to an int */
- ti_SSE0 = gmx_cvttpr_epi32(rs_SSE0);
- ti_SSE2 = gmx_cvttpr_epi32(rs_SSE2);
-#ifdef GMX_X86_SSE4_1
- /* SSE4.1 floor is faster than gmx_cvtepi32_ps int->float cast */
- rf_SSE0 = gmx_floor_pr(rs_SSE0);
- rf_SSE2 = gmx_floor_pr(rs_SSE2);
+ ti_S0 = gmx_cvttpr_epi32(rs_S0);
+ ti_S2 = gmx_cvttpr_epi32(rs_S2);
+#ifdef GMX_HAVE_SIMD_FLOOR
+ rf_S0 = gmx_floor_pr(rs_S0);
+ rf_S2 = gmx_floor_pr(rs_S2);
#else
- rf_SSE0 = gmx_cvtepi32_pr(ti_SSE0);
- rf_SSE2 = gmx_cvtepi32_pr(ti_SSE2);
+ rf_S0 = gmx_cvtepi32_pr(ti_S0);
+ rf_S2 = gmx_cvtepi32_pr(ti_S2);
#endif
- frac_SSE0 = gmx_sub_pr(rs_SSE0, rf_SSE0);
- frac_SSE2 = gmx_sub_pr(rs_SSE2, rf_SSE2);
+ frac_S0 = gmx_sub_pr(rs_S0, rf_S0);
+ frac_S2 = gmx_sub_pr(rs_S2, rf_S2);
/* Load and interpolate table forces and possibly energies.
* Force and energy can be combined in one table, stride 4: FDV0
* Currently single precision uses FDV0, double F and V.
*/
#ifndef CALC_ENERGIES
- load_table_f(tab_coul_F, ti_SSE0, ti0, ctab0_SSE0, ctab1_SSE0);
- load_table_f(tab_coul_F, ti_SSE2, ti2, ctab0_SSE2, ctab1_SSE2);
+ load_table_f(tab_coul_F, ti_S0, ti0, ctab0_S0, ctab1_S0);
+ load_table_f(tab_coul_F, ti_S2, ti2, ctab0_S2, ctab1_S2);
#else
#ifdef TAB_FDV0
- load_table_f_v(tab_coul_F, ti_SSE0, ti0, ctab0_SSE0, ctab1_SSE0, ctabv_SSE0);
- load_table_f_v(tab_coul_F, ti_SSE2, ti2, ctab0_SSE2, ctab1_SSE2, ctabv_SSE2);
+ load_table_f_v(tab_coul_F, ti_S0, ti0, ctab0_S0, ctab1_S0, ctabv_S0);
+ load_table_f_v(tab_coul_F, ti_S2, ti2, ctab0_S2, ctab1_S2, ctabv_S2);
#else
- load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE0, ti0, ctab0_SSE0, ctab1_SSE0, ctabv_SSE0);
- load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE2, ti2, ctab0_SSE2, ctab1_SSE2, ctabv_SSE2);
+ load_table_f_v(tab_coul_F, tab_coul_V, ti_S0, ti0, ctab0_S0, ctab1_S0, ctabv_S0);
+ load_table_f_v(tab_coul_F, tab_coul_V, ti_S2, ti2, ctab0_S2, ctab1_S2, ctabv_S2);
#endif
#endif
- fsub_SSE0 = gmx_add_pr(ctab0_SSE0, gmx_mul_pr(frac_SSE0, ctab1_SSE0));
- fsub_SSE2 = gmx_add_pr(ctab0_SSE2, gmx_mul_pr(frac_SSE2, ctab1_SSE2));
- frcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_sub_pr(rinv_ex_SSE0, gmx_mul_pr(fsub_SSE0, r_SSE0)));
- frcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_sub_pr(rinv_ex_SSE2, gmx_mul_pr(fsub_SSE2, r_SSE2)));
+ fsub_S0 = gmx_add_pr(ctab0_S0, gmx_mul_pr(frac_S0, ctab1_S0));
+ fsub_S2 = gmx_add_pr(ctab0_S2, gmx_mul_pr(frac_S2, ctab1_S2));
+ frcoul_S0 = gmx_mul_pr(qq_S0, gmx_sub_pr(rinv_ex_S0, gmx_mul_pr(fsub_S0, r_S0)));
+ frcoul_S2 = gmx_mul_pr(qq_S2, gmx_sub_pr(rinv_ex_S2, gmx_mul_pr(fsub_S2, r_S2)));
#ifdef CALC_ENERGIES
- vc_sub_SSE0 = gmx_add_pr(ctabv_SSE0, gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE, frac_SSE0), gmx_add_pr(ctab0_SSE0, fsub_SSE0)));
- vc_sub_SSE2 = gmx_add_pr(ctabv_SSE2, gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE, frac_SSE2), gmx_add_pr(ctab0_SSE2, fsub_SSE2)));
+ vc_sub_S0 = gmx_add_pr(ctabv_S0, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S0), gmx_add_pr(ctab0_S0, fsub_S0)));
+ vc_sub_S2 = gmx_add_pr(ctabv_S2, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S2), gmx_add_pr(ctab0_S2, fsub_S2)));
#endif
#endif /* CALC_COUL_TAB */
#ifndef NO_SHIFT_EWALD
/* Add Ewald potential shift to vc_sub for convenience */
#ifdef CHECK_EXCLS
- vc_sub_SSE0 = gmx_add_pr(vc_sub_SSE0, gmx_and_pr(sh_ewald_SSE, int_SSE0));
- vc_sub_SSE2 = gmx_add_pr(vc_sub_SSE2, gmx_and_pr(sh_ewald_SSE, int_SSE2));
+ vc_sub_S0 = gmx_add_pr(vc_sub_S0, gmx_blendzero_pr(sh_ewald_S, int_S0));
+ vc_sub_S2 = gmx_add_pr(vc_sub_S2, gmx_blendzero_pr(sh_ewald_S, int_S2));
#else
- vc_sub_SSE0 = gmx_add_pr(vc_sub_SSE0, sh_ewald_SSE);
- vc_sub_SSE2 = gmx_add_pr(vc_sub_SSE2, sh_ewald_SSE);
+ vc_sub_S0 = gmx_add_pr(vc_sub_S0, sh_ewald_S);
+ vc_sub_S2 = gmx_add_pr(vc_sub_S2, sh_ewald_S);
#endif
#endif
- vcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_sub_pr(rinv_ex_SSE0, vc_sub_SSE0));
- vcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_sub_pr(rinv_ex_SSE2, vc_sub_SSE2));
+ vcoul_S0 = gmx_mul_pr(qq_S0, gmx_sub_pr(rinv_ex_S0, vc_sub_S0));
+ vcoul_S2 = gmx_mul_pr(qq_S2, gmx_sub_pr(rinv_ex_S2, vc_sub_S2));
#endif
#ifdef CALC_ENERGIES
/* Mask energy for cut-off and diagonal */
- vcoul_SSE0 = gmx_and_pr(vcoul_SSE0, wco_SSE0);
- vcoul_SSE2 = gmx_and_pr(vcoul_SSE2, wco_SSE2);
+ vcoul_S0 = gmx_blendzero_pr(vcoul_S0, wco_S0);
+ vcoul_S2 = gmx_blendzero_pr(vcoul_S2, wco_S2);
#endif
#endif /* CALC_COULOMB */
/* Lennard-Jones interaction */
#ifdef VDW_CUTOFF_CHECK
- wco_vdw_SSE0 = gmx_cmplt_pr(rsq_SSE0, rcvdw2_SSE);
+ wco_vdw_S0 = gmx_cmplt_pr(rsq_S0, rcvdw2_S);
#ifndef HALF_LJ
- wco_vdw_SSE2 = gmx_cmplt_pr(rsq_SSE2, rcvdw2_SSE);
+ wco_vdw_S2 = gmx_cmplt_pr(rsq_S2, rcvdw2_S);
#endif
#else
/* Same cut-off for Coulomb and VdW, reuse the registers */
-#define wco_vdw_SSE0 wco_SSE0
-#define wco_vdw_SSE2 wco_SSE2
+#define wco_vdw_S0 wco_S0
+#define wco_vdw_S2 wco_S2
#endif
#ifndef LJ_COMB_LB
- rinvsix_SSE0 = gmx_mul_pr(rinvsq_SSE0, gmx_mul_pr(rinvsq_SSE0, rinvsq_SSE0));
+ rinvsix_S0 = gmx_mul_pr(rinvsq_S0, gmx_mul_pr(rinvsq_S0, rinvsq_S0));
#ifdef EXCL_FORCES
- rinvsix_SSE0 = gmx_and_pr(rinvsix_SSE0, int_SSE0);
+ rinvsix_S0 = gmx_blendzero_pr(rinvsix_S0, int_S0);
#endif
#ifndef HALF_LJ
- rinvsix_SSE2 = gmx_mul_pr(rinvsq_SSE2, gmx_mul_pr(rinvsq_SSE2, rinvsq_SSE2));
+ rinvsix_S2 = gmx_mul_pr(rinvsq_S2, gmx_mul_pr(rinvsq_S2, rinvsq_S2));
#ifdef EXCL_FORCES
- rinvsix_SSE2 = gmx_and_pr(rinvsix_SSE2, int_SSE2);
+ rinvsix_S2 = gmx_blendzero_pr(rinvsix_S2, int_S2);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
- rinvsix_SSE0 = gmx_and_pr(rinvsix_SSE0, wco_vdw_SSE0);
+ rinvsix_S0 = gmx_blendzero_pr(rinvsix_S0, wco_vdw_S0);
#ifndef HALF_LJ
- rinvsix_SSE2 = gmx_and_pr(rinvsix_SSE2, wco_vdw_SSE2);
+ rinvsix_S2 = gmx_blendzero_pr(rinvsix_S2, wco_vdw_S2);
#endif
#endif
- FrLJ6_SSE0 = gmx_mul_pr(c6_SSE0, rinvsix_SSE0);
+ FrLJ6_S0 = gmx_mul_pr(c6_S0, rinvsix_S0);
#ifndef HALF_LJ
- FrLJ6_SSE2 = gmx_mul_pr(c6_SSE2, rinvsix_SSE2);
+ FrLJ6_S2 = gmx_mul_pr(c6_S2, rinvsix_S2);
#endif
- FrLJ12_SSE0 = gmx_mul_pr(c12_SSE0, gmx_mul_pr(rinvsix_SSE0, rinvsix_SSE0));
+ FrLJ12_S0 = gmx_mul_pr(c12_S0, gmx_mul_pr(rinvsix_S0, rinvsix_S0));
#ifndef HALF_LJ
- FrLJ12_SSE2 = gmx_mul_pr(c12_SSE2, gmx_mul_pr(rinvsix_SSE2, rinvsix_SSE2));
+ FrLJ12_S2 = gmx_mul_pr(c12_S2, gmx_mul_pr(rinvsix_S2, rinvsix_S2));
#endif
#endif /* not LJ_COMB_LB */
#ifdef LJ_COMB_LB
- sir_SSE0 = gmx_mul_pr(sig_SSE0, rinv_SSE0);
+ sir_S0 = gmx_mul_pr(sig_S0, rinv_S0);
#ifndef HALF_LJ
- sir_SSE2 = gmx_mul_pr(sig_SSE2, rinv_SSE2);
+ sir_S2 = gmx_mul_pr(sig_S2, rinv_S2);
#endif
- sir2_SSE0 = gmx_mul_pr(sir_SSE0, sir_SSE0);
+ sir2_S0 = gmx_mul_pr(sir_S0, sir_S0);
#ifndef HALF_LJ
- sir2_SSE2 = gmx_mul_pr(sir_SSE2, sir_SSE2);
+ sir2_S2 = gmx_mul_pr(sir_S2, sir_S2);
#endif
- sir6_SSE0 = gmx_mul_pr(sir2_SSE0, gmx_mul_pr(sir2_SSE0, sir2_SSE0));
+ sir6_S0 = gmx_mul_pr(sir2_S0, gmx_mul_pr(sir2_S0, sir2_S0));
#ifdef EXCL_FORCES
- sir6_SSE0 = gmx_and_pr(sir6_SSE0, int_SSE0);
+ sir6_S0 = gmx_blendzero_pr(sir6_S0, int_S0);
#endif
#ifndef HALF_LJ
- sir6_SSE2 = gmx_mul_pr(sir2_SSE2, gmx_mul_pr(sir2_SSE2, sir2_SSE2));
+ sir6_S2 = gmx_mul_pr(sir2_S2, gmx_mul_pr(sir2_S2, sir2_S2));
#ifdef EXCL_FORCES
- sir6_SSE2 = gmx_and_pr(sir6_SSE2, int_SSE2);
+ sir6_S2 = gmx_blendzero_pr(sir6_S2, int_S2);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
- sir6_SSE0 = gmx_and_pr(sir6_SSE0, wco_vdw_SSE0);
+ sir6_S0 = gmx_blendzero_pr(sir6_S0, wco_vdw_S0);
#ifndef HALF_LJ
- sir6_SSE2 = gmx_and_pr(sir6_SSE2, wco_vdw_SSE2);
+ sir6_S2 = gmx_blendzero_pr(sir6_S2, wco_vdw_S2);
#endif
#endif
- FrLJ6_SSE0 = gmx_mul_pr(eps_SSE0, sir6_SSE0);
+ FrLJ6_S0 = gmx_mul_pr(eps_S0, sir6_S0);
#ifndef HALF_LJ
- FrLJ6_SSE2 = gmx_mul_pr(eps_SSE2, sir6_SSE2);
+ FrLJ6_S2 = gmx_mul_pr(eps_S2, sir6_S2);
#endif
- FrLJ12_SSE0 = gmx_mul_pr(FrLJ6_SSE0, sir6_SSE0);
+ FrLJ12_S0 = gmx_mul_pr(FrLJ6_S0, sir6_S0);
#ifndef HALF_LJ
- FrLJ12_SSE2 = gmx_mul_pr(FrLJ6_SSE2, sir6_SSE2);
+ FrLJ12_S2 = gmx_mul_pr(FrLJ6_S2, sir6_S2);
#endif
#if defined CALC_ENERGIES
/* We need C6 and C12 to calculate the LJ potential shift */
- sig2_SSE0 = gmx_mul_pr(sig_SSE0, sig_SSE0);
+ sig2_S0 = gmx_mul_pr(sig_S0, sig_S0);
#ifndef HALF_LJ
- sig2_SSE2 = gmx_mul_pr(sig_SSE2, sig_SSE2);
+ sig2_S2 = gmx_mul_pr(sig_S2, sig_S2);
#endif
- sig6_SSE0 = gmx_mul_pr(sig2_SSE0, gmx_mul_pr(sig2_SSE0, sig2_SSE0));
+ sig6_S0 = gmx_mul_pr(sig2_S0, gmx_mul_pr(sig2_S0, sig2_S0));
#ifndef HALF_LJ
- sig6_SSE2 = gmx_mul_pr(sig2_SSE2, gmx_mul_pr(sig2_SSE2, sig2_SSE2));
+ sig6_S2 = gmx_mul_pr(sig2_S2, gmx_mul_pr(sig2_S2, sig2_S2));
#endif
- c6_SSE0 = gmx_mul_pr(eps_SSE0, sig6_SSE0);
+ c6_S0 = gmx_mul_pr(eps_S0, sig6_S0);
#ifndef HALF_LJ
- c6_SSE2 = gmx_mul_pr(eps_SSE2, sig6_SSE2);
+ c6_S2 = gmx_mul_pr(eps_S2, sig6_S2);
#endif
- c12_SSE0 = gmx_mul_pr(c6_SSE0, sig6_SSE0);
+ c12_S0 = gmx_mul_pr(c6_S0, sig6_S0);
#ifndef HALF_LJ
- c12_SSE2 = gmx_mul_pr(c6_SSE2, sig6_SSE2);
+ c12_S2 = gmx_mul_pr(c6_S2, sig6_S2);
#endif
#endif
#endif /* LJ_COMB_LB */
#ifdef CALC_COULOMB
#ifndef ENERGY_GROUPS
- vctotSSE = gmx_add_pr(vctotSSE, gmx_add_pr(vcoul_SSE0, vcoul_SSE2));
+ vctot_S = gmx_add_pr(vctot_S, gmx_add_pr(vcoul_S0, vcoul_S2));
#else
- add_ener_grp_halves(vcoul_SSE0, vctp[0], vctp[1], egp_jj);
- add_ener_grp_halves(vcoul_SSE2, vctp[2], vctp[3], egp_jj);
+ add_ener_grp_halves(vcoul_S0, vctp[0], vctp[1], egp_jj);
+ add_ener_grp_halves(vcoul_S2, vctp[2], vctp[3], egp_jj);
#endif
#endif
#ifdef CALC_LJ
/* Calculate the LJ energies */
- VLJ6_SSE0 = gmx_mul_pr(sixthSSE, gmx_sub_pr(FrLJ6_SSE0, gmx_mul_pr(c6_SSE0, sh_invrc6_SSE)));
+ VLJ6_S0 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S0, gmx_mul_pr(c6_S0, sh_invrc6_S)));
#ifndef HALF_LJ
- VLJ6_SSE2 = gmx_mul_pr(sixthSSE, gmx_sub_pr(FrLJ6_SSE2, gmx_mul_pr(c6_SSE2, sh_invrc6_SSE)));
+ VLJ6_S2 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S2, gmx_mul_pr(c6_S2, sh_invrc6_S)));
#endif
- VLJ12_SSE0 = gmx_mul_pr(twelvethSSE, gmx_sub_pr(FrLJ12_SSE0, gmx_mul_pr(c12_SSE0, sh_invrc12_SSE)));
+ VLJ12_S0 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S0, gmx_mul_pr(c12_S0, sh_invrc12_S)));
#ifndef HALF_LJ
- VLJ12_SSE2 = gmx_mul_pr(twelvethSSE, gmx_sub_pr(FrLJ12_SSE2, gmx_mul_pr(c12_SSE2, sh_invrc12_SSE)));
+ VLJ12_S2 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S2, gmx_mul_pr(c12_S2, sh_invrc12_S)));
#endif
- VLJ_SSE0 = gmx_sub_pr(VLJ12_SSE0, VLJ6_SSE0);
+ VLJ_S0 = gmx_sub_pr(VLJ12_S0, VLJ6_S0);
#ifndef HALF_LJ
- VLJ_SSE2 = gmx_sub_pr(VLJ12_SSE2, VLJ6_SSE2);
+ VLJ_S2 = gmx_sub_pr(VLJ12_S2, VLJ6_S2);
#endif
/* The potential shift should be removed for pairs beyond cut-off */
- VLJ_SSE0 = gmx_and_pr(VLJ_SSE0, wco_vdw_SSE0);
+ VLJ_S0 = gmx_blendzero_pr(VLJ_S0, wco_vdw_S0);
#ifndef HALF_LJ
- VLJ_SSE2 = gmx_and_pr(VLJ_SSE2, wco_vdw_SSE2);
+ VLJ_S2 = gmx_blendzero_pr(VLJ_S2, wco_vdw_S2);
#endif
#ifdef CHECK_EXCLS
/* The potential shift should be removed for excluded pairs */
- VLJ_SSE0 = gmx_and_pr(VLJ_SSE0, int_SSE0);
+ VLJ_S0 = gmx_blendzero_pr(VLJ_S0, int_S0);
#ifndef HALF_LJ
- VLJ_SSE2 = gmx_and_pr(VLJ_SSE2, int_SSE2);
+ VLJ_S2 = gmx_blendzero_pr(VLJ_S2, int_S2);
#endif
#endif
#ifndef ENERGY_GROUPS
- VvdwtotSSE = gmx_add_pr(VvdwtotSSE,
+ Vvdwtot_S = gmx_add_pr(Vvdwtot_S,
#ifndef HALF_LJ
- gmx_add_pr(VLJ_SSE0, VLJ_SSE2)
+ gmx_add_pr(VLJ_S0, VLJ_S2)
#else
- VLJ_SSE0
+ VLJ_S0
#endif
- );
+ );
#else
- add_ener_grp_halves(VLJ_SSE0, vvdwtp[0], vvdwtp[1], egp_jj);
+ add_ener_grp_halves(VLJ_S0, vvdwtp[0], vvdwtp[1], egp_jj);
#ifndef HALF_LJ
- add_ener_grp_halves(VLJ_SSE2, vvdwtp[2], vvdwtp[3], egp_jj);
+ add_ener_grp_halves(VLJ_S2, vvdwtp[2], vvdwtp[3], egp_jj);
#endif
#endif
#endif /* CALC_LJ */
#endif /* CALC_ENERGIES */
#ifdef CALC_LJ
- fscal_SSE0 = gmx_mul_pr(rinvsq_SSE0,
+ fscal_S0 = gmx_mul_pr(rinvsq_S0,
#ifdef CALC_COULOMB
- gmx_add_pr(frcoul_SSE0,
+ gmx_add_pr(frcoul_S0,
#else
- (
+ (
#endif
- gmx_sub_pr(FrLJ12_SSE0, FrLJ6_SSE0)));
+ gmx_sub_pr(FrLJ12_S0, FrLJ6_S0)));
#else
- fscal_SSE0 = gmx_mul_pr(rinvsq_SSE0, frcoul_SSE0);
+ fscal_S0 = gmx_mul_pr(rinvsq_S0, frcoul_S0);
#endif /* CALC_LJ */
#if defined CALC_LJ && !defined HALF_LJ
- fscal_SSE2 = gmx_mul_pr(rinvsq_SSE2,
+ fscal_S2 = gmx_mul_pr(rinvsq_S2,
#ifdef CALC_COULOMB
- gmx_add_pr(frcoul_SSE2,
+ gmx_add_pr(frcoul_S2,
#else
- (
+ (
#endif
- gmx_sub_pr(FrLJ12_SSE2, FrLJ6_SSE2)));
+ gmx_sub_pr(FrLJ12_S2, FrLJ6_S2)));
#else
/* Atom 2 and 3 don't have LJ, so only add Coulomb forces */
- fscal_SSE2 = gmx_mul_pr(rinvsq_SSE2, frcoul_SSE2);
+ fscal_S2 = gmx_mul_pr(rinvsq_S2, frcoul_S2);
#endif
/* Calculate temporary vectorial force */
- tx_SSE0 = gmx_mul_pr(fscal_SSE0, dx_SSE0);
- tx_SSE2 = gmx_mul_pr(fscal_SSE2, dx_SSE2);
- ty_SSE0 = gmx_mul_pr(fscal_SSE0, dy_SSE0);
- ty_SSE2 = gmx_mul_pr(fscal_SSE2, dy_SSE2);
- tz_SSE0 = gmx_mul_pr(fscal_SSE0, dz_SSE0);
- tz_SSE2 = gmx_mul_pr(fscal_SSE2, dz_SSE2);
+ tx_S0 = gmx_mul_pr(fscal_S0, dx_S0);
+ tx_S2 = gmx_mul_pr(fscal_S2, dx_S2);
+ ty_S0 = gmx_mul_pr(fscal_S0, dy_S0);
+ ty_S2 = gmx_mul_pr(fscal_S2, dy_S2);
+ tz_S0 = gmx_mul_pr(fscal_S0, dz_S0);
+ tz_S2 = gmx_mul_pr(fscal_S2, dz_S2);
/* Increment i atom force */
- fix_SSE0 = gmx_add_pr(fix_SSE0, tx_SSE0);
- fix_SSE2 = gmx_add_pr(fix_SSE2, tx_SSE2);
- fiy_SSE0 = gmx_add_pr(fiy_SSE0, ty_SSE0);
- fiy_SSE2 = gmx_add_pr(fiy_SSE2, ty_SSE2);
- fiz_SSE0 = gmx_add_pr(fiz_SSE0, tz_SSE0);
- fiz_SSE2 = gmx_add_pr(fiz_SSE2, tz_SSE2);
+ fix_S0 = gmx_add_pr(fix_S0, tx_S0);
+ fix_S2 = gmx_add_pr(fix_S2, tx_S2);
+ fiy_S0 = gmx_add_pr(fiy_S0, ty_S0);
+ fiy_S2 = gmx_add_pr(fiy_S2, ty_S2);
+ fiz_S0 = gmx_add_pr(fiz_S0, tz_S0);
+ fiz_S2 = gmx_add_pr(fiz_S2, tz_S2);
/* Decrement j atom force */
- gmx_store_hpr(f+ajx,
- gmx_sub_hpr( gmx_load_hpr(f+ajx), gmx_sum4_hpr(tx_SSE0, tx_SSE2) ));
- gmx_store_hpr(f+ajy,
- gmx_sub_hpr( gmx_load_hpr(f+ajy), gmx_sum4_hpr(ty_SSE0, ty_SSE2) ));
- gmx_store_hpr(f+ajz,
- gmx_sub_hpr( gmx_load_hpr(f+ajz), gmx_sum4_hpr(tz_SSE0, tz_SSE2) ));
+ gmx_load_hpr(fjx_S, f+ajx);
+ gmx_load_hpr(fjy_S, f+ajy);
+ gmx_load_hpr(fjz_S, f+ajz);
+ gmx_store_hpr(f+ajx, gmx_sub_hpr(fjx_S, gmx_sum4_hpr(tx_S0, tx_S2)));
+ gmx_store_hpr(f+ajy, gmx_sub_hpr(fjy_S, gmx_sum4_hpr(ty_S0, ty_S2)));
+ gmx_store_hpr(f+ajz, gmx_sub_hpr(fjz_S, gmx_sum4_hpr(tz_S0, tz_S2)));
}
-#undef rinv_ex_SSE0
-#undef rinv_ex_SSE2
+#undef rinv_ex_S0
+#undef rinv_ex_S2
-#undef wco_vdw_SSE0
-#undef wco_vdw_SSE2
+#undef wco_vdw_S0
+#undef wco_vdw_S2
#undef CUTOFF_BLENDV
* the research papers on the package. Check out http://www.gromacs.org.
*/
-/* GMX_MM256_HERE should be set before including this file */
+
+/* Include the full-width SIMD macros */
#include "gmx_simd_macros.h"
+
+/* Define a few macros for half-width SIMD */
+#if defined GMX_X86_AVX_256 && !defined GMX_DOUBLE
+
+/* Half-width SIMD real type */
+#define gmx_mm_hpr __m128
+
+/* Half-width SIMD operations */
+/* Load reals at half-width aligned pointer b into half-width SIMD register a */
+#define gmx_load_hpr(a, b) a = _mm_load_ps(b)
+/* Load one real at pointer b into half-width SIMD register a */
+#define gmx_load1_hpr(a, b) a = _mm_load1_ps(b)
+/* Load one real at b into the low half of a and one real at b+1 into the high half */
+#define gmx_load1p1_pr(a, b) a = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm_load1_ps(b)), _mm_load1_ps(b+1), 0x1)
+/* Load reals at half-width aligned pointer b into both halves of a */
+#define gmx_loaddh_pr(a, b) a = gmx_mm256_load4_ps(b)
+/* Store half-width SIMD register b into half-width aligned memory a */
+#define gmx_store_hpr(a, b) _mm_store_ps(a, b)
+#define gmx_add_hpr _mm_add_ps
+#define gmx_sub_hpr _mm_sub_ps
+/* Sum the four half-width parts of the two full-width inputs into one half-width result */
+#define gmx_sum4_hpr gmx_mm256_sum4h_m128
+
+#else
+#error "Half-width SIMD macros are not yet defined"
+#endif
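+
+/* Usage sketch, as in the 2x(N+N) kernel below: i-atom data is broadcast
+ * pairwise into the two halves of a full-width register, while one
+ * half-width set of j-atom data is loaded into both halves:
+ *   gmx_load1p1_pr(ix_S0, x+scix);
+ *   gmx_loaddh_pr(jx_S, x+ajx);
+ */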
+
+
#define SUM_SIMD4(x) (x[0]+x[1]+x[2]+x[3])
#define UNROLLI NBNXN_CPU_CLUSTER_I_SIZE
#define UNROLLJ (GMX_SIMD_WIDTH_HERE/2)
-#if defined GMX_MM256_HERE
-#define STRIDE 4
-#endif
+/* The stride of all the atom data arrays is equal to half the SIMD width */
+#define STRIDE (GMX_SIMD_WIDTH_HERE/2)
-#ifdef GMX_MM256_HERE
-#ifndef GMX_DOUBLE
-/* single precision 2x(4+4) kernel */
+#if GMX_SIMD_WIDTH_HERE == 8
#define SUM_SIMD(x) (x[0]+x[1]+x[2]+x[3]+x[4]+x[5]+x[6]+x[7])
-#define TAB_FDV0
+#else
+#if GMX_SIMD_WIDTH_HERE == 16
+/* This is getting ridiculous, SIMD horizontal adds would help,
+ * but this is not performance critical (only used to reduce energies)
+ */
+#define SUM_SIMD(x) (x[0]+x[1]+x[2]+x[3]+x[4]+x[5]+x[6]+x[7]+x[8]+x[9]+x[10]+x[11]+x[12]+x[13]+x[14]+x[15])
#else
#error "unsupported kernel configuration"
#endif
#endif
+
+#if defined GMX_X86_AVX_256 && !defined GMX_DOUBLE
+/* With the AVX-256 single precision 2x(4+4) kernel
+ * we can do half SIMD-width aligned FDV0 table loads.
+ */
+#define TAB_FDV0
+#endif
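+/* In the FDV0 format the force F, force difference D and energy V are
+ * stored per table point with a zero padding value, stride 4, so one
+ * aligned 4-wide load fetches all table data for a point.
+ */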
+
+
#define SIMD_MASK_ALL 0xffffffff
#include "nbnxn_kernel_simd_utils.h"
real *vctp[UNROLLI];
#endif
- gmx_mm_pr shX_SSE;
- gmx_mm_pr shY_SSE;
- gmx_mm_pr shZ_SSE;
- gmx_mm_pr ix_SSE0, iy_SSE0, iz_SSE0;
- gmx_mm_pr ix_SSE2, iy_SSE2, iz_SSE2;
- gmx_mm_pr fix_SSE0, fiy_SSE0, fiz_SSE0;
- gmx_mm_pr fix_SSE2, fiy_SSE2, fiz_SSE2;
+ gmx_mm_pr shX_S;
+ gmx_mm_pr shY_S;
+ gmx_mm_pr shZ_S;
+ gmx_mm_pr ix_S0, iy_S0, iz_S0;
+ gmx_mm_pr ix_S2, iy_S2, iz_S2;
+ gmx_mm_pr fix_S0, fiy_S0, fiz_S0;
+ gmx_mm_pr fix_S2, fiy_S2, fiz_S2;
#if UNROLLJ >= 4
#ifndef GMX_DOUBLE
- __m128 fix_SSE, fiy_SSE, fiz_SSE;
+ __m128 fix_S, fiy_S, fiz_S;
#else
- __m256d fix_SSE, fiy_SSE, fiz_SSE;
+ __m256d fix_S, fiy_S, fiz_S;
#endif
#else
- __m128d fix0_SSE, fiy0_SSE, fiz0_SSE;
- __m128d fix2_SSE, fiy2_SSE, fiz2_SSE;
+ __m128d fix0_S, fiy0_S, fiz0_S;
+ __m128d fix2_S, fiy2_S, fiz2_S;
#endif
- /* AVX: use floating point masks, as there are no integer instructions */
- gmx_mm_pr mask0 = _mm256_castsi256_ps(_mm256_set_epi32( 0x0080, 0x0040, 0x0020, 0x0010, 0x0008, 0x0004, 0x0002, 0x0001 ));
- gmx_mm_pr mask2 = _mm256_castsi256_ps(_mm256_set_epi32( 0x8000, 0x4000, 0x2000, 0x1000, 0x0800, 0x0400, 0x0200, 0x0100 ));
-
- gmx_mm_pr diag_jmi_SSE;
+ gmx_mm_pr diag_jmi_S;
#if UNROLLI == UNROLLJ
- gmx_mm_pr diag_SSE0, diag_SSE2;
+ gmx_mm_pr diag_S0, diag_S2;
#else
- gmx_mm_pr diag0_SSE0, diag0_SSE2;
- gmx_mm_pr diag1_SSE0, diag1_SSE2;
+ gmx_mm_pr diag0_S0, diag0_S2;
+ gmx_mm_pr diag1_S0, diag1_S2;
#endif
- gmx_mm_pr zero_SSE = gmx_set1_pr(0);
+ gmx_mm_pr mask_S0, mask_S2;
+
+ gmx_mm_pr zero_S = gmx_set1_pr(0);
- gmx_mm_pr one_SSE = gmx_set1_pr(1.0);
- gmx_mm_pr iq_SSE0 = gmx_setzero_pr();
- gmx_mm_pr iq_SSE2 = gmx_setzero_pr();
- gmx_mm_pr mrc_3_SSE;
+ gmx_mm_pr one_S = gmx_set1_pr(1.0);
+ gmx_mm_pr iq_S0 = gmx_setzero_pr();
+ gmx_mm_pr iq_S2 = gmx_setzero_pr();
+ gmx_mm_pr mrc_3_S;
#ifdef CALC_ENERGIES
- gmx_mm_pr hrc_3_SSE, moh_rc_SSE;
+ gmx_mm_pr hrc_3_S, moh_rc_S;
#endif
#ifdef CALC_COUL_TAB
/* Coulomb table variables */
- gmx_mm_pr invtsp_SSE;
+ gmx_mm_pr invtsp_S;
const real *tab_coul_F;
#ifndef TAB_FDV0
const real *tab_coul_V;
#endif
-#ifdef GMX_MM256_HERE
- int ti0_array[2*GMX_SIMD_WIDTH_HERE-1], *ti0;
- int ti2_array[2*GMX_SIMD_WIDTH_HERE-1], *ti2;
-#endif
+ int ti0_array[2*GMX_SIMD_WIDTH_HERE], *ti0;
+ int ti2_array[2*GMX_SIMD_WIDTH_HERE], *ti2;
#ifdef CALC_ENERGIES
- gmx_mm_pr mhalfsp_SSE;
+ gmx_mm_pr mhalfsp_S;
#endif
#endif
#ifdef CALC_COUL_EWALD
- gmx_mm_pr beta2_SSE, beta_SSE;
+ gmx_mm_pr beta2_S, beta_S;
#endif
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
- gmx_mm_pr sh_ewald_SSE;
+ gmx_mm_pr sh_ewald_S;
#endif
#ifdef LJ_COMB_LB
const real *ljc;
- gmx_mm_pr hsig_i_SSE0, seps_i_SSE0;
- gmx_mm_pr hsig_i_SSE2, seps_i_SSE2;
+ gmx_mm_pr hsig_i_S0, seps_i_S0;
+ gmx_mm_pr hsig_i_S2, seps_i_S2;
#else
#ifdef FIX_LJ_C
- real pvdw_array[2*UNROLLI*UNROLLJ+3];
+ real pvdw_array[2*UNROLLI*UNROLLJ+GMX_SIMD_WIDTH_HERE];
real *pvdw_c6, *pvdw_c12;
- gmx_mm_pr c6_SSE0, c12_SSE0;
- gmx_mm_pr c6_SSE2, c12_SSE2;
+ gmx_mm_pr c6_S0, c12_S0;
+ gmx_mm_pr c6_S2, c12_S2;
#endif
#ifdef LJ_COMB_GEOM
const real *ljc;
- gmx_mm_pr c6s_SSE0, c12s_SSE0;
- gmx_mm_pr c6s_SSE1, c12s_SSE1;
- gmx_mm_pr c6s_SSE2 = gmx_setzero_pr(), c12s_SSE2 = gmx_setzero_pr();
- gmx_mm_pr c6s_SSE3 = gmx_setzero_pr(), c12s_SSE3 = gmx_setzero_pr();
+ gmx_mm_pr c6s_S0, c12s_S0;
+ gmx_mm_pr c6s_S1, c12s_S1;
+ gmx_mm_pr c6s_S2 = gmx_setzero_pr(), c12s_S2 = gmx_setzero_pr();
+ gmx_mm_pr c6s_S3 = gmx_setzero_pr(), c12s_S3 = gmx_setzero_pr();
#endif
#endif /* LJ_COMB_LB */
- gmx_mm_pr vctotSSE, VvdwtotSSE;
- gmx_mm_pr sixthSSE, twelvethSSE;
+ gmx_mm_pr vctot_S, Vvdwtot_S;
+ gmx_mm_pr sixth_S, twelveth_S;
- gmx_mm_pr avoid_sing_SSE;
- gmx_mm_pr rc2_SSE;
+ gmx_mm_pr avoid_sing_S;
+ gmx_mm_pr rc2_S;
#ifdef VDW_CUTOFF_CHECK
- gmx_mm_pr rcvdw2_SSE;
+ gmx_mm_pr rcvdw2_S;
#endif
#ifdef CALC_ENERGIES
- gmx_mm_pr sh_invrc6_SSE, sh_invrc12_SSE;
+ gmx_mm_pr sh_invrc6_S, sh_invrc12_S;
/* cppcheck-suppress unassignedVariable */
- real tmpsum_array[15], *tmpsum;
+ real tmpsum_array[2*GMX_SIMD_WIDTH_HERE], *tmpsum;
#endif
#ifdef CALC_SHIFTFORCES
/* cppcheck-suppress unassignedVariable */
- real shf_array[15], *shf;
+ real shf_array[2*GMX_SIMD_WIDTH_HERE], *shf;
#endif
int ninner;
#endif
/* Load j-i for the first i */
- diag_jmi_SSE = gmx_load_pr(nbat->simd_2xnn_diag);
+ diag_jmi_S = gmx_load_pr(nbat->simd_2xnn_diag);
/* Generate all the diagonal masks as comparison results */
#if UNROLLI == UNROLLJ
- diag_SSE0 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
- diag_jmi_SSE = gmx_sub_pr(diag_jmi_SSE, one_SSE);
- diag_jmi_SSE = gmx_sub_pr(diag_jmi_SSE, one_SSE);
- diag_SSE2 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
+ diag_S0 = gmx_cmplt_pr(zero_S, diag_jmi_S);
+ diag_jmi_S = gmx_sub_pr(diag_jmi_S, one_S);
+ diag_jmi_S = gmx_sub_pr(diag_jmi_S, one_S);
+ diag_S2 = gmx_cmplt_pr(zero_S, diag_jmi_S);
#else
#if 2*UNROLLI == UNROLLJ
- diag0_SSE0 = gmx_cmplt_pr(diag_i_SSE, diag_j_SSE);
- diag_i_SSE = gmx_add_pr(diag_i_SSE, one_SSE);
- diag_i_SSE = gmx_add_pr(diag_i_SSE, one_SSE);
- diag0_SSE2 = gmx_cmplt_pr(diag_i_SSE, diag_j_SSE);
- diag_i_SSE = gmx_add_pr(diag_i_SSE, one_SSE);
- diag_i_SSE = gmx_add_pr(diag_i_SSE, one_SSE);
- diag1_SSE0 = gmx_cmplt_pr(diag_i_SSE, diag_j_SSE);
- diag_i_SSE = gmx_add_pr(diag_i_SSE, one_SSE);
- diag_i_SSE = gmx_add_pr(diag_i_SSE, one_SSE);
- diag1_SSE2 = gmx_cmplt_pr(diag_i_SSE, diag_j_SSE);
+ diag0_S0 = gmx_cmplt_pr(diag_i_S, diag_j_S);
+ diag_i_S = gmx_add_pr(diag_i_S, one_S);
+ diag_i_S = gmx_add_pr(diag_i_S, one_S);
+ diag0_S2 = gmx_cmplt_pr(diag_i_S, diag_j_S);
+ diag_i_S = gmx_add_pr(diag_i_S, one_S);
+ diag_i_S = gmx_add_pr(diag_i_S, one_S);
+ diag1_S0 = gmx_cmplt_pr(diag_i_S, diag_j_S);
+ diag_i_S = gmx_add_pr(diag_i_S, one_S);
+ diag_i_S = gmx_add_pr(diag_i_S, one_S);
+ diag1_S2 = gmx_cmplt_pr(diag_i_S, diag_j_S);
#endif
#endif
+ /* Load masks for topology exclusion masking */
+ mask_S0 = gmx_load_pr((real *)nbat->simd_excl_mask + 0*2*UNROLLJ);
+ mask_S2 = gmx_load_pr((real *)nbat->simd_excl_mask + 1*2*UNROLLJ);
+
#ifdef CALC_COUL_TAB
-#ifdef GMX_MM256_HERE
/* Generate aligned table index pointers */
- ti0 = (int *)(((size_t)(ti0_array+GMX_SIMD_WIDTH_HERE-1)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int)-1))));
- ti2 = (int *)(((size_t)(ti2_array+GMX_SIMD_WIDTH_HERE-1)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int)-1))));
-#endif
+ ti0 = gmx_simd_align_int(ti0_array);
+ ti2 = gmx_simd_align_int(ti2_array);
- invtsp_SSE = gmx_set1_pr(ic->tabq_scale);
+ invtsp_S = gmx_set1_pr(ic->tabq_scale);
#ifdef CALC_ENERGIES
- mhalfsp_SSE = gmx_set1_pr(-0.5/ic->tabq_scale);
+ mhalfsp_S = gmx_set1_pr(-0.5/ic->tabq_scale);
#endif
#ifdef TAB_FDV0
#endif /* CALC_COUL_TAB */
#ifdef CALC_COUL_EWALD
- beta2_SSE = gmx_set1_pr(ic->ewaldcoeff*ic->ewaldcoeff);
- beta_SSE = gmx_set1_pr(ic->ewaldcoeff);
+ beta2_S = gmx_set1_pr(ic->ewaldcoeff*ic->ewaldcoeff);
+ beta_S = gmx_set1_pr(ic->ewaldcoeff);
#endif
#if (defined CALC_COUL_TAB || defined CALC_COUL_EWALD) && defined CALC_ENERGIES
- sh_ewald_SSE = gmx_set1_pr(ic->sh_ewald);
+ sh_ewald_S = gmx_set1_pr(ic->sh_ewald);
#endif
q = nbat->q;
shiftvec = shift_vec[0];
x = nbat->x;
- avoid_sing_SSE = gmx_set1_pr(NBNXN_AVOID_SING_R2_INC);
+ avoid_sing_S = gmx_set1_pr(NBNXN_AVOID_SING_R2_INC);
/* The kernel either supports rcoulomb = rvdw or rcoulomb >= rvdw */
- rc2_SSE = gmx_set1_pr(ic->rcoulomb*ic->rcoulomb);
+ rc2_S = gmx_set1_pr(ic->rcoulomb*ic->rcoulomb);
#ifdef VDW_CUTOFF_CHECK
- rcvdw2_SSE = gmx_set1_pr(ic->rvdw*ic->rvdw);
+ rcvdw2_S = gmx_set1_pr(ic->rvdw*ic->rvdw);
#endif
#ifdef CALC_ENERGIES
- sixthSSE = gmx_set1_pr(1.0/6.0);
- twelvethSSE = gmx_set1_pr(1.0/12.0);
+ sixth_S = gmx_set1_pr(1.0/6.0);
+ twelveth_S = gmx_set1_pr(1.0/12.0);
- sh_invrc6_SSE = gmx_set1_pr(ic->sh_invrc6);
- sh_invrc12_SSE = gmx_set1_pr(ic->sh_invrc6*ic->sh_invrc6);
+ sh_invrc6_S = gmx_set1_pr(ic->sh_invrc6);
+ sh_invrc12_S = gmx_set1_pr(ic->sh_invrc6*ic->sh_invrc6);
#endif
- mrc_3_SSE = gmx_set1_pr(-2*ic->k_rf);
+ mrc_3_S = gmx_set1_pr(-2*ic->k_rf);
#ifdef CALC_ENERGIES
- hrc_3_SSE = gmx_set1_pr(ic->k_rf);
+ hrc_3_S = gmx_set1_pr(ic->k_rf);
- moh_rc_SSE = gmx_set1_pr(-ic->c_rf);
+ moh_rc_S = gmx_set1_pr(-ic->c_rf);
#endif
#ifdef CALC_ENERGIES
- tmpsum = (real *)(((size_t)(tmpsum_array+7)) & (~((size_t)31)));
+ tmpsum = gmx_simd_align_real(tmpsum_array);
#endif
#ifdef CALC_SHIFTFORCES
- shf = (real *)(((size_t)(shf_array+7)) & (~((size_t)31)));
+ shf = gmx_simd_align_real(shf_array);
#endif
#ifdef FIX_LJ_C
- pvdw_c6 = (real *)(((size_t)(pvdw_array+3)) & (~((size_t)15)));
+ pvdw_c6 = gmx_simd_align_real(pvdw_array);
pvdw_c12 = pvdw_c6 + UNROLLI*UNROLLJ;
for (jp = 0; jp < UNROLLJ; jp++)
pvdw_c12[2*UNROLLJ+jp] = nbat->nbfp[0*2+1];
pvdw_c12[3*UNROLLJ+jp] = nbat->nbfp[0*2+1];
}
- c6_SSE0 = gmx_load_pr(pvdw_c6 +0*UNROLLJ);
- c6_SSE1 = gmx_load_pr(pvdw_c6 +1*UNROLLJ);
- c6_SSE2 = gmx_load_pr(pvdw_c6 +2*UNROLLJ);
- c6_SSE3 = gmx_load_pr(pvdw_c6 +3*UNROLLJ);
-
- c12_SSE0 = gmx_load_pr(pvdw_c12+0*UNROLLJ);
- c12_SSE1 = gmx_load_pr(pvdw_c12+1*UNROLLJ);
- c12_SSE2 = gmx_load_pr(pvdw_c12+2*UNROLLJ);
- c12_SSE3 = gmx_load_pr(pvdw_c12+3*UNROLLJ);
+ c6_S0 = gmx_load_pr(pvdw_c6 +0*UNROLLJ);
+ c6_S1 = gmx_load_pr(pvdw_c6 +1*UNROLLJ);
+ c6_S2 = gmx_load_pr(pvdw_c6 +2*UNROLLJ);
+ c6_S3 = gmx_load_pr(pvdw_c6 +3*UNROLLJ);
+
+ c12_S0 = gmx_load_pr(pvdw_c12+0*UNROLLJ);
+ c12_S1 = gmx_load_pr(pvdw_c12+1*UNROLLJ);
+ c12_S2 = gmx_load_pr(pvdw_c12+2*UNROLLJ);
+ c12_S3 = gmx_load_pr(pvdw_c12+3*UNROLLJ);
#endif /* FIX_LJ_C */
#ifdef ENERGY_GROUPS
ci = nbln->ci;
ci_sh = (ish == CENTRAL ? ci : -1);
- shX_SSE = gmx_load1_pr(shiftvec+ish3);
- shY_SSE = gmx_load1_pr(shiftvec+ish3+1);
- shZ_SSE = gmx_load1_pr(shiftvec+ish3+2);
+ shX_S = gmx_load1_pr(shiftvec+ish3);
+ shY_S = gmx_load1_pr(shiftvec+ish3+1);
+ shZ_S = gmx_load1_pr(shiftvec+ish3+2);
#if UNROLLJ <= 4
sci = ci*STRIDE;
}
#endif
-#define gmx_load2_hpr(x) _mm256_insertf128_ps(gmx_load1_pr(x), gmx_load1_hpr(x+1), 1)
-
/* Load i atom data */
sciy = scix + STRIDE;
sciz = sciy + STRIDE;
- ix_SSE0 = gmx_add_pr(gmx_load2_hpr(x+scix), shX_SSE);
- ix_SSE2 = gmx_add_pr(gmx_load2_hpr(x+scix+2), shX_SSE);
- iy_SSE0 = gmx_add_pr(gmx_load2_hpr(x+sciy), shY_SSE);
- iy_SSE2 = gmx_add_pr(gmx_load2_hpr(x+sciy+2), shY_SSE);
- iz_SSE0 = gmx_add_pr(gmx_load2_hpr(x+sciz), shZ_SSE);
- iz_SSE2 = gmx_add_pr(gmx_load2_hpr(x+sciz+2), shZ_SSE);
+ gmx_load1p1_pr(ix_S0, x+scix);
+ gmx_load1p1_pr(ix_S2, x+scix+2);
+ gmx_load1p1_pr(iy_S0, x+sciy);
+ gmx_load1p1_pr(iy_S2, x+sciy+2);
+ gmx_load1p1_pr(iz_S0, x+sciz);
+ gmx_load1p1_pr(iz_S2, x+sciz+2);
+ ix_S0 = gmx_add_pr(ix_S0, shX_S);
+ ix_S2 = gmx_add_pr(ix_S2, shX_S);
+ iy_S0 = gmx_add_pr(iy_S0, shY_S);
+ iy_S2 = gmx_add_pr(iy_S2, shY_S);
+ iz_S0 = gmx_add_pr(iz_S0, shZ_S);
+ iz_S2 = gmx_add_pr(iz_S2, shZ_S);
if (do_coul)
{
- gmx_mm_pr facel_SSE;
+ gmx_mm_pr facel_S;
- facel_SSE = gmx_set1_pr(facel);
+ facel_S = gmx_set1_pr(facel);
- iq_SSE0 = gmx_mul_pr(facel_SSE, gmx_load2_hpr(q+sci));
- iq_SSE2 = gmx_mul_pr(facel_SSE, gmx_load2_hpr(q+sci+2));
+ gmx_load1p1_pr(iq_S0, q+sci);
+ gmx_load1p1_pr(iq_S2, q+sci+2);
+ iq_S0 = gmx_mul_pr(facel_S, iq_S0);
+ iq_S2 = gmx_mul_pr(facel_S, iq_S2);
}
#ifdef LJ_COMB_LB
- hsig_i_SSE0 = gmx_load2_hpr(ljc+sci2+0);
- hsig_i_SSE2 = gmx_load2_hpr(ljc+sci2+2);
- seps_i_SSE0 = gmx_load2_hpr(ljc+sci2+STRIDE+0);
- seps_i_SSE2 = gmx_load2_hpr(ljc+sci2+STRIDE+2);
+ gmx_load1p1_pr(hsig_i_S0, ljc+sci2+0);
+ gmx_load1p1_pr(hsig_i_S2, ljc+sci2+2);
+ gmx_load1p1_pr(seps_i_S0, ljc+sci2+STRIDE+0);
+ gmx_load1p1_pr(seps_i_S2, ljc+sci2+STRIDE+2);
#else
#ifdef LJ_COMB_GEOM
- c6s_SSE0 = gmx_load2_hpr(ljc+sci2+0);
+ gmx_load1p1_pr(c6s_S0, ljc+sci2+0);
if (!half_LJ)
{
- c6s_SSE2 = gmx_load2_hpr(ljc+sci2+2);
+ gmx_load1p1_pr(c6s_S2, ljc+sci2+2);
}
- c12s_SSE0 = gmx_load2_hpr(ljc+sci2+STRIDE+0);
+ gmx_load1p1_pr(c12s_S0, ljc+sci2+STRIDE+0);
if (!half_LJ)
{
- c12s_SSE2 = gmx_load2_hpr(ljc+sci2+STRIDE+2);
+ gmx_load1p1_pr(c12s_S2, ljc+sci2+STRIDE+2);
}
#else
nbfp0 = nbfp_ptr + type[sci ]*nbat->ntype*nbfp_stride;
#endif
/* Zero the potential energy for this list */
- VvdwtotSSE = gmx_setzero_pr();
- vctotSSE = gmx_setzero_pr();
+ Vvdwtot_S = gmx_setzero_pr();
+ vctot_S = gmx_setzero_pr();
/* Clear i atom forces */
- fix_SSE0 = gmx_setzero_pr();
- fix_SSE2 = gmx_setzero_pr();
- fiy_SSE0 = gmx_setzero_pr();
- fiy_SSE2 = gmx_setzero_pr();
- fiz_SSE0 = gmx_setzero_pr();
- fiz_SSE2 = gmx_setzero_pr();
+ fix_S0 = gmx_setzero_pr();
+ fix_S2 = gmx_setzero_pr();
+ fiy_S0 = gmx_setzero_pr();
+ fiy_S2 = gmx_setzero_pr();
+ fiz_S0 = gmx_setzero_pr();
+ fiz_S2 = gmx_setzero_pr();
cjind = cjind0;
ninner += cjind1 - cjind0;
/* Add accumulated i-forces to the force array */
-#if UNROLLJ >= 4
-#ifndef GMX_DOUBLE
-#define gmx_load_ps4 _mm_load_ps
-#define gmx_store_ps4 _mm_store_ps
-#define gmx_add_ps4 _mm_add_ps
+#if defined GMX_X86_AVX_256 && !defined GMX_DOUBLE
+#define gmx_load_pr4 _mm_load_ps
+#define gmx_store_pr4 _mm_store_ps
+#define gmx_add_pr4 _mm_add_ps
#else
-#define gmx_load_ps4 _mm256_load_pd
-#define gmx_store_ps4 _mm256_store_pd
-#define gmx_add_ps4 _mm256_add_pd
+#error "You need to define 4-width SIM macros for i-force reduction"
#endif
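/* GMX_MM_TRANSPOSE_SUM4H_PR(a, b, s) is assumed to reduce the two
 * half-width accumulators a (i-atoms 0,1) and b (i-atoms 2,3) over their
 * j-lanes into a single 4-wide vector s = {fi0, fi1, fi2, fi3}, ready for
 * the 4-wide load/add/store below.
 */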
- GMX_MM_TRANSPOSE_SUM4H_PR(fix_SSE0, fix_SSE2, fix_SSE);
- gmx_store_ps4(f+scix, gmx_add_ps4(fix_SSE, gmx_load_ps4(f+scix)));
+ GMX_MM_TRANSPOSE_SUM4H_PR(fix_S0, fix_S2, fix_S);
+ gmx_store_pr4(f+scix, gmx_add_pr4(fix_S, gmx_load_pr4(f+scix)));
- GMX_MM_TRANSPOSE_SUM4H_PR(fiy_SSE0, fiy_SSE2, fiy_SSE);
- gmx_store_ps4(f+sciy, gmx_add_ps4(fiy_SSE, gmx_load_ps4(f+sciy)));
+ GMX_MM_TRANSPOSE_SUM4H_PR(fiy_S0, fiy_S2, fiy_S);
+ gmx_store_pr4(f+sciy, gmx_add_pr4(fiy_S, gmx_load_pr4(f+sciy)));
- GMX_MM_TRANSPOSE_SUM4H_PR(fiz_SSE0, fiz_SSE2, fiz_SSE);
- gmx_store_ps4(f+sciz, gmx_add_ps4(fiz_SSE, gmx_load_ps4(f+sciz)));
+ GMX_MM_TRANSPOSE_SUM4H_PR(fiz_S0, fiz_S2, fiz_S);
+ gmx_store_pr4(f+sciz, gmx_add_pr4(fiz_S, gmx_load_pr4(f+sciz)));
#ifdef CALC_SHIFTFORCES
- gmx_store_ps4(shf, fix_SSE);
+ gmx_store_pr4(shf, fix_S);
fshift[ish3+0] += SUM_SIMD4(shf);
- gmx_store_ps4(shf, fiy_SSE);
+ gmx_store_pr4(shf, fiy_S);
fshift[ish3+1] += SUM_SIMD4(shf);
- gmx_store_ps4(shf, fiz_SSE);
+ gmx_store_pr4(shf, fiz_S);
fshift[ish3+2] += SUM_SIMD4(shf);
#endif
-#else
- GMX_MM_TRANSPOSE_SUM2_PD(fix_SSE0, fix_SSE1, fix0_SSE);
- _mm_store_pd(f+scix, _mm_add_pd(fix0_SSE, _mm_load_pd(f+scix)));
- GMX_MM_TRANSPOSE_SUM2_PD(fix_SSE2, fix_SSE3, fix2_SSE);
- _mm_store_pd(f+scix+2, _mm_add_pd(fix2_SSE, _mm_load_pd(f+scix+2)));
-
- GMX_MM_TRANSPOSE_SUM2_PD(fiy_SSE0, fiy_SSE1, fiy0_SSE);
- _mm_store_pd(f+sciy, _mm_add_pd(fiy0_SSE, _mm_load_pd(f+sciy)));
- GMX_MM_TRANSPOSE_SUM2_PD(fiy_SSE2, fiy_SSE3, fiy2_SSE);
- _mm_store_pd(f+sciy+2, _mm_add_pd(fiy2_SSE, _mm_load_pd(f+sciy+2)));
-
- GMX_MM_TRANSPOSE_SUM2_PD(fiz_SSE0, fiz_SSE1, fiz0_SSE);
- _mm_store_pd(f+sciz, _mm_add_pd(fiz0_SSE, _mm_load_pd(f+sciz)));
- GMX_MM_TRANSPOSE_SUM2_PD(fiz_SSE2, fiz_SSE3, fiz2_SSE);
- _mm_store_pd(f+sciz+2, _mm_add_pd(fiz2_SSE, _mm_load_pd(f+sciz+2)));
-
-#ifdef CALC_SHIFTFORCES
- _mm_store_pd(shf, _mm_add_pd(fix0_SSE, fix2_SSE));
- fshift[ish3+0] += shf[0] + shf[1];
- _mm_store_pd(shf, _mm_add_pd(fiy0_SSE, fiy2_SSE));
- fshift[ish3+1] += shf[0] + shf[1];
- _mm_store_pd(shf, _mm_add_pd(fiz0_SSE, fiz2_SSE));
- fshift[ish3+2] += shf[0] + shf[1];
-#endif
-#endif
#ifdef CALC_ENERGIES
if (do_coul)
{
- gmx_store_pr(tmpsum, vctotSSE);
+ gmx_store_pr(tmpsum, vctot_S);
*Vc += SUM_SIMD(tmpsum);
}
- gmx_store_pr(tmpsum, VvdwtotSSE);
+ gmx_store_pr(tmpsum, Vvdwtot_S);
*Vvdw += SUM_SIMD(tmpsum);
#endif
#endif
}
-#undef gmx_load2_hpr
-#undef gmx_load_ps4
-#undef gmx_store_ps4
-#undef gmx_store_ps4
+#undef gmx_load_pr4
+#undef gmx_store_pr4
+#undef gmx_add_pr4
#undef CALC_SHIFTFORCES
#undef STRIDE
#undef TAB_FDV0
#undef NBFP_STRIDE
+
+#undef gmx_mm_hpr
+
+#undef gmx_load_hpr
+#undef gmx_load1_hpr
+#undef gmx_load1p1_pr
+#undef gmx_loaddh_pr
+#undef gmx_store_hpr
+#undef gmx_add_hpr
+#undef gmx_sub_hpr
+
+#undef gmx_sum4_hpr
/* Include all flavors of the SSE or AVX 4xN kernel loops */
-#if GMX_NBNXN_SIMD_BITWIDTH == 128
-#define GMX_MM128_HERE
-#else
-#if GMX_NBNXN_SIMD_BITWIDTH == 256
-#define GMX_MM256_HERE
-#else
+#if !(GMX_NBNXN_SIMD_BITWIDTH == 128 || GMX_NBNXN_SIMD_BITWIDTH == 256)
#error "unsupported GMX_NBNXN_SIMD_BITWIDTH"
#endif
-#endif
/* Analytical reaction-field kernels */
#define CALC_COUL_RF
#endif
/* Without exclusions and energies we only need to mask the cut-off,
- * this can be faster with blendv (only available with SSE4.1 and later).
+ * this can be faster when we have defined gmx_blendv_pr, i.e. an instruction
+ * that selects from two SIMD registers based on the contents of a third.
*/
-#if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_X86_SSE4_1 && !defined COUNT_PAIRS
+#if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_HAVE_SIMD_BLENDV && !defined COUNT_PAIRS
/* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
* With gcc this is slower, except for RF on Sandy Bridge.
* Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
*/
#if (defined CALC_COUL_RF || defined CALC_COUL_TAB) && (!defined __GNUC__ || (defined CALC_COUL_RF && defined GMX_X86_AVX_256))
-#define CUTOFF_BLENDV
+#define NBNXN_CUTOFF_USE_BLENDV
#endif
/* With analytical Ewald we replace cmp+and+and with sub+blendv+blendv.
* This is only faster with icc on Sandy Bridge (PS kernel slower than gcc 4.7).
* Tested with icc 13.
*/
#if defined CALC_COUL_EWALD && defined __INTEL_COMPILER && defined GMX_X86_AVX_256
-#define CUTOFF_BLENDV
+#define NBNXN_CUTOFF_USE_BLENDV
#endif
#endif
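/* Illustration of the two masking variants for 1/r at the cut-off:
 *   cmp+and:    rinv = rinv & (rsq < rc2)
 *   sub+blendv: rinv = blendv(rinv, 0, rc2 - rsq)
 * blendv selects its second operand where the sign bit of the third is set,
 * i.e. where rsq > rc2, so both variants zero 1/r beyond the cut-off.
 */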
#ifdef CHECK_EXCLS
/* Interaction (non-exclusion) mask of all 1's or 0's */
- gmx_mm_pr int_SSE0;
- gmx_mm_pr int_SSE1;
- gmx_mm_pr int_SSE2;
- gmx_mm_pr int_SSE3;
-#endif
-
- gmx_mm_pr jxSSE, jySSE, jzSSE;
- gmx_mm_pr dx_SSE0, dy_SSE0, dz_SSE0;
- gmx_mm_pr dx_SSE1, dy_SSE1, dz_SSE1;
- gmx_mm_pr dx_SSE2, dy_SSE2, dz_SSE2;
- gmx_mm_pr dx_SSE3, dy_SSE3, dz_SSE3;
- gmx_mm_pr tx_SSE0, ty_SSE0, tz_SSE0;
- gmx_mm_pr tx_SSE1, ty_SSE1, tz_SSE1;
- gmx_mm_pr tx_SSE2, ty_SSE2, tz_SSE2;
- gmx_mm_pr tx_SSE3, ty_SSE3, tz_SSE3;
- gmx_mm_pr rsq_SSE0, rinv_SSE0, rinvsq_SSE0;
- gmx_mm_pr rsq_SSE1, rinv_SSE1, rinvsq_SSE1;
- gmx_mm_pr rsq_SSE2, rinv_SSE2, rinvsq_SSE2;
- gmx_mm_pr rsq_SSE3, rinv_SSE3, rinvsq_SSE3;
-#ifndef CUTOFF_BLENDV
+ gmx_mm_pr int_S0;
+ gmx_mm_pr int_S1;
+ gmx_mm_pr int_S2;
+ gmx_mm_pr int_S3;
+#endif
+
+ gmx_mm_pr jx_S, jy_S, jz_S;
+ gmx_mm_pr dx_S0, dy_S0, dz_S0;
+ gmx_mm_pr dx_S1, dy_S1, dz_S1;
+ gmx_mm_pr dx_S2, dy_S2, dz_S2;
+ gmx_mm_pr dx_S3, dy_S3, dz_S3;
+ gmx_mm_pr tx_S0, ty_S0, tz_S0;
+ gmx_mm_pr tx_S1, ty_S1, tz_S1;
+ gmx_mm_pr tx_S2, ty_S2, tz_S2;
+ gmx_mm_pr tx_S3, ty_S3, tz_S3;
+ gmx_mm_pr rsq_S0, rinv_S0, rinvsq_S0;
+ gmx_mm_pr rsq_S1, rinv_S1, rinvsq_S1;
+ gmx_mm_pr rsq_S2, rinv_S2, rinvsq_S2;
+ gmx_mm_pr rsq_S3, rinv_S3, rinvsq_S3;
+#ifndef NBNXN_CUTOFF_USE_BLENDV
/* wco: within cut-off, mask of all 1's or 0's */
- gmx_mm_pr wco_SSE0;
- gmx_mm_pr wco_SSE1;
- gmx_mm_pr wco_SSE2;
- gmx_mm_pr wco_SSE3;
+ gmx_mm_pr wco_S0;
+ gmx_mm_pr wco_S1;
+ gmx_mm_pr wco_S2;
+ gmx_mm_pr wco_S3;
#endif
#ifdef VDW_CUTOFF_CHECK
- gmx_mm_pr wco_vdw_SSE0;
- gmx_mm_pr wco_vdw_SSE1;
+ gmx_mm_pr wco_vdw_S0;
+ gmx_mm_pr wco_vdw_S1;
#ifndef HALF_LJ
- gmx_mm_pr wco_vdw_SSE2;
- gmx_mm_pr wco_vdw_SSE3;
+ gmx_mm_pr wco_vdw_S2;
+ gmx_mm_pr wco_vdw_S3;
#endif
#endif
#ifdef CALC_COULOMB
#ifdef CHECK_EXCLS
/* 1/r masked with the interaction mask */
- gmx_mm_pr rinv_ex_SSE0;
- gmx_mm_pr rinv_ex_SSE1;
- gmx_mm_pr rinv_ex_SSE2;
- gmx_mm_pr rinv_ex_SSE3;
-#endif
- gmx_mm_pr jq_SSE;
- gmx_mm_pr qq_SSE0;
- gmx_mm_pr qq_SSE1;
- gmx_mm_pr qq_SSE2;
- gmx_mm_pr qq_SSE3;
+ gmx_mm_pr rinv_ex_S0;
+ gmx_mm_pr rinv_ex_S1;
+ gmx_mm_pr rinv_ex_S2;
+ gmx_mm_pr rinv_ex_S3;
+#endif
+ gmx_mm_pr jq_S;
+ gmx_mm_pr qq_S0;
+ gmx_mm_pr qq_S1;
+ gmx_mm_pr qq_S2;
+ gmx_mm_pr qq_S3;
#ifdef CALC_COUL_TAB
/* The force (PME mesh force) we need to subtract from 1/r^2 */
- gmx_mm_pr fsub_SSE0;
- gmx_mm_pr fsub_SSE1;
- gmx_mm_pr fsub_SSE2;
- gmx_mm_pr fsub_SSE3;
+ gmx_mm_pr fsub_S0;
+ gmx_mm_pr fsub_S1;
+ gmx_mm_pr fsub_S2;
+ gmx_mm_pr fsub_S3;
#endif
#ifdef CALC_COUL_EWALD
- gmx_mm_pr brsq_SSE0, brsq_SSE1, brsq_SSE2, brsq_SSE3;
- gmx_mm_pr ewcorr_SSE0, ewcorr_SSE1, ewcorr_SSE2, ewcorr_SSE3;
+ gmx_mm_pr brsq_S0, brsq_S1, brsq_S2, brsq_S3;
+ gmx_mm_pr ewcorr_S0, ewcorr_S1, ewcorr_S2, ewcorr_S3;
#endif
/* frcoul = (1/r - fsub)*r */
- gmx_mm_pr frcoul_SSE0;
- gmx_mm_pr frcoul_SSE1;
- gmx_mm_pr frcoul_SSE2;
- gmx_mm_pr frcoul_SSE3;
+ gmx_mm_pr frcoul_S0;
+ gmx_mm_pr frcoul_S1;
+ gmx_mm_pr frcoul_S2;
+ gmx_mm_pr frcoul_S3;
#ifdef CALC_COUL_TAB
/* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
- gmx_mm_pr r_SSE0, rs_SSE0, rf_SSE0, frac_SSE0;
- gmx_mm_pr r_SSE1, rs_SSE1, rf_SSE1, frac_SSE1;
- gmx_mm_pr r_SSE2, rs_SSE2, rf_SSE2, frac_SSE2;
- gmx_mm_pr r_SSE3, rs_SSE3, rf_SSE3, frac_SSE3;
+ gmx_mm_pr r_S0, rs_S0, rf_S0, frac_S0;
+ gmx_mm_pr r_S1, rs_S1, rf_S1, frac_S1;
+ gmx_mm_pr r_S2, rs_S2, rf_S2, frac_S2;
+ gmx_mm_pr r_S3, rs_S3, rf_S3, frac_S3;
/* Table index: rs truncated to an int */
-#if !(defined GMX_MM256_HERE && defined GMX_DOUBLE)
- gmx_epi32 ti_SSE0, ti_SSE1, ti_SSE2, ti_SSE3;
-#else
- __m128i ti_SSE0, ti_SSE1, ti_SSE2, ti_SSE3;
-#endif
+ gmx_epi32 ti_S0, ti_S1, ti_S2, ti_S3;
/* Linear force table values */
- gmx_mm_pr ctab0_SSE0, ctab1_SSE0;
- gmx_mm_pr ctab0_SSE1, ctab1_SSE1;
- gmx_mm_pr ctab0_SSE2, ctab1_SSE2;
- gmx_mm_pr ctab0_SSE3, ctab1_SSE3;
+ gmx_mm_pr ctab0_S0, ctab1_S0;
+ gmx_mm_pr ctab0_S1, ctab1_S1;
+ gmx_mm_pr ctab0_S2, ctab1_S2;
+ gmx_mm_pr ctab0_S3, ctab1_S3;
#ifdef CALC_ENERGIES
/* Quadratic energy table value */
- gmx_mm_pr ctabv_SSE0;
- gmx_mm_pr ctabv_SSE1;
- gmx_mm_pr ctabv_SSE2;
- gmx_mm_pr ctabv_SSE3;
+ gmx_mm_pr ctabv_S0;
+ gmx_mm_pr ctabv_S1;
+ gmx_mm_pr ctabv_S2;
+ gmx_mm_pr ctabv_S3;
#endif
#endif
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
/* The potential (PME mesh) we need to subtract from 1/r */
- gmx_mm_pr vc_sub_SSE0;
- gmx_mm_pr vc_sub_SSE1;
- gmx_mm_pr vc_sub_SSE2;
- gmx_mm_pr vc_sub_SSE3;
+ gmx_mm_pr vc_sub_S0;
+ gmx_mm_pr vc_sub_S1;
+ gmx_mm_pr vc_sub_S2;
+ gmx_mm_pr vc_sub_S3;
#endif
#ifdef CALC_ENERGIES
/* Electrostatic potential */
- gmx_mm_pr vcoul_SSE0;
- gmx_mm_pr vcoul_SSE1;
- gmx_mm_pr vcoul_SSE2;
- gmx_mm_pr vcoul_SSE3;
+ gmx_mm_pr vcoul_S0;
+ gmx_mm_pr vcoul_S1;
+ gmx_mm_pr vcoul_S2;
+ gmx_mm_pr vcoul_S3;
#endif
#endif
/* The force times 1/r */
- gmx_mm_pr fscal_SSE0;
- gmx_mm_pr fscal_SSE1;
- gmx_mm_pr fscal_SSE2;
- gmx_mm_pr fscal_SSE3;
+ gmx_mm_pr fscal_S0;
+ gmx_mm_pr fscal_S1;
+ gmx_mm_pr fscal_S2;
+ gmx_mm_pr fscal_S3;
#ifdef CALC_LJ
#ifdef LJ_COMB_LB
/* LJ sigma_j/2 and sqrt(epsilon_j) */
- gmx_mm_pr hsig_j_SSE, seps_j_SSE;
+ gmx_mm_pr hsig_j_S, seps_j_S;
/* LJ sigma_ij and epsilon_ij */
- gmx_mm_pr sig_SSE0, eps_SSE0;
- gmx_mm_pr sig_SSE1, eps_SSE1;
+ gmx_mm_pr sig_S0, eps_S0;
+ gmx_mm_pr sig_S1, eps_S1;
#ifndef HALF_LJ
- gmx_mm_pr sig_SSE2, eps_SSE2;
- gmx_mm_pr sig_SSE3, eps_SSE3;
+ gmx_mm_pr sig_S2, eps_S2;
+ gmx_mm_pr sig_S3, eps_S3;
#endif
#ifdef CALC_ENERGIES
- gmx_mm_pr sig2_SSE0, sig6_SSE0;
- gmx_mm_pr sig2_SSE1, sig6_SSE1;
+ gmx_mm_pr sig2_S0, sig6_S0;
+ gmx_mm_pr sig2_S1, sig6_S1;
#ifndef HALF_LJ
- gmx_mm_pr sig2_SSE2, sig6_SSE2;
- gmx_mm_pr sig2_SSE3, sig6_SSE3;
+ gmx_mm_pr sig2_S2, sig6_S2;
+ gmx_mm_pr sig2_S3, sig6_S3;
#endif
#endif /* LJ_COMB_LB */
#endif /* CALC_LJ */
#ifdef LJ_COMB_GEOM
- gmx_mm_pr c6s_j_SSE, c12s_j_SSE;
+ gmx_mm_pr c6s_j_S, c12s_j_S;
#endif
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
#ifndef FIX_LJ_C
/* LJ C6 and C12 parameters, used with geometric comb. rule */
- gmx_mm_pr c6_SSE0, c12_SSE0;
- gmx_mm_pr c6_SSE1, c12_SSE1;
+ gmx_mm_pr c6_S0, c12_S0;
+ gmx_mm_pr c6_S1, c12_S1;
#ifndef HALF_LJ
- gmx_mm_pr c6_SSE2, c12_SSE2;
- gmx_mm_pr c6_SSE3, c12_SSE3;
+ gmx_mm_pr c6_S2, c12_S2;
+ gmx_mm_pr c6_S3, c12_S3;
#endif
#endif
/* Intermediate variables for LJ calculation */
#ifndef LJ_COMB_LB
- gmx_mm_pr rinvsix_SSE0;
- gmx_mm_pr rinvsix_SSE1;
+ gmx_mm_pr rinvsix_S0;
+ gmx_mm_pr rinvsix_S1;
#ifndef HALF_LJ
- gmx_mm_pr rinvsix_SSE2;
- gmx_mm_pr rinvsix_SSE3;
+ gmx_mm_pr rinvsix_S2;
+ gmx_mm_pr rinvsix_S3;
#endif
#endif
#ifdef LJ_COMB_LB
- gmx_mm_pr sir_SSE0, sir2_SSE0, sir6_SSE0;
- gmx_mm_pr sir_SSE1, sir2_SSE1, sir6_SSE1;
+ gmx_mm_pr sir_S0, sir2_S0, sir6_S0;
+ gmx_mm_pr sir_S1, sir2_S1, sir6_S1;
#ifndef HALF_LJ
- gmx_mm_pr sir_SSE2, sir2_SSE2, sir6_SSE2;
- gmx_mm_pr sir_SSE3, sir2_SSE3, sir6_SSE3;
+ gmx_mm_pr sir_S2, sir2_S2, sir6_S2;
+ gmx_mm_pr sir_S3, sir2_S3, sir6_S3;
#endif
#endif
- gmx_mm_pr FrLJ6_SSE0, FrLJ12_SSE0;
- gmx_mm_pr FrLJ6_SSE1, FrLJ12_SSE1;
+ gmx_mm_pr FrLJ6_S0, FrLJ12_S0;
+ gmx_mm_pr FrLJ6_S1, FrLJ12_S1;
#ifndef HALF_LJ
- gmx_mm_pr FrLJ6_SSE2, FrLJ12_SSE2;
- gmx_mm_pr FrLJ6_SSE3, FrLJ12_SSE3;
+ gmx_mm_pr FrLJ6_S2, FrLJ12_S2;
+ gmx_mm_pr FrLJ6_S3, FrLJ12_S3;
#endif
#ifdef CALC_ENERGIES
- gmx_mm_pr VLJ6_SSE0, VLJ12_SSE0, VLJ_SSE0;
- gmx_mm_pr VLJ6_SSE1, VLJ12_SSE1, VLJ_SSE1;
+ gmx_mm_pr VLJ6_S0, VLJ12_S0, VLJ_S0;
+ gmx_mm_pr VLJ6_S1, VLJ12_S1, VLJ_S1;
#ifndef HALF_LJ
- gmx_mm_pr VLJ6_SSE2, VLJ12_SSE2, VLJ_SSE2;
- gmx_mm_pr VLJ6_SSE3, VLJ12_SSE3, VLJ_SSE3;
+ gmx_mm_pr VLJ6_S2, VLJ12_S2, VLJ_S2;
+ gmx_mm_pr VLJ6_S3, VLJ12_S3, VLJ_S3;
#endif
#endif
#endif /* CALC_LJ */
ajz = ajy + STRIDE;
#ifdef CHECK_EXCLS
-#if defined GMX_X86_SSE2 && defined GMX_MM128_HERE
+#ifdef gmx_checkbitmask_epi32
{
- /* Load integer interaction mask */
- __m128i mask_int = _mm_set1_epi32(l_cj[cjind].excl);
+ /* Integer mask set and operations, cast result to real */
+ gmx_epi32 mask_pr_S = gmx_set1_epi32(l_cj[cjind].excl);
- int_SSE0 = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int, mask0), zeroi_SSE));
- int_SSE1 = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int, mask1), zeroi_SSE));
- int_SSE2 = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int, mask2), zeroi_SSE));
- int_SSE3 = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int, mask3), zeroi_SSE));
+ int_S0 = gmx_castsi_pr(gmx_checkbitmask_epi32(mask_pr_S, mask_S0));
+ int_S1 = gmx_castsi_pr(gmx_checkbitmask_epi32(mask_pr_S, mask_S1));
+ int_S2 = gmx_castsi_pr(gmx_checkbitmask_epi32(mask_pr_S, mask_S2));
+ int_S3 = gmx_castsi_pr(gmx_checkbitmask_epi32(mask_pr_S, mask_S3));
}
-#endif
-#if defined GMX_X86_SSE2 && defined GMX_MM256_HERE
- {
-#ifndef GMX_DOUBLE
- /* Load integer interaction mask */
- /* With AVX there are no integer operations, so cast to real */
- gmx_mm_pr mask_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl));
- /* We can't compare all 4*8=32 float bits: shift the mask */
- gmx_mm_pr masksh_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl>>(2*UNROLLJ)));
- /* Intel Compiler version 12.1.3 20120130 is buggy: use cast.
- * With gcc we don't need the cast, but it's faster.
- */
-#define cast_cvt(x) _mm256_cvtepi32_ps(_mm256_castps_si256(x))
- int_SSE0 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr, mask0)), zero_SSE);
- int_SSE1 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr, mask1)), zero_SSE);
- int_SSE2 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr, mask0)), zero_SSE);
- int_SSE3 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr, mask1)), zero_SSE);
-#undef cast_cvt
#else
- /* Load integer interaction mask */
- /* With AVX there are no integer operations,
- * and there is no int to double conversion, so cast to float
- */
- __m256 mask_ps = _mm256_castsi256_ps(_mm256_set1_epi32(l_cj[cjind].excl));
-#define cast_cvt(x) _mm256_castps_pd(_mm256_cvtepi32_ps(_mm256_castps_si256(x)))
- int_SSE0 = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps, mask0)), zero_SSE);
- int_SSE1 = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps, mask1)), zero_SSE);
- int_SSE2 = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps, mask2)), zero_SSE);
- int_SSE3 = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps, mask3)), zero_SSE);
-#undef cast_cvt
-#endif
+ {
+ /* Integer mask set, cast to real and real mask operations */
+ gmx_mm_pr mask_pr_S = gmx_castsi_pr(gmx_set1_epi32(l_cj[cjind].excl));
+
+ int_S0 = gmx_checkbitmask_pr(mask_pr_S, mask_S0);
+ int_S1 = gmx_checkbitmask_pr(mask_pr_S, mask_S1);
+ int_S2 = gmx_checkbitmask_pr(mask_pr_S, mask_S2);
+ int_S3 = gmx_checkbitmask_pr(mask_pr_S, mask_S3);
}
#endif
#endif
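/* A sketch of gmx_checkbitmask_epi32 consistent with the SSE2 code removed
 * above (an assumption; the actual definition lives in gmx_simd_macros.h):
 *
 * #define gmx_checkbitmask_epi32(m0, m1) \
 *     _mm_cmpeq_epi32(_mm_andnot_si128(m0, m1), _mm_setzero_si128())
 *
 * (~m0 & m1) == 0 holds exactly when all bits set in m1 are also set in m0.
 */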
+
/* load j atom coordinates */
- jxSSE = gmx_load_pr(x+ajx);
- jySSE = gmx_load_pr(x+ajy);
- jzSSE = gmx_load_pr(x+ajz);
+ jx_S = gmx_load_pr(x+ajx);
+ jy_S = gmx_load_pr(x+ajy);
+ jz_S = gmx_load_pr(x+ajz);
/* Calculate distance */
- dx_SSE0 = gmx_sub_pr(ix_SSE0, jxSSE);
- dy_SSE0 = gmx_sub_pr(iy_SSE0, jySSE);
- dz_SSE0 = gmx_sub_pr(iz_SSE0, jzSSE);
- dx_SSE1 = gmx_sub_pr(ix_SSE1, jxSSE);
- dy_SSE1 = gmx_sub_pr(iy_SSE1, jySSE);
- dz_SSE1 = gmx_sub_pr(iz_SSE1, jzSSE);
- dx_SSE2 = gmx_sub_pr(ix_SSE2, jxSSE);
- dy_SSE2 = gmx_sub_pr(iy_SSE2, jySSE);
- dz_SSE2 = gmx_sub_pr(iz_SSE2, jzSSE);
- dx_SSE3 = gmx_sub_pr(ix_SSE3, jxSSE);
- dy_SSE3 = gmx_sub_pr(iy_SSE3, jySSE);
- dz_SSE3 = gmx_sub_pr(iz_SSE3, jzSSE);
+ dx_S0 = gmx_sub_pr(ix_S0, jx_S);
+ dy_S0 = gmx_sub_pr(iy_S0, jy_S);
+ dz_S0 = gmx_sub_pr(iz_S0, jz_S);
+ dx_S1 = gmx_sub_pr(ix_S1, jx_S);
+ dy_S1 = gmx_sub_pr(iy_S1, jy_S);
+ dz_S1 = gmx_sub_pr(iz_S1, jz_S);
+ dx_S2 = gmx_sub_pr(ix_S2, jx_S);
+ dy_S2 = gmx_sub_pr(iy_S2, jy_S);
+ dz_S2 = gmx_sub_pr(iz_S2, jz_S);
+ dx_S3 = gmx_sub_pr(ix_S3, jx_S);
+ dy_S3 = gmx_sub_pr(iy_S3, jy_S);
+ dz_S3 = gmx_sub_pr(iz_S3, jz_S);
/* rsq = dx*dx+dy*dy+dz*dz */
- rsq_SSE0 = gmx_calc_rsq_pr(dx_SSE0, dy_SSE0, dz_SSE0);
- rsq_SSE1 = gmx_calc_rsq_pr(dx_SSE1, dy_SSE1, dz_SSE1);
- rsq_SSE2 = gmx_calc_rsq_pr(dx_SSE2, dy_SSE2, dz_SSE2);
- rsq_SSE3 = gmx_calc_rsq_pr(dx_SSE3, dy_SSE3, dz_SSE3);
+ rsq_S0 = gmx_calc_rsq_pr(dx_S0, dy_S0, dz_S0);
+ rsq_S1 = gmx_calc_rsq_pr(dx_S1, dy_S1, dz_S1);
+ rsq_S2 = gmx_calc_rsq_pr(dx_S2, dy_S2, dz_S2);
+ rsq_S3 = gmx_calc_rsq_pr(dx_S3, dy_S3, dz_S3);
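/* gmx_calc_rsq_pr is assumed to expand to the plain dot product, e.g.:
 *
 * #define gmx_calc_rsq_pr(dx, dy, dz) \
 *     gmx_add_pr(gmx_add_pr(gmx_mul_pr(dx, dx), gmx_mul_pr(dy, dy)), \
 *                gmx_mul_pr(dz, dz))
 */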
-#ifndef CUTOFF_BLENDV
- wco_SSE0 = gmx_cmplt_pr(rsq_SSE0, rc2_SSE);
- wco_SSE1 = gmx_cmplt_pr(rsq_SSE1, rc2_SSE);
- wco_SSE2 = gmx_cmplt_pr(rsq_SSE2, rc2_SSE);
- wco_SSE3 = gmx_cmplt_pr(rsq_SSE3, rc2_SSE);
+#ifndef NBNXN_CUTOFF_USE_BLENDV
+ wco_S0 = gmx_cmplt_pr(rsq_S0, rc2_S);
+ wco_S1 = gmx_cmplt_pr(rsq_S1, rc2_S);
+ wco_S2 = gmx_cmplt_pr(rsq_S2, rc2_S);
+ wco_S3 = gmx_cmplt_pr(rsq_S3, rc2_S);
#endif
#ifdef CHECK_EXCLS
#if UNROLLJ == UNROLLI
if (cj == ci_sh)
{
- wco_SSE0 = gmx_and_pr(wco_SSE0, diag_SSE0);
- wco_SSE1 = gmx_and_pr(wco_SSE1, diag_SSE1);
- wco_SSE2 = gmx_and_pr(wco_SSE2, diag_SSE2);
- wco_SSE3 = gmx_and_pr(wco_SSE3, diag_SSE3);
+ wco_S0 = gmx_and_pr(wco_S0, diag_S0);
+ wco_S1 = gmx_and_pr(wco_S1, diag_S1);
+ wco_S2 = gmx_and_pr(wco_S2, diag_S2);
+ wco_S3 = gmx_and_pr(wco_S3, diag_S3);
}
#else
#if UNROLLJ < UNROLLI
if (cj == ci_sh*2)
{
- wco_SSE0 = gmx_and_pr(wco_SSE0, diag0_SSE0);
- wco_SSE1 = gmx_and_pr(wco_SSE1, diag0_SSE1);
- wco_SSE2 = gmx_and_pr(wco_SSE2, diag0_SSE2);
- wco_SSE3 = gmx_and_pr(wco_SSE3, diag0_SSE3);
+ wco_S0 = gmx_and_pr(wco_S0, diag0_S0);
+ wco_S1 = gmx_and_pr(wco_S1, diag0_S1);
+ wco_S2 = gmx_and_pr(wco_S2, diag0_S2);
+ wco_S3 = gmx_and_pr(wco_S3, diag0_S3);
}
if (cj == ci_sh*2 + 1)
{
- wco_SSE0 = gmx_and_pr(wco_SSE0, diag1_SSE0);
- wco_SSE1 = gmx_and_pr(wco_SSE1, diag1_SSE1);
- wco_SSE2 = gmx_and_pr(wco_SSE2, diag1_SSE2);
- wco_SSE3 = gmx_and_pr(wco_SSE3, diag1_SSE3);
+ wco_S0 = gmx_and_pr(wco_S0, diag1_S0);
+ wco_S1 = gmx_and_pr(wco_S1, diag1_S1);
+ wco_S2 = gmx_and_pr(wco_S2, diag1_S2);
+ wco_S3 = gmx_and_pr(wco_S3, diag1_S3);
}
#else
if (cj*2 == ci_sh)
{
- wco_SSE0 = gmx_and_pr(wco_SSE0, diag0_SSE0);
- wco_SSE1 = gmx_and_pr(wco_SSE1, diag0_SSE1);
- wco_SSE2 = gmx_and_pr(wco_SSE2, diag0_SSE2);
- wco_SSE3 = gmx_and_pr(wco_SSE3, diag0_SSE3);
+ wco_S0 = gmx_and_pr(wco_S0, diag0_S0);
+ wco_S1 = gmx_and_pr(wco_S1, diag0_S1);
+ wco_S2 = gmx_and_pr(wco_S2, diag0_S2);
+ wco_S3 = gmx_and_pr(wco_S3, diag0_S3);
}
else if (cj*2 + 1 == ci_sh)
{
- wco_SSE0 = gmx_and_pr(wco_SSE0, diag1_SSE0);
- wco_SSE1 = gmx_and_pr(wco_SSE1, diag1_SSE1);
- wco_SSE2 = gmx_and_pr(wco_SSE2, diag1_SSE2);
- wco_SSE3 = gmx_and_pr(wco_SSE3, diag1_SSE3);
+ wco_S0 = gmx_and_pr(wco_S0, diag1_S0);
+ wco_S1 = gmx_and_pr(wco_S1, diag1_S1);
+ wco_S2 = gmx_and_pr(wco_S2, diag1_S2);
+ wco_S3 = gmx_and_pr(wco_S3, diag1_S3);
}
#endif
#endif
#else /* EXCL_FORCES */
- /* Remove all excluded atom pairs from the list */
- wco_SSE0 = gmx_and_pr(wco_SSE0, int_SSE0);
- wco_SSE1 = gmx_and_pr(wco_SSE1, int_SSE1);
- wco_SSE2 = gmx_and_pr(wco_SSE2, int_SSE2);
- wco_SSE3 = gmx_and_pr(wco_SSE3, int_SSE3);
+ /* No exclusion forces: remove all excluded atom pairs from the list */
+ wco_S0 = gmx_and_pr(wco_S0, int_S0);
+ wco_S1 = gmx_and_pr(wco_S1, int_S1);
+ wco_S2 = gmx_and_pr(wco_S2, int_S2);
+ wco_S3 = gmx_and_pr(wco_S3, int_S3);
#endif
#endif
#ifdef COUNT_PAIRS
{
int i, j;
- real tmp[UNROLLJ];
+ real tmpa[2*GMX_SIMD_WIDTH_HERE], *tmp;
+ tmp = gmx_simd_align_real(tmpa);
for (i = 0; i < UNROLLI; i++)
{
- gmx_storeu_pr(tmp, i == 0 ? wco_SSE0 : (i == 1 ? wco_SSE1 : (i == 2 ? wco_SSE2 : wco_SSE3)));
+ gmx_store_pr(tmp, i == 0 ? wco_S0 : (i == 1 ? wco_S1 : (i == 2 ? wco_S2 : wco_S3)));
for (j = 0; j < UNROLLJ; j++)
{
if (!(tmp[j] == 0))
#ifdef CHECK_EXCLS
/* For excluded pairs add a small number to avoid r^-6 = NaN */
- rsq_SSE0 = gmx_add_pr(rsq_SSE0, gmx_andnot_pr(int_SSE0, avoid_sing_SSE));
- rsq_SSE1 = gmx_add_pr(rsq_SSE1, gmx_andnot_pr(int_SSE1, avoid_sing_SSE));
- rsq_SSE2 = gmx_add_pr(rsq_SSE2, gmx_andnot_pr(int_SSE2, avoid_sing_SSE));
- rsq_SSE3 = gmx_add_pr(rsq_SSE3, gmx_andnot_pr(int_SSE3, avoid_sing_SSE));
+ rsq_S0 = gmx_add_pr(rsq_S0, gmx_andnot_pr(int_S0, avoid_sing_S));
+ rsq_S1 = gmx_add_pr(rsq_S1, gmx_andnot_pr(int_S1, avoid_sing_S));
+ rsq_S2 = gmx_add_pr(rsq_S2, gmx_andnot_pr(int_S2, avoid_sing_S));
+ rsq_S3 = gmx_add_pr(rsq_S3, gmx_andnot_pr(int_S3, avoid_sing_S));
#endif
/* Calculate 1/r */
#ifndef GMX_DOUBLE
- rinv_SSE0 = gmx_invsqrt_pr(rsq_SSE0);
- rinv_SSE1 = gmx_invsqrt_pr(rsq_SSE1);
- rinv_SSE2 = gmx_invsqrt_pr(rsq_SSE2);
- rinv_SSE3 = gmx_invsqrt_pr(rsq_SSE3);
+ rinv_S0 = gmx_invsqrt_pr(rsq_S0);
+ rinv_S1 = gmx_invsqrt_pr(rsq_S1);
+ rinv_S2 = gmx_invsqrt_pr(rsq_S2);
+ rinv_S3 = gmx_invsqrt_pr(rsq_S3);
#else
- GMX_MM_INVSQRT2_PD(rsq_SSE0, rsq_SSE1, rinv_SSE0, rinv_SSE1);
- GMX_MM_INVSQRT2_PD(rsq_SSE2, rsq_SSE3, rinv_SSE2, rinv_SSE3);
+ GMX_MM_INVSQRT2_PD(rsq_S0, rsq_S1, rinv_S0, rinv_S1);
+ GMX_MM_INVSQRT2_PD(rsq_S2, rsq_S3, rinv_S2, rinv_S3);
#endif
#ifdef CALC_COULOMB
/* Load parameters for j atom */
- jq_SSE = gmx_load_pr(q+aj);
- qq_SSE0 = gmx_mul_pr(iq_SSE0, jq_SSE);
- qq_SSE1 = gmx_mul_pr(iq_SSE1, jq_SSE);
- qq_SSE2 = gmx_mul_pr(iq_SSE2, jq_SSE);
- qq_SSE3 = gmx_mul_pr(iq_SSE3, jq_SSE);
+ jq_S = gmx_load_pr(q+aj);
+ qq_S0 = gmx_mul_pr(iq_S0, jq_S);
+ qq_S1 = gmx_mul_pr(iq_S1, jq_S);
+ qq_S2 = gmx_mul_pr(iq_S2, jq_S);
+ qq_S3 = gmx_mul_pr(iq_S3, jq_S);
#endif
#ifdef CALC_LJ
#if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
- load_lj_pair_params(nbfp0, type, aj, c6_SSE0, c12_SSE0);
- load_lj_pair_params(nbfp1, type, aj, c6_SSE1, c12_SSE1);
+ load_lj_pair_params(nbfp0, type, aj, c6_S0, c12_S0);
+ load_lj_pair_params(nbfp1, type, aj, c6_S1, c12_S1);
#ifndef HALF_LJ
- load_lj_pair_params(nbfp2, type, aj, c6_SSE2, c12_SSE2);
- load_lj_pair_params(nbfp3, type, aj, c6_SSE3, c12_SSE3);
+ load_lj_pair_params(nbfp2, type, aj, c6_S2, c12_S2);
+ load_lj_pair_params(nbfp3, type, aj, c6_S3, c12_S3);
#endif
#endif /* not defined any LJ rule */
#ifdef LJ_COMB_GEOM
- c6s_j_SSE = gmx_load_pr(ljc+aj2+0);
- c12s_j_SSE = gmx_load_pr(ljc+aj2+STRIDE);
- c6_SSE0 = gmx_mul_pr(c6s_SSE0, c6s_j_SSE );
- c6_SSE1 = gmx_mul_pr(c6s_SSE1, c6s_j_SSE );
+ c6s_j_S = gmx_load_pr(ljc+aj2+0);
+ c12s_j_S = gmx_load_pr(ljc+aj2+STRIDE);
+ c6_S0 = gmx_mul_pr(c6s_S0, c6s_j_S );
+ c6_S1 = gmx_mul_pr(c6s_S1, c6s_j_S );
#ifndef HALF_LJ
- c6_SSE2 = gmx_mul_pr(c6s_SSE2, c6s_j_SSE );
- c6_SSE3 = gmx_mul_pr(c6s_SSE3, c6s_j_SSE );
+ c6_S2 = gmx_mul_pr(c6s_S2, c6s_j_S );
+ c6_S3 = gmx_mul_pr(c6s_S3, c6s_j_S );
#endif
- c12_SSE0 = gmx_mul_pr(c12s_SSE0, c12s_j_SSE);
- c12_SSE1 = gmx_mul_pr(c12s_SSE1, c12s_j_SSE);
+ c12_S0 = gmx_mul_pr(c12s_S0, c12s_j_S);
+ c12_S1 = gmx_mul_pr(c12s_S1, c12s_j_S);
#ifndef HALF_LJ
- c12_SSE2 = gmx_mul_pr(c12s_SSE2, c12s_j_SSE);
- c12_SSE3 = gmx_mul_pr(c12s_SSE3, c12s_j_SSE);
+ c12_S2 = gmx_mul_pr(c12s_S2, c12s_j_S);
+ c12_S3 = gmx_mul_pr(c12s_S3, c12s_j_S);
#endif
#endif /* LJ_COMB_GEOM */
#ifdef LJ_COMB_LB
- hsig_j_SSE = gmx_load_pr(ljc+aj2+0);
- seps_j_SSE = gmx_load_pr(ljc+aj2+STRIDE);
+ hsig_j_S = gmx_load_pr(ljc+aj2+0);
+ seps_j_S = gmx_load_pr(ljc+aj2+STRIDE);
- sig_SSE0 = gmx_add_pr(hsig_i_SSE0, hsig_j_SSE);
- sig_SSE1 = gmx_add_pr(hsig_i_SSE1, hsig_j_SSE);
- eps_SSE0 = gmx_mul_pr(seps_i_SSE0, seps_j_SSE);
- eps_SSE1 = gmx_mul_pr(seps_i_SSE1, seps_j_SSE);
+ sig_S0 = gmx_add_pr(hsig_i_S0, hsig_j_S);
+ sig_S1 = gmx_add_pr(hsig_i_S1, hsig_j_S);
+ eps_S0 = gmx_mul_pr(seps_i_S0, seps_j_S);
+ eps_S1 = gmx_mul_pr(seps_i_S1, seps_j_S);
#ifndef HALF_LJ
- sig_SSE2 = gmx_add_pr(hsig_i_SSE2, hsig_j_SSE);
- sig_SSE3 = gmx_add_pr(hsig_i_SSE3, hsig_j_SSE);
- eps_SSE2 = gmx_mul_pr(seps_i_SSE2, seps_j_SSE);
- eps_SSE3 = gmx_mul_pr(seps_i_SSE3, seps_j_SSE);
+ sig_S2 = gmx_add_pr(hsig_i_S2, hsig_j_S);
+ sig_S3 = gmx_add_pr(hsig_i_S3, hsig_j_S);
+ eps_S2 = gmx_mul_pr(seps_i_S2, seps_j_S);
+ eps_S3 = gmx_mul_pr(seps_i_S3, seps_j_S);
#endif
#endif /* LJ_COMB_LB */
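/* The Lorentz-Berthelot rule sigma_ij = (sigma_i+sigma_j)/2 and
 * eps_ij = sqrt(eps_i*eps_j) is applied above using pre-computed per-atom
 * sigma/2 (hsig) and sqrt(eps) (seps), turning the combination into one
 * add and one mul per i-j pair.
 */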
#endif /* CALC_LJ */
-#ifndef CUTOFF_BLENDV
- rinv_SSE0 = gmx_and_pr(rinv_SSE0, wco_SSE0);
- rinv_SSE1 = gmx_and_pr(rinv_SSE1, wco_SSE1);
- rinv_SSE2 = gmx_and_pr(rinv_SSE2, wco_SSE2);
- rinv_SSE3 = gmx_and_pr(rinv_SSE3, wco_SSE3);
+#ifndef NBNXN_CUTOFF_USE_BLENDV
+ rinv_S0 = gmx_blendzero_pr(rinv_S0, wco_S0);
+ rinv_S1 = gmx_blendzero_pr(rinv_S1, wco_S1);
+ rinv_S2 = gmx_blendzero_pr(rinv_S2, wco_S2);
+ rinv_S3 = gmx_blendzero_pr(rinv_S3, wco_S3);
#else
/* We only need to mask for the cut-off: blendv is faster */
- rinv_SSE0 = gmx_blendv_pr(rinv_SSE0, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE0));
- rinv_SSE1 = gmx_blendv_pr(rinv_SSE1, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE1));
- rinv_SSE2 = gmx_blendv_pr(rinv_SSE2, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE2));
- rinv_SSE3 = gmx_blendv_pr(rinv_SSE3, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE3));
+ rinv_S0 = gmx_blendv_pr(rinv_S0, zero_S, gmx_sub_pr(rc2_S, rsq_S0));
+ rinv_S1 = gmx_blendv_pr(rinv_S1, zero_S, gmx_sub_pr(rc2_S, rsq_S1));
+ rinv_S2 = gmx_blendv_pr(rinv_S2, zero_S, gmx_sub_pr(rc2_S, rsq_S2));
+ rinv_S3 = gmx_blendv_pr(rinv_S3, zero_S, gmx_sub_pr(rc2_S, rsq_S3));
#endif
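/* With booleans stored as all-ones/all-zeros bit masks, gmx_blendzero_pr
 * reduces to a bit-wise and, e.g. on SSE single precision (assumption):
 *
 * #define gmx_blendzero_pr(a, b) _mm_and_ps(a, b)
 *
 * which is why it can replace the gmx_and_pr calls used before.
 */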
- rinvsq_SSE0 = gmx_mul_pr(rinv_SSE0, rinv_SSE0);
- rinvsq_SSE1 = gmx_mul_pr(rinv_SSE1, rinv_SSE1);
- rinvsq_SSE2 = gmx_mul_pr(rinv_SSE2, rinv_SSE2);
- rinvsq_SSE3 = gmx_mul_pr(rinv_SSE3, rinv_SSE3);
+ rinvsq_S0 = gmx_mul_pr(rinv_S0, rinv_S0);
+ rinvsq_S1 = gmx_mul_pr(rinv_S1, rinv_S1);
+ rinvsq_S2 = gmx_mul_pr(rinv_S2, rinv_S2);
+ rinvsq_S3 = gmx_mul_pr(rinv_S3, rinv_S3);
#ifdef CALC_COULOMB
/* Note that here we calculate force*r, not the usual force/r.
#ifdef EXCL_FORCES
/* Only add 1/r for non-excluded atom pairs */
- rinv_ex_SSE0 = gmx_and_pr(rinv_SSE0, int_SSE0);
- rinv_ex_SSE1 = gmx_and_pr(rinv_SSE1, int_SSE1);
- rinv_ex_SSE2 = gmx_and_pr(rinv_SSE2, int_SSE2);
- rinv_ex_SSE3 = gmx_and_pr(rinv_SSE3, int_SSE3);
+ rinv_ex_S0 = gmx_blendzero_pr(rinv_S0, int_S0);
+ rinv_ex_S1 = gmx_blendzero_pr(rinv_S1, int_S1);
+ rinv_ex_S2 = gmx_blendzero_pr(rinv_S2, int_S2);
+ rinv_ex_S3 = gmx_blendzero_pr(rinv_S3, int_S3);
#else
/* No exclusion forces, we always need 1/r */
-#define rinv_ex_SSE0 rinv_SSE0
-#define rinv_ex_SSE1 rinv_SSE1
-#define rinv_ex_SSE2 rinv_SSE2
-#define rinv_ex_SSE3 rinv_SSE3
+#define rinv_ex_S0 rinv_S0
+#define rinv_ex_S1 rinv_S1
+#define rinv_ex_S2 rinv_S2
+#define rinv_ex_S3 rinv_S3
#endif
#ifdef CALC_COUL_RF
/* Electrostatic interactions */
- frcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_add_pr(rinv_ex_SSE0, gmx_mul_pr(rsq_SSE0, mrc_3_SSE)));
- frcoul_SSE1 = gmx_mul_pr(qq_SSE1, gmx_add_pr(rinv_ex_SSE1, gmx_mul_pr(rsq_SSE1, mrc_3_SSE)));
- frcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_add_pr(rinv_ex_SSE2, gmx_mul_pr(rsq_SSE2, mrc_3_SSE)));
- frcoul_SSE3 = gmx_mul_pr(qq_SSE3, gmx_add_pr(rinv_ex_SSE3, gmx_mul_pr(rsq_SSE3, mrc_3_SSE)));
+ frcoul_S0 = gmx_mul_pr(qq_S0, gmx_add_pr(rinv_ex_S0, gmx_mul_pr(rsq_S0, mrc_3_S)));
+ frcoul_S1 = gmx_mul_pr(qq_S1, gmx_add_pr(rinv_ex_S1, gmx_mul_pr(rsq_S1, mrc_3_S)));
+ frcoul_S2 = gmx_mul_pr(qq_S2, gmx_add_pr(rinv_ex_S2, gmx_mul_pr(rsq_S2, mrc_3_S)));
+ frcoul_S3 = gmx_mul_pr(qq_S3, gmx_add_pr(rinv_ex_S3, gmx_mul_pr(rsq_S3, mrc_3_S)));
#ifdef CALC_ENERGIES
- vcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_add_pr(rinv_ex_SSE0, gmx_add_pr(gmx_mul_pr(rsq_SSE0, hrc_3_SSE), moh_rc_SSE)));
- vcoul_SSE1 = gmx_mul_pr(qq_SSE1, gmx_add_pr(rinv_ex_SSE1, gmx_add_pr(gmx_mul_pr(rsq_SSE1, hrc_3_SSE), moh_rc_SSE)));
- vcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_add_pr(rinv_ex_SSE2, gmx_add_pr(gmx_mul_pr(rsq_SSE2, hrc_3_SSE), moh_rc_SSE)));
- vcoul_SSE3 = gmx_mul_pr(qq_SSE3, gmx_add_pr(rinv_ex_SSE3, gmx_add_pr(gmx_mul_pr(rsq_SSE3, hrc_3_SSE), moh_rc_SSE)));
+ vcoul_S0 = gmx_mul_pr(qq_S0, gmx_add_pr(rinv_ex_S0, gmx_add_pr(gmx_mul_pr(rsq_S0, hrc_3_S), moh_rc_S)));
+ vcoul_S1 = gmx_mul_pr(qq_S1, gmx_add_pr(rinv_ex_S1, gmx_add_pr(gmx_mul_pr(rsq_S1, hrc_3_S), moh_rc_S)));
+ vcoul_S2 = gmx_mul_pr(qq_S2, gmx_add_pr(rinv_ex_S2, gmx_add_pr(gmx_mul_pr(rsq_S2, hrc_3_S), moh_rc_S)));
+ vcoul_S3 = gmx_mul_pr(qq_S3, gmx_add_pr(rinv_ex_S3, gmx_add_pr(gmx_mul_pr(rsq_S3, hrc_3_S), moh_rc_S)));
#endif
#endif
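/* Written out, with mrc_3 = -2*k_rf, hrc_3 = k_rf and moh_rc = -c_rf set
 * above, this computes the standard reaction-field expressions:
 *   F*r = qq*(1/r - 2*k_rf*r^2)
 *   V   = qq*(1/r + k_rf*r^2 - c_rf)
 */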
/* We need to mask (or limit) rsq for the cut-off,
* as large distances can cause an overflow in gmx_pmecorrF/V.
*/
-#ifndef CUTOFF_BLENDV
- brsq_SSE0 = gmx_mul_pr(beta2_SSE, gmx_and_pr(rsq_SSE0, wco_SSE0));
- brsq_SSE1 = gmx_mul_pr(beta2_SSE, gmx_and_pr(rsq_SSE1, wco_SSE1));
- brsq_SSE2 = gmx_mul_pr(beta2_SSE, gmx_and_pr(rsq_SSE2, wco_SSE2));
- brsq_SSE3 = gmx_mul_pr(beta2_SSE, gmx_and_pr(rsq_SSE3, wco_SSE3));
+#ifndef NBNXN_CUTOFF_USE_BLENDV
+ brsq_S0 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S0, wco_S0));
+ brsq_S1 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S1, wco_S1));
+ brsq_S2 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S2, wco_S2));
+ brsq_S3 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S3, wco_S3));
#else
/* Strangely, putting mul on a separate line is slower (icc 13) */
- brsq_SSE0 = gmx_mul_pr(beta2_SSE, gmx_blendv_pr(rsq_SSE0, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE0)));
- brsq_SSE1 = gmx_mul_pr(beta2_SSE, gmx_blendv_pr(rsq_SSE1, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE1)));
- brsq_SSE2 = gmx_mul_pr(beta2_SSE, gmx_blendv_pr(rsq_SSE2, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE2)));
- brsq_SSE3 = gmx_mul_pr(beta2_SSE, gmx_blendv_pr(rsq_SSE3, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE3)));
-#endif
- ewcorr_SSE0 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE0), beta_SSE);
- ewcorr_SSE1 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE1), beta_SSE);
- ewcorr_SSE2 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE2), beta_SSE);
- ewcorr_SSE3 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE3), beta_SSE);
- frcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_add_pr(rinv_ex_SSE0, gmx_mul_pr(ewcorr_SSE0, brsq_SSE0)));
- frcoul_SSE1 = gmx_mul_pr(qq_SSE1, gmx_add_pr(rinv_ex_SSE1, gmx_mul_pr(ewcorr_SSE1, brsq_SSE1)));
- frcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_add_pr(rinv_ex_SSE2, gmx_mul_pr(ewcorr_SSE2, brsq_SSE2)));
- frcoul_SSE3 = gmx_mul_pr(qq_SSE3, gmx_add_pr(rinv_ex_SSE3, gmx_mul_pr(ewcorr_SSE3, brsq_SSE3)));
+ brsq_S0 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S0, zero_S, gmx_sub_pr(rc2_S, rsq_S0)));
+ brsq_S1 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S1, zero_S, gmx_sub_pr(rc2_S, rsq_S1)));
+ brsq_S2 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S2, zero_S, gmx_sub_pr(rc2_S, rsq_S2)));
+ brsq_S3 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S3, zero_S, gmx_sub_pr(rc2_S, rsq_S3)));
+#endif
+ ewcorr_S0 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S0), beta_S);
+ ewcorr_S1 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S1), beta_S);
+ ewcorr_S2 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S2), beta_S);
+ ewcorr_S3 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S3), beta_S);
+ frcoul_S0 = gmx_mul_pr(qq_S0, gmx_add_pr(rinv_ex_S0, gmx_mul_pr(ewcorr_S0, brsq_S0)));
+ frcoul_S1 = gmx_mul_pr(qq_S1, gmx_add_pr(rinv_ex_S1, gmx_mul_pr(ewcorr_S1, brsq_S1)));
+ frcoul_S2 = gmx_mul_pr(qq_S2, gmx_add_pr(rinv_ex_S2, gmx_mul_pr(ewcorr_S2, brsq_S2)));
+ frcoul_S3 = gmx_mul_pr(qq_S3, gmx_add_pr(rinv_ex_S3, gmx_mul_pr(ewcorr_S3, brsq_S3)));
#ifdef CALC_ENERGIES
- vc_sub_SSE0 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE0), beta_SSE);
- vc_sub_SSE1 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE1), beta_SSE);
- vc_sub_SSE2 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE2), beta_SSE);
- vc_sub_SSE3 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE3), beta_SSE);
+ vc_sub_S0 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S0), beta_S);
+ vc_sub_S1 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S1), beta_S);
+ vc_sub_S2 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S2), beta_S);
+ vc_sub_S3 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S3), beta_S);
#endif
#endif /* CALC_COUL_EWALD */
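/* Reading the lines above: with brsq = (beta*r)^2, masked for the cut-off
 * to avoid overflow in gmx_pmecorrF/V, and ewcorr = beta*gmx_pmecorrF(brsq),
 * the Ewald real-space force is assembled as F*r = qq*(1/r + ewcorr*brsq),
 * i.e. the mesh correction enters through the polynomial approximation in
 * gmx_pmecorrF instead of a table lookup.
 */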
#ifdef CALC_COUL_TAB
/* Electrostatic interactions */
- r_SSE0 = gmx_mul_pr(rsq_SSE0, rinv_SSE0);
- r_SSE1 = gmx_mul_pr(rsq_SSE1, rinv_SSE1);
- r_SSE2 = gmx_mul_pr(rsq_SSE2, rinv_SSE2);
- r_SSE3 = gmx_mul_pr(rsq_SSE3, rinv_SSE3);
+ r_S0 = gmx_mul_pr(rsq_S0, rinv_S0);
+ r_S1 = gmx_mul_pr(rsq_S1, rinv_S1);
+ r_S2 = gmx_mul_pr(rsq_S2, rinv_S2);
+ r_S3 = gmx_mul_pr(rsq_S3, rinv_S3);
/* Convert r to scaled table units */
- rs_SSE0 = gmx_mul_pr(r_SSE0, invtsp_SSE);
- rs_SSE1 = gmx_mul_pr(r_SSE1, invtsp_SSE);
- rs_SSE2 = gmx_mul_pr(r_SSE2, invtsp_SSE);
- rs_SSE3 = gmx_mul_pr(r_SSE3, invtsp_SSE);
+ rs_S0 = gmx_mul_pr(r_S0, invtsp_S);
+ rs_S1 = gmx_mul_pr(r_S1, invtsp_S);
+ rs_S2 = gmx_mul_pr(r_S2, invtsp_S);
+ rs_S3 = gmx_mul_pr(r_S3, invtsp_S);
/* Truncate scaled r to an int */
- ti_SSE0 = gmx_cvttpr_epi32(rs_SSE0);
- ti_SSE1 = gmx_cvttpr_epi32(rs_SSE1);
- ti_SSE2 = gmx_cvttpr_epi32(rs_SSE2);
- ti_SSE3 = gmx_cvttpr_epi32(rs_SSE3);
-#ifdef GMX_X86_SSE4_1
+ ti_S0 = gmx_cvttpr_epi32(rs_S0);
+ ti_S1 = gmx_cvttpr_epi32(rs_S1);
+ ti_S2 = gmx_cvttpr_epi32(rs_S2);
+ ti_S3 = gmx_cvttpr_epi32(rs_S3);
+#ifdef GMX_HAVE_SIMD_FLOOR
-/* SSE4.1 floor is faster than gmx_cvtepi32_ps int->float cast */
+/* SIMD floor is faster than the gmx_cvtepi32_pr int->real cast */
- rf_SSE0 = gmx_floor_pr(rs_SSE0);
- rf_SSE1 = gmx_floor_pr(rs_SSE1);
- rf_SSE2 = gmx_floor_pr(rs_SSE2);
- rf_SSE3 = gmx_floor_pr(rs_SSE3);
+ rf_S0 = gmx_floor_pr(rs_S0);
+ rf_S1 = gmx_floor_pr(rs_S1);
+ rf_S2 = gmx_floor_pr(rs_S2);
+ rf_S3 = gmx_floor_pr(rs_S3);
#else
- rf_SSE0 = gmx_cvtepi32_pr(ti_SSE0);
- rf_SSE1 = gmx_cvtepi32_pr(ti_SSE1);
- rf_SSE2 = gmx_cvtepi32_pr(ti_SSE2);
- rf_SSE3 = gmx_cvtepi32_pr(ti_SSE3);
+ rf_S0 = gmx_cvtepi32_pr(ti_S0);
+ rf_S1 = gmx_cvtepi32_pr(ti_S1);
+ rf_S2 = gmx_cvtepi32_pr(ti_S2);
+ rf_S3 = gmx_cvtepi32_pr(ti_S3);
#endif
- frac_SSE0 = gmx_sub_pr(rs_SSE0, rf_SSE0);
- frac_SSE1 = gmx_sub_pr(rs_SSE1, rf_SSE1);
- frac_SSE2 = gmx_sub_pr(rs_SSE2, rf_SSE2);
- frac_SSE3 = gmx_sub_pr(rs_SSE3, rf_SSE3);
+ frac_S0 = gmx_sub_pr(rs_S0, rf_S0);
+ frac_S1 = gmx_sub_pr(rs_S1, rf_S1);
+ frac_S2 = gmx_sub_pr(rs_S2, rf_S2);
+ frac_S3 = gmx_sub_pr(rs_S3, rf_S3);
/* Load and interpolate table forces and possibly energies.
* Force and energy can be combined in one table, stride 4: FDV0
* Currently single precision uses FDV0, double F and V.
*/
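/* Presumed FDV0 layout (an assumption based on the stride-4 description):
 * table entry i stores {F[i], F[i+1]-F[i], V[i], 0}, so ctab0 (F), ctab1
 * (the difference used for linear interpolation) and ctabv (V) all come
 * from one aligned 4-wide load per lane.
 */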
#ifndef CALC_ENERGIES
- load_table_f(tab_coul_F, ti_SSE0, ti0, ctab0_SSE0, ctab1_SSE0);
- load_table_f(tab_coul_F, ti_SSE1, ti1, ctab0_SSE1, ctab1_SSE1);
- load_table_f(tab_coul_F, ti_SSE2, ti2, ctab0_SSE2, ctab1_SSE2);
- load_table_f(tab_coul_F, ti_SSE3, ti3, ctab0_SSE3, ctab1_SSE3);
+ load_table_f(tab_coul_F, ti_S0, ti0, ctab0_S0, ctab1_S0);
+ load_table_f(tab_coul_F, ti_S1, ti1, ctab0_S1, ctab1_S1);
+ load_table_f(tab_coul_F, ti_S2, ti2, ctab0_S2, ctab1_S2);
+ load_table_f(tab_coul_F, ti_S3, ti3, ctab0_S3, ctab1_S3);
#else
#ifdef TAB_FDV0
- load_table_f_v(tab_coul_F, ti_SSE0, ti0, ctab0_SSE0, ctab1_SSE0, ctabv_SSE0);
- load_table_f_v(tab_coul_F, ti_SSE1, ti1, ctab0_SSE1, ctab1_SSE1, ctabv_SSE1);
- load_table_f_v(tab_coul_F, ti_SSE2, ti2, ctab0_SSE2, ctab1_SSE2, ctabv_SSE2);
- load_table_f_v(tab_coul_F, ti_SSE3, ti3, ctab0_SSE3, ctab1_SSE3, ctabv_SSE3);
+ load_table_f_v(tab_coul_F, ti_S0, ti0, ctab0_S0, ctab1_S0, ctabv_S0);
+ load_table_f_v(tab_coul_F, ti_S1, ti1, ctab0_S1, ctab1_S1, ctabv_S1);
+ load_table_f_v(tab_coul_F, ti_S2, ti2, ctab0_S2, ctab1_S2, ctabv_S2);
+ load_table_f_v(tab_coul_F, ti_S3, ti3, ctab0_S3, ctab1_S3, ctabv_S3);
#else
- load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE0, ti0, ctab0_SSE0, ctab1_SSE0, ctabv_SSE0);
- load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE1, ti1, ctab0_SSE1, ctab1_SSE1, ctabv_SSE1);
- load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE2, ti2, ctab0_SSE2, ctab1_SSE2, ctabv_SSE2);
- load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE3, ti3, ctab0_SSE3, ctab1_SSE3, ctabv_SSE3);
+ load_table_f_v(tab_coul_F, tab_coul_V, ti_S0, ti0, ctab0_S0, ctab1_S0, ctabv_S0);
+ load_table_f_v(tab_coul_F, tab_coul_V, ti_S1, ti1, ctab0_S1, ctab1_S1, ctabv_S1);
+ load_table_f_v(tab_coul_F, tab_coul_V, ti_S2, ti2, ctab0_S2, ctab1_S2, ctabv_S2);
+ load_table_f_v(tab_coul_F, tab_coul_V, ti_S3, ti3, ctab0_S3, ctab1_S3, ctabv_S3);
#endif
#endif
- fsub_SSE0 = gmx_add_pr(ctab0_SSE0, gmx_mul_pr(frac_SSE0, ctab1_SSE0));
- fsub_SSE1 = gmx_add_pr(ctab0_SSE1, gmx_mul_pr(frac_SSE1, ctab1_SSE1));
- fsub_SSE2 = gmx_add_pr(ctab0_SSE2, gmx_mul_pr(frac_SSE2, ctab1_SSE2));
- fsub_SSE3 = gmx_add_pr(ctab0_SSE3, gmx_mul_pr(frac_SSE3, ctab1_SSE3));
- frcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_sub_pr(rinv_ex_SSE0, gmx_mul_pr(fsub_SSE0, r_SSE0)));
- frcoul_SSE1 = gmx_mul_pr(qq_SSE1, gmx_sub_pr(rinv_ex_SSE1, gmx_mul_pr(fsub_SSE1, r_SSE1)));
- frcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_sub_pr(rinv_ex_SSE2, gmx_mul_pr(fsub_SSE2, r_SSE2)));
- frcoul_SSE3 = gmx_mul_pr(qq_SSE3, gmx_sub_pr(rinv_ex_SSE3, gmx_mul_pr(fsub_SSE3, r_SSE3)));
+ fsub_S0 = gmx_add_pr(ctab0_S0, gmx_mul_pr(frac_S0, ctab1_S0));
+ fsub_S1 = gmx_add_pr(ctab0_S1, gmx_mul_pr(frac_S1, ctab1_S1));
+ fsub_S2 = gmx_add_pr(ctab0_S2, gmx_mul_pr(frac_S2, ctab1_S2));
+ fsub_S3 = gmx_add_pr(ctab0_S3, gmx_mul_pr(frac_S3, ctab1_S3));
+ frcoul_S0 = gmx_mul_pr(qq_S0, gmx_sub_pr(rinv_ex_S0, gmx_mul_pr(fsub_S0, r_S0)));
+ frcoul_S1 = gmx_mul_pr(qq_S1, gmx_sub_pr(rinv_ex_S1, gmx_mul_pr(fsub_S1, r_S1)));
+ frcoul_S2 = gmx_mul_pr(qq_S2, gmx_sub_pr(rinv_ex_S2, gmx_mul_pr(fsub_S2, r_S2)));
+ frcoul_S3 = gmx_mul_pr(qq_S3, gmx_sub_pr(rinv_ex_S3, gmx_mul_pr(fsub_S3, r_S3)));
#ifdef CALC_ENERGIES
- vc_sub_SSE0 = gmx_add_pr(ctabv_SSE0, gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE, frac_SSE0), gmx_add_pr(ctab0_SSE0, fsub_SSE0)));
- vc_sub_SSE1 = gmx_add_pr(ctabv_SSE1, gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE, frac_SSE1), gmx_add_pr(ctab0_SSE1, fsub_SSE1)));
- vc_sub_SSE2 = gmx_add_pr(ctabv_SSE2, gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE, frac_SSE2), gmx_add_pr(ctab0_SSE2, fsub_SSE2)));
- vc_sub_SSE3 = gmx_add_pr(ctabv_SSE3, gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE, frac_SSE3), gmx_add_pr(ctab0_SSE3, fsub_SSE3)));
+ vc_sub_S0 = gmx_add_pr(ctabv_S0, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S0), gmx_add_pr(ctab0_S0, fsub_S0)));
+ vc_sub_S1 = gmx_add_pr(ctabv_S1, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S1), gmx_add_pr(ctab0_S1, fsub_S1)));
+ vc_sub_S2 = gmx_add_pr(ctabv_S2, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S2), gmx_add_pr(ctab0_S2, fsub_S2)));
+ vc_sub_S3 = gmx_add_pr(ctabv_S3, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S3), gmx_add_pr(ctab0_S3, fsub_S3)));
#endif
#endif /* CALC_COUL_TAB */
#ifndef NO_SHIFT_EWALD
/* Add Ewald potential shift to vc_sub for convenience */
#ifdef CHECK_EXCLS
- vc_sub_SSE0 = gmx_add_pr(vc_sub_SSE0, gmx_and_pr(sh_ewald_SSE, int_SSE0));
- vc_sub_SSE1 = gmx_add_pr(vc_sub_SSE1, gmx_and_pr(sh_ewald_SSE, int_SSE1));
- vc_sub_SSE2 = gmx_add_pr(vc_sub_SSE2, gmx_and_pr(sh_ewald_SSE, int_SSE2));
- vc_sub_SSE3 = gmx_add_pr(vc_sub_SSE3, gmx_and_pr(sh_ewald_SSE, int_SSE3));
+ vc_sub_S0 = gmx_add_pr(vc_sub_S0, gmx_blendzero_pr(sh_ewald_S, int_S0));
+ vc_sub_S1 = gmx_add_pr(vc_sub_S1, gmx_blendzero_pr(sh_ewald_S, int_S1));
+ vc_sub_S2 = gmx_add_pr(vc_sub_S2, gmx_blendzero_pr(sh_ewald_S, int_S2));
+ vc_sub_S3 = gmx_add_pr(vc_sub_S3, gmx_blendzero_pr(sh_ewald_S, int_S3));
#else
- vc_sub_SSE0 = gmx_add_pr(vc_sub_SSE0, sh_ewald_SSE);
- vc_sub_SSE1 = gmx_add_pr(vc_sub_SSE1, sh_ewald_SSE);
- vc_sub_SSE2 = gmx_add_pr(vc_sub_SSE2, sh_ewald_SSE);
- vc_sub_SSE3 = gmx_add_pr(vc_sub_SSE3, sh_ewald_SSE);
+ vc_sub_S0 = gmx_add_pr(vc_sub_S0, sh_ewald_S);
+ vc_sub_S1 = gmx_add_pr(vc_sub_S1, sh_ewald_S);
+ vc_sub_S2 = gmx_add_pr(vc_sub_S2, sh_ewald_S);
+ vc_sub_S3 = gmx_add_pr(vc_sub_S3, sh_ewald_S);
#endif
#endif
- vcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_sub_pr(rinv_ex_SSE0, vc_sub_SSE0));
- vcoul_SSE1 = gmx_mul_pr(qq_SSE1, gmx_sub_pr(rinv_ex_SSE1, vc_sub_SSE1));
- vcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_sub_pr(rinv_ex_SSE2, vc_sub_SSE2));
- vcoul_SSE3 = gmx_mul_pr(qq_SSE3, gmx_sub_pr(rinv_ex_SSE3, vc_sub_SSE3));
+ vcoul_S0 = gmx_mul_pr(qq_S0, gmx_sub_pr(rinv_ex_S0, vc_sub_S0));
+ vcoul_S1 = gmx_mul_pr(qq_S1, gmx_sub_pr(rinv_ex_S1, vc_sub_S1));
+ vcoul_S2 = gmx_mul_pr(qq_S2, gmx_sub_pr(rinv_ex_S2, vc_sub_S2));
+ vcoul_S3 = gmx_mul_pr(qq_S3, gmx_sub_pr(rinv_ex_S3, vc_sub_S3));
#endif
#ifdef CALC_ENERGIES
/* Mask energy for cut-off and diagonal */
- vcoul_SSE0 = gmx_and_pr(vcoul_SSE0, wco_SSE0);
- vcoul_SSE1 = gmx_and_pr(vcoul_SSE1, wco_SSE1);
- vcoul_SSE2 = gmx_and_pr(vcoul_SSE2, wco_SSE2);
- vcoul_SSE3 = gmx_and_pr(vcoul_SSE3, wco_SSE3);
+ vcoul_S0 = gmx_blendzero_pr(vcoul_S0, wco_S0);
+ vcoul_S1 = gmx_blendzero_pr(vcoul_S1, wco_S1);
+ vcoul_S2 = gmx_blendzero_pr(vcoul_S2, wco_S2);
+ vcoul_S3 = gmx_blendzero_pr(vcoul_S3, wco_S3);
#endif
#endif /* CALC_COULOMB */
/* Lennard-Jones interaction */
#ifdef VDW_CUTOFF_CHECK
- wco_vdw_SSE0 = gmx_cmplt_pr(rsq_SSE0, rcvdw2_SSE);
- wco_vdw_SSE1 = gmx_cmplt_pr(rsq_SSE1, rcvdw2_SSE);
+ wco_vdw_S0 = gmx_cmplt_pr(rsq_S0, rcvdw2_S);
+ wco_vdw_S1 = gmx_cmplt_pr(rsq_S1, rcvdw2_S);
#ifndef HALF_LJ
- wco_vdw_SSE2 = gmx_cmplt_pr(rsq_SSE2, rcvdw2_SSE);
- wco_vdw_SSE3 = gmx_cmplt_pr(rsq_SSE3, rcvdw2_SSE);
+ wco_vdw_S2 = gmx_cmplt_pr(rsq_S2, rcvdw2_S);
+ wco_vdw_S3 = gmx_cmplt_pr(rsq_S3, rcvdw2_S);
#endif
#else
/* Same cut-off for Coulomb and VdW, reuse the registers */
-#define wco_vdw_SSE0 wco_SSE0
-#define wco_vdw_SSE1 wco_SSE1
-#define wco_vdw_SSE2 wco_SSE2
-#define wco_vdw_SSE3 wco_SSE3
+#define wco_vdw_S0 wco_S0
+#define wco_vdw_S1 wco_S1
+#define wco_vdw_S2 wco_S2
+#define wco_vdw_S3 wco_S3
#endif
#ifndef LJ_COMB_LB
- rinvsix_SSE0 = gmx_mul_pr(rinvsq_SSE0, gmx_mul_pr(rinvsq_SSE0, rinvsq_SSE0));
- rinvsix_SSE1 = gmx_mul_pr(rinvsq_SSE1, gmx_mul_pr(rinvsq_SSE1, rinvsq_SSE1));
+ rinvsix_S0 = gmx_mul_pr(rinvsq_S0, gmx_mul_pr(rinvsq_S0, rinvsq_S0));
+ rinvsix_S1 = gmx_mul_pr(rinvsq_S1, gmx_mul_pr(rinvsq_S1, rinvsq_S1));
#ifdef EXCL_FORCES
- rinvsix_SSE0 = gmx_and_pr(rinvsix_SSE0, int_SSE0);
- rinvsix_SSE1 = gmx_and_pr(rinvsix_SSE1, int_SSE1);
+ rinvsix_S0 = gmx_blendzero_pr(rinvsix_S0, int_S0);
+ rinvsix_S1 = gmx_blendzero_pr(rinvsix_S1, int_S1);
#endif
#ifndef HALF_LJ
- rinvsix_SSE2 = gmx_mul_pr(rinvsq_SSE2, gmx_mul_pr(rinvsq_SSE2, rinvsq_SSE2));
- rinvsix_SSE3 = gmx_mul_pr(rinvsq_SSE3, gmx_mul_pr(rinvsq_SSE3, rinvsq_SSE3));
+ rinvsix_S2 = gmx_mul_pr(rinvsq_S2, gmx_mul_pr(rinvsq_S2, rinvsq_S2));
+ rinvsix_S3 = gmx_mul_pr(rinvsq_S3, gmx_mul_pr(rinvsq_S3, rinvsq_S3));
#ifdef EXCL_FORCES
- rinvsix_SSE2 = gmx_and_pr(rinvsix_SSE2, int_SSE2);
- rinvsix_SSE3 = gmx_and_pr(rinvsix_SSE3, int_SSE3);
+ rinvsix_S2 = gmx_blendzero_pr(rinvsix_S2, int_S2);
+ rinvsix_S3 = gmx_blendzero_pr(rinvsix_S3, int_S3);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
- rinvsix_SSE0 = gmx_and_pr(rinvsix_SSE0, wco_vdw_SSE0);
- rinvsix_SSE1 = gmx_and_pr(rinvsix_SSE1, wco_vdw_SSE1);
+ rinvsix_S0 = gmx_blendzero_pr(rinvsix_S0, wco_vdw_S0);
+ rinvsix_S1 = gmx_blendzero_pr(rinvsix_S1, wco_vdw_S1);
#ifndef HALF_LJ
- rinvsix_SSE2 = gmx_and_pr(rinvsix_SSE2, wco_vdw_SSE2);
- rinvsix_SSE3 = gmx_and_pr(rinvsix_SSE3, wco_vdw_SSE3);
+ rinvsix_S2 = gmx_blendzero_pr(rinvsix_S2, wco_vdw_S2);
+ rinvsix_S3 = gmx_blendzero_pr(rinvsix_S3, wco_vdw_S3);
#endif
#endif
- FrLJ6_SSE0 = gmx_mul_pr(c6_SSE0, rinvsix_SSE0);
- FrLJ6_SSE1 = gmx_mul_pr(c6_SSE1, rinvsix_SSE1);
+ FrLJ6_S0 = gmx_mul_pr(c6_S0, rinvsix_S0);
+ FrLJ6_S1 = gmx_mul_pr(c6_S1, rinvsix_S1);
#ifndef HALF_LJ
- FrLJ6_SSE2 = gmx_mul_pr(c6_SSE2, rinvsix_SSE2);
- FrLJ6_SSE3 = gmx_mul_pr(c6_SSE3, rinvsix_SSE3);
+ FrLJ6_S2 = gmx_mul_pr(c6_S2, rinvsix_S2);
+ FrLJ6_S3 = gmx_mul_pr(c6_S3, rinvsix_S3);
#endif
- FrLJ12_SSE0 = gmx_mul_pr(c12_SSE0, gmx_mul_pr(rinvsix_SSE0, rinvsix_SSE0));
- FrLJ12_SSE1 = gmx_mul_pr(c12_SSE1, gmx_mul_pr(rinvsix_SSE1, rinvsix_SSE1));
+ FrLJ12_S0 = gmx_mul_pr(c12_S0, gmx_mul_pr(rinvsix_S0, rinvsix_S0));
+ FrLJ12_S1 = gmx_mul_pr(c12_S1, gmx_mul_pr(rinvsix_S1, rinvsix_S1));
#ifndef HALF_LJ
- FrLJ12_SSE2 = gmx_mul_pr(c12_SSE2, gmx_mul_pr(rinvsix_SSE2, rinvsix_SSE2));
- FrLJ12_SSE3 = gmx_mul_pr(c12_SSE3, gmx_mul_pr(rinvsix_SSE3, rinvsix_SSE3));
+ FrLJ12_S2 = gmx_mul_pr(c12_S2, gmx_mul_pr(rinvsix_S2, rinvsix_S2));
+ FrLJ12_S3 = gmx_mul_pr(c12_S3, gmx_mul_pr(rinvsix_S3, rinvsix_S3));
#endif
#endif /* not LJ_COMB_LB */
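/* Note: the 1/6 and 1/12 factors in the energy expressions below imply that
 * the c6/c12 parameters used here are pre-multiplied by 6 and 12, so that
 *   FrLJ6  = 6*C6/r^6,  FrLJ12 = 12*C12/r^12
 * and fscal = rinvsq*(FrLJ12 - FrLJ6) directly gives the LJ force over r.
 */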
#ifdef LJ_COMB_LB
- sir_SSE0 = gmx_mul_pr(sig_SSE0, rinv_SSE0);
- sir_SSE1 = gmx_mul_pr(sig_SSE1, rinv_SSE1);
+ sir_S0 = gmx_mul_pr(sig_S0, rinv_S0);
+ sir_S1 = gmx_mul_pr(sig_S1, rinv_S1);
#ifndef HALF_LJ
- sir_SSE2 = gmx_mul_pr(sig_SSE2, rinv_SSE2);
- sir_SSE3 = gmx_mul_pr(sig_SSE3, rinv_SSE3);
+ sir_S2 = gmx_mul_pr(sig_S2, rinv_S2);
+ sir_S3 = gmx_mul_pr(sig_S3, rinv_S3);
#endif
- sir2_SSE0 = gmx_mul_pr(sir_SSE0, sir_SSE0);
- sir2_SSE1 = gmx_mul_pr(sir_SSE1, sir_SSE1);
+ sir2_S0 = gmx_mul_pr(sir_S0, sir_S0);
+ sir2_S1 = gmx_mul_pr(sir_S1, sir_S1);
#ifndef HALF_LJ
- sir2_SSE2 = gmx_mul_pr(sir_SSE2, sir_SSE2);
- sir2_SSE3 = gmx_mul_pr(sir_SSE3, sir_SSE3);
+ sir2_S2 = gmx_mul_pr(sir_S2, sir_S2);
+ sir2_S3 = gmx_mul_pr(sir_S3, sir_S3);
#endif
- sir6_SSE0 = gmx_mul_pr(sir2_SSE0, gmx_mul_pr(sir2_SSE0, sir2_SSE0));
- sir6_SSE1 = gmx_mul_pr(sir2_SSE1, gmx_mul_pr(sir2_SSE1, sir2_SSE1));
+ sir6_S0 = gmx_mul_pr(sir2_S0, gmx_mul_pr(sir2_S0, sir2_S0));
+ sir6_S1 = gmx_mul_pr(sir2_S1, gmx_mul_pr(sir2_S1, sir2_S1));
#ifdef EXCL_FORCES
- sir6_SSE0 = gmx_and_pr(sir6_SSE0, int_SSE0);
- sir6_SSE1 = gmx_and_pr(sir6_SSE1, int_SSE1);
+ sir6_S0 = gmx_blendzero_pr(sir6_S0, int_S0);
+ sir6_S1 = gmx_blendzero_pr(sir6_S1, int_S1);
#endif
#ifndef HALF_LJ
- sir6_SSE2 = gmx_mul_pr(sir2_SSE2, gmx_mul_pr(sir2_SSE2, sir2_SSE2));
- sir6_SSE3 = gmx_mul_pr(sir2_SSE3, gmx_mul_pr(sir2_SSE3, sir2_SSE3));
+ sir6_S2 = gmx_mul_pr(sir2_S2, gmx_mul_pr(sir2_S2, sir2_S2));
+ sir6_S3 = gmx_mul_pr(sir2_S3, gmx_mul_pr(sir2_S3, sir2_S3));
#ifdef EXCL_FORCES
- sir6_SSE2 = gmx_and_pr(sir6_SSE2, int_SSE2);
- sir6_SSE3 = gmx_and_pr(sir6_SSE3, int_SSE3);
+ sir6_S2 = gmx_blendzero_pr(sir6_S2, int_S2);
+ sir6_S3 = gmx_blendzero_pr(sir6_S3, int_S3);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
- sir6_SSE0 = gmx_and_pr(sir6_SSE0, wco_vdw_SSE0);
- sir6_SSE1 = gmx_and_pr(sir6_SSE1, wco_vdw_SSE1);
+ sir6_S0 = gmx_blendzero_pr(sir6_S0, wco_vdw_S0);
+ sir6_S1 = gmx_blendzero_pr(sir6_S1, wco_vdw_S1);
#ifndef HALF_LJ
- sir6_SSE2 = gmx_and_pr(sir6_SSE2, wco_vdw_SSE2);
- sir6_SSE3 = gmx_and_pr(sir6_SSE3, wco_vdw_SSE3);
+ sir6_S2 = gmx_blendzero_pr(sir6_S2, wco_vdw_S2);
+ sir6_S3 = gmx_blendzero_pr(sir6_S3, wco_vdw_S3);
#endif
#endif
- FrLJ6_SSE0 = gmx_mul_pr(eps_SSE0, sir6_SSE0);
- FrLJ6_SSE1 = gmx_mul_pr(eps_SSE1, sir6_SSE1);
+ FrLJ6_S0 = gmx_mul_pr(eps_S0, sir6_S0);
+ FrLJ6_S1 = gmx_mul_pr(eps_S1, sir6_S1);
#ifndef HALF_LJ
- FrLJ6_SSE2 = gmx_mul_pr(eps_SSE2, sir6_SSE2);
- FrLJ6_SSE3 = gmx_mul_pr(eps_SSE3, sir6_SSE3);
+ FrLJ6_S2 = gmx_mul_pr(eps_S2, sir6_S2);
+ FrLJ6_S3 = gmx_mul_pr(eps_S3, sir6_S3);
#endif
- FrLJ12_SSE0 = gmx_mul_pr(FrLJ6_SSE0, sir6_SSE0);
- FrLJ12_SSE1 = gmx_mul_pr(FrLJ6_SSE1, sir6_SSE1);
+ FrLJ12_S0 = gmx_mul_pr(FrLJ6_S0, sir6_S0);
+ FrLJ12_S1 = gmx_mul_pr(FrLJ6_S1, sir6_S1);
#ifndef HALF_LJ
- FrLJ12_SSE2 = gmx_mul_pr(FrLJ6_SSE2, sir6_SSE2);
- FrLJ12_SSE3 = gmx_mul_pr(FrLJ6_SSE3, sir6_SSE3);
+ FrLJ12_S2 = gmx_mul_pr(FrLJ6_S2, sir6_S2);
+ FrLJ12_S3 = gmx_mul_pr(FrLJ6_S3, sir6_S3);
#endif
#if defined CALC_ENERGIES
/* We need C6 and C12 to calculate the LJ potential shift */
- sig2_SSE0 = gmx_mul_pr(sig_SSE0, sig_SSE0);
- sig2_SSE1 = gmx_mul_pr(sig_SSE1, sig_SSE1);
+ sig2_S0 = gmx_mul_pr(sig_S0, sig_S0);
+ sig2_S1 = gmx_mul_pr(sig_S1, sig_S1);
#ifndef HALF_LJ
- sig2_SSE2 = gmx_mul_pr(sig_SSE2, sig_SSE2);
- sig2_SSE3 = gmx_mul_pr(sig_SSE3, sig_SSE3);
+ sig2_S2 = gmx_mul_pr(sig_S2, sig_S2);
+ sig2_S3 = gmx_mul_pr(sig_S3, sig_S3);
#endif
- sig6_SSE0 = gmx_mul_pr(sig2_SSE0, gmx_mul_pr(sig2_SSE0, sig2_SSE0));
- sig6_SSE1 = gmx_mul_pr(sig2_SSE1, gmx_mul_pr(sig2_SSE1, sig2_SSE1));
+ sig6_S0 = gmx_mul_pr(sig2_S0, gmx_mul_pr(sig2_S0, sig2_S0));
+ sig6_S1 = gmx_mul_pr(sig2_S1, gmx_mul_pr(sig2_S1, sig2_S1));
#ifndef HALF_LJ
- sig6_SSE2 = gmx_mul_pr(sig2_SSE2, gmx_mul_pr(sig2_SSE2, sig2_SSE2));
- sig6_SSE3 = gmx_mul_pr(sig2_SSE3, gmx_mul_pr(sig2_SSE3, sig2_SSE3));
+ sig6_S2 = gmx_mul_pr(sig2_S2, gmx_mul_pr(sig2_S2, sig2_S2));
+ sig6_S3 = gmx_mul_pr(sig2_S3, gmx_mul_pr(sig2_S3, sig2_S3));
#endif
- c6_SSE0 = gmx_mul_pr(eps_SSE0, sig6_SSE0);
- c6_SSE1 = gmx_mul_pr(eps_SSE1, sig6_SSE1);
+ c6_S0 = gmx_mul_pr(eps_S0, sig6_S0);
+ c6_S1 = gmx_mul_pr(eps_S1, sig6_S1);
#ifndef HALF_LJ
- c6_SSE2 = gmx_mul_pr(eps_SSE2, sig6_SSE2);
- c6_SSE3 = gmx_mul_pr(eps_SSE3, sig6_SSE3);
+ c6_S2 = gmx_mul_pr(eps_S2, sig6_S2);
+ c6_S3 = gmx_mul_pr(eps_S3, sig6_S3);
#endif
- c12_SSE0 = gmx_mul_pr(c6_SSE0, sig6_SSE0);
- c12_SSE1 = gmx_mul_pr(c6_SSE1, sig6_SSE1);
+ c12_S0 = gmx_mul_pr(c6_S0, sig6_S0);
+ c12_S1 = gmx_mul_pr(c6_S1, sig6_S1);
#ifndef HALF_LJ
- c12_SSE2 = gmx_mul_pr(c6_SSE2, sig6_SSE2);
- c12_SSE3 = gmx_mul_pr(c6_SSE3, sig6_SSE3);
+ c12_S2 = gmx_mul_pr(c6_S2, sig6_S2);
+ c12_S3 = gmx_mul_pr(c6_S3, sig6_S3);
#endif
#endif
#endif /* LJ_COMB_LB */
#ifdef CALC_COULOMB
#ifndef ENERGY_GROUPS
- vctotSSE = gmx_add_pr(vctotSSE, gmx_sum4_pr(vcoul_SSE0, vcoul_SSE1, vcoul_SSE2, vcoul_SSE3));
+ vctot_S = gmx_add_pr(vctot_S, gmx_sum4_pr(vcoul_S0, vcoul_S1, vcoul_S2, vcoul_S3));
#else
- add_ener_grp(vcoul_SSE0, vctp[0], egp_jj);
- add_ener_grp(vcoul_SSE1, vctp[1], egp_jj);
- add_ener_grp(vcoul_SSE2, vctp[2], egp_jj);
- add_ener_grp(vcoul_SSE3, vctp[3], egp_jj);
+ add_ener_grp(vcoul_S0, vctp[0], egp_jj);
+ add_ener_grp(vcoul_S1, vctp[1], egp_jj);
+ add_ener_grp(vcoul_S2, vctp[2], egp_jj);
+ add_ener_grp(vcoul_S3, vctp[3], egp_jj);
#endif
#endif
#ifdef CALC_LJ
/* Calculate the LJ energies */
- VLJ6_SSE0 = gmx_mul_pr(sixthSSE, gmx_sub_pr(FrLJ6_SSE0, gmx_mul_pr(c6_SSE0, sh_invrc6_SSE)));
- VLJ6_SSE1 = gmx_mul_pr(sixthSSE, gmx_sub_pr(FrLJ6_SSE1, gmx_mul_pr(c6_SSE1, sh_invrc6_SSE)));
+ VLJ6_S0 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S0, gmx_mul_pr(c6_S0, sh_invrc6_S)));
+ VLJ6_S1 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S1, gmx_mul_pr(c6_S1, sh_invrc6_S)));
#ifndef HALF_LJ
- VLJ6_SSE2 = gmx_mul_pr(sixthSSE, gmx_sub_pr(FrLJ6_SSE2, gmx_mul_pr(c6_SSE2, sh_invrc6_SSE)));
- VLJ6_SSE3 = gmx_mul_pr(sixthSSE, gmx_sub_pr(FrLJ6_SSE3, gmx_mul_pr(c6_SSE3, sh_invrc6_SSE)));
+ VLJ6_S2 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S2, gmx_mul_pr(c6_S2, sh_invrc6_S)));
+ VLJ6_S3 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S3, gmx_mul_pr(c6_S3, sh_invrc6_S)));
#endif
- VLJ12_SSE0 = gmx_mul_pr(twelvethSSE, gmx_sub_pr(FrLJ12_SSE0, gmx_mul_pr(c12_SSE0, sh_invrc12_SSE)));
- VLJ12_SSE1 = gmx_mul_pr(twelvethSSE, gmx_sub_pr(FrLJ12_SSE1, gmx_mul_pr(c12_SSE1, sh_invrc12_SSE)));
+ VLJ12_S0 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S0, gmx_mul_pr(c12_S0, sh_invrc12_S)));
+ VLJ12_S1 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S1, gmx_mul_pr(c12_S1, sh_invrc12_S)));
#ifndef HALF_LJ
- VLJ12_SSE2 = gmx_mul_pr(twelvethSSE, gmx_sub_pr(FrLJ12_SSE2, gmx_mul_pr(c12_SSE2, sh_invrc12_SSE)));
- VLJ12_SSE3 = gmx_mul_pr(twelvethSSE, gmx_sub_pr(FrLJ12_SSE3, gmx_mul_pr(c12_SSE3, sh_invrc12_SSE)));
+ VLJ12_S2 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S2, gmx_mul_pr(c12_S2, sh_invrc12_S)));
+ VLJ12_S3 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S3, gmx_mul_pr(c12_S3, sh_invrc12_S)));
#endif
- VLJ_SSE0 = gmx_sub_pr(VLJ12_SSE0, VLJ6_SSE0);
- VLJ_SSE1 = gmx_sub_pr(VLJ12_SSE1, VLJ6_SSE1);
+ VLJ_S0 = gmx_sub_pr(VLJ12_S0, VLJ6_S0);
+ VLJ_S1 = gmx_sub_pr(VLJ12_S1, VLJ6_S1);
#ifndef HALF_LJ
- VLJ_SSE2 = gmx_sub_pr(VLJ12_SSE2, VLJ6_SSE2);
- VLJ_SSE3 = gmx_sub_pr(VLJ12_SSE3, VLJ6_SSE3);
+ VLJ_S2 = gmx_sub_pr(VLJ12_S2, VLJ6_S2);
+ VLJ_S3 = gmx_sub_pr(VLJ12_S3, VLJ6_S3);
#endif
/* The potential shift should be removed for pairs beyond cut-off */
- VLJ_SSE0 = gmx_and_pr(VLJ_SSE0, wco_vdw_SSE0);
- VLJ_SSE1 = gmx_and_pr(VLJ_SSE1, wco_vdw_SSE1);
+ VLJ_S0 = gmx_blendzero_pr(VLJ_S0, wco_vdw_S0);
+ VLJ_S1 = gmx_blendzero_pr(VLJ_S1, wco_vdw_S1);
#ifndef HALF_LJ
- VLJ_SSE2 = gmx_and_pr(VLJ_SSE2, wco_vdw_SSE2);
- VLJ_SSE3 = gmx_and_pr(VLJ_SSE3, wco_vdw_SSE3);
+ VLJ_S2 = gmx_blendzero_pr(VLJ_S2, wco_vdw_S2);
+ VLJ_S3 = gmx_blendzero_pr(VLJ_S3, wco_vdw_S3);
#endif
#ifdef CHECK_EXCLS
/* The potential shift should be removed for excluded pairs */
- VLJ_SSE0 = gmx_and_pr(VLJ_SSE0, int_SSE0);
- VLJ_SSE1 = gmx_and_pr(VLJ_SSE1, int_SSE1);
+ VLJ_S0 = gmx_blendzero_pr(VLJ_S0, int_S0);
+ VLJ_S1 = gmx_blendzero_pr(VLJ_S1, int_S1);
#ifndef HALF_LJ
- VLJ_SSE2 = gmx_and_pr(VLJ_SSE2, int_SSE2);
- VLJ_SSE3 = gmx_and_pr(VLJ_SSE3, int_SSE3);
+ VLJ_S2 = gmx_blendzero_pr(VLJ_S2, int_S2);
+ VLJ_S3 = gmx_blendzero_pr(VLJ_S3, int_S3);
#endif
#endif
#ifndef ENERGY_GROUPS
- VvdwtotSSE = gmx_add_pr(VvdwtotSSE,
+ Vvdwtot_S = gmx_add_pr(Vvdwtot_S,
#ifndef HALF_LJ
- gmx_sum4_pr(VLJ_SSE0, VLJ_SSE1, VLJ_SSE2, VLJ_SSE3)
+ gmx_sum4_pr(VLJ_S0, VLJ_S1, VLJ_S2, VLJ_S3)
#else
- gmx_add_pr(VLJ_SSE0, VLJ_SSE1)
+ gmx_add_pr(VLJ_S0, VLJ_S1)
#endif
- );
+ );
#else
- add_ener_grp(VLJ_SSE0, vvdwtp[0], egp_jj);
- add_ener_grp(VLJ_SSE1, vvdwtp[1], egp_jj);
+ add_ener_grp(VLJ_S0, vvdwtp[0], egp_jj);
+ add_ener_grp(VLJ_S1, vvdwtp[1], egp_jj);
#ifndef HALF_LJ
- add_ener_grp(VLJ_SSE2, vvdwtp[2], egp_jj);
- add_ener_grp(VLJ_SSE3, vvdwtp[3], egp_jj);
+ add_ener_grp(VLJ_S2, vvdwtp[2], egp_jj);
+ add_ener_grp(VLJ_S3, vvdwtp[3], egp_jj);
#endif
#endif
#endif /* CALC_LJ */
#endif /* CALC_ENERGIES */
#ifdef CALC_LJ
- fscal_SSE0 = gmx_mul_pr(rinvsq_SSE0,
+ fscal_S0 = gmx_mul_pr(rinvsq_S0,
#ifdef CALC_COULOMB
- gmx_add_pr(frcoul_SSE0,
+ gmx_add_pr(frcoul_S0,
#else
(
#endif
- gmx_sub_pr(FrLJ12_SSE0, FrLJ6_SSE0)));
- fscal_SSE1 = gmx_mul_pr(rinvsq_SSE1,
+ gmx_sub_pr(FrLJ12_S0, FrLJ6_S0)));
+ fscal_S1 = gmx_mul_pr(rinvsq_S1,
#ifdef CALC_COULOMB
- gmx_add_pr(frcoul_SSE1,
+ gmx_add_pr(frcoul_S1,
#else
(
#endif
- gmx_sub_pr(FrLJ12_SSE1, FrLJ6_SSE1)));
+ gmx_sub_pr(FrLJ12_S1, FrLJ6_S1)));
#else
- fscal_SSE0 = gmx_mul_pr(rinvsq_SSE0, frcoul_SSE0);
- fscal_SSE1 = gmx_mul_pr(rinvsq_SSE1, frcoul_SSE1);
+ fscal_S0 = gmx_mul_pr(rinvsq_S0, frcoul_S0);
+ fscal_S1 = gmx_mul_pr(rinvsq_S1, frcoul_S1);
#endif /* CALC_LJ */
#if defined CALC_LJ && !defined HALF_LJ
- fscal_SSE2 = gmx_mul_pr(rinvsq_SSE2,
+ fscal_S2 = gmx_mul_pr(rinvsq_S2,
#ifdef CALC_COULOMB
- gmx_add_pr(frcoul_SSE2,
+ gmx_add_pr(frcoul_S2,
#else
(
#endif
- gmx_sub_pr(FrLJ12_SSE2, FrLJ6_SSE2)));
- fscal_SSE3 = gmx_mul_pr(rinvsq_SSE3,
+ gmx_sub_pr(FrLJ12_S2, FrLJ6_S2)));
+ fscal_S3 = gmx_mul_pr(rinvsq_S3,
#ifdef CALC_COULOMB
- gmx_add_pr(frcoul_SSE3,
+ gmx_add_pr(frcoul_S3,
#else
(
#endif
- gmx_sub_pr(FrLJ12_SSE3, FrLJ6_SSE3)));
+ gmx_sub_pr(FrLJ12_S3, FrLJ6_S3)));
#else
/* Atoms 2 and 3 don't have LJ, so only add Coulomb forces */
- fscal_SSE2 = gmx_mul_pr(rinvsq_SSE2, frcoul_SSE2);
- fscal_SSE3 = gmx_mul_pr(rinvsq_SSE3, frcoul_SSE3);
+ fscal_S2 = gmx_mul_pr(rinvsq_S2, frcoul_S2);
+ fscal_S3 = gmx_mul_pr(rinvsq_S3, frcoul_S3);
#endif
/* Calculate temporary vectorial force */
- tx_SSE0 = gmx_mul_pr(fscal_SSE0, dx_SSE0);
- tx_SSE1 = gmx_mul_pr(fscal_SSE1, dx_SSE1);
- tx_SSE2 = gmx_mul_pr(fscal_SSE2, dx_SSE2);
- tx_SSE3 = gmx_mul_pr(fscal_SSE3, dx_SSE3);
- ty_SSE0 = gmx_mul_pr(fscal_SSE0, dy_SSE0);
- ty_SSE1 = gmx_mul_pr(fscal_SSE1, dy_SSE1);
- ty_SSE2 = gmx_mul_pr(fscal_SSE2, dy_SSE2);
- ty_SSE3 = gmx_mul_pr(fscal_SSE3, dy_SSE3);
- tz_SSE0 = gmx_mul_pr(fscal_SSE0, dz_SSE0);
- tz_SSE1 = gmx_mul_pr(fscal_SSE1, dz_SSE1);
- tz_SSE2 = gmx_mul_pr(fscal_SSE2, dz_SSE2);
- tz_SSE3 = gmx_mul_pr(fscal_SSE3, dz_SSE3);
+ tx_S0 = gmx_mul_pr(fscal_S0, dx_S0);
+ tx_S1 = gmx_mul_pr(fscal_S1, dx_S1);
+ tx_S2 = gmx_mul_pr(fscal_S2, dx_S2);
+ tx_S3 = gmx_mul_pr(fscal_S3, dx_S3);
+ ty_S0 = gmx_mul_pr(fscal_S0, dy_S0);
+ ty_S1 = gmx_mul_pr(fscal_S1, dy_S1);
+ ty_S2 = gmx_mul_pr(fscal_S2, dy_S2);
+ ty_S3 = gmx_mul_pr(fscal_S3, dy_S3);
+ tz_S0 = gmx_mul_pr(fscal_S0, dz_S0);
+ tz_S1 = gmx_mul_pr(fscal_S1, dz_S1);
+ tz_S2 = gmx_mul_pr(fscal_S2, dz_S2);
+ tz_S3 = gmx_mul_pr(fscal_S3, dz_S3);
/* Increment i atom force */
- fix_SSE0 = gmx_add_pr(fix_SSE0, tx_SSE0);
- fix_SSE1 = gmx_add_pr(fix_SSE1, tx_SSE1);
- fix_SSE2 = gmx_add_pr(fix_SSE2, tx_SSE2);
- fix_SSE3 = gmx_add_pr(fix_SSE3, tx_SSE3);
- fiy_SSE0 = gmx_add_pr(fiy_SSE0, ty_SSE0);
- fiy_SSE1 = gmx_add_pr(fiy_SSE1, ty_SSE1);
- fiy_SSE2 = gmx_add_pr(fiy_SSE2, ty_SSE2);
- fiy_SSE3 = gmx_add_pr(fiy_SSE3, ty_SSE3);
- fiz_SSE0 = gmx_add_pr(fiz_SSE0, tz_SSE0);
- fiz_SSE1 = gmx_add_pr(fiz_SSE1, tz_SSE1);
- fiz_SSE2 = gmx_add_pr(fiz_SSE2, tz_SSE2);
- fiz_SSE3 = gmx_add_pr(fiz_SSE3, tz_SSE3);
+ fix_S0 = gmx_add_pr(fix_S0, tx_S0);
+ fix_S1 = gmx_add_pr(fix_S1, tx_S1);
+ fix_S2 = gmx_add_pr(fix_S2, tx_S2);
+ fix_S3 = gmx_add_pr(fix_S3, tx_S3);
+ fiy_S0 = gmx_add_pr(fiy_S0, ty_S0);
+ fiy_S1 = gmx_add_pr(fiy_S1, ty_S1);
+ fiy_S2 = gmx_add_pr(fiy_S2, ty_S2);
+ fiy_S3 = gmx_add_pr(fiy_S3, ty_S3);
+ fiz_S0 = gmx_add_pr(fiz_S0, tz_S0);
+ fiz_S1 = gmx_add_pr(fiz_S1, tz_S1);
+ fiz_S2 = gmx_add_pr(fiz_S2, tz_S2);
+ fiz_S3 = gmx_add_pr(fiz_S3, tz_S3);
/* Decrement j atom force */
gmx_store_pr(f+ajx,
- gmx_sub_pr( gmx_load_pr(f+ajx), gmx_sum4_pr(tx_SSE0, tx_SSE1, tx_SSE2, tx_SSE3) ));
+ gmx_sub_pr( gmx_load_pr(f+ajx), gmx_sum4_pr(tx_S0, tx_S1, tx_S2, tx_S3) ));
gmx_store_pr(f+ajy,
- gmx_sub_pr( gmx_load_pr(f+ajy), gmx_sum4_pr(ty_SSE0, ty_SSE1, ty_SSE2, ty_SSE3) ));
+ gmx_sub_pr( gmx_load_pr(f+ajy), gmx_sum4_pr(ty_S0, ty_S1, ty_S2, ty_S3) ));
gmx_store_pr(f+ajz,
- gmx_sub_pr( gmx_load_pr(f+ajz), gmx_sum4_pr(tz_SSE0, tz_SSE1, tz_SSE2, tz_SSE3) ));
+ gmx_sub_pr( gmx_load_pr(f+ajz), gmx_sum4_pr(tz_S0, tz_S1, tz_S2, tz_S3) ));
}
-#undef rinv_ex_SSE0
-#undef rinv_ex_SSE1
-#undef rinv_ex_SSE2
-#undef rinv_ex_SSE3
+#undef rinv_ex_S0
+#undef rinv_ex_S1
+#undef rinv_ex_S2
+#undef rinv_ex_S3
-#undef wco_vdw_SSE0
-#undef wco_vdw_SSE1
-#undef wco_vdw_SSE2
-#undef wco_vdw_SSE3
+#undef wco_vdw_S0
+#undef wco_vdw_S1
+#undef wco_vdw_S2
+#undef wco_vdw_S3
-#undef CUTOFF_BLENDV
+#undef NBNXN_CUTOFF_USE_BLENDV
#undef EXCL_FORCES
* the research papers on the package. Check out http://www.gromacs.org.
*/
-/* GMX_MM128_HERE or GMX_MM256_HERE should be set before including this file */
+#if !(GMX_NBNXN_SIMD_BITWIDTH == 128 || GMX_NBNXN_SIMD_BITWIDTH == 256)
+#error "unsupported GMX_NBNXN_SIMD_BITWIDTH"
+#endif
+
+#ifdef GMX_NBNXN_HALF_WIDTH_SIMD
+#define GMX_USE_HALF_WIDTH_SIMD_HERE
+#endif
#include "gmx_simd_macros.h"
#define SUM_SIMD4(x) (x[0]+x[1]+x[2]+x[3])
#define UNROLLI NBNXN_CPU_CLUSTER_I_SIZE
#define UNROLLJ GMX_SIMD_WIDTH_HERE
-#if defined GMX_MM128_HERE || defined GMX_DOUBLE
-#define STRIDE 4
-#endif
-#if defined GMX_MM256_HERE && !defined GMX_DOUBLE
-#define STRIDE 8
+/* The stride of all the atom data arrays is max(UNROLLI,UNROLLJ) */
+#if GMX_SIMD_WIDTH_HERE >= UNROLLI
+#define STRIDE GMX_SIMD_WIDTH_HERE
+#else
+#define STRIDE UNROLLI
#endif
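+/* Example: a single-precision 256-bit 4xN kernel has UNROLLI=4 and
+ * UNROLLJ=GMX_SIMD_WIDTH_HERE=8, so STRIDE=8; a double-precision 128-bit
+ * kernel has UNROLLJ=2, so STRIDE=UNROLLI=4.
+ */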
-#ifdef GMX_MM128_HERE
-#ifndef GMX_DOUBLE
-/* single precision 4x4 kernel */
-#define SUM_SIMD(x) SUM_SIMD4(x)
-#define TAB_FDV0
+#if GMX_SIMD_WIDTH_HERE == 2
+#define SUM_SIMD(x) (x[0]+x[1])
+#else
+#if GMX_SIMD_WIDTH_HERE == 4
+#define SUM_SIMD(x) SUM_SIMD4(x)
#else
-/* double precision 4x2 kernel */
-#define SUM_SIMD(x) (x[0]+x[1])
+#if GMX_SIMD_WIDTH_HERE == 8
+#define SUM_SIMD(x) (x[0]+x[1]+x[2]+x[3]+x[4]+x[5]+x[6]+x[7])
+#else
+#error "unsupported kernel configuration"
+#endif
#endif
#endif
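+/* SUM_SIMD reduces one SIMD-width aligned buffer of reals to a scalar.
+ * It operates on buffers written with gmx_store_pr, as done for the
+ * energy totals at the end of the kernel.
+ */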
-#ifdef GMX_MM256_HERE
-#ifndef GMX_DOUBLE
-/* single precision 4x8 kernel */
-#define SUM_SIMD(x) (x[0]+x[1]+x[2]+x[3]+x[4]+x[5]+x[6]+x[7])
+
+/* Decide if we should use the FDV0 table layout */
+#if defined GMX_X86_AVX_256 && !defined GMX_USE_HALF_WIDTH_SIMD_HERE
+/* With full AVX-256 SIMD, half SIMD-width table loads are optimal */
+#if GMX_SIMD_WIDTH_HERE/2 == 4
#define TAB_FDV0
+#endif
#else
-/* double precision 4x4 kernel */
-#define SUM_SIMD(x) SUM_SIMD4(x)
+/* We use the FDV0 table layout when we can use aligned table loads */
+#if GMX_SIMD_WIDTH_HERE == 4
+#define TAB_FDV0
#endif
#endif
+
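+/* Note: the FDV0 layout is assumed to pack, for each table index i, the
+ * quadruplet F[i], F[i+1]-F[i], V[i], 0, so that force and energy can be
+ * fetched with a single aligned 4-wide load per table entry.
+ */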
#define SIMD_MASK_ALL 0xffffffff
#include "nbnxn_kernel_simd_utils.h"
real *vctp[UNROLLI];
#endif
- gmx_mm_pr shX_SSE;
- gmx_mm_pr shY_SSE;
- gmx_mm_pr shZ_SSE;
- gmx_mm_pr ix_SSE0, iy_SSE0, iz_SSE0;
- gmx_mm_pr ix_SSE1, iy_SSE1, iz_SSE1;
- gmx_mm_pr ix_SSE2, iy_SSE2, iz_SSE2;
- gmx_mm_pr ix_SSE3, iy_SSE3, iz_SSE3;
- gmx_mm_pr fix_SSE0, fiy_SSE0, fiz_SSE0;
- gmx_mm_pr fix_SSE1, fiy_SSE1, fiz_SSE1;
- gmx_mm_pr fix_SSE2, fiy_SSE2, fiz_SSE2;
- gmx_mm_pr fix_SSE3, fiy_SSE3, fiz_SSE3;
+ gmx_mm_pr shX_S;
+ gmx_mm_pr shY_S;
+ gmx_mm_pr shZ_S;
+ gmx_mm_pr ix_S0, iy_S0, iz_S0;
+ gmx_mm_pr ix_S1, iy_S1, iz_S1;
+ gmx_mm_pr ix_S2, iy_S2, iz_S2;
+ gmx_mm_pr ix_S3, iy_S3, iz_S3;
+ gmx_mm_pr fix_S0, fiy_S0, fiz_S0;
+ gmx_mm_pr fix_S1, fiy_S1, fiz_S1;
+ gmx_mm_pr fix_S2, fiy_S2, fiz_S2;
+ gmx_mm_pr fix_S3, fiy_S3, fiz_S3;
#if UNROLLJ >= 4
#ifndef GMX_DOUBLE
- __m128 fix_SSE, fiy_SSE, fiz_SSE;
+ __m128 fix_S, fiy_S, fiz_S;
#else
- __m256d fix_SSE, fiy_SSE, fiz_SSE;
+ __m256d fix_S, fiy_S, fiz_S;
#endif
#else
- __m128d fix0_SSE, fiy0_SSE, fiz0_SSE;
- __m128d fix2_SSE, fiy2_SSE, fiz2_SSE;
+ __m128d fix0_S, fiy0_S, fiz0_S;
+ __m128d fix2_S, fiy2_S, fiz2_S;
#endif
-#ifdef GMX_MM128_HERE
-#ifndef GMX_DOUBLE
- __m128i mask0 = _mm_set_epi32( 0x0008, 0x0004, 0x0002, 0x0001 );
- __m128i mask1 = _mm_set_epi32( 0x0080, 0x0040, 0x0020, 0x0010 );
- __m128i mask2 = _mm_set_epi32( 0x0800, 0x0400, 0x0200, 0x0100 );
- __m128i mask3 = _mm_set_epi32( 0x8000, 0x4000, 0x2000, 0x1000 );
-#else
- /* For double precision we need to set two 32bit ints for one double */
- __m128i mask0 = _mm_set_epi32( 0x0002, 0x0002, 0x0001, 0x0001 );
- __m128i mask1 = _mm_set_epi32( 0x0008, 0x0008, 0x0004, 0x0004 );
- __m128i mask2 = _mm_set_epi32( 0x0020, 0x0020, 0x0010, 0x0010 );
- __m128i mask3 = _mm_set_epi32( 0x0080, 0x0080, 0x0040, 0x0040 );
-#endif
-#endif
-#ifdef GMX_MM256_HERE
- /* AVX: use floating point masks, as there are no integer instructions */
-#ifndef GMX_DOUBLE
- gmx_mm_pr mask0 = _mm256_castsi256_ps(_mm256_set_epi32( 0x0080, 0x0040, 0x0020, 0x0010, 0x0008, 0x0004, 0x0002, 0x0001 ));
- gmx_mm_pr mask1 = _mm256_castsi256_ps(_mm256_set_epi32( 0x8000, 0x4000, 0x2000, 0x1000, 0x0800, 0x0400, 0x0200, 0x0100 ));
+ gmx_mm_pr diag_jmi_S;
+#if UNROLLI == UNROLLJ
+ gmx_mm_pr diag_S0, diag_S1, diag_S2, diag_S3;
#else
- /* There is no 256-bit int to double conversion, so we use float here */
- __m256 mask0 = _mm256_castsi256_ps(_mm256_set_epi32( 0x0008, 0x0008, 0x0004, 0x0004, 0x0002, 0x0002, 0x0001, 0x0001 ));
- __m256 mask1 = _mm256_castsi256_ps(_mm256_set_epi32( 0x0080, 0x0080, 0x0040, 0x0040, 0x0020, 0x0020, 0x0010, 0x0010 ));
- __m256 mask2 = _mm256_castsi256_ps(_mm256_set_epi32( 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100 ));
- __m256 mask3 = _mm256_castsi256_ps(_mm256_set_epi32( 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000 ));
-#endif
+ gmx_mm_pr diag0_S0, diag0_S1, diag0_S2, diag0_S3;
+ gmx_mm_pr diag1_S0, diag1_S1, diag1_S2, diag1_S3;
#endif
- gmx_mm_pr diag_jmi_SSE;
-#if UNROLLI == UNROLLJ
- gmx_mm_pr diag_SSE0, diag_SSE1, diag_SSE2, diag_SSE3;
+#ifdef gmx_checkbitmask_epi32
+ gmx_epi32 mask_S0, mask_S1, mask_S2, mask_S3;
#else
- gmx_mm_pr diag0_SSE0, diag0_SSE1, diag0_SSE2, diag0_SSE3;
- gmx_mm_pr diag1_SSE0, diag1_SSE1, diag1_SSE2, diag1_SSE3;
+ gmx_mm_pr mask_S0, mask_S1, mask_S2, mask_S3;
#endif
-#if defined GMX_X86_SSE2 && defined GMX_MM128_HERE
- __m128i zeroi_SSE = _mm_setzero_si128();
-#endif
- gmx_mm_pr zero_SSE = gmx_set1_pr(0);
+ gmx_mm_pr zero_S = gmx_set1_pr(0);
- gmx_mm_pr one_SSE = gmx_set1_pr(1.0);
- gmx_mm_pr iq_SSE0 = gmx_setzero_pr();
- gmx_mm_pr iq_SSE1 = gmx_setzero_pr();
- gmx_mm_pr iq_SSE2 = gmx_setzero_pr();
- gmx_mm_pr iq_SSE3 = gmx_setzero_pr();
- gmx_mm_pr mrc_3_SSE;
+ gmx_mm_pr one_S = gmx_set1_pr(1.0);
+ gmx_mm_pr iq_S0 = gmx_setzero_pr();
+ gmx_mm_pr iq_S1 = gmx_setzero_pr();
+ gmx_mm_pr iq_S2 = gmx_setzero_pr();
+ gmx_mm_pr iq_S3 = gmx_setzero_pr();
+ gmx_mm_pr mrc_3_S;
#ifdef CALC_ENERGIES
- gmx_mm_pr hrc_3_SSE, moh_rc_SSE;
+ gmx_mm_pr hrc_3_S, moh_rc_S;
#endif
#ifdef CALC_COUL_TAB
/* Coulomb table variables */
- gmx_mm_pr invtsp_SSE;
+ gmx_mm_pr invtsp_S;
const real *tab_coul_F;
#ifndef TAB_FDV0
const real *tab_coul_V;
#endif
-#ifdef GMX_MM256_HERE
+#if GMX_NBNXN_SIMD_BITWIDTH == 256
int ti0_array[2*GMX_SIMD_WIDTH_HERE-1], *ti0;
int ti1_array[2*GMX_SIMD_WIDTH_HERE-1], *ti1;
int ti2_array[2*GMX_SIMD_WIDTH_HERE-1], *ti2;
int ti3_array[2*GMX_SIMD_WIDTH_HERE-1], *ti3;
#endif
#ifdef CALC_ENERGIES
- gmx_mm_pr mhalfsp_SSE;
+ gmx_mm_pr mhalfsp_S;
#endif
#endif
#ifdef CALC_COUL_EWALD
- gmx_mm_pr beta2_SSE, beta_SSE;
+ gmx_mm_pr beta2_S, beta_S;
#endif
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
- gmx_mm_pr sh_ewald_SSE;
+ gmx_mm_pr sh_ewald_S;
#endif
#ifdef LJ_COMB_LB
const real *ljc;
- gmx_mm_pr hsig_i_SSE0, seps_i_SSE0;
- gmx_mm_pr hsig_i_SSE1, seps_i_SSE1;
- gmx_mm_pr hsig_i_SSE2, seps_i_SSE2;
- gmx_mm_pr hsig_i_SSE3, seps_i_SSE3;
+ gmx_mm_pr hsig_i_S0, seps_i_S0;
+ gmx_mm_pr hsig_i_S1, seps_i_S1;
+ gmx_mm_pr hsig_i_S2, seps_i_S2;
+ gmx_mm_pr hsig_i_S3, seps_i_S3;
#else
#ifdef FIX_LJ_C
real pvdw_array[2*UNROLLI*UNROLLJ+3];
real *pvdw_c6, *pvdw_c12;
- gmx_mm_pr c6_SSE0, c12_SSE0;
- gmx_mm_pr c6_SSE1, c12_SSE1;
- gmx_mm_pr c6_SSE2, c12_SSE2;
- gmx_mm_pr c6_SSE3, c12_SSE3;
+ gmx_mm_pr c6_S0, c12_S0;
+ gmx_mm_pr c6_S1, c12_S1;
+ gmx_mm_pr c6_S2, c12_S2;
+ gmx_mm_pr c6_S3, c12_S3;
#endif
#ifdef LJ_COMB_GEOM
const real *ljc;
- gmx_mm_pr c6s_SSE0, c12s_SSE0;
- gmx_mm_pr c6s_SSE1, c12s_SSE1;
- gmx_mm_pr c6s_SSE2 = gmx_setzero_pr(), c12s_SSE2 = gmx_setzero_pr();
- gmx_mm_pr c6s_SSE3 = gmx_setzero_pr(), c12s_SSE3 = gmx_setzero_pr();
+ gmx_mm_pr c6s_S0, c12s_S0;
+ gmx_mm_pr c6s_S1, c12s_S1;
+ gmx_mm_pr c6s_S2 = gmx_setzero_pr(), c12s_S2 = gmx_setzero_pr();
+ gmx_mm_pr c6s_S3 = gmx_setzero_pr(), c12s_S3 = gmx_setzero_pr();
#endif
#endif /* LJ_COMB_LB */
- gmx_mm_pr vctotSSE, VvdwtotSSE;
- gmx_mm_pr sixthSSE, twelvethSSE;
+ gmx_mm_pr vctot_S, Vvdwtot_S;
+ gmx_mm_pr sixth_S, twelveth_S;
- gmx_mm_pr avoid_sing_SSE;
- gmx_mm_pr rc2_SSE;
+ gmx_mm_pr avoid_sing_S;
+ gmx_mm_pr rc2_S;
#ifdef VDW_CUTOFF_CHECK
- gmx_mm_pr rcvdw2_SSE;
+ gmx_mm_pr rcvdw2_S;
#endif
#ifdef CALC_ENERGIES
- gmx_mm_pr sh_invrc6_SSE, sh_invrc12_SSE;
+ gmx_mm_pr sh_invrc6_S, sh_invrc12_S;
/* cppcheck-suppress unassignedVariable */
real tmpsum_array[15], *tmpsum;
#endif
/* Load j-i for the first i */
- diag_jmi_SSE = gmx_load_pr(nbat->simd_4xn_diag);
+ diag_jmi_S = gmx_load_pr(nbat->simd_4xn_diag);
/* Generate all the diagonal masks as comparison results */
#if UNROLLI == UNROLLJ
- diag_SSE0 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
- diag_jmi_SSE = gmx_sub_pr(diag_jmi_SSE, one_SSE);
- diag_SSE1 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
- diag_jmi_SSE = gmx_sub_pr(diag_jmi_SSE, one_SSE);
- diag_SSE2 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
- diag_jmi_SSE = gmx_sub_pr(diag_jmi_SSE, one_SSE);
- diag_SSE3 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
+ diag_S0 = gmx_cmplt_pr(zero_S, diag_jmi_S);
+ diag_jmi_S = gmx_sub_pr(diag_jmi_S, one_S);
+ diag_S1 = gmx_cmplt_pr(zero_S, diag_jmi_S);
+ diag_jmi_S = gmx_sub_pr(diag_jmi_S, one_S);
+ diag_S2 = gmx_cmplt_pr(zero_S, diag_jmi_S);
+ diag_jmi_S = gmx_sub_pr(diag_jmi_S, one_S);
+ diag_S3 = gmx_cmplt_pr(zero_S, diag_jmi_S);
#else
#if UNROLLI == 2*UNROLLJ || 2*UNROLLI == UNROLLJ
- diag0_SSE0 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
- diag_jmi_SSE = gmx_sub_pr(diag_jmi_SSE, one_SSE);
- diag0_SSE1 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
- diag_jmi_SSE = gmx_sub_pr(diag_jmi_SSE, one_SSE);
- diag0_SSE2 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
- diag_jmi_SSE = gmx_sub_pr(diag_jmi_SSE, one_SSE);
- diag0_SSE3 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
- diag_jmi_SSE = gmx_sub_pr(diag_jmi_SSE, one_SSE);
+ diag0_S0 = gmx_cmplt_pr(zero_S, diag_jmi_S);
+ diag_jmi_S = gmx_sub_pr(diag_jmi_S, one_S);
+ diag0_S1 = gmx_cmplt_pr(zero_S, diag_jmi_S);
+ diag_jmi_S = gmx_sub_pr(diag_jmi_S, one_S);
+ diag0_S2 = gmx_cmplt_pr(zero_S, diag_jmi_S);
+ diag_jmi_S = gmx_sub_pr(diag_jmi_S, one_S);
+ diag0_S3 = gmx_cmplt_pr(zero_S, diag_jmi_S);
+ diag_jmi_S = gmx_sub_pr(diag_jmi_S, one_S);
#if UNROLLI == 2*UNROLLJ
/* Load j-i for the second half of the j-cluster */
- diag_jmi_SSE = gmx_load_pr(nbat->simd_4xn_diag+UNROLLJ);
+ diag_jmi_S = gmx_load_pr(nbat->simd_4xn_diag+UNROLLJ);
#endif
- diag1_SSE0 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
- diag_jmi_SSE = gmx_sub_pr(diag_jmi_SSE, one_SSE);
- diag1_SSE1 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
- diag_jmi_SSE = gmx_sub_pr(diag_jmi_SSE, one_SSE);
- diag1_SSE2 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
- diag_jmi_SSE = gmx_sub_pr(diag_jmi_SSE, one_SSE);
- diag1_SSE3 = gmx_cmplt_pr(zero_SSE, diag_jmi_SSE);
+ diag1_S0 = gmx_cmplt_pr(zero_S, diag_jmi_S);
+ diag_jmi_S = gmx_sub_pr(diag_jmi_S, one_S);
+ diag1_S1 = gmx_cmplt_pr(zero_S, diag_jmi_S);
+ diag_jmi_S = gmx_sub_pr(diag_jmi_S, one_S);
+ diag1_S2 = gmx_cmplt_pr(zero_S, diag_jmi_S);
+ diag_jmi_S = gmx_sub_pr(diag_jmi_S, one_S);
+ diag1_S3 = gmx_cmplt_pr(zero_S, diag_jmi_S);
#endif
#endif
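+/* Illustration for the UNROLLI == UNROLLJ branch above: assuming
+ * nbat->simd_4xn_diag holds j-i for i=0, i.e. {0,1,2,...}, the masks are
+ * diag_S0 = (j>0), diag_S1 = (j>1), etc., so diag_Si selects j>i and
+ * masks out self and double-counted pairs within a diagonal cluster pair.
+ */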
+ /* Load masks for topology exclusion checking */
+#ifdef gmx_checkbitmask_epi32
+ mask_S0 = gmx_load_si(nbat->simd_excl_mask + 0*GMX_NBNXN_SIMD_BITWIDTH/32);
+ mask_S1 = gmx_load_si(nbat->simd_excl_mask + 1*GMX_NBNXN_SIMD_BITWIDTH/32);
+ mask_S2 = gmx_load_si(nbat->simd_excl_mask + 2*GMX_NBNXN_SIMD_BITWIDTH/32);
+ mask_S3 = gmx_load_si(nbat->simd_excl_mask + 3*GMX_NBNXN_SIMD_BITWIDTH/32);
+#else
+ mask_S0 = gmx_load_pr((real *)nbat->simd_excl_mask + 0*UNROLLJ);
+ mask_S1 = gmx_load_pr((real *)nbat->simd_excl_mask + 1*UNROLLJ);
+ mask_S2 = gmx_load_pr((real *)nbat->simd_excl_mask + 2*UNROLLJ);
+ mask_S3 = gmx_load_pr((real *)nbat->simd_excl_mask + 3*UNROLLJ);
+#endif
+
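+/* Usage sketch; the per-pair exclusion word, here called excl_bits, is
+ * hypothetical. The interaction booleans are obtained by testing one bit
+ * per j-atom against the masks loaded above, roughly:
+ *   int_S0 = gmx_checkbitmask_epi32(gmx_set1_epi32(excl_bits), mask_S0);
+ * or the equivalent gmx_checkbitmask_pr call on reals when integer SIMD
+ * operations are not available.
+ */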
#ifdef CALC_COUL_TAB
-#ifdef GMX_MM256_HERE
+#if GMX_NBNXN_SIMD_BITWIDTH == 256
/* Generate aligned table index pointers */
- ti0 = (int *)(((size_t)(ti0_array+GMX_SIMD_WIDTH_HERE-1)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int)-1))));
- ti1 = (int *)(((size_t)(ti1_array+GMX_SIMD_WIDTH_HERE-1)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int)-1))));
- ti2 = (int *)(((size_t)(ti2_array+GMX_SIMD_WIDTH_HERE-1)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int)-1))));
- ti3 = (int *)(((size_t)(ti3_array+GMX_SIMD_WIDTH_HERE-1)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int)-1))));
+ ti0 = gmx_simd_align_int(ti0_array);
+ ti1 = gmx_simd_align_int(ti1_array);
+ ti2 = gmx_simd_align_int(ti2_array);
+ ti3 = gmx_simd_align_int(ti3_array);
#endif
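+/* gmx_simd_align_int is assumed to expand to the same round-up pointer
+ * arithmetic as the code it replaces, e.g.:
+ *   (int *)(((size_t)((x)+GMX_SIMD_WIDTH_HERE-1)) &
+ *           (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int)-1))))
+ * gmx_simd_align_real, used below, is the analogous macro for reals.
+ */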
- invtsp_SSE = gmx_set1_pr(ic->tabq_scale);
+ invtsp_S = gmx_set1_pr(ic->tabq_scale);
#ifdef CALC_ENERGIES
- mhalfsp_SSE = gmx_set1_pr(-0.5/ic->tabq_scale);
+ mhalfsp_S = gmx_set1_pr(-0.5/ic->tabq_scale);
#endif
#ifdef TAB_FDV0
#endif /* CALC_COUL_TAB */
#ifdef CALC_COUL_EWALD
- beta2_SSE = gmx_set1_pr(ic->ewaldcoeff*ic->ewaldcoeff);
- beta_SSE = gmx_set1_pr(ic->ewaldcoeff);
+ beta2_S = gmx_set1_pr(ic->ewaldcoeff*ic->ewaldcoeff);
+ beta_S = gmx_set1_pr(ic->ewaldcoeff);
#endif
#if (defined CALC_COUL_TAB || defined CALC_COUL_EWALD) && defined CALC_ENERGIES
- sh_ewald_SSE = gmx_set1_pr(ic->sh_ewald);
+ sh_ewald_S = gmx_set1_pr(ic->sh_ewald);
#endif
q = nbat->q;
shiftvec = shift_vec[0];
x = nbat->x;
- avoid_sing_SSE = gmx_set1_pr(NBNXN_AVOID_SING_R2_INC);
+ avoid_sing_S = gmx_set1_pr(NBNXN_AVOID_SING_R2_INC);
/* The kernel supports either rcoulomb = rvdw or rcoulomb >= rvdw */
- rc2_SSE = gmx_set1_pr(ic->rcoulomb*ic->rcoulomb);
+ rc2_S = gmx_set1_pr(ic->rcoulomb*ic->rcoulomb);
#ifdef VDW_CUTOFF_CHECK
- rcvdw2_SSE = gmx_set1_pr(ic->rvdw*ic->rvdw);
+ rcvdw2_S = gmx_set1_pr(ic->rvdw*ic->rvdw);
#endif
#ifdef CALC_ENERGIES
- sixthSSE = gmx_set1_pr(1.0/6.0);
- twelvethSSE = gmx_set1_pr(1.0/12.0);
+ sixth_S = gmx_set1_pr(1.0/6.0);
+ twelveth_S = gmx_set1_pr(1.0/12.0);
- sh_invrc6_SSE = gmx_set1_pr(ic->sh_invrc6);
- sh_invrc12_SSE = gmx_set1_pr(ic->sh_invrc6*ic->sh_invrc6);
+ sh_invrc6_S = gmx_set1_pr(ic->sh_invrc6);
+ sh_invrc12_S = gmx_set1_pr(ic->sh_invrc6*ic->sh_invrc6);
#endif
- mrc_3_SSE = gmx_set1_pr(-2*ic->k_rf);
+ mrc_3_S = gmx_set1_pr(-2*ic->k_rf);
#ifdef CALC_ENERGIES
- hrc_3_SSE = gmx_set1_pr(ic->k_rf);
+ hrc_3_S = gmx_set1_pr(ic->k_rf);
- moh_rc_SSE = gmx_set1_pr(-ic->c_rf);
+ moh_rc_S = gmx_set1_pr(-ic->c_rf);
#endif
#ifdef CALC_ENERGIES
- tmpsum = (real *)(((size_t)(tmpsum_array+7)) & (~((size_t)31)));
+ tmpsum = gmx_simd_align_real(tmpsum_array);
#endif
#ifdef CALC_SHIFTFORCES
- shf = (real *)(((size_t)(shf_array+7)) & (~((size_t)31)));
+ shf = gmx_simd_align_real(shf_array);
#endif
#ifdef FIX_LJ_C
- pvdw_c6 = (real *)(((size_t)(pvdw_array+3)) & (~((size_t)15)));
+ pvdw_c6 = gmx_simd_align_real(pvdw_array+3);
pvdw_c12 = pvdw_c6 + UNROLLI*UNROLLJ;
for (jp = 0; jp < UNROLLJ; jp++)
pvdw_c12[2*UNROLLJ+jp] = nbat->nbfp[0*2+1];
pvdw_c12[3*UNROLLJ+jp] = nbat->nbfp[0*2+1];
}
- c6_SSE0 = gmx_load_pr(pvdw_c6 +0*UNROLLJ);
- c6_SSE1 = gmx_load_pr(pvdw_c6 +1*UNROLLJ);
- c6_SSE2 = gmx_load_pr(pvdw_c6 +2*UNROLLJ);
- c6_SSE3 = gmx_load_pr(pvdw_c6 +3*UNROLLJ);
-
- c12_SSE0 = gmx_load_pr(pvdw_c12+0*UNROLLJ);
- c12_SSE1 = gmx_load_pr(pvdw_c12+1*UNROLLJ);
- c12_SSE2 = gmx_load_pr(pvdw_c12+2*UNROLLJ);
- c12_SSE3 = gmx_load_pr(pvdw_c12+3*UNROLLJ);
+ c6_S0 = gmx_load_pr(pvdw_c6 +0*UNROLLJ);
+ c6_S1 = gmx_load_pr(pvdw_c6 +1*UNROLLJ);
+ c6_S2 = gmx_load_pr(pvdw_c6 +2*UNROLLJ);
+ c6_S3 = gmx_load_pr(pvdw_c6 +3*UNROLLJ);
+
+ c12_S0 = gmx_load_pr(pvdw_c12+0*UNROLLJ);
+ c12_S1 = gmx_load_pr(pvdw_c12+1*UNROLLJ);
+ c12_S2 = gmx_load_pr(pvdw_c12+2*UNROLLJ);
+ c12_S3 = gmx_load_pr(pvdw_c12+3*UNROLLJ);
#endif /* FIX_LJ_C */
#ifdef ENERGY_GROUPS
ci = nbln->ci;
ci_sh = (ish == CENTRAL ? ci : -1);
- shX_SSE = gmx_load1_pr(shiftvec+ish3);
- shY_SSE = gmx_load1_pr(shiftvec+ish3+1);
- shZ_SSE = gmx_load1_pr(shiftvec+ish3+2);
+ shX_S = gmx_load1_pr(shiftvec+ish3);
+ shY_S = gmx_load1_pr(shiftvec+ish3+1);
+ shZ_S = gmx_load1_pr(shiftvec+ish3+2);
#if UNROLLJ <= 4
sci = ci*STRIDE;
/* Load i atom data */
sciy = scix + STRIDE;
sciz = sciy + STRIDE;
- ix_SSE0 = gmx_add_pr(gmx_load1_pr(x+scix), shX_SSE);
- ix_SSE1 = gmx_add_pr(gmx_load1_pr(x+scix+1), shX_SSE);
- ix_SSE2 = gmx_add_pr(gmx_load1_pr(x+scix+2), shX_SSE);
- ix_SSE3 = gmx_add_pr(gmx_load1_pr(x+scix+3), shX_SSE);
- iy_SSE0 = gmx_add_pr(gmx_load1_pr(x+sciy), shY_SSE);
- iy_SSE1 = gmx_add_pr(gmx_load1_pr(x+sciy+1), shY_SSE);
- iy_SSE2 = gmx_add_pr(gmx_load1_pr(x+sciy+2), shY_SSE);
- iy_SSE3 = gmx_add_pr(gmx_load1_pr(x+sciy+3), shY_SSE);
- iz_SSE0 = gmx_add_pr(gmx_load1_pr(x+sciz), shZ_SSE);
- iz_SSE1 = gmx_add_pr(gmx_load1_pr(x+sciz+1), shZ_SSE);
- iz_SSE2 = gmx_add_pr(gmx_load1_pr(x+sciz+2), shZ_SSE);
- iz_SSE3 = gmx_add_pr(gmx_load1_pr(x+sciz+3), shZ_SSE);
+ ix_S0 = gmx_add_pr(gmx_load1_pr(x+scix), shX_S);
+ ix_S1 = gmx_add_pr(gmx_load1_pr(x+scix+1), shX_S);
+ ix_S2 = gmx_add_pr(gmx_load1_pr(x+scix+2), shX_S);
+ ix_S3 = gmx_add_pr(gmx_load1_pr(x+scix+3), shX_S);
+ iy_S0 = gmx_add_pr(gmx_load1_pr(x+sciy), shY_S);
+ iy_S1 = gmx_add_pr(gmx_load1_pr(x+sciy+1), shY_S);
+ iy_S2 = gmx_add_pr(gmx_load1_pr(x+sciy+2), shY_S);
+ iy_S3 = gmx_add_pr(gmx_load1_pr(x+sciy+3), shY_S);
+ iz_S0 = gmx_add_pr(gmx_load1_pr(x+sciz), shZ_S);
+ iz_S1 = gmx_add_pr(gmx_load1_pr(x+sciz+1), shZ_S);
+ iz_S2 = gmx_add_pr(gmx_load1_pr(x+sciz+2), shZ_S);
+ iz_S3 = gmx_add_pr(gmx_load1_pr(x+sciz+3), shZ_S);
if (do_coul)
{
- iq_SSE0 = gmx_set1_pr(facel*q[sci]);
- iq_SSE1 = gmx_set1_pr(facel*q[sci+1]);
- iq_SSE2 = gmx_set1_pr(facel*q[sci+2]);
- iq_SSE3 = gmx_set1_pr(facel*q[sci+3]);
+ iq_S0 = gmx_set1_pr(facel*q[sci]);
+ iq_S1 = gmx_set1_pr(facel*q[sci+1]);
+ iq_S2 = gmx_set1_pr(facel*q[sci+2]);
+ iq_S3 = gmx_set1_pr(facel*q[sci+3]);
}
#ifdef LJ_COMB_LB
- hsig_i_SSE0 = gmx_load1_pr(ljc+sci2+0);
- hsig_i_SSE1 = gmx_load1_pr(ljc+sci2+1);
- hsig_i_SSE2 = gmx_load1_pr(ljc+sci2+2);
- hsig_i_SSE3 = gmx_load1_pr(ljc+sci2+3);
- seps_i_SSE0 = gmx_load1_pr(ljc+sci2+STRIDE+0);
- seps_i_SSE1 = gmx_load1_pr(ljc+sci2+STRIDE+1);
- seps_i_SSE2 = gmx_load1_pr(ljc+sci2+STRIDE+2);
- seps_i_SSE3 = gmx_load1_pr(ljc+sci2+STRIDE+3);
+ hsig_i_S0 = gmx_load1_pr(ljc+sci2+0);
+ hsig_i_S1 = gmx_load1_pr(ljc+sci2+1);
+ hsig_i_S2 = gmx_load1_pr(ljc+sci2+2);
+ hsig_i_S3 = gmx_load1_pr(ljc+sci2+3);
+ seps_i_S0 = gmx_load1_pr(ljc+sci2+STRIDE+0);
+ seps_i_S1 = gmx_load1_pr(ljc+sci2+STRIDE+1);
+ seps_i_S2 = gmx_load1_pr(ljc+sci2+STRIDE+2);
+ seps_i_S3 = gmx_load1_pr(ljc+sci2+STRIDE+3);
#else
#ifdef LJ_COMB_GEOM
- c6s_SSE0 = gmx_load1_pr(ljc+sci2+0);
- c6s_SSE1 = gmx_load1_pr(ljc+sci2+1);
+ c6s_S0 = gmx_load1_pr(ljc+sci2+0);
+ c6s_S1 = gmx_load1_pr(ljc+sci2+1);
if (!half_LJ)
{
- c6s_SSE2 = gmx_load1_pr(ljc+sci2+2);
- c6s_SSE3 = gmx_load1_pr(ljc+sci2+3);
+ c6s_S2 = gmx_load1_pr(ljc+sci2+2);
+ c6s_S3 = gmx_load1_pr(ljc+sci2+3);
}
- c12s_SSE0 = gmx_load1_pr(ljc+sci2+STRIDE+0);
- c12s_SSE1 = gmx_load1_pr(ljc+sci2+STRIDE+1);
+ c12s_S0 = gmx_load1_pr(ljc+sci2+STRIDE+0);
+ c12s_S1 = gmx_load1_pr(ljc+sci2+STRIDE+1);
if (!half_LJ)
{
- c12s_SSE2 = gmx_load1_pr(ljc+sci2+STRIDE+2);
- c12s_SSE3 = gmx_load1_pr(ljc+sci2+STRIDE+3);
+ c12s_S2 = gmx_load1_pr(ljc+sci2+STRIDE+2);
+ c12s_S3 = gmx_load1_pr(ljc+sci2+STRIDE+3);
}
#else
nbfp0 = nbfp_ptr + type[sci ]*nbat->ntype*nbfp_stride;
#endif
/* Zero the potential energy for this list */
- VvdwtotSSE = gmx_setzero_pr();
- vctotSSE = gmx_setzero_pr();
+ Vvdwtot_S = gmx_setzero_pr();
+ vctot_S = gmx_setzero_pr();
/* Clear i atom forces */
- fix_SSE0 = gmx_setzero_pr();
- fix_SSE1 = gmx_setzero_pr();
- fix_SSE2 = gmx_setzero_pr();
- fix_SSE3 = gmx_setzero_pr();
- fiy_SSE0 = gmx_setzero_pr();
- fiy_SSE1 = gmx_setzero_pr();
- fiy_SSE2 = gmx_setzero_pr();
- fiy_SSE3 = gmx_setzero_pr();
- fiz_SSE0 = gmx_setzero_pr();
- fiz_SSE1 = gmx_setzero_pr();
- fiz_SSE2 = gmx_setzero_pr();
- fiz_SSE3 = gmx_setzero_pr();
+ fix_S0 = gmx_setzero_pr();
+ fix_S1 = gmx_setzero_pr();
+ fix_S2 = gmx_setzero_pr();
+ fix_S3 = gmx_setzero_pr();
+ fiy_S0 = gmx_setzero_pr();
+ fiy_S1 = gmx_setzero_pr();
+ fiy_S2 = gmx_setzero_pr();
+ fiy_S3 = gmx_setzero_pr();
+ fiz_S0 = gmx_setzero_pr();
+ fiz_S1 = gmx_setzero_pr();
+ fiz_S2 = gmx_setzero_pr();
+ fiz_S3 = gmx_setzero_pr();
cjind = cjind0;
/* Add accumulated i-forces to the force array */
#if UNROLLJ >= 4
#ifndef GMX_DOUBLE
-#define gmx_load_ps4 _mm_load_ps
-#define gmx_store_ps4 _mm_store_ps
-#define gmx_add_ps4 _mm_add_ps
+#define gmx_load_pr4 _mm_load_ps
+#define gmx_store_pr4 _mm_store_ps
+#define gmx_add_pr4 _mm_add_ps
#else
-#define gmx_load_ps4 _mm256_load_pd
-#define gmx_store_ps4 _mm256_store_pd
-#define gmx_add_ps4 _mm256_add_pd
+#define gmx_load_pr4 _mm256_load_pd
+#define gmx_store_pr4 _mm256_store_pd
+#define gmx_add_pr4 _mm256_add_pd
#endif
- GMX_MM_TRANSPOSE_SUM4_PR(fix_SSE0, fix_SSE1, fix_SSE2, fix_SSE3, fix_SSE);
- gmx_store_ps4(f+scix, gmx_add_ps4(fix_SSE, gmx_load_ps4(f+scix)));
+ GMX_MM_TRANSPOSE_SUM4_PR(fix_S0, fix_S1, fix_S2, fix_S3, fix_S);
+ gmx_store_pr4(f+scix, gmx_add_pr4(fix_S, gmx_load_pr4(f+scix)));
- GMX_MM_TRANSPOSE_SUM4_PR(fiy_SSE0, fiy_SSE1, fiy_SSE2, fiy_SSE3, fiy_SSE);
- gmx_store_ps4(f+sciy, gmx_add_ps4(fiy_SSE, gmx_load_ps4(f+sciy)));
+ GMX_MM_TRANSPOSE_SUM4_PR(fiy_S0, fiy_S1, fiy_S2, fiy_S3, fiy_S);
+ gmx_store_pr4(f+sciy, gmx_add_pr4(fiy_S, gmx_load_pr4(f+sciy)));
- GMX_MM_TRANSPOSE_SUM4_PR(fiz_SSE0, fiz_SSE1, fiz_SSE2, fiz_SSE3, fiz_SSE);
- gmx_store_ps4(f+sciz, gmx_add_ps4(fiz_SSE, gmx_load_ps4(f+sciz)));
+ GMX_MM_TRANSPOSE_SUM4_PR(fiz_S0, fiz_S1, fiz_S2, fiz_S3, fiz_S);
+ gmx_store_pr4(f+sciz, gmx_add_pr4(fiz_S, gmx_load_pr4(f+sciz)));
#ifdef CALC_SHIFTFORCES
- gmx_store_ps4(shf, fix_SSE);
+ gmx_store_pr4(shf, fix_S);
fshift[ish3+0] += SUM_SIMD4(shf);
- gmx_store_ps4(shf, fiy_SSE);
+ gmx_store_pr4(shf, fiy_S);
fshift[ish3+1] += SUM_SIMD4(shf);
- gmx_store_ps4(shf, fiz_SSE);
+ gmx_store_pr4(shf, fiz_S);
fshift[ish3+2] += SUM_SIMD4(shf);
#endif
#else
- GMX_MM_TRANSPOSE_SUM2_PD(fix_SSE0, fix_SSE1, fix0_SSE);
- _mm_store_pd(f+scix, _mm_add_pd(fix0_SSE, _mm_load_pd(f+scix)));
- GMX_MM_TRANSPOSE_SUM2_PD(fix_SSE2, fix_SSE3, fix2_SSE);
- _mm_store_pd(f+scix+2, _mm_add_pd(fix2_SSE, _mm_load_pd(f+scix+2)));
+ GMX_MM_TRANSPOSE_SUM2_PD(fix_S0, fix_S1, fix0_S);
+ _mm_store_pd(f+scix, _mm_add_pd(fix0_S, _mm_load_pd(f+scix)));
+ GMX_MM_TRANSPOSE_SUM2_PD(fix_S2, fix_S3, fix2_S);
+ _mm_store_pd(f+scix+2, _mm_add_pd(fix2_S, _mm_load_pd(f+scix+2)));
- GMX_MM_TRANSPOSE_SUM2_PD(fiy_SSE0, fiy_SSE1, fiy0_SSE);
- _mm_store_pd(f+sciy, _mm_add_pd(fiy0_SSE, _mm_load_pd(f+sciy)));
- GMX_MM_TRANSPOSE_SUM2_PD(fiy_SSE2, fiy_SSE3, fiy2_SSE);
- _mm_store_pd(f+sciy+2, _mm_add_pd(fiy2_SSE, _mm_load_pd(f+sciy+2)));
+ GMX_MM_TRANSPOSE_SUM2_PD(fiy_S0, fiy_S1, fiy0_S);
+ _mm_store_pd(f+sciy, _mm_add_pd(fiy0_S, _mm_load_pd(f+sciy)));
+ GMX_MM_TRANSPOSE_SUM2_PD(fiy_S2, fiy_S3, fiy2_S);
+ _mm_store_pd(f+sciy+2, _mm_add_pd(fiy2_S, _mm_load_pd(f+sciy+2)));
- GMX_MM_TRANSPOSE_SUM2_PD(fiz_SSE0, fiz_SSE1, fiz0_SSE);
- _mm_store_pd(f+sciz, _mm_add_pd(fiz0_SSE, _mm_load_pd(f+sciz)));
- GMX_MM_TRANSPOSE_SUM2_PD(fiz_SSE2, fiz_SSE3, fiz2_SSE);
- _mm_store_pd(f+sciz+2, _mm_add_pd(fiz2_SSE, _mm_load_pd(f+sciz+2)));
+ GMX_MM_TRANSPOSE_SUM2_PD(fiz_S0, fiz_S1, fiz0_S);
+ _mm_store_pd(f+sciz, _mm_add_pd(fiz0_S, _mm_load_pd(f+sciz)));
+ GMX_MM_TRANSPOSE_SUM2_PD(fiz_S2, fiz_S3, fiz2_S);
+ _mm_store_pd(f+sciz+2, _mm_add_pd(fiz2_S, _mm_load_pd(f+sciz+2)));
#ifdef CALC_SHIFTFORCES
- _mm_store_pd(shf, _mm_add_pd(fix0_SSE, fix2_SSE));
+ _mm_store_pd(shf, _mm_add_pd(fix0_S, fix2_S));
fshift[ish3+0] += shf[0] + shf[1];
- _mm_store_pd(shf, _mm_add_pd(fiy0_SSE, fiy2_SSE));
+ _mm_store_pd(shf, _mm_add_pd(fiy0_S, fiy2_S));
fshift[ish3+1] += shf[0] + shf[1];
- _mm_store_pd(shf, _mm_add_pd(fiz0_SSE, fiz2_SSE));
+ _mm_store_pd(shf, _mm_add_pd(fiz0_S, fiz2_S));
fshift[ish3+2] += shf[0] + shf[1];
#endif
#endif
#ifdef CALC_ENERGIES
if (do_coul)
{
- gmx_store_pr(tmpsum, vctotSSE);
+ gmx_store_pr(tmpsum, vctot_S);
*Vc += SUM_SIMD(tmpsum);
}
- gmx_store_pr(tmpsum, VvdwtotSSE);
+ gmx_store_pr(tmpsum, Vvdwtot_S);
*Vvdw += SUM_SIMD(tmpsum);
#endif
#endif
}
-#undef gmx_load_ps4
-#undef gmx_store_ps4
-#undef gmx_store_ps4
+
+#undef gmx_load_pr4
+#undef gmx_store_pr4
+#undef gmx_add_pr4
#undef CALC_SHIFTFORCES
#undef STRIDE
#undef TAB_FDV0
#undef NBFP_STRIDE
+
+#undef GMX_USE_HALF_WIDTH_SIMD_HERE
out1 = _mm_unpackhi_pd(in0, in1); \
}
-#if defined GMX_MM128_HERE || !defined GMX_DOUBLE
+#if GMX_NBNXN_SIMD_BITWIDTH == 128 || !defined GMX_DOUBLE
/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
#define GMX_MM_SHUFFLE_4_PS_FIL01_TO_2_PS(in0, in1, in2, in3, out0, out1) \
{ \
out = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0)); \
}
-#ifndef GMX_MM256_HERE
+#if GMX_NBNXN_SIMD_BITWIDTH == 128
#ifndef GMX_DOUBLE
/* Sum the elements within each input register and store the sums in out */
#define GMX_MM_TRANSPOSE_SUM4_PR(in0, in1, in2, in3, out) \
#endif
#endif
-#ifdef GMX_MM128_HERE
+#if GMX_NBNXN_SIMD_BITWIDTH == 128
static inline __m128
gmx_mm128_invsqrt_ps_single(__m128 x)
#endif
-#ifdef GMX_MM256_HERE
+#if GMX_NBNXN_SIMD_BITWIDTH == 256
static inline __m256
gmx_mm256_invsqrt_ps_single(__m256 x)
/* Force and energy table load and interpolation routines */
-#if defined GMX_MM128_HERE && !defined GMX_DOUBLE
+#if GMX_NBNXN_SIMD_BITWIDTH == 128 && !defined GMX_DOUBLE
#define load_lj_pair_params(nbfp, type, aj, c6_SSE, c12_SSE) \
{ \
#endif
-#if defined GMX_MM256_HERE && !defined GMX_DOUBLE
+#if GMX_NBNXN_SIMD_BITWIDTH == 256 && !defined GMX_DOUBLE
/* Put two 128-bit 4-float registers into one 256-bit 8-float register */
#define GMX_2_MM_TO_M256(in0, in1, out) \
#endif
-#if defined GMX_MM128_HERE && defined GMX_DOUBLE
+#if GMX_NBNXN_SIMD_BITWIDTH == 128 && defined GMX_DOUBLE
#define load_lj_pair_params(nbfp, type, aj, c6_SSE, c12_SSE) \
{ \
#endif
-#if defined GMX_MM256_HERE && defined GMX_DOUBLE
+#if GMX_NBNXN_SIMD_BITWIDTH == 256 && defined GMX_DOUBLE
#define load_lj_pair_params(nbfp, type, aj, c6_SSE, c12_SSE) \
{ \
* but it is only used with AVX.
*/
-#if defined GMX_MM128_HERE && !defined GMX_DOUBLE
+#if GMX_NBNXN_SIMD_BITWIDTH == 128 && !defined GMX_DOUBLE
#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE) \
{ \
#endif
-#if defined GMX_MM256_HERE && !defined GMX_DOUBLE
+#if GMX_NBNXN_SIMD_BITWIDTH == 256 && !defined GMX_DOUBLE
#define load_table_f(tab_coul_FDV0, ti_SSE, ti, ctab0_SSE, ctab1_SSE) \
{ \
#endif
-#if defined GMX_MM128_HERE && defined GMX_DOUBLE
+#if GMX_NBNXN_SIMD_BITWIDTH == 128 && defined GMX_DOUBLE
#define load_table_f(tab_coul_F, ti_SSE, ti, ctab0_SSE, ctab1_SSE) \
{ \
#endif
-#if defined GMX_MM256_HERE && defined GMX_DOUBLE
+#if GMX_NBNXN_SIMD_BITWIDTH == 256 && defined GMX_DOUBLE
/* Put two 128-bit 2-double registers into one 256-bit 4-double register */
#define GMX_2_M128D_TO_M256D(in0, in1, out) \
}
}
-#if defined GMX_X86_AVX_256 && GMX_SIMD_WIDTH_HERE == 8
+#if defined GMX_X86_AVX_256 && GMX_SIMD_WIDTH_HERE == 8 && defined gmx_mm_hpr
/* As add_ener_grp above, but for two groups of UNROLLJ/2 stored in
* a single SIMD register.
*/
{
gmx_mm_hpr v_SSE;
- v_SSE = gmx_load_hpr(v0+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
+ gmx_load_hpr(v_SSE, v0+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
gmx_store_hpr(v0+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2, gmx_add_hpr(v_SSE, e_SSE0));
}
for (jj = 0; jj < (UNROLLJ/2); jj++)
{
gmx_mm_hpr v_SSE;
- v_SSE = gmx_load_hpr(v1+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
+ gmx_load_hpr(v_SSE, v1+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2);
gmx_store_hpr(v1+offset_jj[jj]+jj*GMX_SIMD_WIDTH_HERE/2, gmx_add_hpr(v_SSE, e_SSE1));
}
}
}
/* Returns a diagonal or off-diagonal interaction mask for SIMD128 lists */
-static unsigned int get_imask_x86_simd128(gmx_bool rdiag, int ci, int cj)
+static unsigned int get_imask_simd128(gmx_bool rdiag, int ci, int cj)
{
#ifndef GMX_DOUBLE /* cj-size = 4 */
return (rdiag && ci == cj ? NBNXN_INT_MASK_DIAG : NBNXN_INT_MASK_ALL);
}
/* Returns a diagonal or off-diagonal interaction mask for SIMD256 lists */
-static unsigned int get_imask_x86_simd256(gmx_bool rdiag, int ci, int cj)
+static unsigned int get_imask_simd256(gmx_bool rdiag, int ci, int cj)
{
#ifndef GMX_DOUBLE /* cj-size = 8 */
return (rdiag && ci == cj*2 ? NBNXN_INT_MASK_DIAG_J8_0 :
#ifdef GMX_NBNXN_SIMD
#if GMX_NBNXN_SIMD_BITWIDTH == 128
-#define get_imask_x86_simd_4xn get_imask_x86_simd128
+#define get_imask_simd_4xn get_imask_simd128
#else
#if GMX_NBNXN_SIMD_BITWIDTH == 256
-#define get_imask_x86_simd_4xn get_imask_x86_simd256
-#define get_imask_x86_simd_2xnn get_imask_x86_simd128
+#define get_imask_simd_4xn get_imask_simd256
+#define get_imask_simd_2xnn get_imask_simd128
#else
#error "unsupported GMX_NBNXN_SIMD_BITWIDTH"
#endif
* the research papers on the package. Check out http://www.gromacs.org.
*/
-#if GMX_NBNXN_SIMD_BITWIDTH == 128
-#define GMX_MM128_HERE
-#else
-#if GMX_NBNXN_SIMD_BITWIDTH == 256
-#define GMX_MM256_HERE
-#else
-#error "unsupported GMX_NBNXN_SIMD_BITWIDTH"
-#endif
+#if GMX_NBNXN_SIMD_BITWIDTH != 256
+#error "unsupported SIMD width"
#endif
+
#include "gmx_simd_macros.h"
+/* Define a few macros for half-width SIMD */
+#if defined GMX_X86_AVX_256 && !defined GMX_DOUBLE
+/* Half-width SIMD real type */
+#define gmx_mm_hpr __m128
+/* Half-width SIMD operations */
+/* Load reals at half-width aligned pointer b into half-width SIMD register a */
+#define gmx_load_hpr(a,b) a = _mm_load_ps(b)
+#define gmx_set1_hpr _mm_set1_ps
+/* Load reals at half-width aligned pointer b into two halves of a */
+#define gmx_loaddh_pr(a, b) a = gmx_mm256_load4_ps(b)
+/* Concatenate half-width SIMD registers b and c into full-width register a */
+#define gmx_2hpr_to_pr(a, b, c) a = _mm256_insertf128_ps(_mm256_castps128_ps256(b), c, 0x1)
+#else
+#error "Half-width SIMD macros are not yet defined"
+#endif
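+/* Example: gmx_2hpr_to_pr(a, b, c) yields a = {b0,b1,b2,b3, c0,c1,c2,c3},
+ * i.e. b ends up in the low 128-bit half of a and c in the high half.
+ */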
+
+
#if GMX_SIMD_WIDTH_HERE >= 2*NBNXN_CPU_CLUSTER_I_SIZE
#define STRIDE_S (GMX_SIMD_WIDTH_HERE/2)
#else
static gmx_inline gmx_mm_pr gmx_load_hpr_hilo_pr(const real *a)
{
- gmx_mm_hpr a_SSE;
+ gmx_mm_hpr a_S;
+ gmx_mm_pr a_a_S;
+
+ gmx_load_hpr(a_S, a);
- a_SSE = _mm_load_ps(a);
+ gmx_2hpr_to_pr(a_a_S, a_S, a_S);
- return gmx_2hpr_to_pr(a_SSE, a_SSE);
+ return a_a_S;
}
static gmx_inline gmx_mm_pr gmx_set_2real_shift_pr(const real *a, real shift)
{
- gmx_mm_hpr a0, a1;
+ gmx_mm_hpr a0_S, a1_S;
+ gmx_mm_pr a0_a1_S;
- a0 = _mm_set1_ps(a[0] + shift);
- a1 = _mm_set1_ps(a[1] + shift);
+ a0_S = gmx_set1_hpr(a[0] + shift);
+ a1_S = gmx_set1_hpr(a[1] + shift);
- return gmx_2hpr_to_pr(a1, a0);
+ gmx_2hpr_to_pr(a0_a1_S, a0_S, a1_S);
+
+ return a0_a1_S;
}
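+/* The register returned above holds a[0]+shift replicated in the low half
+ * and a[1]+shift in the high half: one coordinate of two i-atoms packed
+ * into a single full-width register, as the 2xnn kernel layout requires.
+ */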
/* Copies PBC-shifted i-cell packed atom coordinates to the working array */
x_ci->iz_SSE2 = gmx_set_2real_shift_pr(x + ia + 2*STRIDE_S + 2, shz);
}
+#ifndef GMX_HAVE_SIMD_ANYTRUE
+/* Fallback function in case gmx_anytrue_pr is not present */
+static gmx_inline gmx_bool
+gmx_anytrue_2xn_pr(gmx_mm_pr bool_S)
+{
+ real bools_array[2*GMX_SIMD_WIDTH_HERE], *bools;
+ gmx_bool any;
+ int s;
+
+ bools = gmx_simd_align_real(bools_array);
+
+ gmx_store_pr(bools, bool_S);
+
+ any = FALSE;
+ for (s = 0; s < GMX_SIMD_WIDTH_HERE; s++)
+ {
+ if (GMX_SIMD_IS_TRUE(bools[s]))
+ {
+ any = TRUE;
+ }
+ }
+
+ return any;
+}
+#endif
+
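+/* On x86, GMX_HAVE_SIMD_ANYTRUE is expected to be set, with gmx_anytrue_pr
+ * mapping to a movemask-style intrinsic (taking over from the removed
+ * gmx_movemask_pr); the fallback above is for SIMD ports without one.
+ */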
/* SIMD code for making a pair list of cell ci vs cell cjf-cjl
* for coordinates in packed format.
* Checks bounding box distances and possibly atom pair distances.
wco_any_SSE = gmx_or_pr(wco_SSE0, wco_SSE2);
- InRange = gmx_movemask_pr(wco_any_SSE);
+#ifdef GMX_HAVE_SIMD_ANYTRUE
+ InRange = gmx_anytrue_pr(wco_any_SSE);
+#else
+ InRange = gmx_anytrue_2xn_pr(wco_any_SSE);
+#endif
*ndistc += 2*GMX_SIMD_WIDTH_HERE;
}
wco_any_SSE = gmx_or_pr(wco_SSE0, wco_SSE2);
- InRange = gmx_movemask_pr(wco_any_SSE);
+#ifdef GMX_HAVE_SIMD_ANYTRUE
+ InRange = gmx_anytrue_pr(wco_any_SSE);
+#else
+ InRange = gmx_anytrue_2xn_pr(wco_any_SSE);
+#endif
*ndistc += 2*GMX_SIMD_WIDTH_HERE;
}
{
/* Store cj and the interaction mask */
nbl->cj[nbl->ncj].cj = CI_TO_CJ_SIMD_2XNN(gridj->cell0) + cj;
- nbl->cj[nbl->ncj].excl = get_imask_x86_simd_2xnn(remove_sub_diag, ci, cj);
+ nbl->cj[nbl->ncj].excl = get_imask_simd_2xnn(remove_sub_diag, ci, cj);
nbl->ncj++;
}
/* Increase the closing index in i super-cell list */
}
#undef STRIDE_S
-#undef GMX_MM128_HERE
-#undef GMX_MM256_HERE
+
+#undef gmx_mm_hpr
+#undef gmx_load_hpr
+#undef gmx_set1_hpr
+#undef gmx_2hpr_to_pr
* the research papers on the package. Check out http://www.gromacs.org.
*/
-#if GMX_NBNXN_SIMD_BITWIDTH == 128
-#define GMX_MM128_HERE
-#else
-#if GMX_NBNXN_SIMD_BITWIDTH == 256
-#define GMX_MM256_HERE
-#else
+#if !(GMX_NBNXN_SIMD_BITWIDTH == 128 || GMX_NBNXN_SIMD_BITWIDTH == 256)
#error "unsupported GMX_NBNXN_SIMD_BITWIDTH"
#endif
+
+#ifdef GMX_NBNXN_HALF_WIDTH_SIMD
+#define GMX_USE_HALF_WIDTH_SIMD_HERE
#endif
#include "gmx_simd_macros.h"
x_ci->iz_SSE3 = gmx_set1_pr(x[ia + 2*STRIDE_S + 3] + shz);
}
+#ifndef GMX_HAVE_SIMD_ANYTRUE
+/* Fallback function in case gmx_anytrue_pr is not present */
+static gmx_inline gmx_bool
+gmx_anytrue_4xn_pr(gmx_mm_pr bool_S)
+{
+ real bools_array[2*GMX_SIMD_WIDTH_HERE], *bools;
+ gmx_bool any;
+ int s;
+
+ bools = gmx_simd_align_real(bools_array);
+
+ gmx_store_pr(bools, bool_S);
+
+ any = FALSE;
+ for (s = 0; s < GMX_SIMD_WIDTH_HERE; s++)
+ {
+ if (GMX_SIMD_IS_TRUE(bools[s]))
+ {
+ any = TRUE;
+ }
+ }
+
+ return any;
+}
+#endif
+
/* SIMD code for making a pair list of cell ci vs cell cjf-cjl
* for coordinates in packed format.
* Checks bounding box distances and possibly atom pair distances.
wco_any_SSE23 = gmx_or_pr(wco_SSE2, wco_SSE3);
wco_any_SSE = gmx_or_pr(wco_any_SSE01, wco_any_SSE23);
- InRange = gmx_movemask_pr(wco_any_SSE);
+#ifdef GMX_HAVE_SIMD_ANYTRUE
+ InRange = gmx_anytrue_pr(wco_any_SSE);
+#else
+ InRange = gmx_anytrue_4xn_pr(wco_any_SSE);
+#endif
*ndistc += 4*GMX_SIMD_WIDTH_HERE;
}
wco_any_SSE23 = gmx_or_pr(wco_SSE2, wco_SSE3);
wco_any_SSE = gmx_or_pr(wco_any_SSE01, wco_any_SSE23);
- InRange = gmx_movemask_pr(wco_any_SSE);
+#ifdef GMX_HAVE_SIMD_ANYTRUE
+ InRange = gmx_anytrue_pr(wco_any_SSE);
+#else
+ InRange = gmx_anytrue_4xn_pr(wco_any_SSE);
+#endif
*ndistc += 4*GMX_SIMD_WIDTH_HERE;
}
{
/* Store cj and the interaction mask */
nbl->cj[nbl->ncj].cj = CI_TO_CJ_SIMD_4XN(gridj->cell0) + cj;
- nbl->cj[nbl->ncj].excl = get_imask_x86_simd_4xn(remove_sub_diag, ci, cj);
+ nbl->cj[nbl->ncj].excl = get_imask_simd_4xn(remove_sub_diag, ci, cj);
nbl->ncj++;
}
/* Increase the closing index in i super-cell list */
}
#undef STRIDE_S
-#undef GMX_MM128_HERE
-#undef GMX_MM256_HERE
+#undef GMX_USE_HALF_WIDTH_SIMD_HERE