#ifndef _nbnxn_kernel_simd_utils_h_
#define _nbnxn_kernel_simd_utils_h_
+#include "config.h"
+
#include "gromacs/legacyheaders/types/simple.h"
/*! \brief Provides hardware-specific utility routines for the SIMD kernels.
return NULL;
}
-#include "nbnxn_kernel_simd_utils_ref.h"
+#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_simd_utils_ref.h"
#else /* GMX_SIMD_REFERENCE */
-#if defined GMX_SIMD_X86_SSE2_OR_HIGHER && !defined __MIC__
+#if defined GMX_TARGET_X86 && !defined __MIC__
/* Include x86 SSE2 compatible SIMD functions */
/* Set the stride for the lookup of the two LJ parameters from their
#endif
/* Align a stack-based thread-local working array. Table loads on
- * full-width AVX_256 use the array, but other implementations do
- * not. */
+ * 256-bit AVX use the array, but other implementations do not.
+ */
static gmx_inline int *
-prepare_table_load_buffer(const int gmx_unused *array)
+prepare_table_load_buffer(int gmx_unused *array)
{
+    /* Only wide-SIMD builds (8-wide, or 4-wide in double precision) route
+     * table loads through this thread-local buffer; narrower builds return
+     * NULL and the caller does direct loads instead.
+     */
-#if defined GMX_SIMD_X86_AVX_256_OR_HIGHER && !defined GMX_USE_HALF_WIDTH_SIMD_HERE
+#if GMX_SIMD_REAL_WIDTH >= 8 || (defined GMX_DOUBLE && GMX_SIMD_REAL_WIDTH >= 4)
return gmx_simd_align_i(array);
#else
return NULL;
#endif
}
-#if defined GMX_SIMD_X86_AVX_256_OR_HIGHER && !defined GMX_USE_HALF_WIDTH_SIMD_HERE
-
-/* With full AVX-256 SIMD, half SIMD-width table loads are optimal */
-#if GMX_SIMD_REAL_WIDTH == 8
-#define TAB_FDV0
-#endif
#ifdef GMX_DOUBLE
-#include "nbnxn_kernel_simd_utils_x86_256d.h"
-#else /* GMX_DOUBLE */
-#include "nbnxn_kernel_simd_utils_x86_256s.h"
-#endif /* GMX_DOUBLE */
-
-#else /* defined GMX_SIMD_X86_AVX_256_OR_HIGHER && !defined GMX_USE_HALF_WIDTH_SIMD_HERE */
-
-/* We use the FDV0 table layout when we can use aligned table loads */
-#if GMX_SIMD_REAL_WIDTH == 4
+#if GMX_SIMD_REAL_WIDTH == 2
+#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_simd_utils_x86_128d.h"
+#else
+#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_simd_utils_x86_256d.h"
+#endif
+#else /* GMX_DOUBLE */
+/* In single precision, aligned FDV0 table loads are optimal */
#define TAB_FDV0
+#if GMX_SIMD_REAL_WIDTH == 4
+#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_simd_utils_x86_128s.h"
+#else
+#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_simd_utils_x86_256s.h"
#endif
-
-#ifdef GMX_DOUBLE
-#include "nbnxn_kernel_simd_utils_x86_128d.h"
-#else /* GMX_DOUBLE */
-#include "nbnxn_kernel_simd_utils_x86_128s.h"
#endif /* GMX_DOUBLE */
-#endif /* defined GMX_SIMD_X86_AVX_256_OR_HIGHER && !defined GMX_USE_HALF_WIDTH_SIMD_HERE */
-
-#else /* GMX_SIMD_X86_SSE2_OR_HIGHER */
+#else /* GMX_TARGET_X86 && !__MIC__ */
#if GMX_SIMD_REAL_WIDTH > 4
/* For width>4 we use unaligned loads. And thus we can use the minimal stride */
#endif
#ifdef GMX_SIMD_IBM_QPX
-#include "nbnxn_kernel_simd_utils_ibm_qpx.h"
+#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_simd_utils_ibm_qpx.h"
#endif /* GMX_SIMD_IBM_QPX */
#ifdef __MIC__
-#include "nbnxn_kernel_simd_utils_x86_mic.h"
-#endif
-
-#endif /* GMX_SIMD_X86_SSE2_OR_HIGHER */
-#endif /* GMX_SIMD_REFERENCE */
-
-#if GMX_SIMD_REAL_WIDTH == 4
-#define gmx_mm_pr4 gmx_simd_real_t
-#define gmx_load_pr4 gmx_simd_load_r
-#define gmx_store_pr4 gmx_simd_store_r
-#define gmx_add_pr4 gmx_simd_add_r
+#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_simd_utils_x86_mic.h"
#endif
-#ifndef HAVE_GMX_SUM_SIMD /* should be defined for arch with hardware reduce */
-static gmx_inline real
-gmx_sum_simd2(gmx_simd_real_t x, real* b)
-{
- gmx_simd_store_r(b, x);
- return b[0]+b[1];
-}
+#endif /* GMX_TARGET_X86 && !__MIC__ */
-#if GMX_SIMD_REAL_WIDTH >= 4
-static gmx_inline real
-gmx_sum_simd4(gmx_mm_pr4 x, real* b)
-{
- gmx_store_pr4(b, x);
- return b[0]+b[1]+b[2]+b[3];
-}
-#endif
+#endif /* GMX_SIMD_REFERENCE */
-#if GMX_SIMD_REAL_WIDTH == 2
-static gmx_inline real gmx_sum_simd(gmx_simd_real_t x, real* b)
-{
- gmx_simd_store_r(b, x);
- return b[0]+b[1];
-}
-#elif GMX_SIMD_REAL_WIDTH == 4
-static gmx_inline real gmx_sum_simd(gmx_simd_real_t x, real* b)
-{
- gmx_simd_store_r(b, x);
- return b[0]+b[1]+b[2]+b[3];
-}
-#elif GMX_SIMD_REAL_WIDTH == 8
-static gmx_inline real gmx_sum_simd(gmx_simd_real_t x, real* b)
-{
- gmx_simd_store_r(b, x);
- return b[0]+b[1]+b[2]+b[3]+b[4]+b[5]+b[6]+b[7];
-}
-#elif GMX_SIMD_REAL_WIDTH == 16
-/* This is getting ridiculous, SIMD horizontal adds would help,
- * but this is not performance critical (only used to reduce energies)
+/* If the simd width is 4, but simd4 instructions are not defined,
+ * reuse the simd real type and the four instructions we need.
*/
-static gmx_inline real gmx_sum_simd(gmx_simd_real_t x, real* b)
-{
- gmx_simd_store_r(b, x);
- return b[0]+b[1]+b[2]+b[3]+b[4]+b[5]+b[6]+b[7]+b[8]+b[9]+b[10]+b[11]+b[12]+b[13]+b[14]+b[15];
-}
-#else
-#error "unsupported kernel configuration"
+#if GMX_SIMD_REAL_WIDTH == 4 && \
+ !((!defined GMX_DOUBLE && defined GMX_SIMD4_HAVE_FLOAT) || \
+ (defined GMX_DOUBLE && defined GMX_SIMD4_HAVE_DOUBLE))
+#define gmx_simd4_real_t gmx_simd_real_t
+#define gmx_simd4_load_r gmx_simd_load_r
+#define gmx_simd4_store_r gmx_simd_store_r
+#define gmx_simd4_add_r gmx_simd_add_r
+#define gmx_simd4_reduce_r gmx_simd_reduce_r
#endif
-#endif //HAVE_GMX_SUM_SIMD
#ifdef UNROLLJ
/* Add energy register to possibly multiple terms in the energy array */
-static inline void add_ener_grp(gmx_simd_real_t e_S, real *v, const int *offset_jj)
+static gmx_inline void add_ener_grp(gmx_simd_real_t e_S, real *v, const int *offset_jj)
{
int jj;
/* As add_ener_grp, but for two groups of UNROLLJ/2 stored in
* a single SIMD register.
*/
-static inline void
+static gmx_inline void
add_ener_grp_halves(gmx_simd_real_t e_S, real *v0, real *v1, const int *offset_jj)
{
gmx_mm_hpr e_S0, e_S1;