unsigned excl; /* The exclusion (interaction) bits */
} nbnxn_cj_t;
+/* In nbnxn_ci_t the integer shift contains the shift in the lower 7 bits.
+ * The upper bits contain information for non-bonded kernel optimization.
+ * Simply calculating LJ and Coulomb for all pairs in a cluster pair is fine.
+ * But three flags can be used to skip interactions, currently only for subc=0
+ * !(shift & NBNXN_CI_DO_LJ(subc)) => we can skip LJ for all pairs
+ * shift & NBNXN_CI_HALF_LJ(subc) => we can skip LJ for the second half of the i particles
+ * !(shift & NBNXN_CI_DO_COUL(subc)) => we can skip Coulomb for all pairs
+ */
#define NBNXN_CI_SHIFT 127
#define NBNXN_CI_DO_LJ(subc) (1<<(7+3*(subc)))
#define NBNXN_CI_HALF_LJ(subc) (1<<(8+3*(subc)))
/* Simple pair-list i-unit */
typedef struct {
int ci; /* i-cluster */
- int shift; /* Shift vector index plus possible flags */
+ int shift; /* Shift vector index plus possible flags, see above */
int cj_ind_start; /* Start index into cj */
int cj_ind_end; /* End index into cj */
} nbnxn_ci_t;
#endif
if (getenv("GMX_NBNXN_SIMD_4XN") != NULL)
{
-#ifdef GMX_NBNXN_SIMD_2XNN
+#ifdef GMX_NBNXN_SIMD_4XN
*kernel_type = nbnxnk4xN_SIMD_4xN;
#else
gmx_fatal(FARGS,"SIMD 4xN kernels requested, but Gromacs has been compiled without support for these kernels");
real *nbfp_i;
int n,ci,ci_sh;
int ish,ishf;
- gmx_bool half_LJ,do_coul;
+ gmx_bool do_LJ,half_LJ,do_coul;
int cjind0,cjind1,cjind;
int ip,jp;
ci = nbln->ci;
ci_sh = (ish == CENTRAL ? ci : -1);
- half_LJ = (nbln->shift & NBNXN_CI_HALF_LJ(0));
+ /* We have 5 LJ/C combinations, but use only three inner loops,
+ * as the other combinations are unlikely and/or not much faster:
+ * inner half-LJ + C for half-LJ + C / no-LJ + C
+ * inner LJ + C for full-LJ + C
+ * inner LJ for full-LJ + no-C / half-LJ + no-C
+ */
+ do_LJ = (nbln->shift & NBNXN_CI_DO_LJ(0));
do_coul = (nbln->shift & NBNXN_CI_DO_COUL(0));
+ half_LJ = ((nbln->shift & NBNXN_CI_HALF_LJ(0)) || !do_LJ) && do_coul;
#ifdef CALC_ENERGIES
#ifndef ENERGY_GROUPS
}
}
- /* With half_LJ we currently always calculate Coulomb interactions */
- if (do_coul || half_LJ)
+ if (do_coul)
{
#ifdef CALC_ENERGIES
real Vc_sub_self;
* the research papers on the package. Check out http://www.gromacs.org.
*/
-/* GMX_MM128_HERE or GMX_MM256_HERE should be set before including this file */
+/* GMX_MM256_HERE should be set before including this file */
#include "gmx_simd_macros.h"
#define SUM_SIMD4(x) (x[0]+x[1]+x[2]+x[3])
#define UNROLLI NBNXN_CPU_CLUSTER_I_SIZE
#define UNROLLJ (GMX_SIMD_WIDTH_HERE/2)
-#if defined GMX_MM128_HERE || defined GMX_DOUBLE
-#define STRIDE 4
-#endif
-#if defined GMX_MM256_HERE && !defined GMX_DOUBLE
+#if defined GMX_MM256_HERE
#define STRIDE 4
#endif
-#ifdef GMX_MM128_HERE
-#ifndef GMX_DOUBLE
-/* SSE single precision 4x4 kernel */
-#define SUM_SIMD(x) SUM_SIMD4(x)
-#define TAB_FDV0
-#else
-/* SSE double precision 4x2 kernel */
-#define SUM_SIMD(x) (x[0]+x[1])
-#endif
-#endif
-
#ifdef GMX_MM256_HERE
#ifndef GMX_DOUBLE
-/* AVX single precision 4x8 kernel */
+/* single precision 2x(4+4) kernel */
#define SUM_SIMD(x) (x[0]+x[1]+x[2]+x[3]+x[4]+x[5]+x[6]+x[7])
#define TAB_FDV0
#else
-/* AVX double precision 4x4 kernel */
-#define SUM_SIMD(x) SUM_SIMD4(x)
+#error "unsupported kernel configuration"
#endif
#endif
int nbfp_stride;
int n,ci,ci_sh;
int ish,ish3;
- gmx_bool half_LJ,do_coul;
+ gmx_bool do_LJ,half_LJ,do_coul;
int sci,scix,sciy,sciz,sci2;
int cjind0,cjind1,cjind;
int ip,jp;
gmx_mm_pr diag_SSE0 = _mm256_castsi256_ps( _mm256_set_epi32( 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 ));
gmx_mm_pr diag_SSE2 = _mm256_castsi256_ps( _mm256_set_epi32( 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffffff, 0x00000000, 0x00000000, 0x00000000 ));
-#ifndef GMX_MM256_HERE
- __m128i zeroi_SSE = _mm_setzero_si128();
-#endif
#ifdef GMX_X86_SSE4_1
gmx_mm_pr zero_SSE = gmx_set1_pr(0);
#endif
egps_jshift = 2*nbat->neg_2log;
egps_jmask = (1<<egps_jshift) - 1;
egps_jstride = (UNROLLJ>>1)*UNROLLJ;
- /* Major division is over i-particles: divide nVS by 4 for i-stride */
+ /* Major division is over i-particle energy groups, determine the stride */
Vstride_i = nbat->nenergrp*(1<<nbat->neg_2log)*egps_jstride;
#endif
ish = (nbln->shift & NBNXN_CI_SHIFT);
ish3 = ish*3;
- cjind0 = nbln->cj_ind_start;
- cjind1 = nbln->cj_ind_end;
- /* Currently only works super-cells equal to sub-cells */
+ cjind0 = nbln->cj_ind_start;
+ cjind1 = nbln->cj_ind_end;
ci = nbln->ci;
ci_sh = (ish == CENTRAL ? ci : -1);
sci += (ci & 1)*(STRIDE>>1);
#endif
- half_LJ = (nbln->shift & NBNXN_CI_HALF_LJ(0));
+ /* We have 5 LJ/C combinations, but use only three inner loops,
+ * as the other combinations are unlikely and/or not much faster:
+ * inner half-LJ + C for half-LJ + C / no-LJ + C
+ * inner LJ + C for full-LJ + C
+ * inner LJ for full-LJ + no-C / half-LJ + no-C
+ */
+ do_LJ = (nbln->shift & NBNXN_CI_DO_LJ(0));
do_coul = (nbln->shift & NBNXN_CI_DO_COUL(0));
+ half_LJ = ((nbln->shift & NBNXN_CI_HALF_LJ(0)) || !do_LJ) && do_coul;
#ifdef ENERGY_GROUPS
egps_i = nbat->energrp[ci];
iz_SSE0 = gmx_add_pr(gmx_load2_hpr(x+sciz) ,shZ_SSE);
iz_SSE2 = gmx_add_pr(gmx_load2_hpr(x+sciz+2),shZ_SSE);
- /* With half_LJ we currently always calculate Coulomb interactions */
- if (do_coul || half_LJ)
+ if (do_coul)
{
gmx_mm_pr facel_SSE;
#ifdef GMX_MM128_HERE
#ifndef GMX_DOUBLE
-/* SSE single precision 4x4 kernel */
+/* single precision 4x4 kernel */
#define SUM_SIMD(x) SUM_SIMD4(x)
#define TAB_FDV0
#else
-/* SSE double precision 4x2 kernel */
+/* double precision 4x2 kernel */
#define SUM_SIMD(x) (x[0]+x[1])
#endif
#endif
#ifdef GMX_MM256_HERE
#ifndef GMX_DOUBLE
-/* AVX single precision 4x8 kernel */
+/* single precision 4x8 kernel */
#define SUM_SIMD(x) (x[0]+x[1]+x[2]+x[3]+x[4]+x[5]+x[6]+x[7])
#define TAB_FDV0
#else
-/* AVX double precision 4x4 kernel */
+/* double precision 4x4 kernel */
#define SUM_SIMD(x) SUM_SIMD4(x)
#endif
#endif
int nbfp_stride;
int n,ci,ci_sh;
int ish,ish3;
- gmx_bool half_LJ,do_coul;
+ gmx_bool do_LJ,half_LJ,do_coul;
int sci,scix,sciy,sciz,sci2;
int cjind0,cjind1,cjind;
int ip,jp;
__m128d fix2_SSE,fiy2_SSE,fiz2_SSE;
#endif
-#ifndef GMX_MM256_HERE
+#ifdef GMX_MM128_HERE
#ifndef GMX_DOUBLE
__m128i mask0 = _mm_set_epi32( 0x0008, 0x0004, 0x0002, 0x0001 );
__m128i mask1 = _mm_set_epi32( 0x0080, 0x0040, 0x0020, 0x0010 );
__m128i mask2 = _mm_set_epi32( 0x0020, 0x0020, 0x0010, 0x0010 );
__m128i mask3 = _mm_set_epi32( 0x0080, 0x0080, 0x0040, 0x0040 );
#endif
-#else
+#endif
+#ifdef GMX_MM256_HERE
/* AVX: use floating point masks, as there are no integer instructions */
#ifndef GMX_DOUBLE
gmx_mm_pr mask0 = _mm256_castsi256_ps(_mm256_set_epi32( 0x0080, 0x0040, 0x0020, 0x0010, 0x0008, 0x0004, 0x0002, 0x0001 ));
#endif
#endif
-#ifndef GMX_MM256_HERE
+#ifdef GMX_MM128_HERE
#ifndef GMX_DOUBLE
__m128 diag_SSE0 = gmx_mm_castsi128_pr( _mm_set_epi32( 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 ));
__m128 diag_SSE1 = gmx_mm_castsi128_pr( _mm_set_epi32( 0xffffffff, 0xffffffff, 0x00000000, 0x00000000 ));
__m128d diag1_SSE2 = gmx_mm_castsi128_pd( _mm_set_epi32( 0xffffffff, 0xffffffff, 0x00000000, 0x00000000 ));
__m128d diag1_SSE3 = gmx_mm_castsi128_pd( _mm_set_epi32( 0x00000000, 0x00000000, 0x00000000, 0x00000000 ));
#endif
-#else /* GMX_MM256_HERE */
+#endif
+#ifdef GMX_MM256_HERE
#ifndef GMX_DOUBLE
gmx_mm_pr diag0_SSE0 = _mm256_castsi256_ps( _mm256_set_epi32( 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 ));
gmx_mm_pr diag0_SSE1 = _mm256_castsi256_ps( _mm256_set_epi32( 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000 ));
#endif
#endif
-#ifndef GMX_MM256_HERE
+#ifdef GMX_MM128_HERE
__m128i zeroi_SSE = _mm_setzero_si128();
#endif
#ifdef GMX_X86_SSE4_1
egps_jshift = 2*nbat->neg_2log;
egps_jmask = (1<<egps_jshift) - 1;
egps_jstride = (UNROLLJ>>1)*UNROLLJ;
- /* Major division is over i-particles: divide nVS by 4 for i-stride */
+ /* Major division is over i-particle energy groups, determine the stride */
Vstride_i = nbat->nenergrp*(1<<nbat->neg_2log)*egps_jstride;
#endif
ish = (nbln->shift & NBNXN_CI_SHIFT);
ish3 = ish*3;
- cjind0 = nbln->cj_ind_start;
- cjind1 = nbln->cj_ind_end;
- /* Currently only works super-cells equal to sub-cells */
+ cjind0 = nbln->cj_ind_start;
+ cjind1 = nbln->cj_ind_end;
ci = nbln->ci;
ci_sh = (ish == CENTRAL ? ci : -1);
sci += (ci & 1)*(STRIDE>>1);
#endif
- half_LJ = (nbln->shift & NBNXN_CI_HALF_LJ(0));
+ /* We have 5 LJ/C combinations, but use only three inner loops,
+ * as the other combinations are unlikely and/or not much faster:
+ * inner half-LJ + C for half-LJ + C / no-LJ + C
+ * inner LJ + C for full-LJ + C
+ * inner LJ for full-LJ + no-C / half-LJ + no-C
+ */
+ do_LJ = (nbln->shift & NBNXN_CI_DO_LJ(0));
do_coul = (nbln->shift & NBNXN_CI_DO_COUL(0));
+ half_LJ = ((nbln->shift & NBNXN_CI_HALF_LJ(0)) || !do_LJ) && do_coul;
#ifdef ENERGY_GROUPS
egps_i = nbat->energrp[ci];
iz_SSE2 = gmx_add_pr(gmx_load1_pr(x+sciz+2),shZ_SSE);
iz_SSE3 = gmx_add_pr(gmx_load1_pr(x+sciz+3),shZ_SSE);
- /* With half_LJ we currently always calculate Coulomb interactions */
- if (do_coul || half_LJ)
+ if (do_coul)
{
iq_SSE0 = gmx_set1_pr(facel*q[sci]);
iq_SSE1 = gmx_set1_pr(facel*q[sci+1]);
{
sort_cj_excl(nbl->cj+nbl->ci[nbl->nci].cj_ind_start,jlen,nbl->work);
- if (nbl->ci[nbl->nci].shift & NBNXN_CI_HALF_LJ(0))
+ /* The counts below are used for non-bonded pair/flop counts
+ * and should therefore match the available kernel setups.
+ */
+ if (!(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_COUL(0)))
{
- nbl->work->ncj_hlj += jlen;
+ nbl->work->ncj_noq += jlen;
}
- else if (!(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_COUL(0)))
+ else if ((nbl->ci[nbl->nci].shift & NBNXN_CI_HALF_LJ(0)) ||
+ !(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_LJ(0)))
{
- nbl->work->ncj_noq += jlen;
+ nbl->work->ncj_hlj += jlen;
}
nbl->nci++;