}
static void
-nbnxn_atomdata_reduce_reals_simd(real * gmx_restrict dest,
- gmx_bool bDestSet,
- real ** gmx_restrict src,
- int nsrc,
- int i0, int i1)
+nbnxn_atomdata_reduce_reals_simd(real gmx_unused * gmx_restrict dest,
+ gmx_bool gmx_unused bDestSet,
+ real gmx_unused ** gmx_restrict src,
+ int gmx_unused nsrc,
+ int gmx_unused i0, int gmx_unused i1)
{
#ifdef GMX_NBNXN_SIMD
/* The SIMD width here is actually independent of that in the kernels,
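The hunk above only annotates parameters that go unused when GMX_NBNXN_SIMD is not defined; the semantics they carry in the SIMD body can be read off from a scalar equivalent. A minimal sketch (the helper name is hypothetical; real and gmx_bool are the GROMACS types used throughout this file):

/* Scalar sketch of the reduction above: sum nsrc per-thread output
 * buffers into dest over the index range [i0, i1). When bDestSet is
 * FALSE, dest holds no prior data and is overwritten rather than
 * accumulated into. Hypothetical helper, not the GROMACS code. */
static void
reduce_reals_scalar(real *dest, gmx_bool bDestSet,
                    real **src, int nsrc, int i0, int i1)
{
    int i, s;

    for (i = i0; i < i1; i++)
    {
        real sum = bDestSet ? dest[i] : 0;

        for (s = 0; s < nsrc; s++)
        {
            sum += src[s][i];
        }
        dest[i] = sum;
    }
}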
const int *atinfo,
rvec *x,
int sx, int sy, int sz,
- nbnxn_bb_t *bb_work_aligned)
+ nbnxn_bb_t gmx_unused *bb_work_aligned)
{
- int na, a;
- size_t offset;
+ int na, a;
+ size_t offset;
nbnxn_bb_t *bb_ptr;
#ifdef NBNXN_BBXXXX
float *pbb_ptr;
{
return gmx_simd4_add_pr( gmx_simd4_add_pr( gmx_simd4_mul_pr(x, x), gmx_simd4_mul_pr(y, y) ), gmx_simd4_mul_pr(z, z) );
}
-#endif
/* 4-wide SIMD function which determines if any atom pair between two cells,
* both with 8 atoms, is within distance sqrt(rl2).
int csj, int stride, const real *x_j,
real rl2)
{
-#ifdef NBNXN_SEARCH_SIMD4_FLOAT_X_BB
gmx_simd4_pr ix_S0, iy_S0, iz_S0;
gmx_simd4_pr ix_S1, iy_S1, iz_S1;
}
return FALSE;
-#else
- /* No SIMD4 */
- gmx_incons("SIMD4 function called without 4-wide SIMD support");
-
- return TRUE;
-#endif
}
+#endif
+
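For reference, the test that this SIMD4 function performs (and that the removed gmx_incons fallback used to guard) is equivalent to the following scalar loop; a sketch with the strided coordinate-layout arguments simplified, using the TRUE/FALSE convention of the surrounding code:

/* Scalar sketch of the pair-distance test described above: return
 * TRUE as soon as any atom pair between two na_c-atom cells is
 * within sqrt(rl2). Hypothetical helper; the real code also handles
 * packed/strided coordinate layouts. */
static gmx_bool subc_in_range_scalar(int na_c,
                                     const real *x_i, const real *x_j,
                                     real rl2)
{
    int i, j;

    for (i = 0; i < na_c; i++)
    {
        for (j = 0; j < na_c; j++)
        {
            real dx = x_i[i*3+0] - x_j[j*3+0];
            real dy = x_i[i*3+1] - x_j[j*3+1];
            real dz = x_i[i*3+2] - x_j[j*3+2];

            if (dx*dx + dy*dy + dz*dz < rl2)
            {
                return TRUE;
            }
        }
    }

    return FALSE;
}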
/* Returns the j sub-cell for index cj_ind */
static int nbl_cj(const nbnxn_pairlist_t *nbl, int cj_ind)
real rl2, float rbb2,
int *ndistc)
{
- int na_c;
- int npair;
- int cjo, ci1, ci, cj, cj_gl;
- int cj4_ind, cj_offset;
- unsigned imask;
- nbnxn_cj4_t *cj4;
+ int na_c;
+ int npair;
+ int cjo, ci1, ci, cj, cj_gl;
+ int cj4_ind, cj_offset;
+ unsigned imask;
+ nbnxn_cj4_t *cj4;
#ifdef NBNXN_BBXXXX
const float *pbb_ci;
#else
const nbnxn_bb_t *bb_ci;
#endif
- const real *x_ci;
- float *d2l, d2;
- int w;
+ const real *x_ci;
+ float *d2l, d2;
+ int w;
#define PRUNE_LIST_CPU_ONE
#ifdef PRUNE_LIST_CPU_ONE
int ci_last = -1;
gmx_bool bMakeList;
real shx, shy, shz;
int conv_i, cell0_i;
- const nbnxn_bb_t *bb_i=NULL;
+ const nbnxn_bb_t *bb_i = NULL;
#ifdef NBNXN_BBXXXX
- const float *pbb_i=NULL;
+ const float *pbb_i = NULL;
#endif
const float *bbcz_i, *bbcz_j;
const int *flags_i;
}
-static void spread_q_bsplines_thread(pmegrid_t *pmegrid,
- pme_atomcomm_t *atc, splinedata_t *spline,
- pme_spline_work_t *work)
+static void spread_q_bsplines_thread(pmegrid_t *pmegrid,
+ pme_atomcomm_t *atc,
+ splinedata_t *spline,
+ pme_spline_work_t gmx_unused *work)
{
/* spread charges from home atoms to local grid */
}
}
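The "spread charges from home atoms to local grid" step that this function implements distributes each charge over order^3 nearby grid points weighted by B-spline coefficients. A hypothetical scalar sketch of that operation (names and data layout are illustrative, not the GROMACS structures):

/* Sketch: spread one charge q located at grid cell (ix, iy, iz) using
 * per-dimension B-spline weights thx/thy/thz of length order. */
static void spread_one_charge(real *grid, int ny, int nz,
                              real q, int ix, int iy, int iz,
                              const real *thx, const real *thy,
                              const real *thz, int order)
{
    int i, j, k;

    for (i = 0; i < order; i++)
    {
        for (j = 0; j < order; j++)
        {
            real qxy = q*thx[i]*thy[j];

            for (k = 0; k < order; k++)
            {
                grid[((ix+i)*ny + (iy+j))*nz + (iz+k)] += qxy*thz[k];
            }
        }
    }
}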
-static void set_grid_alignment(int *pmegrid_nz, int pme_order)
+static void set_grid_alignment(int gmx_unused *pmegrid_nz, int gmx_unused pme_order)
{
#ifdef PME_SIMD4_SPREAD_GATHER
if (pme_order == 5
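The body elided here pads the z grid dimension so that the 4-wide SIMD spread/gather kernels can use aligned accesses; the padding itself is the usual round-up-to-a-multiple idiom. A sketch, under the assumption that the SIMD4 width is 4 elements:

/* Sketch: round a grid dimension up to a multiple of the 4-wide SIMD
 * width so aligned loads/stores can be used (illustrative only; the
 * elided body above does the equivalent for *pmegrid_nz). */
static int round_up_to_simd4(int n)
{
    const int w = 4; /* SIMD4 width in elements */

    return ((n + w - 1)/w)*w;
}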
#ifdef PME_SIMD
/* Calculate exponentials through SIMD */
-inline static void calc_exponentials(int start, int end, real f, real *d_aligned, real *r_aligned, real *e_aligned)
+inline static void calc_exponentials(int gmx_unused start, int end, real f, real *d_aligned, real *r_aligned, real *e_aligned)
{
{
const gmx_mm_pr two = gmx_set1_pr(2.0);
gmx_mm_pr tmp_d1, d_inv, tmp_r, tmp_e;
int kx;
f_simd = gmx_set1_pr(f);
+ /* We only need to calculate from start, but since start is 0 or 1
+ * and we want to use aligned loads/stores, we always start from 0.
+ */
for (kx = 0; kx < end; kx += GMX_SIMD_WIDTH_HERE)
{
tmp_d1 = gmx_load_pr(d_aligned+kx);
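A scalar equivalent of the loop above makes the new comment concrete: with aligned, padded buffers it is cheaper to recompute the kx = 0 element than to honor start with an unaligned prologue. A sketch, assuming the in-place convention suggested by the SIMD body (d is inverted, r is exponentiated, e receives f*exp(r)/d); real is the GROMACS type:

#include <math.h>

/* Scalar sketch of calc_exponentials: always starts at 0, so when
 * start == 1 element 0 is computed redundantly, keeping every
 * SIMD-width chunk of d/r/e aligned. Assumed semantics, not the
 * GROMACS implementation. */
static void calc_exponentials_scalar(int end, real f,
                                     real *d, real *r, real *e)
{
    int kx;

    for (kx = 0; kx < end; kx++)
    {
        d[kx] = 1.0/d[kx];
        r[kx] = exp(r[kx]);
        e[kx] = f*r[kx]*d[kx];
    }
}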
p0 = grid + iy*local_size[ZZ]*local_size[XX] + iz*local_size[XX];
/* We should skip the k-space point (0,0,0) */
+ /* Note that since x is the minor index here, local_offset[XX] = 0 */
if (local_offset[XX] > 0 || ky > 0 || kz > 0)
{
kxstart = local_offset[XX];
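Spelled out, the test above exists to skip the singular k = (0,0,0) term, whose 1/k^2 factor in the reciprocal-space sum is undefined. A sketch of the effective control flow, under the assumption that the branch elided from this hunk starts one grid point later:

/* Sketch of the intended logic around the branch above (assumption:
 * the elided else branch skips the first point). */
if (local_offset[XX] > 0 || ky > 0 || kz > 0)
{
    kxstart = local_offset[XX];     /* this row cannot contain k = (0,0,0) */
}
else
{
    kxstart = local_offset[XX] + 1; /* skip the singular k = (0,0,0) point */
}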
*fraction_shift = fsh;
}
-static pme_spline_work_t *make_pme_spline_work(int order)
+static pme_spline_work_t *make_pme_spline_work(int gmx_unused order)
{
pme_spline_work_t *work;
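Nearly every hunk in this patch annotates parameters that become unused when a SIMD path is compiled out, rather than changing behavior. A minimal sketch of how an annotation macro like gmx_unused is typically defined (an assumption for illustration; the actual definition lives in the GROMACS headers):

/* Hypothetical gmx_unused-style definition: expands to GCC's unused
 * attribute where available and to nothing elsewhere, so the compiler
 * does not warn about parameters used only in some #ifdef branches. */
#ifndef gmx_unused
#ifdef __GNUC__
#define gmx_unused __attribute__ ((unused))
#else
#define gmx_unused
#endif
#endif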