-/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
+/*
+ * This file is part of the GROMACS molecular simulation package.
*
+ * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
+ * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
+ * and including many others, as listed in the AUTHORS file in the
+ * top-level source directory and at http://www.gromacs.org.
*
- * This source code is part of
- *
- * G R O M A C S
+ * GROMACS is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
*
- * GROningen MAchine for Chemical Simulations
+ * GROMACS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
*
- * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
- * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
- * Copyright (c) 2001-2012, The GROMACS development team,
- * check out http://www.gromacs.org for more information.
-
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with GROMACS; if not, see
+ * http://www.gnu.org/licenses, or write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
- * If you want to redistribute modifications, please consider that
- * scientific software is very special. Version control is crucial -
- * bugs must be traceable. We will be happy to consider code for
- * inclusion in the official distribution, but derived work must not
- * be called official GROMACS. Details are found in the README & COPYING
- * files - if they are missing, get the official version at www.gromacs.org.
+ * If you want to redistribute modifications to GROMACS, please
+ * consider that scientific software is very special. Version
+ * control is crucial - bugs must be traceable. We will be happy to
+ * consider code for inclusion in the official distribution, but
+ * derived work must not be called official GROMACS. Details are found
+ * in the README & COPYING files - if they are missing, get the
+ * official version at http://www.gromacs.org.
*
* To help us fund GROMACS development, we humbly ask that you cite
- * the papers on the package - you can find them in the top README file.
- *
- * For more info, check our website at http://www.gromacs.org
+ * the research papers on the package. Check out http://www.gromacs.org.
*/
#ifdef HAVE_CONFIG_H
#include <math.h>
#include <string.h>
+#include <assert.h>
+
#include "sysstuff.h"
-#include "smalloc.h"
+#include "gromacs/utility/smalloc.h"
+#include "types/commrec.h"
#include "macros.h"
-#include "maths.h"
+#include "gromacs/math/utilities.h"
#include "vec.h"
#include "pbc.h"
#include "nbnxn_consts.h"
+/* nbnxn_internal.h includes gromacs/simd/macros.h */
#include "nbnxn_internal.h"
+#ifdef GMX_NBNXN_SIMD
+#include "gromacs/simd/vector_operations.h"
+#endif
#include "nbnxn_atomdata.h"
#include "nbnxn_search.h"
-#include "gmx_cyclecounter.h"
-#include "gmxfio.h"
#include "gmx_omp_nthreads.h"
#include "nrnb.h"
+#include "ns.h"
+#include "gromacs/fileio/gmxfio.h"
-/* Pair search box lower and upper corner in x,y,z.
- * Store this in 4 iso 3 reals, which is useful with SSE.
- * To avoid complicating the code we also use 4 without SSE.
- */
-#define NNBSBB_C 4
-#define NNBSBB_B (2*NNBSBB_C)
-/* Pair search box lower and upper bound in z only. */
-#define NNBSBB_D 2
-/* Pair search box lower and upper corner x,y,z indices */
-#define BBL_X 0
-#define BBL_Y 1
-#define BBL_Z 2
-#define BBU_X 4
-#define BBU_Y 5
-#define BBU_Z 6
-
-
-#ifdef NBNXN_SEARCH_BB_SSE
-/* We use SSE or AVX-128bit for bounding box calculations */
-
-#ifndef GMX_DOUBLE
-/* Single precision BBs + coordinates, we can also load coordinates using SSE */
-#define NBNXN_SEARCH_SSE_SINGLE
-#endif
+#ifdef NBNXN_SEARCH_BB_SIMD4
+/* Always use 4-wide SIMD for bounding box calculations */
-/* Include basic SSE2 stuff */
-#include <emmintrin.h>
+# ifndef GMX_DOUBLE
+/* Single precision BBs + coordinates, we can also load coordinates with SIMD */
+# define NBNXN_SEARCH_SIMD4_FLOAT_X_BB
+# endif
-#if defined NBNXN_SEARCH_SSE_SINGLE && (GPU_NSUBCELL == 4 || GPU_NSUBCELL == 8)
+# if defined NBNXN_SEARCH_SIMD4_FLOAT_X_BB && (GPU_NSUBCELL == 4 || GPU_NSUBCELL == 8)
/* Store bounding boxes with x, y and z coordinates in packs of 4 */
-#define NBNXN_PBB_SSE
-#endif
+# define NBNXN_PBB_SIMD4
+# endif
-/* The width of SSE/AVX128 with single precision for bounding boxes with GPU.
- * Here AVX-256 turns out to be slightly slower than AVX-128.
+/* The packed bounding box coordinate stride is always set to 4.
+ * With AVX we could use 8, but that turns out not to be faster.
*/
-#define STRIDE_PBB 4
-#define STRIDE_PBB_2LOG 2
+# define STRIDE_PBB 4
+# define STRIDE_PBB_2LOG 2
-#endif /* NBNXN_SEARCH_BB_SSE */
+#endif /* NBNXN_SEARCH_BB_SIMD4 */
#ifdef GMX_NBNXN_SIMD
#define X_IND_CJ_J8(cj) ((cj)*STRIDE_P8)
/* The j-cluster size is matched to the SIMD width */
-#if GMX_NBNXN_SIMD_BITWIDTH == 128
-#ifdef GMX_DOUBLE
+#if GMX_SIMD_REAL_WIDTH == 2
#define CI_TO_CJ_SIMD_4XN(ci) CI_TO_CJ_J2(ci)
#define X_IND_CI_SIMD_4XN(ci) X_IND_CI_J2(ci)
#define X_IND_CJ_SIMD_4XN(cj) X_IND_CJ_J2(cj)
#else
-#define CI_TO_CJ_SIMD_4XN(ci) CI_TO_CJ_J4(ci)
-#define X_IND_CI_SIMD_4XN(ci) X_IND_CI_J4(ci)
-#define X_IND_CJ_SIMD_4XN(cj) X_IND_CJ_J4(cj)
-#endif
-#else
-#if GMX_NBNXN_SIMD_BITWIDTH == 256
-#ifdef GMX_DOUBLE
+#if GMX_SIMD_REAL_WIDTH == 4
#define CI_TO_CJ_SIMD_4XN(ci) CI_TO_CJ_J4(ci)
#define X_IND_CI_SIMD_4XN(ci) X_IND_CI_J4(ci)
#define X_IND_CJ_SIMD_4XN(cj) X_IND_CJ_J4(cj)
#else
+#if GMX_SIMD_REAL_WIDTH == 8
#define CI_TO_CJ_SIMD_4XN(ci) CI_TO_CJ_J8(ci)
#define X_IND_CI_SIMD_4XN(ci) X_IND_CI_J8(ci)
#define X_IND_CJ_SIMD_4XN(cj) X_IND_CJ_J8(cj)
#define CI_TO_CJ_SIMD_2XNN(ci) CI_TO_CJ_J4(ci)
#define X_IND_CI_SIMD_2XNN(ci) X_IND_CI_J4(ci)
#define X_IND_CJ_SIMD_2XNN(cj) X_IND_CJ_J4(cj)
-#endif
#else
-#error "unsupported GMX_NBNXN_SIMD_WIDTH"
+#if GMX_SIMD_REAL_WIDTH == 16
+#define CI_TO_CJ_SIMD_2XNN(ci) CI_TO_CJ_J8(ci)
+#define X_IND_CI_SIMD_2XNN(ci) X_IND_CI_J8(ci)
+#define X_IND_CJ_SIMD_2XNN(cj) X_IND_CJ_J8(cj)
+#else
+#error "unsupported GMX_SIMD_REAL_WIDTH"
+#endif
+#endif
#endif
#endif
#endif /* GMX_NBNXN_SIMD */
-/* Interaction masks for 4xN atom interactions.
- * Bit i*CJ_SIZE + j tells if atom i and j interact.
- */
-/* All interaction mask is the same for all kernels */
-#define NBNXN_INT_MASK_ALL 0xffffffff
-/* 4x4 kernel diagonal mask */
-#define NBNXN_INT_MASK_DIAG 0x08ce
-/* 4x2 kernel diagonal masks */
-#define NBNXN_INT_MASK_DIAG_J2_0 0x0002
-#define NBNXN_INT_MASK_DIAG_J2_1 0x002F
-/* 4x8 kernel diagonal masks */
-#define NBNXN_INT_MASK_DIAG_J8_0 0xf0f8fcfe
-#define NBNXN_INT_MASK_DIAG_J8_1 0x0080c0e0
-
-
-#ifdef NBNXN_SEARCH_BB_SSE
+#ifdef NBNXN_SEARCH_BB_SIMD4
/* Store bounding boxes corners as quadruplets: xxxxyyyyzzzz */
#define NBNXN_BBXXXX
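+/* Illustration (assuming STRIDE_PBB = 4): one pack holds 4 boxes in
+ * 24 floats: xl0..xl3 yl0..yl3 zl0..zl3 xh0..xh3 yh0..yh3 zh0..zh3,
+ * so one 4-wide load fetches the same corner component of 4 boxes.
+ */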
/* Size of bounding box corners quadruplet */
int cj_size = 0;
#ifdef GMX_NBNXN_SIMD
- nbnxn_simd_width = GMX_NBNXN_SIMD_BITWIDTH/(sizeof(real)*8);
+ nbnxn_simd_width = GMX_SIMD_REAL_WIDTH;
#endif
switch (nb_kernel_type)
}
}
+/* Initializes a single t_nblist data structure for use as a free-energy pair list */
+static void nbnxn_init_pairlist_fep(t_nblist *nl)
+{
+ nl->type = GMX_NBLIST_INTERACTION_FREE_ENERGY;
+ nl->igeometry = GMX_NBLIST_GEOMETRY_PARTICLE_PARTICLE;
+    /* The interaction functions are set in the free-energy kernel function */
+ nl->ivdw = -1;
+ nl->ivdwmod = -1;
+ nl->ielec = -1;
+ nl->ielecmod = -1;
+
+ nl->maxnri = 0;
+ nl->maxnrj = 0;
+ nl->nri = 0;
+ nl->nrj = 0;
+ nl->iinr = NULL;
+ nl->gid = NULL;
+ nl->shift = NULL;
+ nl->jindex = NULL;
+ nl->jjnr = NULL;
+ nl->excl_fep = NULL;
+
+}
+
void nbnxn_init_search(nbnxn_search_t * nbs_ptr,
ivec *n_dd_cells,
gmx_domdec_zones_t *zones,
+ gmx_bool bFEP,
int nthread_max)
{
nbnxn_search_t nbs;
snew(nbs, 1);
*nbs_ptr = nbs;
+ nbs->bFEP = bFEP;
+
nbs->DomDec = (n_dd_cells != NULL);
clear_ivec(nbs->dd_dim);
nbs->work[t].cxy_na_nalloc = 0;
nbs->work[t].sort_work = NULL;
nbs->work[t].sort_work_nalloc = 0;
+
+ snew(nbs->work[t].nbl_fep, 1);
+ nbnxn_init_pairlist_fep(nbs->work[t].nbl_fep);
}
/* Initialize detailed nbsearch cycle counting */
{
rvec size;
+ if (n == 0)
+ {
+ /* To avoid zero density we use a minimum of 1 atom */
+ n = 1;
+ }
+
rvec_sub(corner1, corner0, size);
return n/(size[XX]*size[YY]*size[ZZ]);
nbnxn_grid_t *grid,
int dd_zone,
int n, rvec corner0, rvec corner1,
- real atom_density,
- int XFormat)
+ real atom_density)
{
rvec size;
int na_c;
if (n > grid->na_sc)
{
+ assert(atom_density > 0);
+
/* target cell length */
if (grid->bSimple)
{
if (nc_max > grid->nc_nalloc)
{
- int bb_nalloc;
-
grid->nc_nalloc = over_alloc_large(nc_max);
srenew(grid->nsubc, grid->nc_nalloc);
srenew(grid->bbcz, grid->nc_nalloc*NNBSBB_D);
-#ifdef NBNXN_PBB_SSE
- bb_nalloc = grid->nc_nalloc*GPU_NSUBCELL/STRIDE_PBB*NNBSBB_XXXX;
-#else
- bb_nalloc = grid->nc_nalloc*GPU_NSUBCELL*NNBSBB_B;
-#endif
+
sfree_aligned(grid->bb);
        /* This snew also zeros the contents, which avoids possible
- * floating exceptions in SSE with the unused bb elements.
+ * floating exceptions in SIMD with the unused bb elements.
*/
- snew_aligned(grid->bb, bb_nalloc, 16);
+ if (grid->bSimple)
+ {
+ snew_aligned(grid->bb, grid->nc_nalloc, 16);
+ }
+ else
+ {
+#ifdef NBNXN_BBXXXX
+ int pbb_nalloc;
+
+ pbb_nalloc = grid->nc_nalloc*GPU_NSUBCELL/STRIDE_PBB*NNBSBB_XXXX;
+ snew_aligned(grid->pbb, pbb_nalloc, 16);
+#else
+ snew_aligned(grid->bb, grid->nc_nalloc*GPU_NSUBCELL, 16);
+#endif
+ }
if (grid->bSimple)
{
else
{
sfree_aligned(grid->bbj);
- snew_aligned(grid->bbj, bb_nalloc*grid->na_c/grid->na_cj, 16);
+ snew_aligned(grid->bbj, grid->nc_nalloc*grid->na_c/grid->na_cj, 16);
}
}
srenew(grid->flags, grid->nc_nalloc);
+ if (nbs->bFEP)
+ {
+ srenew(grid->fep, grid->nc_nalloc*grid->na_sc/grid->na_c);
+ }
}
copy_rvec(corner0, grid->c0);
* or easier, allocate at least n*SGSF elements.
*/
static void sort_atoms(int dim, gmx_bool Backwards,
+ int gmx_unused dd_zone,
int *a, int n, rvec *x,
real h0, real invh, int n_per_h,
int *sort)
#ifndef NDEBUG
/* As we can have rounding effect, we use > iso >= here */
- if (zi < 0 || zi > n_per_h*SORT_GRID_OVERSIZE)
+ if (zi < 0 || (dd_zone == 0 && zi > n_per_h*SORT_GRID_OVERSIZE))
{
gmx_fatal(FARGS, "(int)((x[%d][%c]=%f - %f)*%f) = %d, not in 0 - %d*%d\n",
a[i], 'x'+dim, x[a[i]][dim], h0, invh, zi,
}
#endif
+    /* In a non-local domain, particles communicated for bonded interactions
+ * can be far beyond the grid size, which is set by the non-bonded
+ * cut-off distance. We sort such particles into the last cell.
+ */
+ if (zi > n_per_h*SORT_GRID_OVERSIZE)
+ {
+ zi = n_per_h*SORT_GRID_OVERSIZE;
+ }
+
/* Ideally this particle should go in sort cell zi,
* but that might already be in use,
* in that case find the first empty cell higher up
#endif
/* Coordinate order x,y,z, bb order xyz0 */
-static void calc_bounding_box(int na, int stride, const real *x, float *bb)
+static void calc_bounding_box(int na, int stride, const real *x, nbnxn_bb_t *bb)
{
int i, j;
real xl, xh, yl, yh, zl, zh;
i += stride;
}
/* Note: possible double to float conversion here */
- bb[BBL_X] = R2F_D(xl);
- bb[BBL_Y] = R2F_D(yl);
- bb[BBL_Z] = R2F_D(zl);
- bb[BBU_X] = R2F_U(xh);
- bb[BBU_Y] = R2F_U(yh);
- bb[BBU_Z] = R2F_U(zh);
+ bb->lower[BB_X] = R2F_D(xl);
+ bb->lower[BB_Y] = R2F_D(yl);
+ bb->lower[BB_Z] = R2F_D(zl);
+ bb->upper[BB_X] = R2F_U(xh);
+ bb->upper[BB_Y] = R2F_U(yh);
+ bb->upper[BB_Z] = R2F_U(zh);
}
/* Packed coordinates, bb order xyz0 */
-static void calc_bounding_box_x_x4(int na, const real *x, float *bb)
+static void calc_bounding_box_x_x4(int na, const real *x, nbnxn_bb_t *bb)
{
int j;
real xl, xh, yl, yh, zl, zh;
zh = max(zh, x[j+ZZ*PACK_X4]);
}
/* Note: possible double to float conversion here */
- bb[BBL_X] = R2F_D(xl);
- bb[BBL_Y] = R2F_D(yl);
- bb[BBL_Z] = R2F_D(zl);
- bb[BBU_X] = R2F_U(xh);
- bb[BBU_Y] = R2F_U(yh);
- bb[BBU_Z] = R2F_U(zh);
+ bb->lower[BB_X] = R2F_D(xl);
+ bb->lower[BB_Y] = R2F_D(yl);
+ bb->lower[BB_Z] = R2F_D(zl);
+ bb->upper[BB_X] = R2F_U(xh);
+ bb->upper[BB_Y] = R2F_U(yh);
+ bb->upper[BB_Z] = R2F_U(zh);
}
/* Packed coordinates, bb order xyz0 */
-static void calc_bounding_box_x_x8(int na, const real *x, float *bb)
+static void calc_bounding_box_x_x8(int na, const real *x, nbnxn_bb_t *bb)
{
int j;
real xl, xh, yl, yh, zl, zh;
zh = max(zh, x[j+ZZ*PACK_X8]);
}
/* Note: possible double to float conversion here */
- bb[BBL_X] = R2F_D(xl);
- bb[BBL_Y] = R2F_D(yl);
- bb[BBL_Z] = R2F_D(zl);
- bb[BBU_X] = R2F_U(xh);
- bb[BBU_Y] = R2F_U(yh);
- bb[BBU_Z] = R2F_U(zh);
+ bb->lower[BB_X] = R2F_D(xl);
+ bb->lower[BB_Y] = R2F_D(yl);
+ bb->lower[BB_Z] = R2F_D(zl);
+ bb->upper[BB_X] = R2F_U(xh);
+ bb->upper[BB_Y] = R2F_U(yh);
+ bb->upper[BB_Z] = R2F_U(zh);
}
-#ifdef NBNXN_SEARCH_BB_SSE
-
/* Packed coordinates, bb order xyz0 */
static void calc_bounding_box_x_x4_halves(int na, const real *x,
- float *bb, float *bbj)
+ nbnxn_bb_t *bb, nbnxn_bb_t *bbj)
{
calc_bounding_box_x_x4(min(na, 2), x, bbj);
if (na > 2)
{
- calc_bounding_box_x_x4(min(na-2, 2), x+(PACK_X4>>1), bbj+NNBSBB_B);
+ calc_bounding_box_x_x4(min(na-2, 2), x+(PACK_X4>>1), bbj+1);
}
else
{
/* Set the "empty" bounding box to the same as the first one,
* so we don't need to treat special cases in the rest of the code.
*/
- _mm_store_ps(bbj+NNBSBB_B, _mm_load_ps(bbj));
- _mm_store_ps(bbj+NNBSBB_B+NNBSBB_C, _mm_load_ps(bbj+NNBSBB_C));
+#ifdef NBNXN_SEARCH_BB_SIMD4
+ gmx_simd4_store_f(&bbj[1].lower[0], gmx_simd4_load_f(&bbj[0].lower[0]));
+ gmx_simd4_store_f(&bbj[1].upper[0], gmx_simd4_load_f(&bbj[0].upper[0]));
+#else
+ bbj[1] = bbj[0];
+#endif
}
- _mm_store_ps(bb, _mm_min_ps(_mm_load_ps(bbj),
- _mm_load_ps(bbj+NNBSBB_B)));
- _mm_store_ps(bb+NNBSBB_C, _mm_max_ps(_mm_load_ps(bbj+NNBSBB_C),
- _mm_load_ps(bbj+NNBSBB_B+NNBSBB_C)));
+#ifdef NBNXN_SEARCH_BB_SIMD4
+ gmx_simd4_store_f(&bb->lower[0],
+ gmx_simd4_min_f(gmx_simd4_load_f(&bbj[0].lower[0]),
+ gmx_simd4_load_f(&bbj[1].lower[0])));
+ gmx_simd4_store_f(&bb->upper[0],
+ gmx_simd4_max_f(gmx_simd4_load_f(&bbj[0].upper[0]),
+ gmx_simd4_load_f(&bbj[1].upper[0])));
+#else
+ {
+ int i;
+
+ for (i = 0; i < NNBSBB_C; i++)
+ {
+ bb->lower[i] = min(bbj[0].lower[i], bbj[1].lower[i]);
+ bb->upper[i] = max(bbj[0].upper[i], bbj[1].upper[i]);
+ }
+ }
+#endif
}
+#ifdef NBNXN_SEARCH_BB_SIMD4
+
/* Coordinate order xyz, bb order xxxxyyyyzzzz */
static void calc_bounding_box_xxxx(int na, int stride, const real *x, float *bb)
{
bb[5*STRIDE_PBB] = R2F_U(zh);
}
-#endif /* NBNXN_SEARCH_BB_SSE */
+#endif /* NBNXN_SEARCH_BB_SIMD4 */
-#ifdef NBNXN_SEARCH_SSE_SINGLE
+#ifdef NBNXN_SEARCH_SIMD4_FLOAT_X_BB
/* Coordinate order xyz?, bb order xyz0 */
-static void calc_bounding_box_sse(int na, const float *x, float *bb)
+static void calc_bounding_box_simd4(int na, const float *x, nbnxn_bb_t *bb)
{
- __m128 bb_0_SSE, bb_1_SSE;
- __m128 x_SSE;
+ gmx_simd4_float_t bb_0_S, bb_1_S;
+ gmx_simd4_float_t x_S;
- int i;
+ int i;
- bb_0_SSE = _mm_load_ps(x);
- bb_1_SSE = bb_0_SSE;
+ bb_0_S = gmx_simd4_load_f(x);
+ bb_1_S = bb_0_S;
for (i = 1; i < na; i++)
{
- x_SSE = _mm_load_ps(x+i*NNBSBB_C);
- bb_0_SSE = _mm_min_ps(bb_0_SSE, x_SSE);
- bb_1_SSE = _mm_max_ps(bb_1_SSE, x_SSE);
+ x_S = gmx_simd4_load_f(x+i*NNBSBB_C);
+ bb_0_S = gmx_simd4_min_f(bb_0_S, x_S);
+ bb_1_S = gmx_simd4_max_f(bb_1_S, x_S);
}
- _mm_store_ps(bb, bb_0_SSE);
- _mm_store_ps(bb+4, bb_1_SSE);
+ gmx_simd4_store_f(&bb->lower[0], bb_0_S);
+ gmx_simd4_store_f(&bb->upper[0], bb_1_S);
}
/* Coordinate order xyz?, bb order xxxxyyyyzzzz */
-static void calc_bounding_box_xxxx_sse(int na, const float *x,
- float *bb_work,
- real *bb)
+static void calc_bounding_box_xxxx_simd4(int na, const float *x,
+ nbnxn_bb_t *bb_work_aligned,
+ real *bb)
{
- calc_bounding_box_sse(na, x, bb_work);
-
- bb[0*STRIDE_PBB] = bb_work[BBL_X];
- bb[1*STRIDE_PBB] = bb_work[BBL_Y];
- bb[2*STRIDE_PBB] = bb_work[BBL_Z];
- bb[3*STRIDE_PBB] = bb_work[BBU_X];
- bb[4*STRIDE_PBB] = bb_work[BBU_Y];
- bb[5*STRIDE_PBB] = bb_work[BBU_Z];
+ calc_bounding_box_simd4(na, x, bb_work_aligned);
+
+ bb[0*STRIDE_PBB] = bb_work_aligned->lower[BB_X];
+ bb[1*STRIDE_PBB] = bb_work_aligned->lower[BB_Y];
+ bb[2*STRIDE_PBB] = bb_work_aligned->lower[BB_Z];
+ bb[3*STRIDE_PBB] = bb_work_aligned->upper[BB_X];
+ bb[4*STRIDE_PBB] = bb_work_aligned->upper[BB_Y];
+ bb[5*STRIDE_PBB] = bb_work_aligned->upper[BB_Z];
}
-#endif /* NBNXN_SEARCH_SSE_SINGLE */
+#endif /* NBNXN_SEARCH_SIMD4_FLOAT_X_BB */
-#ifdef NBNXN_SEARCH_BB_SSE
/* Combines pairs of consecutive bounding boxes */
-static void combine_bounding_box_pairs(nbnxn_grid_t *grid, const float *bb)
+static void combine_bounding_box_pairs(nbnxn_grid_t *grid, const nbnxn_bb_t *bb)
{
int i, j, sc2, nc2, c2;
- __m128 min_SSE, max_SSE;
for (i = 0; i < grid->ncx*grid->ncy; i++)
{
nc2 = (grid->cxy_na[i]+3)>>(2+1);
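+        /* cxy_na[i] counts the atoms in column i; with 4 atoms per
+         * i-cluster this is (na+3)>>2 cluster bbs, halved into bb pairs.
+         */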
for (c2 = sc2; c2 < sc2+nc2; c2++)
{
- min_SSE = _mm_min_ps(_mm_load_ps(bb+(c2*4+0)*NNBSBB_C),
- _mm_load_ps(bb+(c2*4+2)*NNBSBB_C));
- max_SSE = _mm_max_ps(_mm_load_ps(bb+(c2*4+1)*NNBSBB_C),
- _mm_load_ps(bb+(c2*4+3)*NNBSBB_C));
- _mm_store_ps(grid->bbj+(c2*2+0)*NNBSBB_C, min_SSE);
- _mm_store_ps(grid->bbj+(c2*2+1)*NNBSBB_C, max_SSE);
+#ifdef NBNXN_SEARCH_BB_SIMD4
+ gmx_simd4_float_t min_S, max_S;
+
+ min_S = gmx_simd4_min_f(gmx_simd4_load_f(&bb[c2*2+0].lower[0]),
+ gmx_simd4_load_f(&bb[c2*2+1].lower[0]));
+ max_S = gmx_simd4_max_f(gmx_simd4_load_f(&bb[c2*2+0].upper[0]),
+ gmx_simd4_load_f(&bb[c2*2+1].upper[0]));
+ gmx_simd4_store_f(&grid->bbj[c2].lower[0], min_S);
+ gmx_simd4_store_f(&grid->bbj[c2].upper[0], max_S);
+#else
+ for (j = 0; j < NNBSBB_C; j++)
+ {
+ grid->bbj[c2].lower[j] = min(bb[c2*2+0].lower[j],
+ bb[c2*2+1].lower[j]);
+ grid->bbj[c2].upper[j] = max(bb[c2*2+0].upper[j],
+ bb[c2*2+1].upper[j]);
+ }
+#endif
}
if (((grid->cxy_na[i]+3)>>2) & 1)
{
- /* Copy the last bb for odd bb count in this column */
+ /* The bb count in this column is odd: duplicate the last bb */
for (j = 0; j < NNBSBB_C; j++)
{
- grid->bbj[(c2*2+0)*NNBSBB_C+j] = bb[(c2*4+0)*NNBSBB_C+j];
- grid->bbj[(c2*2+1)*NNBSBB_C+j] = bb[(c2*4+1)*NNBSBB_C+j];
+ grid->bbj[c2].lower[j] = bb[c2*2].lower[j];
+ grid->bbj[c2].upper[j] = bb[c2*2].upper[j];
}
}
}
}
-#endif
-
/* Prints the average bb size, used for debug output */
static void print_bbsizes_simple(FILE *fp,
{
for (d = 0; d < DIM; d++)
{
- ba[d] += grid->bb[c*NNBSBB_B+NNBSBB_C+d] - grid->bb[c*NNBSBB_B+d];
+ ba[d] += grid->bb[c].upper[d] - grid->bb[c].lower[d];
}
}
dsvmul(1.0/grid->nc, ba, ba);
for (d = 0; d < DIM; d++)
{
ba[d] +=
- grid->bb[cs_w*NNBSBB_XXXX+(DIM+d)*STRIDE_PBB+i] -
- grid->bb[cs_w*NNBSBB_XXXX+ d *STRIDE_PBB+i];
+ grid->pbb[cs_w*NNBSBB_XXXX+(DIM+d)*STRIDE_PBB+i] -
+ grid->pbb[cs_w*NNBSBB_XXXX+ d *STRIDE_PBB+i];
}
}
}
cs = c*GPU_NSUBCELL + s;
for (d = 0; d < DIM; d++)
{
- ba[d] +=
- grid->bb[cs*NNBSBB_B+NNBSBB_C+d] -
- grid->bb[cs*NNBSBB_B +d];
+ ba[d] += grid->bb[cs].upper[d] - grid->bb[cs].lower[d];
}
}
#endif
/* Potentially sorts atoms on LJ coefficients !=0 and ==0.
* Also sets interaction flags.
*/
-void sort_on_lj(nbnxn_atomdata_t *nbat, int na_c,
+void sort_on_lj(int na_c,
int a0, int a1, const int *atinfo,
int *order,
int *flags)
int subc, s, a, n1, n2, a_lj_max, i, j;
int sort1[NBNXN_NA_SC_MAX/GPU_NSUBCELL];
int sort2[NBNXN_NA_SC_MAX/GPU_NSUBCELL];
- gmx_bool haveQ;
+ gmx_bool haveQ, bFEP;
*flags = 0;
}
}
- /* If we don't have atom with LJ, there's nothing to sort */
+ /* If we don't have atoms with LJ, there's nothing to sort */
if (n1 > 0)
{
*flags |= NBNXN_CI_DO_LJ(subc);
const int *atinfo,
rvec *x,
int sx, int sy, int sz,
- float *bb_work)
+ nbnxn_bb_t gmx_unused *bb_work_aligned)
{
- int na, a;
- size_t offset;
- float *bb_ptr;
+ int na, a;
+ size_t offset;
+ nbnxn_bb_t *bb_ptr;
+#ifdef NBNXN_BBXXXX
+ float *pbb_ptr;
+#endif
na = a1 - a0;
if (grid->bSimple)
{
- sort_on_lj(nbat, grid->na_c, a0, a1, atinfo, nbs->a,
+ sort_on_lj(grid->na_c, a0, a1, atinfo, nbs->a,
grid->flags+(a0>>grid->na_c_2log)-grid->cell0);
}
+ if (nbs->bFEP)
+ {
+ /* Set the fep flag for perturbed atoms in this (sub-)cell */
+ int c, at;
+
+ /* The grid-local cluster/(sub-)cell index */
+ c = (a0 >> grid->na_c_2log) - grid->cell0*(grid->bSimple ? 1 : GPU_NSUBCELL);
+ grid->fep[c] = 0;
+ for (at = a0; at < a1; at++)
+ {
+ if (nbs->a[at] >= 0 && GET_CGINFO_FEP(atinfo[nbs->a[at]]))
+ {
+ grid->fep[c] |= (1 << (at - a0));
+ }
+ }
+ }
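+    /* grid->fep[c] now holds one bit per atom slot of this (sub-)cell:
+     * bit (at - a0) is set iff the atom is perturbed.
+     */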
+
/* Now we have sorted the atoms, set the cell indices */
for (a = a0; a < a1; a++)
{
if (nbat->XFormat == nbatX4)
{
/* Store the bounding boxes as xyz.xyz. */
- offset = ((a0 - grid->cell0*grid->na_sc)>>grid->na_c_2log)*NNBSBB_B;
+ offset = (a0 - grid->cell0*grid->na_sc) >> grid->na_c_2log;
bb_ptr = grid->bb + offset;
-#if defined GMX_DOUBLE && defined NBNXN_SEARCH_BB_SSE
+#if defined GMX_NBNXN_SIMD && GMX_SIMD_REAL_WIDTH == 2
if (2*grid->na_cj == grid->na_c)
{
calc_bounding_box_x_x4_halves(na, nbat->x+X4_IND_A(a0), bb_ptr,
else if (nbat->XFormat == nbatX8)
{
/* Store the bounding boxes as xyz.xyz. */
- offset = ((a0 - grid->cell0*grid->na_sc)>>grid->na_c_2log)*NNBSBB_B;
+ offset = (a0 - grid->cell0*grid->na_sc) >> grid->na_c_2log;
bb_ptr = grid->bb + offset;
calc_bounding_box_x_x8(na, nbat->x+X8_IND_A(a0), bb_ptr);
else if (!grid->bSimple)
{
/* Store the bounding boxes in a format convenient
- * for SSE calculations: xxxxyyyyzzzz...
+ * for SIMD4 calculations: xxxxyyyyzzzz...
*/
- bb_ptr =
- grid->bb +
+ pbb_ptr =
+ grid->pbb +
((a0-grid->cell0*grid->na_sc)>>(grid->na_c_2log+STRIDE_PBB_2LOG))*NNBSBB_XXXX +
(((a0-grid->cell0*grid->na_sc)>>grid->na_c_2log) & (STRIDE_PBB-1));
-#ifdef NBNXN_SEARCH_SSE_SINGLE
+#ifdef NBNXN_SEARCH_SIMD4_FLOAT_X_BB
if (nbat->XFormat == nbatXYZQ)
{
- calc_bounding_box_xxxx_sse(na, nbat->x+a0*nbat->xstride,
- bb_work, bb_ptr);
+ calc_bounding_box_xxxx_simd4(na, nbat->x+a0*nbat->xstride,
+ bb_work_aligned, pbb_ptr);
}
else
#endif
{
calc_bounding_box_xxxx(na, nbat->xstride, nbat->x+a0*nbat->xstride,
- bb_ptr);
+ pbb_ptr);
}
if (gmx_debug_at)
{
fprintf(debug, "%2d %2d %2d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
sx, sy, sz,
- bb_ptr[0*STRIDE_PBB], bb_ptr[3*STRIDE_PBB],
- bb_ptr[1*STRIDE_PBB], bb_ptr[4*STRIDE_PBB],
- bb_ptr[2*STRIDE_PBB], bb_ptr[5*STRIDE_PBB]);
+ pbb_ptr[0*STRIDE_PBB], pbb_ptr[3*STRIDE_PBB],
+ pbb_ptr[1*STRIDE_PBB], pbb_ptr[4*STRIDE_PBB],
+ pbb_ptr[2*STRIDE_PBB], pbb_ptr[5*STRIDE_PBB]);
}
}
#endif
else
{
/* Store the bounding boxes as xyz.xyz. */
- bb_ptr = grid->bb+((a0-grid->cell0*grid->na_sc)>>grid->na_c_2log)*NNBSBB_B;
+ bb_ptr = grid->bb+((a0-grid->cell0*grid->na_sc)>>grid->na_c_2log);
calc_bounding_box(na, nbat->xstride, nbat->x+a0*nbat->xstride,
bb_ptr);
bbo = (a0 - grid->cell0*grid->na_sc)/grid->na_c;
fprintf(debug, "%2d %2d %2d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
sx, sy, sz,
- (grid->bb+bbo*NNBSBB_B)[BBL_X],
- (grid->bb+bbo*NNBSBB_B)[BBU_X],
- (grid->bb+bbo*NNBSBB_B)[BBL_Y],
- (grid->bb+bbo*NNBSBB_B)[BBU_Y],
- (grid->bb+bbo*NNBSBB_B)[BBL_Z],
- (grid->bb+bbo*NNBSBB_B)[BBU_Z]);
+ grid->bb[bbo].lower[BB_X],
+ grid->bb[bbo].lower[BB_Y],
+ grid->bb[bbo].lower[BB_Z],
+ grid->bb[bbo].upper[BB_X],
+ grid->bb[bbo].upper[BB_Y],
+ grid->bb[bbo].upper[BB_Z]);
}
}
}
ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
/* Sort the atoms within each x,y column on z coordinate */
- sort_atoms(ZZ, FALSE,
+ sort_atoms(ZZ, FALSE, dd_zone,
nbs->a+ash, na, x,
grid->c0[ZZ],
1.0/nbs->box[ZZ][ZZ], ncz*grid->na_sc,
{
cfilled = c;
}
- grid->bbcz[c*NNBSBB_D ] = grid->bb[cfilled*NNBSBB_B+2];
- grid->bbcz[c*NNBSBB_D+1] = grid->bb[cfilled*NNBSBB_B+6];
+ grid->bbcz[c*NNBSBB_D ] = grid->bb[cfilled].lower[BB_Z];
+ grid->bbcz[c*NNBSBB_D+1] = grid->bb[cfilled].upper[BB_Z];
}
/* Set the unused atom indices to -1 */
int subdiv_x, sub_x, na_x, ash_x;
/* cppcheck-suppress unassignedVariable */
- float bb_work_array[NNBSBB_B+3], *bb_work_align;
+ nbnxn_bb_t bb_work_array[2], *bb_work_aligned;
- bb_work_align = (float *)(((size_t)(bb_work_array+3)) & (~((size_t)15)));
+ bb_work_aligned = (nbnxn_bb_t *)(((size_t)(bb_work_array+1)) & (~((size_t)15)));
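+    /* This rounds the address of the second array element down to a
+     * 16-byte boundary; with two elements there is then always room
+     * for one aligned nbnxn_bb_t inside bb_work_array.
+     */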
if (debug)
{
ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
/* Sort the atoms within each x,y column on z coordinate */
- sort_atoms(ZZ, FALSE,
+ sort_atoms(ZZ, FALSE, dd_zone,
nbs->a+ash, na, x,
grid->c0[ZZ],
1.0/nbs->box[ZZ][ZZ], ncz*grid->na_sc,
#if GPU_NSUBCELL_Y > 1
/* Sort the atoms along y */
- sort_atoms(YY, (sub_z & 1),
+ sort_atoms(YY, (sub_z & 1), dd_zone,
nbs->a+ash_z, na_z, x,
grid->c0[YY]+cy*grid->sy,
grid->inv_sy, subdiv_z,
#if GPU_NSUBCELL_X > 1
/* Sort the atoms along x */
- sort_atoms(XX, ((cz*GPU_NSUBCELL_Y + sub_y) & 1),
+ sort_atoms(XX, ((cz*GPU_NSUBCELL_Y + sub_y) & 1), dd_zone,
nbs->a+ash_y, na_y, x,
grid->c0[XX]+cx*grid->sx,
grid->inv_sx, subdiv_y,
grid->na_c*(cx*GPU_NSUBCELL_X+sub_x) + (dd_zone >> 2),
grid->na_c*(cy*GPU_NSUBCELL_Y+sub_y) + (dd_zone & 3),
grid->na_c*sub_z,
- bb_work_align);
+ bb_work_aligned);
}
}
}
}
/* Sort the super-cell columns along z into the sub-cells. */
-#pragma omp parallel for num_threads(nbs->nthread_max) schedule(static)
- for (thread = 0; thread < nbs->nthread_max; thread++)
+#pragma omp parallel for num_threads(nthread) schedule(static)
+ for (thread = 0; thread < nthread; thread++)
{
if (grid->bSimple)
{
}
}
-#ifdef NBNXN_SEARCH_BB_SSE
if (grid->bSimple && nbat->XFormat == nbatX8)
{
combine_bounding_box_pairs(grid, grid->bb);
}
-#endif
if (!grid->bSimple)
{
nbs->ePBC = ePBC;
copy_mat(box, nbs->box);
- if (atom_density >= 0)
+ /* Avoid zero density */
+ if (atom_density > 0)
{
grid->atom_density = atom_density;
}
* for the local atoms (dd_zone=0).
*/
nbs->natoms_nonlocal = a1 - nmoved;
+
+ if (debug)
+ {
+ fprintf(debug, "natoms_local = %5d atom_density = %5.1f\n",
+ nbs->natoms_local, grid->atom_density);
+ }
}
else
{
nbs->natoms_nonlocal = max(nbs->natoms_nonlocal, a1);
}
+ /* We always use the home zone (grid[0]) for setting the cell size,
+ * since determining densities for non-local zones is difficult.
+ */
nc_max_grid = set_grid_size_xy(nbs, grid,
dd_zone, n-nmoved, corner0, corner1,
- nbs->grid[0].atom_density,
- nbat->XFormat);
+ nbs->grid[0].atom_density);
nc_max = grid->cell0 + nc_max_grid;
nbnxn_atomdata_t *nbat)
{
nbnxn_grid_t *grid;
- float *bbcz, *bb;
+ float *bbcz;
+ nbnxn_bb_t *bb;
int ncd, sc;
+ int nthreads gmx_unused;
grid = &nbs->grid[0];
{
grid->nc_nalloc_simple = over_alloc_large(grid->nc*ncd);
srenew(grid->bbcz_simple, grid->nc_nalloc_simple*NNBSBB_D);
- srenew(grid->bb_simple, grid->nc_nalloc_simple*NNBSBB_B);
+ srenew(grid->bb_simple, grid->nc_nalloc_simple);
srenew(grid->flags_simple, grid->nc_nalloc_simple);
if (nbat->XFormat)
{
bbcz = grid->bbcz_simple;
bb = grid->bb_simple;
-#pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntPairsearch)) schedule(static)
+ nthreads = gmx_omp_nthreads_get(emntPairsearch);
+#pragma omp parallel for num_threads(nthreads) schedule(static)
for (sc = 0; sc < grid->nc; sc++)
{
int c, tx, na;
case nbatX4:
/* PACK_X4==NBNXN_CPU_CLUSTER_I_SIZE, so this is simple */
calc_bounding_box_x_x4(na, nbat->x+tx*STRIDE_P4,
- bb+tx*NNBSBB_B);
+ bb+tx);
break;
case nbatX8:
/* PACK_X8>NBNXN_CPU_CLUSTER_I_SIZE, more complicated */
calc_bounding_box_x_x8(na, nbat->x+X8_IND_A(tx*NBNXN_CPU_CLUSTER_I_SIZE),
- bb+tx*NNBSBB_B);
+ bb+tx);
break;
default:
calc_bounding_box(na, nbat->xstride,
nbat->x+tx*NBNXN_CPU_CLUSTER_I_SIZE*nbat->xstride,
- bb+tx*NNBSBB_B);
+ bb+tx);
break;
}
- bbcz[tx*NNBSBB_D+0] = bb[tx*NNBSBB_B +ZZ];
- bbcz[tx*NNBSBB_D+1] = bb[tx*NNBSBB_B+NNBSBB_C+ZZ];
+ bbcz[tx*NNBSBB_D+0] = bb[tx].lower[BB_Z];
+ bbcz[tx*NNBSBB_D+1] = bb[tx].upper[BB_Z];
/* No interaction optimization yet here */
grid->flags_simple[tx] = NBNXN_CI_DO_LJ(0) | NBNXN_CI_DO_COUL(0);
}
}
-#ifdef NBNXN_SEARCH_BB_SSE
if (grid->bSimple && nbat->XFormat == nbatX8)
{
combine_bounding_box_pairs(grid, grid->bb_simple);
}
-#endif
}
void nbnxn_get_ncells(nbnxn_search_t nbs, int *ncx, int *ncy)
/* Reference code calculating the distance^2 between two bounding boxes */
static float box_dist2(float bx0, float bx1, float by0,
float by1, float bz0, float bz1,
- const float *bb)
+ const nbnxn_bb_t *bb)
{
float d2;
float dl, dh, dm, dm0;
d2 = 0;
- dl = bx0 - bb[BBU_X];
- dh = bb[BBL_X] - bx1;
+ dl = bx0 - bb->upper[BB_X];
+ dh = bb->lower[BB_X] - bx1;
dm = max(dl, dh);
dm0 = max(dm, 0);
d2 += dm0*dm0;
- dl = by0 - bb[BBU_Y];
- dh = bb[BBL_Y] - by1;
+ dl = by0 - bb->upper[BB_Y];
+ dh = bb->lower[BB_Y] - by1;
dm = max(dl, dh);
dm0 = max(dm, 0);
d2 += dm0*dm0;
- dl = bz0 - bb[BBU_Z];
- dh = bb[BBL_Z] - bz1;
+ dl = bz0 - bb->upper[BB_Z];
+ dh = bb->lower[BB_Z] - bz1;
dm = max(dl, dh);
dm0 = max(dm, 0);
d2 += dm0*dm0;
}
/* Plain C code calculating the distance^2 between two bounding boxes */
-static float subc_bb_dist2(int si, const float *bb_i_ci,
- int csj, const float *bb_j_all)
+static float subc_bb_dist2(int si, const nbnxn_bb_t *bb_i_ci,
+ int csj, const nbnxn_bb_t *bb_j_all)
{
- const float *bb_i, *bb_j;
- float d2;
- float dl, dh, dm, dm0;
+ const nbnxn_bb_t *bb_i, *bb_j;
+ float d2;
+ float dl, dh, dm, dm0;
- bb_i = bb_i_ci + si*NNBSBB_B;
- bb_j = bb_j_all + csj*NNBSBB_B;
+ bb_i = bb_i_ci + si;
+ bb_j = bb_j_all + csj;
d2 = 0;
- dl = bb_i[BBL_X] - bb_j[BBU_X];
- dh = bb_j[BBL_X] - bb_i[BBU_X];
+ dl = bb_i->lower[BB_X] - bb_j->upper[BB_X];
+ dh = bb_j->lower[BB_X] - bb_i->upper[BB_X];
dm = max(dl, dh);
dm0 = max(dm, 0);
d2 += dm0*dm0;
- dl = bb_i[BBL_Y] - bb_j[BBU_Y];
- dh = bb_j[BBL_Y] - bb_i[BBU_Y];
+ dl = bb_i->lower[BB_Y] - bb_j->upper[BB_Y];
+ dh = bb_j->lower[BB_Y] - bb_i->upper[BB_Y];
dm = max(dl, dh);
dm0 = max(dm, 0);
d2 += dm0*dm0;
- dl = bb_i[BBL_Z] - bb_j[BBU_Z];
- dh = bb_j[BBL_Z] - bb_i[BBU_Z];
+ dl = bb_i->lower[BB_Z] - bb_j->upper[BB_Z];
+ dh = bb_j->lower[BB_Z] - bb_i->upper[BB_Z];
dm = max(dl, dh);
dm0 = max(dm, 0);
d2 += dm0*dm0;
return d2;
}
-#ifdef NBNXN_SEARCH_BB_SSE
+#ifdef NBNXN_SEARCH_BB_SIMD4
-/* SSE code for bb distance for bb format xyz0 */
-static float subc_bb_dist2_sse(int na_c,
- int si, const float *bb_i_ci,
- int csj, const float *bb_j_all)
+/* 4-wide SIMD code for bb distance for bb format xyz0 */
+static float subc_bb_dist2_simd4(int si, const nbnxn_bb_t *bb_i_ci,
+ int csj, const nbnxn_bb_t *bb_j_all)
{
- const float *bb_i, *bb_j;
-
- __m128 bb_i_SSE0, bb_i_SSE1;
- __m128 bb_j_SSE0, bb_j_SSE1;
- __m128 dl_SSE;
- __m128 dh_SSE;
- __m128 dm_SSE;
- __m128 dm0_SSE;
- __m128 d2_SSE;
-#ifndef GMX_X86_SSE4_1
- float d2_array[7], *d2_align;
-
- d2_align = (float *)(((size_t)(d2_array+3)) & (~((size_t)15)));
-#else
- float d2;
-#endif
-
- bb_i = bb_i_ci + si*NNBSBB_B;
- bb_j = bb_j_all + csj*NNBSBB_B;
-
- bb_i_SSE0 = _mm_load_ps(bb_i);
- bb_i_SSE1 = _mm_load_ps(bb_i+NNBSBB_C);
- bb_j_SSE0 = _mm_load_ps(bb_j);
- bb_j_SSE1 = _mm_load_ps(bb_j+NNBSBB_C);
+ gmx_simd4_float_t bb_i_S0, bb_i_S1;
+ gmx_simd4_float_t bb_j_S0, bb_j_S1;
+ gmx_simd4_float_t dl_S;
+ gmx_simd4_float_t dh_S;
+ gmx_simd4_float_t dm_S;
+ gmx_simd4_float_t dm0_S;
- dl_SSE = _mm_sub_ps(bb_i_SSE0, bb_j_SSE1);
- dh_SSE = _mm_sub_ps(bb_j_SSE0, bb_i_SSE1);
+ bb_i_S0 = gmx_simd4_load_f(&bb_i_ci[si].lower[0]);
+ bb_i_S1 = gmx_simd4_load_f(&bb_i_ci[si].upper[0]);
+ bb_j_S0 = gmx_simd4_load_f(&bb_j_all[csj].lower[0]);
+ bb_j_S1 = gmx_simd4_load_f(&bb_j_all[csj].upper[0]);
- dm_SSE = _mm_max_ps(dl_SSE, dh_SSE);
- dm0_SSE = _mm_max_ps(dm_SSE, _mm_setzero_ps());
-#ifndef GMX_X86_SSE4_1
- d2_SSE = _mm_mul_ps(dm0_SSE, dm0_SSE);
-
- _mm_store_ps(d2_align, d2_SSE);
-
- return d2_align[0] + d2_align[1] + d2_align[2];
-#else
- /* SSE4.1 dot product of components 0,1,2 */
- d2_SSE = _mm_dp_ps(dm0_SSE, dm0_SSE, 0x71);
+ dl_S = gmx_simd4_sub_f(bb_i_S0, bb_j_S1);
+ dh_S = gmx_simd4_sub_f(bb_j_S0, bb_i_S1);
- _mm_store_ss(&d2, d2_SSE);
+ dm_S = gmx_simd4_max_f(dl_S, dh_S);
+ dm0_S = gmx_simd4_max_f(dm_S, gmx_simd4_setzero_f());
- return d2;
-#endif
+ return gmx_simd4_dotproduct3_f(dm0_S, dm0_S);
}
/* Calculate bb bounding distances of bb_i[si,...,si+3] and store them in d2 */
-#define SUBC_BB_DIST2_SSE_XXXX_INNER(si, bb_i, d2) \
+#define SUBC_BB_DIST2_SIMD4_XXXX_INNER(si, bb_i, d2) \
{ \
- int shi; \
+ int shi; \
\
- __m128 dx_0, dy_0, dz_0; \
- __m128 dx_1, dy_1, dz_1; \
+ gmx_simd4_float_t dx_0, dy_0, dz_0; \
+ gmx_simd4_float_t dx_1, dy_1, dz_1; \
\
- __m128 mx, my, mz; \
- __m128 m0x, m0y, m0z; \
+ gmx_simd4_float_t mx, my, mz; \
+ gmx_simd4_float_t m0x, m0y, m0z; \
\
- __m128 d2x, d2y, d2z; \
- __m128 d2s, d2t; \
+ gmx_simd4_float_t d2x, d2y, d2z; \
+ gmx_simd4_float_t d2s, d2t; \
\
shi = si*NNBSBB_D*DIM; \
\
- xi_l = _mm_load_ps(bb_i+shi+0*STRIDE_PBB); \
- yi_l = _mm_load_ps(bb_i+shi+1*STRIDE_PBB); \
- zi_l = _mm_load_ps(bb_i+shi+2*STRIDE_PBB); \
- xi_h = _mm_load_ps(bb_i+shi+3*STRIDE_PBB); \
- yi_h = _mm_load_ps(bb_i+shi+4*STRIDE_PBB); \
- zi_h = _mm_load_ps(bb_i+shi+5*STRIDE_PBB); \
+ xi_l = gmx_simd4_load_f(bb_i+shi+0*STRIDE_PBB); \
+ yi_l = gmx_simd4_load_f(bb_i+shi+1*STRIDE_PBB); \
+ zi_l = gmx_simd4_load_f(bb_i+shi+2*STRIDE_PBB); \
+ xi_h = gmx_simd4_load_f(bb_i+shi+3*STRIDE_PBB); \
+ yi_h = gmx_simd4_load_f(bb_i+shi+4*STRIDE_PBB); \
+ zi_h = gmx_simd4_load_f(bb_i+shi+5*STRIDE_PBB); \
\
- dx_0 = _mm_sub_ps(xi_l, xj_h); \
- dy_0 = _mm_sub_ps(yi_l, yj_h); \
- dz_0 = _mm_sub_ps(zi_l, zj_h); \
+ dx_0 = gmx_simd4_sub_f(xi_l, xj_h); \
+ dy_0 = gmx_simd4_sub_f(yi_l, yj_h); \
+ dz_0 = gmx_simd4_sub_f(zi_l, zj_h); \
\
- dx_1 = _mm_sub_ps(xj_l, xi_h); \
- dy_1 = _mm_sub_ps(yj_l, yi_h); \
- dz_1 = _mm_sub_ps(zj_l, zi_h); \
+ dx_1 = gmx_simd4_sub_f(xj_l, xi_h); \
+ dy_1 = gmx_simd4_sub_f(yj_l, yi_h); \
+ dz_1 = gmx_simd4_sub_f(zj_l, zi_h); \
\
- mx = _mm_max_ps(dx_0, dx_1); \
- my = _mm_max_ps(dy_0, dy_1); \
- mz = _mm_max_ps(dz_0, dz_1); \
+ mx = gmx_simd4_max_f(dx_0, dx_1); \
+ my = gmx_simd4_max_f(dy_0, dy_1); \
+ mz = gmx_simd4_max_f(dz_0, dz_1); \
\
- m0x = _mm_max_ps(mx, zero); \
- m0y = _mm_max_ps(my, zero); \
- m0z = _mm_max_ps(mz, zero); \
+ m0x = gmx_simd4_max_f(mx, zero); \
+ m0y = gmx_simd4_max_f(my, zero); \
+ m0z = gmx_simd4_max_f(mz, zero); \
\
- d2x = _mm_mul_ps(m0x, m0x); \
- d2y = _mm_mul_ps(m0y, m0y); \
- d2z = _mm_mul_ps(m0z, m0z); \
+ d2x = gmx_simd4_mul_f(m0x, m0x); \
+ d2y = gmx_simd4_mul_f(m0y, m0y); \
+ d2z = gmx_simd4_mul_f(m0z, m0z); \
\
- d2s = _mm_add_ps(d2x, d2y); \
- d2t = _mm_add_ps(d2s, d2z); \
+ d2s = gmx_simd4_add_f(d2x, d2y); \
+ d2t = gmx_simd4_add_f(d2s, d2z); \
\
- _mm_store_ps(d2+si, d2t); \
+ gmx_simd4_store_f(d2+si, d2t); \
}
-/* SSE code for nsi bb distances for bb format xxxxyyyyzzzz */
-static void subc_bb_dist2_sse_xxxx(const float *bb_j,
- int nsi, const float *bb_i,
- float *d2)
+/* 4-wide SIMD code for nsi bb distances for bb format xxxxyyyyzzzz */
+static void subc_bb_dist2_simd4_xxxx(const float *bb_j,
+ int nsi, const float *bb_i,
+ float *d2)
{
- __m128 xj_l, yj_l, zj_l;
- __m128 xj_h, yj_h, zj_h;
- __m128 xi_l, yi_l, zi_l;
- __m128 xi_h, yi_h, zi_h;
+ gmx_simd4_float_t xj_l, yj_l, zj_l;
+ gmx_simd4_float_t xj_h, yj_h, zj_h;
+ gmx_simd4_float_t xi_l, yi_l, zi_l;
+ gmx_simd4_float_t xi_h, yi_h, zi_h;
- __m128 zero;
+ gmx_simd4_float_t zero;
- zero = _mm_setzero_ps();
+ zero = gmx_simd4_setzero_f();
- xj_l = _mm_set1_ps(bb_j[0*STRIDE_PBB]);
- yj_l = _mm_set1_ps(bb_j[1*STRIDE_PBB]);
- zj_l = _mm_set1_ps(bb_j[2*STRIDE_PBB]);
- xj_h = _mm_set1_ps(bb_j[3*STRIDE_PBB]);
- yj_h = _mm_set1_ps(bb_j[4*STRIDE_PBB]);
- zj_h = _mm_set1_ps(bb_j[5*STRIDE_PBB]);
+ xj_l = gmx_simd4_set1_f(bb_j[0*STRIDE_PBB]);
+ yj_l = gmx_simd4_set1_f(bb_j[1*STRIDE_PBB]);
+ zj_l = gmx_simd4_set1_f(bb_j[2*STRIDE_PBB]);
+ xj_h = gmx_simd4_set1_f(bb_j[3*STRIDE_PBB]);
+ yj_h = gmx_simd4_set1_f(bb_j[4*STRIDE_PBB]);
+ zj_h = gmx_simd4_set1_f(bb_j[5*STRIDE_PBB]);
/* Here we "loop" over si (0,STRIDE_PBB) from 0 to nsi with step STRIDE_PBB.
* But as we know the number of iterations is 1 or 2, we unroll manually.
*/
- SUBC_BB_DIST2_SSE_XXXX_INNER(0, bb_i, d2);
+ SUBC_BB_DIST2_SIMD4_XXXX_INNER(0, bb_i, d2);
if (STRIDE_PBB < nsi)
{
- SUBC_BB_DIST2_SSE_XXXX_INNER(STRIDE_PBB, bb_i, d2);
+ SUBC_BB_DIST2_SIMD4_XXXX_INNER(STRIDE_PBB, bb_i, d2);
}
}
-#endif /* NBNXN_SEARCH_BB_SSE */
+#endif /* NBNXN_SEARCH_BB_SIMD4 */
/* Plain C function which determines if any atom pair between two cells
* is within distance sqrt(rl2).
return FALSE;
}
-/* SSE function which determines if any atom pair between two cells,
+#ifdef NBNXN_SEARCH_SIMD4_FLOAT_X_BB
+
+/* 4-wide SIMD function which determines if any atom pair between two cells,
* both with 8 atoms, is within distance sqrt(rl2).
+ * Using 8-wide AVX is not faster on Intel Sandy Bridge.
*/
-static gmx_bool subc_in_range_sse8(int na_c,
- int si, const real *x_i,
- int csj, int stride, const real *x_j,
- real rl2)
+static gmx_bool subc_in_range_simd4(int na_c,
+ int si, const real *x_i,
+ int csj, int stride, const real *x_j,
+ real rl2)
{
-#ifdef NBNXN_SEARCH_SSE_SINGLE
- __m128 ix_SSE0, iy_SSE0, iz_SSE0;
- __m128 ix_SSE1, iy_SSE1, iz_SSE1;
+ gmx_simd4_real_t ix_S0, iy_S0, iz_S0;
+ gmx_simd4_real_t ix_S1, iy_S1, iz_S1;
- __m128 rc2_SSE;
+ gmx_simd4_real_t rc2_S;
- int na_c_sse;
- int j0, j1;
+ int dim_stride;
+ int j0, j1;
- rc2_SSE = _mm_set1_ps(rl2);
+ rc2_S = gmx_simd4_set1_r(rl2);
- na_c_sse = NBNXN_GPU_CLUSTER_SIZE/STRIDE_PBB;
- ix_SSE0 = _mm_load_ps(x_i+(si*na_c_sse*DIM+0)*STRIDE_PBB);
- iy_SSE0 = _mm_load_ps(x_i+(si*na_c_sse*DIM+1)*STRIDE_PBB);
- iz_SSE0 = _mm_load_ps(x_i+(si*na_c_sse*DIM+2)*STRIDE_PBB);
- ix_SSE1 = _mm_load_ps(x_i+(si*na_c_sse*DIM+3)*STRIDE_PBB);
- iy_SSE1 = _mm_load_ps(x_i+(si*na_c_sse*DIM+4)*STRIDE_PBB);
- iz_SSE1 = _mm_load_ps(x_i+(si*na_c_sse*DIM+5)*STRIDE_PBB);
+ dim_stride = NBNXN_GPU_CLUSTER_SIZE/STRIDE_PBB*DIM;
+ ix_S0 = gmx_simd4_load_r(x_i+(si*dim_stride+0)*STRIDE_PBB);
+ iy_S0 = gmx_simd4_load_r(x_i+(si*dim_stride+1)*STRIDE_PBB);
+ iz_S0 = gmx_simd4_load_r(x_i+(si*dim_stride+2)*STRIDE_PBB);
+ ix_S1 = gmx_simd4_load_r(x_i+(si*dim_stride+3)*STRIDE_PBB);
+ iy_S1 = gmx_simd4_load_r(x_i+(si*dim_stride+4)*STRIDE_PBB);
+ iz_S1 = gmx_simd4_load_r(x_i+(si*dim_stride+5)*STRIDE_PBB);
/* We loop from the outer to the inner particles to maximize
* the chance that we find a pair in range quickly and return.
j1 = j0 + na_c - 1;
while (j0 < j1)
{
- __m128 jx0_SSE, jy0_SSE, jz0_SSE;
- __m128 jx1_SSE, jy1_SSE, jz1_SSE;
+ gmx_simd4_real_t jx0_S, jy0_S, jz0_S;
+ gmx_simd4_real_t jx1_S, jy1_S, jz1_S;
- __m128 dx_SSE0, dy_SSE0, dz_SSE0;
- __m128 dx_SSE1, dy_SSE1, dz_SSE1;
- __m128 dx_SSE2, dy_SSE2, dz_SSE2;
- __m128 dx_SSE3, dy_SSE3, dz_SSE3;
+ gmx_simd4_real_t dx_S0, dy_S0, dz_S0;
+ gmx_simd4_real_t dx_S1, dy_S1, dz_S1;
+ gmx_simd4_real_t dx_S2, dy_S2, dz_S2;
+ gmx_simd4_real_t dx_S3, dy_S3, dz_S3;
- __m128 rsq_SSE0;
- __m128 rsq_SSE1;
- __m128 rsq_SSE2;
- __m128 rsq_SSE3;
+ gmx_simd4_real_t rsq_S0;
+ gmx_simd4_real_t rsq_S1;
+ gmx_simd4_real_t rsq_S2;
+ gmx_simd4_real_t rsq_S3;
- __m128 wco_SSE0;
- __m128 wco_SSE1;
- __m128 wco_SSE2;
- __m128 wco_SSE3;
- __m128 wco_any_SSE01, wco_any_SSE23, wco_any_SSE;
+ gmx_simd4_bool_t wco_S0;
+ gmx_simd4_bool_t wco_S1;
+ gmx_simd4_bool_t wco_S2;
+ gmx_simd4_bool_t wco_S3;
+ gmx_simd4_bool_t wco_any_S01, wco_any_S23, wco_any_S;
- jx0_SSE = _mm_load1_ps(x_j+j0*stride+0);
- jy0_SSE = _mm_load1_ps(x_j+j0*stride+1);
- jz0_SSE = _mm_load1_ps(x_j+j0*stride+2);
+ jx0_S = gmx_simd4_set1_r(x_j[j0*stride+0]);
+ jy0_S = gmx_simd4_set1_r(x_j[j0*stride+1]);
+ jz0_S = gmx_simd4_set1_r(x_j[j0*stride+2]);
- jx1_SSE = _mm_load1_ps(x_j+j1*stride+0);
- jy1_SSE = _mm_load1_ps(x_j+j1*stride+1);
- jz1_SSE = _mm_load1_ps(x_j+j1*stride+2);
+ jx1_S = gmx_simd4_set1_r(x_j[j1*stride+0]);
+ jy1_S = gmx_simd4_set1_r(x_j[j1*stride+1]);
+ jz1_S = gmx_simd4_set1_r(x_j[j1*stride+2]);
/* Calculate distance */
- dx_SSE0 = _mm_sub_ps(ix_SSE0, jx0_SSE);
- dy_SSE0 = _mm_sub_ps(iy_SSE0, jy0_SSE);
- dz_SSE0 = _mm_sub_ps(iz_SSE0, jz0_SSE);
- dx_SSE1 = _mm_sub_ps(ix_SSE1, jx0_SSE);
- dy_SSE1 = _mm_sub_ps(iy_SSE1, jy0_SSE);
- dz_SSE1 = _mm_sub_ps(iz_SSE1, jz0_SSE);
- dx_SSE2 = _mm_sub_ps(ix_SSE0, jx1_SSE);
- dy_SSE2 = _mm_sub_ps(iy_SSE0, jy1_SSE);
- dz_SSE2 = _mm_sub_ps(iz_SSE0, jz1_SSE);
- dx_SSE3 = _mm_sub_ps(ix_SSE1, jx1_SSE);
- dy_SSE3 = _mm_sub_ps(iy_SSE1, jy1_SSE);
- dz_SSE3 = _mm_sub_ps(iz_SSE1, jz1_SSE);
+ dx_S0 = gmx_simd4_sub_r(ix_S0, jx0_S);
+ dy_S0 = gmx_simd4_sub_r(iy_S0, jy0_S);
+ dz_S0 = gmx_simd4_sub_r(iz_S0, jz0_S);
+ dx_S1 = gmx_simd4_sub_r(ix_S1, jx0_S);
+ dy_S1 = gmx_simd4_sub_r(iy_S1, jy0_S);
+ dz_S1 = gmx_simd4_sub_r(iz_S1, jz0_S);
+ dx_S2 = gmx_simd4_sub_r(ix_S0, jx1_S);
+ dy_S2 = gmx_simd4_sub_r(iy_S0, jy1_S);
+ dz_S2 = gmx_simd4_sub_r(iz_S0, jz1_S);
+ dx_S3 = gmx_simd4_sub_r(ix_S1, jx1_S);
+ dy_S3 = gmx_simd4_sub_r(iy_S1, jy1_S);
+ dz_S3 = gmx_simd4_sub_r(iz_S1, jz1_S);
/* rsq = dx*dx+dy*dy+dz*dz */
- rsq_SSE0 = gmx_mm_calc_rsq_ps(dx_SSE0, dy_SSE0, dz_SSE0);
- rsq_SSE1 = gmx_mm_calc_rsq_ps(dx_SSE1, dy_SSE1, dz_SSE1);
- rsq_SSE2 = gmx_mm_calc_rsq_ps(dx_SSE2, dy_SSE2, dz_SSE2);
- rsq_SSE3 = gmx_mm_calc_rsq_ps(dx_SSE3, dy_SSE3, dz_SSE3);
+ rsq_S0 = gmx_simd4_calc_rsq_r(dx_S0, dy_S0, dz_S0);
+ rsq_S1 = gmx_simd4_calc_rsq_r(dx_S1, dy_S1, dz_S1);
+ rsq_S2 = gmx_simd4_calc_rsq_r(dx_S2, dy_S2, dz_S2);
+ rsq_S3 = gmx_simd4_calc_rsq_r(dx_S3, dy_S3, dz_S3);
- wco_SSE0 = _mm_cmplt_ps(rsq_SSE0, rc2_SSE);
- wco_SSE1 = _mm_cmplt_ps(rsq_SSE1, rc2_SSE);
- wco_SSE2 = _mm_cmplt_ps(rsq_SSE2, rc2_SSE);
- wco_SSE3 = _mm_cmplt_ps(rsq_SSE3, rc2_SSE);
+ wco_S0 = gmx_simd4_cmplt_r(rsq_S0, rc2_S);
+ wco_S1 = gmx_simd4_cmplt_r(rsq_S1, rc2_S);
+ wco_S2 = gmx_simd4_cmplt_r(rsq_S2, rc2_S);
+ wco_S3 = gmx_simd4_cmplt_r(rsq_S3, rc2_S);
- wco_any_SSE01 = _mm_or_ps(wco_SSE0, wco_SSE1);
- wco_any_SSE23 = _mm_or_ps(wco_SSE2, wco_SSE3);
- wco_any_SSE = _mm_or_ps(wco_any_SSE01, wco_any_SSE23);
+ wco_any_S01 = gmx_simd4_or_b(wco_S0, wco_S1);
+ wco_any_S23 = gmx_simd4_or_b(wco_S2, wco_S3);
+ wco_any_S = gmx_simd4_or_b(wco_any_S01, wco_any_S23);
- if (_mm_movemask_ps(wco_any_SSE))
+ if (gmx_simd4_anytrue_b(wco_any_S))
{
return TRUE;
}
}
return FALSE;
-#else
- /* No SSE */
- gmx_incons("SSE function called without SSE support");
-
- return TRUE;
-#endif
}
+#endif
+
/* Returns the j sub-cell for index cj_ind */
static int nbl_cj(const nbnxn_pairlist_t *nbl, int cj_ind)
}
/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
-static unsigned nbl_imask0(const nbnxn_pairlist_t *nbl, int cj_ind)
+static unsigned int nbl_imask0(const nbnxn_pairlist_t *nbl, int cj_ind)
{
return nbl->cj4[cj_ind >> NBNXN_GPU_JGROUP_SIZE_2LOG].imei[0].imask;
}
for (t = 0; t < WARP_SIZE; t++)
{
/* Turn all interaction bits on */
- excl->pair[t] = NBNXN_INT_MASK_ALL;
+ excl->pair[t] = NBNXN_INTERACTION_MASK_ALL;
}
}
}
snew(nbl->work, 1);
+ if (nbl->bSimple)
+ {
+ snew_aligned(nbl->work->bb_ci, 1, NBNXN_SEARCH_BB_MEM_ALIGN);
+ }
+ else
+ {
#ifdef NBNXN_BBXXXX
- snew_aligned(nbl->work->bb_ci, GPU_NSUBCELL/STRIDE_PBB*NNBSBB_XXXX, NBNXN_MEM_ALIGN);
+ snew_aligned(nbl->work->pbb_ci, GPU_NSUBCELL/STRIDE_PBB*NNBSBB_XXXX, NBNXN_SEARCH_BB_MEM_ALIGN);
#else
- snew_aligned(nbl->work->bb_ci, GPU_NSUBCELL*NNBSBB_B, NBNXN_MEM_ALIGN);
+ snew_aligned(nbl->work->bb_ci, GPU_NSUBCELL, NBNXN_SEARCH_BB_MEM_ALIGN);
#endif
- snew_aligned(nbl->work->x_ci, NBNXN_NA_SC_MAX*DIM, NBNXN_MEM_ALIGN);
+ }
+ snew_aligned(nbl->work->x_ci, NBNXN_NA_SC_MAX*DIM, NBNXN_SEARCH_BB_MEM_ALIGN);
#ifdef GMX_NBNXN_SIMD
snew_aligned(nbl->work->x_ci_simd_4xn, 1, NBNXN_MEM_ALIGN);
snew_aligned(nbl->work->x_ci_simd_2xnn, 1, NBNXN_MEM_ALIGN);
#endif
- snew_aligned(nbl->work->d2, GPU_NSUBCELL, NBNXN_MEM_ALIGN);
+ snew_aligned(nbl->work->d2, GPU_NSUBCELL, NBNXN_SEARCH_BB_MEM_ALIGN);
nbl->work->sort = NULL;
nbl->work->sort_nalloc = 0;
}
snew(nbl_list->nbl, nbl_list->nnbl);
+ snew(nbl_list->nbl_fep, nbl_list->nnbl);
/* Execute in order to avoid memory interleaving between threads */
#pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
for (i = 0; i < nbl_list->nnbl; i++)
{
nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, NULL, NULL);
}
+
+ snew(nbl_list->nbl_fep[i], 1);
+ nbnxn_init_pairlist_fep(nbl_list->nbl_fep[i]);
}
}
j = nbl->ci[i].cj_ind_start;
while (j < nbl->ci[i].cj_ind_end &&
- nbl->cj[j].excl != NBNXN_INT_MASK_ALL)
+ nbl->cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
{
npexcl++;
j++;
}
/* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp,
- * allocates extra memory, if necessary.
+ * generates a new element and allocates extra memory, if necessary.
*/
static void get_nbl_exclusions_1(nbnxn_pairlist_t *nbl, int cj4,
int warp, nbnxn_excl_t **excl)
}
/* Returns pointers to the exclusion mask for cj4-unit cj4 for both warps,
- * allocates extra memory, if necessary.
+ * generates a new element and allocates extra memory, if necessary.
*/
static void get_nbl_exclusions_2(nbnxn_pairlist_t *nbl, int cj4,
nbnxn_excl_t **excl_w0,
/* Returns a diagonal or off-diagonal interaction mask for plain C lists */
static unsigned int get_imask(gmx_bool rdiag, int ci, int cj)
{
- return (rdiag && ci == cj ? NBNXN_INT_MASK_DIAG : NBNXN_INT_MASK_ALL);
+ return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}
-/* Returns a diagonal or off-diagonal interaction mask for SIMD128 lists */
-static unsigned int get_imask_simd128(gmx_bool rdiag, int ci, int cj)
+/* Returns a diagonal or off-diagonal interaction mask for cj-size=2 */
+static unsigned int get_imask_simd_j2(gmx_bool rdiag, int ci, int cj)
{
-#ifndef GMX_DOUBLE /* cj-size = 4 */
- return (rdiag && ci == cj ? NBNXN_INT_MASK_DIAG : NBNXN_INT_MASK_ALL);
-#else /* cj-size = 2 */
- return (rdiag && ci*2 == cj ? NBNXN_INT_MASK_DIAG_J2_0 :
- (rdiag && ci*2+1 == cj ? NBNXN_INT_MASK_DIAG_J2_1 :
- NBNXN_INT_MASK_ALL));
-#endif
+ return (rdiag && ci*2 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_0 :
+ (rdiag && ci*2+1 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_1 :
+ NBNXN_INTERACTION_MASK_ALL));
}
-/* Returns a diagonal or off-diagonal interaction mask for SIMD256 lists */
-static unsigned int get_imask_simd256(gmx_bool rdiag, int ci, int cj)
+/* Returns a diagonal or off-diagonal interaction mask for cj-size=4 */
+static unsigned int get_imask_simd_j4(gmx_bool rdiag, int ci, int cj)
{
-#ifndef GMX_DOUBLE /* cj-size = 8 */
- return (rdiag && ci == cj*2 ? NBNXN_INT_MASK_DIAG_J8_0 :
- (rdiag && ci == cj*2+1 ? NBNXN_INT_MASK_DIAG_J8_1 :
- NBNXN_INT_MASK_ALL));
-#else /* cj-size = 4 */
- return (rdiag && ci == cj ? NBNXN_INT_MASK_DIAG : NBNXN_INT_MASK_ALL);
-#endif
+ return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
+}
+
+/* Returns a diagonal or off-diagonal interaction mask for cj-size=8 */
+static unsigned int get_imask_simd_j8(gmx_bool rdiag, int ci, int cj)
+{
+ return (rdiag && ci == cj*2 ? NBNXN_INTERACTION_MASK_DIAG_J8_0 :
+ (rdiag && ci == cj*2+1 ? NBNXN_INTERACTION_MASK_DIAG_J8_1 :
+ NBNXN_INTERACTION_MASK_ALL));
}
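+
+/* In all these masks, bit i*CJ_SIZE + j tells whether atom i of the
+ * i-cluster interacts with atom j of the j-cluster; the diagonal masks
+ * keep only the j > i pairs for cluster pairs that contain the diagonal.
+ */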
#ifdef GMX_NBNXN_SIMD
-#if GMX_NBNXN_SIMD_BITWIDTH == 128
-#define get_imask_simd_4xn get_imask_simd128
-#else
-#if GMX_NBNXN_SIMD_BITWIDTH == 256
-#define get_imask_simd_4xn get_imask_simd256
-#define get_imask_simd_2xnn get_imask_simd128
-#else
-#error "unsupported GMX_NBNXN_SIMD_BITWIDTH"
+#if GMX_SIMD_REAL_WIDTH == 2
+#define get_imask_simd_4xn get_imask_simd_j2
+#endif
+#if GMX_SIMD_REAL_WIDTH == 4
+#define get_imask_simd_4xn get_imask_simd_j4
+#endif
+#if GMX_SIMD_REAL_WIDTH == 8
+#define get_imask_simd_4xn get_imask_simd_j8
+#define get_imask_simd_2xnn get_imask_simd_j4
#endif
+#if GMX_SIMD_REAL_WIDTH == 16
+#define get_imask_simd_2xnn get_imask_simd_j8
#endif
#endif
{
const nbnxn_list_work_t *work;
- const float *bb_ci;
+ const nbnxn_bb_t *bb_ci;
const real *x_ci;
gmx_bool InRange;
#include "nbnxn_search_simd_2xnn.h"
#endif
-/* Plain C or SSE code for making a pair list of super-cell sci vs scj.
+/* Plain C or SIMD4 code for making a pair list of super-cell sci vs scj.
* Checks bounding box distances and possibly atom pair distances.
*/
-static void make_cluster_list_supersub(const nbnxn_search_t nbs,
- const nbnxn_grid_t *gridi,
+static void make_cluster_list_supersub(const nbnxn_grid_t *gridi,
const nbnxn_grid_t *gridj,
nbnxn_pairlist_t *nbl,
int sci, int scj,
real rl2, float rbb2,
int *ndistc)
{
- int na_c;
- int npair;
- int cjo, ci1, ci, cj, cj_gl;
- int cj4_ind, cj_offset;
- unsigned imask;
- nbnxn_cj4_t *cj4;
- const float *bb_ci;
- const real *x_ci;
- float *d2l, d2;
- int w;
+ int na_c;
+ int npair;
+ int cjo, ci1, ci, cj, cj_gl;
+ int cj4_ind, cj_offset;
+ unsigned int imask;
+ nbnxn_cj4_t *cj4;
+#ifdef NBNXN_BBXXXX
+ const float *pbb_ci;
+#else
+ const nbnxn_bb_t *bb_ci;
+#endif
+ const real *x_ci;
+ float *d2l, d2;
+ int w;
#define PRUNE_LIST_CPU_ONE
#ifdef PRUNE_LIST_CPU_ONE
int ci_last = -1;
d2l = nbl->work->d2;
- bb_ci = nbl->work->bb_ci;
- x_ci = nbl->work->x_ci;
+#ifdef NBNXN_BBXXXX
+ pbb_ci = nbl->work->pbb_ci;
+#else
+ bb_ci = nbl->work->bb_ci;
+#endif
+ x_ci = nbl->work->x_ci;
na_c = gridj->na_c;
}
#ifdef NBNXN_BBXXXX
- /* Determine all ci1 bb distances in one call with SSE */
- subc_bb_dist2_sse_xxxx(gridj->bb+(cj>>STRIDE_PBB_2LOG)*NNBSBB_XXXX+(cj & (STRIDE_PBB-1)),
- ci1, bb_ci, d2l);
+ /* Determine all ci1 bb distances in one call with SIMD4 */
+ subc_bb_dist2_simd4_xxxx(gridj->pbb+(cj>>STRIDE_PBB_2LOG)*NNBSBB_XXXX+(cj & (STRIDE_PBB-1)),
+ ci1, pbb_ci, d2l);
*ndistc += na_c*2;
#endif
*ndistc += na_c*na_c;
if (d2 < rbb2 ||
(d2 < rl2 &&
-#ifdef NBNXN_PBB_SSE
- subc_in_range_sse8
+#ifdef NBNXN_PBB_SIMD4
+ subc_in_range_simd4
#else
subc_in_range_x
#endif
{
/* Avoid using function pointers here, as it's slower */
if (
-#ifdef NBNXN_PBB_SSE
- !subc_in_range_sse8
+#ifdef NBNXN_PBB_SIMD4
+ !subc_in_range_simd4
#else
!subc_in_range_x
#endif
ndirect++;
}
}
-#ifdef NBNXN_SEARCH_BB_SSE
+#ifdef NBNXN_SEARCH_BB_SIMD4
else
{
while (cj_ind_first + ndirect <= cj_ind_last &&
inner_e = ge - (se << na_cj_2log);
nbl->cj[found].excl &= ~(1U<<((inner_i<<na_cj_2log) + inner_e));
+/* The next code line is usually not needed. We do not want to version
+ * away the above line, because there is logic that relies on being
+ * able to detect easily whether any exclusions exist. */
+#if (defined GMX_SIMD_IBM_QPX)
+ nbl->cj[found].interaction_mask_indices[inner_i] &= ~(1U << inner_e);
+#endif
+ }
+ }
+ }
+ }
+ }
+}
+
+/* Add a new i-entry to the FEP list and copy the i-properties */
+static gmx_inline void fep_list_new_nri_copy(t_nblist *nlist)
+{
+ /* Add a new i-entry */
+ nlist->nri++;
+
+ assert(nlist->nri < nlist->maxnri);
+
+ /* Duplicate the last i-entry, except for jindex, which continues */
+ nlist->iinr[nlist->nri] = nlist->iinr[nlist->nri-1];
+ nlist->shift[nlist->nri] = nlist->shift[nlist->nri-1];
+ nlist->gid[nlist->nri] = nlist->gid[nlist->nri-1];
+ nlist->jindex[nlist->nri] = nlist->nrj;
+}
+
+/* For load balancing of the free-energy lists over threads, we set
+ * the maximum nrj size of an i-entry to 40. This leads to good
+ * load balancing in the worst case scenario of a single perturbed
+ * particle on 16 threads, while not introducing significant overhead.
+ * Note that half of the perturbed pairs will anyhow end up in very small lists,
+ * since non-perturbed i-particles will see few perturbed j-particles.
+ */
+const int max_nrj_fep = 40;
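+
+/* For example, a single perturbed atom with 600 j-partners in range is
+ * then split into 15 i-entries of at most 40 j-atoms each, which can be
+ * spread over the threads (numbers purely illustrative).
+ */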
+
+/* Exclude the perturbed pairs from the Verlet list. This is only done to avoid
+ * singularities for overlapping particles (0/0), since the charges and
+ * LJ parameters have been zeroed in the nbnxn data structure.
+ * Simultaneously make a group pair list for the perturbed pairs.
+ */
+static void make_fep_list(const nbnxn_search_t nbs,
+ const nbnxn_atomdata_t *nbat,
+ nbnxn_pairlist_t *nbl,
+ gmx_bool bDiagRemoved,
+ nbnxn_ci_t *nbl_ci,
+ const nbnxn_grid_t *gridi,
+ const nbnxn_grid_t *gridj,
+ t_nblist *nlist)
+{
+ int ci, cj_ind_start, cj_ind_end, cj_ind, cja, cjr;
+ int nri_max;
+ int ngid, gid_i = 0, gid_j, gid;
+ int egp_shift, egp_mask;
+ int gid_cj = 0;
+ int i, j, ind_i, ind_j, ai, aj;
+ int nri;
+ gmx_bool bFEP_i, bFEP_i_all;
+
+ if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
+ {
+ /* Empty list */
+ return;
+ }
+
+ ci = nbl_ci->ci;
+
+ cj_ind_start = nbl_ci->cj_ind_start;
+ cj_ind_end = nbl_ci->cj_ind_end;
+
+    /* In the worst case we have alternating energy groups
+ * and create #atom-pair lists, which means we need the size
+ * of a cluster pair (na_ci*na_cj) times the number of cj's.
+ */
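+    /* (e.g. a 4x4 kernel with 20 cj's gives nri_max = 4*4*20 = 320) */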
+ nri_max = nbl->na_ci*nbl->na_cj*(cj_ind_end - cj_ind_start);
+ if (nlist->nri + nri_max > nlist->maxnri)
+ {
+ nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
+ reallocate_nblist(nlist);
+ }
+
+ ngid = nbat->nenergrp;
+
+ if (ngid*gridj->na_cj > sizeof(gid_cj)*8)
+ {
+ gmx_fatal(FARGS, "The Verlet scheme with %dx%d kernels and free-energy only supports up to %d energy groups",
+ gridi->na_c, gridj->na_cj, (sizeof(gid_cj)*8)/gridj->na_cj);
+ }
+
+ egp_shift = nbat->neg_2log;
+ egp_mask = (1<<nbat->neg_2log) - 1;
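+    /* e.g. with neg_2log = 2 each atom's group id occupies 2 bits,
+     * so egp_shift = 2 and egp_mask = 0x3 (up to 4 energy groups).
+     */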
+
+ /* Loop over the atoms in the i sub-cell */
+ bFEP_i_all = TRUE;
+ for (i = 0; i < nbl->na_ci; i++)
+ {
+ ind_i = ci*nbl->na_ci + i;
+ ai = nbs->a[ind_i];
+ if (ai >= 0)
+ {
+ nri = nlist->nri;
+ nlist->jindex[nri+1] = nlist->jindex[nri];
+ nlist->iinr[nri] = ai;
+ /* The actual energy group pair index is set later */
+ nlist->gid[nri] = 0;
+ nlist->shift[nri] = nbl_ci->shift & NBNXN_CI_SHIFT;
+
+ bFEP_i = gridi->fep[ci - gridi->cell0] & (1 << i);
+
+ bFEP_i_all = bFEP_i_all && bFEP_i;
+
+ if ((nlist->nrj + cj_ind_end - cj_ind_start)*nbl->na_cj > nlist->maxnrj)
+ {
+ nlist->maxnrj = over_alloc_small((nlist->nrj + cj_ind_end - cj_ind_start)*nbl->na_cj);
+ srenew(nlist->jjnr, nlist->maxnrj);
+ srenew(nlist->excl_fep, nlist->maxnrj);
+ }
+
+ if (ngid > 1)
+ {
+ gid_i = (nbat->energrp[ci] >> (egp_shift*i)) & egp_mask;
+ }
+
+ for (cj_ind = cj_ind_start; cj_ind < cj_ind_end; cj_ind++)
+ {
+ unsigned int fep_cj;
+
+ cja = nbl->cj[cj_ind].cj;
+
+ if (gridj->na_cj == gridj->na_c)
+ {
+ cjr = cja - gridj->cell0;
+ fep_cj = gridj->fep[cjr];
+ if (ngid > 1)
+ {
+ gid_cj = nbat->energrp[cja];
+ }
+ }
+ else if (2*gridj->na_cj == gridj->na_c)
+ {
+ cjr = cja - gridj->cell0*2;
+ /* Extract half of the ci fep/energrp mask */
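+                    /* (e.g. na_cj = 4: j-cluster 2k+1 takes bits 4-7 of
+                     * the 8-bit fep mask stored for i-sized cluster k)
+                     */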
+ fep_cj = (gridj->fep[cjr>>1] >> ((cjr&1)*gridj->na_cj)) & ((1<<gridj->na_cj) - 1);
+ if (ngid > 1)
+ {
+ gid_cj = nbat->energrp[cja>>1] >> ((cja&1)*gridj->na_cj*egp_shift) & ((1<<(gridj->na_cj*egp_shift)) - 1);
+ }
+ }
+ else
+ {
+ cjr = cja - (gridj->cell0>>1);
+ /* Combine two ci fep masks/energrp */
+ fep_cj = gridj->fep[cjr*2] + (gridj->fep[cjr*2+1] << gridj->na_c);
+ if (ngid > 1)
+ {
+ gid_cj = nbat->energrp[cja*2] + (nbat->energrp[cja*2+1] << (gridj->na_c*egp_shift));
+ }
+ }
+
+ if (bFEP_i || fep_cj != 0)
+ {
+ for (j = 0; j < nbl->na_cj; j++)
+ {
+ /* Is this interaction perturbed and not excluded? */
+ ind_j = cja*nbl->na_cj + j;
+ aj = nbs->a[ind_j];
+ if (aj >= 0 &&
+ (bFEP_i || (fep_cj & (1 << j))) &&
+ (!bDiagRemoved || ind_j >= ind_i))
+ {
+ if (ngid > 1)
+ {
+ gid_j = (gid_cj >> (j*egp_shift)) & egp_mask;
+ gid = GID(gid_i, gid_j, ngid);
+
+ if (nlist->nrj > nlist->jindex[nri] &&
+ nlist->gid[nri] != gid)
+ {
+ /* Energy group pair changed: new list */
+ fep_list_new_nri_copy(nlist);
+ nri = nlist->nri;
+ }
+ nlist->gid[nri] = gid;
+ }
+
+ if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
+ {
+ fep_list_new_nri_copy(nlist);
+ nri = nlist->nri;
+ }
+
+ /* Add it to the FEP list */
+ nlist->jjnr[nlist->nrj] = aj;
+ nlist->excl_fep[nlist->nrj] = (nbl->cj[cj_ind].excl >> (i*nbl->na_cj + j)) & 1;
+ nlist->nrj++;
+
+ /* Exclude it from the normal list.
+ * Note that the charge has been set to zero,
+ * but we need to avoid 0/0, as perturbed atoms
+ * can be on top of each other.
+ */
+ nbl->cj[cj_ind].excl &= ~(1U << (i*nbl->na_cj + j));
+ }
}
}
}
+
+ if (nlist->nrj > nlist->jindex[nri])
+ {
+ /* Actually add this new, non-empty, list */
+ nlist->nri++;
+ nlist->jindex[nlist->nri] = nlist->nrj;
+ }
+ }
+ }
+
+ if (bFEP_i_all)
+ {
+ /* All interactions are perturbed, we can skip this entry */
+ nbl_ci->cj_ind_end = cj_ind_start;
+ }
+}
+
+/* Return the index of a j-cluster within its cj4 group */
+static gmx_inline int cj_mod_cj4(int cj)
+{
+ return cj & (NBNXN_GPU_JGROUP_SIZE - 1);
+}
+
+/* Convert a j-cluster to a cj4 group */
+static gmx_inline int cj_to_cj4(int cj)
+{
+ return cj >> NBNXN_GPU_JGROUP_SIZE_2LOG;
+}
+
+/* Return the index of a j-atom within a warp */
+static gmx_inline int a_mod_wj(int a)
+{
+ return a & (NBNXN_GPU_CLUSTER_SIZE/2 - 1);
+}
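+
+/* For example, with NBNXN_GPU_JGROUP_SIZE = 4 and NBNXN_GPU_CLUSTER_SIZE = 8:
+ * cj = 13 is entry cj_mod_cj4(13) = 1 of cj4 group cj_to_cj4(13) = 3 and
+ * j-atom 11 maps to a_mod_wj(11) = 3 within its warp.
+ */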
+
+/* As make_fep_list above, but for super/sub lists. */
+static void make_fep_list_supersub(const nbnxn_search_t nbs,
+ const nbnxn_atomdata_t *nbat,
+ nbnxn_pairlist_t *nbl,
+ gmx_bool bDiagRemoved,
+ const nbnxn_sci_t *nbl_sci,
+ real shx,
+ real shy,
+ real shz,
+ real rlist_fep2,
+ const nbnxn_grid_t *gridi,
+ const nbnxn_grid_t *gridj,
+ t_nblist *nlist)
+{
+ int sci, cj4_ind_start, cj4_ind_end, cj4_ind, gcj, cjr;
+ int nri_max;
+ int c, c_abs;
+ int i, j, ind_i, ind_j, ai, aj;
+ int nri;
+ gmx_bool bFEP_i;
+ real xi, yi, zi;
+ const nbnxn_cj4_t *cj4;
+
+ if (nbl_sci->cj4_ind_end == nbl_sci->cj4_ind_start)
+ {
+ /* Empty list */
+ return;
+ }
+
+ sci = nbl_sci->sci;
+
+ cj4_ind_start = nbl_sci->cj4_ind_start;
+ cj4_ind_end = nbl_sci->cj4_ind_end;
+
+    /* Here we process one super-cell, max #atoms na_sc, versus a list
+     * of cj4 entries, each holding up to NBNXN_GPU_JGROUP_SIZE cj's,
+     * each of size na_cj atoms.
+     * On the GPU we don't support energy groups (yet).
+     * So for each of the na_sc i-atoms, we need at most one FEP list
+     * per max_nrj_fep j-atoms.
+     */
+ nri_max = nbl->na_sc*nbl->na_cj*(1 + ((cj4_ind_end - cj4_ind_start)*NBNXN_GPU_JGROUP_SIZE)/max_nrj_fep);
+ if (nlist->nri + nri_max > nlist->maxnri)
+ {
+ nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
+ reallocate_nblist(nlist);
+ }
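+    /* For example (illustrative numbers only): with na_sc = 64,
+     * na_cj = 8, max_nrj_fep = 40 and 10 cj4 entries (10*4 = 40
+     * j-clusters), the formula gives nri_max = 64*8*(1 + 40/40) = 1024.
+     */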
+
+ /* Loop over the atoms in the i super-cluster */
+ for (c = 0; c < GPU_NSUBCELL; c++)
+ {
+ c_abs = sci*GPU_NSUBCELL + c;
+
+ for (i = 0; i < nbl->na_ci; i++)
+ {
+ ind_i = c_abs*nbl->na_ci + i;
+ ai = nbs->a[ind_i];
+ if (ai >= 0)
+ {
+ nri = nlist->nri;
+ nlist->jindex[nri+1] = nlist->jindex[nri];
+ nlist->iinr[nri] = ai;
+ /* With GPUs, energy groups are not supported */
+ nlist->gid[nri] = 0;
+ nlist->shift[nri] = nbl_sci->shift & NBNXN_CI_SHIFT;
+
+ bFEP_i = (gridi->fep[c_abs - gridi->cell0] & (1 << i));
+
+ xi = nbat->x[ind_i*nbat->xstride+XX] + shx;
+ yi = nbat->x[ind_i*nbat->xstride+YY] + shy;
+ zi = nbat->x[ind_i*nbat->xstride+ZZ] + shz;
+
+ if ((nlist->nrj + cj4_ind_end - cj4_ind_start)*NBNXN_GPU_JGROUP_SIZE*nbl->na_cj > nlist->maxnrj)
+ {
+ nlist->maxnrj = over_alloc_small((nlist->nrj + cj4_ind_end - cj4_ind_start)*NBNXN_GPU_JGROUP_SIZE*nbl->na_cj);
+ srenew(nlist->jjnr, nlist->maxnrj);
+ srenew(nlist->excl_fep, nlist->maxnrj);
+ }
+
+ for (cj4_ind = cj4_ind_start; cj4_ind < cj4_ind_end; cj4_ind++)
+ {
+ cj4 = &nbl->cj4[cj4_ind];
+
+ for (gcj = 0; gcj < NBNXN_GPU_JGROUP_SIZE; gcj++)
+ {
+ unsigned int fep_cj;
+
+ if ((cj4->imei[0].imask & (1U << (gcj*GPU_NSUBCELL + c))) == 0)
+ {
+ /* Skip this ci for this cj */
+ continue;
+ }
+
+ cjr = cj4->cj[gcj] - gridj->cell0*GPU_NSUBCELL;
+
+ fep_cj = gridj->fep[cjr];
+
+ if (bFEP_i || fep_cj != 0)
+ {
+ for (j = 0; j < nbl->na_cj; j++)
+ {
+ /* Is this interaction perturbed and not excluded? */
+ ind_j = (gridj->cell0*GPU_NSUBCELL + cjr)*nbl->na_cj + j;
+ aj = nbs->a[ind_j];
+ if (aj >= 0 &&
+ (bFEP_i || (fep_cj & (1 << j))) &&
+ (!bDiagRemoved || ind_j >= ind_i))
+ {
+ nbnxn_excl_t *excl;
+ int excl_pair;
+ unsigned int excl_bit;
+ real dx, dy, dz;
+
+ get_nbl_exclusions_1(nbl, cj4_ind, j>>2, &excl);
+
+ excl_pair = a_mod_wj(j)*nbl->na_ci + i;
+ excl_bit = (1U << (gcj*GPU_NSUBCELL + c));
+
+ dx = nbat->x[ind_j*nbat->xstride+XX] - xi;
+ dy = nbat->x[ind_j*nbat->xstride+YY] - yi;
+ dz = nbat->x[ind_j*nbat->xstride+ZZ] - zi;
+
+ /* The unpruned GPU list has more than 2/3
+ * of the atom pairs beyond rlist. Using
+ * this list will cause a lot of overhead
+ * in the CPU FEP kernels, especially
+ * relative to the fast GPU kernels.
+ * So we prune the FEP list here.
+ */
+ if (dx*dx + dy*dy + dz*dz < rlist_fep2)
+ {
+ if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
+ {
+ fep_list_new_nri_copy(nlist);
+ nri = nlist->nri;
+ }
+
+ /* Add it to the FEP list */
+ nlist->jjnr[nlist->nrj] = aj;
+ nlist->excl_fep[nlist->nrj] = (excl->pair[excl_pair] & excl_bit) ? 1 : 0;
+ nlist->nrj++;
+ }
+
+ /* Exclude it from the normal list.
+ * Note that the charge and LJ parameters have
+ * been set to zero, but we need to avoid 0/0,
+ * as perturbed atoms can be on top of each other.
+ */
+ excl->pair[excl_pair] &= ~excl_bit;
+ }
+ }
+
+ /* Note that we could mask out this pair in imask
+ * if all i- and/or all j-particles are perturbed.
+ * But since the perturbed pairs on the CPU will
+ * take an order of magnitude more time, the GPU
+ * will finish before the CPU and there is no gain.
+ */
+ }
+ }
+ }
+
+ if (nlist->nrj > nlist->jindex[nri])
+ {
+ /* Actually add this new, non-empty, list */
+ nlist->nri++;
+ nlist->jindex[nlist->nri] = nlist->nrj;
+ }
+ }
}
}
}
inner_i = i - si*na_c;
inner_e = ge - se*na_c;
-/* Macro for getting the index of atom a within a cluster */
-#define AMODCJ4(a) ((a) & (NBNXN_GPU_JGROUP_SIZE - 1))
-/* Macro for converting an atom number to a cluster number */
-#define A2CJ4(a) ((a) >> NBNXN_GPU_JGROUP_SIZE_2LOG)
-/* Macro for getting the index of an i-atom within a warp */
-#define AMODWI(a) ((a) & (NBNXN_GPU_CLUSTER_SIZE/2 - 1))
-
- if (nbl_imask0(nbl, found) & (1U << (AMODCJ4(found)*GPU_NSUBCELL + si)))
+ if (nbl_imask0(nbl, found) & (1U << (cj_mod_cj4(found)*GPU_NSUBCELL + si)))
{
w = (inner_e >> 2);
- get_nbl_exclusions_1(nbl, A2CJ4(found), w, &nbl_excl);
+ get_nbl_exclusions_1(nbl, cj_to_cj4(found), w, &nbl_excl);
- nbl_excl->pair[AMODWI(inner_e)*nbl->na_ci+inner_i] &=
- ~(1U << (AMODCJ4(found)*GPU_NSUBCELL + si));
+ nbl_excl->pair[a_mod_wj(inner_e)*nbl->na_ci+inner_i] &=
+ ~(1U << (cj_mod_cj4(found)*GPU_NSUBCELL + si));
}
-
-#undef AMODCJ4
-#undef A2CJ4
-#undef AMODWI
}
}
}
}
/* Make a new ci entry at index nbl->nci */
-static void new_ci_entry(nbnxn_pairlist_t *nbl, int ci, int shift, int flags,
- nbnxn_list_work_t *work)
+static void new_ci_entry(nbnxn_pairlist_t *nbl, int ci, int shift, int flags)
{
if (nbl->nci + 1 > nbl->ci_nalloc)
{
}
/* Make a new sci entry at index nbl->nsci */
-static void new_sci_entry(nbnxn_pairlist_t *nbl, int sci, int shift, int flags,
- nbnxn_list_work_t *work)
+static void new_sci_entry(nbnxn_pairlist_t *nbl, int sci, int shift)
{
if (nbl->nsci + 1 > nbl->sci_nalloc)
{
jnew = 0;
for (j = 0; j < ncj; j++)
{
- if (cj[j].excl != NBNXN_INT_MASK_ALL)
+ if (cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
{
work->cj[jnew++] = cj[j];
}
}
    /* Only reorder if there are exclusions beyond possibly the first
     * entry; otherwise the order cannot change */
if (!((jnew == 0) ||
- (jnew == 1 && cj[0].excl != NBNXN_INT_MASK_ALL)))
+ (jnew == 1 && cj[0].excl != NBNXN_INTERACTION_MASK_ALL)))
{
for (j = 0; j < ncj; j++)
{
- if (cj[j].excl == NBNXN_INT_MASK_ALL)
+ if (cj[j].excl == NBNXN_INTERACTION_MASK_ALL)
{
work->cj[jnew++] = cj[j];
}
nbl->work->ncj_hlj = 0;
}
+/* Clears a group scheme pair list */
+static void clear_pairlist_fep(t_nblist *nl)
+{
+ nl->nri = 0;
+ nl->nrj = 0;
+ if (nl->jindex == NULL)
+ {
+ snew(nl->jindex, 1);
+ }
+ nl->jindex[0] = 0;
+}
+
/* Sets a simple list i-cell bounding box, including PBC shift */
-static void set_icell_bb_simple(const float *bb, int ci,
- real shx, real shy, real shz,
- float *bb_ci)
+static gmx_inline void set_icell_bb_simple(const nbnxn_bb_t *bb, int ci,
+ real shx, real shy, real shz,
+ nbnxn_bb_t *bb_ci)
{
- int ia;
-
- ia = ci*NNBSBB_B;
- bb_ci[BBL_X] = bb[ia+BBL_X] + shx;
- bb_ci[BBL_Y] = bb[ia+BBL_Y] + shy;
- bb_ci[BBL_Z] = bb[ia+BBL_Z] + shz;
- bb_ci[BBU_X] = bb[ia+BBU_X] + shx;
- bb_ci[BBU_Y] = bb[ia+BBU_Y] + shy;
- bb_ci[BBU_Z] = bb[ia+BBU_Z] + shz;
+ bb_ci->lower[BB_X] = bb[ci].lower[BB_X] + shx;
+ bb_ci->lower[BB_Y] = bb[ci].lower[BB_Y] + shy;
+ bb_ci->lower[BB_Z] = bb[ci].lower[BB_Z] + shz;
+ bb_ci->upper[BB_X] = bb[ci].upper[BB_X] + shx;
+ bb_ci->upper[BB_Y] = bb[ci].upper[BB_Y] + shy;
+ bb_ci->upper[BB_Z] = bb[ci].upper[BB_Z] + shz;
}
+#ifdef NBNXN_BBXXXX
/* Sets the packed super-cell and sub-cell bounding boxes, including the PBC shift */
-static void set_icell_bb_supersub(const float *bb, int ci,
- real shx, real shy, real shz,
- float *bb_ci)
+static void set_icell_bbxxxx_supersub(const float *bb, int ci,
+ real shx, real shy, real shz,
+ float *bb_ci)
{
int ia, m, i;
-#ifdef NBNXN_BBXXXX
ia = ci*(GPU_NSUBCELL>>STRIDE_PBB_2LOG)*NNBSBB_XXXX;
for (m = 0; m < (GPU_NSUBCELL>>STRIDE_PBB_2LOG)*NNBSBB_XXXX; m += NNBSBB_XXXX)
{
bb_ci[m+5*STRIDE_PBB+i] = bb[ia+m+5*STRIDE_PBB+i] + shz;
}
}
-#else
- ia = ci*GPU_NSUBCELL*NNBSBB_B;
- for (i = 0; i < GPU_NSUBCELL*NNBSBB_B; i += NNBSBB_B)
+}
+#endif
+
+/* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
+static void set_icell_bb_supersub(const nbnxn_bb_t *bb, int ci,
+ real shx, real shy, real shz,
+ nbnxn_bb_t *bb_ci)
+{
+ int i;
+
+ for (i = 0; i < GPU_NSUBCELL; i++)
{
- bb_ci[i+BBL_X] = bb[ia+i+BBL_X] + shx;
- bb_ci[i+BBL_Y] = bb[ia+i+BBL_Y] + shy;
- bb_ci[i+BBL_Z] = bb[ia+i+BBL_Z] + shz;
- bb_ci[i+BBU_X] = bb[ia+i+BBU_X] + shx;
- bb_ci[i+BBU_Y] = bb[ia+i+BBU_Y] + shy;
- bb_ci[i+BBU_Z] = bb[ia+i+BBU_Z] + shz;
+ set_icell_bb_simple(bb, ci*GPU_NSUBCELL+i,
+ shx, shy, shz,
+ &bb_ci[i]);
}
-#endif
}
/* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
static void icell_set_x_simple(int ci,
real shx, real shy, real shz,
- int na_c,
+ int gmx_unused na_c,
int stride, const real *x,
nbnxn_list_work_t *work)
{
int stride, const real *x,
nbnxn_list_work_t *work)
{
- int ia, i;
+ int ia, i;
real *x_ci;
x_ci = work->x_ci;
}
}
-#ifdef NBNXN_SEARCH_BB_SSE
+#ifdef NBNXN_SEARCH_BB_SIMD4
/* Copies PBC shifted super-cell packed atom coordinates to working array */
-static void icell_set_x_supersub_sse8(int ci,
- real shx, real shy, real shz,
- int na_c,
- int stride, const real *x,
- nbnxn_list_work_t *work)
+static void icell_set_x_supersub_simd4(int ci,
+ real shx, real shy, real shz,
+ int na_c,
+ int stride, const real *x,
+ nbnxn_list_work_t *work)
{
- int si, io, ia, i, j;
+ int si, io, ia, i, j;
real *x_ci;
x_ci = work->x_ci;
}
#endif
-static real nbnxn_rlist_inc_nonloc_fac = 0.6;
+static real minimum_subgrid_size_xy(const nbnxn_grid_t *grid)
+{
+ if (grid->bSimple)
+ {
+ return min(grid->sx, grid->sy);
+ }
+ else
+ {
+ return min(grid->sx/GPU_NSUBCELL_X, grid->sy/GPU_NSUBCELL_Y);
+ }
+}
+
+static real effective_buffer_1x1_vs_MxN(const nbnxn_grid_t *gridi,
+ const nbnxn_grid_t *gridj)
+{
+ const real eff_1x1_buffer_fac_overest = 0.1;
+
+ /* Determine an atom-pair list cut-off buffer size for atom pairs,
+ * to be added to rlist (including buffer) used for MxN.
+ * This is for converting an MxN list to a 1x1 list. This means we can't
+ * use the normal buffer estimate, as we have an MxN list in which
+ * some atom pairs beyond rlist are missing. We want to capture
+ * the beneficial effect of buffering by extra pairs just outside rlist,
+     * while removing the useless pairs that lie further beyond rlist.
+     * (Also, the buffer could have been set manually, without the estimate.)
+     * This buffer size is an overestimate.
+     * We add 10% of the smallest grid sub-cell dimensions.
+     * Note that the z-size differs per cell; since we do not use it,
+     * we overestimate.
+ * With PME, the 10% value gives a buffer that is somewhat larger
+ * than the effective buffer with a tolerance of 0.005 kJ/mol/ps.
+ * Smaller tolerances or using RF lead to a smaller effective buffer,
+ * so 10% gives a safe overestimate.
+ */
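+    /* For example (illustrative numbers only): with minimum sub-grid
+     * cell dimensions of 0.30 nm on both grids, the buffer returned
+     * below is 0.1*(0.30 + 0.30) = 0.06 nm on top of the MxN rlist.
+     */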
+ return eff_1x1_buffer_fac_overest*(minimum_subgrid_size_xy(gridi) +
+ minimum_subgrid_size_xy(gridj));
+}
+
+/* Clusters at the cut-off only increase rlist by 60% of their size */
+static real nbnxn_rlist_inc_outside_fac = 0.6;
/* Due to the cluster size, the effective pair-list cut-off is longer than
 * that of a simple atom pair-list. This function gives the extra distance.
*/
-real nbnxn_get_rlist_effective_inc(int cluster_size, real atom_density)
+real nbnxn_get_rlist_effective_inc(int cluster_size_j, real atom_density)
{
- return ((0.5 + nbnxn_rlist_inc_nonloc_fac)*sqr(((cluster_size) - 1.0)/(cluster_size))*pow((cluster_size)/(atom_density), 1.0/3.0));
+ int cluster_size_i;
+ real vol_inc_i, vol_inc_j;
+
+ /* We should get this from the setup, but currently it's the same for
+ * all setups, including GPUs.
+ */
+ cluster_size_i = NBNXN_CPU_CLUSTER_I_SIZE;
+
+ vol_inc_i = (cluster_size_i - 1)/atom_density;
+ vol_inc_j = (cluster_size_j - 1)/atom_density;
+
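+    /* For example (illustrative numbers only): with cluster_size_j = 4
+     * and an atom density of 100 atoms/nm^3, vol_inc_i and vol_inc_j are
+     * each 3/100 = 0.03 nm^3, so the returned increase is
+     * 0.6*(0.06)^(1/3), about 0.23 nm.
+     */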
+ return nbnxn_rlist_inc_outside_fac*pow(vol_inc_i + vol_inc_j, 1.0/3.0);
}
/* Estimates the interaction volume^2 for non-local interactions */
int min_ci_balanced)
{
const nbnxn_grid_t *grid;
- rvec ls;
- real xy_diag2, r_eff_sup, vol_est, nsp_est, nsp_est_nl;
- int nsubpair_max;
+ rvec ls;
+ real xy_diag2, r_eff_sup, vol_est, nsp_est, nsp_est_nl;
+ int nsubpair_max;
grid = &nbs->grid[0];
xy_diag2 = ls[XX]*ls[XX] + ls[YY]*ls[YY] + ls[ZZ]*ls[ZZ];
    /* The formulas below are a heuristic estimate of the average nsj per si */
- r_eff_sup = rlist + nbnxn_rlist_inc_nonloc_fac*sqr((grid->na_c - 1.0)/grid->na_c)*sqrt(xy_diag2/3);
+ r_eff_sup = rlist + nbnxn_rlist_inc_outside_fac*sqr((grid->na_c - 1.0)/grid->na_c)*sqrt(xy_diag2/3);
if (!nbs->DomDec || nbs->zones->n == 1)
{
{
int nsci, ncj4, nexcl;
int n, i;
+ int nthreads gmx_unused;
if (nblc->bSimple)
{
/* Each thread should copy its own data to the combined arrays,
* as otherwise data will go back and forth between different caches.
*/
-#pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntPairsearch)) schedule(static)
+ nthreads = gmx_omp_nthreads_get(emntPairsearch);
+#pragma omp parallel for num_threads(nthreads) schedule(static)
for (n = 0; n < nnbl; n++)
{
- int sci_offset;
- int cj4_offset;
- int ci_offset;
- int excl_offset;
- int i, j4;
+ int sci_offset;
+ int cj4_offset;
+ int ci_offset;
+ int excl_offset;
+ int i, j4;
const nbnxn_pairlist_t *nbli;
/* Determine the offset in the combined data for our thread */
}
}
+static void balance_fep_lists(const nbnxn_search_t nbs,
+ nbnxn_pairlist_set_t *nbl_lists)
+{
+ int nnbl, th;
+ int nri_tot, nrj_tot, nrj_target;
+ int th_dest;
+ t_nblist *nbld;
+
+ nnbl = nbl_lists->nnbl;
+
+ if (nnbl == 1)
+ {
+ /* Nothing to balance */
+ return;
+ }
+
+ /* Count the total i-lists and pairs */
+ nri_tot = 0;
+ nrj_tot = 0;
+ for (th = 0; th < nnbl; th++)
+ {
+ nri_tot += nbl_lists->nbl_fep[th]->nri;
+ nrj_tot += nbl_lists->nbl_fep[th]->nrj;
+ }
+
+ nrj_target = (nrj_tot + nnbl - 1)/nnbl;
+
+ assert(gmx_omp_nthreads_get(emntNonbonded) == nnbl);
+
+#pragma omp parallel for schedule(static) num_threads(nnbl)
+ for (th = 0; th < nnbl; th++)
+ {
+ t_nblist *nbl;
+
+ nbl = nbs->work[th].nbl_fep;
+
+ /* Note that here we allocate for the total size, instead of
+         * a per-thread estimate (which is hard to obtain).
+ */
+ if (nri_tot > nbl->maxnri)
+ {
+ nbl->maxnri = over_alloc_large(nri_tot);
+ reallocate_nblist(nbl);
+ }
+ if (nri_tot > nbl->maxnri || nrj_tot > nbl->maxnrj)
+ {
+ nbl->maxnrj = over_alloc_small(nrj_tot);
+ srenew(nbl->jjnr, nbl->maxnrj);
+ srenew(nbl->excl_fep, nbl->maxnrj);
+ }
+
+ clear_pairlist_fep(nbl);
+ }
+
+ /* Loop over the source lists and assign and copy i-entries */
+ th_dest = 0;
+ nbld = nbs->work[th_dest].nbl_fep;
+ for (th = 0; th < nnbl; th++)
+ {
+ t_nblist *nbls;
+ int i, j;
+
+ nbls = nbl_lists->nbl_fep[th];
+
+ for (i = 0; i < nbls->nri; i++)
+ {
+ int nrj;
+
+ /* The number of pairs in this i-entry */
+ nrj = nbls->jindex[i+1] - nbls->jindex[i];
+
+            /* Decide whether list th_dest would become too large and we
+             * should proceed to the next destination list.
+             */
+ if (th_dest+1 < nnbl && nbld->nrj > 0 &&
+ nbld->nrj + nrj - nrj_target > nrj_target - nbld->nrj)
+ {
+ th_dest++;
+ nbld = nbs->work[th_dest].nbl_fep;
+ }
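+            /* The check above advances th_dest when overshooting the
+             * target costs more than undershooting: e.g. (illustrative
+             * numbers only) with nrj_target = 100 and nbld->nrj = 90,
+             * an i-entry with nrj = 30 would overshoot to 120 (off by
+             * 20), worse than stopping at 90 (off by 10).
+             */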
+
+ nbld->iinr[nbld->nri] = nbls->iinr[i];
+ nbld->gid[nbld->nri] = nbls->gid[i];
+ nbld->shift[nbld->nri] = nbls->shift[i];
+
+ for (j = nbls->jindex[i]; j < nbls->jindex[i+1]; j++)
+ {
+ nbld->jjnr[nbld->nrj] = nbls->jjnr[j];
+ nbld->excl_fep[nbld->nrj] = nbls->excl_fep[j];
+ nbld->nrj++;
+ }
+ nbld->nri++;
+ nbld->jindex[nbld->nri] = nbld->nrj;
+ }
+ }
+
+ /* Swap the list pointers */
+ for (th = 0; th < nnbl; th++)
+ {
+ t_nblist *nbl_tmp;
+
+ nbl_tmp = nbl_lists->nbl_fep[th];
+ nbl_lists->nbl_fep[th] = nbs->work[th].nbl_fep;
+ nbs->work[th].nbl_fep = nbl_tmp;
+
+ if (debug)
+ {
+ fprintf(debug, "nbl_fep[%d] nri %4d nrj %4d\n",
+ th,
+ nbl_lists->nbl_fep[th]->nri,
+ nbl_lists->nbl_fep[th]->nrj);
+ }
+ }
+}
+
/* Returns the next ci to be processed by our thread */
static gmx_bool next_ci(const nbnxn_grid_t *grid,
int conv,
const int ci_block_enum = 5;
const int ci_block_denom = 11;
const int ci_block_min_atoms = 16;
- int ci_block;
+ int ci_block;
/* Here we decide how to distribute the blocks over the threads.
* We use prime numbers to try to avoid that the grid size becomes
gmx_bool progBal,
int min_ci_balanced,
int th, int nth,
- nbnxn_pairlist_t *nbl)
+ nbnxn_pairlist_t *nbl,
+ t_nblist *nbl_fep)
{
- int na_cj_2log;
- matrix box;
- real rl2;
- float rbb2;
- int d;
- int ci_b, ci, ci_x, ci_y, ci_xy, cj;
- ivec shp;
- int tx, ty, tz;
- int shift;
- gmx_bool bMakeList;
- real shx, shy, shz;
- int conv_i, cell0_i;
- const float *bb_i, *bbcz_i, *bbcz_j;
- const int *flags_i;
- real bx0, bx1, by0, by1, bz0, bz1;
- real bz1_frac;
- real d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
- int cxf, cxl, cyf, cyf_x, cyl;
- int cx, cy;
- int c0, c1, cs, cf, cl;
- int ndistc;
- int ncpcheck;
- int gridi_flag_shift = 0, gridj_flag_shift = 0;
- unsigned *gridj_flag = NULL;
- int ncj_old_i, ncj_old_j;
+ int na_cj_2log;
+ matrix box;
+ real rl2, rl_fep2 = 0;
+ float rbb2;
+ int d;
+ int ci_b, ci, ci_x, ci_y, ci_xy, cj;
+ ivec shp;
+ int tx, ty, tz;
+ int shift;
+ gmx_bool bMakeList;
+ real shx, shy, shz;
+ int conv_i, cell0_i;
+ const nbnxn_bb_t *bb_i = NULL;
+#ifdef NBNXN_BBXXXX
+ const float *pbb_i = NULL;
+#endif
+ const float *bbcz_i, *bbcz_j;
+ const int *flags_i;
+ real bx0, bx1, by0, by1, bz0, bz1;
+ real bz1_frac;
+ real d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
+ int cxf, cxl, cyf, cyf_x, cyl;
+ int cx, cy;
+ int c0, c1, cs, cf, cl;
+ int ndistc;
+ int ncpcheck;
+ int gridi_flag_shift = 0, gridj_flag_shift = 0;
+ unsigned int *gridj_flag = NULL;
+ int ncj_old_i, ncj_old_j;
nbs_cycle_start(&work->cc[enbsCCsearch]);
rl2 = nbl->rlist*nbl->rlist;
+ if (nbs->bFEP && !nbl->bSimple)
+ {
+ /* Determine an atom-pair list cut-off distance for FEP atom pairs.
+ * We should not simply use rlist, since then we would not have
+ * the small, effective buffering of the NxN lists.
+         * The buffer is an overestimate, but the resulting cost for pairs
+         * beyond rlist is negligible compared to the FEP pairs within rlist.
+ */
+ rl_fep2 = nbl->rlist + effective_buffer_1x1_vs_MxN(gridi, gridj);
+
+ if (debug)
+ {
+ fprintf(debug, "nbl_fep atom-pair rlist %f\n", rl_fep2);
+ }
+ rl_fep2 = rl_fep2*rl_fep2;
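+
+        /* For example (illustrative numbers only): with rlist = 1.0 nm
+         * and a 1x1-vs-MxN buffer of 0.06 nm, this gives
+         * rl_fep2 = 1.06^2 ~= 1.12 nm^2.
+         */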
+ }
+
rbb2 = boundingbox_only_distance2(gridi, gridj, nbl->rlist, nbl->bSimple);
if (debug)
else
{
conv_i = 1;
- bb_i = gridi->bb;
+#ifdef NBNXN_BBXXXX
+ if (gridi->bSimple)
+ {
+ bb_i = gridi->bb;
+ }
+ else
+ {
+ pbb_i = gridi->pbb;
+ }
+#else
+ /* We use the normal bounding box format for both grid types */
+ bb_i = gridi->bb;
+#endif
bbcz_i = gridi->bbcz;
flags_i = gridi->flags;
}
{
if (nbl->bSimple)
{
- bx1 = bb_i[ci*NNBSBB_B+NNBSBB_C+XX];
+ bx1 = bb_i[ci].upper[BB_X];
}
else
{
if (nbl->bSimple)
{
- by0 = bb_i[ci*NNBSBB_B +YY] + shy;
- by1 = bb_i[ci*NNBSBB_B+NNBSBB_C+YY] + shy;
+ by0 = bb_i[ci].lower[BB_Y] + shy;
+ by1 = bb_i[ci].upper[BB_Y] + shy;
}
else
{
if (nbl->bSimple)
{
- bx0 = bb_i[ci*NNBSBB_B +XX] + shx;
- bx1 = bb_i[ci*NNBSBB_B+NNBSBB_C+XX] + shx;
+ bx0 = bb_i[ci].lower[BB_X] + shx;
+ bx1 = bb_i[ci].upper[BB_X] + shx;
}
else
{
if (nbl->bSimple)
{
- new_ci_entry(nbl, cell0_i+ci, shift, flags_i[ci],
- nbl->work);
+ new_ci_entry(nbl, cell0_i+ci, shift, flags_i[ci]);
}
else
{
- new_sci_entry(nbl, cell0_i+ci, shift, flags_i[ci],
- nbl->work);
+ new_sci_entry(nbl, cell0_i+ci, shift);
}
#ifndef NBNXN_SHIFT_BACKWARD
}
else
{
+#ifdef NBNXN_BBXXXX
+ set_icell_bbxxxx_supersub(pbb_i, ci, shx, shy, shz,
+ nbl->work->pbb_ci);
+#else
set_icell_bb_supersub(bb_i, ci, shx, shy, shz,
nbl->work->bb_ci);
+#endif
}
nbs->icell_set_x(cell0_i+ci, shx, shy, shz,
cl = -1;
for (k = c0; k < c1; k++)
{
- if (box_dist2(bx0, bx1, by0, by1, bz0, bz1,
- bb+k*NNBSBB_B) < rl2 &&
+ if (box_dist2(bx0, bx1, by0, by1, bz0, bz1, bb+k) < rl2 &&
k < cf)
{
cf = k;
}
- if (box_dist2(bx0, bx1, by0, by1, bz0, bz1,
- bb+k*NNBSBB_B) < rl2 &&
+ if (box_dist2(bx0, bx1, by0, by1, bz0, bz1, bb+k) < rl2 &&
k > cl)
{
cl = k;
check_subcell_list_space_supersub(nbl, cl-cf+1);
for (cj = cf; cj <= cl; cj++)
{
- make_cluster_list_supersub(nbs, gridi, gridj,
+ make_cluster_list_supersub(gridi, gridj,
nbl, ci, cj,
(gridi == gridj && shift == CENTRAL && ci == cj),
nbat->xstride, nbat->x,
na_cj_2log,
&(nbl->ci[nbl->nci]),
excl);
+
+ if (nbs->bFEP)
+ {
+ make_fep_list(nbs, nbat, nbl,
+ shift == CENTRAL && gridi == gridj,
+ &(nbl->ci[nbl->nci]),
+ gridi, gridj, nbl_fep);
+ }
}
else
{
gridj->na_c_2log,
&(nbl->sci[nbl->nsci]),
excl);
+
+ if (nbs->bFEP)
+ {
+ make_fep_list_supersub(nbs, nbat, nbl,
+ shift == CENTRAL && gridi == gridj,
+ &(nbl->sci[nbl->nsci]),
+ shx, shy, shz,
+ rl_fep2,
+ gridi, gridj, nbl_fep);
+ }
}
/* Close this ci list */
print_nblist_statistics_supersub(debug, nbl, nbs, rlist);
}
+ if (nbs->bFEP)
+ {
+ fprintf(debug, "nbl FEP list pairs: %d\n", nbl_fep->nrj);
+ }
}
}
int nsrc,
const nbnxn_buffer_flags_t *dest)
{
- int s, b;
- const unsigned *flag;
+ int s, b;
+ const unsigned int *flag;
for (s = 0; s < nsrc; s++)
{
int nb_kernel_type,
t_nrnb *nrnb)
{
- nbnxn_grid_t *gridi, *gridj;
- gmx_bool bGPUCPU;
- int nzi, zi, zj0, zj1, zj;
- int nsubpair_max;
- int th;
- int nnbl;
+ nbnxn_grid_t *gridi, *gridj;
+ gmx_bool bGPUCPU;
+ int nzi, zi, zj0, zj1, zj;
+ int nsubpair_max;
+ int th;
+ int nnbl;
nbnxn_pairlist_t **nbl;
- int ci_block;
- gmx_bool CombineNBLists;
- gmx_bool progBal;
- int np_tot, np_noq, np_hlj, nap;
+ int ci_block;
+ gmx_bool CombineNBLists;
+ gmx_bool progBal;
+ int np_tot, np_noq, np_hlj, nap;
/* Check if we are running hybrid GPU + CPU nbnxn mode */
bGPUCPU = (!nbs->grid[0].bSimple && nbl_list->bSimple);
}
else
{
-#ifdef NBNXN_SEARCH_BB_SSE
- nbs->icell_set_x = icell_set_x_supersub_sse8;
+#ifdef NBNXN_SEARCH_BB_SIMD4
+ nbs->icell_set_x = icell_set_x_supersub_simd4;
#else
nbs->icell_set_x = icell_set_x_supersub;
#endif
for (th = 0; th < nnbl; th++)
{
clear_pairlist(nbl[th]);
+
+ if (nbs->bFEP)
+ {
+ clear_pairlist_fep(nbl_list->nbl_fep[th]);
+ }
}
for (zi = 0; zi < nzi; zi++)
ci_block = get_ci_block_size(gridi, nbs->DomDec, nnbl);
}
+    /* With GPU: generate progressively smaller lists for load
+     * balancing, for local interactions only or non-local with 2 zones.
+     */
+ progBal = (LOCAL_I(iloc) || nbs->zones->n <= 2);
+
#pragma omp parallel for num_threads(nnbl) schedule(static)
for (th = 0; th < nnbl; th++)
{
clear_pairlist(nbl[th]);
}
- /* With GPU: generate progressively smaller lists for
- * load balancing for local only or non-local with 2 zones.
- */
- progBal = (LOCAL_I(iloc) || nbs->zones->n <= 2);
-
/* Divide the i super cell equally over the nblists */
nbnxn_make_pairlist_part(nbs, gridi, gridj,
&nbs->work[th], nbat, excl,
nsubpair_max,
progBal, min_ci_balanced,
th, nnbl,
- nbl[th]);
+ nbl[th],
+ nbl_list->nbl_fep[th]);
}
nbs_cycle_stop(&nbs->cc[enbsCCsearch]);
reduce_buffer_flags(nbs, nnbl, &nbat->buffer_flags);
}
+ if (nbs->bFEP)
+ {
+ /* Balance the free-energy lists over all the threads */
+ balance_fep_lists(nbs, nbl_list);
+ }
+
/* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
if (LOCAL_I(iloc))
{