 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#include "nbnxn_search.h"

#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nbnxn_internal.h"
#include "gromacs/mdlib/nbnxn_simd.h"
#include "gromacs/mdlib/nbnxn_util.h"
#include "gromacs/mdlib/ns.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"
#include "gromacs/topology/block.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"

using namespace gmx; // TODO: Remove when this file is moved into gmx namespace

/* We shift the i-particles backward for PBC.
 * This leads to more conditionals than shifting forward.
 * We do this to get more balanced pair lists.
 */
constexpr bool c_pbcShiftBackward = true;

static void nbs_cycle_clear(nbnxn_cycle_t *cc)
{
    for (int i = 0; i < enbsCCnr; i++)
    {
        cc[i].count = 0;
        cc[i].c     = 0;
    }
}

static double Mcyc_av(const nbnxn_cycle_t *cc)
{
    return static_cast<double>(cc->c)*1e-6/cc->count;
}

static void nbs_cycle_print(FILE *fp, const nbnxn_search *nbs)
{
    fprintf(fp, "ns %4d grid %4.1f search %4.1f red.f %5.3f",
            nbs->cc[enbsCCgrid].count,
            Mcyc_av(&nbs->cc[enbsCCgrid]),
            Mcyc_av(&nbs->cc[enbsCCsearch]),
            Mcyc_av(&nbs->cc[enbsCCreducef]));

    if (nbs->work.size() > 1)
    {
        if (nbs->cc[enbsCCcombine].count > 0)
        {
            fprintf(fp, " comb %5.2f",
                    Mcyc_av(&nbs->cc[enbsCCcombine]));
        }
        fprintf(fp, " s. th");
        for (const nbnxn_search_work_t &work : nbs->work)
        {
            fprintf(fp, " %4.1f",
                    Mcyc_av(&work.cc[enbsCCsearch]));
        }
    }
}

/* Layout for the nonbonded NxN pair lists */
enum class NbnxnLayout
{
    NoSimd4x4, // i-cluster size 4, j-cluster size 4
    Simd4xN,   // i-cluster size 4, j-cluster size SIMD width
    Simd2xNN,  // i-cluster size 4, j-cluster size half SIMD width
    Gpu8x8x8   // i-cluster size 8, j-cluster size 8 + super-clustering
};

/* Returns the j-cluster size */
template <NbnxnLayout layout>
static constexpr int jClusterSize()
{
    static_assert(layout == NbnxnLayout::NoSimd4x4 || layout == NbnxnLayout::Simd4xN || layout == NbnxnLayout::Simd2xNN, "Currently jClusterSize only supports CPU layouts");

    return layout == NbnxnLayout::Simd4xN ? GMX_SIMD_REAL_WIDTH : (layout == NbnxnLayout::Simd2xNN ? GMX_SIMD_REAL_WIDTH/2 : NBNXN_CPU_CLUSTER_I_SIZE);
}

/* Returns the j-cluster index given the i-cluster index */
template <int jClusterSize>
static inline int cjFromCi(int ci)
{
    static_assert(jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2 || jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE || jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE*2, "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2)
    {
        return ci << 1;
    }
    else if (jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE)
    {
        return ci;
    }
    else
    {
        return ci >> 1;
    }
}

/* Returns the j-cluster index given the i-cluster index */
template <NbnxnLayout layout>
static inline int cjFromCi(int ci)
{
    constexpr int clusterSize = jClusterSize<layout>();

    return cjFromCi<clusterSize>(ci);
}
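
/* Example, assuming NBNXN_CPU_CLUSTER_I_SIZE = 4: i-cluster 5 covers
 * atoms 20-23. With 8-atom j-clusters that range lies in j-cluster
 * 2 (= 5 >> 1), with 4-atom j-clusters the index is unchanged, and with
 * 2-atom j-clusters the range starts at j-cluster 10 (= 5 << 1).
 */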

/* Returns the nbnxn coordinate data index given the i-cluster index */
template <NbnxnLayout layout>
static inline int xIndexFromCi(int ci)
{
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2 || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE*2, "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize <= NBNXN_CPU_CLUSTER_I_SIZE)
    {
        /* Coordinates are stored packed in groups of 4 */
        return ci*STRIDE_P4;
    }
    else
    {
        /* Coordinates packed in 8, i-cluster size is half the packing width */
        return (ci >> 1)*STRIDE_P8 + (ci & 1)*(c_packX8 >> 1);
    }
}

/* Returns the nbnxn coordinate data index given the j-cluster index */
template <NbnxnLayout layout>
static inline int xIndexFromCj(int cj)
{
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2 || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE*2, "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2)
    {
        /* Coordinates are stored packed in groups of 4 */
        return (cj >> 1)*STRIDE_P4 + (cj & 1)*(c_packX4 >> 1);
    }
    else if (clusterSize == NBNXN_CPU_CLUSTER_I_SIZE)
    {
        /* Coordinates are stored packed in groups of 4 */
        return cj*STRIDE_P4;
    }
    else
    {
        /* Coordinates are stored packed in groups of 8 */
        return cj*STRIDE_P8;
    }
}
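
/* Example of the packed indexing above, assuming c_packX4 = 4 and
 * STRIDE_P4 = c_packX4*DIM = 12: the 2-atom j-cluster cj = 3 holds
 * atoms 6-7, which sit in the upper half of pack 1, so its x-data
 * starts at (3 >> 1)*12 + (3 & 1)*(4 >> 1) = 14 in the xxxxyyyyzzzz
 * layout of that pack.
 */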

gmx_bool nbnxn_kernel_pairlist_simple(int nb_kernel_type)
{
    if (nb_kernel_type == nbnxnkNotSet)
    {
        gmx_fatal(FARGS, "Non-bonded kernel type not set for Verlet-style pair-list.");
    }

    switch (nb_kernel_type)
    {
        case nbnxnk8x8x8_GPU:
        case nbnxnk8x8x8_PlainC:
            return FALSE;

        case nbnxnk4x4_PlainC:
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:
            return TRUE;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
            return FALSE;
    }
}

/* Initializes a single t_nblist data structure used for free-energy interactions */
static void nbnxn_init_pairlist_fep(t_nblist *nl)
{
    nl->type      = GMX_NBLIST_INTERACTION_FREE_ENERGY;
    nl->igeometry = GMX_NBLIST_GEOMETRY_PARTICLE_PARTICLE;
    /* The interaction functions are set in the free energy kernel function */
    nl->maxnri    = 0;
    nl->maxnrj    = 0;
    nl->nri       = 0;
    nl->nrj       = 0;
    nl->iinr      = nullptr;
    nl->gid       = nullptr;
    nl->shift     = nullptr;
    nl->jindex    = nullptr;
    nl->jjnr      = nullptr;
    nl->excl_fep  = nullptr;
}

static void free_nblist(t_nblist *nl)
{
    sfree(nl->iinr);
    sfree(nl->gid);
    sfree(nl->shift);
    sfree(nl->jindex);
    sfree(nl->jjnr);
    sfree(nl->excl_fep);
}

nbnxn_search_work_t::nbnxn_search_work_t() :
    buffer_flags({0, nullptr, 0}),
    nbl_fep(new t_nblist)
{
    nbnxn_init_pairlist_fep(nbl_fep.get());
}

nbnxn_search_work_t::~nbnxn_search_work_t()
{
    sfree(buffer_flags.flag);

    free_nblist(nbl_fep.get());
}

nbnxn_search::nbnxn_search(const ivec               *n_dd_cells,
                           const gmx_domdec_zones_t *zones,
                           gmx_bool                  bFEP,
                           int                       nthread_max) :
    bFEP(bFEP),
    ePBC(epbcNONE), // The correct value will be set during the gridding
    zones(zones),
    work(nthread_max)
{
    // The correct value will be set during the gridding

    int numGrids = 1;
    DomDec       = n_dd_cells != nullptr;
    if (DomDec)
    {
        for (int d = 0; d < DIM; d++)
        {
            if ((*n_dd_cells)[d] > 1)
            {
                /* Each grid matches a DD zone */
                numGrids *= 2;
            }
        }
    }

    grid.resize(numGrids);

    /* Initialize detailed nbsearch cycle counting */
    print_cycles = (getenv("GMX_NBNXN_CYCLE") != nullptr);
    nbs_cycle_clear(cc);
}

nbnxn_search *nbnxn_init_search(const ivec               *n_dd_cells,
                                const gmx_domdec_zones_t *zones,
                                gmx_bool                  bFEP,
                                int                       nthread_max)
{
    return new nbnxn_search(n_dd_cells, zones, bFEP, nthread_max);
}

static void init_buffer_flags(nbnxn_buffer_flags_t *flags,
                              int                   natoms)
{
    flags->nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE;
    if (flags->nflag > flags->flag_nalloc)
    {
        flags->flag_nalloc = over_alloc_large(flags->nflag);
        srenew(flags->flag, flags->flag_nalloc);
    }
    for (int b = 0; b < flags->nflag; b++)
    {
        bitmask_clear(&(flags->flag[b]));
    }
}
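
/* The flag count above is a ceiling division: assuming, for example,
 * NBNXN_BUFFERFLAG_SIZE = 16 and natoms = 100, we get (100 + 15)/16 = 7
 * flag blocks, so the trailing partial block of 4 atoms still gets its
 * own reduction flag.
 */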

/* Determines the cell range along one dimension that
 * the bounding box b0 - b1 sees.
 */
template<int dim>
static void get_cell_range(real b0, real b1,
                           const nbnxn_grid_t &gridj,
                           real d2, real r2, int *cf, int *cl)
{
    real distanceInCells = (b0 - gridj.c0[dim])*gridj.invCellSize[dim];
    *cf                  = std::max(static_cast<int>(distanceInCells), 0);

    while (*cf > 0 &&
           d2 + gmx::square((b0 - gridj.c0[dim]) - (*cf - 1 + 1)*gridj.cellSize[dim]) < r2)
    {
        (*cf)--;
    }

    *cl = std::min(static_cast<int>((b1 - gridj.c0[dim])*gridj.invCellSize[dim]), gridj.numCells[dim] - 1);
    while (*cl < gridj.numCells[dim] - 1 &&
           d2 + gmx::square((*cl + 1)*gridj.cellSize[dim] - (b1 - gridj.c0[dim])) < r2)
    {
        (*cl)++;
    }
}

/* Reference code calculating the distance^2 between two bounding boxes */
static float box_dist2(float bx0, float bx1, float by0,
                       float by1, float bz0, float bz1,
                       const nbnxn_bb_t *bb)
{
    float d2;
    float dl, dh, dm, dm0;

    d2 = 0;

    dl  = bx0 - bb->upper[BB_X];
    dh  = bb->lower[BB_X] - bx1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = by0 - bb->upper[BB_Y];
    dh  = bb->lower[BB_Y] - by1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bz0 - bb->upper[BB_Z];
    dh  = bb->lower[BB_Z] - bz1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
}
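
/* A worked 1D example of the clamping above: for an i-extent [0,1] and a
 * j-extent [3,4] along x, dl = 0 - 4 = -4 and dh = 3 - 1 = 2, so
 * dm0 = max(-4, 2, 0) = 2, the gap along that axis. For overlapping
 * extents both dl and dh are negative and dm0 = 0, so the axis
 * contributes nothing to the squared distance.
 */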

/* Plain C code calculating the distance^2 between two bounding boxes */
static float subc_bb_dist2(int                             si,
                           const nbnxn_bb_t               *bb_i_ci,
                           int                             csj,
                           gmx::ArrayRef<const nbnxn_bb_t> bb_j_all)
{
    const nbnxn_bb_t *bb_i = bb_i_ci + si;
    const nbnxn_bb_t *bb_j = bb_j_all.data() + csj;

    float             d2 = 0;
    float             dl, dh, dm, dm0;

    dl  = bb_i->lower[BB_X] - bb_j->upper[BB_X];
    dh  = bb_j->lower[BB_X] - bb_i->upper[BB_X];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Y] - bb_j->upper[BB_Y];
    dh  = bb_j->lower[BB_Y] - bb_i->upper[BB_Y];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Z] - bb_j->upper[BB_Z];
    dh  = bb_j->lower[BB_Z] - bb_i->upper[BB_Z];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
}

#if NBNXN_SEARCH_BB_SIMD4

/* 4-wide SIMD code for bb distance for bb format xyz0 */
static float subc_bb_dist2_simd4(int                             si,
                                 const nbnxn_bb_t               *bb_i_ci,
                                 int                             csj,
                                 gmx::ArrayRef<const nbnxn_bb_t> bb_j_all)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    Simd4Float bb_i_S0, bb_i_S1;
    Simd4Float bb_j_S0, bb_j_S1;
    Simd4Float dl_S;
    Simd4Float dh_S;
    Simd4Float dm_S;
    Simd4Float dm0_S;

    bb_i_S0 = load4(&bb_i_ci[si].lower[0]);
    bb_i_S1 = load4(&bb_i_ci[si].upper[0]);
    bb_j_S0 = load4(&bb_j_all[csj].lower[0]);
    bb_j_S1 = load4(&bb_j_all[csj].upper[0]);

    dl_S = bb_i_S0 - bb_j_S1;
    dh_S = bb_j_S0 - bb_i_S1;

    dm_S  = max(dl_S, dh_S);
    dm0_S = max(dm_S, simd4SetZeroF());

    return dotProduct(dm0_S, dm0_S);
}

/* Calculate bb bounding distances of bb_i[si,...,si+3] and store them in d2 */
#define SUBC_BB_DIST2_SIMD4_XXXX_INNER(si, bb_i, d2) \
    {                                                \
        int        shi;                              \
                                                     \
        Simd4Float dx_0, dy_0, dz_0;                 \
        Simd4Float dx_1, dy_1, dz_1;                 \
                                                     \
        Simd4Float mx, my, mz;                       \
        Simd4Float m0x, m0y, m0z;                    \
                                                     \
        Simd4Float d2x, d2y, d2z;                    \
        Simd4Float d2s, d2t;                         \
                                                     \
        shi = (si)*NNBSBB_D*DIM;                     \
                                                     \
        xi_l = load4((bb_i)+shi+0*STRIDE_PBB);       \
        yi_l = load4((bb_i)+shi+1*STRIDE_PBB);       \
        zi_l = load4((bb_i)+shi+2*STRIDE_PBB);       \
        xi_h = load4((bb_i)+shi+3*STRIDE_PBB);       \
        yi_h = load4((bb_i)+shi+4*STRIDE_PBB);       \
        zi_h = load4((bb_i)+shi+5*STRIDE_PBB);       \
                                                     \
        dx_0 = xi_l - xj_h;                          \
        dy_0 = yi_l - yj_h;                          \
        dz_0 = zi_l - zj_h;                          \
                                                     \
        dx_1 = xj_l - xi_h;                          \
        dy_1 = yj_l - yi_h;                          \
        dz_1 = zj_l - zi_h;                          \
                                                     \
        mx   = max(dx_0, dx_1);                      \
        my   = max(dy_0, dy_1);                      \
        mz   = max(dz_0, dz_1);                      \
                                                     \
        m0x  = max(mx, zero);                        \
        m0y  = max(my, zero);                        \
        m0z  = max(mz, zero);                        \
                                                     \
        d2x  = m0x*m0x;                              \
        d2y  = m0y*m0y;                              \
        d2z  = m0z*m0z;                              \
                                                     \
        d2s  = d2x + d2y;                            \
        d2t  = d2s + d2z;                            \
                                                     \
        store4((d2)+(si), d2t);                      \
    }

/* 4-wide SIMD code for nsi bb distances for bb format xxxxyyyyzzzz */
static void subc_bb_dist2_simd4_xxxx(const float *bb_j,
                                     int nsi, const float *bb_i,
                                     float *d2)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    Simd4Float xj_l, yj_l, zj_l;
    Simd4Float xj_h, yj_h, zj_h;
    Simd4Float xi_l, yi_l, zi_l;
    Simd4Float xi_h, yi_h, zi_h;

    Simd4Float zero = setZero();

    xj_l = Simd4Float(bb_j[0*STRIDE_PBB]);
    yj_l = Simd4Float(bb_j[1*STRIDE_PBB]);
    zj_l = Simd4Float(bb_j[2*STRIDE_PBB]);
    xj_h = Simd4Float(bb_j[3*STRIDE_PBB]);
    yj_h = Simd4Float(bb_j[4*STRIDE_PBB]);
    zj_h = Simd4Float(bb_j[5*STRIDE_PBB]);

    /* Here we "loop" over si (0,STRIDE_PBB) from 0 to nsi with step STRIDE_PBB.
     * But as we know the number of iterations is 1 or 2, we unroll manually.
     */
    SUBC_BB_DIST2_SIMD4_XXXX_INNER(0, bb_i, d2);
    if (STRIDE_PBB < nsi)
    {
        SUBC_BB_DIST2_SIMD4_XXXX_INNER(STRIDE_PBB, bb_i, d2);
    }
}

#endif /* NBNXN_SEARCH_BB_SIMD4 */
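
/* Sketch of the pbb (xxxx) bounding-box layout used above, assuming
 * STRIDE_PBB = 4: the six box components of four consecutive clusters
 * are transposed into rows
 *   xlo xlo xlo xlo | ylo x4 | zlo x4 | xhi x4 | yhi x4 | zhi x4
 * which is why the loads step through offsets 0*STRIDE_PBB..5*STRIDE_PBB
 * and one SIMD4 operation processes four i-clusters at once.
 */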

/* Returns whether any atom pair from two clusters is within distance sqrt(rlist2) */
static inline gmx_bool
clusterpair_in_range(const nbnxn_list_work_t *work,
                     int si,
                     int csj, int stride, const real *x_j,
                     real rlist2)
{
#if !GMX_SIMD4_HAVE_REAL

    /* Plain C version.
     * All coordinates are stored as xyzxyz...
     */

    const real *x_i = work->x_ci;

    for (int i = 0; i < c_nbnxnGpuClusterSize; i++)
    {
        int i0 = (si*c_nbnxnGpuClusterSize + i)*DIM;
        for (int j = 0; j < c_nbnxnGpuClusterSize; j++)
        {
            int j0 = (csj*c_nbnxnGpuClusterSize + j)*stride;

            real d2 = gmx::square(x_i[i0 ] - x_j[j0 ]) + gmx::square(x_i[i0+1] - x_j[j0+1]) + gmx::square(x_i[i0+2] - x_j[j0+2]);

            if (d2 < rlist2)
            {
                return TRUE;
            }
        }
    }

    return FALSE;

#else /* !GMX_SIMD4_HAVE_REAL */

    /* 4-wide SIMD version.
     * The coordinates x_i are stored as xxxxyyyy..., x_j is stored xyzxyz...
     * Using 8-wide AVX(2) is not faster on Intel Sandy Bridge and Haswell.
     */
    static_assert(c_nbnxnGpuClusterSize == 8 || c_nbnxnGpuClusterSize == 4,
                  "A cluster is hard-coded to 4/8 atoms.");

    Simd4Real   rc2_S      = Simd4Real(rlist2);

    const real *x_i        = work->x_ci_simd;

    int         dim_stride = c_nbnxnGpuClusterSize*DIM;
    Simd4Real   ix_S0      = load4(x_i + si*dim_stride + 0*GMX_SIMD4_WIDTH);
    Simd4Real   iy_S0      = load4(x_i + si*dim_stride + 1*GMX_SIMD4_WIDTH);
    Simd4Real   iz_S0      = load4(x_i + si*dim_stride + 2*GMX_SIMD4_WIDTH);

    Simd4Real   ix_S1, iy_S1, iz_S1;
    if (c_nbnxnGpuClusterSize == 8)
    {
        ix_S1 = load4(x_i + si*dim_stride + 3*GMX_SIMD4_WIDTH);
        iy_S1 = load4(x_i + si*dim_stride + 4*GMX_SIMD4_WIDTH);
        iz_S1 = load4(x_i + si*dim_stride + 5*GMX_SIMD4_WIDTH);
    }
    /* We loop from the outer to the inner particles to maximize
     * the chance that we find a pair in range quickly and return.
     */
    int j0 = csj*c_nbnxnGpuClusterSize;
    int j1 = j0 + c_nbnxnGpuClusterSize - 1;
    while (j1 > j0)
    {
        Simd4Real jx0_S, jy0_S, jz0_S;
        Simd4Real jx1_S, jy1_S, jz1_S;

        Simd4Real dx_S0, dy_S0, dz_S0;
        Simd4Real dx_S1, dy_S1, dz_S1;
        Simd4Real dx_S2, dy_S2, dz_S2;
        Simd4Real dx_S3, dy_S3, dz_S3;

        Simd4Real rsq_S0, rsq_S1, rsq_S2, rsq_S3;

        Simd4Bool wco_S0, wco_S1, wco_S2, wco_S3;
        Simd4Bool wco_any_S01, wco_any_S23, wco_any_S;

        jx0_S = Simd4Real(x_j[j0*stride+0]);
        jy0_S = Simd4Real(x_j[j0*stride+1]);
        jz0_S = Simd4Real(x_j[j0*stride+2]);

        jx1_S = Simd4Real(x_j[j1*stride+0]);
        jy1_S = Simd4Real(x_j[j1*stride+1]);
        jz1_S = Simd4Real(x_j[j1*stride+2]);

        /* Calculate distance */
        dx_S0 = ix_S0 - jx0_S;
        dy_S0 = iy_S0 - jy0_S;
        dz_S0 = iz_S0 - jz0_S;
        dx_S2 = ix_S0 - jx1_S;
        dy_S2 = iy_S0 - jy1_S;
        dz_S2 = iz_S0 - jz1_S;
        if (c_nbnxnGpuClusterSize == 8)
        {
            dx_S1 = ix_S1 - jx0_S;
            dy_S1 = iy_S1 - jy0_S;
            dz_S1 = iz_S1 - jz0_S;
            dx_S3 = ix_S1 - jx1_S;
            dy_S3 = iy_S1 - jy1_S;
            dz_S3 = iz_S1 - jz1_S;
        }

        /* rsq = dx*dx+dy*dy+dz*dz */
        rsq_S0 = norm2(dx_S0, dy_S0, dz_S0);
        rsq_S2 = norm2(dx_S2, dy_S2, dz_S2);
        if (c_nbnxnGpuClusterSize == 8)
        {
            rsq_S1 = norm2(dx_S1, dy_S1, dz_S1);
            rsq_S3 = norm2(dx_S3, dy_S3, dz_S3);
        }

        wco_S0 = (rsq_S0 < rc2_S);
        wco_S2 = (rsq_S2 < rc2_S);
        if (c_nbnxnGpuClusterSize == 8)
        {
            wco_S1 = (rsq_S1 < rc2_S);
            wco_S3 = (rsq_S3 < rc2_S);
        }
        if (c_nbnxnGpuClusterSize == 8)
        {
            wco_any_S01 = wco_S0 || wco_S1;
            wco_any_S23 = wco_S2 || wco_S3;
            wco_any_S   = wco_any_S01 || wco_any_S23;
        }
        else
        {
            wco_any_S = wco_S0 || wco_S2;
        }

        if (anyTrue(wco_any_S))
        {
            return TRUE;
        }

        j0++;
        j1--;
    }

    return FALSE;

#endif /* !GMX_SIMD4_HAVE_REAL */
}

/* Returns the j-cluster index for index cjIndex in a cj list */
static inline int nblCj(const nbnxn_cj_t *cjList, int cjIndex)
{
    return cjList[cjIndex].cj;
}

/* Returns the j-cluster index for index cjIndex in a cj4 list */
static inline int nblCj(const nbnxn_cj4_t *cj4List, int cjIndex)
{
    return cj4List[cjIndex/c_nbnxnGpuJgroupSize].cj[cjIndex & (c_nbnxnGpuJgroupSize - 1)];
}
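
/* Indexing example, assuming c_nbnxnGpuJgroupSize = 4: the linear
 * j-cluster index 10 lives in cj4 group 10/4 = 2 at slot 10 & 3 = 2,
 * i.e. cj4List[2].cj[2]. The bit masking is valid because the group
 * size is a power of two.
 */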

/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
static unsigned int nbl_imask0(const nbnxn_pairlist_t *nbl, int cj_ind)
{
    return nbl->cj4[cj_ind/c_nbnxnGpuJgroupSize].imei[0].imask;
}

/* Ensures there is enough space for extra exclusion masks */
static void check_excl_space(nbnxn_pairlist_t *nbl, int extra)
{
    if (nbl->nexcl+extra > nbl->excl_nalloc)
    {
        nbl->excl_nalloc = over_alloc_small(nbl->nexcl+extra);
        nbnxn_realloc_void(reinterpret_cast<void **>(&nbl->excl),
                           nbl->nexcl*sizeof(*nbl->excl),
                           nbl->excl_nalloc*sizeof(*nbl->excl),
                           nbl->alloc, nbl->free);
    }
}

/* Ensures there is enough space for maxNumExtraClusters extra j-clusters in the list */
static void check_cell_list_space_simple(nbnxn_pairlist_t *nbl,
                                         int               maxNumExtraClusters)
{
    int cj_max;

    cj_max = nbl->ncj + maxNumExtraClusters;

    if (cj_max > nbl->cj_nalloc)
    {
        nbl->cj_nalloc = over_alloc_small(cj_max);
        nbnxn_realloc_void(reinterpret_cast<void **>(&nbl->cj),
                           nbl->ncj*sizeof(*nbl->cj),
                           nbl->cj_nalloc*sizeof(*nbl->cj),
                           nbl->alloc, nbl->free);

        nbnxn_realloc_void(reinterpret_cast<void **>(&nbl->cjOuter),
                           nbl->ncj*sizeof(*nbl->cjOuter),
                           nbl->cj_nalloc*sizeof(*nbl->cjOuter),
                           nbl->alloc, nbl->free);
    }
}

/* Ensures there is enough space for ncell extra j-clusters in the list */
static void check_cell_list_space_supersub(nbnxn_pairlist_t *nbl,
                                           int               ncell)
{
    int ncj4_max, w;

    /* We can have maximally nsupercell*c_gpuNumClusterPerCell sj lists */
    /* We can store 4 j-subcell - i-supercell pairs in one struct.
     * Since we round down, we need one extra entry.
     */
    ncj4_max = ((nbl->work->cj_ind + ncell*c_gpuNumClusterPerCell + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize);

    if (ncj4_max > nbl->cj4_nalloc)
    {
        nbl->cj4_nalloc = over_alloc_small(ncj4_max);
        nbnxn_realloc_void(reinterpret_cast<void **>(&nbl->cj4),
                           nbl->work->cj4_init*sizeof(*nbl->cj4),
                           nbl->cj4_nalloc*sizeof(*nbl->cj4),
                           nbl->alloc, nbl->free);
    }

    if (ncj4_max > nbl->work->cj4_init)
    {
        for (int j4 = nbl->work->cj4_init; j4 < ncj4_max; j4++)
        {
            /* No i-subcells and no excl's in the list initially */
            for (w = 0; w < c_nbnxnGpuClusterpairSplit; w++)
            {
                nbl->cj4[j4].imei[w].imask    = 0U;
                nbl->cj4[j4].imei[w].excl_ind = 0;
            }
        }
        nbl->work->cj4_init = ncj4_max;
    }
}

/* Set all excl masks for one GPU warp to no exclusions */
static void set_no_excls(nbnxn_excl_t *excl)
{
    for (int t = 0; t < c_nbnxnGpuExclSize; t++)
    {
        /* Turn all interaction bits on */
        excl->pair[t] = NBNXN_INTERACTION_MASK_ALL;
    }
}

/* Initializes a single nbnxn_pairlist_t data structure */
static void nbnxn_init_pairlist(nbnxn_pairlist_t *nbl,
                                gmx_bool          bSimple,
                                nbnxn_alloc_t    *alloc,
                                nbnxn_free_t     *free)
{
    if (alloc == nullptr)
    {
        nbl->alloc = nbnxn_alloc_aligned;
    }
    else
    {
        nbl->alloc = alloc;
    }
    if (free == nullptr)
    {
        nbl->free = nbnxn_free_aligned;
    }
    else
    {
        nbl->free = free;
    }

    nbl->bSimple    = bSimple;
    /* We need one element extra in sj, so alloc initially with 1 */
    nbl->cj4_nalloc = 0;

    if (!nbl->bSimple)
    {
        GMX_ASSERT(c_nbnxnGpuNumClusterPerSupercluster == c_gpuNumClusterPerCell, "The search code assumes that a super-cluster matches a search grid cell");

        GMX_ASSERT(sizeof(nbl->cj4[0].imei[0].imask)*8 >= c_nbnxnGpuJgroupSize*c_gpuNumClusterPerCell, "The i super-cluster cluster interaction mask does not contain a sufficient number of bits");
        GMX_ASSERT(sizeof(nbl->excl[0])*8 >= c_nbnxnGpuJgroupSize*c_gpuNumClusterPerCell, "The GPU exclusion mask does not contain a sufficient number of bits");

        nbl->excl        = nullptr;
        nbl->excl_nalloc = 0;
        nbl->nexcl       = 0;
        check_excl_space(nbl, 1);
        nbl->nexcl       = 1;
        set_no_excls(&nbl->excl[0]);
    }

    snew(nbl->work, 1);
    if (nbl->bSimple)
    {
        snew_aligned(nbl->work->bb_ci, 1, NBNXN_SEARCH_BB_MEM_ALIGN);
    }
    else
    {
#if NBNXN_BBXXXX
        snew_aligned(nbl->work->pbb_ci, c_gpuNumClusterPerCell/STRIDE_PBB*NNBSBB_XXXX, NBNXN_SEARCH_BB_MEM_ALIGN);
#else
        snew_aligned(nbl->work->bb_ci, c_gpuNumClusterPerCell, NBNXN_SEARCH_BB_MEM_ALIGN);
#endif
    }
    int gpu_clusterpair_nc = c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize*DIM;
    snew(nbl->work->x_ci, gpu_clusterpair_nc);
#if GMX_SIMD
    snew_aligned(nbl->work->x_ci_simd,
                 std::max(NBNXN_CPU_CLUSTER_I_SIZE*DIM*GMX_SIMD_REAL_WIDTH,
                          gpu_clusterpair_nc),
                 GMX_SIMD_REAL_WIDTH);
#endif
    snew_aligned(nbl->work->d2, c_gpuNumClusterPerCell, NBNXN_SEARCH_BB_MEM_ALIGN);

    nbl->work->sort            = nullptr;
    nbl->work->sort_nalloc     = 0;
    nbl->work->sci_sort        = nullptr;
    nbl->work->sci_sort_nalloc = 0;
}

void nbnxn_init_pairlist_set(nbnxn_pairlist_set_t *nbl_list,
                             gmx_bool bSimple, gmx_bool bCombined,
                             nbnxn_alloc_t *alloc,
                             nbnxn_free_t  *free)
{
    nbl_list->bSimple   = bSimple;
    nbl_list->bCombined = bCombined;

    nbl_list->nnbl = gmx_omp_nthreads_get(emntNonbonded);

    if (!nbl_list->bCombined &&
        nbl_list->nnbl > NBNXN_BUFFERFLAG_MAX_THREADS)
    {
        gmx_fatal(FARGS, "%d OpenMP threads were requested. Since the non-bonded force buffer reduction is prohibitively slow with more than %d threads, we do not allow this. Use %d or less OpenMP threads.",
                  nbl_list->nnbl, NBNXN_BUFFERFLAG_MAX_THREADS, NBNXN_BUFFERFLAG_MAX_THREADS);
    }

    snew(nbl_list->nbl, nbl_list->nnbl);
    if (bSimple && nbl_list->nnbl > 1)
    {
        snew(nbl_list->nbl_work, nbl_list->nnbl);
    }
    snew(nbl_list->nbl_fep, nbl_list->nnbl);
    /* Execute in order to avoid memory interleaving between threads */
#pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
    for (int i = 0; i < nbl_list->nnbl; i++)
    {
        try
        {
            /* Allocate the nblist data structure locally on each thread
             * to optimize memory access for NUMA architectures.
             */
            snew(nbl_list->nbl[i], 1);

            /* Only list 0 is used on the GPU, use normal allocation for i>0 */
            if (!bSimple && i == 0)
            {
                nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, alloc, free);
            }
            else
            {
                nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, nullptr, nullptr);
                if (bSimple && nbl_list->nnbl > 1)
                {
                    snew(nbl_list->nbl_work[i], 1);
                    nbnxn_init_pairlist(nbl_list->nbl_work[i], nbl_list->bSimple, nullptr, nullptr);
                }
            }

            snew(nbl_list->nbl_fep[i], 1);
            nbnxn_init_pairlist_fep(nbl_list->nbl_fep[i]);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
}

/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics_simple(FILE *fp, const nbnxn_pairlist_t *nbl,
                                           const nbnxn_search *nbs, real rl)
{
    const nbnxn_grid_t *grid;
    int                 cs[SHIFTS];
    int                 npexcl;

    grid = &nbs->grid[0];

    fprintf(fp, "nbl nci %d ncj %d\n",
            nbl->nci, nbl->ncjInUse);
    fprintf(fp, "nbl na_sc %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_sc, rl, nbl->ncjInUse, nbl->ncjInUse/static_cast<double>(grid->nc),
            nbl->ncjInUse/static_cast<double>(grid->nc)*grid->na_sc,
            nbl->ncjInUse/static_cast<double>(grid->nc)*grid->na_sc/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nc*grid->na_sc/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    fprintf(fp, "nbl average j cell list length %.1f\n",
            0.25*nbl->ncjInUse/static_cast<double>(std::max(nbl->nci, 1)));

    for (int s = 0; s < SHIFTS; s++)
    {
        cs[s] = 0;
    }
    npexcl = 0;
    for (int i = 0; i < nbl->nci; i++)
    {
        cs[nbl->ci[i].shift & NBNXN_CI_SHIFT] +=
            nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start;

        int j = nbl->ci[i].cj_ind_start;
        while (j < nbl->ci[i].cj_ind_end &&
               nbl->cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
        {
            npexcl++;
            j++;
        }
    }
    fprintf(fp, "nbl cell pairs, total: %d excl: %d %.1f%%\n",
            nbl->ncj, npexcl, 100*npexcl/static_cast<double>(std::max(nbl->ncj, 1)));
    for (int s = 0; s < SHIFTS; s++)
    {
        if (cs[s] > 0)
        {
            fprintf(fp, "nbl shift %2d ncj %3d\n", s, cs[s]);
        }
    }
}

/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics_supersub(FILE *fp, const nbnxn_pairlist_t *nbl,
                                             const nbnxn_search *nbs, real rl)
{
    const nbnxn_grid_t *grid;
    int                 c[c_gpuNumClusterPerCell + 1];
    double              sum_nsp, sum_nsp2;
    int                 nsp_max;

    /* This code only produces correct statistics with domain decomposition */
    grid = &nbs->grid[0];

    fprintf(fp, "nbl nsci %d ncj4 %d nsi %d excl4 %d\n",
            nbl->nsci, nbl->ncj4, nbl->nci_tot, nbl->nexcl);
    fprintf(fp, "nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_ci, rl, nbl->nci_tot, nbl->nci_tot/static_cast<double>(grid->nsubc_tot),
            nbl->nci_tot/static_cast<double>(grid->nsubc_tot)*grid->na_c,
            nbl->nci_tot/static_cast<double>(grid->nsubc_tot)*grid->na_c/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nsubc_tot*grid->na_c/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    sum_nsp  = 0;
    sum_nsp2 = 0;
    nsp_max  = 0;
    for (int si = 0; si <= c_gpuNumClusterPerCell; si++)
    {
        c[si] = 0;
    }
    for (int i = 0; i < nbl->nsci; i++)
    {
        int nsp = 0;

        for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
        {
            for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
            {
                int b = 0;
                for (int si = 0; si < c_gpuNumClusterPerCell; si++)
                {
                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))
                    {
                        b++;
                    }
                }
                nsp += b;
                c[b]++;
            }
        }
        sum_nsp  += nsp;
        sum_nsp2 += nsp*nsp;
        nsp_max   = std::max(nsp_max, nsp);
    }
    if (nbl->nsci > 0)
    {
        sum_nsp  /= nbl->nsci;
        sum_nsp2 /= nbl->nsci;
    }
    fprintf(fp, "nbl #cluster-pairs: av %.1f stddev %.1f max %d\n",
            sum_nsp, std::sqrt(sum_nsp2 - sum_nsp*sum_nsp), nsp_max);

    for (int b = 0; b <= c_gpuNumClusterPerCell; b++)
    {
        fprintf(fp, "nbl j-list #i-subcell %d %7d %4.1f\n",
                b, c[b],
                100.0*c[b]/static_cast<double>(nbl->ncj4*c_nbnxnGpuJgroupSize));
    }
}

/* Returns a pointer (via *excl) to the exclusion mask for cj4-unit cj4, warp warp */
static void low_get_nbl_exclusions(nbnxn_pairlist_t *nbl, int cj4,
                                   int warp, nbnxn_excl_t **excl)
{
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
    {
        /* No exclusions set, make a new list entry */
        nbl->cj4[cj4].imei[warp].excl_ind = nbl->nexcl;
        nbl->nexcl++;
        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
        set_no_excls(*excl);
    }
    else
    {
        /* We already have some exclusions, new ones can be added to the list */
        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
    }
}

/* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp,
 * generates a new element and allocates extra memory, if necessary.
 */
static void get_nbl_exclusions_1(nbnxn_pairlist_t *nbl, int cj4,
                                 int warp, nbnxn_excl_t **excl)
{
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
    {
        /* We need to make a new list entry, check if we have space */
        check_excl_space(nbl, 1);
    }
    low_get_nbl_exclusions(nbl, cj4, warp, excl);
}

/* Returns pointers to the exclusion masks for cj4-unit cj4 for both warps,
 * generates a new element and allocates extra memory, if necessary.
 */
static void get_nbl_exclusions_2(nbnxn_pairlist_t *nbl, int cj4,
                                 nbnxn_excl_t **excl_w0,
                                 nbnxn_excl_t **excl_w1)
{
    /* Check for space we might need */
    check_excl_space(nbl, 2);

    low_get_nbl_exclusions(nbl, cj4, 0, excl_w0);
    low_get_nbl_exclusions(nbl, cj4, 1, excl_w1);
}

/* Sets the self exclusions i=j and pair exclusions i>j */
static void set_self_and_newton_excls_supersub(nbnxn_pairlist_t *nbl,
                                               int cj4_ind, int sj_offset,
                                               int i_cluster_in_cell)
{
    nbnxn_excl_t *excl[c_nbnxnGpuClusterpairSplit];

    /* Here we only set the self and double pair exclusions */

    static_assert(c_nbnxnGpuClusterpairSplit == 2, "");

    get_nbl_exclusions_2(nbl, cj4_ind, &excl[0], &excl[1]);

    /* Only minor < major bits set */
    for (int ej = 0; ej < nbl->na_ci; ej++)
    {
        int w = (ej >> 2);
        for (int ei = ej; ei < nbl->na_ci; ei++)
        {
            excl[w]->pair[(ej & (c_nbnxnGpuJgroupSize-1))*nbl->na_ci + ei] &=
                ~(1U << (sj_offset*c_gpuNumClusterPerCell + i_cluster_in_cell));
        }
    }
}

/* Returns a diagonal or off-diagonal interaction mask for plain C lists */
static unsigned int get_imask(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=2 */
gmx_unused static unsigned int get_imask_simd_j2(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci*2 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_0 :
            (rdiag && ci*2+1 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_1 :
             NBNXN_INTERACTION_MASK_ALL));
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=4 */
gmx_unused static unsigned int get_imask_simd_j4(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=8 */
gmx_unused static unsigned int get_imask_simd_j8(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj*2 ? NBNXN_INTERACTION_MASK_DIAG_J8_0 :
            (rdiag && ci == cj*2+1 ? NBNXN_INTERACTION_MASK_DIAG_J8_1 :
             NBNXN_INTERACTION_MASK_ALL));
}
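
/* Example for the j8 masks above (assuming 4-atom i-clusters and 8-atom
 * j-clusters): i-cluster 5 covers atoms 20-23, the upper half of j-cluster
 * 2 (atoms 16-23), so ci == cj*2 + 1 holds and the _J8_1 diagonal mask is
 * used; i-cluster 4, the lower half, would satisfy ci == cj*2 and use the
 * _J8_0 mask.
 */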

#if GMX_SIMD_REAL_WIDTH == 2
#define get_imask_simd_4xn  get_imask_simd_j2
#endif

#if GMX_SIMD_REAL_WIDTH == 4
#define get_imask_simd_4xn  get_imask_simd_j4
#endif

#if GMX_SIMD_REAL_WIDTH == 8
#define get_imask_simd_4xn  get_imask_simd_j8
#define get_imask_simd_2xnn get_imask_simd_j4
#endif

#if GMX_SIMD_REAL_WIDTH == 16
#define get_imask_simd_2xnn get_imask_simd_j8
#endif

/* Plain C code for checking and adding cluster-pairs to the list.
 *
 * \param[in]     gridj               The j-grid
 * \param[in,out] nbl                 The pair-list to store the cluster pairs in
 * \param[in]     icluster            The index of the i-cluster
 * \param[in]     jclusterFirst       The first cluster in the j-range
 * \param[in]     jclusterLast        The last cluster in the j-range
 * \param[in]     excludeSubDiagonal  Exclude atom pairs with i-index > j-index
 * \param[in]     x_j                 Coordinates for the j-atoms, in xyz format
 * \param[in]     rlist2              The squared list cut-off
 * \param[in]     rbb2                The squared cut-off for putting cluster-pairs in the list based on bounding box distance only
 * \param[in,out] numDistanceChecks   The number of distance checks performed
 */
static void
makeClusterListSimple(const nbnxn_grid_t       *gridj,
                      nbnxn_pairlist_t         *nbl,
                      int                       icluster,
                      int                       jclusterFirst,
                      int                       jclusterLast,
                      bool                      excludeSubDiagonal,
                      const real * gmx_restrict x_j,
                      real                      rlist2,
                      float                     rbb2,
                      int * gmx_restrict        numDistanceChecks)
{
    const nbnxn_bb_t * gmx_restrict bb_ci = nbl->work->bb_ci;
    const real * gmx_restrict       x_ci  = nbl->work->x_ci;

    gmx_bool InRange;

    InRange = FALSE;
    while (!InRange && jclusterFirst <= jclusterLast)
    {
        real d2 = subc_bb_dist2(0, bb_ci, jclusterFirst, gridj->bb);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rlist2)
        {
            int cjf_gl = gridj->cell0 + jclusterFirst;
            for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
            {
                for (int j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
                {
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rlist2);
                }
            }
            *numDistanceChecks += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
        }
        if (!InRange)
        {
            jclusterFirst++;
        }
    }
    if (!InRange)
    {
        return;
    }

    InRange = FALSE;
    while (!InRange && jclusterLast > jclusterFirst)
    {
        real d2 = subc_bb_dist2(0, bb_ci, jclusterLast, gridj->bb);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rlist2)
        {
            int cjl_gl = gridj->cell0 + jclusterLast;
            for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
            {
                for (int j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
                {
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rlist2);
                }
            }
            *numDistanceChecks += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
        }
        if (!InRange)
        {
            jclusterLast--;
        }
    }

    if (jclusterFirst <= jclusterLast)
    {
        for (int jcluster = jclusterFirst; jcluster <= jclusterLast; jcluster++)
        {
            /* Store cj and the interaction mask */
            nbl->cj[nbl->ncj].cj   = gridj->cell0 + jcluster;
            nbl->cj[nbl->ncj].excl = get_imask(excludeSubDiagonal, icluster, jcluster);
            nbl->ncj++;
        }
        /* Increase the closing index in i super-cell list */
        nbl->ci[nbl->nci].cj_ind_end = nbl->ncj;
    }
}

#ifdef GMX_NBNXN_SIMD_4XN
#include "gromacs/mdlib/nbnxn_search_simd_4xn.h"
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
#include "gromacs/mdlib/nbnxn_search_simd_2xnn.h"
#endif

/* Plain C or SIMD4 code for making a pair list of super-cell sci vs scj.
 * Checks bounding box distances and possibly atom pair distances.
 */
static void make_cluster_list_supersub(const nbnxn_grid_t *gridi,
                                       const nbnxn_grid_t *gridj,
                                       nbnxn_pairlist_t *nbl,
                                       int sci, int scj,
                                       gmx_bool sci_equals_scj,
                                       int stride, const real *x,
                                       real rlist2, float rbb2,
                                       int *numDistanceChecks)
{
    nbnxn_list_work_t *work = nbl->work;

#if NBNXN_BBXXXX
    const float       *pbb_ci = work->pbb_ci;
#else
    const nbnxn_bb_t  *bb_ci  = work->bb_ci;
#endif

    assert(c_nbnxnGpuClusterSize == gridi->na_c);
    assert(c_nbnxnGpuClusterSize == gridj->na_c);

    /* We generate the pairlist mainly based on bounding-box distances
     * and do atom pair distance based pruning on the GPU.
     * Only if a j-group contains a single cluster-pair, we try to prune
     * that pair based on atom distances on the CPU to avoid empty j-groups.
     */
#define PRUNE_LIST_CPU_ONE 1
#define PRUNE_LIST_CPU_ALL 0

#if PRUNE_LIST_CPU_ONE
    int    ci_last = -1;
#endif

    float *d2l = work->d2;

    for (int subc = 0; subc < gridj->nsubc[scj]; subc++)
    {
        int          cj4_ind   = nbl->work->cj_ind/c_nbnxnGpuJgroupSize;
        int          cj_offset = nbl->work->cj_ind - cj4_ind*c_nbnxnGpuJgroupSize;
        nbnxn_cj4_t *cj4       = &nbl->cj4[cj4_ind];

        int          cj        = scj*c_gpuNumClusterPerCell + subc;

        int          cj_gl     = gridj->cell0*c_gpuNumClusterPerCell + cj;

        /* Initialize this j-subcell i-subcell list */
        cj4->cj[cj_offset] = cj_gl;

        int ci1;
        if (sci_equals_scj)
        {
            ci1 = subc + 1;
        }
        else
        {
            ci1 = gridi->nsubc[sci];
        }

#if NBNXN_BBXXXX
        /* Determine all ci1 bb distances in one call with SIMD4 */
        subc_bb_dist2_simd4_xxxx(gridj->pbb.data() + (cj >> STRIDE_PBB_2LOG)*NNBSBB_XXXX + (cj & (STRIDE_PBB-1)),
                                 ci1, pbb_ci, d2l);
        *numDistanceChecks += c_nbnxnGpuClusterSize*2;
#endif

        int          npair = 0;
        unsigned int imask = 0;
        /* We use a fixed upper-bound instead of ci1 to help optimization */
        for (int ci = 0; ci < c_gpuNumClusterPerCell; ci++)
        {
            if (ci == ci1)
            {
                break;
            }

#if !NBNXN_BBXXXX
            /* Determine the bb distance between ci and cj */
            d2l[ci]             = subc_bb_dist2(ci, bb_ci, cj, gridj->bb);
            *numDistanceChecks += 2;
#endif
            float d2 = d2l[ci];

#if PRUNE_LIST_CPU_ALL
            /* Check if the distance is within the distance where
             * we use only the bounding box distance rbb,
             * or within the cut-off and there is at least one atom pair
             * within the cut-off. This check is very costly.
             */
            *numDistanceChecks += c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize;
            if (d2 < rbb2 ||
                (d2 < rlist2 &&
                 clusterpair_in_range(work, ci, cj_gl, stride, x, rlist2)))
#else
            /* Check if the distance between the two bounding boxes
             * is within the pair-list cut-off.
             */
            if (d2 < rlist2)
#endif
            {
                /* Flag this i-subcell to be taken into account */
                imask |= (1U << (cj_offset*c_gpuNumClusterPerCell + ci));

#if PRUNE_LIST_CPU_ONE
                ci_last = ci;
#endif

                npair++;
            }
        }

#if PRUNE_LIST_CPU_ONE
        /* If we only found 1 pair, check if any atoms are actually
         * within the cut-off, so we could get rid of it.
         */
        if (npair == 1 && d2l[ci_last] >= rbb2 &&
            !clusterpair_in_range(work, ci_last, cj_gl, stride, x, rlist2))
        {
            /* Avoid storing the cluster pair that is out of range */
            imask &= ~(1U << (cj_offset*c_gpuNumClusterPerCell + ci_last));
            npair--;
        }
#endif

        if (npair > 0)
        {
            /* We have a useful sj entry, close it now */

            /* Set the exclusions for the ci==sj entry.
             * Here we don't bother to check if this entry is actually flagged,
             * as it will nearly always be in the list.
             */
            if (sci_equals_scj)
            {
                set_self_and_newton_excls_supersub(nbl, cj4_ind, cj_offset, subc);
            }

            /* Copy the cluster interaction mask to the list */
            for (int w = 0; w < c_nbnxnGpuClusterpairSplit; w++)
            {
                cj4->imei[w].imask |= imask;
            }

            nbl->work->cj_ind++;

            /* Keep the count */
            nbl->nci_tot += npair;

            /* Increase the closing index in i super-cell list */
            nbl->sci[nbl->nsci].cj4_ind_end =
                (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize;
        }
    }
}

/* Returns how many contiguous j-clusters we have starting at cjIndexStart in the j-list */
template <typename CjListType>
static int numContiguousJClusters(const int          cjIndexStart,
                                  const int          cjIndexEnd,
                                  const CjListType  &cjList)
{
    const int firstJCluster = nblCj(cjList, cjIndexStart);

    int       numContiguous = 0;

    while (cjIndexStart + numContiguous < cjIndexEnd &&
           nblCj(cjList, cjIndexStart + numContiguous) == firstJCluster + numContiguous)
    {
        numContiguous++;
    }

    return numContiguous;
}

/* Helper struct for efficient searching for excluded atoms in a j-list */
struct JListRanges
{
    /* Constructs the helper object for the given index range in \p cjList */
    template <typename CjListType>
    JListRanges(int                cjIndexStart,
                int                cjIndexEnd,
                const CjListType  &cjList);

    int cjIndexStart; // The start index in the j-list
    int cjIndexEnd;   // The end index in the j-list
    int cjFirst;      // The j-cluster with index cjIndexStart
    int cjLast;       // The j-cluster with index cjIndexEnd-1
    int numDirect;    // Up to cjIndexStart+numDirect the j-clusters are cjFirst + the index offset
};

template <typename CjListType>
JListRanges::JListRanges(int                cjIndexStart,
                         int                cjIndexEnd,
                         const CjListType  &cjList) :
    cjIndexStart(cjIndexStart),
    cjIndexEnd(cjIndexEnd)
{
    GMX_ASSERT(cjIndexEnd > cjIndexStart, "JListRanges should only be called with non-empty lists");

    cjFirst = nblCj(cjList, cjIndexStart);
    cjLast  = nblCj(cjList, cjIndexEnd - 1);

    /* Determine how many contiguous j-cells we have starting
     * from the first i-cell. This number can be used to directly
     * calculate j-cell indices for excluded atoms.
     */
    numDirect = numContiguousJClusters(cjIndexStart, cjIndexEnd, cjList);
}

/* Return the index of \p jCluster in the given range or -1 when not present
 *
 * Note: This code is executed very often and therefore performance is
 *       important. It should be inlined and fully optimized.
 */
template <typename CjListType>
static inline int findJClusterInJList(int                jCluster,
                                      const JListRanges &ranges,
                                      const CjListType  &cjList)
{
    int index;

    if (jCluster < ranges.cjFirst + ranges.numDirect)
    {
        /* We can calculate the index directly using the offset */
        index = ranges.cjIndexStart + jCluster - ranges.cjFirst;
    }
    else
    {
        /* Search for jCluster using bisection */
        index          = -1;
        int rangeStart = ranges.cjIndexStart + ranges.numDirect;
        int rangeEnd   = ranges.cjIndexEnd;
        int rangeMiddle;
        while (index == -1 && rangeStart < rangeEnd)
        {
            rangeMiddle = (rangeStart + rangeEnd) >> 1;

            const int clusterMiddle = nblCj(cjList, rangeMiddle);

            if (jCluster == clusterMiddle)
            {
                index = rangeMiddle;
            }
            else if (jCluster < clusterMiddle)
            {
                rangeEnd = rangeMiddle;
            }
            else
            {
                rangeStart = rangeMiddle + 1;
            }
        }
    }

    return index;
}
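
/* Usage sketch (hypothetical list contents): for a j-list whose clusters
 * are 8, 9, 10, 14, 17, the ranges helper reports numDirect = 3, so
 * looking up cluster 9 is answered directly as cjIndexStart + (9 - 8),
 * while looking up 17 falls through to the bisection over the remaining,
 * still ascending tail; a cluster not present in the list yields -1.
 */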

/* Set all atom-pair exclusions for a simple type list i-entry
 *
 * Set all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for simple list entry iEntry.
 */
static void
setExclusionsForSimpleIentry(const nbnxn_search *nbs,
                             nbnxn_pairlist_t   *nbl,
                             gmx_bool            diagRemoved,
                             int                 na_cj_2log,
                             const nbnxn_ci_t   &iEntry,
                             const t_blocka     &exclusions)
{
    if (iEntry.cj_ind_end == iEntry.cj_ind_start)
    {
        /* Empty list: no exclusions */
        return;
    }

    const JListRanges        ranges(iEntry.cj_ind_start, iEntry.cj_ind_end, nbl->cj);

    const int                iCluster = iEntry.ci;

    gmx::ArrayRef<const int> cell = nbs->cell;

    /* Loop over the atoms in the i-cluster */
    for (int i = 0; i < nbl->na_sc; i++)
    {
        const int iIndex = iCluster*nbl->na_sc + i;
        const int iAtom  = nbs->a[iIndex];
        if (iAtom >= 0)
        {
            /* Loop over the topology-based exclusions for this i-atom */
            for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1]; exclIndex++)
            {
                const int jAtom = exclusions.a[exclIndex];

                if (jAtom == iAtom)
                {
                    /* The self exclusions are already set, save some time */
                    continue;
                }

                /* Get the index of the j-atom in the nbnxn atom data */
                const int jIndex = cell[jAtom];

                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                if (diagRemoved && jIndex <= iIndex)
                {
                    continue;
                }

                const int jCluster = (jIndex >> na_cj_2log);

                /* Could this j-cluster be in our list? */
                if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
                {
                    const int index =
                        findJClusterInJList(jCluster, ranges, nbl->cj);

                    if (index >= 0)
                    {
                        /* We found an exclusion, clear the corresponding
                         * interaction bit.
                         */
                        const int innerJ = jIndex - (jCluster << na_cj_2log);

                        nbl->cj[index].excl &= ~(1U << ((i << na_cj_2log) + innerJ));
                    }
                }
            }
        }
    }
}

/* Add a new i-entry to the FEP list and copy the i-properties */
static inline void fep_list_new_nri_copy(t_nblist *nlist)
{
    /* Add a new i-entry */
    nlist->nri++;

    assert(nlist->nri < nlist->maxnri);

    /* Duplicate the last i-entry, except for jindex, which continues */
    nlist->iinr[nlist->nri]   = nlist->iinr[nlist->nri-1];
    nlist->shift[nlist->nri]  = nlist->shift[nlist->nri-1];
    nlist->gid[nlist->nri]    = nlist->gid[nlist->nri-1];
    nlist->jindex[nlist->nri] = nlist->nrj;
}

/* For load balancing of the free-energy lists over threads, we set
 * the maximum nrj size of an i-entry to 40. This leads to good
 * load balancing in the worst case scenario of a single perturbed
 * particle on 16 threads, while not introducing significant overhead.
 * Note that half of the perturbed pairs will anyhow end up in very small lists,
 * since non-perturbed i-particles will see few perturbed j-particles.
 */
const int max_nrj_fep = 40;

/* Exclude the perturbed pairs from the Verlet list. This is only done to avoid
 * singularities for overlapping particles (0/0), since the charges and
 * LJ parameters have been zeroed in the nbnxn data structure.
 * Simultaneously make a group pair list for the perturbed pairs.
 */
static void make_fep_list(const nbnxn_search     *nbs,
                          const nbnxn_atomdata_t *nbat,
                          nbnxn_pairlist_t       *nbl,
                          gmx_bool                bDiagRemoved,
                          nbnxn_ci_t             *nbl_ci,
                          const nbnxn_grid_t     *gridi,
                          const nbnxn_grid_t     *gridj,
                          t_nblist               *nlist)
{
    int      ci, cj_ind_start, cj_ind_end, cja, cjr;
    int      nri_max;
    int      ngid, gid_i = 0, gid_j, gid;
    int      egp_shift, egp_mask;
    int      gid_cj = 0;
    int      ind_i, ind_j, ai, aj;
    int      nri;
    gmx_bool bFEP_i, bFEP_i_all;

    if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
    {
        /* Empty list */
        return;
    }

    ci = nbl_ci->ci;

    cj_ind_start = nbl_ci->cj_ind_start;
    cj_ind_end   = nbl_ci->cj_ind_end;

    /* In worst case we have alternating energy groups
     * and create #atom-pair lists, which means we need the size
     * of a cluster pair (na_ci*na_cj) times the number of cj's.
     */
    nri_max = nbl->na_ci*nbl->na_cj*(cj_ind_end - cj_ind_start);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }

    ngid = nbat->nenergrp;

    if (ngid*gridj->na_cj > gmx::index(sizeof(gid_cj)*8))
    {
        gmx_fatal(FARGS, "The Verlet scheme with %dx%d kernels and free-energy only supports up to %lu energy groups",
                  gridi->na_c, gridj->na_cj, (sizeof(gid_cj)*8)/gridj->na_cj);
    }

    egp_shift = nbat->neg_2log;
    egp_mask  = (1<<nbat->neg_2log) - 1;

    /* Loop over the atoms in the i sub-cell */
    bFEP_i_all = TRUE;
    for (int i = 0; i < nbl->na_ci; i++)
    {
        ind_i = ci*nbl->na_ci + i;
        ai    = nbs->a[ind_i];
        if (ai >= 0)
        {
            nri                  = nlist->nri;
            nlist->jindex[nri+1] = nlist->jindex[nri];
            nlist->iinr[nri]     = ai;
            /* The actual energy group pair index is set later */
            nlist->gid[nri]      = 0;
            nlist->shift[nri]    = nbl_ci->shift & NBNXN_CI_SHIFT;

            bFEP_i = gridi->fep[ci - gridi->cell0] & (1 << i);

            bFEP_i_all = bFEP_i_all && bFEP_i;

            if (nlist->nrj + (cj_ind_end - cj_ind_start)*nbl->na_cj > nlist->maxnrj)
            {
                nlist->maxnrj = over_alloc_small(nlist->nrj + (cj_ind_end - cj_ind_start)*nbl->na_cj);
                srenew(nlist->jjnr, nlist->maxnrj);
                srenew(nlist->excl_fep, nlist->maxnrj);
            }

            if (ngid > 1)
            {
                gid_i = (nbat->energrp[ci] >> (egp_shift*i)) & egp_mask;
            }

            for (int cj_ind = cj_ind_start; cj_ind < cj_ind_end; cj_ind++)
            {
                unsigned int fep_cj;

                cja = nbl->cj[cj_ind].cj;

                if (gridj->na_cj == gridj->na_c)
                {
                    cjr    = cja - gridj->cell0;
                    fep_cj = gridj->fep[cjr];
                    if (ngid > 1)
                    {
                        gid_cj = nbat->energrp[cja];
                    }
                }
                else if (2*gridj->na_cj == gridj->na_c)
                {
                    cjr    = cja - gridj->cell0*2;
                    /* Extract half of the ci fep/energrp mask */
                    fep_cj = (gridj->fep[cjr>>1] >> ((cjr&1)*gridj->na_cj)) & ((1<<gridj->na_cj) - 1);
                    if (ngid > 1)
                    {
                        gid_cj = nbat->energrp[cja>>1] >> ((cja&1)*gridj->na_cj*egp_shift) & ((1<<(gridj->na_cj*egp_shift)) - 1);
                    }
                }
                else
                {
                    cjr    = cja - (gridj->cell0>>1);
                    /* Combine two ci fep masks/energrp */
                    fep_cj = gridj->fep[cjr*2] + (gridj->fep[cjr*2+1] << gridj->na_c);
                    if (ngid > 1)
                    {
                        gid_cj = nbat->energrp[cja*2] + (nbat->energrp[cja*2+1] << (gridj->na_c*egp_shift));
                    }
                }

                if (bFEP_i || fep_cj != 0)
                {
                    for (int j = 0; j < nbl->na_cj; j++)
                    {
                        /* Is this interaction perturbed and not excluded? */
                        ind_j = cja*nbl->na_cj + j;
                        aj    = nbs->a[ind_j];
                        if (aj >= 0 &&
                            (bFEP_i || (fep_cj & (1 << j))) &&
                            (!bDiagRemoved || ind_j >= ind_i))
                        {
                            if (ngid > 1)
                            {
                                gid_j = (gid_cj >> (j*egp_shift)) & egp_mask;
                                gid   = GID(gid_i, gid_j, ngid);

                                if (nlist->nrj > nlist->jindex[nri] &&
                                    nlist->gid[nri] != gid)
                                {
                                    /* Energy group pair changed: new list */
                                    fep_list_new_nri_copy(nlist);
                                    nri = nlist->nri;
                                }
                                nlist->gid[nri] = gid;
                            }

                            if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                            {
                                fep_list_new_nri_copy(nlist);
                                nri = nlist->nri;
                            }

                            /* Add it to the FEP list */
                            nlist->jjnr[nlist->nrj]     = aj;
                            nlist->excl_fep[nlist->nrj] = (nbl->cj[cj_ind].excl >> (i*nbl->na_cj + j)) & 1;
                            nlist->nrj++;

                            /* Exclude it from the normal list.
                             * Note that the charge has been set to zero,
                             * but we need to avoid 0/0, as perturbed atoms
                             * can be on top of each other.
                             */
                            nbl->cj[cj_ind].excl &= ~(1U << (i*nbl->na_cj + j));
                        }
                    }
                }
            }

            if (nlist->nrj > nlist->jindex[nri])
            {
                /* Actually add this new, non-empty, list */
                nlist->nri++;
                nlist->jindex[nlist->nri] = nlist->nrj;
            }
        }
    }

    if (bFEP_i_all)
    {
        /* All interactions are perturbed, we can skip this entry */
        nbl_ci->cj_ind_end = cj_ind_start;
        nbl->ncjInUse     -= cj_ind_end - cj_ind_start;
    }
}

/* Return the index of a j-cluster within its cj4 group */
static inline int cj_mod_cj4(int cj)
{
    return cj & (c_nbnxnGpuJgroupSize - 1);
}

/* Convert a j-cluster to a cj4 group */
static inline int cj_to_cj4(int cj)
{
    return cj/c_nbnxnGpuJgroupSize;
}

/* Return the index of a j-atom within a warp */
static inline int a_mod_wj(int a)
{
    return a & (c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit - 1);
}
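
/* Worked example, assuming c_nbnxnGpuJgroupSize = 4, c_nbnxnGpuClusterSize = 8
 * and c_nbnxnGpuClusterpairSplit = 2: list position cj = 6 maps to cj4 group
 * cj_to_cj4(6) = 1, slot cj_mod_cj4(6) = 2, and j-atom 5 of a cluster maps to
 * warp-local index a_mod_wj(5) = 1, since each of the two warps covers 4
 * consecutive j-atoms.
 */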

/* As make_fep_list above, but for super/sub lists. */
static void make_fep_list_supersub(const nbnxn_search     *nbs,
                                   const nbnxn_atomdata_t *nbat,
                                   nbnxn_pairlist_t       *nbl,
                                   gmx_bool                bDiagRemoved,
                                   const nbnxn_sci_t      *nbl_sci,
                                   real                    shx,
                                   real                    shy,
                                   real                    shz,
                                   real                    rlist_fep2,
                                   const nbnxn_grid_t     *gridi,
                                   const nbnxn_grid_t     *gridj,
                                   t_nblist               *nlist)
{
    int                sci, cj4_ind_start, cj4_ind_end, cjr;
    int                nri_max;
    int                c_abs;
    int                ind_i, ind_j, ai, aj;
    int                nri;
    gmx_bool           bFEP_i;
    real               xi, yi, zi;
    const nbnxn_cj4_t *cj4;

    if (nbl_sci->cj4_ind_end == nbl_sci->cj4_ind_start)
    {
        /* Empty list */
        return;
    }

    sci = nbl_sci->sci;

    cj4_ind_start = nbl_sci->cj4_ind_start;
    cj4_ind_end   = nbl_sci->cj4_ind_end;

    /* Here we process one super-cell, max #atoms na_sc, versus a list
     * cj4 entries, each with max c_nbnxnGpuJgroupSize cj's, each
     * of size na_cj atoms.
     * On the GPU we don't support energy groups (yet).
     * So for each of the na_sc i-atoms, we need max one FEP list
     * for each max_nrj_fep j-atoms.
     */
    nri_max = nbl->na_sc*nbl->na_cj*(1 + ((cj4_ind_end - cj4_ind_start)*c_nbnxnGpuJgroupSize)/max_nrj_fep);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }

    /* Loop over the atoms in the i super-cluster */
    for (int c = 0; c < c_gpuNumClusterPerCell; c++)
    {
        c_abs = sci*c_gpuNumClusterPerCell + c;

        for (int i = 0; i < nbl->na_ci; i++)
        {
            ind_i = c_abs*nbl->na_ci + i;
            ai    = nbs->a[ind_i];
            if (ai >= 0)
            {
                nri                  = nlist->nri;
                nlist->jindex[nri+1] = nlist->jindex[nri];
                nlist->iinr[nri]     = ai;
                /* With GPUs, energy groups are not supported */
                nlist->gid[nri]      = 0;
                nlist->shift[nri]    = nbl_sci->shift & NBNXN_CI_SHIFT;

                bFEP_i = (gridi->fep[c_abs - gridi->cell0*c_gpuNumClusterPerCell] & (1 << i));

                xi = nbat->x[ind_i*nbat->xstride+XX] + shx;
                yi = nbat->x[ind_i*nbat->xstride+YY] + shy;
                zi = nbat->x[ind_i*nbat->xstride+ZZ] + shz;

                if ((nlist->nrj + cj4_ind_end - cj4_ind_start)*c_nbnxnGpuJgroupSize*nbl->na_cj > nlist->maxnrj)
                {
                    nlist->maxnrj = over_alloc_small((nlist->nrj + cj4_ind_end - cj4_ind_start)*c_nbnxnGpuJgroupSize*nbl->na_cj);
                    srenew(nlist->jjnr, nlist->maxnrj);
                    srenew(nlist->excl_fep, nlist->maxnrj);
                }

                for (int cj4_ind = cj4_ind_start; cj4_ind < cj4_ind_end; cj4_ind++)
                {
                    cj4 = &nbl->cj4[cj4_ind];

                    for (int gcj = 0; gcj < c_nbnxnGpuJgroupSize; gcj++)
                    {
                        unsigned int fep_cj;

                        if ((cj4->imei[0].imask & (1U << (gcj*c_gpuNumClusterPerCell + c))) == 0)
                        {
                            /* Skip this ci for this cj */
                            continue;
                        }

                        cjr = cj4->cj[gcj] - gridj->cell0*c_gpuNumClusterPerCell;

                        fep_cj = gridj->fep[cjr];

                        if (bFEP_i || fep_cj != 0)
                        {
                            for (int j = 0; j < nbl->na_cj; j++)
                            {
                                /* Is this interaction perturbed and not excluded? */
                                ind_j = (gridj->cell0*c_gpuNumClusterPerCell + cjr)*nbl->na_cj + j;
                                aj    = nbs->a[ind_j];
                                if (aj >= 0 &&
                                    (bFEP_i || (fep_cj & (1 << j))) &&
                                    (!bDiagRemoved || ind_j >= ind_i))
                                {
                                    nbnxn_excl_t *excl;
                                    int           excl_pair;
                                    unsigned int  excl_bit;
                                    real          dx, dy, dz;

                                    const int     jHalf = j/(c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit);
                                    get_nbl_exclusions_1(nbl, cj4_ind, jHalf, &excl);

                                    excl_pair = a_mod_wj(j)*nbl->na_ci + i;
                                    excl_bit  = (1U << (gcj*c_gpuNumClusterPerCell + c));

                                    dx = nbat->x[ind_j*nbat->xstride+XX] - xi;
                                    dy = nbat->x[ind_j*nbat->xstride+YY] - yi;
                                    dz = nbat->x[ind_j*nbat->xstride+ZZ] - zi;

                                    /* The unpruned GPU list has more than 2/3
                                     * of the atom pairs beyond rlist. Using
                                     * this list will cause a lot of overhead
                                     * in the CPU FEP kernels, especially
                                     * relative to the fast GPU kernels.
                                     * So we prune the FEP list here.
                                     */
                                    if (dx*dx + dy*dy + dz*dz < rlist_fep2)
                                    {
                                        if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                                        {
                                            fep_list_new_nri_copy(nlist);
                                            nri = nlist->nri;
                                        }

                                        /* Add it to the FEP list */
                                        nlist->jjnr[nlist->nrj]     = aj;
                                        nlist->excl_fep[nlist->nrj] = (excl->pair[excl_pair] & excl_bit) ? 1 : 0;
                                        nlist->nrj++;
                                    }

                                    /* Exclude it from the normal list.
                                     * Note that the charge and LJ parameters have
                                     * been set to zero, but we need to avoid 0/0,
                                     * as perturbed atoms can be on top of each other.
                                     */
                                    excl->pair[excl_pair] &= ~excl_bit;
                                }
                            }

                            /* Note that we could mask out this pair in imask
                             * if all i- and/or all j-particles are perturbed.
                             * But since the perturbed pairs on the CPU will
                             * take an order of magnitude more time, the GPU
                             * will finish before the CPU and there is no gain.
                             */
                        }
                    }
                }

                if (nlist->nrj > nlist->jindex[nri])
                {
                    /* Actually add this new, non-empty, list */
                    nlist->nri++;
                    nlist->jindex[nlist->nri] = nlist->nrj;
                }
            }
        }
    }
}

/* Set all atom-pair exclusions for a GPU type list i-entry
 *
 * Sets all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for i-super-cluster list entry iEntry.
 */
static void
setExclusionsForGpuIentry(const nbnxn_search *nbs,
                          nbnxn_pairlist_t   *nbl,
                          gmx_bool            diagRemoved,
                          const nbnxn_sci_t  &iEntry,
                          const t_blocka     &exclusions)
{
    if (iEntry.cj4_ind_end == iEntry.cj4_ind_start)
    {
        /* Empty list */
        return;
    }

    /* Set the search ranges using start and end j-cluster indices.
     * Note that here we can not use cj4_ind_end, since the last cj4
     * can be only partially filled, so we use cj_ind.
     */
    const JListRanges ranges(iEntry.cj4_ind_start*c_nbnxnGpuJgroupSize,
                             nbl->work->cj_ind,
                             nbl->cj4);

    GMX_ASSERT(nbl->na_ci == c_nbnxnGpuClusterSize, "na_ci should match the GPU cluster size");
    constexpr int            c_clusterSize      = c_nbnxnGpuClusterSize;
    constexpr int            c_superClusterSize = c_nbnxnGpuNumClusterPerSupercluster*c_nbnxnGpuClusterSize;

    const int                iSuperCluster = iEntry.sci;

    gmx::ArrayRef<const int> cell = nbs->cell;

    /* Loop over the atoms in the i super-cluster */
    for (int i = 0; i < c_superClusterSize; i++)
    {
        const int iIndex = iSuperCluster*c_superClusterSize + i;
        const int iAtom  = nbs->a[iIndex];
        if (iAtom >= 0)
        {
            const int iCluster = i/c_clusterSize;

            /* Loop over the topology-based exclusions for this i-atom */
            for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1]; exclIndex++)
            {
                const int jAtom = exclusions.a[exclIndex];

                if (jAtom == iAtom)
                {
                    /* The self exclusions are already set, save some time */
                    continue;
                }

                /* Get the index of the j-atom in the nbnxn atom data */
                const int jIndex = cell[jAtom];

                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                /* NOTE: We would like to use iIndex on the right hand side,
                 * but that makes this routine 25% slower with gcc6/7.
                 * Even using c_superClusterSize makes it slower.
                 * Either of these changes triggers peeling of the exclIndex
                 * loop, which apparently leads to far less efficient code.
                 */
                if (diagRemoved && jIndex <= iSuperCluster*nbl->na_sc + i)
                {
                    continue;
                }

                const int jCluster = jIndex/c_clusterSize;

                /* Could this j-cluster be in our list? */
                if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
                {
                    const int index =
                        findJClusterInJList(jCluster, ranges, nbl->cj4);

                    if (index >= 0)
                    {
                        /* We found an exclusion, clear the corresponding
                         * interaction bit.
                         */
                        const unsigned int pairMask = (1U << (cj_mod_cj4(index)*c_gpuNumClusterPerCell + iCluster));
                        /* Check if the i-cluster interacts with the j-cluster */
                        if (nbl_imask0(nbl, index) & pairMask)
                        {
                            const int innerI = (i      & (c_clusterSize - 1));
                            const int innerJ = (jIndex & (c_clusterSize - 1));

                            /* Determine which j-half (CUDA warp) we are in */
                            const int     jHalf = innerJ/(c_clusterSize/c_nbnxnGpuClusterpairSplit);

                            nbnxn_excl_t *interactionMask;
                            get_nbl_exclusions_1(nbl, cj_to_cj4(index), jHalf, &interactionMask);

                            interactionMask->pair[a_mod_wj(innerJ)*c_clusterSize + innerI] &= ~pairMask;
                        }
                    }
                }
            }
        }
    }
}

/* Reallocate the simple ci list for at least n entries */
static void nb_realloc_ci(nbnxn_pairlist_t *nbl, int n)
{
    nbl->ci_nalloc = over_alloc_small(n);
    nbnxn_realloc_void(reinterpret_cast<void **>(&nbl->ci),
                       nbl->nci*sizeof(*nbl->ci),
                       nbl->ci_nalloc*sizeof(*nbl->ci),
                       nbl->alloc, nbl->free);

    nbnxn_realloc_void(reinterpret_cast<void **>(&nbl->ciOuter),
                       nbl->nci*sizeof(*nbl->ciOuter),
                       nbl->ci_nalloc*sizeof(*nbl->ciOuter),
                       nbl->alloc, nbl->free);
}

/* Reallocate the super-cell sci list for at least n entries */
static void nb_realloc_sci(nbnxn_pairlist_t *nbl, int n)
{
    nbl->sci_nalloc = over_alloc_small(n);
    nbnxn_realloc_void(reinterpret_cast<void **>(&nbl->sci),
                       nbl->nsci*sizeof(*nbl->sci),
                       nbl->sci_nalloc*sizeof(*nbl->sci),
                       nbl->alloc, nbl->free);
}

/* Make a new ci entry at index nbl->nci */
static void new_ci_entry(nbnxn_pairlist_t *nbl, int ci, int shift, int flags)
{
    if (nbl->nci + 1 > nbl->ci_nalloc)
    {
        nb_realloc_ci(nbl, nbl->nci+1);
    }
    nbl->ci[nbl->nci].ci           = ci;
    nbl->ci[nbl->nci].shift        = shift;
    /* Store the interaction flags along with the shift */
    nbl->ci[nbl->nci].shift       |= flags;
    nbl->ci[nbl->nci].cj_ind_start = nbl->ncj;
    nbl->ci[nbl->nci].cj_ind_end   = nbl->ncj;
}

/* Make a new sci entry at index nbl->nsci */
static void new_sci_entry(nbnxn_pairlist_t *nbl, int sci, int shift)
{
    if (nbl->nsci + 1 > nbl->sci_nalloc)
    {
        nb_realloc_sci(nbl, nbl->nsci+1);
    }
    nbl->sci[nbl->nsci].sci           = sci;
    nbl->sci[nbl->nsci].shift         = shift;
    nbl->sci[nbl->nsci].cj4_ind_start = nbl->ncj4;
    nbl->sci[nbl->nsci].cj4_ind_end   = nbl->ncj4;
}
2219 /* Sort the simple j-list cj on exclusions.
2220 * Entries with exclusions will all be sorted to the beginning of the list.
2222 static void sort_cj_excl(nbnxn_cj_t *cj, int ncj,
2223 nbnxn_list_work_t *work)
2227 if (ncj > work->cj_nalloc)
2229 work->cj_nalloc = over_alloc_large(ncj);
2230 srenew(work->cj, work->cj_nalloc);
2233 /* Make a list of the j-cells involving exclusions */
2235 for (int j = 0; j < ncj; j++)
2237 if (cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
2239 work->cj[jnew++] = cj[j];
2242 /* Skip the reordering if there are no exclusions or if the only entry with exclusions is already first */
2243 if (!((jnew == 0) ||
2244 (jnew == 1 && cj[0].excl != NBNXN_INTERACTION_MASK_ALL)))
2246 for (int j = 0; j < ncj; j++)
2248 if (cj[j].excl == NBNXN_INTERACTION_MASK_ALL)
2250 work->cj[jnew++] = cj[j];
2253 for (int j = 0; j < ncj; j++)
2255 cj[j] = work->cj[j];
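/* The loop above is a stable two-pass partition rather than a full sort.
 * A sketch of the same semantics with the standard library, assuming the
 * j-list were held in a plain std::vector<nbnxn_cj_t>:
 *
 *   std::stable_partition(cjList.begin(), cjList.end(),
 *                         [](const nbnxn_cj_t &cj)
 *                         { return cj.excl != NBNXN_INTERACTION_MASK_ALL; });
 *
 * The hand-written version reuses the thread-local work->cj buffer to
 * avoid allocation in this performance-critical code.
 */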
2260 /* Close this simple list i entry */
2261 static void close_ci_entry_simple(nbnxn_pairlist_t *nbl)
2265 /* All content of the new ci entry has already been filled correctly,
2266 * we only need to increase the count here (for non-empty lists).
2268 jlen = nbl->ci[nbl->nci].cj_ind_end - nbl->ci[nbl->nci].cj_ind_start;
2271 sort_cj_excl(nbl->cj+nbl->ci[nbl->nci].cj_ind_start, jlen, nbl->work);
2273 /* The counts below are used for non-bonded pair/flop counts
2274 * and should therefore match the available kernel setups.
2276 if (!(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_COUL(0)))
2278 nbl->work->ncj_noq += jlen;
2280 else if ((nbl->ci[nbl->nci].shift & NBNXN_CI_HALF_LJ(0)) ||
2281 !(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_LJ(0)))
2283 nbl->work->ncj_hlj += jlen;
2290 /* Split sci entry for load balancing on the GPU.
2291 * Splitting ensures we have enough lists to fully utilize the whole GPU.
2292 * With progBal we generate progressively smaller lists, which improves
2293 * load balancing. As we only know the current count on our own thread,
2294 * we will need to estimate the current total number of i-entries.
2295 * As the lists get concatenated later, this estimate depends
2296 * both on nthread and our own thread index.
2298 static void split_sci_entry(nbnxn_pairlist_t *nbl,
2300 gmx_bool progBal, float nsp_tot_est,
2301 int thread, int nthread)
2304 int cj4_start, cj4_end, j4len;
2306 int nsp, nsp_sci, nsp_cj4, nsp_cj4_e, nsp_cj4_p;
2312 /* Estimate the total number of ci's of the nblist combined
2313 * over all threads using the target number of ci's.
2315 nsp_est = (nsp_tot_est*thread)/nthread + nbl->nci_tot;
2317 /* The first ci blocks should be larger, to avoid overhead.
2318 * The last ci blocks should be smaller, to improve load balancing.
2319 * The factor 3/2 makes the first block 3/2 times the target average
2320 * and ensures that the total number of blocks ends up equal to
2321 * that of equally sized blocks of size nsp_target_av.
2323 nsp_max = static_cast<int>(nsp_target_av*(nsp_tot_est*1.5/(nsp_est + nsp_tot_est)));
2327 nsp_max = nsp_target_av;
2330 cj4_start = nbl->sci[nbl->nsci-1].cj4_ind_start;
2331 cj4_end = nbl->sci[nbl->nsci-1].cj4_ind_end;
2332 j4len = cj4_end - cj4_start;
2334 if (j4len > 1 && j4len*c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize > nsp_max)
2336 /* Remove the last ci entry and process the cj4's again */
2344 for (int cj4 = cj4_start; cj4 < cj4_end; cj4++)
2346 nsp_cj4_p = nsp_cj4;
2347 /* Count the number of cluster pairs in this cj4 group */
2349 for (int p = 0; p < c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize; p++)
2351 nsp_cj4 += (nbl->cj4[cj4].imei[0].imask >> p) & 1;
2354 /* If adding the current cj4 with nsp_cj4 pairs gets us further
2355 * away from our target nsp_max, split the list before this cj4.
2357 if (nsp > 0 && nsp_max - nsp < nsp + nsp_cj4 - nsp_max)
2359 /* Split the list at cj4 */
2360 nbl->sci[sci].cj4_ind_end = cj4;
2361 /* Create a new sci entry */
2364 if (nbl->nsci+1 > nbl->sci_nalloc)
2366 nb_realloc_sci(nbl, nbl->nsci+1);
2368 nbl->sci[sci].sci = nbl->sci[nbl->nsci-1].sci;
2369 nbl->sci[sci].shift = nbl->sci[nbl->nsci-1].shift;
2370 nbl->sci[sci].cj4_ind_start = cj4;
2372 nsp_cj4_e = nsp_cj4_p;
2378 /* Put the remaining cj4's in the last sci entry */
2379 nbl->sci[sci].cj4_ind_end = cj4_end;
2381 /* Possibly balance out the last two sci's
2382 * by moving the last cj4 of the second last sci.
2384 if (nsp_sci - nsp_cj4_e >= nsp + nsp_cj4_e)
2386 nbl->sci[sci-1].cj4_ind_end--;
2387 nbl->sci[sci].cj4_ind_start--;
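/* Worked example of the progressive splitting above (hypothetical
 * numbers): with nsp_target_av = 36 and nsp_tot_est = 1000, a thread with
 * a running estimate of nsp_est = 0 (start of the search) splits at
 * nsp_max = 36*1.5*1000/(0 + 1000) = 54 pairs, whereas at
 * nsp_est = 1000 (end of the search) it splits at
 * nsp_max = 36*1.5*1000/(1000 + 1000) = 27, so later lists come out
 * progressively smaller.
 */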
2394 /* Close this super/sub list i entry */
2395 static void close_ci_entry_supersub(nbnxn_pairlist_t *nbl,
2397 gmx_bool progBal, float nsp_tot_est,
2398 int thread, int nthread)
2400 /* All content of the new ci entry has already been filled correctly,
2401 * we only need to increase the count here (for non-empty lists).
2403 int j4len = nbl->sci[nbl->nsci].cj4_ind_end - nbl->sci[nbl->nsci].cj4_ind_start;
2406 /* We can only have complete blocks of 4 j-entries in a list,
2407 * so round the count up before closing.
2409 nbl->ncj4 = (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize;
2410 nbl->work->cj_ind = nbl->ncj4*c_nbnxnGpuJgroupSize;
2416 /* Measure the size of the new entry and potentially split it */
2417 split_sci_entry(nbl, nsp_max_av, progBal, nsp_tot_est,
2423 /* Syncs the working array before adding another grid pair to the list */
2424 static void sync_work(nbnxn_pairlist_t *nbl)
2428 nbl->work->cj_ind = nbl->ncj4*c_nbnxnGpuJgroupSize;
2429 nbl->work->cj4_init = nbl->ncj4;
2433 /* Clears an nbnxn_pairlist_t data structure */
2434 static void clear_pairlist(nbnxn_pairlist_t *nbl)
2445 nbl->work->ncj_noq = 0;
2446 nbl->work->ncj_hlj = 0;
2449 /* Clears a group scheme pair list */
2450 static void clear_pairlist_fep(t_nblist *nl)
2454 if (nl->jindex == nullptr)
2456 snew(nl->jindex, 1);
2461 /* Sets a simple list i-cell bounding box, including PBC shift */
2462 static inline void set_icell_bb_simple(gmx::ArrayRef<const nbnxn_bb_t> bb,
2464 real shx, real shy, real shz,
2467 bb_ci->lower[BB_X] = bb[ci].lower[BB_X] + shx;
2468 bb_ci->lower[BB_Y] = bb[ci].lower[BB_Y] + shy;
2469 bb_ci->lower[BB_Z] = bb[ci].lower[BB_Z] + shz;
2470 bb_ci->upper[BB_X] = bb[ci].upper[BB_X] + shx;
2471 bb_ci->upper[BB_Y] = bb[ci].upper[BB_Y] + shy;
2472 bb_ci->upper[BB_Z] = bb[ci].upper[BB_Z] + shz;
2476 /* Sets the super-cell and sub-cell bounding boxes, including PBC shift */
2477 static void set_icell_bbxxxx_supersub(gmx::ArrayRef<const float> bb,
2479 real shx, real shy, real shz,
2482 int ia = ci*(c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX;
2483 for (int m = 0; m < (c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX; m += NNBSBB_XXXX)
2485 for (int i = 0; i < STRIDE_PBB; i++)
2487 bb_ci[m+0*STRIDE_PBB+i] = bb[ia+m+0*STRIDE_PBB+i] + shx;
2488 bb_ci[m+1*STRIDE_PBB+i] = bb[ia+m+1*STRIDE_PBB+i] + shy;
2489 bb_ci[m+2*STRIDE_PBB+i] = bb[ia+m+2*STRIDE_PBB+i] + shz;
2490 bb_ci[m+3*STRIDE_PBB+i] = bb[ia+m+3*STRIDE_PBB+i] + shx;
2491 bb_ci[m+4*STRIDE_PBB+i] = bb[ia+m+4*STRIDE_PBB+i] + shy;
2492 bb_ci[m+5*STRIDE_PBB+i] = bb[ia+m+5*STRIDE_PBB+i] + shz;
2498 /* Sets the super-cell and sub-cell bounding boxes, including PBC shift */
2499 gmx_unused static void set_icell_bb_supersub(gmx::ArrayRef<const nbnxn_bb_t> bb,
2501 real shx, real shy, real shz,
2504 for (int i = 0; i < c_gpuNumClusterPerCell; i++)
2506 set_icell_bb_simple(bb, ci*c_gpuNumClusterPerCell+i,
2512 /* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
2513 static void icell_set_x_simple(int ci,
2514 real shx, real shy, real shz,
2515 int stride, const real *x,
2516 nbnxn_list_work_t *work)
2518 int ia = ci*NBNXN_CPU_CLUSTER_I_SIZE;
2520 for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE; i++)
2522 work->x_ci[i*STRIDE_XYZ+XX] = x[(ia+i)*stride+XX] + shx;
2523 work->x_ci[i*STRIDE_XYZ+YY] = x[(ia+i)*stride+YY] + shy;
2524 work->x_ci[i*STRIDE_XYZ+ZZ] = x[(ia+i)*stride+ZZ] + shz;
2528 /* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
2529 static void icell_set_x_supersub(int ci,
2530 real shx, real shy, real shz,
2531 int stride, const real *x,
2532 nbnxn_list_work_t *work)
2534 #if !GMX_SIMD4_HAVE_REAL
2536 real * x_ci = work->x_ci;
2538 int ia = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize;
2539 for (int i = 0; i < c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize; i++)
2541 x_ci[i*DIM + XX] = x[(ia+i)*stride + XX] + shx;
2542 x_ci[i*DIM + YY] = x[(ia+i)*stride + YY] + shy;
2543 x_ci[i*DIM + ZZ] = x[(ia+i)*stride + ZZ] + shz;
2546 #else /* !GMX_SIMD4_HAVE_REAL */
2548 real * x_ci = work->x_ci_simd;
2550 for (int si = 0; si < c_gpuNumClusterPerCell; si++)
2552 for (int i = 0; i < c_nbnxnGpuClusterSize; i += GMX_SIMD4_WIDTH)
2554 int io = si*c_nbnxnGpuClusterSize + i;
2555 int ia = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize + io;
2556 for (int j = 0; j < GMX_SIMD4_WIDTH; j++)
2558 x_ci[io*DIM + j + XX*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + XX] + shx;
2559 x_ci[io*DIM + j + YY*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + YY] + shy;
2560 x_ci[io*DIM + j + ZZ*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + ZZ] + shz;
2565 #endif /* !GMX_SIMD4_HAVE_REAL */
2568 static real minimum_subgrid_size_xy(const nbnxn_grid_t *grid)
2572 return std::min(grid->cellSize[XX], grid->cellSize[YY]);
2576 return std::min(grid->cellSize[XX]/c_gpuNumClusterPerCellX,
2577 grid->cellSize[YY]/c_gpuNumClusterPerCellY);
2581 static real effective_buffer_1x1_vs_MxN(const nbnxn_grid_t *gridi,
2582 const nbnxn_grid_t *gridj)
2584 const real eff_1x1_buffer_fac_overest = 0.1;
2586 /* Determine an atom-pair list cut-off buffer size for atom pairs,
2587 * to be added to rlist (including buffer) used for MxN.
2588 * This is for converting an MxN list to a 1x1 list. This means we can't
2589 * use the normal buffer estimate, as we have an MxN list in which
2590 * some atom pairs beyond rlist are missing. We want to capture
2591 * the beneficial effect of buffering by extra pairs just outside rlist,
2592 * while removing the useless pairs that are further away from rlist.
2593 * (Also, the buffer could have been set manually, not using the estimate.)
2594 * This buffer size is an overestimate.
2595 * We add 10% of the smallest grid sub-cell dimensions.
2596 * Note that the z-size differs per cell and we don't use this,
2597 * so we overestimate.
2598 * With PME, the 10% value gives a buffer that is somewhat larger
2599 * than the effective buffer with a tolerance of 0.005 kJ/mol/ps.
2600 * Smaller tolerances or using RF lead to a smaller effective buffer,
2601 * so 10% gives a safe overestimate.
2603 return eff_1x1_buffer_fac_overest*(minimum_subgrid_size_xy(gridi) +
2604 minimum_subgrid_size_xy(gridj));
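/* Example with hypothetical sizes: for minimum sub-cell x,y dimensions of
 * 0.25 nm on both grids, the extra 1x1 buffer is
 * 0.1*(0.25 + 0.25) = 0.05 nm on top of the MxN rlist.
 */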
2607 /* Clusters at the cut-off only increase rlist by 60% of their size */
2608 static real nbnxn_rlist_inc_outside_fac = 0.6;
2610 /* Due to the cluster size, the effective pair-list cut-off is longer than
2611 * that of a simple atom pair-list. This function gives the extra distance.
2613 real nbnxn_get_rlist_effective_inc(int cluster_size_j, real atom_density)
2616 real vol_inc_i, vol_inc_j;
2618 /* We should get this from the setup, but currently it's the same for
2619 * all setups, including GPUs.
2621 cluster_size_i = NBNXN_CPU_CLUSTER_I_SIZE;
2623 vol_inc_i = (cluster_size_i - 1)/atom_density;
2624 vol_inc_j = (cluster_size_j - 1)/atom_density;
2626 return nbnxn_rlist_inc_outside_fac*std::cbrt(vol_inc_i + vol_inc_j);
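/* Worked example (hypothetical numbers): for 4-atom i- and j-clusters at
 * an atom density of 100 nm^-3 (roughly water), vol_inc_i = vol_inc_j =
 * 3/100 = 0.03 nm^3 and the increment is 0.6*cbrt(0.06) ~ 0.23 nm on top
 * of the 1x1 pair-list range.
 */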
2629 /* Estimates the interaction volume^2 for non-local interactions */
2630 static real nonlocal_vol2(const struct gmx_domdec_zones_t *zones, const rvec ls, real r)
2638 /* Here we simply add up the non-home interaction volume^2 contributions
2639 * of 1, 2 or 3 1D decomposition zones. As these volumes are not additive,
2640 * this is an overestimate, but it would only be significant in the limit
2641 * of small cells, where we anyhow need to split the lists into
2642 * as small parts as possible.
2645 for (int z = 0; z < zones->n; z++)
2647 if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
2652 for (int d = 0; d < DIM; d++)
2654 if (zones->shift[z][d] == 0)
2658 za *= zones->size[z].x1[d] - zones->size[z].x0[d];
2662 /* 4 octants of a sphere */
2663 vold_est = 0.25*M_PI*r*r*r*r;
2664 /* 4 quarter pie slices on the edges */
2665 vold_est += 4*cl*M_PI/6.0*r*r*r;
2666 /* One rectangular volume on a face */
2667 vold_est += ca*0.5*r*r;
2669 vol2_est_tot += vold_est*za;
2673 return vol2_est_tot;
2676 /* Estimates the average size of a full j-list for super/sub setup */
2677 static void get_nsubpair_target(const nbnxn_search *nbs,
2680 int min_ci_balanced,
2681 int *nsubpair_target,
2682 float *nsubpair_tot_est)
2684 /* The target value of 36 seems to be the optimum for Kepler.
2685 * Maxwell is less sensitive to the exact value.
2687 const int nsubpair_target_min = 36;
2688 const nbnxn_grid_t *grid;
2690 real r_eff_sup, vol_est, nsp_est, nsp_est_nl;
2692 grid = &nbs->grid[0];
2694 /* We don't need to balance list sizes if:
2695 * - We didn't request balancing.
2696 * - The number of grid cells >= the number of lists requested,
2697 * since we will always generate at least #cells lists.
2698 * - We don't have any cells, since then there won't be any lists.
2700 if (min_ci_balanced <= 0 || grid->nc >= min_ci_balanced || grid->nc == 0)
2702 /* nsubpair_target==0 signals no balancing */
2703 *nsubpair_target = 0;
2704 *nsubpair_tot_est = 0;
2709 ls[XX] = (grid->c1[XX] - grid->c0[XX])/(grid->numCells[XX]*c_gpuNumClusterPerCellX);
2710 ls[YY] = (grid->c1[YY] - grid->c0[YY])/(grid->numCells[YY]*c_gpuNumClusterPerCellY);
2711 ls[ZZ] = grid->na_c/(grid->atom_density*ls[XX]*ls[YY]);
2713 /* The average length of the diagonal of a sub cell */
2714 real diagonal = std::sqrt(ls[XX]*ls[XX] + ls[YY]*ls[YY] + ls[ZZ]*ls[ZZ]);
2716 /* The formulas below are a heuristic estimate of the average nsj per si */
2717 r_eff_sup = rlist + nbnxn_rlist_inc_outside_fac*gmx::square((grid->na_c - 1.0)/grid->na_c)*0.5*diagonal;
2719 if (!nbs->DomDec || nbs->zones->n == 1)
2726 gmx::square(grid->atom_density/grid->na_c)*
2727 nonlocal_vol2(nbs->zones, ls, r_eff_sup);
2732 /* Sub-cell interacts with itself */
2733 vol_est = ls[XX]*ls[YY]*ls[ZZ];
2734 /* 6/2 rectangular volume on the faces */
2735 vol_est += (ls[XX]*ls[YY] + ls[XX]*ls[ZZ] + ls[YY]*ls[ZZ])*r_eff_sup;
2736 /* 12/2 quarter pie slices on the edges */
2737 vol_est += 2*(ls[XX] + ls[YY] + ls[ZZ])*0.25*M_PI*gmx::square(r_eff_sup);
2738 /* 4 octants of a sphere */
2739 vol_est += 0.5*4.0/3.0*M_PI*gmx::power3(r_eff_sup);
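/* The four terms above add up to half the Minkowski sum of the sub-cell
 * with a sphere of radius r_eff_sup: the full self volume, half of the
 * 6 face slabs, half of the 12 edge quarter-cylinders and half of the
 * 8 corner octants; the halved outer terms avoid counting each sub-cell
 * pair twice.
 */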
2741 /* Estimate the number of cluster pairs as the local number of
2742 * clusters times the volume they interact with times the density.
2744 nsp_est = grid->nsubc_tot*vol_est*grid->atom_density/grid->na_c;
2746 /* Subtract the non-local pair count */
2747 nsp_est -= nsp_est_nl;
2749 /* For small cut-offs nsp_est will be an underestimate.
2750 * With DD nsp_est_nl is an overestimate so nsp_est can get negative.
2751 * So to avoid too small or negative nsp_est we set a minimum of
2752 * all cells interacting with all 3^3 direct neighbors (3^3-1)/2+1=14.
2753 * This might be a slight overestimate for small non-periodic groups of
2754 * atoms as will occur for a local domain with DD, but for small
2755 * groups of atoms we'll anyhow be limited by nsubpair_target_min,
2756 * so this overestimation will not matter.
2758 nsp_est = std::max(nsp_est, grid->nsubc_tot*14._real);
2762 fprintf(debug, "nsp_est local %5.1f non-local %5.1f\n",
2763 nsp_est, nsp_est_nl);
2768 nsp_est = nsp_est_nl;
2771 /* Thus the (average) maximum j-list size should be as follows.
2772 * Since there is overhead, we shouldn't make the lists too small
2773 * (and we can't chop up j-groups) so we use a minimum target size of 36.
2775 *nsubpair_target = std::max(nsubpair_target_min,
2776 static_cast<int>(nsp_est/min_ci_balanced + 0.5));
2777 *nsubpair_tot_est = static_cast<int>(nsp_est);
2781 fprintf(debug, "nbl nsp estimate %.1f, nsubpair_target %d\n",
2782 nsp_est, *nsubpair_target);
2786 /* Debug list print function */
2787 static void print_nblist_ci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
2789 for (int i = 0; i < nbl->nci; i++)
2791 fprintf(fp, "ci %4d shift %2d ncj %3d\n",
2792 nbl->ci[i].ci, nbl->ci[i].shift,
2793 nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start);
2795 for (int j = nbl->ci[i].cj_ind_start; j < nbl->ci[i].cj_ind_end; j++)
2797 fprintf(fp, " cj %5d imask %x\n",
2804 /* Debug list print function */
2805 static void print_nblist_sci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
2807 for (int i = 0; i < nbl->nsci; i++)
2809 fprintf(fp, "ci %4d shift %2d ncj4 %2d\n",
2810 nbl->sci[i].sci, nbl->sci[i].shift,
2811 nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start);
2814 for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
2816 for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
2818 fprintf(fp, " sj %5d imask %x\n",
2820 nbl->cj4[j4].imei[0].imask);
2821 for (int si = 0; si < c_gpuNumClusterPerCell; si++)
2823 if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))
2830 fprintf(fp, "ci %4d shift %2d ncj4 %2d ncp %3d\n",
2831 nbl->sci[i].sci, nbl->sci[i].shift,
2832 nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start,
2837 /* Combine pair lists *nbl generated on multiple threads into nblc */
2838 static void combine_nblists(int nnbl, nbnxn_pairlist_t **nbl,
2839 nbnxn_pairlist_t *nblc)
2841 int nsci, ncj4, nexcl;
2845 gmx_incons("combine_nblists does not support simple lists");
2850 nexcl = nblc->nexcl;
2851 for (int i = 0; i < nnbl; i++)
2853 nsci += nbl[i]->nsci;
2854 ncj4 += nbl[i]->ncj4;
2855 nexcl += nbl[i]->nexcl;
2858 if (nsci > nblc->sci_nalloc)
2860 nb_realloc_sci(nblc, nsci);
2862 if (ncj4 > nblc->cj4_nalloc)
2864 nblc->cj4_nalloc = over_alloc_small(ncj4);
2865 nbnxn_realloc_void(reinterpret_cast<void **>(&nblc->cj4),
2866 nblc->ncj4*sizeof(*nblc->cj4),
2867 nblc->cj4_nalloc*sizeof(*nblc->cj4),
2868 nblc->alloc, nblc->free);
2870 if (nexcl > nblc->excl_nalloc)
2872 nblc->excl_nalloc = over_alloc_small(nexcl);
2873 nbnxn_realloc_void(reinterpret_cast<void **>(&nblc->excl),
2874 nblc->nexcl*sizeof(*nblc->excl),
2875 nblc->excl_nalloc*sizeof(*nblc->excl),
2876 nblc->alloc, nblc->free);
2879 /* Each thread should copy its own data to the combined arrays,
2880 * as otherwise data will go back and forth between different caches.
2882 #if GMX_OPENMP && !(defined __clang_analyzer__)
2883 int nthreads = gmx_omp_nthreads_get(emntPairsearch);
2886 #pragma omp parallel for num_threads(nthreads) schedule(static)
2887 for (int n = 0; n < nnbl; n++)
2894 const nbnxn_pairlist_t *nbli;
2896 /* Determine the offset in the combined data for our thread */
2897 sci_offset = nblc->nsci;
2898 cj4_offset = nblc->ncj4;
2899 excl_offset = nblc->nexcl;
2901 for (int i = 0; i < n; i++)
2903 sci_offset += nbl[i]->nsci;
2904 cj4_offset += nbl[i]->ncj4;
2905 excl_offset += nbl[i]->nexcl;
2910 for (int i = 0; i < nbli->nsci; i++)
2912 nblc->sci[sci_offset+i] = nbli->sci[i];
2913 nblc->sci[sci_offset+i].cj4_ind_start += cj4_offset;
2914 nblc->sci[sci_offset+i].cj4_ind_end += cj4_offset;
2917 for (int j4 = 0; j4 < nbli->ncj4; j4++)
2919 nblc->cj4[cj4_offset+j4] = nbli->cj4[j4];
2920 nblc->cj4[cj4_offset+j4].imei[0].excl_ind += excl_offset;
2921 nblc->cj4[cj4_offset+j4].imei[1].excl_ind += excl_offset;
2924 for (int j4 = 0; j4 < nbli->nexcl; j4++)
2926 nblc->excl[excl_offset+j4] = nbli->excl[j4];
2929 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2932 for (int n = 0; n < nnbl; n++)
2934 nblc->nsci += nbl[n]->nsci;
2935 nblc->ncj4 += nbl[n]->ncj4;
2936 nblc->nci_tot += nbl[n]->nci_tot;
2937 nblc->nexcl += nbl[n]->nexcl;
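/* Illustration of the offsets used above (hypothetical sizes): with three
 * source lists of nsci = {10, 20, 30} appended to an initially empty
 * nblc, thread n = 0 copies its sci entries to offset 0, n = 1 to offset
 * 10 and n = 2 to offset 30. Each thread starts where the preceding
 * lists end (an exclusive prefix sum over the source counts), so all
 * threads can copy concurrently without overlap.
 */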
2941 static void balance_fep_lists(const nbnxn_search *nbs,
2942 nbnxn_pairlist_set_t *nbl_lists)
2945 int nri_tot, nrj_tot, nrj_target;
2949 nnbl = nbl_lists->nnbl;
2953 /* Nothing to balance */
2957 /* Count the total i-lists and pairs */
2960 for (int th = 0; th < nnbl; th++)
2962 nri_tot += nbl_lists->nbl_fep[th]->nri;
2963 nrj_tot += nbl_lists->nbl_fep[th]->nrj;
2966 nrj_target = (nrj_tot + nnbl - 1)/nnbl;
2968 assert(gmx_omp_nthreads_get(emntNonbonded) == nnbl);
2970 #pragma omp parallel for schedule(static) num_threads(nnbl)
2971 for (int th = 0; th < nnbl; th++)
2975 t_nblist *nbl = nbs->work[th].nbl_fep.get();
2977 /* Note that here we allocate for the total size, instead of
2978 * a per-thread estimate (which is hard to obtain).
2980 if (nri_tot > nbl->maxnri)
2982 nbl->maxnri = over_alloc_large(nri_tot);
2983 reallocate_nblist(nbl);
2985 if (nri_tot > nbl->maxnri || nrj_tot > nbl->maxnrj)
2987 nbl->maxnrj = over_alloc_small(nrj_tot);
2988 srenew(nbl->jjnr, nbl->maxnrj);
2989 srenew(nbl->excl_fep, nbl->maxnrj);
2992 clear_pairlist_fep(nbl);
2994 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2997 /* Loop over the source lists and assign and copy i-entries */
2999 nbld = nbs->work[th_dest].nbl_fep.get();
3000 for (int th = 0; th < nnbl; th++)
3004 nbls = nbl_lists->nbl_fep[th];
3006 for (int i = 0; i < nbls->nri; i++)
3010 /* The number of pairs in this i-entry */
3011 nrj = nbls->jindex[i+1] - nbls->jindex[i];
3013 /* Decide if list th_dest is too large and we should proceed
3014 * to the next destination list.
3016 if (th_dest+1 < nnbl && nbld->nrj > 0 &&
3017 nbld->nrj + nrj - nrj_target > nrj_target - nbld->nrj)
3020 nbld = nbs->work[th_dest].nbl_fep.get();
3023 nbld->iinr[nbld->nri] = nbls->iinr[i];
3024 nbld->gid[nbld->nri] = nbls->gid[i];
3025 nbld->shift[nbld->nri] = nbls->shift[i];
3027 for (int j = nbls->jindex[i]; j < nbls->jindex[i+1]; j++)
3029 nbld->jjnr[nbld->nrj] = nbls->jjnr[j];
3030 nbld->excl_fep[nbld->nrj] = nbls->excl_fep[j];
3034 nbld->jindex[nbld->nri] = nbld->nrj;
3038 /* Swap the list pointers */
3039 for (int th = 0; th < nnbl; th++)
3041 t_nblist *nbl_tmp = nbs->work[th].nbl_fep.release();
3042 nbs->work[th].nbl_fep.reset(nbl_lists->nbl_fep[th]);
3043 nbl_lists->nbl_fep[th] = nbl_tmp;
3047 fprintf(debug, "nbl_fep[%d] nri %4d nrj %4d\n",
3049 nbl_lists->nbl_fep[th]->nri,
3050 nbl_lists->nbl_fep[th]->nrj);
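/* Worked example of the balancing target above (hypothetical counts):
 * with nrj_tot = 100 j-entries over nnbl = 8 lists, nrj_target =
 * (100 + 7)/8 = 13; an i-entry goes to the next destination list as soon
 * as keeping it would overshoot the target by more than skipping it
 * would undershoot.
 */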
3055 /* Returns the next ci to be processed by our thread */
3056 static gmx_bool next_ci(const nbnxn_grid_t *grid,
3057 int nth, int ci_block,
3058 int *ci_x, int *ci_y,
3064 if (*ci_b == ci_block)
3066 /* Jump to the next block assigned to this task */
3067 *ci += (nth - 1)*ci_block;
3071 if (*ci >= grid->nc)
3076 while (*ci >= grid->cxy_ind[*ci_x*grid->numCells[YY] + *ci_y + 1])
3079 if (*ci_y == grid->numCells[YY])
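/* Illustration of the block striding above (hypothetical values): with
 * nth = 4 threads and ci_block = 5, thread 2 handles blocks 2, 6, 10,
 * ...: it processes cells 10..14, skips the (nth - 1)*ci_block = 15
 * cells owned by the other threads, and continues with cells 30..34.
 */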
3089 /* Returns the distance^2 for which we put cell pairs in the list
3090 * without checking atom pair distances. This is usually < rlist^2.
3092 static float boundingbox_only_distance2(const nbnxn_grid_t *gridi,
3093 const nbnxn_grid_t *gridj,
3097 /* If the distance between two sub-cell bounding boxes is less
3098 * than this distance, do not check the distance between
3099 * all particle pairs in the sub-cell, since then it is likely
3100 * that the box pair has atom pairs within the cut-off.
3101 * We use the nblist cut-off minus 0.5 times the average x/y diagonal
3102 * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
3103 * Using more than 0.5 gains at most 0.5%.
3104 * If forces are calculated more than twice, the performance gain
3105 * in the force calculation outweighs the cost of checking.
3106 * Note that with sub-cell lists, the atom-pair distance check
3107 * is only performed when only 1 out of 8 sub-cells is within range;
3108 * this is because the GPU is much faster than the CPU.
3113 bbx = 0.5*(gridi->cellSize[XX] + gridj->cellSize[XX]);
3114 bby = 0.5*(gridi->cellSize[YY] + gridj->cellSize[YY]);
3117 bbx /= c_gpuNumClusterPerCellX;
3118 bby /= c_gpuNumClusterPerCellY;
3121 rbb2 = std::max(0.0, rlist - 0.5*std::sqrt(bbx*bbx + bby*bby));
3127 return static_cast<float>((1 + GMX_FLOAT_EPS)*rbb2);
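/* Worked example (hypothetical sizes): for average sub-cell x,y sizes
 * bbx = bby = 0.3 nm and rlist = 1.0 nm, the threshold distance is
 * 1.0 - 0.5*sqrt(0.09 + 0.09) ~ 0.788 nm, i.e. rbb2 ~ 0.62 nm^2;
 * bounding-box pairs closer than this are added to the list without
 * per-atom distance checks.
 */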
3131 static int get_ci_block_size(const nbnxn_grid_t *gridi,
3132 gmx_bool bDomDec, int nth)
3134 const int ci_block_enum = 5;
3135 const int ci_block_denom = 11;
3136 const int ci_block_min_atoms = 16;
3139 /* Here we decide how to distribute the blocks over the threads.
3140 * We use prime numbers to try to avoid that the grid size becomes
3141 * a multiple of the number of threads, which would lead to some
3142 * threads getting "inner" pairs and others getting boundary pairs,
3143 * which in turn will lead to load imbalance between threads.
3144 * Set the block size as 5/11/ntask times the average number of cells
3145 * in a y,z slab. This should ensure a quite uniform distribution
3146 * of the grid parts of the different threads along all three grid
3147 * zone boundaries with 3D domain decomposition. At the same time
3148 * the blocks will not become too small.
3150 ci_block = (gridi->nc*ci_block_enum)/(ci_block_denom*gridi->numCells[XX]*nth);
3152 /* Ensure the blocks are not too small: avoids cache invalidation */
3153 if (ci_block*gridi->na_sc < ci_block_min_atoms)
3155 ci_block = (ci_block_min_atoms + gridi->na_sc - 1)/gridi->na_sc;
3158 /* Without domain decomposition
3159 * or with fewer than 3 blocks per task, divide into nth blocks.
3161 if (!bDomDec || nth*3*ci_block > gridi->nc)
3163 ci_block = (gridi->nc + nth - 1)/nth;
3166 if (ci_block > 1 && (nth - 1)*ci_block >= gridi->nc)
3168 /* Some threads have no work. Although reducing the block size
3169 * does not decrease the block count on the first few threads,
3170 * with GPUs better mixing of "upper" cells that have more empty
3171 * clusters results in a somewhat lower max load over all threads.
3172 * Without GPUs the regime of so few atoms per thread is less
3173 * performance relevant, but with 8-wide SIMD the same reasoning
3174 * applies, since the pair list uses 4 i-atom "sub-clusters".
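/* Worked example of the block-size heuristic above (hypothetical grid):
 * with gridi->nc = 1000 cells, numCells[XX] = 10 columns and nth = 8
 * threads, ci_block = 1000*5/(11*10*8) = 5 cells (integer division),
 * i.e. 5/11 of the per-thread share (100/8 = 12.5 cells) of an average
 * y,z slab, rounded down.
 */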
3182 /* Returns the number of bits to right-shift a cluster index to obtain
3183 * the corresponding force buffer flag index.
3185 static int getBufferFlagShift(int numAtomsPerCluster)
3187 int bufferFlagShift = 0;
3188 while ((numAtomsPerCluster << bufferFlagShift) < NBNXN_BUFFERFLAG_SIZE)
3193 return bufferFlagShift;
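/* For example, assuming NBNXN_BUFFERFLAG_SIZE = 16 atoms per flag block:
 * 4-atom clusters give a shift of 2 (4 << 2 == 16) and 8-atom clusters
 * a shift of 1, so clusterIndex >> shift yields the flag block index.
 */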
3196 /* Generates the part of pair-list nbl assigned to our thread */
3197 static void nbnxn_make_pairlist_part(const nbnxn_search *nbs,
3198 const nbnxn_grid_t *gridi,
3199 const nbnxn_grid_t *gridj,
3200 nbnxn_search_work_t *work,
3201 const nbnxn_atomdata_t *nbat,
3202 const t_blocka &exclusions,
3206 gmx_bool bFBufferFlag,
3209 float nsubpair_tot_est,
3211 nbnxn_pairlist_t *nbl,
3216 real rlist2, rl_fep2 = 0;
3218 int ci_b, ci, ci_x, ci_y, ci_xy, cj;
3222 real bx0, bx1, by0, by1, bz0, bz1;
3224 real d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
3225 int cxf, cxl, cyf, cyf_x, cyl;
3226 int numDistanceChecks;
3227 int gridi_flag_shift = 0, gridj_flag_shift = 0;
3228 gmx_bitmask_t *gridj_flag = nullptr;
3229 int ncj_old_i, ncj_old_j;
3231 nbs_cycle_start(&work->cc[enbsCCsearch]);
3233 if (gridj->bSimple != nbl->bSimple || gridi->bSimple != nbl->bSimple)
3235 gmx_incons("Grid incompatible with pair-list");
3239 nbl->na_sc = gridj->na_sc;
3240 nbl->na_ci = gridj->na_c;
3241 nbl->na_cj = nbnxn_kernel_to_cluster_j_size(nb_kernel_type);
3242 na_cj_2log = get_2log(nbl->na_cj);
3248 /* Determine conversion of clusters to flag blocks */
3249 gridi_flag_shift = getBufferFlagShift(nbl->na_ci);
3250 gridj_flag_shift = getBufferFlagShift(nbl->na_cj);
3252 gridj_flag = work->buffer_flags.flag;
3255 copy_mat(nbs->box, box);
3257 rlist2 = nbl->rlist*nbl->rlist;
3259 if (nbs->bFEP && !nbl->bSimple)
3261 /* Determine an atom-pair list cut-off distance for FEP atom pairs.
3262 * We should not simply use rlist, since then we would not have
3263 * the small, effective buffering of the NxN lists.
3264 * The buffer is an overestimate, but the resulting cost for pairs
3265 * beyond rlist is negligible compared to the FEP pairs within rlist.
3267 rl_fep2 = nbl->rlist + effective_buffer_1x1_vs_MxN(gridi, gridj);
3271 fprintf(debug, "nbl_fep atom-pair rlist %f\n", rl_fep2);
3273 rl_fep2 = rl_fep2*rl_fep2;
3276 rbb2 = boundingbox_only_distance2(gridi, gridj, nbl->rlist, nbl->bSimple);
3280 fprintf(debug, "nbl bounding box only distance %f\n", std::sqrt(rbb2));
3283 /* Set the shift range */
3284 for (int d = 0; d < DIM; d++)
3286 /* Check if we need periodicity shifts.
3287 * Without PBC or with domain decomposition we don't need them.
3289 if (d >= ePBC2npbcdim(nbs->ePBC) || nbs->dd_dim[d])
3296 box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < std::sqrt(rlist2))
3306 const bool bSimple = nbl->bSimple;
3307 gmx::ArrayRef<const nbnxn_bb_t> bb_i;
3309 gmx::ArrayRef<const float> pbb_i;
3319 /* We use the normal bounding box format for both grid types */
3322 gmx::ArrayRef<const float> bbcz_i = gridi->bbcz;
3323 gmx::ArrayRef<const int> flags_i = gridi->flags;
3324 gmx::ArrayRef<const float> bbcz_j = gridj->bbcz;
3325 int cell0_i = gridi->cell0;
3329 fprintf(debug, "nbl nc_i %d col.av. %.1f ci_block %d\n",
3330 gridi->nc, gridi->nc/static_cast<double>(gridi->numCells[XX]*gridi->numCells[YY]), ci_block);
3333 numDistanceChecks = 0;
3335 /* Initialize ci_b and ci to 1 before where we want them to start,
3336 * as they will both be incremented in next_ci.
3339 ci = th*ci_block - 1;
3342 while (next_ci(gridi, nth, ci_block, &ci_x, &ci_y, &ci_b, &ci))
3344 if (bSimple && flags_i[ci] == 0)
3349 ncj_old_i = nbl->ncj;
3352 if (gridj != gridi && shp[XX] == 0)
3356 bx1 = bb_i[ci].upper[BB_X];
3360 bx1 = gridi->c0[XX] + (ci_x+1)*gridi->cellSize[XX];
3362 if (bx1 < gridj->c0[XX])
3364 d2cx = gmx::square(gridj->c0[XX] - bx1);
3373 ci_xy = ci_x*gridi->numCells[YY] + ci_y;
3375 /* Loop over shift vectors in three dimensions */
3376 for (int tz = -shp[ZZ]; tz <= shp[ZZ]; tz++)
3378 shz = tz*box[ZZ][ZZ];
3380 bz0 = bbcz_i[ci*NNBSBB_D ] + shz;
3381 bz1 = bbcz_i[ci*NNBSBB_D+1] + shz;
3389 d2z = gmx::square(bz1);
3393 d2z = gmx::square(bz0 - box[ZZ][ZZ]);
3396 d2z_cx = d2z + d2cx;
3398 if (d2z_cx >= rlist2)
3403 bz1_frac = bz1/(gridi->cxy_ind[ci_xy+1] - gridi->cxy_ind[ci_xy]);
3408 /* The check with bz1_frac close to or larger than 1 comes later */
3410 for (int ty = -shp[YY]; ty <= shp[YY]; ty++)
3412 shy = ty*box[YY][YY] + tz*box[ZZ][YY];
3416 by0 = bb_i[ci].lower[BB_Y] + shy;
3417 by1 = bb_i[ci].upper[BB_Y] + shy;
3421 by0 = gridi->c0[YY] + (ci_y )*gridi->cellSize[YY] + shy;
3422 by1 = gridi->c0[YY] + (ci_y+1)*gridi->cellSize[YY] + shy;
3425 get_cell_range<YY>(by0, by1,
3436 if (by1 < gridj->c0[YY])
3438 d2z_cy += gmx::square(gridj->c0[YY] - by1);
3440 else if (by0 > gridj->c1[YY])
3442 d2z_cy += gmx::square(by0 - gridj->c1[YY]);
3445 for (int tx = -shp[XX]; tx <= shp[XX]; tx++)
3447 shift = XYZ2IS(tx, ty, tz);
3449 if (c_pbcShiftBackward && gridi == gridj && shift > CENTRAL)
3454 shx = tx*box[XX][XX] + ty*box[YY][XX] + tz*box[ZZ][XX];
3458 bx0 = bb_i[ci].lower[BB_X] + shx;
3459 bx1 = bb_i[ci].upper[BB_X] + shx;
3463 bx0 = gridi->c0[XX] + (ci_x )*gridi->cellSize[XX] + shx;
3464 bx1 = gridi->c0[XX] + (ci_x+1)*gridi->cellSize[XX] + shx;
3467 get_cell_range<XX>(bx0, bx1,
3479 new_ci_entry(nbl, cell0_i+ci, shift, flags_i[ci]);
3483 new_sci_entry(nbl, cell0_i+ci, shift);
3486 if ((!c_pbcShiftBackward || (shift == CENTRAL &&
3490 /* Leave the pairs with i > j.
3491 * x is the major index, so skip half of it.
3498 set_icell_bb_simple(bb_i, ci, shx, shy, shz,
3504 set_icell_bbxxxx_supersub(pbb_i, ci, shx, shy, shz,
3507 set_icell_bb_supersub(bb_i, ci, shx, shy, shz,
3512 nbs->icell_set_x(cell0_i+ci, shx, shy, shz,
3513 nbat->xstride, nbat->x,
3516 for (int cx = cxf; cx <= cxl; cx++)
3519 if (gridj->c0[XX] + cx*gridj->cellSize[XX] > bx1)
3521 d2zx += gmx::square(gridj->c0[XX] + cx*gridj->cellSize[XX] - bx1);
3523 else if (gridj->c0[XX] + (cx+1)*gridj->cellSize[XX] < bx0)
3525 d2zx += gmx::square(gridj->c0[XX] + (cx+1)*gridj->cellSize[XX] - bx0);
3528 if (gridi == gridj &&
3530 (!c_pbcShiftBackward || shift == CENTRAL) &&
3533 /* Leave the pairs with i > j.
3534 * Skip half of y when i and j have the same x.
3543 for (int cy = cyf_x; cy <= cyl; cy++)
3545 const int columnStart = gridj->cxy_ind[cx*gridj->numCells[YY] + cy];
3546 const int columnEnd = gridj->cxy_ind[cx*gridj->numCells[YY] + cy + 1];
3549 if (gridj->c0[YY] + cy*gridj->cellSize[YY] > by1)
3551 d2zxy += gmx::square(gridj->c0[YY] + cy*gridj->cellSize[YY] - by1);
3553 else if (gridj->c0[YY] + (cy+1)*gridj->cellSize[YY] < by0)
3555 d2zxy += gmx::square(gridj->c0[YY] + (cy+1)*gridj->cellSize[YY] - by0);
3557 if (columnStart < columnEnd && d2zxy < rlist2)
3559 /* To improve efficiency in the common case
3560 * of a homogeneous particle distribution,
3561 * we estimate the index of the middle cell
3562 * in range (midCell). We search down and up
3563 * starting from this index.
3565 * Note that the bbcz_j array contains bounds
3566 * for i-clusters, thus for clusters of 4 atoms.
3567 * For the common case where the j-cluster size
3568 * is 8, we could step with a stride of 2,
3569 * but we do not do this because it would
3570 * complicate this code even more.
3572 int midCell = columnStart + static_cast<int>(bz1_frac*(columnEnd - columnStart));
3573 if (midCell >= columnEnd)
3575 midCell = columnEnd - 1;
3580 /* Find the lowest cell that can possibly be within range.
3582 * Check if we hit the bottom of the grid,
3583 * if the j-cell is below the i-cell and if so,
3584 * if it is within range.
3586 int downTestCell = midCell;
3587 while (downTestCell >= columnStart &&
3588 (bbcz_j[downTestCell*NNBSBB_D + 1] >= bz0 ||
3589 d2xy + gmx::square(bbcz_j[downTestCell*NNBSBB_D + 1] - bz0) < rlist2))
3593 int firstCell = downTestCell + 1;
3595 /* Find the highest cell that can possibly be within range.
3597 * Check if we hit the top of the grid,
3598 * if the j-cell is above the i-cell and if so,
3599 * if it is within range.
3601 int upTestCell = midCell + 1;
3602 while (upTestCell < columnEnd &&
3603 (bbcz_j[upTestCell*NNBSBB_D] <= bz1 ||
3604 d2xy + gmx::square(bbcz_j[upTestCell*NNBSBB_D] - bz1) < rlist2))
3608 int lastCell = upTestCell - 1;
3610 #define NBNXN_REFCODE 0
3613 /* Simple reference code, for debugging,
3614 * overrides the more complex code above.
3616 firstCell = columnEnd;
3618 for (int k = columnStart; k < columnEnd; k++)
3620 if (d2xy + gmx::square(bbcz_j[k*NNBSBB_D + 1] - bz0) < rlist2 &&
3625 if (d2xy + gmx::square(bbcz_j[k*NNBSBB_D] - bz1) < rlist2 &&
3636 /* We want each atom/cell pair only once,
3637 * only use cj >= ci.
3639 if (!c_pbcShiftBackward || shift == CENTRAL)
3641 firstCell = std::max(firstCell, ci);
3645 if (firstCell <= lastCell)
3647 GMX_ASSERT(firstCell >= columnStart && lastCell < columnEnd, "The range should reside within the current grid column");
3649 /* For f buffer flags with simple lists */
3650 ncj_old_j = nbl->ncj;
3654 /* We have a maximum of 2 j-clusters
3655 * per i-cluster sized cell.
3657 check_cell_list_space_simple(nbl, 2*(lastCell - firstCell + 1));
3661 check_cell_list_space_supersub(nbl, lastCell - firstCell + 1);
3664 switch (nb_kernel_type)
3666 case nbnxnk4x4_PlainC:
3667 makeClusterListSimple(gridj,
3668 nbl, ci, firstCell, lastCell,
3669 (gridi == gridj && shift == CENTRAL),
3672 &numDistanceChecks);
3674 #ifdef GMX_NBNXN_SIMD_4XN
3675 case nbnxnk4xN_SIMD_4xN:
3676 makeClusterListSimd4xn(gridj,
3677 nbl, ci, firstCell, lastCell,
3678 (gridi == gridj && shift == CENTRAL),
3681 &numDistanceChecks);
3684 #ifdef GMX_NBNXN_SIMD_2XNN
3685 case nbnxnk4xN_SIMD_2xNN:
3686 makeClusterListSimd2xnn(gridj,
3687 nbl, ci, firstCell, lastCell,
3688 (gridi == gridj && shift == CENTRAL),
3691 &numDistanceChecks);
3694 case nbnxnk8x8x8_PlainC:
3695 case nbnxnk8x8x8_GPU:
3696 for (cj = firstCell; cj <= lastCell; cj++)
3698 make_cluster_list_supersub(gridi, gridj,
3700 (gridi == gridj && shift == CENTRAL && ci == cj),
3701 nbat->xstride, nbat->x,
3703 &numDistanceChecks);
3708 if (bFBufferFlag && nbl->ncj > ncj_old_j)
3710 int cbf = nbl->cj[ncj_old_j].cj >> gridj_flag_shift;
3711 int cbl = nbl->cj[nbl->ncj-1].cj >> gridj_flag_shift;
3712 for (int cb = cbf; cb <= cbl; cb++)
3714 bitmask_init_bit(&gridj_flag[cb], th);
3718 nbl->ncjInUse += nbl->ncj - ncj_old_j;
3724 /* Set the exclusions for this ci list */
3727 setExclusionsForSimpleIentry(nbs,
3729 shift == CENTRAL && gridi == gridj,
3736 make_fep_list(nbs, nbat, nbl,
3737 shift == CENTRAL && gridi == gridj,
3738 &(nbl->ci[nbl->nci]),
3739 gridi, gridj, nbl_fep);
3744 setExclusionsForGpuIentry(nbs,
3746 shift == CENTRAL && gridi == gridj,
3747 nbl->sci[nbl->nsci],
3752 make_fep_list_supersub(nbs, nbat, nbl,
3753 shift == CENTRAL && gridi == gridj,
3754 &(nbl->sci[nbl->nsci]),
3757 gridi, gridj, nbl_fep);
3761 /* Close this ci list */
3764 close_ci_entry_simple(nbl);
3768 close_ci_entry_supersub(nbl,
3770 progBal, nsubpair_tot_est,
3777 if (bFBufferFlag && nbl->ncj > ncj_old_i)
3779 bitmask_init_bit(&(work->buffer_flags.flag[(gridi->cell0+ci)>>gridi_flag_shift]), th);
3783 work->ndistc = numDistanceChecks;
3785 nbs_cycle_stop(&work->cc[enbsCCsearch]);
3787 GMX_ASSERT(nbl->ncjInUse == nbl->ncj || nbs->bFEP, "Without free-energy all cj pair-list entries should be in use. Note that subsequent code does not make use of the equality, this check is only here to catch bugs");
3791 fprintf(debug, "number of distance checks %d\n", numDistanceChecks);
3795 print_nblist_statistics_simple(debug, nbl, nbs, rlist);
3799 print_nblist_statistics_supersub(debug, nbl, nbs, rlist);
3804 fprintf(debug, "nbl FEP list pairs: %d\n", nbl_fep->nrj);
3809 static void reduce_buffer_flags(const nbnxn_search *nbs,
3811 const nbnxn_buffer_flags_t *dest)
3813 for (int s = 0; s < nsrc; s++)
3815 gmx_bitmask_t * flag = nbs->work[s].buffer_flags.flag;
3817 for (int b = 0; b < dest->nflag; b++)
3819 bitmask_union(&(dest->flag[b]), flag[b]);
3824 static void print_reduction_cost(const nbnxn_buffer_flags_t *flags, int nout)
3826 int nelem, nkeep, ncopy, nred, out;
3827 gmx_bitmask_t mask_0;
3833 bitmask_init_bit(&mask_0, 0);
3834 for (int b = 0; b < flags->nflag; b++)
3836 if (bitmask_is_equal(flags->flag[b], mask_0))
3838 /* Only flag 0 is set, no copy or reduction required */
3842 else if (!bitmask_is_zero(flags->flag[b]))
3845 for (out = 0; out < nout; out++)
3847 if (bitmask_is_set(flags->flag[b], out))
3864 fprintf(debug, "nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
3866 nelem/static_cast<double>(flags->nflag),
3867 nkeep/static_cast<double>(flags->nflag),
3868 ncopy/static_cast<double>(flags->nflag),
3869 nred/static_cast<double>(flags->nflag));
3872 /* Copies the list entries from src to dest when cjStart <= *cjGlobal < cjEnd.
3873 * *cjGlobal is updated with the cj count in src.
3874 * When setFlags==true, flag bit t is set in flag for all i and j clusters.
3876 template<bool setFlags>
3877 static void copySelectedListRange(const nbnxn_ci_t * gmx_restrict srcCi,
3878 const nbnxn_pairlist_t * gmx_restrict src,
3879 nbnxn_pairlist_t * gmx_restrict dest,
3880 gmx_bitmask_t *flag,
3881 int iFlagShift, int jFlagShift, int t)
3883 int ncj = srcCi->cj_ind_end - srcCi->cj_ind_start;
3885 if (dest->nci + 1 >= dest->ci_nalloc)
3887 nb_realloc_ci(dest, dest->nci + 1);
3889 check_cell_list_space_simple(dest, ncj);
3891 dest->ci[dest->nci] = *srcCi;
3892 dest->ci[dest->nci].cj_ind_start = dest->ncj;
3893 dest->ci[dest->nci].cj_ind_end = dest->ncj + ncj;
3897 bitmask_init_bit(&flag[srcCi->ci >> iFlagShift], t);
3900 for (int j = srcCi->cj_ind_start; j < srcCi->cj_ind_end; j++)
3902 dest->cj[dest->ncj++] = src->cj[j];
3906 /* NOTE: This is relatively expensive, since this
3907 * operation is done for all elements in the list,
3908 * whereas at list generation this is done only
3909 * once for each flag entry.
3911 bitmask_init_bit(&flag[src->cj[j].cj >> jFlagShift], t);
3918 /* This routine re-balances the pairlists such that all are nearly equally
3919 * sized. Only whole i-entries are moved between lists. These are moved
3920 * between the ends of the lists, such that the buffer reduction cost should
3921 * not change significantly.
3922 * Note that all original reduction flags are currently kept. This can lead
3923 * to reduction of parts of the force buffer that could be avoided. But since
3924 * the original lists are quite balanced, this will only give minor overhead.
3926 static void rebalanceSimpleLists(int numLists,
3927 nbnxn_pairlist_t * const * const srcSet,
3928 nbnxn_pairlist_t **destSet,
3929 gmx::ArrayRef<nbnxn_search_work_t> searchWork)
3932 for (int s = 0; s < numLists; s++)
3934 ncjTotal += srcSet[s]->ncjInUse;
3936 int ncjTarget = (ncjTotal + numLists - 1)/numLists;
3938 #pragma omp parallel num_threads(numLists)
3940 int t = gmx_omp_get_thread_num();
3942 int cjStart = ncjTarget* t;
3943 int cjEnd = ncjTarget*(t + 1);
3945 /* The destination pair-list for task/thread t */
3946 nbnxn_pairlist_t *dest = destSet[t];
3948 clear_pairlist(dest);
3949 dest->bSimple = srcSet[0]->bSimple;
3950 dest->na_ci = srcSet[0]->na_ci;
3951 dest->na_cj = srcSet[0]->na_cj;
3953 /* Note that the flags in the work struct (still) contain flags
3954 * for all entries that are present in srcSet->nbl[t].
3956 gmx_bitmask_t *flag = searchWork[t].buffer_flags.flag;
3958 int iFlagShift = getBufferFlagShift(dest->na_ci);
3959 int jFlagShift = getBufferFlagShift(dest->na_cj);
3962 for (int s = 0; s < numLists && cjGlobal < cjEnd; s++)
3964 const nbnxn_pairlist_t *src = srcSet[s];
3966 if (cjGlobal + src->ncjInUse > cjStart)
3968 for (int i = 0; i < src->nci && cjGlobal < cjEnd; i++)
3970 const nbnxn_ci_t *srcCi = &src->ci[i];
3971 int ncj = srcCi->cj_ind_end - srcCi->cj_ind_start;
3972 if (cjGlobal >= cjStart)
3974 /* If the source list is not our own, we need to set
3975 * extra flags (the template bool parameter).
3979 copySelectedListRange
3982 flag, iFlagShift, jFlagShift, t);
3986 copySelectedListRange
3989 dest, flag, iFlagShift, jFlagShift, t);
3997 cjGlobal += src->ncjInUse;
4001 dest->ncjInUse = dest->ncj;
4005 int ncjTotalNew = 0;
4006 for (int s = 0; s < numLists; s++)
4008 ncjTotalNew += destSet[s]->ncjInUse;
4010 GMX_RELEASE_ASSERT(ncjTotalNew == ncjTotal, "The total size of the lists before and after rebalancing should match");
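/* Illustration of the partitioning above (hypothetical counts): with
 * ncjTotal = 1000 cj entries over numLists = 4 lists, ncjTarget = 250
 * and thread t copies the i-entries whose global cj range overlaps
 * [250*t, 250*(t + 1)). As only whole i-entries move, the final list
 * sizes can deviate from the target by up to one i-entry's cj count.
 */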
4014 /* Returns whether the pairlists are so imbalanced that it is worth rebalancing. */
4015 static bool checkRebalanceSimpleLists(const nbnxn_pairlist_set_t *listSet)
4017 int numLists = listSet->nnbl;
4020 for (int s = 0; s < numLists; s++)
4022 ncjMax = std::max(ncjMax, listSet->nbl[s]->ncjInUse);
4023 ncjTotal += listSet->nbl[s]->ncjInUse;
4027 fprintf(debug, "Pair-list ncjMax %d ncjTotal %d\n", ncjMax, ncjTotal);
4029 /* The rebalancing adds 3% extra time to the search. Heuristically we
4030 * determined that under common conditions the non-bonded kernel balance
4031 * improvement will outweigh this when the imbalance is more than 3%.
4032 * But this will, obviously, depend on search vs kernel time and nstlist.
4034 const real rebalanceTolerance = 1.03;
4036 return numLists*ncjMax > ncjTotal*rebalanceTolerance;
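/* Example (hypothetical counts): with numLists = 4 and ncjTotal = 1000,
 * rebalancing is triggered once the largest list exceeds
 * 1000*1.03/4 = 257.5, i.e. at ncjMax >= 258.
 */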
4039 /* Perform a count (linear) sort to sort the smaller lists to the end.
4040 * This avoids load imbalance on the GPU, as large lists will be
4041 * scheduled and executed first and the smaller lists later.
4042 * Load balancing between multi-processors only happens at the end,
4043 * where smaller lists lead to more effective load balancing.
4044 * The sorting is done on the cj4 count, not on the actual pair counts.
4045 * Not only does this make the sort faster, but it also results in
4046 * better load balancing than using a list sorted on exact load.
4047 * This function swaps the pointer in the pair list to avoid a copy operation.
4049 static void sort_sci(nbnxn_pairlist_t *nbl)
4051 nbnxn_list_work_t *work;
4053 nbnxn_sci_t *sci_sort;
4055 if (nbl->ncj4 <= nbl->nsci)
4057 /* nsci = 0 or all sci have size 1, sorting won't change the order */
4063 /* We will distinguish differences up to double the average */
4064 m = (2*nbl->ncj4)/nbl->nsci;
4066 if (m + 1 > work->sort_nalloc)
4068 work->sort_nalloc = over_alloc_large(m + 1);
4069 srenew(work->sort, work->sort_nalloc);
4072 if (work->sci_sort_nalloc != nbl->sci_nalloc)
4074 work->sci_sort_nalloc = nbl->sci_nalloc;
4075 nbnxn_realloc_void(reinterpret_cast<void **>(&work->sci_sort),
4077 work->sci_sort_nalloc*sizeof(*work->sci_sort),
4078 nbl->alloc, nbl->free);
4081 /* Count the entries of each size */
4082 for (int i = 0; i <= m; i++)
4086 for (int s = 0; s < nbl->nsci; s++)
4088 int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
4091 /* Calculate the offset for each count */
4094 for (int i = m - 1; i >= 0; i--)
4097 work->sort[i] = work->sort[i + 1] + s0;
4101 /* Sort entries directly into place */
4102 sci_sort = work->sci_sort;
4103 for (int s = 0; s < nbl->nsci; s++)
4105 int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
4106 sci_sort[work->sort[i]++] = nbl->sci[s];
4109 /* Swap the sci pointers so we use the new, sorted list */
4110 work->sci_sort = nbl->sci;
4111 nbl->sci = sci_sort;
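/* Worked example of the counting sort above (hypothetical list): for
 * nsci = 4 entries with cj4 counts {1, 3, 2, 3} and ncj4 = 9, the bucket
 * limit is m = (2*9)/4 = 4. Counting gives sort[1] = 1, sort[2] = 1 and
 * sort[3] = 2; accumulating offsets from the largest bucket down and
 * scattering produces the order {3, 3, 2, 1}: the largest lists first,
 * so the GPU schedules them earliest.
 */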
4114 /* Make a local or non-local pair-list, depending on iloc */
4115 void nbnxn_make_pairlist(nbnxn_search *nbs,
4116 nbnxn_atomdata_t *nbat,
4117 const t_blocka *excl,
4119 int min_ci_balanced,
4120 nbnxn_pairlist_set_t *nbl_list,
4125 nbnxn_grid_t *gridi, *gridj;
4127 int nsubpair_target;
4128 float nsubpair_tot_est;
4130 nbnxn_pairlist_t **nbl;
4132 gmx_bool CombineNBLists;
4134 int np_tot, np_noq, np_hlj, nap;
4136 nnbl = nbl_list->nnbl;
4137 nbl = nbl_list->nbl;
4138 CombineNBLists = nbl_list->bCombined;
4142 fprintf(debug, "ns making %d nblists\n", nnbl);
4145 nbat->bUseBufferFlags = (nbat->nout > 1);
4146 /* We should re-init the flags before making the first list */
4147 if (nbat->bUseBufferFlags && LOCAL_I(iloc))
4149 init_buffer_flags(&nbat->buffer_flags, nbat->natoms);
4152 if (nbl_list->bSimple)
4155 switch (nb_kernel_type)
4157 #ifdef GMX_NBNXN_SIMD_4XN
4158 case nbnxnk4xN_SIMD_4xN:
4159 nbs->icell_set_x = icell_set_x_simd_4xn;
4162 #ifdef GMX_NBNXN_SIMD_2XNN
4163 case nbnxnk4xN_SIMD_2xNN:
4164 nbs->icell_set_x = icell_set_x_simd_2xnn;
4168 nbs->icell_set_x = icell_set_x_simple;
4172 /* MSVC 2013 complains about switch statements without case */
4173 nbs->icell_set_x = icell_set_x_simple;
4178 nbs->icell_set_x = icell_set_x_supersub;
4183 /* Only zone (grid) 0 vs 0 */
4190 nzi = nbs->zones->nizone;
4193 if (!nbl_list->bSimple && min_ci_balanced > 0)
4195 get_nsubpair_target(nbs, iloc, rlist, min_ci_balanced,
4196 &nsubpair_target, &nsubpair_tot_est);
4200 nsubpair_target = 0;
4201 nsubpair_tot_est = 0;
4204 /* Clear all pair-lists */
4205 for (int th = 0; th < nnbl; th++)
4207 clear_pairlist(nbl[th]);
4211 clear_pairlist_fep(nbl_list->nbl_fep[th]);
4215 for (int zi = 0; zi < nzi; zi++)
4217 gridi = &nbs->grid[zi];
4219 if (NONLOCAL_I(iloc))
4221 zj0 = nbs->zones->izone[zi].j0;
4222 zj1 = nbs->zones->izone[zi].j1;
4228 for (int zj = zj0; zj < zj1; zj++)
4230 gridj = &nbs->grid[zj];
4234 fprintf(debug, "ns search grid %d vs %d\n", zi, zj);
4237 nbs_cycle_start(&nbs->cc[enbsCCsearch]);
4239 ci_block = get_ci_block_size(gridi, nbs->DomDec, nnbl);
4241 /* With GPU: generate progressively smaller lists for
4242 * load balancing for local only or non-local with 2 zones.
4244 progBal = (LOCAL_I(iloc) || nbs->zones->n <= 2);
4246 #pragma omp parallel for num_threads(nnbl) schedule(static)
4247 for (int th = 0; th < nnbl; th++)
4251 /* Re-init the thread-local work flag data before making
4252 * the first list (not an elegant conditional).
4254 if (nbat->bUseBufferFlags && ((zi == 0 && zj == 0)))
4256 init_buffer_flags(&nbs->work[th].buffer_flags, nbat->natoms);
4259 if (CombineNBLists && th > 0)
4261 clear_pairlist(nbl[th]);
4264 /* Divide the i super cells equally over the nblists */
4265 nbnxn_make_pairlist_part(nbs, gridi, gridj,
4266 &nbs->work[th], nbat, *excl,
4270 nbat->bUseBufferFlags,
4272 progBal, nsubpair_tot_est,
4275 nbl_list->nbl_fep[th]);
4277 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
4279 nbs_cycle_stop(&nbs->cc[enbsCCsearch]);
4284 for (int th = 0; th < nnbl; th++)
4286 inc_nrnb(nrnb, eNR_NBNXN_DIST2, nbs->work[th].ndistc);
4288 if (nbl_list->bSimple)
4290 np_tot += nbl[th]->ncj;
4291 np_noq += nbl[th]->work->ncj_noq;
4292 np_hlj += nbl[th]->work->ncj_hlj;
4296 /* This count ignores potential subsequent pair pruning */
4297 np_tot += nbl[th]->nci_tot;
4300 nap = nbl[0]->na_ci*nbl[0]->na_cj;
4301 nbl_list->natpair_ljq = (np_tot - np_noq)*nap - np_hlj*nap/2;
4302 nbl_list->natpair_lj = np_noq*nap;
4303 nbl_list->natpair_q = np_hlj*nap/2;
4305 if (CombineNBLists && nnbl > 1)
4307 nbs_cycle_start(&nbs->cc[enbsCCcombine]);
4309 combine_nblists(nnbl-1, nbl+1, nbl[0]);
4311 nbs_cycle_stop(&nbs->cc[enbsCCcombine]);
4316 if (nbl_list->bSimple)
4318 if (nnbl > 1 && checkRebalanceSimpleLists(nbl_list))
4320 rebalanceSimpleLists(nbl_list->nnbl, nbl_list->nbl, nbl_list->nbl_work, nbs->work);
4322 /* Swap the pointers to the two sets of pair lists */
4323 nbnxn_pairlist_t **tmp = nbl_list->nbl;
4324 nbl_list->nbl = nbl_list->nbl_work;
4325 nbl_list->nbl_work = tmp;
4330 /* Sort the entries on size, large ones first */
4331 if (CombineNBLists || nnbl == 1)
4337 #pragma omp parallel for num_threads(nnbl) schedule(static)
4338 for (int th = 0; th < nnbl; th++)
4344 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
4349 if (nbat->bUseBufferFlags)
4351 reduce_buffer_flags(nbs, nbl_list->nnbl, &nbat->buffer_flags);
4356 /* Balance the free-energy lists over all the threads */
4357 balance_fep_lists(nbs, nbl_list);
4360 /* This is a fresh list, so not pruned, stored using ci and nci.
4361 * ciOuter and nciOuter are invalid at this point.
4363 GMX_ASSERT(nbl_list->nbl[0]->nciOuter == -1, "nciOuter should have been set to -1 to signal that it is invalid");
4365 /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
4368 nbs->search_count++;
4370 if (nbs->print_cycles &&
4371 (!nbs->DomDec || !LOCAL_I(iloc)) &&
4372 nbs->search_count % 100 == 0)
4374 nbs_cycle_print(stderr, nbs);
4377 /* If we have more than one list, they either got rebalanced (CPU)
4378 * or combined (GPU), so we should dump the final result to debug.
4380 if (debug && nbl_list->nnbl > 1)
4382 if (nbl_list->bSimple)
4384 for (int t = 0; t < nbl_list->nnbl; t++)
4386 print_nblist_statistics_simple(debug, nbl_list->nbl[t], nbs, rlist);
4391 print_nblist_statistics_supersub(debug, nbl_list->nbl[0], nbs, rlist);
4399 if (nbl_list->bSimple)
4401 for (int t = 0; t < nbl_list->nnbl; t++)
4403 print_nblist_ci_cj(debug, nbl_list->nbl[t]);
4408 print_nblist_sci_cj(debug, nbl_list->nbl[0]);
4412 if (nbat->bUseBufferFlags)
4414 print_reduction_cost(&nbat->buffer_flags, nbl_list->nnbl);
4419 void nbnxnPrepareListForDynamicPruning(nbnxn_pairlist_set_t *listSet)
4421 /* TODO: Restructure the lists so we have actual outer and inner
4422 * list objects so we can set a single pointer instead of
4423 * swapping several pointers.
4426 for (int i = 0; i < listSet->nnbl; i++)
4428 /* The search produced a list in ci/cj.
4429 * Swap the list pointers so that the outer list is in ciOuter,cjOuter
4430 * and we can prune that to get an inner list in ci/cj.
4432 nbnxn_pairlist_t *list = listSet->nbl[i];
4433 list->nciOuter = list->nci;
4435 nbnxn_ci_t *ciTmp = list->ciOuter;
4436 list->ciOuter = list->ci;
4439 nbnxn_cj_t *cjTmp = list->cjOuter;
4440 list->cjOuter = list->cj;
4443 /* Signal that this inner list is currently invalid */