 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/ns.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_geometry.h"
#include "gromacs/nbnxm/nbnxm_simd.h"
#include "gromacs/nbnxm/pairlistset.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"
#include "gromacs/topology/block.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"

#include "pairlistwork.h"
using namespace gmx; // TODO: Remove when this file is moved into gmx namespace

// Convenience alias for partial Nbnxm namespace usage
using InteractionLocality = Nbnxm::InteractionLocality;
/* We shift the i-particles backward for PBC.
 * This leads to more conditionals than shifting forward.
 * We do this to get more balanced pair lists.
 */
constexpr bool c_pbcShiftBackward = true;
static void nbs_cycle_clear(nbnxn_cycle_t *cc)
for (int i = 0; i < enbsCCnr; i++)

static double Mcyc_av(const nbnxn_cycle_t *cc)
{
    return static_cast<double>(cc->c)*1e-6/cc->count;
}

static void nbs_cycle_print(FILE *fp, const nbnxn_search *nbs)
fprintf(fp, "ns %4d grid %4.1f search %4.1f red.f %5.3f",
        nbs->cc[enbsCCgrid].count,
        Mcyc_av(&nbs->cc[enbsCCgrid]),
        Mcyc_av(&nbs->cc[enbsCCsearch]),
        Mcyc_av(&nbs->cc[enbsCCreducef]));
if (nbs->work.size() > 1)
if (nbs->cc[enbsCCcombine].count > 0)
fprintf(fp, " comb %5.2f",
        Mcyc_av(&nbs->cc[enbsCCcombine]));
fprintf(fp, " s. th");
for (const nbnxn_search_work_t &work : nbs->work)
fprintf(fp, " %4.1f",
        Mcyc_av(&work.cc[enbsCCsearch]));
/* Layout for the nonbonded NxN pair lists */
enum class NbnxnLayout
{
    NoSimd4x4, // i-cluster size 4, j-cluster size 4
    Simd4xN,   // i-cluster size 4, j-cluster size SIMD width
    Simd2xNN,  // i-cluster size 4, j-cluster size half SIMD width
    Gpu8x8x8   // i-cluster size 8, j-cluster size 8 + super-clustering
};
/* Returns the j-cluster size */
template <NbnxnLayout layout>
static constexpr int jClusterSize()
{
    static_assert(layout == NbnxnLayout::NoSimd4x4 || layout == NbnxnLayout::Simd4xN || layout == NbnxnLayout::Simd2xNN,
                  "Currently jClusterSize only supports CPU layouts");

    return layout == NbnxnLayout::Simd4xN ? GMX_SIMD_REAL_WIDTH : (layout == NbnxnLayout::Simd2xNN ? GMX_SIMD_REAL_WIDTH/2 : c_nbnxnCpuIClusterSize);
}
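
/* Added illustration (not part of the original source): a compile-time
 * sanity check of the mapping above for an 8-wide SIMD build, where the
 * 4xN layout uses full-width and the 2xNN layout half-width j-clusters.
 */
#if GMX_SIMD && GMX_SIMD_REAL_WIDTH == 8
static_assert(jClusterSize<NbnxnLayout::Simd4xN>() == 8 &&
              jClusterSize<NbnxnLayout::Simd2xNN>() == 4,
              "Example j-cluster sizes for an 8-wide SIMD build");
#endif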
/*! \brief Returns the j-cluster index given the i-cluster index.
 *
 * \tparam jClusterSize      The number of atoms in a j-cluster
 * \tparam jSubClusterIndex  The j-sub-cluster index (0/1), used when size(j-cluster) < size(i-cluster)
 * \param[in] ci             The i-cluster index
 */
template <int jClusterSize, int jSubClusterIndex>
static inline int cjFromCi(int ci)
{
    static_assert(jClusterSize == c_nbnxnCpuIClusterSize/2 || jClusterSize == c_nbnxnCpuIClusterSize || jClusterSize == c_nbnxnCpuIClusterSize*2,
                  "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    static_assert(jSubClusterIndex == 0 || jSubClusterIndex == 1,
                  "Only sub-cluster indices 0 and 1 are supported");

    if (jClusterSize == c_nbnxnCpuIClusterSize/2)
    {
        if (jSubClusterIndex == 0)
        {
            return ci << 1;
        }
        else
        {
            return ((ci + 1) << 1) - 1;
        }
    }
    else if (jClusterSize == c_nbnxnCpuIClusterSize)
    {
        return ci;
    }
    else
    {
        return ci >> 1;
    }
}
/*! \brief Returns the j-cluster index given the i-cluster index.
 *
 * \tparam layout            The pair-list layout
 * \tparam jSubClusterIndex  The j-sub-cluster index (0/1), used when size(j-cluster) < size(i-cluster)
 * \param[in] ci             The i-cluster index
 */
template <NbnxnLayout layout, int jSubClusterIndex>
static inline int cjFromCi(int ci)
{
    constexpr int clusterSize = jClusterSize<layout>();

    return cjFromCi<clusterSize, jSubClusterIndex>(ci);
}
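
/* Worked example (added illustration, assuming 4-atom i-clusters): with
 * 2-atom j-clusters, i-cluster 3 spans j-clusters 6 (sub-cluster 0) and
 * 7 (sub-cluster 1); with 4-atom j-clusters the index is unchanged; with
 * 8-atom j-clusters, i-clusters 6 and 7 both map onto j-cluster 3.
 */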
/* Returns the nbnxn coordinate data index given the i-cluster index */
template <NbnxnLayout layout>
static inline int xIndexFromCi(int ci)
{
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == c_nbnxnCpuIClusterSize/2 || clusterSize == c_nbnxnCpuIClusterSize || clusterSize == c_nbnxnCpuIClusterSize*2,
                  "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize <= c_nbnxnCpuIClusterSize)
    {
        /* Coordinates are stored packed in groups of 4 */
        return ci*STRIDE_P4;
    }
    else
    {
        /* Coordinates packed in 8, i-cluster size is half the packing width */
        return (ci >> 1)*STRIDE_P8 + (ci & 1)*(c_packX8 >> 1);
    }
}

/* Returns the nbnxn coordinate data index given the j-cluster index */
template <NbnxnLayout layout>
static inline int xIndexFromCj(int cj)
{
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == c_nbnxnCpuIClusterSize/2 || clusterSize == c_nbnxnCpuIClusterSize || clusterSize == c_nbnxnCpuIClusterSize*2,
                  "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize == c_nbnxnCpuIClusterSize/2)
    {
        /* Coordinates are stored packed in groups of 4 */
        return (cj >> 1)*STRIDE_P4 + (cj & 1)*(c_packX4 >> 1);
    }
    else if (clusterSize == c_nbnxnCpuIClusterSize)
    {
        /* Coordinates are stored packed in groups of 4 */
        return cj*STRIDE_P4;
    }
    else
    {
        /* Coordinates are stored packed in groups of 8 */
        return cj*STRIDE_P8;
    }
}
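
/* Worked example (added illustration, assuming c_packX4 == 4): for
 * half-width j-clusters of 2 atoms, j-cluster 5 starts at coordinate
 * index (5 >> 1)*STRIDE_P4 + (5 & 1)*2, i.e. two atoms into the third
 * 4-atom coordinate pack.
 */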
gmx_bool nbnxn_kernel_pairlist_simple(int nb_kernel_type)
{
    if (nb_kernel_type == nbnxnkNotSet)
    {
        gmx_fatal(FARGS, "Non-bonded kernel type not set for Verlet-style pair-list.");
    }

    switch (nb_kernel_type)
    {
        case nbnxnk8x8x8_GPU:
        case nbnxnk8x8x8_PlainC:
            return FALSE;

        case nbnxnk4x4_PlainC:
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:
            return TRUE;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
            return FALSE;
    }
}
/* Initializes a single nbnxn_pairlist_t data structure */
static void nbnxn_init_pairlist_fep(t_nblist *nl)
nl->type      = GMX_NBLIST_INTERACTION_FREE_ENERGY;
nl->igeometry = GMX_NBLIST_GEOMETRY_PARTICLE_PARTICLE;
/* The interaction functions are set in the free energy kernel function */
nl->jindex   = nullptr;
nl->excl_fep = nullptr;

static void free_nblist(t_nblist *nl)

nbnxn_search_work_t::nbnxn_search_work_t() :
    buffer_flags({0, nullptr, 0}),
    nbl_fep(new t_nblist)
{
    nbnxn_init_pairlist_fep(nbl_fep.get());
}

nbnxn_search_work_t::~nbnxn_search_work_t()
{
    sfree(buffer_flags.flag);

    free_nblist(nbl_fep.get());
}
nbnxn_search::nbnxn_search(const ivec *n_dd_cells,
                           const gmx_domdec_zones_t *zones,
    ePBC(epbcNONE), // The correct value will be set during the gridding

// The correct value will be set during the gridding
DomDec = n_dd_cells != nullptr;
for (int d = 0; d < DIM; d++)
if ((*n_dd_cells)[d] > 1)
/* Each grid matches a DD zone */
grid.resize(numGrids);

/* Initialize detailed nbsearch cycle counting */
print_cycles = (getenv("GMX_NBNXN_CYCLE") != nullptr);

nbnxn_search *nbnxn_init_search(const ivec *n_dd_cells,
                                const gmx_domdec_zones_t *zones,
return new nbnxn_search(n_dd_cells, zones, bFEP, nthread_max);

static void init_buffer_flags(nbnxn_buffer_flags_t *flags,
flags->nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE;
if (flags->nflag > flags->flag_nalloc)
flags->flag_nalloc = over_alloc_large(flags->nflag);
srenew(flags->flag, flags->flag_nalloc);
for (int b = 0; b < flags->nflag; b++)
bitmask_clear(&(flags->flag[b]));
/* Returns the pair-list cutoff between a bounding box and a grid cell given an atom-to-atom pair-list cutoff
 *
 * Given a cutoff distance between atoms, this function returns the cutoff
 * distance between a bounding box of a group of atoms and a grid cell.
 * Since atoms can be geometrically outside of the cell they have been
 * assigned to (when atom groups instead of individual atoms are assigned
 * to cells), the distance returned can be larger than the input.
 */
static real listRangeForBoundingBoxToGridCell(real rlist,
                                              const nbnxn_grid_t &grid)
{
    return rlist + grid.maxAtomGroupRadius;
}
/* Returns the pair-list cutoff between two grid cells given an atom-to-atom pair-list cutoff
 *
 * Given a cutoff distance between atoms, this function returns the cutoff
 * distance between two grid cells.
 * Since atoms can be geometrically outside of the cell they have been
 * assigned to (when atom groups instead of individual atoms are assigned
 * to cells), the distance returned can be larger than the input.
 */
static real listRangeForGridCellToGridCell(real rlist,
                                           const nbnxn_grid_t &iGrid,
                                           const nbnxn_grid_t &jGrid)
{
    return rlist + iGrid.maxAtomGroupRadius + jGrid.maxAtomGroupRadius;
}
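
/* Worked example (added illustration): with rlist = 1.0 nm and a maximum
 * atom-group radius of 0.1 nm on both grids, cell pairs have to be
 * considered up to 1.0 + 0.1 + 0.1 = 1.2 nm, since atoms can stick out
 * of the cells they are assigned to.
 */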
/* Determines the cell range along one dimension that
 * the bounding box b0 - b1 sees.
 */
template <int dim>
static void get_cell_range(real b0, real b1,
                           const nbnxn_grid_t &jGrid,
                           real d2, real rlist, int *cf, int *cl)
{
    real listRangeBBToCell2 = gmx::square(listRangeForBoundingBoxToGridCell(rlist, jGrid));
    real distanceInCells    = (b0 - jGrid.c0[dim])*jGrid.invCellSize[dim];
    *cf                     = std::max(static_cast<int>(distanceInCells), 0);

    while (*cf > 0 &&
           d2 + gmx::square((b0 - jGrid.c0[dim]) - (*cf - 1 + 1)*jGrid.cellSize[dim]) < listRangeBBToCell2)
    {
        (*cf)--;
    }

    *cl = std::min(static_cast<int>((b1 - jGrid.c0[dim])*jGrid.invCellSize[dim]), jGrid.numCells[dim] - 1);
    while (*cl < jGrid.numCells[dim] - 1 &&
           d2 + gmx::square((*cl + 1)*jGrid.cellSize[dim] - (b1 - jGrid.c0[dim])) < listRangeBBToCell2)
    {
        (*cl)++;
    }
}
/* Reference code calculating the distance^2 between two bounding boxes */
static float box_dist2(float bx0, float bx1, float by0,
                       float by1, float bz0, float bz1,
                       const nbnxn_bb_t *bb)
{
    float d2 = 0;
    float dl, dh, dm, dm0;

    dl  = bx0 - bb->upper[BB_X];
    dh  = bb->lower[BB_X] - bx1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = by0 - bb->upper[BB_Y];
    dh  = bb->lower[BB_Y] - by1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bz0 - bb->upper[BB_Z];
    dh  = bb->lower[BB_Z] - bz1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
}
/* Plain C code calculating the distance^2 between two bounding boxes */
static float subc_bb_dist2(int si,
                           const nbnxn_bb_t *bb_i_ci,
                           int csj,
                           gmx::ArrayRef<const nbnxn_bb_t> bb_j_all)
{
    const nbnxn_bb_t *bb_i = bb_i_ci + si;
    const nbnxn_bb_t *bb_j = bb_j_all.data() + csj;

    float d2 = 0;
    float dl, dh, dm, dm0;

    dl  = bb_i->lower[BB_X] - bb_j->upper[BB_X];
    dh  = bb_j->lower[BB_X] - bb_i->upper[BB_X];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Y] - bb_j->upper[BB_Y];
    dh  = bb_j->lower[BB_Y] - bb_i->upper[BB_Y];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Z] - bb_j->upper[BB_Z];
    dh  = bb_j->lower[BB_Z] - bb_i->upper[BB_Z];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
}
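
/* Added note: the per-dimension pattern above is the standard
 * interval-distance construction: d = max(lo_i - hi_j, lo_j - hi_i, 0)
 * is zero when the intervals overlap, so summing d_x^2 + d_y^2 + d_z^2
 * yields the squared distance between the two boxes.
 */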
#if NBNXN_SEARCH_BB_SIMD4

/* 4-wide SIMD code for bb distance for bb format xyz0 */
static float subc_bb_dist2_simd4(int si,
                                 const nbnxn_bb_t *bb_i_ci,
                                 int csj,
                                 gmx::ArrayRef<const nbnxn_bb_t> bb_j_all)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    Simd4Float bb_i_S0, bb_i_S1;
    Simd4Float bb_j_S0, bb_j_S1;
    Simd4Float dl_S, dh_S, dm_S, dm0_S;

    bb_i_S0 = load4(&bb_i_ci[si].lower[0]);
    bb_i_S1 = load4(&bb_i_ci[si].upper[0]);
    bb_j_S0 = load4(&bb_j_all[csj].lower[0]);
    bb_j_S1 = load4(&bb_j_all[csj].upper[0]);

    dl_S = bb_i_S0 - bb_j_S1;
    dh_S = bb_j_S0 - bb_i_S1;

    dm_S  = max(dl_S, dh_S);
    dm0_S = max(dm_S, simd4SetZeroF());

    return dotProduct(dm0_S, dm0_S);
}
/* Calculate bb bounding distances of bb_i[si,...,si+3] and store them in d2 */
#define SUBC_BB_DIST2_SIMD4_XXXX_INNER(si, bb_i, d2) \
    { \
        int shi; \
 \
        Simd4Float dx_0, dy_0, dz_0; \
        Simd4Float dx_1, dy_1, dz_1; \
 \
        Simd4Float mx, my, mz; \
        Simd4Float m0x, m0y, m0z; \
 \
        Simd4Float d2x, d2y, d2z; \
        Simd4Float d2s, d2t; \
 \
        shi = (si)*NNBSBB_D*DIM; \
 \
        xi_l = load4((bb_i)+shi+0*STRIDE_PBB); \
        yi_l = load4((bb_i)+shi+1*STRIDE_PBB); \
        zi_l = load4((bb_i)+shi+2*STRIDE_PBB); \
        xi_h = load4((bb_i)+shi+3*STRIDE_PBB); \
        yi_h = load4((bb_i)+shi+4*STRIDE_PBB); \
        zi_h = load4((bb_i)+shi+5*STRIDE_PBB); \
 \
        dx_0 = xi_l - xj_h; \
        dy_0 = yi_l - yj_h; \
        dz_0 = zi_l - zj_h; \
 \
        dx_1 = xj_l - xi_h; \
        dy_1 = yj_l - yi_h; \
        dz_1 = zj_l - zi_h; \
 \
        mx = max(dx_0, dx_1); \
        my = max(dy_0, dy_1); \
        mz = max(dz_0, dz_1); \
 \
        m0x = max(mx, zero); \
        m0y = max(my, zero); \
        m0z = max(mz, zero); \
 \
        d2x = m0x*m0x; \
        d2y = m0y*m0y; \
        d2z = m0z*m0z; \
 \
        d2s = d2x + d2y; \
        d2t = d2s + d2z; \
 \
        store4((d2)+(si), d2t); \
    }
/* 4-wide SIMD code for nsi bb distances for bb format xxxxyyyyzzzz */
static void subc_bb_dist2_simd4_xxxx(const float *bb_j,
                                     int nsi, const float *bb_i,
                                     float *d2)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    Simd4Float xj_l, yj_l, zj_l;
    Simd4Float xj_h, yj_h, zj_h;
    Simd4Float xi_l, yi_l, zi_l;
    Simd4Float xi_h, yi_h, zi_h;

    Simd4Float zero = setZero();

    xj_l = Simd4Float(bb_j[0*STRIDE_PBB]);
    yj_l = Simd4Float(bb_j[1*STRIDE_PBB]);
    zj_l = Simd4Float(bb_j[2*STRIDE_PBB]);
    xj_h = Simd4Float(bb_j[3*STRIDE_PBB]);
    yj_h = Simd4Float(bb_j[4*STRIDE_PBB]);
    zj_h = Simd4Float(bb_j[5*STRIDE_PBB]);

    /* Here we "loop" over si (0,STRIDE_PBB) from 0 to nsi with step STRIDE_PBB.
     * But as we know the number of iterations is 1 or 2, we unroll manually.
     */
    SUBC_BB_DIST2_SIMD4_XXXX_INNER(0, bb_i, d2);
    if (STRIDE_PBB < nsi)
    {
        SUBC_BB_DIST2_SIMD4_XXXX_INNER(STRIDE_PBB, bb_i, d2);
    }
}

#endif /* NBNXN_SEARCH_BB_SIMD4 */
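
/* Added layout note: in the packed xxxxyyyyzzzz bounding-box format used
 * above, STRIDE_PBB boxes are stored component-wise: the lower x values
 * of four boxes, then their lower y and z values, followed by the upper
 * corners in the same order. A single 4-wide load therefore fetches one
 * component of four boxes at once.
 */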
/* Returns whether any atom pair from two clusters is within distance sqrt(rlist2) */
static inline gmx_bool
clusterpair_in_range(const NbnxnPairlistGpuWork &work,
                     int si,
                     int csj, int stride, const real *x_j,
                     real rlist2)
{
#if !GMX_SIMD4_HAVE_REAL

    /* Plain C version.
     * All coordinates are stored as xyzxyz...
     */

    const real *x_i = work.iSuperClusterData.x.data();

    for (int i = 0; i < c_nbnxnGpuClusterSize; i++)
    {
        int i0 = (si*c_nbnxnGpuClusterSize + i)*DIM;
        for (int j = 0; j < c_nbnxnGpuClusterSize; j++)
        {
            int j0 = (csj*c_nbnxnGpuClusterSize + j)*stride;

            real d2 = gmx::square(x_i[i0] - x_j[j0]) + gmx::square(x_i[i0+1] - x_j[j0+1]) + gmx::square(x_i[i0+2] - x_j[j0+2]);

            if (d2 < rlist2)
            {
                return TRUE;
            }
        }
    }

    return FALSE;

#else /* !GMX_SIMD4_HAVE_REAL */

    /* 4-wide SIMD version.
     * The coordinates x_i are stored as xxxxyyyy..., x_j is stored xyzxyz...
     * Using 8-wide AVX(2) is not faster on Intel Sandy Bridge and Haswell.
     */
    static_assert(c_nbnxnGpuClusterSize == 8 || c_nbnxnGpuClusterSize == 4,
                  "A cluster is hard-coded to 4/8 atoms.");

    Simd4Real rc2_S = Simd4Real(rlist2);

    const real *x_i = work.iSuperClusterData.xSimd.data();

    int       dim_stride = c_nbnxnGpuClusterSize*DIM;
    Simd4Real ix_S0      = load4(x_i + si*dim_stride + 0*GMX_SIMD4_WIDTH);
    Simd4Real iy_S0      = load4(x_i + si*dim_stride + 1*GMX_SIMD4_WIDTH);
    Simd4Real iz_S0      = load4(x_i + si*dim_stride + 2*GMX_SIMD4_WIDTH);

    Simd4Real ix_S1, iy_S1, iz_S1;
    if (c_nbnxnGpuClusterSize == 8)
    {
        ix_S1 = load4(x_i + si*dim_stride + 3*GMX_SIMD4_WIDTH);
        iy_S1 = load4(x_i + si*dim_stride + 4*GMX_SIMD4_WIDTH);
        iz_S1 = load4(x_i + si*dim_stride + 5*GMX_SIMD4_WIDTH);
    }
    /* We loop from the outer to the inner particles to maximize
     * the chance that we find a pair in range quickly and return.
     */
    int j0 = csj*c_nbnxnGpuClusterSize;
    int j1 = j0 + c_nbnxnGpuClusterSize - 1;
    while (j1 > j0)
    {
        Simd4Real jx0_S, jy0_S, jz0_S;
        Simd4Real jx1_S, jy1_S, jz1_S;

        Simd4Real dx_S0, dy_S0, dz_S0;
        Simd4Real dx_S1, dy_S1, dz_S1;
        Simd4Real dx_S2, dy_S2, dz_S2;
        Simd4Real dx_S3, dy_S3, dz_S3;

        Simd4Real rsq_S0, rsq_S1, rsq_S2, rsq_S3;

        Simd4Bool wco_S0, wco_S1, wco_S2, wco_S3;
        Simd4Bool wco_any_S01, wco_any_S23, wco_any_S;

        jx0_S = Simd4Real(x_j[j0*stride+0]);
        jy0_S = Simd4Real(x_j[j0*stride+1]);
        jz0_S = Simd4Real(x_j[j0*stride+2]);

        jx1_S = Simd4Real(x_j[j1*stride+0]);
        jy1_S = Simd4Real(x_j[j1*stride+1]);
        jz1_S = Simd4Real(x_j[j1*stride+2]);

        /* Calculate distance */
        dx_S0 = ix_S0 - jx0_S;
        dy_S0 = iy_S0 - jy0_S;
        dz_S0 = iz_S0 - jz0_S;
        dx_S2 = ix_S0 - jx1_S;
        dy_S2 = iy_S0 - jy1_S;
        dz_S2 = iz_S0 - jz1_S;
        if (c_nbnxnGpuClusterSize == 8)
        {
            dx_S1 = ix_S1 - jx0_S;
            dy_S1 = iy_S1 - jy0_S;
            dz_S1 = iz_S1 - jz0_S;
            dx_S3 = ix_S1 - jx1_S;
            dy_S3 = iy_S1 - jy1_S;
            dz_S3 = iz_S1 - jz1_S;
        }

        /* rsq = dx*dx+dy*dy+dz*dz */
        rsq_S0 = norm2(dx_S0, dy_S0, dz_S0);
        rsq_S2 = norm2(dx_S2, dy_S2, dz_S2);
        if (c_nbnxnGpuClusterSize == 8)
        {
            rsq_S1 = norm2(dx_S1, dy_S1, dz_S1);
            rsq_S3 = norm2(dx_S3, dy_S3, dz_S3);
        }

        wco_S0 = (rsq_S0 < rc2_S);
        wco_S2 = (rsq_S2 < rc2_S);
        if (c_nbnxnGpuClusterSize == 8)
        {
            wco_S1 = (rsq_S1 < rc2_S);
            wco_S3 = (rsq_S3 < rc2_S);
        }
        if (c_nbnxnGpuClusterSize == 8)
        {
            wco_any_S01 = wco_S0 || wco_S1;
            wco_any_S23 = wco_S2 || wco_S3;
            wco_any_S   = wco_any_S01 || wco_any_S23;
        }
        else
        {
            wco_any_S = wco_S0 || wco_S2;
        }

        if (anyTrue(wco_any_S))
        {
            return TRUE;
        }

        j0++;
        j1--;
    }

    return FALSE;

#endif /* !GMX_SIMD4_HAVE_REAL */
}
/* Returns the j-cluster index for index cjIndex in a cj list */
static inline int nblCj(gmx::ArrayRef<const nbnxn_cj_t> cjList,
                        int cjIndex)
{
    return cjList[cjIndex].cj;
}

/* Returns the j-cluster index for index cjIndex in a cj4 list */
static inline int nblCj(gmx::ArrayRef<const nbnxn_cj4_t> cj4List,
                        int cjIndex)
{
    return cj4List[cjIndex/c_nbnxnGpuJgroupSize].cj[cjIndex & (c_nbnxnGpuJgroupSize - 1)];
}

/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
static unsigned int nbl_imask0(const NbnxnPairlistGpu *nbl, int cj_ind)
{
    return nbl->cj4[cj_ind/c_nbnxnGpuJgroupSize].imei[0].imask;
}
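
/* Worked example (added illustration): with c_nbnxnGpuJgroupSize == 4,
 * j-cluster index 10 in a cj4 list lives in group 10/4 == 2 at slot
 * 10 & 3 == 2, so nblCj() reads cj4List[2].cj[2].
 */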
/* Initializes a single NbnxnPairlistCpu data structure */
static void nbnxn_init_pairlist(NbnxnPairlistCpu *nbl)
nbl->na_ci = c_nbnxnCpuIClusterSize;
nbl->ciOuter.clear();
nbl->cjOuter.clear();

nbl->work = new NbnxnPairlistCpuWork();

NbnxnPairlistGpu::NbnxnPairlistGpu(gmx::PinningPolicy pinningPolicy) :
    na_ci(c_nbnxnGpuClusterSize),
    na_cj(c_nbnxnGpuClusterSize),
    na_sc(c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize),
    sci({}, {pinningPolicy}),
    cj4({}, {pinningPolicy}),
    excl({}, {pinningPolicy}),
static_assert(c_nbnxnGpuNumClusterPerSupercluster == c_gpuNumClusterPerCell,
              "The search code assumes that a super-cluster matches a search grid cell");

static_assert(sizeof(cj4[0].imei[0].imask)*8 >= c_nbnxnGpuJgroupSize*c_gpuNumClusterPerCell,
              "The i super-cluster cluster interaction mask does not contain a sufficient number of bits");

static_assert(sizeof(excl[0])*8 >= c_nbnxnGpuJgroupSize*c_gpuNumClusterPerCell, "The GPU exclusion mask does not contain a sufficient number of bits");

// We always want a first entry without any exclusions
work = new NbnxnPairlistGpuWork();
void nbnxn_init_pairlist_set(nbnxn_pairlist_set_t *nbl_list,
                             gmx_bool bSimple, gmx_bool bCombined)
GMX_RELEASE_ASSERT(!bSimple || !bCombined, "Can only combine non-simple lists");

nbl_list->bSimple   = bSimple;
nbl_list->bCombined = bCombined;

nbl_list->nnbl = gmx_omp_nthreads_get(emntNonbonded);

if (!nbl_list->bCombined &&
    nbl_list->nnbl > NBNXN_BUFFERFLAG_MAX_THREADS)
gmx_fatal(FARGS, "%d OpenMP threads were requested. Since the non-bonded force buffer reduction is prohibitively slow with more than %d threads, we do not allow this. Use %d or less OpenMP threads.",
          nbl_list->nnbl, NBNXN_BUFFERFLAG_MAX_THREADS, NBNXN_BUFFERFLAG_MAX_THREADS);

snew(nbl_list->nbl, nbl_list->nnbl);
if (nbl_list->nnbl > 1)
snew(nbl_list->nbl_work, nbl_list->nnbl);
snew(nbl_list->nblGpu, nbl_list->nnbl);
snew(nbl_list->nbl_fep, nbl_list->nnbl);
/* Execute in order to avoid memory interleaving between threads */
#pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
for (int i = 0; i < nbl_list->nnbl; i++)
/* Allocate the nblist data structure locally on each thread
 * to optimize memory access for NUMA architectures.
 */
nbl_list->nbl[i] = new NbnxnPairlistCpu();
nbnxn_init_pairlist(nbl_list->nbl[i]);
if (nbl_list->nnbl > 1)
nbl_list->nbl_work[i] = new NbnxnPairlistCpu();
nbnxn_init_pairlist(nbl_list->nbl_work[i]);
/* Only list 0 is used on the GPU, use normal allocation for i>0 */
auto pinningPolicy = (i == 0 ? gmx::PinningPolicy::PinnedIfSupported : gmx::PinningPolicy::CannotBePinned);

nbl_list->nblGpu[i] = new NbnxnPairlistGpu(pinningPolicy);

snew(nbl_list->nbl_fep[i], 1);
nbnxn_init_pairlist_fep(nbl_list->nbl_fep[i]);
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics(FILE *fp, const NbnxnPairlistCpu *nbl,
                                    const nbnxn_search *nbs, real rl)
{
    const nbnxn_grid_t *grid;
    int cs[SHIFTS];
    int npexcl;

    grid = &nbs->grid[0];

    fprintf(fp, "nbl nci %zu ncj %d\n",
            nbl->ci.size(), nbl->ncjInUse);
    fprintf(fp, "nbl na_cj %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_cj, rl, nbl->ncjInUse, nbl->ncjInUse/static_cast<double>(grid->nc),
            nbl->ncjInUse/static_cast<double>(grid->nc)*grid->na_cj,
            nbl->ncjInUse/static_cast<double>(grid->nc)*grid->na_cj/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nc*grid->na_cj/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    fprintf(fp, "nbl average j cell list length %.1f\n",
            0.25*nbl->ncjInUse/std::max(static_cast<double>(nbl->ci.size()), 1.0));

    for (int s = 0; s < SHIFTS; s++)
    {
        cs[s] = 0;
    }
    npexcl = 0;
    for (const nbnxn_ci_t &ciEntry : nbl->ci)
    {
        cs[ciEntry.shift & NBNXN_CI_SHIFT] +=
            ciEntry.cj_ind_end - ciEntry.cj_ind_start;

        int j = ciEntry.cj_ind_start;
        while (j < ciEntry.cj_ind_end &&
               nbl->cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
        {
            npexcl++;
            j++;
        }
    }
    fprintf(fp, "nbl cell pairs, total: %zu excl: %d %.1f%%\n",
            nbl->cj.size(), npexcl, 100*npexcl/std::max(static_cast<double>(nbl->cj.size()), 1.0));
    for (int s = 0; s < SHIFTS; s++)
    {
        if (cs[s] > 0)
        {
            fprintf(fp, "nbl shift %2d ncj %3d\n", s, cs[s]);
        }
    }
}
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics(FILE *fp, const NbnxnPairlistGpu *nbl,
                                    const nbnxn_search *nbs, real rl)
{
    const nbnxn_grid_t *grid;
    int c[c_gpuNumClusterPerCell + 1];
    double sum_nsp, sum_nsp2;
    int nsp_max;

    /* This code only produces correct statistics with domain decomposition */
    grid = &nbs->grid[0];

    fprintf(fp, "nbl nsci %zu ncj4 %zu nsi %d excl4 %zu\n",
            nbl->sci.size(), nbl->cj4.size(), nbl->nci_tot, nbl->excl.size());
    fprintf(fp, "nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_ci, rl, nbl->nci_tot, nbl->nci_tot/static_cast<double>(grid->nsubc_tot),
            nbl->nci_tot/static_cast<double>(grid->nsubc_tot)*grid->na_c,
            nbl->nci_tot/static_cast<double>(grid->nsubc_tot)*grid->na_c/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nsubc_tot*grid->na_c/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    for (int si = 0; si <= c_gpuNumClusterPerCell; si++)
    {
        c[si] = 0;
    }
    for (const nbnxn_sci_t &sci : nbl->sci)
    {
        int nsp = 0;
        for (int j4 = sci.cj4_ind_start; j4 < sci.cj4_ind_end; j4++)
        for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
        for (int si = 0; si < c_gpuNumClusterPerCell; si++)
        if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))

        nsp_max = std::max(nsp_max, nsp);
    }
    if (!nbl->sci.empty())
    {
        sum_nsp  /= nbl->sci.size();
        sum_nsp2 /= nbl->sci.size();
    }
    fprintf(fp, "nbl #cluster-pairs: av %.1f stddev %.1f max %d\n",
            sum_nsp, std::sqrt(sum_nsp2 - sum_nsp*sum_nsp), nsp_max);

    if (!nbl->cj4.empty())
    {
        for (int b = 0; b <= c_gpuNumClusterPerCell; b++)
        {
            fprintf(fp, "nbl j-list #i-subcell %d %7d %4.1f\n",
                    b, c[b], 100.0*c[b]/size_t {nbl->cj4.size()*c_nbnxnGpuJgroupSize});
        }
    }
}
/* Returns a pointer to the exclusion mask for j-cluster-group \p cj4 and warp \p warp
 * Generates a new exclusion entry when the j-cluster-group uses
 * the default all-interaction mask at call time, so the returned mask
 * can be modified when needed.
 */
static nbnxn_excl_t *get_exclusion_mask(NbnxnPairlistGpu *nbl,
                                        const int cj4,
                                        const int warp)
{
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
    {
        /* No exclusions set, make a new list entry */
        const size_t oldSize = nbl->excl.size();
        GMX_ASSERT(oldSize >= 1, "We should always have entry [0]");
        /* Add entry with default values: no exclusions */
        nbl->excl.resize(oldSize + 1);
        nbl->cj4[cj4].imei[warp].excl_ind = oldSize;
    }

    return &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
}
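
/* Usage sketch (added illustration): callers fetch the mask of one warp
 * of a cj4 group and then clear bits for excluded atom pairs, e.g.
 *
 *   nbnxn_excl_t *excl = get_exclusion_mask(nbl, cj4_ind, warp);
 *   excl->pair[pairIndex] &= ~maskBit;
 *
 * Entry 0 of nbl->excl stays the shared all-interaction mask, so it is
 * never handed out for modification.
 */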
static void set_self_and_newton_excls_supersub(NbnxnPairlistGpu *nbl,
                                               int cj4_ind, int sj_offset,
                                               int i_cluster_in_cell)
{
    nbnxn_excl_t *excl[c_nbnxnGpuClusterpairSplit];

    /* Here we only set the self and double pair exclusions */

    /* Reserve extra elements, so the resize() in get_exclusion_mask()
     * will not invalidate excl entries in the loop below
     */
    nbl->excl.reserve(nbl->excl.size() + c_nbnxnGpuClusterpairSplit);
    for (int w = 0; w < c_nbnxnGpuClusterpairSplit; w++)
    {
        excl[w] = get_exclusion_mask(nbl, cj4_ind, w);
    }

    /* Only minor < major bits set */
    for (int ej = 0; ej < nbl->na_ci; ej++)
    {
        int w = (ej >> 2);
        for (int ei = ej; ei < nbl->na_ci; ei++)
        {
            excl[w]->pair[(ej & (c_nbnxnGpuJgroupSize-1))*nbl->na_ci + ei] &=
                ~(1U << (sj_offset*c_gpuNumClusterPerCell + i_cluster_in_cell));
        }
    }
}
/* Returns a diagonal or off-diagonal interaction mask for plain C lists */
static unsigned int get_imask(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=2 */
gmx_unused static unsigned int get_imask_simd_j2(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci*2 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_0 :
            (rdiag && ci*2+1 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_1 :
             NBNXN_INTERACTION_MASK_ALL));
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=4 */
gmx_unused static unsigned int get_imask_simd_j4(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=8 */
gmx_unused static unsigned int get_imask_simd_j8(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj*2 ? NBNXN_INTERACTION_MASK_DIAG_J8_0 :
            (rdiag && ci == cj*2+1 ? NBNXN_INTERACTION_MASK_DIAG_J8_1 :
             NBNXN_INTERACTION_MASK_ALL));
}
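
/* Worked example (added illustration): with 2-atom j-clusters, i-cluster
 * ci overlaps j-clusters 2*ci and 2*ci+1, so two distinct diagonal masks
 * are needed; with 8-atom j-clusters the situation is mirrored and
 * i-clusters 2*cj and 2*cj+1 cover the two halves of j-cluster cj.
 */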
#if GMX_SIMD_REAL_WIDTH == 2
#define get_imask_simd_4xn  get_imask_simd_j2
#endif
#if GMX_SIMD_REAL_WIDTH == 4
#define get_imask_simd_4xn  get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 8
#define get_imask_simd_4xn  get_imask_simd_j8
#define get_imask_simd_2xnn get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 16
#define get_imask_simd_2xnn get_imask_simd_j8
#endif
/* Plain C code for checking and adding cluster-pairs to the list.
 *
 * \param[in]     gridj               The j-grid
 * \param[in,out] nbl                 The pair-list to store the cluster pairs in
 * \param[in]     icluster            The index of the i-cluster
 * \param[in]     jclusterFirst       The first cluster in the j-range
 * \param[in]     jclusterLast        The last cluster in the j-range
 * \param[in]     excludeSubDiagonal  Exclude atom pairs with i-index > j-index
 * \param[in]     x_j                 Coordinates for the j-atoms, in xyz format
 * \param[in]     rlist2              The squared list cut-off
 * \param[in]     rbb2                The squared cut-off for putting cluster-pairs in the list based on bounding box distance only
 * \param[in,out] numDistanceChecks   The number of distance checks performed
 */
static void
makeClusterListSimple(const nbnxn_grid_t &jGrid,
                      NbnxnPairlistCpu *nbl,
                      int icluster,
                      int jclusterFirst,
                      int jclusterLast,
                      bool excludeSubDiagonal,
                      const real * gmx_restrict x_j,
                      real rlist2,
                      float rbb2,
                      int * gmx_restrict numDistanceChecks)
{
    const nbnxn_bb_t * gmx_restrict bb_ci = nbl->work->iClusterData.bb.data();
    const real * gmx_restrict       x_ci  = nbl->work->iClusterData.x.data();

    gmx_bool InRange = FALSE;
    while (!InRange && jclusterFirst <= jclusterLast)
    {
        real d2 = subc_bb_dist2(0, bb_ci, jclusterFirst, jGrid.bb);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rlist2)
        {
            int cjf_gl = jGrid.cell0 + jclusterFirst;
            for (int i = 0; i < c_nbnxnCpuIClusterSize && !InRange; i++)
            {
                for (int j = 0; j < c_nbnxnCpuIClusterSize; j++)
                {
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjf_gl*c_nbnxnCpuIClusterSize+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjf_gl*c_nbnxnCpuIClusterSize+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjf_gl*c_nbnxnCpuIClusterSize+j)*STRIDE_XYZ+ZZ]) < rlist2);
                }
            }
            *numDistanceChecks += c_nbnxnCpuIClusterSize*c_nbnxnCpuIClusterSize;
        }
        if (!InRange)
        {
            jclusterFirst++;
        }
    }
    if (!InRange)
    {
        return;
    }

    InRange = FALSE;
    while (!InRange && jclusterLast > jclusterFirst)
    {
        real d2 = subc_bb_dist2(0, bb_ci, jclusterLast, jGrid.bb);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rlist2)
        {
            int cjl_gl = jGrid.cell0 + jclusterLast;
            for (int i = 0; i < c_nbnxnCpuIClusterSize && !InRange; i++)
            {
                for (int j = 0; j < c_nbnxnCpuIClusterSize; j++)
                {
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjl_gl*c_nbnxnCpuIClusterSize+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjl_gl*c_nbnxnCpuIClusterSize+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjl_gl*c_nbnxnCpuIClusterSize+j)*STRIDE_XYZ+ZZ]) < rlist2);
                }
            }
            *numDistanceChecks += c_nbnxnCpuIClusterSize*c_nbnxnCpuIClusterSize;
        }
        if (!InRange)
        {
            jclusterLast--;
        }
    }

    if (jclusterFirst <= jclusterLast)
    {
        for (int jcluster = jclusterFirst; jcluster <= jclusterLast; jcluster++)
        {
            /* Store cj and the interaction mask */
            nbnxn_cj_t cjEntry;
            cjEntry.cj   = jGrid.cell0 + jcluster;
            cjEntry.excl = get_imask(excludeSubDiagonal, icluster, jcluster);
            nbl->cj.push_back(cjEntry);
        }
        /* Increase the closing index in the i list */
        nbl->ci.back().cj_ind_end = nbl->cj.size();
    }
}
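
/* Added note: the two while loops above shrink the candidate j-range from
 * both ends using cheap bounding-box distances, falling back to atom-pair
 * distances only close to the cut-off; every j-cluster remaining in
 * [jclusterFirst, jclusterLast] is then stored with its interaction mask.
 */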
#ifdef GMX_NBNXN_SIMD_4XN
#include "gromacs/nbnxm/pairlist_simd_4xm.h"
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
#include "gromacs/nbnxm/pairlist_simd_2xmm.h"
#endif
/* Plain C or SIMD4 code for making a pair list of super-cell sci vs scj.
 * Checks bounding box distances and possibly atom pair distances.
 */
static void make_cluster_list_supersub(const nbnxn_grid_t &iGrid,
                                       const nbnxn_grid_t &jGrid,
                                       NbnxnPairlistGpu *nbl,
                                       const int sci,
                                       const int scj,
                                       const bool excludeSubDiagonal,
                                       const int stride,
                                       const real *x,
                                       const real rlist2,
                                       const float rbb2,
                                       int *numDistanceChecks)
{
    NbnxnPairlistGpuWork &work = *nbl->work;

    const float *pbb_ci = work.iSuperClusterData.bbPacked.data();
    const nbnxn_bb_t *bb_ci = work.iSuperClusterData.bb.data();

    assert(c_nbnxnGpuClusterSize == iGrid.na_c);
    assert(c_nbnxnGpuClusterSize == jGrid.na_c);

    /* We generate the pairlist mainly based on bounding-box distances
     * and do atom pair distance based pruning on the GPU.
     * Only if a j-group contains a single cluster-pair, we try to prune
     * that pair based on atom distances on the CPU to avoid empty j-groups.
     */
#define PRUNE_LIST_CPU_ONE 1
#define PRUNE_LIST_CPU_ALL 0

#if PRUNE_LIST_CPU_ONE
    int ci_last = -1;
#endif

    float *d2l = work.distanceBuffer.data();

    for (int subc = 0; subc < jGrid.nsubc[scj]; subc++)
    {
        const int cj4_ind   = work.cj_ind/c_nbnxnGpuJgroupSize;
        const int cj_offset = work.cj_ind - cj4_ind*c_nbnxnGpuJgroupSize;
        const int cj        = scj*c_gpuNumClusterPerCell + subc;

        const int cj_gl = jGrid.cell0*c_gpuNumClusterPerCell + cj;

        int ci1;
        if (excludeSubDiagonal && sci == scj)
        {
            ci1 = subc + 1;
        }
        else
        {
            ci1 = iGrid.nsubc[sci];
        }

#if NBNXN_SEARCH_BB_SIMD4
        /* Determine all ci1 bb distances in one call with SIMD4 */
        subc_bb_dist2_simd4_xxxx(jGrid.pbb.data() + (cj >> STRIDE_PBB_2LOG)*NNBSBB_XXXX + (cj & (STRIDE_PBB-1)),
                                 ci1, pbb_ci, d2l);
        *numDistanceChecks += c_nbnxnGpuClusterSize*2;
#endif

        int npair = 0;
        unsigned int imask = 0;
        /* We use a fixed upper-bound instead of ci1 to help optimization */
        for (int ci = 0; ci < c_gpuNumClusterPerCell; ci++)
        {
            if (ci == ci1)
            {
                break;
            }

#if !NBNXN_SEARCH_BB_SIMD4
            /* Determine the bb distance between ci and cj */
            d2l[ci] = subc_bb_dist2(ci, bb_ci, cj, jGrid.bb);
            *numDistanceChecks += 2;
#endif

#if PRUNE_LIST_CPU_ALL
            /* Check if the distance is within the distance where
             * we use only the bounding box distance rbb,
             * or within the cut-off and there is at least one atom pair
             * within the cut-off. This check is very costly.
             */
            *numDistanceChecks += c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize;
            if (d2l[ci] < rbb2 ||
                (d2l[ci] < rlist2 &&
                 clusterpair_in_range(work, ci, cj_gl, stride, x, rlist2)))
#else
            /* Check if the distance between the two bounding boxes
             * is within the pair-list cut-off.
             */
            if (d2l[ci] < rlist2)
#endif
            {
                /* Flag this i-subcell to be taken into account */
                imask |= (1U << (cj_offset*c_gpuNumClusterPerCell + ci));

#if PRUNE_LIST_CPU_ONE
                ci_last = ci;
#endif

                npair++;
            }
        }

#if PRUNE_LIST_CPU_ONE
        /* If we only found 1 pair, check if any atoms are actually
         * within the cut-off, so we could get rid of it.
         */
        if (npair == 1 && d2l[ci_last] >= rbb2 &&
            !clusterpair_in_range(work, ci_last, cj_gl, stride, x, rlist2))
        {
            imask &= ~(1U << (cj_offset*c_gpuNumClusterPerCell + ci_last));
            npair--;
        }
#endif

        if (npair > 0)
        {
            /* We have at least one cluster pair: add a j-entry */
            if (static_cast<size_t>(cj4_ind) == nbl->cj4.size())
            {
                nbl->cj4.resize(nbl->cj4.size() + 1);
            }
            nbnxn_cj4_t *cj4 = &nbl->cj4[cj4_ind];

            cj4->cj[cj_offset] = cj_gl;

            /* Set the exclusions for the ci==sj entry.
             * Here we don't bother to check if this entry is actually flagged,
             * as it will nearly always be in the list.
             */
            if (excludeSubDiagonal && sci == scj)
            {
                set_self_and_newton_excls_supersub(nbl, cj4_ind, cj_offset, subc);
            }

            /* Copy the cluster interaction mask to the list */
            for (int w = 0; w < c_nbnxnGpuClusterpairSplit; w++)
            {
                cj4->imei[w].imask |= imask;
            }

            nbl->work->cj_ind++;

            /* Keep the count */
            nbl->nci_tot += npair;

            /* Increase the closing index in i super-cell list */
            nbl->sci.back().cj4_ind_end =
                (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize;
        }
    }
}
/* Returns how many contiguous j-clusters we have starting in the i-list */
template <typename CjListType>
static int numContiguousJClusters(const int cjIndexStart,
                                  const int cjIndexEnd,
                                  gmx::ArrayRef<const CjListType> cjList)
{
    const int firstJCluster = nblCj(cjList, cjIndexStart);

    int numContiguous = 0;

    while (cjIndexStart + numContiguous < cjIndexEnd &&
           nblCj(cjList, cjIndexStart + numContiguous) == firstJCluster + numContiguous)
    {
        numContiguous++;
    }

    return numContiguous;
}
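
/* Worked example (added illustration): for the j-cluster sequence
 * 8, 9, 10, 14 starting at cjIndexStart, the first three entries are
 * contiguous, so numContiguousJClusters() returns 3 and those j-clusters
 * can be located by direct indexing instead of searching.
 */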
/*! \internal
 * \brief Helper struct for efficient searching for excluded atoms in a j-list
 */
struct JListRanges
{
    /*! \brief Constructs a j-list range from \p cjList with the given index range */
    template <typename CjListType>
    JListRanges(int cjIndexStart,
                int cjIndexEnd,
                gmx::ArrayRef<const CjListType> cjList);

    int cjIndexStart; //!< The start index in the j-list
    int cjIndexEnd;   //!< The end index in the j-list
    int cjFirst;      //!< The j-cluster with index cjIndexStart
    int cjLast;       //!< The j-cluster with index cjIndexEnd-1
    int numDirect;    //!< Up to cjIndexStart+numDirect the j-clusters are cjFirst + the index offset
};

template <typename CjListType>
JListRanges::JListRanges(int cjIndexStart,
                         int cjIndexEnd,
                         gmx::ArrayRef<const CjListType> cjList) :
    cjIndexStart(cjIndexStart),
    cjIndexEnd(cjIndexEnd)
{
    GMX_ASSERT(cjIndexEnd > cjIndexStart, "JListRanges should only be called with non-empty lists");

    cjFirst = nblCj(cjList, cjIndexStart);
    cjLast  = nblCj(cjList, cjIndexEnd - 1);

    /* Determine how many contiguous j-cells we have starting
     * from the first i-cell. This number can be used to directly
     * calculate j-cell indices for excluded atoms.
     */
    numDirect = numContiguousJClusters(cjIndexStart, cjIndexEnd, cjList);
}
/* Return the index of \p jCluster in the given range or -1 when not present
 *
 * Note: This code is executed very often and therefore performance is
 * important. It should be inlined and fully optimized.
 */
template <typename CjListType>
static inline int
findJClusterInJList(int jCluster,
                    const JListRanges &ranges,
                    gmx::ArrayRef<const CjListType> cjList)
{
    int index = -1;

    if (jCluster < ranges.cjFirst + ranges.numDirect)
    {
        /* We can calculate the index directly using the offset */
        index = ranges.cjIndexStart + jCluster - ranges.cjFirst;
    }
    else
    {
        /* Search for jCluster using bisection */
        int rangeMiddle;
        int rangeStart = ranges.cjIndexStart + ranges.numDirect;
        int rangeEnd   = ranges.cjIndexEnd;

        while (index == -1 && rangeStart < rangeEnd)
        {
            rangeMiddle = (rangeStart + rangeEnd) >> 1;

            const int clusterMiddle = nblCj(cjList, rangeMiddle);

            if (jCluster == clusterMiddle)
            {
                index = rangeMiddle;
            }
            else if (jCluster < clusterMiddle)
            {
                rangeEnd = rangeMiddle;
            }
            else
            {
                rangeStart = rangeMiddle + 1;
            }
        }
    }

    return index;
}
// TODO: Get rid of the two functions below by renaming sci to ci (or something better)

/* Return the i-entry in the list we are currently operating on */
static nbnxn_ci_t *getOpenIEntry(NbnxnPairlistCpu *nbl)
{
    return &nbl->ci.back();
}

/* Return the i-entry in the list we are currently operating on */
static nbnxn_sci_t *getOpenIEntry(NbnxnPairlistGpu *nbl)
{
    return &nbl->sci.back();
}
/* Set all atom-pair exclusions for a simple type list i-entry
 *
 * Set all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for simple list entry iEntry.
 */
static void
setExclusionsForIEntry(const nbnxn_search *nbs,
                       NbnxnPairlistCpu *nbl,
                       gmx_bool diagRemoved,
                       int na_cj_2log,
                       const nbnxn_ci_t &iEntry,
                       const t_blocka &exclusions)
{
    if (iEntry.cj_ind_end == iEntry.cj_ind_start)
    {
        /* Empty list: no exclusions */
        return;
    }

    const JListRanges ranges(iEntry.cj_ind_start, iEntry.cj_ind_end, gmx::makeConstArrayRef(nbl->cj));

    const int iCluster = iEntry.ci;

    gmx::ArrayRef<const int> cell = nbs->cell;

    /* Loop over the atoms in the i-cluster */
    for (int i = 0; i < nbl->na_ci; i++)
    {
        const int iIndex = iCluster*nbl->na_ci + i;
        const int iAtom  = nbs->a[iIndex];

        /* Loop over the topology-based exclusions for this i-atom */
        for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1]; exclIndex++)
        {
            const int jAtom = exclusions.a[exclIndex];

            if (jAtom == iAtom)
            {
                /* The self exclusions are already set, save some time */
                continue;
            }

            /* Get the index of the j-atom in the nbnxn atom data */
            const int jIndex = cell[jAtom];

            /* Without shifts we only calculate interactions j>i
             * for one-way pair-lists.
             */
            if (diagRemoved && jIndex <= iIndex)
            {
                continue;
            }

            const int jCluster = (jIndex >> na_cj_2log);

            /* Could the cluster be in our list? */
            if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
            {
                const int index =
                    findJClusterInJList(jCluster, ranges,
                                        gmx::makeConstArrayRef(nbl->cj));

                if (index >= 0)
                {
                    /* We found an exclusion, clear the corresponding
                     * interaction bit
                     */
                    const int innerJ = jIndex - (jCluster << na_cj_2log);

                    nbl->cj[index].excl &= ~(1U << ((i << na_cj_2log) + innerJ));
                }
            }
        }
    }
}
/* Add a new i-entry to the FEP list and copy the i-properties */
static inline void fep_list_new_nri_copy(t_nblist *nlist)
{
    /* Add a new i-entry */
    nlist->nri++;

    assert(nlist->nri < nlist->maxnri);

    /* Duplicate the last i-entry, except for jindex, which continues */
    nlist->iinr[nlist->nri]   = nlist->iinr[nlist->nri-1];
    nlist->shift[nlist->nri]  = nlist->shift[nlist->nri-1];
    nlist->gid[nlist->nri]    = nlist->gid[nlist->nri-1];
    nlist->jindex[nlist->nri] = nlist->nrj;
}
/* For load balancing of the free-energy lists over threads, we set
 * the maximum nrj size of an i-entry to 40. This leads to good
 * load balancing in the worst case scenario of a single perturbed
 * particle on 16 threads, while not introducing significant overhead.
 * Note that half of the perturbed pairs will anyhow end up in very small lists,
 * since non-perturbed i-particles will see few perturbed j-particles.
 */
const int max_nrj_fep = 40;
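
/* Worked example (added illustration): a single perturbed particle with
 * ~600 j-partners yields ~600/40 = 15 i-entries of at most 40 pairs each,
 * enough blocks to distribute over 16 threads.
 */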
/* Exclude the perturbed pairs from the Verlet list. This is only done to avoid
 * singularities for overlapping particles (0/0), since the charges and
 * LJ parameters have been zeroed in the nbnxn data structure.
 * Simultaneously make a group pair list for the perturbed pairs.
 */
static void make_fep_list(const nbnxn_search *nbs,
                          const nbnxn_atomdata_t *nbat,
                          NbnxnPairlistCpu *nbl,
                          gmx_bool bDiagRemoved,
                          nbnxn_ci_t *nbl_ci,
                          real gmx_unused shx,
                          real gmx_unused shy,
                          real gmx_unused shz,
                          real gmx_unused rlist_fep2,
                          const nbnxn_grid_t &iGrid,
                          const nbnxn_grid_t &jGrid,
                          t_nblist *nlist)
{
    int ci, cj_ind_start, cj_ind_end, cja, cjr;
    int nri_max;
    int ngid, gid_i = 0, gid_j, gid;
    int egp_shift, egp_mask;
    int gid_cj = 0;
    int ind_i, ind_j, ai, aj;
    int nri;
    gmx_bool bFEP_i, bFEP_i_all;

    if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
    {
        return;
    }

    ci = nbl_ci->ci;

    cj_ind_start = nbl_ci->cj_ind_start;
    cj_ind_end   = nbl_ci->cj_ind_end;

    /* In worst case we have alternating energy groups
     * and create #atom-pair lists, which means we need the size
     * of a cluster pair (na_ci*na_cj) times the number of cj's.
     */
    nri_max = nbl->na_ci*nbl->na_cj*(cj_ind_end - cj_ind_start);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }

    const nbnxn_atomdata_t::Params &nbatParams = nbat->params();

    ngid = nbatParams.nenergrp;

    if (ngid*jGrid.na_cj > gmx::index(sizeof(gid_cj)*8))
    {
        gmx_fatal(FARGS, "The Verlet scheme with %dx%d kernels and free-energy only supports up to %zu energy groups",
                  iGrid.na_c, jGrid.na_cj, (sizeof(gid_cj)*8)/jGrid.na_cj);
    }

    egp_shift = nbatParams.neg_2log;
    egp_mask  = (1 << egp_shift) - 1;

    /* Loop over the atoms in the i sub-cell */
    bFEP_i_all = TRUE;
    for (int i = 0; i < nbl->na_ci; i++)
    {
        ind_i = ci*nbl->na_ci + i;
        ai    = nbs->a[ind_i];
        if (ai >= 0)
        {
            nri = nlist->nri;
            nlist->jindex[nri+1] = nlist->jindex[nri];
            nlist->iinr[nri]     = ai;
            /* The actual energy group pair index is set later */
            nlist->gid[nri]      = 0;
            nlist->shift[nri]    = nbl_ci->shift & NBNXN_CI_SHIFT;

            bFEP_i = ((iGrid.fep[ci - iGrid.cell0] & (1 << i)) != 0u);

            bFEP_i_all = bFEP_i_all && bFEP_i;

            if (nlist->nrj + (cj_ind_end - cj_ind_start)*nbl->na_cj > nlist->maxnrj)
            {
                nlist->maxnrj = over_alloc_small(nlist->nrj + (cj_ind_end - cj_ind_start)*nbl->na_cj);
                srenew(nlist->jjnr, nlist->maxnrj);
                srenew(nlist->excl_fep, nlist->maxnrj);
            }

            if (ngid > 1)
            {
                gid_i = (nbatParams.energrp[ci] >> (egp_shift*i)) & egp_mask;
            }

            for (int cj_ind = cj_ind_start; cj_ind < cj_ind_end; cj_ind++)
            {
                unsigned int fep_cj;

                cja = nbl->cj[cj_ind].cj;

                if (jGrid.na_cj == jGrid.na_c)
                {
                    cjr    = cja - jGrid.cell0;
                    fep_cj = jGrid.fep[cjr];
                    if (ngid > 1)
                    {
                        gid_cj = nbatParams.energrp[cja];
                    }
                }
                else if (2*jGrid.na_cj == jGrid.na_c)
                {
                    cjr = cja - jGrid.cell0*2;
                    /* Extract half of the ci fep/energrp mask */
                    fep_cj = (jGrid.fep[cjr>>1] >> ((cjr&1)*jGrid.na_cj)) & ((1<<jGrid.na_cj) - 1);
                    if (ngid > 1)
                    {
                        gid_cj = nbatParams.energrp[cja>>1] >> ((cja&1)*jGrid.na_cj*egp_shift) & ((1<<(jGrid.na_cj*egp_shift)) - 1);
                    }
                }
                else
                {
                    cjr = cja - (jGrid.cell0>>1);
                    /* Combine two ci fep masks/energrp */
                    fep_cj = jGrid.fep[cjr*2] + (jGrid.fep[cjr*2+1] << jGrid.na_c);
                    if (ngid > 1)
                    {
                        gid_cj = nbatParams.energrp[cja*2] + (nbatParams.energrp[cja*2+1] << (jGrid.na_c*egp_shift));
                    }
                }

                if (bFEP_i || fep_cj != 0)
                {
                    for (int j = 0; j < nbl->na_cj; j++)
                    {
                        /* Is this interaction perturbed and not excluded? */
                        ind_j = cja*nbl->na_cj + j;
                        aj    = nbs->a[ind_j];
                        if (aj >= 0 &&
                            (bFEP_i || (fep_cj & (1 << j))) &&
                            (!bDiagRemoved || ind_j >= ind_i))
                        {
                            if (ngid > 1)
                            {
                                gid_j = (gid_cj >> (j*egp_shift)) & egp_mask;
                                gid   = GID(gid_i, gid_j, ngid);

                                if (nlist->nrj > nlist->jindex[nri] &&
                                    nlist->gid[nri] != gid)
                                {
                                    /* Energy group pair changed: new list */
                                    fep_list_new_nri_copy(nlist);
                                    nri = nlist->nri;
                                }
                                nlist->gid[nri] = gid;
                            }

                            if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                            {
                                fep_list_new_nri_copy(nlist);
                                nri = nlist->nri;
                            }

                            /* Add it to the FEP list */
                            nlist->jjnr[nlist->nrj]     = aj;
                            nlist->excl_fep[nlist->nrj] = (nbl->cj[cj_ind].excl >> (i*nbl->na_cj + j)) & 1;
                            nlist->nrj++;

                            /* Exclude it from the normal list.
                             * Note that the charge has been set to zero,
                             * but we need to avoid 0/0, as perturbed atoms
                             * can be on top of each other.
                             */
                            nbl->cj[cj_ind].excl &= ~(1U << (i*nbl->na_cj + j));
                        }
                    }
                }
            }

            if (nlist->nrj > nlist->jindex[nri])
            {
                /* Actually add this new, non-empty, list */
                nlist->nri++;
                nlist->jindex[nlist->nri] = nlist->nrj;
            }
        }
    }

    if (bFEP_i_all)
    {
        /* All interactions are perturbed, we can skip this entry */
        nbl_ci->cj_ind_end = cj_ind_start;
        nbl->ncjInUse     -= cj_ind_end - cj_ind_start;
    }
}
/* Return the index of a j-cluster within its cj4 group */
static inline int cj_mod_cj4(int cj)
{
    return cj & (c_nbnxnGpuJgroupSize - 1);
}

/* Convert a j-cluster to a cj4 group */
static inline int cj_to_cj4(int cj)
{
    return cj/c_nbnxnGpuJgroupSize;
}

/* Return the index of a j-atom within a warp */
static inline int a_mod_wj(int a)
{
    return a & (c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit - 1);
}
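
/* Worked example (added illustration): with c_nbnxnGpuJgroupSize == 4 and
 * 8-atom clusters split over 2 warps, cj_to_cj4(10) == 2, cj_mod_cj4(10)
 * == 2 and a_mod_wj(5) == 1, i.e. atom 5 is the second j-atom within its
 * warp half.
 */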
/* As make_fep_list above, but for super/sub lists. */
static void make_fep_list(const nbnxn_search *nbs,
                          const nbnxn_atomdata_t *nbat,
                          NbnxnPairlistGpu *nbl,
                          gmx_bool bDiagRemoved,
                          const nbnxn_sci_t *nbl_sci,
                          real shx,
                          real shy,
                          real shz,
                          real rlist_fep2,
                          const nbnxn_grid_t &iGrid,
                          const nbnxn_grid_t &jGrid,
                          t_nblist *nlist)
{
    int nri_max;
    int ind_i, ind_j, ai, aj;
    int nri;
    gmx_bool bFEP_i;
    real xi, yi, zi;

    const nbnxn_cj4_t *cj4;

    const int numJClusterGroups = nbl_sci->numJClusterGroups();
    if (numJClusterGroups == 0)
    {
        /* Empty list */
        return;
    }

    const int sci = nbl_sci->sci;

    const int cj4_ind_start = nbl_sci->cj4_ind_start;
    const int cj4_ind_end   = nbl_sci->cj4_ind_end;

    /* Here we process one super-cell, max #atoms na_sc, versus a list
     * cj4 entries, each with max c_nbnxnGpuJgroupSize cj's, each
     * of size na_cj atoms.
     * On the GPU we don't support energy groups (yet).
     * So for each of the na_sc i-atoms, we need max one FEP list
     * for each max_nrj_fep j-atoms.
     */
    nri_max = nbl->na_sc*nbl->na_cj*(1 + (numJClusterGroups*c_nbnxnGpuJgroupSize)/max_nrj_fep);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }

    /* Loop over the atoms in the i super-cluster */
    for (int c = 0; c < c_gpuNumClusterPerCell; c++)
    {
        const int c_abs = sci*c_gpuNumClusterPerCell + c;

        for (int i = 0; i < nbl->na_ci; i++)
        {
            ind_i = c_abs*nbl->na_ci + i;
            ai    = nbs->a[ind_i];
            if (ai >= 0)
            {
                nri = nlist->nri;
                nlist->jindex[nri+1] = nlist->jindex[nri];
                nlist->iinr[nri]     = ai;
                /* With GPUs, energy groups are not supported */
                nlist->gid[nri]      = 0;
                nlist->shift[nri]    = nbl_sci->shift & NBNXN_CI_SHIFT;

                bFEP_i = ((iGrid.fep[c_abs - iGrid.cell0*c_gpuNumClusterPerCell] & (1 << i)) != 0u);

                xi = nbat->x()[ind_i*nbat->xstride+XX] + shx;
                yi = nbat->x()[ind_i*nbat->xstride+YY] + shy;
                zi = nbat->x()[ind_i*nbat->xstride+ZZ] + shz;

                const int nrjMax = nlist->nrj + numJClusterGroups*c_nbnxnGpuJgroupSize*nbl->na_cj;
                if (nrjMax > nlist->maxnrj)
                {
                    nlist->maxnrj = over_alloc_small(nrjMax);
                    srenew(nlist->jjnr, nlist->maxnrj);
                    srenew(nlist->excl_fep, nlist->maxnrj);
                }

                for (int cj4_ind = cj4_ind_start; cj4_ind < cj4_ind_end; cj4_ind++)
                {
                    cj4 = &nbl->cj4[cj4_ind];

                    for (int gcj = 0; gcj < c_nbnxnGpuJgroupSize; gcj++)
                    {
                        unsigned int fep_cj;

                        if ((cj4->imei[0].imask & (1U << (gcj*c_gpuNumClusterPerCell + c))) == 0)
                        {
                            /* Skip this ci for this cj */
                            continue;
                        }

                        const int cjr =
                            cj4->cj[gcj] - jGrid.cell0*c_gpuNumClusterPerCell;

                        fep_cj = jGrid.fep[cjr];

                        if (bFEP_i || fep_cj != 0)
                        {
                            for (int j = 0; j < nbl->na_cj; j++)
                            {
                                /* Is this interaction perturbed and not excluded? */
                                ind_j = (jGrid.cell0*c_gpuNumClusterPerCell + cjr)*nbl->na_cj + j;
                                aj    = nbs->a[ind_j];
                                if (aj >= 0 &&
                                    (bFEP_i || (fep_cj & (1 << j))) &&
                                    (!bDiagRemoved || ind_j >= ind_i))
                                {
                                    int excl_pair;
                                    unsigned int excl_bit;
                                    real dx, dy, dz;

                                    const int jHalf = j/(c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit);
                                    nbnxn_excl_t *excl =
                                        get_exclusion_mask(nbl, cj4_ind, jHalf);

                                    excl_pair = a_mod_wj(j)*nbl->na_ci + i;
                                    excl_bit  = (1U << (gcj*c_gpuNumClusterPerCell + c));

                                    dx = nbat->x()[ind_j*nbat->xstride+XX] - xi;
                                    dy = nbat->x()[ind_j*nbat->xstride+YY] - yi;
                                    dz = nbat->x()[ind_j*nbat->xstride+ZZ] - zi;

                                    /* The unpruned GPU list has more than 2/3
                                     * of the atom pairs beyond rlist. Using
                                     * this list will cause a lot of overhead
                                     * in the CPU FEP kernels, especially
                                     * relative to the fast GPU kernels.
                                     * So we prune the FEP list here.
                                     */
                                    if (dx*dx + dy*dy + dz*dz < rlist_fep2)
                                    {
                                        if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                                        {
                                            fep_list_new_nri_copy(nlist);
                                            nri = nlist->nri;
                                        }

                                        /* Add it to the FEP list */
                                        nlist->jjnr[nlist->nrj]     = aj;
                                        nlist->excl_fep[nlist->nrj] = (excl->pair[excl_pair] & excl_bit) ? 1 : 0;
                                        nlist->nrj++;
                                    }

                                    /* Exclude it from the normal list.
                                     * Note that the charge and LJ parameters have
                                     * been set to zero, but we need to avoid 0/0,
                                     * as perturbed atoms can be on top of each other.
                                     */
                                    excl->pair[excl_pair] &= ~excl_bit;
                                }
                            }
                        }

                        /* Note that we could mask out this pair in imask
                         * if all i- and/or all j-particles are perturbed.
                         * But since the perturbed pairs on the CPU will
                         * take an order of magnitude more time, the GPU
                         * will finish before the CPU and there is no gain.
                         */
                    }
                }

                if (nlist->nrj > nlist->jindex[nri])
                {
                    /* Actually add this new, non-empty, list */
                    nlist->nri++;
                    nlist->jindex[nlist->nri] = nlist->nrj;
                }
            }
        }
    }
}
/* Set all atom-pair exclusions for a GPU type list i-entry
 *
 * Sets all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for i-super-cluster list entry iEntry.
 */
static void
setExclusionsForIEntry(const nbnxn_search *nbs,
                       NbnxnPairlistGpu *nbl,
                       gmx_bool diagRemoved,
                       int gmx_unused na_cj_2log,
                       const nbnxn_sci_t &iEntry,
                       const t_blocka &exclusions)
{
    if (iEntry.numJClusterGroups() == 0)
    {
        /* Empty list */
        return;
    }

    /* Set the search ranges using start and end j-cluster indices.
     * Note that here we cannot use cj4_ind_end, since the last cj4
     * can be only partially filled, so we use cj_ind.
     */
    const JListRanges ranges(iEntry.cj4_ind_start*c_nbnxnGpuJgroupSize,
                             nbl->work->cj_ind,
                             gmx::makeConstArrayRef(nbl->cj4));

    GMX_ASSERT(nbl->na_ci == c_nbnxnGpuClusterSize, "na_ci should match the GPU cluster size");
    constexpr int c_clusterSize      = c_nbnxnGpuClusterSize;
    constexpr int c_superClusterSize = c_nbnxnGpuNumClusterPerSupercluster*c_nbnxnGpuClusterSize;

    const int iSuperCluster = iEntry.sci;

    gmx::ArrayRef<const int> cell = nbs->cell;

    /* Loop over the atoms in the i super-cluster */
    for (int i = 0; i < c_superClusterSize; i++)
    {
        const int iIndex = iSuperCluster*c_superClusterSize + i;
        const int iAtom  = nbs->a[iIndex];

        const int iCluster = i/c_clusterSize;

        /* Loop over the topology-based exclusions for this i-atom */
        for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1]; exclIndex++)
        {
            const int jAtom = exclusions.a[exclIndex];

            if (jAtom == iAtom)
            {
                /* The self exclusions are already set, save some time */
                continue;
            }

            /* Get the index of the j-atom in the nbnxn atom data */
            const int jIndex = cell[jAtom];

            /* Without shifts we only calculate interactions j>i
             * for one-way pair-lists.
             */
            /* NOTE: We would like to use iIndex on the right hand side,
             * but that makes this routine 25% slower with gcc6/7.
             * Even using c_superClusterSize makes it slower.
             * Either of these changes triggers peeling of the exclIndex
             * loop, which apparently leads to far less efficient code.
             */
            if (diagRemoved && jIndex <= iSuperCluster*nbl->na_sc + i)
            {
                continue;
            }

            const int jCluster = jIndex/c_clusterSize;

            /* Check whether the cluster is in our list */
            if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
            {
                const int index =
                    findJClusterInJList(jCluster, ranges,
                                        gmx::makeConstArrayRef(nbl->cj4));

                if (index >= 0)
                {
                    /* We found an exclusion, clear the corresponding
                     * interaction bit
                     */
                    const unsigned int pairMask = (1U << (cj_mod_cj4(index)*c_gpuNumClusterPerCell + iCluster));
                    /* Check if the i-cluster interacts with the j-cluster */
                    if (nbl_imask0(nbl, index) & pairMask)
                    {
                        const int innerI = (i & (c_clusterSize - 1));
                        const int innerJ = (jIndex & (c_clusterSize - 1));

                        /* Determine which j-half (CUDA warp) we are in */
                        const int jHalf = innerJ/(c_clusterSize/c_nbnxnGpuClusterpairSplit);

                        nbnxn_excl_t *interactionMask =
                            get_exclusion_mask(nbl, cj_to_cj4(index), jHalf);

                        interactionMask->pair[a_mod_wj(innerJ)*c_clusterSize + innerI] &= ~pairMask;
                    }
                }
            }
        }
    }
}
/* Make a new ci entry at the back of nbl->ci */
static void addNewIEntry(NbnxnPairlistCpu *nbl, int ci, int shift, int flags)
{
    nbnxn_ci_t ciEntry;
    ciEntry.ci    = ci;
    ciEntry.shift = shift;
    /* Store the interaction flags along with the shift */
    ciEntry.shift |= flags;
    ciEntry.cj_ind_start = nbl->cj.size();
    ciEntry.cj_ind_end   = nbl->cj.size();
    nbl->ci.push_back(ciEntry);
}

/* Make a new sci entry at index nbl->nsci */
static void addNewIEntry(NbnxnPairlistGpu *nbl, int sci, int shift, int gmx_unused flags)
{
    nbnxn_sci_t sciEntry;
    sciEntry.sci   = sci;
    sciEntry.shift = shift;
    sciEntry.cj4_ind_start = nbl->cj4.size();
    sciEntry.cj4_ind_end   = nbl->cj4.size();

    nbl->sci.push_back(sciEntry);
}
/* Sort the simple j-list cj on exclusions.
 * Entries with exclusions will all be sorted to the beginning of the list.
 */
static void sort_cj_excl(nbnxn_cj_t *cj, int ncj,
                         NbnxnPairlistCpuWork *work)
{
    work->cj.resize(ncj);

    /* Make a list of the j-cells involving exclusions */
    int jnew = 0;
    for (int j = 0; j < ncj; j++)
    {
        if (cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
        {
            work->cj[jnew++] = cj[j];
        }
    }
    /* Check if there are exclusions at all or not just the first entry */
    if (!((jnew == 0) ||
          (jnew == 1 && cj[0].excl != NBNXN_INTERACTION_MASK_ALL)))
    {
        for (int j = 0; j < ncj; j++)
        {
            if (cj[j].excl == NBNXN_INTERACTION_MASK_ALL)
            {
                work->cj[jnew++] = cj[j];
            }
        }
        for (int j = 0; j < ncj; j++)
        {
            cj[j] = work->cj[j];
        }
    }
}
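
/* Example (added illustration): a j-list with exclusion masks
 * [all, excl, all, excl] is reordered to [excl, excl, all, all], so the
 * entries that need exclusion-mask processing form a contiguous block at
 * the start of the list.
 */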
/* Close this simple list i entry */
static void closeIEntry(NbnxnPairlistCpu *nbl,
                        int gmx_unused sp_max_av,
                        gmx_bool gmx_unused progBal,
                        float gmx_unused nsp_tot_est,
                        int gmx_unused thread,
                        int gmx_unused nthread)
{
    nbnxn_ci_t &ciEntry = nbl->ci.back();

    /* All content of the new ci entry has already been filled correctly,
     * we only need to sort and increase counts or remove the entry when empty.
     */
    const int jlen = ciEntry.cj_ind_end - ciEntry.cj_ind_start;
    if (jlen > 0)
    {
        sort_cj_excl(nbl->cj.data() + ciEntry.cj_ind_start, jlen, nbl->work);

        /* The counts below are used for non-bonded pair/flop counts
         * and should therefore match the available kernel setups.
         */
        if (!(ciEntry.shift & NBNXN_CI_DO_COUL(0)))
        {
            nbl->work->ncj_noq += jlen;
        }
        else if ((ciEntry.shift & NBNXN_CI_HALF_LJ(0)) ||
                 !(ciEntry.shift & NBNXN_CI_DO_LJ(0)))
        {
            nbl->work->ncj_hlj += jlen;
        }
    }
    else
    {
        /* Entry is empty: remove it */
        nbl->ci.pop_back();
    }
}
2210 /* Split sci entry for load balancing on the GPU.
2211 * Splitting ensures we have enough lists to fully utilize the whole GPU.
2212 * With progBal we generate progressively smaller lists, which improves
2213 * load balancing. As we only know the current count on our own thread,
2214 * we will need to estimate the current total amount of i-entries.
2215 * As the lists get concatenated later, this estimate depends
2216 * both on nthread and our own thread index.
2218 static void split_sci_entry(NbnxnPairlistGpu *nbl,
2220 gmx_bool progBal, float nsp_tot_est,
2221 int thread, int nthread)
2229 /* Estimate the total number of cluster pairs generated so far, combined
2230 * over all threads, from the estimated total and our own current count.
2232 nsp_est = (nsp_tot_est*thread)/nthread + nbl->nci_tot;
2234 /* The first ci blocks should be larger, to avoid overhead.
2235 * The last ci blocks should be smaller, to improve load balancing.
2236 * The factor 3/2 makes the first block 3/2 times the target average
2237 * and ensures that the total number of blocks ends up equal to
2238 * that of equally sized blocks of size nsp_target_av.
2240 nsp_max = static_cast<int>(nsp_target_av*(nsp_tot_est*1.5/(nsp_est + nsp_tot_est)));
2244 nsp_max = nsp_target_av;
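/* Worked example with illustrative numbers: with nsp_target_av = 36,
 * the first block on the first thread (nsp_est near 0) gets
 * nsp_max = 36*1.5 = 54, while near the end (nsp_est near nsp_tot_est)
 * it gets nsp_max = 36*1.5/2 = 27, so the blocks shrink progressively.
 */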
2247 const int cj4_start = nbl->sci.back().cj4_ind_start;
2248 const int cj4_end = nbl->sci.back().cj4_ind_end;
2249 const int j4len = cj4_end - cj4_start;
2251 if (j4len > 1 && j4len*c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize > nsp_max)
2253 /* Modify the last ci entry and process the cj4's again */
2259 for (int cj4 = cj4_start; cj4 < cj4_end; cj4++)
2261 int nsp_cj4_p = nsp_cj4;
2262 /* Count the number of cluster pairs in this cj4 group */
2264 for (int p = 0; p < c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize; p++)
2266 nsp_cj4 += (nbl->cj4[cj4].imei[0].imask >> p) & 1;
2269 /* If adding the current cj4 with nsp_cj4 pairs gets us further
2270 * away from our target nsp_max, split the list before this cj4.
2272 if (nsp > 0 && nsp_max - nsp < nsp + nsp_cj4 - nsp_max)
2274 /* Split the list at cj4 */
2275 nbl->sci.back().cj4_ind_end = cj4;
2276 /* Create a new sci entry */
2278 sciNew.sci = nbl->sci.back().sci;
2279 sciNew.shift = nbl->sci.back().shift;
2280 sciNew.cj4_ind_start = cj4;
2281 nbl->sci.push_back(sciNew);
2284 nsp_cj4_e = nsp_cj4_p;
2290 /* Put the remaining cj4's in the last sci entry */
2291 nbl->sci.back().cj4_ind_end = cj4_end;
2293 /* Possibly balance out the last two sci's
2294 * by moving the last cj4 of the second-to-last sci.
2296 if (nsp_sci - nsp_cj4_e >= nsp + nsp_cj4_e)
2298 GMX_ASSERT(nbl->sci.size() >= 2, "We expect at least two elements");
2299 nbl->sci[nbl->sci.size() - 2].cj4_ind_end--;
2300 nbl->sci[nbl->sci.size() - 1].cj4_ind_start--;
2305 /* Close this super/sub list i entry */
2306 static void closeIEntry(NbnxnPairlistGpu *nbl,
2308 gmx_bool progBal, float nsp_tot_est,
2309 int thread, int nthread)
2311 nbnxn_sci_t &sciEntry = *getOpenIEntry(nbl);
2313 /* All content of the new ci entry has already been filled correctly;
2314 * we only need to, potentially, split the entry or remove it when empty.
2316 int j4len = sciEntry.numJClusterGroups();
2319 /* We can only have complete blocks of 4 j-entries in a list,
2320 * so round the count up before closing.
2322 int ncj4 = (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize;
2323 nbl->work->cj_ind = ncj4*c_nbnxnGpuJgroupSize;
2327 /* Measure the size of the new entry and potentially split it */
2328 split_sci_entry(nbl, nsp_max_av, progBal, nsp_tot_est,
2334 /* Entry is empty: remove it */
2335 nbl->sci.pop_back();
2339 /* Syncs the working array before adding another grid pair to the list; a no-op for the CPU list */
2340 static void sync_work(NbnxnPairlistCpu gmx_unused *nbl)
2344 /* Syncs the working array before adding another grid pair to the GPU list */
2345 static void sync_work(NbnxnPairlistGpu *nbl)
2347 nbl->work->cj_ind = nbl->cj4.size()*c_nbnxnGpuJgroupSize;
2350 /* Clears an NbnxnPairlistCpu data structure */
2351 static void clear_pairlist(NbnxnPairlistCpu *nbl)
2357 nbl->ciOuter.clear();
2358 nbl->cjOuter.clear();
2360 nbl->work->ncj_noq = 0;
2361 nbl->work->ncj_hlj = 0;
2364 /* Clears an NbnxnPairlistGpu data structure */
2365 static void clear_pairlist(NbnxnPairlistGpu *nbl)
2369 nbl->excl.resize(1);
2373 /* Clears a group scheme pair list */
2374 static void clear_pairlist_fep(t_nblist *nl)
2378 if (nl->jindex == nullptr)
2380 snew(nl->jindex, 1);
2385 /* Sets a simple list i-cell bounding box, including PBC shift */
2386 static inline void set_icell_bb_simple(gmx::ArrayRef<const nbnxn_bb_t> bb,
2388 real shx, real shy, real shz,
2391 bb_ci->lower[BB_X] = bb[ci].lower[BB_X] + shx;
2392 bb_ci->lower[BB_Y] = bb[ci].lower[BB_Y] + shy;
2393 bb_ci->lower[BB_Z] = bb[ci].lower[BB_Z] + shz;
2394 bb_ci->upper[BB_X] = bb[ci].upper[BB_X] + shx;
2395 bb_ci->upper[BB_Y] = bb[ci].upper[BB_Y] + shy;
2396 bb_ci->upper[BB_Z] = bb[ci].upper[BB_Z] + shz;
2399 /* Sets a simple list i-cell bounding box, including PBC shift */
2400 static inline void set_icell_bb(const nbnxn_grid_t &iGrid,
2402 real shx, real shy, real shz,
2403 NbnxnPairlistCpuWork *work)
2405 set_icell_bb_simple(iGrid.bb, ci, shx, shy, shz, &work->iClusterData.bb[0]);
2409 /* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
2410 static void set_icell_bbxxxx_supersub(gmx::ArrayRef<const float> bb,
2412 real shx, real shy, real shz,
2415 int ia = ci*(c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX;
2416 for (int m = 0; m < (c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX; m += NNBSBB_XXXX)
2418 for (int i = 0; i < STRIDE_PBB; i++)
2420 bb_ci[m+0*STRIDE_PBB+i] = bb[ia+m+0*STRIDE_PBB+i] + shx;
2421 bb_ci[m+1*STRIDE_PBB+i] = bb[ia+m+1*STRIDE_PBB+i] + shy;
2422 bb_ci[m+2*STRIDE_PBB+i] = bb[ia+m+2*STRIDE_PBB+i] + shz;
2423 bb_ci[m+3*STRIDE_PBB+i] = bb[ia+m+3*STRIDE_PBB+i] + shx;
2424 bb_ci[m+4*STRIDE_PBB+i] = bb[ia+m+4*STRIDE_PBB+i] + shy;
2425 bb_ci[m+5*STRIDE_PBB+i] = bb[ia+m+5*STRIDE_PBB+i] + shz;
2431 /* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
2432 gmx_unused static void set_icell_bb_supersub(gmx::ArrayRef<const nbnxn_bb_t> bb,
2434 real shx, real shy, real shz,
2437 for (int i = 0; i < c_gpuNumClusterPerCell; i++)
2439 set_icell_bb_simple(bb, ci*c_gpuNumClusterPerCell+i,
2445 /* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
2446 gmx_unused static void set_icell_bb(const nbnxn_grid_t &iGrid,
2448 real shx, real shy, real shz,
2449 NbnxnPairlistGpuWork *work)
2452 set_icell_bbxxxx_supersub(iGrid.pbb, ci, shx, shy, shz,
2453 work->iSuperClusterData.bbPacked.data());
2455 set_icell_bb_supersub(iGrid.bb, ci, shx, shy, shz,
2456 work->iSuperClusterData.bb.data());
2460 /* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
2461 static void icell_set_x_simple(int ci,
2462 real shx, real shy, real shz,
2463 int stride, const real *x,
2464 NbnxnPairlistCpuWork::IClusterData *iClusterData)
2466 const int ia = ci*c_nbnxnCpuIClusterSize;
2468 for (int i = 0; i < c_nbnxnCpuIClusterSize; i++)
2470 iClusterData->x[i*STRIDE_XYZ+XX] = x[(ia+i)*stride+XX] + shx;
2471 iClusterData->x[i*STRIDE_XYZ+YY] = x[(ia+i)*stride+YY] + shy;
2472 iClusterData->x[i*STRIDE_XYZ+ZZ] = x[(ia+i)*stride+ZZ] + shz;
2476 static void icell_set_x(int ci,
2477 real shx, real shy, real shz,
2478 int stride, const real *x,
2480 NbnxnPairlistCpuWork *work)
2482 switch (nb_kernel_type)
2485 #ifdef GMX_NBNXN_SIMD_4XN
2486 case nbnxnk4xN_SIMD_4xN:
2487 icell_set_x_simd_4xn(ci, shx, shy, shz, stride, x, work);
2490 #ifdef GMX_NBNXN_SIMD_2XNN
2491 case nbnxnk4xN_SIMD_2xNN:
2492 icell_set_x_simd_2xnn(ci, shx, shy, shz, stride, x, work);
2496 case nbnxnk4x4_PlainC:
2497 icell_set_x_simple(ci, shx, shy, shz, stride, x, &work->iClusterData);
2500 GMX_ASSERT(false, "Unhandled case");
2505 /* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
2506 static void icell_set_x(int ci,
2507 real shx, real shy, real shz,
2508 int stride, const real *x,
2509 int gmx_unused nb_kernel_type,
2510 NbnxnPairlistGpuWork *work)
2512 #if !GMX_SIMD4_HAVE_REAL
2514 real * x_ci = work->iSuperClusterData.x.data();
2516 int ia = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize;
2517 for (int i = 0; i < c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize; i++)
2519 x_ci[i*DIM + XX] = x[(ia+i)*stride + XX] + shx;
2520 x_ci[i*DIM + YY] = x[(ia+i)*stride + YY] + shy;
2521 x_ci[i*DIM + ZZ] = x[(ia+i)*stride + ZZ] + shz;
2524 #else /* !GMX_SIMD4_HAVE_REAL */
2526 real * x_ci = work->iSuperClusterData.xSimd.data();
2528 for (int si = 0; si < c_gpuNumClusterPerCell; si++)
2530 for (int i = 0; i < c_nbnxnGpuClusterSize; i += GMX_SIMD4_WIDTH)
2532 int io = si*c_nbnxnGpuClusterSize + i;
2533 int ia = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize + io;
2534 for (int j = 0; j < GMX_SIMD4_WIDTH; j++)
2536 x_ci[io*DIM + j + XX*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + XX] + shx;
2537 x_ci[io*DIM + j + YY*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + YY] + shy;
2538 x_ci[io*DIM + j + ZZ*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + ZZ] + shz;
2543 #endif /* !GMX_SIMD4_HAVE_REAL */
2546 static real minimum_subgrid_size_xy(const nbnxn_grid_t &grid)
2550 return std::min(grid.cellSize[XX], grid.cellSize[YY]);
2554 return std::min(grid.cellSize[XX]/c_gpuNumClusterPerCellX,
2555 grid.cellSize[YY]/c_gpuNumClusterPerCellY);
2559 static real effective_buffer_1x1_vs_MxN(const nbnxn_grid_t &iGrid,
2560 const nbnxn_grid_t &jGrid)
2562 const real eff_1x1_buffer_fac_overest = 0.1;
2564 /* Determine an atom-pair list cut-off buffer size for atom pairs,
2565 * to be added to rlist (including buffer) used for MxN.
2566 * This is for converting an MxN list to a 1x1 list. This means we can't
2567 * use the normal buffer estimate, as we have an MxN list in which
2568 * some atom pairs beyond rlist are missing. We want to capture
2569 * the beneficial effect of buffering by extra pairs just outside rlist,
2570 * while removing the useless pairs that are further away from rlist.
2571 * (Also, the buffer could have been set manually, not using the estimate.)
2572 * This buffer size is an overestimate.
2573 * We add 10% of the smallest grid sub-cell dimensions.
2574 * Note that the z-size differs per cell and we don't use this,
2575 * so we overestimate.
2576 * With PME, the 10% value gives a buffer that is somewhat larger
2577 * than the effective buffer with a tolerance of 0.005 kJ/mol/ps.
2578 * Smaller tolerances or using RF lead to a smaller effective buffer,
2579 * so 10% gives a safe overestimate.
2581 return eff_1x1_buffer_fac_overest*(minimum_subgrid_size_xy(iGrid) +
2582 minimum_subgrid_size_xy(jGrid));
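/* Illustrative example, assuming both grids have a smallest sub-cell
 * x/y dimension of 0.3 nm: the extra 1x1 buffer returned above would be
 * 0.1*(0.3 + 0.3) = 0.06 nm on top of the MxN rlist.
 */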
2585 /* Clusters at the cut-off only increase rlist by 60% of their size */
2586 static real nbnxn_rlist_inc_outside_fac = 0.6;
2588 /* Due to the cluster size the effective pair-list is longer than
2589 * that of a simple atom pair-list. This function gives the extra distance.
2591 real nbnxn_get_rlist_effective_inc(int cluster_size_j, real atom_density)
2594 real vol_inc_i, vol_inc_j;
2596 /* We should get this from the setup, but currently it's the same for
2597 * all setups, including GPUs.
2599 cluster_size_i = c_nbnxnCpuIClusterSize;
2601 vol_inc_i = (cluster_size_i - 1)/atom_density;
2602 vol_inc_j = (cluster_size_j - 1)/atom_density;
2604 return nbnxn_rlist_inc_outside_fac*std::cbrt(vol_inc_i + vol_inc_j);
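/* Worked example with assumed, typical numbers: at an atom density of
 * roughly 100 nm^-3 (about that of atomistic water) and i- and j-cluster
 * sizes of 4, vol_inc_i = vol_inc_j = 3/100 nm^3, giving an effective
 * rlist increase of 0.6*cbrt(0.06), about 0.23 nm.
 */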
2607 /* Estimates the interaction volume^2 for non-local interactions */
2608 static real nonlocal_vol2(const struct gmx_domdec_zones_t *zones, const rvec ls, real r)
2616 /* Here we simply add up the non-home interaction volume^2 contributions
2617 * of the 1, 2 or 3 one-dimensional decomposition zones. As these volumes are not additive,
2618 * this is an overestimate, but it would only be significant in the limit
2619 * of small cells, where we anyhow need to split the lists into
2620 * as small parts as possible.
2623 for (int z = 0; z < zones->n; z++)
2625 if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
2630 for (int d = 0; d < DIM; d++)
2632 if (zones->shift[z][d] == 0)
2636 za *= zones->size[z].x1[d] - zones->size[z].x0[d];
2640 /* 4 octants of a sphere */
2641 vold_est = 0.25*M_PI*r*r*r*r;
2642 /* 4 quarter pie slices on the edges */
2643 vold_est += 4*cl*M_PI/6.0*r*r*r;
2644 /* One rectangular volume on a face */
2645 vold_est += ca*0.5*r*r;
2647 vol2_est_tot += vold_est*za;
2651 return vol2_est_tot;
2654 /* Estimates the average size of a full j-list for super/sub setup */
2655 static void get_nsubpair_target(const nbnxn_search *nbs,
2656 const InteractionLocality iloc,
2658 const int min_ci_balanced,
2659 int *nsubpair_target,
2660 float *nsubpair_tot_est)
2662 /* The target value of 36 seems to be the optimum for Kepler.
2663 * Maxwell is less sensitive to the exact value.
2665 const int nsubpair_target_min = 36;
2667 real r_eff_sup, vol_est, nsp_est, nsp_est_nl;
2669 const nbnxn_grid_t &grid = nbs->grid[0];
2671 /* We don't need to balance list sizes if:
2672 * - We didn't request balancing.
2673 * - The number of grid cells >= the number of lists requested,
2674 * since we will always generate at least #cells lists.
2675 * - We don't have any cells, since then there won't be any lists.
2677 if (min_ci_balanced <= 0 || grid.nc >= min_ci_balanced || grid.nc == 0)
2679 /* nsubpair_target==0 signals no balancing */
2680 *nsubpair_target = 0;
2681 *nsubpair_tot_est = 0;
2686 ls[XX] = (grid.c1[XX] - grid.c0[XX])/(grid.numCells[XX]*c_gpuNumClusterPerCellX);
2687 ls[YY] = (grid.c1[YY] - grid.c0[YY])/(grid.numCells[YY]*c_gpuNumClusterPerCellY);
2688 ls[ZZ] = grid.na_c/(grid.atom_density*ls[XX]*ls[YY]);
2690 /* The average length of the diagonal of a sub cell */
2691 real diagonal = std::sqrt(ls[XX]*ls[XX] + ls[YY]*ls[YY] + ls[ZZ]*ls[ZZ]);
2693 /* The formulas below are a heuristic estimate of the average nsj per si */
2694 r_eff_sup = rlist + nbnxn_rlist_inc_outside_fac*gmx::square((grid.na_c - 1.0)/grid.na_c)*0.5*diagonal;
2696 if (!nbs->DomDec || nbs->zones->n == 1)
2703 gmx::square(grid.atom_density/grid.na_c)*
2704 nonlocal_vol2(nbs->zones, ls, r_eff_sup);
2707 if (iloc == InteractionLocality::Local)
2709 /* Sub-cell interacts with itself */
2710 vol_est = ls[XX]*ls[YY]*ls[ZZ];
2711 /* 6/2 rectangular volume on the faces */
2712 vol_est += (ls[XX]*ls[YY] + ls[XX]*ls[ZZ] + ls[YY]*ls[ZZ])*r_eff_sup;
2713 /* 12/2 quarter pie slices on the edges */
2714 vol_est += 2*(ls[XX] + ls[YY] + ls[ZZ])*0.25*M_PI*gmx::square(r_eff_sup);
2715 /* 4 octants of a sphere */
2716 vol_est += 0.5*4.0/3.0*M_PI*gmx::power3(r_eff_sup);
2718 /* Estimate the number of cluster pairs as the local number of
2719 * clusters times the volume they interact with times the density.
2721 nsp_est = grid.nsubc_tot*vol_est*grid.atom_density/grid.na_c;
2723 /* Subtract the non-local pair count */
2724 nsp_est -= nsp_est_nl;
2726 /* For small cut-offs nsp_est will be an underestimate.
2727 * With DD nsp_est_nl is an overestimate so nsp_est can get negative.
2728 * So to avoid too small or negative nsp_est we set a minimum of
2729 * all cells interacting with all 3^3 direct neighbors (3^3-1)/2+1=14.
2730 * This might be a slight overestimate for small non-periodic groups of
2731 * atoms as will occur for a local domain with DD, but for small
2732 * groups of atoms we'll anyhow be limited by nsubpair_target_min,
2733 * so this overestimation will not matter.
2735 nsp_est = std::max(nsp_est, grid.nsubc_tot*14._real);
2739 fprintf(debug, "nsp_est local %5.1f non-local %5.1f\n",
2740 nsp_est, nsp_est_nl);
2745 nsp_est = nsp_est_nl;
2748 /* Thus the (average) maximum j-list size should be as follows.
2749 * Since there is overhead, we shouldn't make the lists too small
2750 * (and we can't chop up j-groups) so we use a minimum target size of 36.
2752 *nsubpair_target = std::max(nsubpair_target_min,
2753 roundToInt(nsp_est/min_ci_balanced));
2754 *nsubpair_tot_est = static_cast<int>(nsp_est);
2758 fprintf(debug, "nbl nsp estimate %.1f, nsubpair_target %d\n",
2759 nsp_est, *nsubpair_target);
2763 /* Debug list print function */
2764 static void print_nblist_ci_cj(FILE *fp, const NbnxnPairlistCpu *nbl)
2766 for (const nbnxn_ci_t &ciEntry : nbl->ci)
2768 fprintf(fp, "ci %4d shift %2d ncj %3d\n",
2769 ciEntry.ci, ciEntry.shift,
2770 ciEntry.cj_ind_end - ciEntry.cj_ind_start);
2772 for (int j = ciEntry.cj_ind_start; j < ciEntry.cj_ind_end; j++)
2774 fprintf(fp, " cj %5d imask %x\n",
2781 /* Debug list print function */
2782 static void print_nblist_sci_cj(FILE *fp, const NbnxnPairlistGpu *nbl)
2784 for (const nbnxn_sci_t &sci : nbl->sci)
2786 fprintf(fp, "ci %4d shift %2d ncj4 %2d\n",
2788 sci.numJClusterGroups());
2791 for (int j4 = sci.cj4_ind_start; j4 < sci.cj4_ind_end; j4++)
2793 for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
2795 fprintf(fp, " sj %5d imask %x\n",
2797 nbl->cj4[j4].imei[0].imask);
2798 for (int si = 0; si < c_gpuNumClusterPerCell; si++)
2800 if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))
2807 fprintf(fp, "ci %4d shift %2d ncj4 %2d ncp %3d\n",
2809 sci.numJClusterGroups(),
2814 /* Combine the pair lists *nbl generated on multiple threads into nblc */
2815 static void combine_nblists(int nnbl, NbnxnPairlistGpu **nbl,
2816 NbnxnPairlistGpu *nblc)
2818 int nsci = nblc->sci.size();
2819 int ncj4 = nblc->cj4.size();
2820 int nexcl = nblc->excl.size();
2821 for (int i = 0; i < nnbl; i++)
2823 nsci += nbl[i]->sci.size();
2824 ncj4 += nbl[i]->cj4.size();
2825 nexcl += nbl[i]->excl.size();
2828 /* Resize with the final, combined size, so we can fill in parallel */
2829 /* NOTE: For better performance we should use default initialization */
2830 nblc->sci.resize(nsci);
2831 nblc->cj4.resize(ncj4);
2832 nblc->excl.resize(nexcl);
2834 /* Each thread should copy its own data to the combined arrays,
2835 * as otherwise data will go back and forth between different caches.
2837 #if GMX_OPENMP && !(defined __clang_analyzer__)
2838 int nthreads = gmx_omp_nthreads_get(emntPairsearch);
2841 #pragma omp parallel for num_threads(nthreads) schedule(static)
2842 for (int n = 0; n < nnbl; n++)
2846 /* Determine the offset in the combined data for our thread.
2847 * Note that the original sizes in nblc are lost.
2849 int sci_offset = nsci;
2850 int cj4_offset = ncj4;
2851 int excl_offset = nexcl;
2853 for (int i = n; i < nnbl; i++)
2855 sci_offset -= nbl[i]->sci.size();
2856 cj4_offset -= nbl[i]->cj4.size();
2857 excl_offset -= nbl[i]->excl.size();
2860 const NbnxnPairlistGpu &nbli = *nbl[n];
2862 for (size_t i = 0; i < nbli.sci.size(); i++)
2864 nblc->sci[sci_offset + i] = nbli.sci[i];
2865 nblc->sci[sci_offset + i].cj4_ind_start += cj4_offset;
2866 nblc->sci[sci_offset + i].cj4_ind_end += cj4_offset;
2869 for (size_t j4 = 0; j4 < nbli.cj4.size(); j4++)
2871 nblc->cj4[cj4_offset + j4] = nbli.cj4[j4];
2872 nblc->cj4[cj4_offset + j4].imei[0].excl_ind += excl_offset;
2873 nblc->cj4[cj4_offset + j4].imei[1].excl_ind += excl_offset;
2876 for (size_t j4 = 0; j4 < nbli.excl.size(); j4++)
2878 nblc->excl[excl_offset + j4] = nbli.excl[j4];
2881 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2884 for (int n = 0; n < nnbl; n++)
2886 nblc->nci_tot += nbl[n]->nci_tot;
2890 static void balance_fep_lists(const nbnxn_search *nbs,
2891 nbnxn_pairlist_set_t *nbl_lists)
2894 int nri_tot, nrj_tot, nrj_target;
2898 nnbl = nbl_lists->nnbl;
2902 /* Nothing to balance */
2906 /* Count the total i-lists and pairs */
2909 for (int th = 0; th < nnbl; th++)
2911 nri_tot += nbl_lists->nbl_fep[th]->nri;
2912 nrj_tot += nbl_lists->nbl_fep[th]->nrj;
2915 nrj_target = (nrj_tot + nnbl - 1)/nnbl;
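/* Example: nrj_tot = 1000 pairs over nnbl = 4 lists gives, by the
 * ceiling division above, nrj_target = 250; nrj_tot = 1001 gives 251.
 */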
2917 assert(gmx_omp_nthreads_get(emntNonbonded) == nnbl);
2919 #pragma omp parallel for schedule(static) num_threads(nnbl)
2920 for (int th = 0; th < nnbl; th++)
2924 t_nblist *nbl = nbs->work[th].nbl_fep.get();
2926 /* Note that here we allocate for the total size, instead of
2927 * a per-thread estimate (which is hard to obtain).
2929 if (nri_tot > nbl->maxnri)
2931 nbl->maxnri = over_alloc_large(nri_tot);
2932 reallocate_nblist(nbl);
2934 if (nri_tot > nbl->maxnri || nrj_tot > nbl->maxnrj)
2936 nbl->maxnrj = over_alloc_small(nrj_tot);
2937 srenew(nbl->jjnr, nbl->maxnrj);
2938 srenew(nbl->excl_fep, nbl->maxnrj);
2941 clear_pairlist_fep(nbl);
2943 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2946 /* Loop over the source lists and assign and copy i-entries */
2948 nbld = nbs->work[th_dest].nbl_fep.get();
2949 for (int th = 0; th < nnbl; th++)
2953 nbls = nbl_lists->nbl_fep[th];
2955 for (int i = 0; i < nbls->nri; i++)
2959 /* The number of pairs in this i-entry */
2960 nrj = nbls->jindex[i+1] - nbls->jindex[i];
2962 /* Decide if list th_dest is too large and we should proceed
2963 * to the next destination list.
2965 if (th_dest+1 < nnbl && nbld->nrj > 0 &&
2966 nbld->nrj + nrj - nrj_target > nrj_target - nbld->nrj)
2969 nbld = nbs->work[th_dest].nbl_fep.get();
2972 nbld->iinr[nbld->nri] = nbls->iinr[i];
2973 nbld->gid[nbld->nri] = nbls->gid[i];
2974 nbld->shift[nbld->nri] = nbls->shift[i];
2976 for (int j = nbls->jindex[i]; j < nbls->jindex[i+1]; j++)
2978 nbld->jjnr[nbld->nrj] = nbls->jjnr[j];
2979 nbld->excl_fep[nbld->nrj] = nbls->excl_fep[j];
2983 nbld->jindex[nbld->nri] = nbld->nrj;
2987 /* Swap the list pointers */
2988 for (int th = 0; th < nnbl; th++)
2990 t_nblist *nbl_tmp = nbs->work[th].nbl_fep.release();
2991 nbs->work[th].nbl_fep.reset(nbl_lists->nbl_fep[th]);
2992 nbl_lists->nbl_fep[th] = nbl_tmp;
2996 fprintf(debug, "nbl_fep[%d] nri %4d nrj %4d\n",
2998 nbl_lists->nbl_fep[th]->nri,
2999 nbl_lists->nbl_fep[th]->nrj);
3004 /* Returns the next ci to be processed by our thread */
3005 static gmx_bool next_ci(const nbnxn_grid_t &grid,
3006 int nth, int ci_block,
3007 int *ci_x, int *ci_y,
3013 if (*ci_b == ci_block)
3015 /* Jump to the next block assigned to this task */
3016 *ci += (nth - 1)*ci_block;
3025 while (*ci >= grid.cxy_ind[*ci_x*grid.numCells[YY] + *ci_y + 1])
3028 if (*ci_y == grid.numCells[YY])
3038 /* Returns the distance^2 for which we put cell pairs in the list
3039 * without checking atom pair distances. This is usually < rlist^2.
3041 static float boundingbox_only_distance2(const nbnxn_grid_t &iGrid,
3042 const nbnxn_grid_t &jGrid,
3046 /* If the distance between two sub-cell bounding boxes is less
3047 * than this distance, do not check the distance between
3048 * all particle pairs in the sub-cell, since then it is likely
3049 * that the box pair has atom pairs within the cut-off.
3050 * We use the nblist cut-off minus 0.5 times the average x/y diagonal
3051 * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
3052 * Using more than 0.5 gains at most 0.5%.
3053 * If forces are calculated more than twice, the performance gain
3054 * in the force calculation outweighs the cost of checking.
3055 * Note that with sub-cell lists, the atom-pair distance check
3056 * is only performed when just 1 out of 8 sub-cells is within range;
3057 * this is because the GPU is much faster than the CPU.
3062 bbx = 0.5*(iGrid.cellSize[XX] + jGrid.cellSize[XX]);
3063 bby = 0.5*(iGrid.cellSize[YY] + jGrid.cellSize[YY]);
3066 bbx /= c_gpuNumClusterPerCellX;
3067 bby /= c_gpuNumClusterPerCellY;
3070 rbb2 = std::max(0.0, rlist - 0.5*std::sqrt(bbx*bbx + bby*bby));
3076 return static_cast<float>((1 + GMX_FLOAT_EPS)*rbb2);
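/* Worked example with illustrative numbers: with rlist = 1.0 nm and
 * average sub-cell sizes bbx = bby = 0.4 nm, half the x/y diagonal is
 * 0.5*sqrt(0.4^2 + 0.4^2) = 0.28 nm, so rbb2 = (1.0 - 0.28)^2 = 0.51 nm^2.
 */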
3080 static int get_ci_block_size(const nbnxn_grid_t &iGrid,
3081 gmx_bool bDomDec, int nth)
3083 const int ci_block_enum = 5;
3084 const int ci_block_denom = 11;
3085 const int ci_block_min_atoms = 16;
3088 /* Here we decide how to distribute the blocks over the threads.
3089 * We use prime numbers to try to avoid that the grid size becomes
3090 * a multiple of the number of threads, which would lead to some
3091 * threads getting "inner" pairs and others getting boundary pairs,
3092 * which in turn will lead to load imbalance between threads.
3093 * Set the block size as 5/11/ntask times the average number of cells
3094 * in a y,z slab. This should ensure a quite uniform distribution
3095 * of the grid parts of the different threads along all three grid
3096 * zone boundaries with 3D domain decomposition. At the same time
3097 * the blocks will not become too small.
3099 ci_block = (iGrid.nc*ci_block_enum)/(ci_block_denom*iGrid.numCells[XX]*nth);
3101 /* Ensure the blocks are not too small: avoids cache invalidation */
3102 if (ci_block*iGrid.na_sc < ci_block_min_atoms)
3104 ci_block = (ci_block_min_atoms + iGrid.na_sc - 1)/iGrid.na_sc;
3107 /* Without domain decomposition
3108 * or with fewer than 3 blocks per task, divide into nth blocks.
3110 if (!bDomDec || nth*3*ci_block > iGrid.nc)
3112 ci_block = (iGrid.nc + nth - 1)/nth;
3115 if (ci_block > 1 && (nth - 1)*ci_block >= iGrid.nc)
3117 /* Some threads have no work. Although reducing the block size
3118 * does not decrease the block count on the first few threads,
3119 * with GPUs better mixing of "upper" cells that have more empty
3120 * clusters results in a somewhat lower max load over all threads.
3121 * Without GPUs the regime of so few atoms per thread is less
3122 * performance relevant, but with 8-wide SIMD the same reasoning
3123 * applies, since the pair list uses 4 i-atom "sub-clusters".
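/* Worked example with illustrative numbers: a grid of nc = 8000 cells
 * with numCells[XX] = 20 searched by nth = 8 threads gives
 * ci_block = 8000*5/(11*20*8) = 22 cells per block, i.e. 5/11 of the
 * 400-cell y,z slab divided over the 8 tasks.
 */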
3131 /* Returns the number of bits to right-shift a cluster index to obtain
3132 * the corresponding force buffer flag index.
3134 static int getBufferFlagShift(int numAtomsPerCluster)
3136 int bufferFlagShift = 0;
3137 while ((numAtomsPerCluster << bufferFlagShift) < NBNXN_BUFFERFLAG_SIZE)
3142 return bufferFlagShift;
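/* Worked example with assumed numbers: if NBNXN_BUFFERFLAG_SIZE were 16
 * and a cluster held 4 atoms, the loop above would return a shift of 2,
 * since 4 << 2 = 16 is the first value not below the flag block size.
 */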
3145 static bool pairlistIsSimple(const NbnxnPairlistCpu gmx_unused &pairlist)
3150 static bool pairlistIsSimple(const NbnxnPairlistGpu gmx_unused &pairlist)
3155 static void makeClusterListWrapper(NbnxnPairlistCpu *nbl,
3156 const nbnxn_grid_t gmx_unused &iGrid,
3158 const nbnxn_grid_t &jGrid,
3159 const int firstCell,
3161 const bool excludeSubDiagonal,
3162 const nbnxn_atomdata_t *nbat,
3165 const int nb_kernel_type,
3166 int *numDistanceChecks)
3168 switch (nb_kernel_type)
3170 case nbnxnk4x4_PlainC:
3171 makeClusterListSimple(jGrid,
3172 nbl, ci, firstCell, lastCell,
3178 #ifdef GMX_NBNXN_SIMD_4XN
3179 case nbnxnk4xN_SIMD_4xN:
3180 makeClusterListSimd4xn(jGrid,
3181 nbl, ci, firstCell, lastCell,
3188 #ifdef GMX_NBNXN_SIMD_2XNN
3189 case nbnxnk4xN_SIMD_2xNN:
3190 makeClusterListSimd2xnn(jGrid,
3191 nbl, ci, firstCell, lastCell,
3201 static void makeClusterListWrapper(NbnxnPairlistGpu *nbl,
3202 const nbnxn_grid_t &gmx_unused iGrid,
3204 const nbnxn_grid_t &jGrid,
3205 const int firstCell,
3207 const bool excludeSubDiagonal,
3208 const nbnxn_atomdata_t *nbat,
3211 const int gmx_unused nb_kernel_type,
3212 int *numDistanceChecks)
3214 for (int cj = firstCell; cj <= lastCell; cj++)
3216 make_cluster_list_supersub(iGrid, jGrid,
3219 nbat->xstride, nbat->x().data(),
3225 static int getNumSimpleJClustersInList(const NbnxnPairlistCpu &nbl)
3227 return nbl.cj.size();
3230 static int getNumSimpleJClustersInList(const gmx_unused NbnxnPairlistGpu &nbl)
3235 static void incrementNumSimpleJClustersInList(NbnxnPairlistCpu *nbl,
3238 nbl->ncjInUse += nbl->cj.size() - ncj_old_j;
3241 static void incrementNumSimpleJClustersInList(NbnxnPairlistGpu gmx_unused *nbl,
3242 int gmx_unused ncj_old_j)
3246 static void checkListSizeConsistency(const NbnxnPairlistCpu &nbl,
3247 const bool haveFreeEnergy)
3249 GMX_RELEASE_ASSERT(static_cast<size_t>(nbl.ncjInUse) == nbl.cj.size() || haveFreeEnergy,
3250 "Without free-energy all cj pair-list entries should be in use. "
3251 "Note that subsequent code does not make use of the equality, "
3252 "this check is only here to catch bugs");
3255 static void checkListSizeConsistency(const NbnxnPairlistGpu gmx_unused &nbl,
3256 bool gmx_unused haveFreeEnergy)
3258 /* We currently cannot check consistency here */
3261 /* Set the buffer flags for newly added entries in the list */
3262 static void setBufferFlags(const NbnxnPairlistCpu &nbl,
3263 const int ncj_old_j,
3264 const int gridj_flag_shift,
3265 gmx_bitmask_t *gridj_flag,
3268 if (gmx::ssize(nbl.cj) > ncj_old_j)
3270 int cbFirst = nbl.cj[ncj_old_j].cj >> gridj_flag_shift;
3271 int cbLast = nbl.cj.back().cj >> gridj_flag_shift;
3272 for (int cb = cbFirst; cb <= cbLast; cb++)
3274 bitmask_init_bit(&gridj_flag[cb], th);
3279 static void setBufferFlags(const NbnxnPairlistGpu gmx_unused &nbl,
3280 int gmx_unused ncj_old_j,
3281 int gmx_unused gridj_flag_shift,
3282 gmx_bitmask_t gmx_unused *gridj_flag,
3285 GMX_ASSERT(false, "This function should never be called");
3288 /* Generates the part of pair-list nbl assigned to our thread */
3289 template <typename T>
3290 static void nbnxn_make_pairlist_part(const nbnxn_search *nbs,
3291 const nbnxn_grid_t &iGrid,
3292 const nbnxn_grid_t &jGrid,
3293 nbnxn_search_work_t *work,
3294 const nbnxn_atomdata_t *nbat,
3295 const t_blocka &exclusions,
3299 gmx_bool bFBufferFlag,
3302 float nsubpair_tot_est,
3309 real rlist2, rl_fep2 = 0;
3311 int ci_b, ci, ci_x, ci_y, ci_xy;
3313 real bx0, bx1, by0, by1, bz0, bz1;
3315 real d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
3316 int cxf, cxl, cyf, cyf_x, cyl;
3317 int numDistanceChecks;
3318 int gridi_flag_shift = 0, gridj_flag_shift = 0;
3319 gmx_bitmask_t *gridj_flag = nullptr;
3320 int ncj_old_i, ncj_old_j;
3322 nbs_cycle_start(&work->cc[enbsCCsearch]);
3324 if (jGrid.bSimple != pairlistIsSimple(*nbl) ||
3325 iGrid.bSimple != pairlistIsSimple(*nbl))
3327 gmx_incons("Grid incompatible with pair-list");
3331 GMX_ASSERT(nbl->na_ci == jGrid.na_c, "The cluster sizes in the list and grid should match");
3332 nbl->na_cj = nbnxn_kernel_to_cluster_j_size(nb_kernel_type);
3333 na_cj_2log = get_2log(nbl->na_cj);
3339 /* Determine conversion of clusters to flag blocks */
3340 gridi_flag_shift = getBufferFlagShift(nbl->na_ci);
3341 gridj_flag_shift = getBufferFlagShift(nbl->na_cj);
3343 gridj_flag = work->buffer_flags.flag;
3346 copy_mat(nbs->box, box);
3348 rlist2 = nbl->rlist*nbl->rlist;
3350 if (nbs->bFEP && !pairlistIsSimple(*nbl))
3352 /* Determine an atom-pair list cut-off distance for FEP atom pairs.
3353 * We should not simply use rlist, since then we would not have
3354 * the small, effective buffering of the NxN lists.
3355 * The buffer is an overestimate, but the resulting cost for pairs
3356 * beyond rlist is negligible compared to that of the FEP pairs within rlist.
3358 rl_fep2 = nbl->rlist + effective_buffer_1x1_vs_MxN(iGrid, jGrid);
3362 fprintf(debug, "nbl_fep atom-pair rlist %f\n", rl_fep2);
3364 rl_fep2 = rl_fep2*rl_fep2;
3367 rbb2 = boundingbox_only_distance2(iGrid, jGrid, nbl->rlist, pairlistIsSimple(*nbl));
3371 fprintf(debug, "nbl bounding box only distance %f\n", std::sqrt(rbb2));
3374 const bool isIntraGridList = (&iGrid == &jGrid);
3376 /* Set the shift range */
3377 for (int d = 0; d < DIM; d++)
3379 /* Check if we need periodicity shifts.
3380 * Without PBC or with domain decomposition we don't need them.
3382 if (d >= ePBC2npbcdim(nbs->ePBC) || nbs->dd_dim[d])
3388 const real listRangeCellToCell = listRangeForGridCellToGridCell(rlist, iGrid, jGrid);
3390 box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < listRangeCellToCell)
3400 const bool bSimple = pairlistIsSimple(*nbl);
3401 gmx::ArrayRef<const nbnxn_bb_t> bb_i;
3403 gmx::ArrayRef<const float> pbb_i;
3413 /* We use the normal bounding box format for both grid types */
3416 gmx::ArrayRef<const float> bbcz_i = iGrid.bbcz;
3417 gmx::ArrayRef<const int> flags_i = iGrid.flags;
3418 gmx::ArrayRef<const float> bbcz_j = jGrid.bbcz;
3419 int cell0_i = iGrid.cell0;
3423 fprintf(debug, "nbl nc_i %d col.av. %.1f ci_block %d\n",
3424 iGrid.nc, iGrid.nc/static_cast<double>(iGrid.numCells[XX]*iGrid.numCells[YY]), ci_block);
3427 numDistanceChecks = 0;
3429 const real listRangeBBToJCell2 = gmx::square(listRangeForBoundingBoxToGridCell(rlist, jGrid));
3431 /* Initialize ci_b and ci to one position before where we want them to
3432 * start, as they will both be incremented in next_ci.
3435 ci = th*ci_block - 1;
3438 while (next_ci(iGrid, nth, ci_block, &ci_x, &ci_y, &ci_b, &ci))
3440 if (bSimple && flags_i[ci] == 0)
3445 ncj_old_i = getNumSimpleJClustersInList(*nbl);
3448 if (!isIntraGridList && shp[XX] == 0)
3452 bx1 = bb_i[ci].upper[BB_X];
3456 bx1 = iGrid.c0[XX] + (ci_x+1)*iGrid.cellSize[XX];
3458 if (bx1 < jGrid.c0[XX])
3460 d2cx = gmx::square(jGrid.c0[XX] - bx1);
3462 if (d2cx >= listRangeBBToJCell2)
3469 ci_xy = ci_x*iGrid.numCells[YY] + ci_y;
3471 /* Loop over shift vectors in three dimensions */
3472 for (int tz = -shp[ZZ]; tz <= shp[ZZ]; tz++)
3474 const real shz = tz*box[ZZ][ZZ];
3476 bz0 = bbcz_i[ci*NNBSBB_D ] + shz;
3477 bz1 = bbcz_i[ci*NNBSBB_D+1] + shz;
3485 d2z = gmx::square(bz1);
3489 d2z = gmx::square(bz0 - box[ZZ][ZZ]);
3492 d2z_cx = d2z + d2cx;
3494 if (d2z_cx >= rlist2)
3499 bz1_frac = bz1/(iGrid.cxy_ind[ci_xy+1] - iGrid.cxy_ind[ci_xy]);
3504 /* The check with bz1_frac close to or larger than 1 comes later */
3506 for (int ty = -shp[YY]; ty <= shp[YY]; ty++)
3508 const real shy = ty*box[YY][YY] + tz*box[ZZ][YY];
3512 by0 = bb_i[ci].lower[BB_Y] + shy;
3513 by1 = bb_i[ci].upper[BB_Y] + shy;
3517 by0 = iGrid.c0[YY] + (ci_y )*iGrid.cellSize[YY] + shy;
3518 by1 = iGrid.c0[YY] + (ci_y+1)*iGrid.cellSize[YY] + shy;
3521 get_cell_range<YY>(by0, by1,
3532 if (by1 < jGrid.c0[YY])
3534 d2z_cy += gmx::square(jGrid.c0[YY] - by1);
3536 else if (by0 > jGrid.c1[YY])
3538 d2z_cy += gmx::square(by0 - jGrid.c1[YY]);
3541 for (int tx = -shp[XX]; tx <= shp[XX]; tx++)
3543 const int shift = XYZ2IS(tx, ty, tz);
3545 const bool excludeSubDiagonal = (isIntraGridList && shift == CENTRAL);
3547 if (c_pbcShiftBackward && isIntraGridList && shift > CENTRAL)
3552 const real shx = tx*box[XX][XX] + ty*box[YY][XX] + tz*box[ZZ][XX];
3556 bx0 = bb_i[ci].lower[BB_X] + shx;
3557 bx1 = bb_i[ci].upper[BB_X] + shx;
3561 bx0 = iGrid.c0[XX] + (ci_x )*iGrid.cellSize[XX] + shx;
3562 bx1 = iGrid.c0[XX] + (ci_x+1)*iGrid.cellSize[XX] + shx;
3565 get_cell_range<XX>(bx0, bx1,
3575 addNewIEntry(nbl, cell0_i+ci, shift, flags_i[ci]);
3577 if ((!c_pbcShiftBackward || excludeSubDiagonal) &&
3580 /* Leave the pairs with i > j.
3581 * x is the major index, so skip half of it.
3586 set_icell_bb(iGrid, ci, shx, shy, shz,
3589 icell_set_x(cell0_i+ci, shx, shy, shz,
3590 nbat->xstride, nbat->x().data(),
3594 for (int cx = cxf; cx <= cxl; cx++)
3597 if (jGrid.c0[XX] + cx*jGrid.cellSize[XX] > bx1)
3599 d2zx += gmx::square(jGrid.c0[XX] + cx*jGrid.cellSize[XX] - bx1);
3601 else if (jGrid.c0[XX] + (cx+1)*jGrid.cellSize[XX] < bx0)
3603 d2zx += gmx::square(jGrid.c0[XX] + (cx+1)*jGrid.cellSize[XX] - bx0);
3606 if (isIntraGridList &&
3608 (!c_pbcShiftBackward || shift == CENTRAL) &&
3611 /* Leave the pairs with i > j.
3612 * Skip half of y when i and j have the same x.
3621 for (int cy = cyf_x; cy <= cyl; cy++)
3623 const int columnStart = jGrid.cxy_ind[cx*jGrid.numCells[YY] + cy];
3624 const int columnEnd = jGrid.cxy_ind[cx*jGrid.numCells[YY] + cy + 1];
3627 if (jGrid.c0[YY] + cy*jGrid.cellSize[YY] > by1)
3629 d2zxy += gmx::square(jGrid.c0[YY] + cy*jGrid.cellSize[YY] - by1);
3631 else if (jGrid.c0[YY] + (cy+1)*jGrid.cellSize[YY] < by0)
3633 d2zxy += gmx::square(jGrid.c0[YY] + (cy+1)*jGrid.cellSize[YY] - by0);
3635 if (columnStart < columnEnd && d2zxy < listRangeBBToJCell2)
3637 /* To improve efficiency in the common case
3638 * of a homogeneous particle distribution,
3639 * we estimate the index of the middle cell
3640 * in range (midCell). We search down and up
3641 * starting from this index.
3643 * Note that the bbcz_j array contains bounds
3644 * for i-clusters, thus for clusters of 4 atoms.
3645 * For the common case where the j-cluster size
3646 * is 8, we could step with a stride of 2,
3647 * but we do not do this because it would
3648 * complicate this code even more.
3650 int midCell = columnStart + static_cast<int>(bz1_frac*(columnEnd - columnStart));
3651 if (midCell >= columnEnd)
3653 midCell = columnEnd - 1;
3658 /* Find the lowest cell that can possibly be within range.
3660 * Check if we hit the bottom of the grid,
3661 * if the j-cell is below the i-cell and if so,
3662 * if it is within range.
3664 int downTestCell = midCell;
3665 while (downTestCell >= columnStart &&
3666 (bbcz_j[downTestCell*NNBSBB_D + 1] >= bz0 ||
3667 d2xy + gmx::square(bbcz_j[downTestCell*NNBSBB_D + 1] - bz0) < rlist2))
3671 int firstCell = downTestCell + 1;
3673 /* Find the highest cell that can possibly be within range.
3675 * Check if we hit the top of the grid,
3676 * if the j-cell is above the i-cell and if so,
3677 * if it is within range.
3679 int upTestCell = midCell + 1;
3680 while (upTestCell < columnEnd &&
3681 (bbcz_j[upTestCell*NNBSBB_D] <= bz1 ||
3682 d2xy + gmx::square(bbcz_j[upTestCell*NNBSBB_D] - bz1) < rlist2))
3686 int lastCell = upTestCell - 1;
3688 #define NBNXN_REFCODE 0
3691 /* Simple reference code, for debugging,
3692 * overrides the more complex code above.
3694 firstCell = columnEnd;
3696 for (int k = columnStart; k < columnEnd; k++)
3698 if (d2xy + gmx::square(bbcz_j[k*NNBSBB_D + 1] - bz0) < rlist2 &&
3703 if (d2xy + gmx::square(bbcz_j[k*NNBSBB_D] - bz1) < rlist2 &&
3712 if (isIntraGridList)
3714 /* We want each atom/cell pair only once,
3715 * only use cj >= ci.
3717 if (!c_pbcShiftBackward || shift == CENTRAL)
3719 firstCell = std::max(firstCell, ci);
3723 if (firstCell <= lastCell)
3725 GMX_ASSERT(firstCell >= columnStart && lastCell < columnEnd, "The range should reside within the current grid column");
3727 /* For f buffer flags with simple lists */
3728 ncj_old_j = getNumSimpleJClustersInList(*nbl);
3730 makeClusterListWrapper(nbl,
3732 jGrid, firstCell, lastCell,
3737 &numDistanceChecks);
3741 setBufferFlags(*nbl, ncj_old_j, gridj_flag_shift,
3745 incrementNumSimpleJClustersInList(nbl, ncj_old_j);
3751 /* Set the exclusions for this ci list */
3752 setExclusionsForIEntry(nbs,
3756 *getOpenIEntry(nbl),
3761 make_fep_list(nbs, nbat, nbl,
3766 iGrid, jGrid, nbl_fep);
3769 /* Close this ci list */
3772 progBal, nsubpair_tot_est,
3778 if (bFBufferFlag && getNumSimpleJClustersInList(*nbl) > ncj_old_i)
3780 bitmask_init_bit(&(work->buffer_flags.flag[(iGrid.cell0+ci) >> gridi_flag_shift]), th);
3784 work->ndistc = numDistanceChecks;
3786 nbs_cycle_stop(&work->cc[enbsCCsearch]);
3788 checkListSizeConsistency(*nbl, nbs->bFEP);
3792 fprintf(debug, "number of distance checks %d\n", numDistanceChecks);
3794 print_nblist_statistics(debug, nbl, nbs, rlist);
3798 fprintf(debug, "nbl FEP list pairs: %d\n", nbl_fep->nrj);
3803 static void reduce_buffer_flags(const nbnxn_search *nbs,
3805 const nbnxn_buffer_flags_t *dest)
3807 for (int s = 0; s < nsrc; s++)
3809 gmx_bitmask_t * flag = nbs->work[s].buffer_flags.flag;
3811 for (int b = 0; b < dest->nflag; b++)
3813 bitmask_union(&(dest->flag[b]), flag[b]);
3818 static void print_reduction_cost(const nbnxn_buffer_flags_t *flags, int nout)
3820 int nelem, nkeep, ncopy, nred, out;
3821 gmx_bitmask_t mask_0;
3827 bitmask_init_bit(&mask_0, 0);
3828 for (int b = 0; b < flags->nflag; b++)
3830 if (bitmask_is_equal(flags->flag[b], mask_0))
3832 /* Only flag 0 is set, no copy of reduction required */
3836 else if (!bitmask_is_zero(flags->flag[b]))
3839 for (out = 0; out < nout; out++)
3841 if (bitmask_is_set(flags->flag[b], out))
3858 fprintf(debug, "nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
3860 nelem/static_cast<double>(flags->nflag),
3861 nkeep/static_cast<double>(flags->nflag),
3862 ncopy/static_cast<double>(flags->nflag),
3863 nred/static_cast<double>(flags->nflag));
3866 /* Copies the list entries from src to dest when cjStart <= *cjGlobal < cjEnd.
3867 * *cjGlobal is updated with the cj count in src.
3868 * When setFlags==true, flag bit t is set in flag for all i and j clusters.
3870 template<bool setFlags>
3871 static void copySelectedListRange(const nbnxn_ci_t * gmx_restrict srcCi,
3872 const NbnxnPairlistCpu * gmx_restrict src,
3873 NbnxnPairlistCpu * gmx_restrict dest,
3874 gmx_bitmask_t *flag,
3875 int iFlagShift, int jFlagShift, int t)
3877 const int ncj = srcCi->cj_ind_end - srcCi->cj_ind_start;
3879 dest->ci.push_back(*srcCi);
3880 dest->ci.back().cj_ind_start = dest->cj.size();
3881 dest->ci.back().cj_ind_end = dest->cj.size() + ncj;
3885 bitmask_init_bit(&flag[srcCi->ci >> iFlagShift], t);
3888 for (int j = srcCi->cj_ind_start; j < srcCi->cj_ind_end; j++)
3890 dest->cj.push_back(src->cj[j]);
3894 /* NOTE: This is relatively expensive, since this
3895 * operation is done for all elements in the list,
3896 * whereas at list generation this is done only
3897 * once for each flag entry.
3899 bitmask_init_bit(&flag[src->cj[j].cj >> jFlagShift], t);
3904 /* This routine re-balances the pairlists such that all are nearly equally
3905 * sized. Only whole i-entries are moved between lists. These are moved
3906 * between the ends of the lists, such that the buffer reduction cost should
3907 * not change significantly.
3908 * Note that all original reduction flags are currently kept. This can lead
3909 * to reduction of parts of the force buffer that could be avoided. But since
3910 * the original lists are quite balanced, this will only give minor overhead.
3912 static void rebalanceSimpleLists(int numLists,
3913 NbnxnPairlistCpu * const * const srcSet,
3914 NbnxnPairlistCpu **destSet,
3915 gmx::ArrayRef<nbnxn_search_work_t> searchWork)
3918 for (int s = 0; s < numLists; s++)
3920 ncjTotal += srcSet[s]->ncjInUse;
3922 int ncjTarget = (ncjTotal + numLists - 1)/numLists;
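/* Example: ncjTotal = 1000 cj entries over numLists = 4 gives
 * ncjTarget = 250; thread t then copies the i-entries whose global
 * cj index falls in [250*t, 250*(t + 1)).
 */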
3924 #pragma omp parallel num_threads(numLists)
3926 int t = gmx_omp_get_thread_num();
3928 int cjStart = ncjTarget* t;
3929 int cjEnd = ncjTarget*(t + 1);
3931 /* The destination pair-list for task/thread t */
3932 NbnxnPairlistCpu *dest = destSet[t];
3934 clear_pairlist(dest);
3935 dest->na_cj = srcSet[0]->na_cj;
3937 /* Note that the flags in the work struct (still) contain flags
3938 * for all entries that are present in srcSet[t].
3940 gmx_bitmask_t *flag = searchWork[t].buffer_flags.flag;
3942 int iFlagShift = getBufferFlagShift(dest->na_ci);
3943 int jFlagShift = getBufferFlagShift(dest->na_cj);
3946 for (int s = 0; s < numLists && cjGlobal < cjEnd; s++)
3948 const NbnxnPairlistCpu *src = srcSet[s];
3950 if (cjGlobal + src->ncjInUse > cjStart)
3952 for (gmx::index i = 0; i < gmx::ssize(src->ci) && cjGlobal < cjEnd; i++)
3954 const nbnxn_ci_t *srcCi = &src->ci[i];
3955 int ncj = srcCi->cj_ind_end - srcCi->cj_ind_start;
3956 if (cjGlobal >= cjStart)
3958 /* If the source list is not our own, we need to set
3959 * extra flags (the template bool parameter).
3963 copySelectedListRange
3966 flag, iFlagShift, jFlagShift, t);
3970 copySelectedListRange
3973 dest, flag, iFlagShift, jFlagShift, t);
3981 cjGlobal += src->ncjInUse;
3985 dest->ncjInUse = dest->cj.size();
3989 int ncjTotalNew = 0;
3990 for (int s = 0; s < numLists; s++)
3992 ncjTotalNew += destSet[s]->ncjInUse;
3994 GMX_RELEASE_ASSERT(ncjTotalNew == ncjTotal, "The total size of the lists before and after rebalancing should match");
3998 /* Returns whether the pairlists are so imbalanced that rebalancing is worthwhile */
3999 static bool checkRebalanceSimpleLists(const nbnxn_pairlist_set_t *listSet)
4001 int numLists = listSet->nnbl;
4004 for (int s = 0; s < numLists; s++)
4006 ncjMax = std::max(ncjMax, listSet->nbl[s]->ncjInUse);
4007 ncjTotal += listSet->nbl[s]->ncjInUse;
4011 fprintf(debug, "Pair-list ncjMax %d ncjTotal %d\n", ncjMax, ncjTotal);
4013 /* The rebalancing adds 3% extra time to the search. Heuristically we
4014 * determined that under common conditions the non-bonded kernel balance
4015 * improvement will outweigh this when the imbalance is more than 3%.
4016 * But this will, obviously, depend on search vs kernel time and nstlist.
4018 const real rebalanceTolerance = 1.03;
4020 return numLists*ncjMax > ncjTotal*rebalanceTolerance;
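/* Example: with 4 lists, ncjTotal = 4000 and ncjMax = 1100 we get
 * 4*1100 = 4400 > 4000*1.03 = 4120, so rebalancing pays off; with
 * ncjMax = 1020 (2% imbalance) it does not.
 */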
4023 /* Perform a count (linear) sort to sort the smaller lists to the end.
4024 * This avoids load imbalance on the GPU, as large lists will be
4025 * scheduled and executed first and the smaller lists later.
4026 * Load balancing between multi-processors only happens at the end
4027 * and there smaller lists lead to more effective load balancing.
4028 * The sorting is done on the cj4 count, not on the actual pair counts.
4029 * Not only does this make the sort faster, but it also results in
4030 * better load balancing than using a list sorted on exact load.
4031 * This function swaps the pointer in the pair list to avoid a copy operation.
4033 static void sort_sci(NbnxnPairlistGpu *nbl)
4035 if (nbl->cj4.size() <= nbl->sci.size())
4037 /* nsci = 0 or all sci have size 1, sorting won't change the order */
4041 NbnxnPairlistGpuWork &work = *nbl->work;
4043 /* We will distinguish differences up to double the average */
4044 const int m = (2*nbl->cj4.size())/nbl->sci.size();
4046 /* Resize work.sci_sort so we can sort into it */
4047 work.sci_sort.resize(nbl->sci.size());
4049 std::vector<int> &sort = work.sortBuffer;
4050 /* Set up m + 1 entries in sort, initialized at 0 */
4052 sort.resize(m + 1, 0);
4053 /* Count the entries of each size */
4054 for (const nbnxn_sci_t &sci : nbl->sci)
4056 int i = std::min(m, sci.numJClusterGroups());
4059 /* Calculate the offset for each count */
4062 for (int i = m - 1; i >= 0; i--)
4065 sort[i] = sort[i + 1] + s0;
4069 /* Sort entries directly into place */
4070 gmx::ArrayRef<nbnxn_sci_t> sci_sort = work.sci_sort;
4071 for (const nbnxn_sci_t &sci : nbl->sci)
4073 int i = std::min(m, sci.numJClusterGroups());
4074 sci_sort[sort[i]++] = sci;
4077 /* Swap the sci pointers so we use the new, sorted list */
4078 std::swap(nbl->sci, work.sci_sort);
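/* A minimal, standalone sketch of the counting sort used above, acting on
 * plain int sizes instead of sci entries. It is illustrative only and is
 * excluded from the build, like the reference code guards elsewhere in
 * this file; it assumes <vector> and <algorithm> are available.
 */
#if 0
static std::vector<int> sortSizesDescending(const std::vector<int> &sizes,
                                            const int               m)
{
    /* Count the entries of each size, capping sizes at m */
    std::vector<int> count(m + 1, 0);
    for (const int s : sizes)
    {
        count[std::min(s, m)]++;
    }
    /* Suffix sums give the first output position for each size,
     * so larger sizes end up earlier in the output.
     */
    std::vector<int> offset(m + 1, 0);
    for (int i = m - 1; i >= 0; i--)
    {
        offset[i] = offset[i + 1] + count[i + 1];
    }
    /* Place each entry directly at its final position */
    std::vector<int> sorted(sizes.size());
    for (const int s : sizes)
    {
        sorted[offset[std::min(s, m)]++] = s;
    }
    return sorted;
}
#endif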
4081 /* Make a local or non-local pair-list, depending on iloc */
4082 void nbnxn_make_pairlist(nbnxn_search *nbs,
4083 nbnxn_atomdata_t *nbat,
4084 const t_blocka *excl,
4086 const int min_ci_balanced,
4087 nbnxn_pairlist_set_t *nbl_list,
4088 const InteractionLocality iloc,
4089 const int nb_kernel_type,
4092 int nsubpair_target;
4093 float nsubpair_tot_est;
4096 gmx_bool CombineNBLists;
4098 int np_tot, np_noq, np_hlj, nap;
4100 nnbl = nbl_list->nnbl;
4101 CombineNBLists = nbl_list->bCombined;
4105 fprintf(debug, "ns making %d nblists\n", nnbl);
4108 nbat->bUseBufferFlags = (nbat->out.size() > 1);
4109 /* We should re-init the flags before making the first list */
4110 if (nbat->bUseBufferFlags && iloc == InteractionLocality::Local)
4112 init_buffer_flags(&nbat->buffer_flags, nbat->numAtoms());
4116 if (iloc == InteractionLocality::Local)
4118 /* Only zone (grid) 0 vs 0 */
4123 nzi = nbs->zones->nizone;
4126 if (!nbl_list->bSimple && min_ci_balanced > 0)
4128 get_nsubpair_target(nbs, iloc, rlist, min_ci_balanced,
4129 &nsubpair_target, &nsubpair_tot_est);
4133 nsubpair_target = 0;
4134 nsubpair_tot_est = 0;
4137 /* Clear all pair-lists */
4138 for (int th = 0; th < nnbl; th++)
4140 if (nbl_list->bSimple)
4142 clear_pairlist(nbl_list->nbl[th]);
4146 clear_pairlist(nbl_list->nblGpu[th]);
4151 clear_pairlist_fep(nbl_list->nbl_fep[th]);
4155 for (int zi = 0; zi < nzi; zi++)
4157 const nbnxn_grid_t &iGrid = nbs->grid[zi];
4161 if (iloc == InteractionLocality::Local)
4168 zj0 = nbs->zones->izone[zi].j0;
4169 zj1 = nbs->zones->izone[zi].j1;
4175 for (int zj = zj0; zj < zj1; zj++)
4177 const nbnxn_grid_t &jGrid = nbs->grid[zj];
4181 fprintf(debug, "ns search grid %d vs %d\n", zi, zj);
4184 nbs_cycle_start(&nbs->cc[enbsCCsearch]);
4186 ci_block = get_ci_block_size(iGrid, nbs->DomDec, nnbl);
4188 /* With GPUs: generate progressively smaller lists for better
4189 * load balancing, for local only or for non-local with 2 zones.
4191 progBal = (iloc == InteractionLocality::Local || nbs->zones->n <= 2);
4193 #pragma omp parallel for num_threads(nnbl) schedule(static)
4194 for (int th = 0; th < nnbl; th++)
4198 /* Re-init the thread-local work flag data before making
4199 * the first list (not an elegant conditional).
4201 if (nbat->bUseBufferFlags && ((zi == 0 && zj == 0)))
4203 init_buffer_flags(&nbs->work[th].buffer_flags, nbat->numAtoms());
4206 if (CombineNBLists && th > 0)
4208 GMX_ASSERT(!nbl_list->bSimple, "Can only combine GPU lists");
4210 clear_pairlist(nbl_list->nblGpu[th]);
4213 /* Divide the i super cells equally over the nblists */
4214 if (nbl_list->bSimple)
4216 nbnxn_make_pairlist_part(nbs, iGrid, jGrid,
4217 &nbs->work[th], nbat, *excl,
4221 nbat->bUseBufferFlags,
4223 progBal, nsubpair_tot_est,
4226 nbl_list->nbl_fep[th]);
4230 nbnxn_make_pairlist_part(nbs, iGrid, jGrid,
4231 &nbs->work[th], nbat, *excl,
4235 nbat->bUseBufferFlags,
4237 progBal, nsubpair_tot_est,
4239 nbl_list->nblGpu[th],
4240 nbl_list->nbl_fep[th]);
4243 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
4245 nbs_cycle_stop(&nbs->cc[enbsCCsearch]);
4250 for (int th = 0; th < nnbl; th++)
4252 inc_nrnb(nrnb, eNR_NBNXN_DIST2, nbs->work[th].ndistc);
4254 if (nbl_list->bSimple)
4256 NbnxnPairlistCpu *nbl = nbl_list->nbl[th];
4257 np_tot += nbl->cj.size();
4258 np_noq += nbl->work->ncj_noq;
4259 np_hlj += nbl->work->ncj_hlj;
4263 NbnxnPairlistGpu *nbl = nbl_list->nblGpu[th];
4264 /* This count ignores potential subsequent pair pruning */
4265 np_tot += nbl->nci_tot;
4268 if (nbl_list->bSimple)
4270 nap = nbl_list->nbl[0]->na_ci*nbl_list->nbl[0]->na_cj;
4274 nap = gmx::square(nbl_list->nblGpu[0]->na_ci);
4276 nbl_list->natpair_ljq = (np_tot - np_noq)*nap - np_hlj*nap/2;
4277 nbl_list->natpair_lj = np_noq*nap;
4278 nbl_list->natpair_q = np_hlj*nap/2;
4280 if (CombineNBLists && nnbl > 1)
4282 GMX_ASSERT(!nbl_list->bSimple, "Can only combine GPU lists");
4283 NbnxnPairlistGpu **nbl = nbl_list->nblGpu;
4285 nbs_cycle_start(&nbs->cc[enbsCCcombine]);
4287 combine_nblists(nnbl-1, nbl+1, nbl[0]);
4289 nbs_cycle_stop(&nbs->cc[enbsCCcombine]);
4294 if (nbl_list->bSimple)
4296 if (nnbl > 1 && checkRebalanceSimpleLists(nbl_list))
4298 rebalanceSimpleLists(nbl_list->nnbl, nbl_list->nbl, nbl_list->nbl_work, nbs->work);
4300 /* Swap the pointers of the sets of pair lists */
4301 NbnxnPairlistCpu **tmp = nbl_list->nbl;
4302 nbl_list->nbl = nbl_list->nbl_work;
4303 nbl_list->nbl_work = tmp;
4308 /* Sort the entries on size, large ones first */
4309 if (CombineNBLists || nnbl == 1)
4311 sort_sci(nbl_list->nblGpu[0]);
4315 #pragma omp parallel for num_threads(nnbl) schedule(static)
4316 for (int th = 0; th < nnbl; th++)
4320 sort_sci(nbl_list->nblGpu[th]);
4322 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
4327 if (nbat->bUseBufferFlags)
4329 reduce_buffer_flags(nbs, nbl_list->nnbl, &nbat->buffer_flags);
4334 /* Balance the free-energy lists over all the threads */
4335 balance_fep_lists(nbs, nbl_list);
4338 if (nbl_list->bSimple)
4340 /* This is a fresh list, so not pruned, stored using ci.
4341 * ciOuter is invalid at this point.
4343 GMX_ASSERT(nbl_list->nbl[0]->ciOuter.empty(), "ciOuter is invalid so it should be empty");
4346 /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
4347 if (iloc == InteractionLocality::Local)
4349 nbs->search_count++;
4351 if (nbs->print_cycles &&
4352 (!nbs->DomDec || iloc == InteractionLocality::NonLocal) &&
4353 nbs->search_count % 100 == 0)
4355 nbs_cycle_print(stderr, nbs);
4358 /* If we have more than one list, they were either rebalanced (CPU)
4359 * or combined (GPU), so we should dump the final result to debug.
4361 if (debug && nbl_list->nnbl > 1)
4363 if (nbl_list->bSimple)
4365 for (int t = 0; t < nbl_list->nnbl; t++)
4367 print_nblist_statistics(debug, nbl_list->nbl[t], nbs, rlist);
4372 print_nblist_statistics(debug, nbl_list->nblGpu[0], nbs, rlist);
4380 if (nbl_list->bSimple)
4382 for (int t = 0; t < nbl_list->nnbl; t++)
4384 print_nblist_ci_cj(debug, nbl_list->nbl[t]);
4389 print_nblist_sci_cj(debug, nbl_list->nblGpu[0]);
4393 if (nbat->bUseBufferFlags)
4395 print_reduction_cost(&nbat->buffer_flags, nbl_list->nnbl);
4400 void nbnxnPrepareListForDynamicPruning(nbnxn_pairlist_set_t *listSet)
4402 GMX_RELEASE_ASSERT(listSet->bSimple, "Should only be called for simple lists");
4404 /* TODO: Restructure the lists so we have actual outer and inner
4405 * list objects so we can set a single pointer instead of
4406 * swapping several pointers.
4409 for (int i = 0; i < listSet->nnbl; i++)
4411 NbnxnPairlistCpu &list = *listSet->nbl[i];
4413 /* The search produced a list in ci/cj.
4414 * Swap the list pointers so that the outer list is in ciOuter/cjOuter
4415 * and we can prune that to get an inner list in ci/cj.
4417 GMX_RELEASE_ASSERT(list.ciOuter.empty() && list.cjOuter.empty(),
4418 "The outer lists should be empty before preparation");
4420 std::swap(list.ci, list.ciOuter);
4421 std::swap(list.cj, list.cjOuter);