/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/ns.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_geometry.h"
#include "gromacs/nbnxm/nbnxm_simd.h"
#include "gromacs/nbnxm/pairlistset.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"
#include "gromacs/topology/block.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"

#include "pairlistwork.h"
using namespace gmx;                        // TODO: Remove when this file is moved into gmx namespace

using BoundingBox   = Nbnxm::BoundingBox;   // TODO: Remove when refactoring this file
using BoundingBox1D = Nbnxm::BoundingBox1D; // TODO: Remove when refactoring this file

using Grid          = Nbnxm::Grid;          // TODO: Remove when refactoring this file

// Convenience alias for partial Nbnxm namespace usage
using InteractionLocality = Nbnxm::InteractionLocality;

/* We shift the i-particles backward for PBC.
 * This leads to more conditionals than shifting forward.
 * We do this to get more balanced pair lists.
 */
constexpr bool c_pbcShiftBackward = true;
void PairSearch::SearchCycleCounting::printCycles(FILE                               *fp,
                                                  gmx::ArrayRef<const PairsearchWork> work) const
{
    fprintf(fp, "ns %4d grid %4.1f search %4.1f",
            cc_[enbsCCgrid].count(),
            cc_[enbsCCgrid].averageMCycles(),
            cc_[enbsCCsearch].averageMCycles());

    if (cc_[enbsCCcombine].count() > 0)
    {
        fprintf(fp, " comb %5.2f",
                cc_[enbsCCcombine].averageMCycles());
    }
    fprintf(fp, " s. th");
    for (const PairsearchWork &workEntry : work)
    {
        fprintf(fp, " %4.1f",
                workEntry.cycleCounter.averageMCycles());
    }
}
/* Layout for the nonbonded NxN pair lists */
enum class NbnxnLayout
{
    NoSimd4x4, // i-cluster size 4, j-cluster size 4
    Simd4xN,   // i-cluster size 4, j-cluster size SIMD width
    Simd2xNN,  // i-cluster size 4, j-cluster size half SIMD width
    Gpu8x8x8   // i-cluster size 8, j-cluster size 8 + super-clustering
};
/* Returns the j-cluster size */
template <NbnxnLayout layout>
static constexpr int jClusterSize()
{
    static_assert(layout == NbnxnLayout::NoSimd4x4 || layout == NbnxnLayout::Simd4xN || layout == NbnxnLayout::Simd2xNN,
                  "Currently jClusterSize only supports CPU layouts");

    return layout == NbnxnLayout::Simd4xN ? GMX_SIMD_REAL_WIDTH : (layout == NbnxnLayout::Simd2xNN ? GMX_SIMD_REAL_WIDTH/2 : c_nbnxnCpuIClusterSize);
}
/*! \brief Returns the j-cluster index given the i-cluster index.
 *
 * \tparam    jClusterSize      The number of atoms in a j-cluster
 * \tparam    jSubClusterIndex  The j-sub-cluster index (0/1), used when size(j-cluster) < size(i-cluster)
 * \param[in] ci                The i-cluster index
 */
template <int jClusterSize, int jSubClusterIndex>
static inline int cjFromCi(int ci)
{
    static_assert(jClusterSize == c_nbnxnCpuIClusterSize/2 || jClusterSize == c_nbnxnCpuIClusterSize || jClusterSize == c_nbnxnCpuIClusterSize*2,
                  "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    static_assert(jSubClusterIndex == 0 || jSubClusterIndex == 1,
                  "Only sub-cluster indices 0 and 1 are supported");

    if (jClusterSize == c_nbnxnCpuIClusterSize/2)
    {
        if (jSubClusterIndex == 0)
        {
            return ci << 1;
        }
        else
        {
            return ((ci + 1) << 1) - 1;
        }
    }
    else if (jClusterSize == c_nbnxnCpuIClusterSize)
    {
        return ci;
    }
    else
    {
        return ci >> 1;
    }
}
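/* Worked example (illustrative comment, not part of the original source),
 * assuming c_nbnxnCpuIClusterSize == 4:
 * - jClusterSize 2: i-cluster 3 covers atoms 12-15, i.e. j-clusters 6 and 7,
 *   so cjFromCi<2, 0>(3) == 6 and cjFromCi<2, 1>(3) == 7.
 * - jClusterSize 4: the clusters coincide, so cjFromCi<4, 0>(3) == 3.
 * - jClusterSize 8: i-clusters 6 and 7 both lie in j-cluster 3,
 *   so cjFromCi<8, 0>(6) == cjFromCi<8, 0>(7) == 3.
 */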
/*! \brief Returns the j-cluster index given the i-cluster index.
 *
 * \tparam    layout            The pair-list layout
 * \tparam    jSubClusterIndex  The j-sub-cluster index (0/1), used when size(j-cluster) < size(i-cluster)
 * \param[in] ci                The i-cluster index
 */
template <NbnxnLayout layout, int jSubClusterIndex>
static inline int cjFromCi(int ci)
{
    constexpr int clusterSize = jClusterSize<layout>();

    return cjFromCi<clusterSize, jSubClusterIndex>(ci);
}
/* Returns the nbnxn coordinate data index given the i-cluster index */
template <NbnxnLayout layout>
static inline int xIndexFromCi(int ci)
{
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == c_nbnxnCpuIClusterSize/2 || clusterSize == c_nbnxnCpuIClusterSize || clusterSize == c_nbnxnCpuIClusterSize*2,
                  "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize <= c_nbnxnCpuIClusterSize)
    {
        /* Coordinates are stored packed in groups of 4 */
        return ci*STRIDE_P4;
    }
    else
    {
        /* Coordinates packed in 8, i-cluster size is half the packing width */
        return (ci >> 1)*STRIDE_P8 + (ci & 1)*(c_packX8 >> 1);
    }
}
/* Returns the nbnxn coordinate data index given the j-cluster index */
template <NbnxnLayout layout>
static inline int xIndexFromCj(int cj)
{
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == c_nbnxnCpuIClusterSize/2 || clusterSize == c_nbnxnCpuIClusterSize || clusterSize == c_nbnxnCpuIClusterSize*2,
                  "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize == c_nbnxnCpuIClusterSize/2)
    {
        /* Coordinates are stored packed in groups of 4 */
        return (cj >> 1)*STRIDE_P4 + (cj & 1)*(c_packX4 >> 1);
    }
    else if (clusterSize == c_nbnxnCpuIClusterSize)
    {
        /* Coordinates are stored packed in groups of 4 */
        return cj*STRIDE_P4;
    }
    else
    {
        /* Coordinates are stored packed in groups of 8 */
        return cj*STRIDE_P8;
    }
}
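/* Illustrative sketch (not part of the original source), assuming
 * c_packX4 == 4 and STRIDE_P4 == DIM*c_packX4 == 12: a half-width j-cluster 5
 * (atoms 10-11) starts at x-buffer index (5 >> 1)*12 + (5 & 1)*2 == 26, i.e.
 * within packed group 2 at an offset of half a pack, while a full-width
 * j-cluster 5 simply starts at 5*12 == 60.
 */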
/* Initializes a single t_nblist data structure for free-energy interactions */
static void nbnxn_init_pairlist_fep(t_nblist *nl)
{
    nl->type      = GMX_NBLIST_INTERACTION_FREE_ENERGY;
    nl->igeometry = GMX_NBLIST_GEOMETRY_PARTICLE_PARTICLE;
    /* The interaction functions are set in the free energy kernel function */
    nl->jindex    = nullptr;
    nl->excl_fep  = nullptr;
}
static void free_nblist(t_nblist *nl)
PairsearchWork::PairsearchWork() :
    buffer_flags({0, nullptr, 0}),
    nbl_fep(new t_nblist)
{
    nbnxn_init_pairlist_fep(nbl_fep.get());
}

PairsearchWork::~PairsearchWork()
{
    sfree(buffer_flags.flag);

    free_nblist(nbl_fep.get());
}
// TODO: Move to pairsearch.cpp
PairSearch::DomainSetup::DomainSetup(const int                 ePBC,
                                     const ivec               *numDDCells,
                                     const gmx_domdec_zones_t *ddZones) :
    ePBC(ePBC),
    haveDomDec(numDDCells != nullptr),
    zones(ddZones)
{
    for (int d = 0; d < DIM; d++)
    {
        haveDomDecPerDim[d] = (numDDCells != nullptr && (*numDDCells)[d] > 1);
    }
}
// TODO: Move to pairsearch.cpp
PairSearch::PairSearch(const int                 ePBC,
                       const ivec               *numDDCells,
                       const gmx_domdec_zones_t *ddZones,
                       const PairlistType        pairlistType,
                       const bool                haveFep,
                       const int                 maxNumThreads) :
    domainSetup_(ePBC, numDDCells, ddZones),
    gridSet_(domainSetup_.haveDomDecPerDim, pairlistType, haveFep, maxNumThreads),
    work_(maxNumThreads)
{
    cycleCounting_.recordCycles_ = (getenv("GMX_NBNXN_CYCLE") != nullptr);
}
static void init_buffer_flags(nbnxn_buffer_flags_t *flags,
                              int                   natoms)
{
    flags->nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE;
    if (flags->nflag > flags->flag_nalloc)
    {
        flags->flag_nalloc = over_alloc_large(flags->nflag);
        srenew(flags->flag, flags->flag_nalloc);
    }
    for (int b = 0; b < flags->nflag; b++)
    {
        bitmask_clear(&(flags->flag[b]));
    }
}
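/* Example (illustrative, not part of the original source), assuming
 * NBNXN_BUFFERFLAG_SIZE == 8: natoms == 20 gives
 * nflag == (20 + 7)/8 == 3 flag blocks, covering atoms 0-23, i.e. the
 * ceiling of natoms divided by the flag block size.
 */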
/* Returns the pair-list cutoff between a bounding box and a grid cell given an atom-to-atom pair-list cutoff
 *
 * Given a cutoff distance between atoms, this function returns the cutoff
 * distance between a bounding box of a group of atoms and a grid cell.
 * Since atoms can be geometrically outside of the cell they have been
 * assigned to (when atom groups instead of individual atoms are assigned
 * to cells), this distance can be larger than the input.
 */
static real
listRangeForBoundingBoxToGridCell(real                    rlist,
                                  const Grid::Dimensions &gridDims)
{
    return rlist + gridDims.maxAtomGroupRadius;
}
/* Returns the pair-list cutoff between two grid cells given an atom-to-atom pair-list cutoff
 *
 * Given a cutoff distance between atoms, this function returns the cutoff
 * distance between two grid cells.
 * Since atoms can be geometrically outside of the cell they have been
 * assigned to (when atom groups instead of individual atoms are assigned
 * to cells), this distance can be larger than the input.
 */
static real
listRangeForGridCellToGridCell(real                    rlist,
                               const Grid::Dimensions &iGridDims,
                               const Grid::Dimensions &jGridDims)
{
    return rlist + iGridDims.maxAtomGroupRadius + jGridDims.maxAtomGroupRadius;
}
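/* Illustrative example (not part of the original source): with rlist = 1.0 nm
 * and maxAtomGroupRadius = 0.1 nm on both grids, two cells have to be
 * considered in the pair search when they are within 1.0 + 0.1 + 0.1 = 1.2 nm
 * of each other, since an atom may stick out of its cell by up to its group
 * radius.
 */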
/* Determines the cell range along one dimension that
 * the bounding box b0 - b1 sees.
 */
template<int dim>
static void get_cell_range(real b0, real b1,
                           const Grid::Dimensions &jGridDims,
                           real d2, real rlist, int *cf, int *cl)
{
    real listRangeBBToCell2 = gmx::square(listRangeForBoundingBoxToGridCell(rlist, jGridDims));
    real distanceInCells    = (b0 - jGridDims.lowerCorner[dim])*jGridDims.invCellSize[dim];
    *cf                     = std::max(static_cast<int>(distanceInCells), 0);

    while (*cf > 0 &&
           d2 + gmx::square((b0 - jGridDims.lowerCorner[dim]) - (*cf)*jGridDims.cellSize[dim]) < listRangeBBToCell2)
    {
        (*cf)--;
    }

    *cl = std::min(static_cast<int>((b1 - jGridDims.lowerCorner[dim])*jGridDims.invCellSize[dim]), jGridDims.numCells[dim] - 1);
    while (*cl < jGridDims.numCells[dim] - 1 &&
           d2 + gmx::square((*cl + 1)*jGridDims.cellSize[dim] - (b1 - jGridDims.lowerCorner[dim])) < listRangeBBToCell2)
    {
        (*cl)++;
    }
}
/* Reference code calculating the distance^2 between two bounding boxes */
static float box_dist2(float bx0, float bx1, float by0,
                       float by1, float bz0, float bz1,
                       const BoundingBox *bb)
{
    float d2 = 0;
    float dl, dh, dm, dm0;

    dl  = bx0 - bb->upper.x;
    dh  = bb->lower.x - bx1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = by0 - bb->upper.y;
    dh  = bb->lower.y - by1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bz0 - bb->upper.z;
    dh  = bb->lower.z - bz1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
}
#if !NBNXN_SEARCH_BB_SIMD4

/*! \brief Plain C code calculating the distance^2 between two bounding boxes in xyz0 format
 *
 * \param[in] bb_i  First bounding box
 * \param[in] bb_j  Second bounding box
 */
static float clusterBoundingBoxDistance2(const BoundingBox &bb_i,
                                         const BoundingBox &bb_j)
{
    float dl  = bb_i.lower.x - bb_j.upper.x;
    float dh  = bb_j.lower.x - bb_i.upper.x;
    float dm  = std::max(dl, dh);
    float dm0 = std::max(dm, 0.0f);
    float d2  = dm0*dm0;

    dl   = bb_i.lower.y - bb_j.upper.y;
    dh   = bb_j.lower.y - bb_i.upper.y;
    dm   = std::max(dl, dh);
    dm0  = std::max(dm, 0.0f);
    d2  += dm0*dm0;

    dl   = bb_i.lower.z - bb_j.upper.z;
    dh   = bb_j.lower.z - bb_i.upper.z;
    dm   = std::max(dl, dh);
    dm0  = std::max(dm, 0.0f);
    d2  += dm0*dm0;

    return d2;
}
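/* Illustrative example (not part of the original source): for boxes [0,1] and
 * [1.5,2.5] along x (identical in y and z), dl = -2.5 and dh = 0.5, so the
 * clamped gap is 0.5 and the x contribution to d2 is 0.25; for overlapping
 * boxes both dl and dh are negative and the contribution is clamped to zero.
 */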
#else /* NBNXN_SEARCH_BB_SIMD4 */

/*! \brief 4-wide SIMD code calculating the distance^2 between two bounding boxes in xyz0 format
 *
 * \param[in] bb_i  First bounding box, should be aligned for 4-wide SIMD
 * \param[in] bb_j  Second bounding box, should be aligned for 4-wide SIMD
 */
static float clusterBoundingBoxDistance2(const BoundingBox &bb_i,
                                         const BoundingBox &bb_j)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    const Simd4Float bb_i_S0 = load4(bb_i.lower.ptr());
    const Simd4Float bb_i_S1 = load4(bb_i.upper.ptr());
    const Simd4Float bb_j_S0 = load4(bb_j.lower.ptr());
    const Simd4Float bb_j_S1 = load4(bb_j.upper.ptr());

    const Simd4Float dl_S    = bb_i_S0 - bb_j_S1;
    const Simd4Float dh_S    = bb_j_S0 - bb_i_S1;

    const Simd4Float dm_S    = max(dl_S, dh_S);
    const Simd4Float dm0_S   = max(dm_S, simd4SetZeroF());

    return dotProduct(dm0_S, dm0_S);
}
/* Calculate bb bounding distances of bb_i[si,...,si+3] and store them in d2 */
template <int boundingBoxStart>
static inline void gmx_simdcall
clusterBoundingBoxDistance2_xxxx_simd4_inner(const float      *bb_i,
                                             float            *d2,
                                             const Simd4Float  xj_l,
                                             const Simd4Float  yj_l,
                                             const Simd4Float  zj_l,
                                             const Simd4Float  xj_h,
                                             const Simd4Float  yj_h,
                                             const Simd4Float  zj_h)
{
    constexpr int    stride = c_packedBoundingBoxesDimSize;

    const int        shi  = boundingBoxStart*Nbnxm::c_numBoundingBoxBounds1D*DIM;

    const Simd4Float zero = setZero();

    const Simd4Float xi_l = load4(bb_i + shi + 0*stride);
    const Simd4Float yi_l = load4(bb_i + shi + 1*stride);
    const Simd4Float zi_l = load4(bb_i + shi + 2*stride);
    const Simd4Float xi_h = load4(bb_i + shi + 3*stride);
    const Simd4Float yi_h = load4(bb_i + shi + 4*stride);
    const Simd4Float zi_h = load4(bb_i + shi + 5*stride);

    const Simd4Float dx_0 = xi_l - xj_h;
    const Simd4Float dy_0 = yi_l - yj_h;
    const Simd4Float dz_0 = zi_l - zj_h;

    const Simd4Float dx_1 = xj_l - xi_h;
    const Simd4Float dy_1 = yj_l - yi_h;
    const Simd4Float dz_1 = zj_l - zi_h;

    const Simd4Float mx   = max(dx_0, dx_1);
    const Simd4Float my   = max(dy_0, dy_1);
    const Simd4Float mz   = max(dz_0, dz_1);

    const Simd4Float m0x  = max(mx, zero);
    const Simd4Float m0y  = max(my, zero);
    const Simd4Float m0z  = max(mz, zero);

    const Simd4Float d2x  = m0x * m0x;
    const Simd4Float d2y  = m0y * m0y;
    const Simd4Float d2z  = m0z * m0z;

    const Simd4Float d2s  = d2x + d2y;
    const Simd4Float d2t  = d2s + d2z;

    store4(d2 + boundingBoxStart, d2t);
}
/* 4-wide SIMD code for nsi bb distances for bb format xxxxyyyyzzzz */
static void
clusterBoundingBoxDistance2_xxxx_simd4(const float *bb_j,
                                       const int    nsi,
                                       const float *bb_i,
                                       float       *d2)
{
    constexpr int stride = c_packedBoundingBoxesDimSize;

    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    const Simd4Float xj_l = Simd4Float(bb_j[0*stride]);
    const Simd4Float yj_l = Simd4Float(bb_j[1*stride]);
    const Simd4Float zj_l = Simd4Float(bb_j[2*stride]);
    const Simd4Float xj_h = Simd4Float(bb_j[3*stride]);
    const Simd4Float yj_h = Simd4Float(bb_j[4*stride]);
    const Simd4Float zj_h = Simd4Float(bb_j[5*stride]);

    /* Here we "loop" over si (0,stride) from 0 to nsi with step stride.
     * But as we know the number of iterations is 1 or 2, we unroll manually.
     */
    clusterBoundingBoxDistance2_xxxx_simd4_inner<0>(bb_i, d2,
                                                    xj_l, yj_l, zj_l,
                                                    xj_h, yj_h, zj_h);
    if (nsi > 1)
    {
        clusterBoundingBoxDistance2_xxxx_simd4_inner<stride>(bb_i, d2,
                                                             xj_l, yj_l, zj_l,
                                                             xj_h, yj_h, zj_h);
    }
}

#endif /* NBNXN_SEARCH_BB_SIMD4 */
/* Returns whether any atom pair from two clusters is within distance sqrt(rlist2) */
static inline gmx_bool
clusterpair_in_range(const NbnxnPairlistGpuWork &work,
                     int si,
                     int csj, int stride, const real *x_j,
                     real rlist2)
{
#if !GMX_SIMD4_HAVE_REAL

    /* Plain C version.
     * All coordinates are stored as xyzxyz...
     */

    const real *x_i = work.iSuperClusterData.x.data();

    for (int i = 0; i < c_nbnxnGpuClusterSize; i++)
    {
        int i0 = (si*c_nbnxnGpuClusterSize + i)*DIM;
        for (int j = 0; j < c_nbnxnGpuClusterSize; j++)
        {
            int  j0 = (csj*c_nbnxnGpuClusterSize + j)*stride;

            real d2 = gmx::square(x_i[i0  ] - x_j[j0  ]) +
                      gmx::square(x_i[i0+1] - x_j[j0+1]) +
                      gmx::square(x_i[i0+2] - x_j[j0+2]);

            if (d2 < rlist2)
            {
                return TRUE;
            }
        }
    }

    return FALSE;
#else /* !GMX_SIMD4_HAVE_REAL */

    /* 4-wide SIMD version.
     * The coordinates x_i are stored as xxxxyyyy..., x_j is stored xyzxyz...
     * Using 8-wide AVX(2) is not faster on Intel Sandy Bridge and Haswell.
     */
    static_assert(c_nbnxnGpuClusterSize == 8 || c_nbnxnGpuClusterSize == 4,
                  "A cluster is hard-coded to 4/8 atoms.");

    Simd4Real   rc2_S      = Simd4Real(rlist2);

    const real *x_i        = work.iSuperClusterData.xSimd.data();

    int         dim_stride = c_nbnxnGpuClusterSize*DIM;
    Simd4Real   ix_S0      = load4(x_i + si*dim_stride + 0*GMX_SIMD4_WIDTH);
    Simd4Real   iy_S0      = load4(x_i + si*dim_stride + 1*GMX_SIMD4_WIDTH);
    Simd4Real   iz_S0      = load4(x_i + si*dim_stride + 2*GMX_SIMD4_WIDTH);

    Simd4Real   ix_S1, iy_S1, iz_S1;
    if (c_nbnxnGpuClusterSize == 8)
    {
        ix_S1 = load4(x_i + si*dim_stride + 3*GMX_SIMD4_WIDTH);
        iy_S1 = load4(x_i + si*dim_stride + 4*GMX_SIMD4_WIDTH);
        iz_S1 = load4(x_i + si*dim_stride + 5*GMX_SIMD4_WIDTH);
    }
    /* We loop from the outer to the inner particles to maximize
     * the chance that we find a pair in range quickly and return.
     */
    int j0 = csj*c_nbnxnGpuClusterSize;
    int j1 = j0 + c_nbnxnGpuClusterSize - 1;
    while (j0 < j1)
    {
        Simd4Real jx0_S, jy0_S, jz0_S;
        Simd4Real jx1_S, jy1_S, jz1_S;

        Simd4Real dx_S0, dy_S0, dz_S0;
        Simd4Real dx_S1, dy_S1, dz_S1;
        Simd4Real dx_S2, dy_S2, dz_S2;
        Simd4Real dx_S3, dy_S3, dz_S3;

        Simd4Real rsq_S0, rsq_S1, rsq_S2, rsq_S3;

        Simd4Bool wco_S0, wco_S1, wco_S2, wco_S3;
        Simd4Bool wco_any_S01, wco_any_S23, wco_any_S;

        jx0_S = Simd4Real(x_j[j0*stride+0]);
        jy0_S = Simd4Real(x_j[j0*stride+1]);
        jz0_S = Simd4Real(x_j[j0*stride+2]);

        jx1_S = Simd4Real(x_j[j1*stride+0]);
        jy1_S = Simd4Real(x_j[j1*stride+1]);
        jz1_S = Simd4Real(x_j[j1*stride+2]);

        /* Calculate distance */
        dx_S0 = ix_S0 - jx0_S;
        dy_S0 = iy_S0 - jy0_S;
        dz_S0 = iz_S0 - jz0_S;
        dx_S2 = ix_S0 - jx1_S;
        dy_S2 = iy_S0 - jy1_S;
        dz_S2 = iz_S0 - jz1_S;
        if (c_nbnxnGpuClusterSize == 8)
        {
            dx_S1 = ix_S1 - jx0_S;
            dy_S1 = iy_S1 - jy0_S;
            dz_S1 = iz_S1 - jz0_S;
            dx_S3 = ix_S1 - jx1_S;
            dy_S3 = iy_S1 - jy1_S;
            dz_S3 = iz_S1 - jz1_S;
        }

        /* rsq = dx*dx+dy*dy+dz*dz */
        rsq_S0 = norm2(dx_S0, dy_S0, dz_S0);
        rsq_S2 = norm2(dx_S2, dy_S2, dz_S2);
        if (c_nbnxnGpuClusterSize == 8)
        {
            rsq_S1 = norm2(dx_S1, dy_S1, dz_S1);
            rsq_S3 = norm2(dx_S3, dy_S3, dz_S3);
        }

        wco_S0 = (rsq_S0 < rc2_S);
        wco_S2 = (rsq_S2 < rc2_S);
        if (c_nbnxnGpuClusterSize == 8)
        {
            wco_S1 = (rsq_S1 < rc2_S);
            wco_S3 = (rsq_S3 < rc2_S);
        }
        if (c_nbnxnGpuClusterSize == 8)
        {
            wco_any_S01 = wco_S0 || wco_S1;
            wco_any_S23 = wco_S2 || wco_S3;
            wco_any_S   = wco_any_S01 || wco_any_S23;
        }
        else
        {
            wco_any_S = wco_S0 || wco_S2;
        }

        if (anyTrue(wco_any_S))
        {
            return TRUE;
        }

        j0++;
        j1--;
    }

    return FALSE;

#endif /* !GMX_SIMD4_HAVE_REAL */
}
/* Returns the j-cluster index for index cjIndex in a cj list */
static inline int nblCj(gmx::ArrayRef<const nbnxn_cj_t> cjList,
                        int                             cjIndex)
{
    return cjList[cjIndex].cj;
}

/* Returns the j-cluster index for index cjIndex in a cj4 list */
static inline int nblCj(gmx::ArrayRef<const nbnxn_cj4_t> cj4List,
                        int                              cjIndex)
{
    return cj4List[cjIndex/c_nbnxnGpuJgroupSize].cj[cjIndex & (c_nbnxnGpuJgroupSize - 1)];
}

/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
static unsigned int nbl_imask0(const NbnxnPairlistGpu *nbl, int cj_ind)
{
    return nbl->cj4[cj_ind/c_nbnxnGpuJgroupSize].imei[0].imask;
}
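/* Illustrative example (not part of the original source), assuming
 * c_nbnxnGpuJgroupSize == 4: the linear j-cluster index 10 lives in cj4 group
 * 10/4 == 2 at slot 10 & 3 == 2, so nblCj(cj4List, 10) reads cj4List[2].cj[2].
 */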
/* Initializes a single NbnxnPairlistCpu data structure */
static void nbnxn_init_pairlist(NbnxnPairlistCpu *nbl)
{
    nbl->na_ci = c_nbnxnCpuIClusterSize;
    nbl->ci.clear();
    nbl->ciOuter.clear();
    nbl->cj.clear();
    nbl->cjOuter.clear();

    nbl->work = new NbnxnPairlistCpuWork();
}
NbnxnPairlistGpu::NbnxnPairlistGpu(gmx::PinningPolicy pinningPolicy) :
    na_ci(c_nbnxnGpuClusterSize),
    na_cj(c_nbnxnGpuClusterSize),
    na_sc(c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize),
    rlist(0),
    sci({}, {pinningPolicy}),
    cj4({}, {pinningPolicy}),
    excl({}, {pinningPolicy}),
    nci_tot(0)
{
    static_assert(c_nbnxnGpuNumClusterPerSupercluster == c_gpuNumClusterPerCell,
                  "The search code assumes that a super-cluster matches a search grid cell");

    static_assert(sizeof(cj4[0].imei[0].imask)*8 >= c_nbnxnGpuJgroupSize*c_gpuNumClusterPerCell,
                  "The i super-cluster cluster interaction mask does not contain a sufficient number of bits");

    static_assert(sizeof(excl[0])*8 >= c_nbnxnGpuJgroupSize*c_gpuNumClusterPerCell,
                  "The GPU exclusion mask does not contain a sufficient number of bits");

    // We always want a first entry without any exclusions
    excl.resize(1);

    work = new NbnxnPairlistGpuWork();
}
void nbnxn_init_pairlist_set(nbnxn_pairlist_set_t *nbl_list)
{
    nbl_list->bSimple =
        (nbl_list->params.pairlistType == PairlistType::Simple4x2 ||
         nbl_list->params.pairlistType == PairlistType::Simple4x4 ||
         nbl_list->params.pairlistType == PairlistType::Simple4x8);
    // Currently GPU lists are always combined
    nbl_list->bCombined = !nbl_list->bSimple;

    nbl_list->nnbl = gmx_omp_nthreads_get(emntNonbonded);

    if (!nbl_list->bCombined &&
        nbl_list->nnbl > NBNXN_BUFFERFLAG_MAX_THREADS)
    {
        gmx_fatal(FARGS, "%d OpenMP threads were requested. Since the non-bonded force buffer reduction is prohibitively slow with more than %d threads, we do not allow this. Use %d or fewer OpenMP threads.",
                  nbl_list->nnbl, NBNXN_BUFFERFLAG_MAX_THREADS, NBNXN_BUFFERFLAG_MAX_THREADS);
    }

    if (nbl_list->bSimple)
    {
        snew(nbl_list->nbl, nbl_list->nnbl);
        if (nbl_list->nnbl > 1)
        {
            snew(nbl_list->nbl_work, nbl_list->nnbl);
        }
    }
    else
    {
        snew(nbl_list->nblGpu, nbl_list->nnbl);
    }
    nbl_list->nbl_fep.resize(nbl_list->nnbl);
    /* Execute in order to avoid memory interleaving between threads */
#pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
    for (int i = 0; i < nbl_list->nnbl; i++)
    {
        try
        {
            /* Allocate the nblist data structure locally on each thread
             * to optimize memory access for NUMA architectures.
             */
            if (nbl_list->bSimple)
            {
                nbl_list->nbl[i] = new NbnxnPairlistCpu();

                nbnxn_init_pairlist(nbl_list->nbl[i]);
                if (nbl_list->nnbl > 1)
                {
                    nbl_list->nbl_work[i] = new NbnxnPairlistCpu();
                    nbnxn_init_pairlist(nbl_list->nbl_work[i]);
                }
            }
            else
            {
                /* Only list 0 is used on the GPU, use normal allocation for i>0 */
                auto pinningPolicy = (i == 0 ? gmx::PinningPolicy::PinnedIfSupported : gmx::PinningPolicy::CannotBePinned);

                nbl_list->nblGpu[i] = new NbnxnPairlistGpu(pinningPolicy);
            }

            snew(nbl_list->nbl_fep[i], 1);
            nbnxn_init_pairlist_fep(nbl_list->nbl_fep[i]);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
}
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics(FILE                   *fp,
                                    const NbnxnPairlistCpu *nbl,
                                    const PairSearch       &pairSearch,
                                    const real              rl)
{
    const Grid             &grid = pairSearch.gridSet().grids()[0];
    const Grid::Dimensions &dims = grid.dimensions();

    fprintf(fp, "nbl nci %zu ncj %d\n",
            nbl->ci.size(), nbl->ncjInUse);
    const int    numAtomsJCluster = grid.geometry().numAtomsJCluster;
    const double numAtomsPerCell  = nbl->ncjInUse/static_cast<double>(grid.numCells())*numAtomsJCluster;
    fprintf(fp, "nbl na_cj %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_cj, rl, nbl->ncjInUse, nbl->ncjInUse/static_cast<double>(grid.numCells()),
            numAtomsPerCell,
            numAtomsPerCell/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid.numCells()*numAtomsJCluster/(dims.gridSize[XX]*dims.gridSize[YY]*dims.gridSize[ZZ])));

    fprintf(fp, "nbl average j cell list length %.1f\n",
            0.25*nbl->ncjInUse/std::max(static_cast<double>(nbl->ci.size()), 1.0));

    int cs[SHIFTS] = { 0 };
    int npexcl     = 0;
    for (const nbnxn_ci_t &ciEntry : nbl->ci)
    {
        cs[ciEntry.shift & NBNXN_CI_SHIFT] +=
            ciEntry.cj_ind_end - ciEntry.cj_ind_start;

        int j = ciEntry.cj_ind_start;
        while (j < ciEntry.cj_ind_end &&
               nbl->cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
        {
            npexcl++;
            j++;
        }
    }
    fprintf(fp, "nbl cell pairs, total: %zu excl: %d %.1f%%\n",
            nbl->cj.size(), npexcl, 100*npexcl/std::max(static_cast<double>(nbl->cj.size()), 1.0));
    for (int s = 0; s < SHIFTS; s++)
    {
        if (cs[s] > 0)
        {
            fprintf(fp, "nbl shift %2d ncj %3d\n", s, cs[s]);
        }
    }
}
/* Print statistics of pair lists, used for debug output */
static void print_nblist_statistics(FILE                   *fp,
                                    const NbnxnPairlistGpu *nbl,
                                    const PairSearch       &pairSearch,
                                    const real              rl)
{
    const Grid             &grid = pairSearch.gridSet().grids()[0];
    const Grid::Dimensions &dims = grid.dimensions();

    fprintf(fp, "nbl nsci %zu ncj4 %zu nsi %d excl4 %zu\n",
            nbl->sci.size(), nbl->cj4.size(), nbl->nci_tot, nbl->excl.size());
    const int    numAtomsCluster = grid.geometry().numAtomsICluster;
    const double numAtomsPerCell = nbl->nci_tot/static_cast<double>(grid.numClusters())*numAtomsCluster;
    fprintf(fp, "nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_ci, rl, nbl->nci_tot, nbl->nci_tot/static_cast<double>(grid.numClusters()),
            numAtomsPerCell,
            numAtomsPerCell/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid.numClusters()*numAtomsCluster/(dims.gridSize[XX]*dims.gridSize[YY]*dims.gridSize[ZZ])));

    double sum_nsp  = 0;
    double sum_nsp2 = 0;
    int    nsp_max  = 0;
    int    c[c_gpuNumClusterPerCell + 1] = { 0 };
    for (const nbnxn_sci_t &sci : nbl->sci)
    {
        int nsp = 0;
        for (int j4 = sci.cj4_ind_start; j4 < sci.cj4_ind_end; j4++)
        {
            for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
            {
                int b = 0;
                for (int si = 0; si < c_gpuNumClusterPerCell; si++)
                {
                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))
                    {
                        b++;
                    }
                }
                nsp += b;
                c[b]++;
            }
        }
        sum_nsp  += nsp;
        sum_nsp2 += nsp*nsp;
        nsp_max   = std::max(nsp_max, nsp);
    }
    if (!nbl->sci.empty())
    {
        sum_nsp  /= nbl->sci.size();
        sum_nsp2 /= nbl->sci.size();
    }
    fprintf(fp, "nbl #cluster-pairs: av %.1f stddev %.1f max %d\n",
            sum_nsp, std::sqrt(sum_nsp2 - sum_nsp*sum_nsp), nsp_max);

    if (!nbl->cj4.empty())
    {
        for (int b = 0; b <= c_gpuNumClusterPerCell; b++)
        {
            fprintf(fp, "nbl j-list #i-subcell %d %7d %4.1f\n",
                    b, c[b], 100.0*c[b]/size_t {nbl->cj4.size()*c_nbnxnGpuJgroupSize});
        }
    }
}
/* Returns a pointer to the exclusion mask for j-cluster-group \p cj4 and warp \p warp
 * Generates a new exclusion entry when the j-cluster-group uses
 * the default all-interaction mask at call time, so the returned mask
 * can be modified when needed.
 */
static nbnxn_excl_t *get_exclusion_mask(NbnxnPairlistGpu *nbl,
                                        int               cj4,
                                        int               warp)
{
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
    {
        /* No exclusions set, make a new list entry */
        const size_t oldSize = nbl->excl.size();
        GMX_ASSERT(oldSize >= 1, "We should always have entry [0]");
        /* Add entry with default values: no exclusions */
        nbl->excl.resize(oldSize + 1);
        nbl->cj4[cj4].imei[warp].excl_ind = oldSize;
    }

    return &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
}
static void set_self_and_newton_excls_supersub(NbnxnPairlistGpu *nbl,
                                               int cj4_ind, int sj_offset,
                                               int i_cluster_in_cell)
{
    nbnxn_excl_t *excl[c_nbnxnGpuClusterpairSplit];

    /* Here we only set the self and double pair exclusions */

    /* Reserve extra elements, so the resize() in get_exclusion_mask()
     * will not invalidate excl entries in the loop below
     */
    nbl->excl.reserve(nbl->excl.size() + c_nbnxnGpuClusterpairSplit);
    for (int w = 0; w < c_nbnxnGpuClusterpairSplit; w++)
    {
        excl[w] = get_exclusion_mask(nbl, cj4_ind, w);
    }

    /* Only minor < major bits set */
    for (int ej = 0; ej < nbl->na_ci; ej++)
    {
        int w = (ej >> 2);
        for (int ei = ej; ei < nbl->na_ci; ei++)
        {
            excl[w]->pair[(ej & (c_nbnxnGpuJgroupSize-1))*nbl->na_ci + ei] &=
                ~(1U << (sj_offset*c_gpuNumClusterPerCell + i_cluster_in_cell));
        }
    }
}
/* Returns a diagonal or off-diagonal interaction mask for plain C lists */
static unsigned int get_imask(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=2 */
gmx_unused static unsigned int get_imask_simd_j2(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci*2 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_0 :
            (rdiag && ci*2+1 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_1 :
             NBNXN_INTERACTION_MASK_ALL));
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=4 */
gmx_unused static unsigned int get_imask_simd_j4(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=8 */
gmx_unused static unsigned int get_imask_simd_j8(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj*2 ? NBNXN_INTERACTION_MASK_DIAG_J8_0 :
            (rdiag && ci == cj*2+1 ? NBNXN_INTERACTION_MASK_DIAG_J8_1 :
             NBNXN_INTERACTION_MASK_ALL));
}
#if GMX_SIMD
#if GMX_SIMD_REAL_WIDTH == 2
#define get_imask_simd_4xn  get_imask_simd_j2
#endif
#if GMX_SIMD_REAL_WIDTH == 4
#define get_imask_simd_4xn  get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 8
#define get_imask_simd_4xn  get_imask_simd_j8
#define get_imask_simd_2xnn get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 16
#define get_imask_simd_2xnn get_imask_simd_j8
#endif
#endif
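/* Example (illustrative, not part of the original source): with
 * GMX_SIMD_REAL_WIDTH == 8 the 4xN kernel layout uses j-clusters of 8 atoms,
 * so its diagonal mask comes from get_imask_simd_j8, while the 2xNN layout
 * uses j-clusters of half the SIMD width (4 atoms) and gets its mask from
 * get_imask_simd_j4.
 */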
/*! \brief Plain C code for checking and adding cluster-pairs to the list.
 *
 * \param[in]     jGrid               The j-grid
 * \param[in,out] nbl                 The pair-list to store the cluster pairs in
 * \param[in]     icluster            The index of the i-cluster
 * \param[in]     jclusterFirst       The first cluster in the j-range
 * \param[in]     jclusterLast        The last cluster in the j-range
 * \param[in]     excludeSubDiagonal  Exclude atom pairs with i-index > j-index
 * \param[in]     x_j                 Coordinates for the j-atoms, in xyz format
 * \param[in]     rlist2              The squared list cut-off
 * \param[in]     rbb2                The squared cut-off for putting cluster-pairs in the list based on bounding box distance only
 * \param[in,out] numDistanceChecks   The number of distance checks performed
 */
static void
makeClusterListSimple(const Grid               &jGrid,
                      NbnxnPairlistCpu         *nbl,
                      int                       icluster,
                      int                       jclusterFirst,
                      int                       jclusterLast,
                      bool                      excludeSubDiagonal,
                      const real * gmx_restrict x_j,
                      real                      rlist2,
                      float                     rbb2,
                      int * gmx_restrict        numDistanceChecks)
{
    const BoundingBox * gmx_restrict bb_ci = nbl->work->iClusterData.bb.data();
    const real * gmx_restrict        x_ci  = nbl->work->iClusterData.x.data();

    gmx_bool InRange;

    InRange = FALSE;
    while (!InRange && jclusterFirst <= jclusterLast)
    {
        real d2  = clusterBoundingBoxDistance2(bb_ci[0], jGrid.jBoundingBoxes()[jclusterFirst]);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rlist2)
        {
            int cjf_gl = jGrid.cellOffset() + jclusterFirst;
            for (int i = 0; i < c_nbnxnCpuIClusterSize && !InRange; i++)
            {
                for (int j = 0; j < c_nbnxnCpuIClusterSize; j++)
                {
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjf_gl*c_nbnxnCpuIClusterSize+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjf_gl*c_nbnxnCpuIClusterSize+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjf_gl*c_nbnxnCpuIClusterSize+j)*STRIDE_XYZ+ZZ]) < rlist2);
                }
            }
            *numDistanceChecks += c_nbnxnCpuIClusterSize*c_nbnxnCpuIClusterSize;
        }
        if (!InRange)
        {
            jclusterFirst++;
        }
    }
    if (!InRange)
    {
        return;
    }

    InRange = FALSE;
    while (!InRange && jclusterLast > jclusterFirst)
    {
        real d2  = clusterBoundingBoxDistance2(bb_ci[0], jGrid.jBoundingBoxes()[jclusterLast]);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rlist2)
        {
            int cjl_gl = jGrid.cellOffset() + jclusterLast;
            for (int i = 0; i < c_nbnxnCpuIClusterSize && !InRange; i++)
            {
                for (int j = 0; j < c_nbnxnCpuIClusterSize; j++)
                {
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjl_gl*c_nbnxnCpuIClusterSize+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjl_gl*c_nbnxnCpuIClusterSize+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjl_gl*c_nbnxnCpuIClusterSize+j)*STRIDE_XYZ+ZZ]) < rlist2);
                }
            }
            *numDistanceChecks += c_nbnxnCpuIClusterSize*c_nbnxnCpuIClusterSize;
        }
        if (!InRange)
        {
            jclusterLast--;
        }
    }

    if (jclusterFirst <= jclusterLast)
    {
        for (int jcluster = jclusterFirst; jcluster <= jclusterLast; jcluster++)
        {
            /* Store cj and the interaction mask */
            nbnxn_cj_t cjEntry;
            cjEntry.cj   = jGrid.cellOffset() + jcluster;
            cjEntry.excl = get_imask(excludeSubDiagonal, icluster, jcluster);
            nbl->cj.push_back(cjEntry);
        }
        /* Increase the closing index in the i list */
        nbl->ci.back().cj_ind_end = nbl->cj.size();
    }
}
#ifdef GMX_NBNXN_SIMD_4XN
#include "gromacs/nbnxm/pairlist_simd_4xm.h"
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
#include "gromacs/nbnxm/pairlist_simd_2xmm.h"
#endif
/* Plain C or SIMD4 code for making a pair list of super-cell sci vs scj.
 * Checks bounding box distances and possibly atom pair distances.
 */
static void make_cluster_list_supersub(const Grid       &iGrid,
                                       const Grid       &jGrid,
                                       NbnxnPairlistGpu *nbl,
                                       const int         sci,
                                       const int         scj,
                                       const bool        excludeSubDiagonal,
                                       const int         stride,
                                       const real       *x,
                                       const real        rlist2,
                                       const float       rbb2,
                                       int              *numDistanceChecks)
{
    NbnxnPairlistGpuWork &work   = *nbl->work;

#if NBNXN_BBXXXX
    const float          *pbb_ci = work.iSuperClusterData.bbPacked.data();
#else
    const BoundingBox    *bb_ci  = work.iSuperClusterData.bb.data();
#endif

    assert(c_nbnxnGpuClusterSize == iGrid.geometry().numAtomsICluster);
    assert(c_nbnxnGpuClusterSize == jGrid.geometry().numAtomsICluster);

    /* We generate the pairlist mainly based on bounding-box distances
     * and do atom pair distance based pruning on the GPU.
     * Only if a j-group contains a single cluster-pair, we try to prune
     * that pair based on atom distances on the CPU to avoid empty j-groups.
     */
#define PRUNE_LIST_CPU_ONE 1
#define PRUNE_LIST_CPU_ALL 0

#if PRUNE_LIST_CPU_ONE
    int    ci_last = -1;
#endif

    float *d2l = work.distanceBuffer.data();

    for (int subc = 0; subc < jGrid.numClustersPerCell()[scj]; subc++)
    {
        const int cj4_ind   = work.cj_ind/c_nbnxnGpuJgroupSize;
        const int cj_offset = work.cj_ind - cj4_ind*c_nbnxnGpuJgroupSize;
        const int cj        = scj*c_gpuNumClusterPerCell + subc;

        const int cj_gl     = jGrid.cellOffset()*c_gpuNumClusterPerCell + cj;

        int       ci1;
        if (excludeSubDiagonal && sci == scj)
        {
            ci1 = subc + 1;
        }
        else
        {
            ci1 = iGrid.numClustersPerCell()[sci];
        }

#if NBNXN_BBXXXX
        /* Determine all ci1 bb distances in one call with SIMD4 */
        const int offset = packedBoundingBoxesIndex(cj) + (cj & (c_packedBoundingBoxesDimSize - 1));
        clusterBoundingBoxDistance2_xxxx_simd4(jGrid.packedBoundingBoxes().data() + offset,
                                               ci1, pbb_ci, d2l);
        *numDistanceChecks += c_nbnxnGpuClusterSize*2;
#endif

        int          npair = 0;
        unsigned int imask = 0;
        /* We use a fixed upper-bound instead of ci1 to help optimization */
        for (int ci = 0; ci < c_gpuNumClusterPerCell; ci++)
        {
            if (ci == ci1)
            {
                break;
            }

#if !NBNXN_BBXXXX
            /* Determine the bb distance between ci and cj */
            d2l[ci]             = clusterBoundingBoxDistance2(bb_ci[ci], jGrid.jBoundingBoxes()[cj]);
            *numDistanceChecks += 2;
#endif
            float d2 = d2l[ci];

#if PRUNE_LIST_CPU_ALL
            /* Check if the distance is within the distance where
             * we use only the bounding box distance rbb,
             * or within the cut-off and there is at least one atom pair
             * within the cut-off. This check is very costly.
             */
            *numDistanceChecks += c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize;
            if (d2 < rbb2 ||
                (d2 < rlist2 &&
                 clusterpair_in_range(work, ci, cj_gl, stride, x, rlist2)))
#else
            /* Check if the distance between the two bounding boxes
             * is within the pair-list cut-off.
             */
            if (d2 < rlist2)
#endif
            {
                /* Flag this i-subcell to be taken into account */
                imask |= (1U << (cj_offset*c_gpuNumClusterPerCell + ci));

#if PRUNE_LIST_CPU_ONE
                ci_last = ci;
#endif

                npair++;
            }
        }

#if PRUNE_LIST_CPU_ONE
        /* If we only found 1 pair, check if any atoms are actually
         * within the cut-off, so we could get rid of it.
         */
        if (npair == 1 && d2l[ci_last] >= rbb2 &&
            !clusterpair_in_range(work, ci_last, cj_gl, stride, x, rlist2))
        {
            imask &= ~(1U << (cj_offset*c_gpuNumClusterPerCell + ci_last));
            npair--;
        }
#endif

        if (npair > 0)
        {
            /* We have at least one cluster pair: add a j-entry */
            if (static_cast<size_t>(cj4_ind) == nbl->cj4.size())
            {
                nbl->cj4.resize(nbl->cj4.size() + 1);
            }
            nbnxn_cj4_t *cj4 = &nbl->cj4[cj4_ind];

            cj4->cj[cj_offset] = cj_gl;

            /* Set the exclusions for the ci==sj entry.
             * Here we don't bother to check if this entry is actually flagged,
             * as it will nearly always be in the list.
             */
            if (excludeSubDiagonal && sci == scj)
            {
                set_self_and_newton_excls_supersub(nbl, cj4_ind, cj_offset, subc);
            }

            /* Copy the cluster interaction mask to the list */
            for (int w = 0; w < c_nbnxnGpuClusterpairSplit; w++)
            {
                cj4->imei[w].imask |= imask;
            }

            nbl->work->cj_ind++;

            /* Keep the count */
            nbl->nci_tot += npair;

            /* Increase the closing index in i super-cell list */
            nbl->sci.back().cj4_ind_end =
                (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize;
        }
    }
}
/* Returns how many contiguous j-clusters we have starting in the i-list */
template <typename CjListType>
static int numContiguousJClusters(const int                       cjIndexStart,
                                  const int                       cjIndexEnd,
                                  gmx::ArrayRef<const CjListType> cjList)
{
    const int firstJCluster = nblCj(cjList, cjIndexStart);

    int       numContiguous = 0;

    while (cjIndexStart + numContiguous < cjIndexEnd &&
           nblCj(cjList, cjIndexStart + numContiguous) == firstJCluster + numContiguous)
    {
        numContiguous++;
    }

    return numContiguous;
}
/*! \internal
 * \brief Helper struct for efficient searching for excluded atoms in a j-list
 */
struct JListRanges
{
    /*! \brief Constructs a j-list range from \p cjList with the given index range */
    template <typename CjListType>
    JListRanges(int                             cjIndexStart,
                int                             cjIndexEnd,
                gmx::ArrayRef<const CjListType> cjList);

    int cjIndexStart; //!< The start index in the j-list
    int cjIndexEnd;   //!< The end index in the j-list
    int cjFirst;      //!< The j-cluster with index cjIndexStart
    int cjLast;       //!< The j-cluster with index cjIndexEnd-1
    int numDirect;    //!< Up to cjIndexStart+numDirect the j-clusters are cjFirst + the index offset
};

template <typename CjListType>
JListRanges::JListRanges(int                             cjIndexStart,
                         int                             cjIndexEnd,
                         gmx::ArrayRef<const CjListType> cjList) :
    cjIndexStart(cjIndexStart),
    cjIndexEnd(cjIndexEnd)
{
    GMX_ASSERT(cjIndexEnd > cjIndexStart, "JListRanges should only be called with non-empty lists");

    cjFirst   = nblCj(cjList, cjIndexStart);
    cjLast    = nblCj(cjList, cjIndexEnd - 1);

    /* Determine how many contiguous j-cells we have starting
     * from the first i-cell. This number can be used to directly
     * calculate j-cell indices for excluded atoms.
     */
    numDirect = numContiguousJClusters(cjIndexStart, cjIndexEnd, cjList);
}
/* Return the index of \p jCluster in the given range or -1 when not present
 *
 * Note: This code is executed very often and therefore performance is
 *       important. It should be inlined and fully optimized.
 */
template <typename CjListType>
static inline int
findJClusterInJList(int                             jCluster,
                    const JListRanges              &ranges,
                    gmx::ArrayRef<const CjListType> cjList)
{
    int index;

    if (jCluster < ranges.cjFirst + ranges.numDirect)
    {
        /* We can calculate the index directly using the offset */
        index = ranges.cjIndexStart + jCluster - ranges.cjFirst;
    }
    else
    {
        /* Search for jCluster using bisection */
        index           = -1;
        int rangeStart  = ranges.cjIndexStart + ranges.numDirect;
        int rangeEnd    = ranges.cjIndexEnd;
        int rangeMiddle;
        while (index == -1 && rangeStart < rangeEnd)
        {
            rangeMiddle = (rangeStart + rangeEnd) >> 1;

            const int clusterMiddle = nblCj(cjList, rangeMiddle);

            if (jCluster == clusterMiddle)
            {
                index = rangeMiddle;
            }
            else if (jCluster < clusterMiddle)
            {
                rangeEnd = rangeMiddle;
            }
            else
            {
                rangeStart = rangeMiddle + 1;
            }
        }
    }

    return index;
}
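/* Worked example (illustrative, not part of the original source): for a
 * j-list containing clusters {5, 6, 7, 11, 14}, numDirect == 3, since 5, 6
 * and 7 are contiguous. A lookup of cluster 6 hits the direct branch
 * (index = cjIndexStart + 6 - 5), while a lookup of cluster 14 falls through
 * to the bisection over the sorted tail {11, 14}.
 */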
// TODO: Get rid of the two functions below by renaming sci to ci (or something better)

/* Return the i-entry in the list we are currently operating on */
static nbnxn_ci_t *getOpenIEntry(NbnxnPairlistCpu *nbl)
{
    return &nbl->ci.back();
}

/* Return the i-entry in the list we are currently operating on */
static nbnxn_sci_t *getOpenIEntry(NbnxnPairlistGpu *nbl)
{
    return &nbl->sci.back();
}
/* Set all atom-pair exclusions for a simple type list i-entry
 *
 * Set all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for simple list entry iEntry.
 */
static void
setExclusionsForIEntry(const Nbnxm::GridSet &gridSet,
                       NbnxnPairlistCpu     *nbl,
                       gmx_bool              diagRemoved,
                       int                   na_cj_2log,
                       const nbnxn_ci_t     &iEntry,
                       const t_blocka       &exclusions)
{
    if (iEntry.cj_ind_end == iEntry.cj_ind_start)
    {
        /* Empty list: no exclusions */
        return;
    }

    const JListRanges        ranges(iEntry.cj_ind_start, iEntry.cj_ind_end, gmx::makeConstArrayRef(nbl->cj));

    const int                iCluster = iEntry.ci;

    gmx::ArrayRef<const int> cell        = gridSet.cells();
    gmx::ArrayRef<const int> atomIndices = gridSet.atomIndices();

    /* Loop over the atoms in the i-cluster */
    for (int i = 0; i < nbl->na_ci; i++)
    {
        const int iIndex = iCluster*nbl->na_ci + i;
        const int iAtom  = atomIndices[iIndex];
        if (iAtom >= 0)
        {
            /* Loop over the topology-based exclusions for this i-atom */
            for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1]; exclIndex++)
            {
                const int jAtom = exclusions.a[exclIndex];

                if (jAtom == iAtom)
                {
                    /* The self exclusions are already set, save some time */
                    continue;
                }

                /* Get the index of the j-atom in the nbnxn atom data */
                const int jIndex = cell[jAtom];

                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                if (diagRemoved && jIndex <= iIndex)
                {
                    continue;
                }

                const int jCluster = (jIndex >> na_cj_2log);

                /* Could the cluster be in our list? */
                if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
                {
                    const int index =
                        findJClusterInJList(jCluster, ranges,
                                            gmx::makeConstArrayRef(nbl->cj));

                    if (index >= 0)
                    {
                        /* We found an exclusion, clear the corresponding
                         * interaction bit.
                         */
                        const int innerJ = jIndex - (jCluster << na_cj_2log);

                        nbl->cj[index].excl &= ~(1U << ((i << na_cj_2log) + innerJ));
                    }
                }
            }
        }
    }
}
/* Add a new i-entry to the FEP list and copy the i-properties */
static inline void fep_list_new_nri_copy(t_nblist *nlist)
{
    /* Add a new i-entry */
    nlist->nri++;

    assert(nlist->nri < nlist->maxnri);

    /* Duplicate the last i-entry, except for jindex, which continues */
    nlist->iinr[nlist->nri]   = nlist->iinr[nlist->nri-1];
    nlist->shift[nlist->nri]  = nlist->shift[nlist->nri-1];
    nlist->gid[nlist->nri]    = nlist->gid[nlist->nri-1];
    nlist->jindex[nlist->nri] = nlist->nrj;
}
/* For load balancing of the free-energy lists over threads, we set
 * the maximum nrj size of an i-entry to 40. This leads to good
 * load balancing in the worst-case scenario of a single perturbed
 * particle on 16 threads, while not introducing significant overhead.
 * Note that half of the perturbed pairs will anyhow end up in very small lists,
 * since non-perturbed i-particles will see few perturbed j-particles.
 */
const int max_nrj_fep = 40;
/* Exclude the perturbed pairs from the Verlet list. This is only done to avoid
 * singularities for overlapping particles (0/0), since the charges and
 * LJ parameters have been zeroed in the nbnxn data structure.
 * Simultaneously make a group pair list for the perturbed pairs.
 */
static void make_fep_list(gmx::ArrayRef<const int>  atomIndices,
                          const nbnxn_atomdata_t   *nbat,
                          NbnxnPairlistCpu         *nbl,
                          gmx_bool                  bDiagRemoved,
                          nbnxn_ci_t               *nbl_ci,
                          real gmx_unused           shx,
                          real gmx_unused           shy,
                          real gmx_unused           shz,
                          real gmx_unused           rlist_fep2,
                          const Grid               &iGrid,
                          const Grid               &jGrid,
                          t_nblist                 *nlist)
{
    int      ci, cj_ind_start, cj_ind_end, cja, cjr;
    int      nri_max;
    int      gid_i = 0, gid_j, gid;
    int      egp_shift, egp_mask;
    int      gid_cj = 0;
    int      ind_i, ind_j, ai, aj;
    int      nri;
    gmx_bool bFEP_i, bFEP_i_all;

    if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
    {
        /* Empty list */
        return;
    }

    ci = nbl_ci->ci;

    cj_ind_start = nbl_ci->cj_ind_start;
    cj_ind_end   = nbl_ci->cj_ind_end;

    /* In the worst case we have alternating energy groups
     * and create #atom-pair lists, which means we need the size
     * of a cluster pair (na_ci*na_cj) times the number of cj's.
     */
    nri_max = nbl->na_ci*nbl->na_cj*(cj_ind_end - cj_ind_start);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }

    const int                       numAtomsJCluster = jGrid.geometry().numAtomsJCluster;

    const nbnxn_atomdata_t::Params &nbatParams       = nbat->params();

    const int                       ngid             = nbatParams.nenergrp;

    /* TODO: Consider adding a check in grompp and changing this to an assert */
    const int numBitsInEnergyGroupIdsForAtomsInJCluster = sizeof(gid_cj)*8;
    if (ngid*numAtomsJCluster > numBitsInEnergyGroupIdsForAtomsInJCluster)
    {
        gmx_fatal(FARGS, "The Verlet scheme with %dx%d kernels and free-energy only supports up to %zu energy groups",
                  iGrid.geometry().numAtomsICluster, numAtomsJCluster,
                  (sizeof(gid_cj)*8)/numAtomsJCluster);
    }

    egp_shift = nbatParams.neg_2log;
    egp_mask  = (1 << egp_shift) - 1;

    /* Loop over the atoms in the i sub-cell */
    bFEP_i_all = TRUE;
    for (int i = 0; i < nbl->na_ci; i++)
    {
        ind_i = ci*nbl->na_ci + i;
        ai    = atomIndices[ind_i];
        if (ai >= 0)
        {
            nri                  = nlist->nri;
            nlist->jindex[nri+1] = nlist->jindex[nri];
            nlist->iinr[nri]     = ai;
            /* The actual energy group pair index is set later */
            nlist->gid[nri]      = 0;
            nlist->shift[nri]    = nbl_ci->shift & NBNXN_CI_SHIFT;

            bFEP_i = iGrid.atomIsPerturbed(ci - iGrid.cellOffset(), i);

            bFEP_i_all = bFEP_i_all && bFEP_i;

            if (nlist->nrj + (cj_ind_end - cj_ind_start)*nbl->na_cj > nlist->maxnrj)
            {
                nlist->maxnrj = over_alloc_small(nlist->nrj + (cj_ind_end - cj_ind_start)*nbl->na_cj);
                srenew(nlist->jjnr, nlist->maxnrj);
                srenew(nlist->excl_fep, nlist->maxnrj);
            }

            if (ngid > 1)
            {
                gid_i = (nbatParams.energrp[ci] >> (egp_shift*i)) & egp_mask;
            }

            for (int cj_ind = cj_ind_start; cj_ind < cj_ind_end; cj_ind++)
            {
                unsigned int fep_cj;

                cja = nbl->cj[cj_ind].cj;

                if (numAtomsJCluster == jGrid.geometry().numAtomsICluster)
                {
                    cjr    = cja - jGrid.cellOffset();
                    fep_cj = jGrid.fepBits(cjr);
                    if (ngid > 1)
                    {
                        gid_cj = nbatParams.energrp[cja];
                    }
                }
                else if (2*numAtomsJCluster == jGrid.geometry().numAtomsICluster)
                {
                    cjr    = cja - jGrid.cellOffset()*2;
                    /* Extract half of the ci fep/energrp mask */
                    fep_cj = (jGrid.fepBits(cjr >> 1) >> ((cjr & 1)*numAtomsJCluster)) & ((1 << numAtomsJCluster) - 1);
                    if (ngid > 1)
                    {
                        gid_cj = nbatParams.energrp[cja >> 1] >> ((cja & 1)*numAtomsJCluster*egp_shift) & ((1 << (numAtomsJCluster*egp_shift)) - 1);
                    }
                }
                else
                {
                    cjr    = cja - (jGrid.cellOffset() >> 1);
                    /* Combine two ci fep masks/energrp */
                    fep_cj = jGrid.fepBits(cjr*2) + (jGrid.fepBits(cjr*2 + 1) << jGrid.geometry().numAtomsICluster);
                    if (ngid > 1)
                    {
                        gid_cj = nbatParams.energrp[cja*2] + (nbatParams.energrp[cja*2+1] << (jGrid.geometry().numAtomsICluster*egp_shift));
                    }
                }

                if (bFEP_i || fep_cj != 0)
                {
                    for (int j = 0; j < nbl->na_cj; j++)
                    {
                        /* Is this interaction perturbed and not excluded? */
                        ind_j = cja*nbl->na_cj + j;
                        aj    = atomIndices[ind_j];
                        if (aj >= 0 &&
                            (bFEP_i || (fep_cj & (1 << j))) &&
                            (!bDiagRemoved || ind_j >= ind_i))
                        {
                            if (ngid > 1)
                            {
                                gid_j = (gid_cj >> (j*egp_shift)) & egp_mask;
                                gid   = GID(gid_i, gid_j, ngid);

                                if (nlist->nrj > nlist->jindex[nri] &&
                                    nlist->gid[nri] != gid)
                                {
                                    /* Energy group pair changed: new list */
                                    fep_list_new_nri_copy(nlist);
                                    nri = nlist->nri;
                                }
                                nlist->gid[nri] = gid;
                            }

                            if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                            {
                                fep_list_new_nri_copy(nlist);
                                nri = nlist->nri;
                            }

                            /* Add it to the FEP list */
                            nlist->jjnr[nlist->nrj]     = aj;
                            nlist->excl_fep[nlist->nrj] = (nbl->cj[cj_ind].excl >> (i*nbl->na_cj + j)) & 1;
                            nlist->nrj++;

                            /* Exclude it from the normal list.
                             * Note that the charge has been set to zero,
                             * but we need to avoid 0/0, as perturbed atoms
                             * can be on top of each other.
                             */
                            nbl->cj[cj_ind].excl &= ~(1U << (i*nbl->na_cj + j));
                        }
                    }
                }
            }

            if (nlist->nrj > nlist->jindex[nri])
            {
                /* Actually add this new, non-empty, list */
                nlist->nri++;
                nlist->jindex[nlist->nri] = nlist->nrj;
            }
        }
    }

    if (bFEP_i_all)
    {
        /* All interactions are perturbed, we can skip this entry */
        nbl_ci->cj_ind_end = cj_ind_start;
        nbl->ncjInUse     -= cj_ind_end - cj_ind_start;
    }
}
/* Return the index of a j-cluster within a cj4 group */
static inline int cj_mod_cj4(int cj)
{
    return cj & (c_nbnxnGpuJgroupSize - 1);
}

/* Convert a j-cluster index to a cj4 group index */
static inline int cj_to_cj4(int cj)
{
    return cj/c_nbnxnGpuJgroupSize;
}

/* Return the index of a j-atom within a warp */
static inline int a_mod_wj(int a)
{
    return a & (c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit - 1);
}
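/* Illustrative example (not part of the original source), assuming
 * c_nbnxnGpuJgroupSize == 4, c_nbnxnGpuClusterSize == 8 and
 * c_nbnxnGpuClusterpairSplit == 2: j-cluster 10 maps to cj4 group
 * cj_to_cj4(10) == 2 at slot cj_mod_cj4(10) == 2, and j-atom 5 has
 * index a_mod_wj(5) == 1 within its 4-atom half of the cluster,
 * each half being handled by one warp.
 */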
/* As make_fep_list above, but for super/sub lists. */
static void make_fep_list(gmx::ArrayRef<const int>  atomIndices,
                          const nbnxn_atomdata_t   *nbat,
                          NbnxnPairlistGpu         *nbl,
                          gmx_bool                  bDiagRemoved,
                          const nbnxn_sci_t        *nbl_sci,
                          real                      shx,
                          real                      shy,
                          real                      shz,
                          real                      rlist_fep2,
                          const Grid               &iGrid,
                          const Grid               &jGrid,
                          t_nblist                 *nlist)
{
    int                nri_max;
    int                c_abs;
    int                ind_i, ind_j, ai, aj;
    int                nri;
    gmx_bool           bFEP_i;
    real               xi, yi, zi;
    const nbnxn_cj4_t *cj4;

    const int numJClusterGroups = nbl_sci->numJClusterGroups();
    if (numJClusterGroups == 0)
    {
        /* Empty list */
        return;
    }

    const int sci           = nbl_sci->sci;

    const int cj4_ind_start = nbl_sci->cj4_ind_start;
    const int cj4_ind_end   = nbl_sci->cj4_ind_end;

    /* Here we process one super-cell, max #atoms na_sc, versus a list
     * cj4 entries, each with max c_nbnxnGpuJgroupSize cj's, each
     * of size na_cj atoms.
     * On the GPU we don't support energy groups (yet).
     * So for each of the na_sc i-atoms, we need max one FEP list
     * for each max_nrj_fep j-atoms.
     */
    nri_max = nbl->na_sc*nbl->na_cj*(1 + (numJClusterGroups*c_nbnxnGpuJgroupSize)/max_nrj_fep);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }

    /* Loop over the atoms in the i super-cluster */
    for (int c = 0; c < c_gpuNumClusterPerCell; c++)
    {
        c_abs = sci*c_gpuNumClusterPerCell + c;

        for (int i = 0; i < nbl->na_ci; i++)
        {
            ind_i = c_abs*nbl->na_ci + i;
            ai    = atomIndices[ind_i];
            if (ai >= 0)
            {
                nri                  = nlist->nri;
                nlist->jindex[nri+1] = nlist->jindex[nri];
                nlist->iinr[nri]     = ai;
                /* With GPUs, energy groups are not supported */
                nlist->gid[nri]      = 0;
                nlist->shift[nri]    = nbl_sci->shift & NBNXN_CI_SHIFT;

                bFEP_i = iGrid.atomIsPerturbed(c_abs - iGrid.cellOffset()*c_gpuNumClusterPerCell, i);

                xi = nbat->x()[ind_i*nbat->xstride+XX] + shx;
                yi = nbat->x()[ind_i*nbat->xstride+YY] + shy;
                zi = nbat->x()[ind_i*nbat->xstride+ZZ] + shz;

                const int nrjMax = nlist->nrj + numJClusterGroups*c_nbnxnGpuJgroupSize*nbl->na_cj;
                if (nrjMax > nlist->maxnrj)
                {
                    nlist->maxnrj = over_alloc_small(nrjMax);
                    srenew(nlist->jjnr, nlist->maxnrj);
                    srenew(nlist->excl_fep, nlist->maxnrj);
                }

                for (int cj4_ind = cj4_ind_start; cj4_ind < cj4_ind_end; cj4_ind++)
                {
                    cj4 = &nbl->cj4[cj4_ind];

                    for (int gcj = 0; gcj < c_nbnxnGpuJgroupSize; gcj++)
                    {
                        if ((cj4->imei[0].imask & (1U << (gcj*c_gpuNumClusterPerCell + c))) == 0)
                        {
                            /* Skip this ci for this cj */
                            continue;
                        }

                        const int cjr =
                            cj4->cj[gcj] - jGrid.cellOffset()*c_gpuNumClusterPerCell;

                        if (bFEP_i || jGrid.clusterIsPerturbed(cjr))
                        {
                            for (int j = 0; j < nbl->na_cj; j++)
                            {
                                /* Is this interaction perturbed and not excluded? */
                                ind_j = (jGrid.cellOffset()*c_gpuNumClusterPerCell + cjr)*nbl->na_cj + j;
                                aj    = atomIndices[ind_j];
                                if (aj >= 0 &&
                                    (bFEP_i || jGrid.atomIsPerturbed(cjr, j)) &&
                                    (!bDiagRemoved || ind_j >= ind_i))
                                {
                                    int           excl_pair;
                                    unsigned int  excl_bit;
                                    real          dx, dy, dz;

                                    const int     jHalf = j/(c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit);
                                    nbnxn_excl_t *excl  =
                                        get_exclusion_mask(nbl, cj4_ind, jHalf);

                                    excl_pair = a_mod_wj(j)*nbl->na_ci + i;
                                    excl_bit  = (1U << (gcj*c_gpuNumClusterPerCell + c));

                                    dx = nbat->x()[ind_j*nbat->xstride+XX] - xi;
                                    dy = nbat->x()[ind_j*nbat->xstride+YY] - yi;
                                    dz = nbat->x()[ind_j*nbat->xstride+ZZ] - zi;

                                    /* The unpruned GPU list has more than 2/3
                                     * of the atom pairs beyond rlist. Using
                                     * this list will cause a lot of overhead
                                     * in the CPU FEP kernels, especially
                                     * relative to the fast GPU kernels.
                                     * So we prune the FEP list here.
                                     */
                                    if (dx*dx + dy*dy + dz*dz < rlist_fep2)
                                    {
                                        if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                                        {
                                            fep_list_new_nri_copy(nlist);
                                            nri = nlist->nri;
                                        }

                                        /* Add it to the FEP list */
                                        nlist->jjnr[nlist->nrj]     = aj;
                                        nlist->excl_fep[nlist->nrj] = (excl->pair[excl_pair] & excl_bit) ? 1 : 0;
                                        nlist->nrj++;
                                    }

                                    /* Exclude it from the normal list.
                                     * Note that the charge and LJ parameters have
                                     * been set to zero, but we need to avoid 0/0,
                                     * as perturbed atoms can be on top of each other.
                                     */
                                    excl->pair[excl_pair] &= ~excl_bit;
                                }
                            }

                            /* Note that we could mask out this pair in imask
                             * if all i- and/or all j-particles are perturbed.
                             * But since the perturbed pairs on the CPU will
                             * take an order of magnitude more time, the GPU
                             * will finish before the CPU and there is no gain.
                             */
                        }
                    }
                }

                if (nlist->nrj > nlist->jindex[nri])
                {
                    /* Actually add this new, non-empty, list */
                    nlist->nri++;
                    nlist->jindex[nlist->nri] = nlist->nrj;
                }
            }
        }
    }
}
/* Set all atom-pair exclusions for a GPU type list i-entry
 *
 * Sets all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for i-super-cluster list entry iEntry.
 */
static void
setExclusionsForIEntry(const Nbnxm::GridSet &gridSet,
                       NbnxnPairlistGpu     *nbl,
                       gmx_bool              diagRemoved,
                       int gmx_unused        na_cj_2log,
                       const nbnxn_sci_t    &iEntry,
                       const t_blocka       &exclusions)
{
    if (iEntry.numJClusterGroups() == 0)
    {
        /* Empty list */
        return;
    }

    /* Set the search ranges using start and end j-cluster indices.
     * Note that here we can not use cj4_ind_end, since the last cj4
     * can be only partially filled, so we use cj_ind.
     */
    const JListRanges ranges(iEntry.cj4_ind_start*c_nbnxnGpuJgroupSize,
                             nbl->work->cj_ind,
                             gmx::makeConstArrayRef(nbl->cj4));

    GMX_ASSERT(nbl->na_ci == c_nbnxnGpuClusterSize, "na_ci should match the GPU cluster size");
    constexpr int            c_clusterSize      = c_nbnxnGpuClusterSize;
    constexpr int            c_superClusterSize = c_nbnxnGpuNumClusterPerSupercluster*c_nbnxnGpuClusterSize;

    const int                iSuperCluster = iEntry.sci;

    gmx::ArrayRef<const int> atomIndices   = gridSet.atomIndices();
    gmx::ArrayRef<const int> cell          = gridSet.cells();

    /* Loop over the atoms in the i super-cluster */
    for (int i = 0; i < c_superClusterSize; i++)
    {
        const int iIndex = iSuperCluster*c_superClusterSize + i;
        const int iAtom  = atomIndices[iIndex];
        if (iAtom >= 0)
        {
            const int iCluster = i/c_clusterSize;

            /* Loop over the topology-based exclusions for this i-atom */
            for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1]; exclIndex++)
            {
                const int jAtom = exclusions.a[exclIndex];

                if (jAtom == iAtom)
                {
                    /* The self exclusions are already set, save some time */
                    continue;
                }

                /* Get the index of the j-atom in the nbnxn atom data */
                const int jIndex = cell[jAtom];

                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                /* NOTE: We would like to use iIndex on the right hand side,
                 * but that makes this routine 25% slower with gcc6/7.
                 * Even using c_superClusterSize makes it slower.
                 * Either of these changes triggers peeling of the exclIndex
                 * loop, which apparently leads to far less efficient code.
                 */
                if (diagRemoved && jIndex <= iSuperCluster*nbl->na_sc + i)
                {
                    continue;
                }

                const int jCluster = jIndex/c_clusterSize;

                /* Could the cluster be in our list? */
                if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
                {
                    const int index =
                        findJClusterInJList(jCluster, ranges,
                                            gmx::makeConstArrayRef(nbl->cj4));

                    if (index >= 0)
                    {
                        /* We found an exclusion, clear the corresponding
                         * interaction bit.
                         */
                        const unsigned int pairMask = (1U << (cj_mod_cj4(index)*c_gpuNumClusterPerCell + iCluster));
                        /* Check if the i-cluster interacts with the j-cluster */
                        if (nbl_imask0(nbl, index) & pairMask)
                        {
                            const int innerI = (i      & (c_clusterSize - 1));
                            const int innerJ = (jIndex & (c_clusterSize - 1));

                            /* Determine which j-half (CUDA warp) we are in */
                            const int     jHalf = innerJ/(c_clusterSize/c_nbnxnGpuClusterpairSplit);

                            nbnxn_excl_t *interactionMask =
                                get_exclusion_mask(nbl, cj_to_cj4(index), jHalf);

                            interactionMask->pair[a_mod_wj(innerJ)*c_clusterSize + innerI] &= ~pairMask;
                        }
                    }
                }
            }
        }
    }
}
/* Make a new ci entry at the back of nbl->ci */
static void addNewIEntry(NbnxnPairlistCpu *nbl, int ci, int shift, int flags)
{
    nbnxn_ci_t ciEntry;
    ciEntry.ci            = ci;
    ciEntry.shift         = shift;
    /* Store the interaction flags along with the shift */
    ciEntry.shift        |= flags;
    ciEntry.cj_ind_start  = nbl->cj.size();
    ciEntry.cj_ind_end    = nbl->cj.size();
    nbl->ci.push_back(ciEntry);
}

/* Make a new sci entry at the back of nbl->sci */
static void addNewIEntry(NbnxnPairlistGpu *nbl, int sci, int shift, int gmx_unused flags)
{
    nbnxn_sci_t sciEntry;
    sciEntry.sci           = sci;
    sciEntry.shift         = shift;
    sciEntry.cj4_ind_start = nbl->cj4.size();
    sciEntry.cj4_ind_end   = nbl->cj4.size();

    nbl->sci.push_back(sciEntry);
}
2080 /* Sort the simple j-list cj on exclusions.
2081 * Entries with exclusions will all be sorted to the beginning of the list.
2083 static void sort_cj_excl(nbnxn_cj_t *cj, int ncj,
2084 NbnxnPairlistCpuWork *work)
2086 work->cj.resize(ncj);
2088 /* Make a list of the j-cells involving exclusions */
2089 int jnew = 0;
2090 for (int j = 0; j < ncj; j++)
2092 if (cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
2094 work->cj[jnew++] = cj[j];
2097 /* Skip the sort if there are no exclusions, or if only the first entry has them (then the list is already in order) */
2098 if (!((jnew == 0) ||
2099 (jnew == 1 && cj[0].excl != NBNXN_INTERACTION_MASK_ALL)))
2101 for (int j = 0; j < ncj; j++)
2103 if (cj[j].excl == NBNXN_INTERACTION_MASK_ALL)
2105 work->cj[jnew++] = cj[j];
2108 for (int j = 0; j < ncj; j++)
2110 cj[j] = work->cj[j];
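/* Illustration of the two-pass partition above: j-entries with
 * exclusion masks [all, excl, all, excl] are first copied as
 * [excl, excl] and then extended to [excl, excl, all, all], so all
 * entries that need exclusion processing sit at the list start.
 */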
2115 /* Close this simple list i entry */
2116 static void closeIEntry(NbnxnPairlistCpu *nbl,
2117 int gmx_unused sp_max_av,
2118 gmx_bool gmx_unused progBal,
2119 float gmx_unused nsp_tot_est,
2120 int gmx_unused thread,
2121 int gmx_unused nthread)
2123 nbnxn_ci_t &ciEntry = nbl->ci.back();
2125 /* All content of the new ci entry has already been filled correctly;
2126  * we only need to sort, update the counts, or remove the entry when empty.
2128 const int jlen = ciEntry.cj_ind_end - ciEntry.cj_ind_start;
2131 sort_cj_excl(nbl->cj.data() + ciEntry.cj_ind_start, jlen, nbl->work);
2133 /* The counts below are used for non-bonded pair/flop counts
2134 * and should therefore match the available kernel setups.
2136 if (!(ciEntry.shift & NBNXN_CI_DO_COUL(0)))
2138 nbl->work->ncj_noq += jlen;
2140 else if ((ciEntry.shift & NBNXN_CI_HALF_LJ(0)) ||
2141 !(ciEntry.shift & NBNXN_CI_DO_LJ(0)))
2143 nbl->work->ncj_hlj += jlen;
2148 /* Entry is empty: remove it */
2153 /* Split sci entry for load balancing on the GPU.
2154 * Splitting ensures we have enough lists to fully utilize the whole GPU.
2155 * With progBal we generate progressively smaller lists, which improves
2156 * load balancing. As we only know the current count on our own thread,
2157 * we will need to estimate the current total amount of i-entries.
2158 * As the lists get concatenated later, this estimate depends
2159 * both on nthread and our own thread index.
2161 static void split_sci_entry(NbnxnPairlistGpu *nbl,
2163 gmx_bool progBal, float nsp_tot_est,
2164 int thread, int nthread)
2172 /* Estimate the total number of ci's of the nblist combined
2173  * over all threads, using the target number of ci's.
2175 nsp_est = (nsp_tot_est*thread)/nthread + nbl->nci_tot;
2177 /* The first ci blocks should be larger, to avoid overhead.
2178 * The last ci blocks should be smaller, to improve load balancing.
2179 * The factor 3/2 makes the first block 3/2 times the target average
2180 * and ensures that the total number of blocks ends up equal to
2181 * that of equally sized blocks of size nsp_target_av.
2183 nsp_max = static_cast<int>(nsp_target_av*(nsp_tot_est*1.5/(nsp_est + nsp_tot_est)));
2187 nsp_max = nsp_target_av;
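/* Numerical sketch of the progressive sizing: with progBal and
 * nsp_est starting near 0, nsp_max is 1.5*nsp_target_av; once
 * nsp_est approaches nsp_tot_est it decays to
 * 1.5/2*nsp_target_av = 0.75*nsp_target_av, so the block sizes
 * shrink while the average stays close to nsp_target_av.
 */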
2190 const int cj4_start = nbl->sci.back().cj4_ind_start;
2191 const int cj4_end = nbl->sci.back().cj4_ind_end;
2192 const int j4len = cj4_end - cj4_start;
2194 if (j4len > 1 && j4len*c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize > nsp_max)
2196 /* Modify the last ci entry and process the cj4's again */
2202 for (int cj4 = cj4_start; cj4 < cj4_end; cj4++)
2204 int nsp_cj4_p = nsp_cj4;
2205 /* Count the number of cluster pairs in this cj4 group */
2206 nsp_cj4 = 0;
2207 for (int p = 0; p < c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize; p++)
2209 nsp_cj4 += (nbl->cj4[cj4].imei[0].imask >> p) & 1;
2212 /* If adding the current cj4 with nsp_cj4 pairs gets us further
2213 * away from our target nsp_max, split the list before this cj4.
2215 if (nsp > 0 && nsp_max - nsp < nsp + nsp_cj4 - nsp_max)
2217 /* Split the list at cj4 */
2218 nbl->sci.back().cj4_ind_end = cj4;
2219 /* Create a new sci entry */
2221 sciNew.sci = nbl->sci.back().sci;
2222 sciNew.shift = nbl->sci.back().shift;
2223 sciNew.cj4_ind_start = cj4;
2224 nbl->sci.push_back(sciNew);
2227 nsp_cj4_e = nsp_cj4_p;
2233 /* Put the remaining cj4's in the last sci entry */
2234 nbl->sci.back().cj4_ind_end = cj4_end;
2236 /* Possibly balance out the last two sci's
2237 * by moving the last cj4 of the second last sci.
2239 if (nsp_sci - nsp_cj4_e >= nsp + nsp_cj4_e)
2241 GMX_ASSERT(nbl->sci.size() >= 2, "We expect at least two elements");
2242 nbl->sci[nbl->sci.size() - 2].cj4_ind_end--;
2243 nbl->sci[nbl->sci.size() - 1].cj4_ind_start--;
2248 /* Close this super/sub list i entry */
2249 static void closeIEntry(NbnxnPairlistGpu *nbl,
2251 gmx_bool progBal, float nsp_tot_est,
2252 int thread, int nthread)
2254 nbnxn_sci_t &sciEntry = *getOpenIEntry(nbl);
2256 /* All content of the new sci entry has already been filled correctly;
2257  * we only need to, potentially, split the entry or remove it when empty.
2259 int j4len = sciEntry.numJClusterGroups();
2262 /* We can only have complete blocks of 4 j-entries in a list,
2263 * so round the count up before closing.
2265 int ncj4 = (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize;
2266 nbl->work->cj_ind = ncj4*c_nbnxnGpuJgroupSize;
2270 /* Measure the size of the new entry and potentially split it */
2271 split_sci_entry(nbl, nsp_max_av, progBal, nsp_tot_est,
2277 /* Entry is empty: remove it */
2278 nbl->sci.pop_back();
2282 /* Syncs the working array before adding another grid pair to the GPU list */
2283 static void sync_work(NbnxnPairlistCpu gmx_unused *nbl)
2287 /* Syncs the working array before adding another grid pair to the GPU list */
2288 static void sync_work(NbnxnPairlistGpu *nbl)
2290 nbl->work->cj_ind = nbl->cj4.size()*c_nbnxnGpuJgroupSize;
2293 /* Clears an NbnxnPairlistCpu data structure */
2294 static void clear_pairlist(NbnxnPairlistCpu *nbl)
2300 nbl->ciOuter.clear();
2301 nbl->cjOuter.clear();
2303 nbl->work->ncj_noq = 0;
2304 nbl->work->ncj_hlj = 0;
2307 /* Clears an NbnxnPairlistGpu data structure */
2308 static void clear_pairlist(NbnxnPairlistGpu *nbl)
2312 nbl->excl.resize(1);
2316 /* Clears a group scheme pair list */
2317 static void clear_pairlist_fep(t_nblist *nl)
2321 if (nl->jindex == nullptr)
2323 snew(nl->jindex, 1);
2328 /* Sets a simple list i-cell bounding box, including PBC shift */
2329 static inline void set_icell_bb_simple(gmx::ArrayRef<const BoundingBox> bb,
2331 real shx, real shy, real shz,
2334 bb_ci->lower.x = bb[ci].lower.x + shx;
2335 bb_ci->lower.y = bb[ci].lower.y + shy;
2336 bb_ci->lower.z = bb[ci].lower.z + shz;
2337 bb_ci->upper.x = bb[ci].upper.x + shx;
2338 bb_ci->upper.y = bb[ci].upper.y + shy;
2339 bb_ci->upper.z = bb[ci].upper.z + shz;
2342 /* Sets a simple list i-cell bounding box, including PBC shift */
2343 static inline void set_icell_bb(const Grid &iGrid,
2345 real shx, real shy, real shz,
2346 NbnxnPairlistCpuWork *work)
2348 set_icell_bb_simple(iGrid.iBoundingBoxes(), ci, shx, shy, shz,
2349 &work->iClusterData.bb[0]);
2353 /* Sets a super-cell and sub cell bounding boxes, including PBC shift */
2354 static void set_icell_bbxxxx_supersub(gmx::ArrayRef<const float> bb,
2356 real shx, real shy, real shz,
2359 constexpr int cellBBStride = packedBoundingBoxesIndex(c_gpuNumClusterPerCell);
2360 constexpr int pbbStride = c_packedBoundingBoxesDimSize;
2361 const int ia = ci*cellBBStride;
2362 for (int m = 0; m < cellBBStride; m += c_packedBoundingBoxesSize)
2364 for (int i = 0; i < pbbStride; i++)
2366 bb_ci[m + 0*pbbStride + i] = bb[ia + m + 0*pbbStride + i] + shx;
2367 bb_ci[m + 1*pbbStride + i] = bb[ia + m + 1*pbbStride + i] + shy;
2368 bb_ci[m + 2*pbbStride + i] = bb[ia + m + 2*pbbStride + i] + shz;
2369 bb_ci[m + 3*pbbStride + i] = bb[ia + m + 3*pbbStride + i] + shx;
2370 bb_ci[m + 4*pbbStride + i] = bb[ia + m + 4*pbbStride + i] + shy;
2371 bb_ci[m + 5*pbbStride + i] = bb[ia + m + 5*pbbStride + i] + shz;
2377 /* Sets a super-cell and sub cell bounding boxes, including PBC shift */
2378 gmx_unused static void set_icell_bb_supersub(gmx::ArrayRef<const BoundingBox> bb,
2380 real shx, real shy, real shz,
2383 for (int i = 0; i < c_gpuNumClusterPerCell; i++)
2385 set_icell_bb_simple(bb, ci*c_gpuNumClusterPerCell+i,
2391 /* Sets a super-cell and sub cell bounding boxes, including PBC shift */
2392 gmx_unused static void set_icell_bb(const Grid &iGrid,
2394 real shx, real shy, real shz,
2395 NbnxnPairlistGpuWork *work)
2398 set_icell_bbxxxx_supersub(iGrid.packedBoundingBoxes(), ci, shx, shy, shz,
2399 work->iSuperClusterData.bbPacked.data());
2401 set_icell_bb_supersub(iGrid.iBoundingBoxes(), ci, shx, shy, shz,
2402 work->iSuperClusterData.bb.data());
2406 /* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
2407 static void icell_set_x_simple(int ci,
2408 real shx, real shy, real shz,
2409 int stride, const real *x,
2410 NbnxnPairlistCpuWork::IClusterData *iClusterData)
2412 const int ia = ci*c_nbnxnCpuIClusterSize;
2414 for (int i = 0; i < c_nbnxnCpuIClusterSize; i++)
2416 iClusterData->x[i*STRIDE_XYZ+XX] = x[(ia+i)*stride+XX] + shx;
2417 iClusterData->x[i*STRIDE_XYZ+YY] = x[(ia+i)*stride+YY] + shy;
2418 iClusterData->x[i*STRIDE_XYZ+ZZ] = x[(ia+i)*stride+ZZ] + shz;
2422 static void icell_set_x(int ci,
2423 real shx, real shy, real shz,
2424 int stride, const real *x,
2425 const Nbnxm::KernelType kernelType,
2426 NbnxnPairlistCpuWork *work)
2431 #ifdef GMX_NBNXN_SIMD_4XN
2432 case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
2433 icell_set_x_simd_4xn(ci, shx, shy, shz, stride, x, work);
2436 #ifdef GMX_NBNXN_SIMD_2XNN
2437 case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
2438 icell_set_x_simd_2xnn(ci, shx, shy, shz, stride, x, work);
2442 case Nbnxm::KernelType::Cpu4x4_PlainC:
2443 icell_set_x_simple(ci, shx, shy, shz, stride, x, &work->iClusterData);
2446 GMX_ASSERT(false, "Unhandled case");
2451 /* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
2452 static void icell_set_x(int ci,
2453 real shx, real shy, real shz,
2454 int stride, const real *x,
2455 Nbnxm::KernelType gmx_unused kernelType,
2456 NbnxnPairlistGpuWork *work)
2458 #if !GMX_SIMD4_HAVE_REAL
2460 real * x_ci = work->iSuperClusterData.x.data();
2462 int ia = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize;
2463 for (int i = 0; i < c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize; i++)
2465 x_ci[i*DIM + XX] = x[(ia+i)*stride + XX] + shx;
2466 x_ci[i*DIM + YY] = x[(ia+i)*stride + YY] + shy;
2467 x_ci[i*DIM + ZZ] = x[(ia+i)*stride + ZZ] + shz;
2470 #else /* !GMX_SIMD4_HAVE_REAL */
2472 real * x_ci = work->iSuperClusterData.xSimd.data();
2474 for (int si = 0; si < c_gpuNumClusterPerCell; si++)
2476 for (int i = 0; i < c_nbnxnGpuClusterSize; i += GMX_SIMD4_WIDTH)
2478 int io = si*c_nbnxnGpuClusterSize + i;
2479 int ia = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize + io;
2480 for (int j = 0; j < GMX_SIMD4_WIDTH; j++)
2482 x_ci[io*DIM + j + XX*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + XX] + shx;
2483 x_ci[io*DIM + j + YY*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + YY] + shy;
2484 x_ci[io*DIM + j + ZZ*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + ZZ] + shz;
2489 #endif /* !GMX_SIMD4_HAVE_REAL */
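/* The SIMD4 branch above packs coordinates per 4 atoms as
 * [ x0 x1 x2 x3 | y0 y1 y2 y3 | z0 z1 z2 z3 | x4 x5 x6 x7 | ... ]
 * so one aligned SIMD4 load fetches one component for 4 atoms.
 * For example, with GMX_SIMD4_WIDTH = 4 the x-coordinate of
 * atom io = 5 lands at x_ci[4*DIM + 1 + XX*4] = x_ci[13].
 */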
2492 static real minimum_subgrid_size_xy(const Grid &grid)
2494 const Grid::Dimensions &dims = grid.dimensions();
2496 if (grid.geometry().isSimple)
2498 return std::min(dims.cellSize[XX], dims.cellSize[YY]);
2502 return std::min(dims.cellSize[XX]/c_gpuNumClusterPerCellX,
2503 dims.cellSize[YY]/c_gpuNumClusterPerCellY);
2507 static real effective_buffer_1x1_vs_MxN(const Grid &iGrid,
2510 const real eff_1x1_buffer_fac_overest = 0.1;
2512 /* Determine an atom-pair list cut-off buffer size for atom pairs,
2513 * to be added to rlist (including buffer) used for MxN.
2514 * This is for converting an MxN list to a 1x1 list. This means we can't
2515 * use the normal buffer estimate, as we have an MxN list in which
2516 * some atom pairs beyond rlist are missing. We want to capture
2517 * the beneficial effect of buffering by extra pairs just outside rlist,
2518 * while removing the useless pairs that are further away from rlist.
2519 * (Also the buffer could have been set manually not using the estimate.)
2520 * This buffer size is an overestimate.
2521 * We add 10% of the smallest grid sub-cell dimensions.
2522 * Note that the z-size differs per cell and we don't use this,
2523 * so we overestimate.
2524 * With PME, the 10% value gives a buffer that is somewhat larger
2525 * than the effective buffer with a tolerance of 0.005 kJ/mol/ps.
2526 * Smaller tolerances or using RF lead to a smaller effective buffer,
2527 * so 10% gives a safe overestimate.
2529 return eff_1x1_buffer_fac_overest*(minimum_subgrid_size_xy(iGrid) +
2530 minimum_subgrid_size_xy(jGrid));
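/* Example with assumed, but typical, grid dimensions: if the
 * smallest sub-cell x/y size is 0.3 nm on both grids, the extra
 * 1x1 buffer is 0.1*(0.3 + 0.3) = 0.06 nm on top of rlist;
 * generous, but cheap relative to the FEP pair work within rlist.
 */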
2533 /* Estimates the interaction volume^2 for non-local interactions */
2534 static real nonlocal_vol2(const struct gmx_domdec_zones_t *zones, const rvec ls, real r)
2542 /* Here we simply add up the squared volumes of the 1, 2 or 3
2543  * 1D decomposition zones that make up the non-home interaction
2544  * volume. As these volumes are not additive, this is an overestimate,
2545  * but it is only significant in the limit of small cells, where we
2546  * anyhow need to split the lists into parts as small as possible.
2549 for (int z = 0; z < zones->n; z++)
2551 if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
2556 for (int d = 0; d < DIM; d++)
2558 if (zones->shift[z][d] == 0)
2562 za *= zones->size[z].x1[d] - zones->size[z].x0[d];
2566 /* 4 octants of a sphere */
2567 vold_est = 0.25*M_PI*r*r*r*r;
2568 /* 4 quarter pie slices on the edges */
2569 vold_est += 4*cl*M_PI/6.0*r*r*r;
2570 /* One rectangular volume on a face */
2571 vold_est += ca*0.5*r*r;
2573 vol2_est_tot += vold_est*za;
2577 return vol2_est_tot;
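/* In closed form, the loop above computes
 * vol2_est_tot = sum_z za * ( 0.25*pi*r^4 + (2*pi/3)*cl*r^3
 *                             + 0.5*ca*r^2 )
 * over the qualifying zones: sphere octants, edge pie slices and
 * a face slab, each weighted by the zone contact area za.
 */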
2580 /* Estimates the average size of a full j-list for super/sub setup */
2581 static void get_nsubpair_target(const PairSearch &pairSearch,
2582 const InteractionLocality iloc,
2584 const int min_ci_balanced,
2585 int *nsubpair_target,
2586 float *nsubpair_tot_est)
2588 /* The target value of 36 seems to be the optimum for Kepler.
2589 * Maxwell is less sensitive to the exact value.
2591 const int nsubpair_target_min = 36;
2592 real r_eff_sup, vol_est, nsp_est, nsp_est_nl;
2594 const Grid &grid = pairSearch.gridSet().grids()[0];
2596 /* We don't need to balance list sizes if:
2597 * - We didn't request balancing.
2598 * - The number of grid cells >= the number of lists requested,
2599 * since we will always generate at least #cells lists.
2600 * - We don't have any cells, since then there won't be any lists.
2602 if (min_ci_balanced <= 0 || grid.numCells() >= min_ci_balanced || grid.numCells() == 0)
2604 /* nsubpair_target==0 signals no balancing */
2605 *nsubpair_target = 0;
2606 *nsubpair_tot_est = 0;
2612 const int numAtomsCluster = grid.geometry().numAtomsICluster;
2613 const Grid::Dimensions &dims = grid.dimensions();
2615 ls[XX] = dims.cellSize[XX]/c_gpuNumClusterPerCellX;
2616 ls[YY] = dims.cellSize[YY]/c_gpuNumClusterPerCellY;
2617 ls[ZZ] = numAtomsCluster/(dims.atomDensity*ls[XX]*ls[YY]);
2619 /* The formulas below are a heuristic estimate of the average nsj per si */
2620 r_eff_sup = rlist + nbnxn_get_rlist_effective_inc(numAtomsCluster, ls);
2622 if (!pairSearch.domainSetup().haveDomDec ||
2623 pairSearch.domainSetup().zones->n == 1)
2630 gmx::square(dims.atomDensity/numAtomsCluster)*
2631 nonlocal_vol2(pairSearch.domainSetup().zones, ls, r_eff_sup);
2634 if (iloc == InteractionLocality::Local)
2636 /* Sub-cell interacts with itself */
2637 vol_est = ls[XX]*ls[YY]*ls[ZZ];
2638 /* 6/2 rectangular volume on the faces */
2639 vol_est += (ls[XX]*ls[YY] + ls[XX]*ls[ZZ] + ls[YY]*ls[ZZ])*r_eff_sup;
2640 /* 12/2 quarter pie slices on the edges */
2641 vol_est += 2*(ls[XX] + ls[YY] + ls[ZZ])*0.25*M_PI*gmx::square(r_eff_sup);
2642 /* 4 octants of a sphere */
2643 vol_est += 0.5*4.0/3.0*M_PI*gmx::power3(r_eff_sup);
2645 /* Estimate the number of cluster pairs as the local number of
2646 * clusters times the volume they interact with times the density.
2648 nsp_est = grid.numClusters()*vol_est*dims.atomDensity/numAtomsCluster;
2650 /* Subtract the non-local pair count */
2651 nsp_est -= nsp_est_nl;
2653 /* For small cut-offs nsp_est will be an underestimate.
2654 * With DD nsp_est_nl is an overestimate so nsp_est can get negative.
2655 * So to avoid too small or negative nsp_est we set a minimum of
2656 * all cells interacting with all 3^3 direct neighbors (3^3-1)/2+1=14.
2657 * This might be a slight overestimate for small non-periodic groups of
2658 * atoms as will occur for a local domain with DD, but for small
2659 * groups of atoms we'll anyhow be limited by nsubpair_target_min,
2660 * so this overestimation will not matter.
2662 nsp_est = std::max(nsp_est, grid.numClusters()*14._real);
2666 fprintf(debug, "nsp_est local %5.1f non-local %5.1f\n",
2667 nsp_est, nsp_est_nl);
2672 nsp_est = nsp_est_nl;
2675 /* Thus the (average) maximum j-list size should be as follows.
2676 * Since there is overhead, we shouldn't make the lists too small
2677 * (and we can't chop up j-groups) so we use a minimum target size of 36.
2679 *nsubpair_target = std::max(nsubpair_target_min,
2680 roundToInt(nsp_est/min_ci_balanced));
2681 *nsubpair_tot_est = static_cast<int>(nsp_est);
2685 fprintf(debug, "nbl nsp estimate %.1f, nsubpair_target %d\n",
2686 nsp_est, *nsubpair_target);
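/* Example of the target selection: nsp_est = 7200 with
 * min_ci_balanced = 240 gives a raw target of 30, so the minimum
 * of 36 is used; with min_ci_balanced = 100 we would instead get
 * max(36, 72) = 72 sub-pairs per list.
 */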
2690 /* Debug list print function */
2691 static void print_nblist_ci_cj(FILE *fp, const NbnxnPairlistCpu *nbl)
2693 for (const nbnxn_ci_t &ciEntry : nbl->ci)
2695 fprintf(fp, "ci %4d shift %2d ncj %3d\n",
2696 ciEntry.ci, ciEntry.shift,
2697 ciEntry.cj_ind_end - ciEntry.cj_ind_start);
2699 for (int j = ciEntry.cj_ind_start; j < ciEntry.cj_ind_end; j++)
2701 fprintf(fp, " cj %5d imask %x\n",
2702 nbl->cj[j].cj,
2703 nbl->cj[j].excl);
2708 /* Debug list print function */
2709 static void print_nblist_sci_cj(FILE *fp, const NbnxnPairlistGpu *nbl)
2711 for (const nbnxn_sci_t &sci : nbl->sci)
2713 fprintf(fp, "ci %4d shift %2d ncj4 %2d\n",
2715 sci.numJClusterGroups());
2718 for (int j4 = sci.cj4_ind_start; j4 < sci.cj4_ind_end; j4++)
2720 for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
2722 fprintf(fp, " sj %5d imask %x\n",
2723 nbl->cj4[j4].cj[j],
2724 nbl->cj4[j4].imei[0].imask);
2725 for (int si = 0; si < c_gpuNumClusterPerCell; si++)
2727 if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))
2734 fprintf(fp, "ci %4d shift %2d ncj4 %2d ncp %3d\n",
2736 sci.numJClusterGroups(),
2741 /* Combine the pair lists *nbl generated on multiple threads into nblc */
2742 static void combine_nblists(int nnbl, NbnxnPairlistGpu **nbl,
2743 NbnxnPairlistGpu *nblc)
2745 int nsci = nblc->sci.size();
2746 int ncj4 = nblc->cj4.size();
2747 int nexcl = nblc->excl.size();
2748 for (int i = 0; i < nnbl; i++)
2750 nsci += nbl[i]->sci.size();
2751 ncj4 += nbl[i]->cj4.size();
2752 nexcl += nbl[i]->excl.size();
2755 /* Resize with the final, combined size, so we can fill in parallel */
2756 /* NOTE: For better performance we should use default initialization */
2757 nblc->sci.resize(nsci);
2758 nblc->cj4.resize(ncj4);
2759 nblc->excl.resize(nexcl);
2761 /* Each thread should copy its own data to the combined arrays,
2762 * as otherwise data will go back and forth between different caches.
2764 #if GMX_OPENMP && !(defined __clang_analyzer__)
2765 int nthreads = gmx_omp_nthreads_get(emntPairsearch);
2768 #pragma omp parallel for num_threads(nthreads) schedule(static)
2769 for (int n = 0; n < nnbl; n++)
2773 /* Determine the offset in the combined data for our thread.
2774 * Note that the original sizes in nblc are lost.
2776 int sci_offset = nsci;
2777 int cj4_offset = ncj4;
2778 int excl_offset = nexcl;
2780 for (int i = n; i < nnbl; i++)
2782 sci_offset -= nbl[i]->sci.size();
2783 cj4_offset -= nbl[i]->cj4.size();
2784 excl_offset -= nbl[i]->excl.size();
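/* Offset illustration, assuming nblc starts out empty: with three
 * source lists of sci sizes 10, 20 and 30 (nsci = 60), thread 1
 * computes sci_offset = 60 - (20 + 30) = 10, i.e. exactly the
 * prefix sum of the sizes of the lists before it.
 */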
2787 const NbnxnPairlistGpu &nbli = *nbl[n];
2789 for (size_t i = 0; i < nbli.sci.size(); i++)
2791 nblc->sci[sci_offset + i] = nbli.sci[i];
2792 nblc->sci[sci_offset + i].cj4_ind_start += cj4_offset;
2793 nblc->sci[sci_offset + i].cj4_ind_end += cj4_offset;
2796 for (size_t j4 = 0; j4 < nbli.cj4.size(); j4++)
2798 nblc->cj4[cj4_offset + j4] = nbli.cj4[j4];
2799 nblc->cj4[cj4_offset + j4].imei[0].excl_ind += excl_offset;
2800 nblc->cj4[cj4_offset + j4].imei[1].excl_ind += excl_offset;
2803 for (size_t j4 = 0; j4 < nbli.excl.size(); j4++)
2805 nblc->excl[excl_offset + j4] = nbli.excl[j4];
2808 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2811 for (int n = 0; n < nnbl; n++)
2813 nblc->nci_tot += nbl[n]->nci_tot;
2817 static void balance_fep_lists(gmx::ArrayRef<PairsearchWork> work,
2818 nbnxn_pairlist_set_t *nbl_lists)
2821 int nri_tot, nrj_tot, nrj_target;
2825 nnbl = nbl_lists->nnbl;
2829 /* Nothing to balance */
2833 /* Count the total i-lists and pairs */
2836 for (int th = 0; th < nnbl; th++)
2838 nri_tot += nbl_lists->nbl_fep[th]->nri;
2839 nrj_tot += nbl_lists->nbl_fep[th]->nrj;
2842 nrj_target = (nrj_tot + nnbl - 1)/nnbl;
2844 assert(gmx_omp_nthreads_get(emntNonbonded) == nnbl);
2846 #pragma omp parallel for schedule(static) num_threads(nnbl)
2847 for (int th = 0; th < nnbl; th++)
2851 t_nblist *nbl = work[th].nbl_fep.get();
2853 /* Note that here we allocate for the total size, instead of
2854  * a per-thread estimate (which is hard to obtain).
2856 if (nri_tot > nbl->maxnri)
2858 nbl->maxnri = over_alloc_large(nri_tot);
2859 reallocate_nblist(nbl);
2861 if (nri_tot > nbl->maxnri || nrj_tot > nbl->maxnrj)
2863 nbl->maxnrj = over_alloc_small(nrj_tot);
2864 srenew(nbl->jjnr, nbl->maxnrj);
2865 srenew(nbl->excl_fep, nbl->maxnrj);
2868 clear_pairlist_fep(nbl);
2870 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2873 /* Loop over the source lists and assign and copy i-entries */
2875 nbld = work[th_dest].nbl_fep.get();
2876 for (int th = 0; th < nnbl; th++)
2880 nbls = nbl_lists->nbl_fep[th];
2882 for (int i = 0; i < nbls->nri; i++)
2886 /* The number of pairs in this i-entry */
2887 nrj = nbls->jindex[i+1] - nbls->jindex[i];
2889 /* Decide if list th_dest is too large and we should proceed
2890  * to the next destination list.
2892 if (th_dest+1 < nnbl && nbld->nrj > 0 &&
2893 nbld->nrj + nrj - nrj_target > nrj_target - nbld->nrj)
2896 nbld = work[th_dest].nbl_fep.get();
2899 nbld->iinr[nbld->nri] = nbls->iinr[i];
2900 nbld->gid[nbld->nri] = nbls->gid[i];
2901 nbld->shift[nbld->nri] = nbls->shift[i];
2903 for (int j = nbls->jindex[i]; j < nbls->jindex[i+1]; j++)
2905 nbld->jjnr[nbld->nrj] = nbls->jjnr[j];
2906 nbld->excl_fep[nbld->nrj] = nbls->excl_fep[j];
2910 nbld->jindex[nbld->nri] = nbld->nrj;
2914 /* Swap the list pointers */
2915 for (int th = 0; th < nnbl; th++)
2917 t_nblist *nbl_tmp = work[th].nbl_fep.release();
2918 work[th].nbl_fep.reset(nbl_lists->nbl_fep[th]);
2919 nbl_lists->nbl_fep[th] = nbl_tmp;
2923 fprintf(debug, "nbl_fep[%d] nri %4d nrj %4d\n",
2925 nbl_lists->nbl_fep[th]->nri,
2926 nbl_lists->nbl_fep[th]->nrj);
2931 /* Returns the next ci to be processed by our thread */
2932 static gmx_bool next_ci(const Grid &grid,
2933 int nth, int ci_block,
2934 int *ci_x, int *ci_y,
2940 if (*ci_b == ci_block)
2942 /* Jump to the next block assigned to this task */
2943 *ci += (nth - 1)*ci_block;
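/* Block-cyclic illustration: with nth = 4 and ci_block = 2,
 * thread 0 visits cells 0,1 then 8,9 then 16,17 and so on, while
 * thread 1 starts at 2,3. This interleaving spreads interior and
 * boundary columns over all threads.
 */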
2947 if (*ci >= grid.numCells())
2952 while (*ci >= grid.firstCellInColumn(*ci_x*grid.dimensions().numCells[YY] + *ci_y + 1))
2955 if (*ci_y == grid.dimensions().numCells[YY])
2965 /* Returns the distance^2 for which we put cell pairs in the list
2966 * without checking atom pair distances. This is usually < rlist^2.
2968 static float boundingbox_only_distance2(const Grid::Dimensions &iGridDims,
2969 const Grid::Dimensions &jGridDims,
2973 /* If the distance between two sub-cell bounding boxes is less
2974 * than this distance, do not check the distance between
2975 * all particle pairs in the sub-cell, since then it is likely
2976 * that the box pair has atom pairs within the cut-off.
2977 * We use the nblist cut-off minus 0.5 times the average x/y diagonal
2978 * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
2979 * Using more than 0.5 gains at most 0.5%.
2980 * If forces are calculated more than twice, the performance gain
2981 * in the force calculation outweighs the cost of checking.
2982 * Note that with sub-cell lists, the atom-pair distance check
2983 * is only performed when just 1 out of 8 sub-cells is within range;
2984 * this is because the GPU is much faster than the CPU.
2989 bbx = 0.5*(iGridDims.cellSize[XX] + jGridDims.cellSize[XX]);
2990 bby = 0.5*(iGridDims.cellSize[YY] + jGridDims.cellSize[YY]);
2993 bbx /= c_gpuNumClusterPerCellX;
2994 bby /= c_gpuNumClusterPerCellY;
2997 rbb2 = std::max(0.0, rlist - 0.5*std::sqrt(bbx*bbx + bby*bby));
2998 rbb2 = rbb2*rbb2;
3003 return static_cast<float>((1 + GMX_FLOAT_EPS)*rbb2);
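/* Worked example for the GPU path: rlist = 1.0 nm and average
 * cell sizes of 0.29 nm give bbx = bby = 0.29/2 = 0.145 nm
 * (assuming c_gpuNumClusterPerCellX = c_gpuNumClusterPerCellY = 2),
 * so the diagonal term is 0.5*sqrt(2)*0.145 ~ 0.10 nm and
 * rbb2 ~ (1.0 - 0.10)^2 = 0.81 nm^2: box pairs closer than
 * ~0.9 nm skip the per-pair distance check.
 */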
3007 static int get_ci_block_size(const Grid &iGrid,
3008 gmx_bool bDomDec, int nth)
3010 const int ci_block_enum = 5;
3011 const int ci_block_denom = 11;
3012 const int ci_block_min_atoms = 16;
3015 /* Here we decide how to distribute the blocks over the threads.
3016 * We use prime numbers to try to avoid that the grid size becomes
3017 * a multiple of the number of threads, which would lead to some
3018 * threads getting "inner" pairs and others getting boundary pairs,
3019 * which in turn leads to load imbalance between threads.
3020 * Set the block size as 5/11/ntask times the average number of cells
3021 * in a y,z slab. This should ensure a quite uniform distribution
3022 * of the grid parts of the different threads along all three grid
3023 * zone boundaries with 3D domain decomposition. At the same time
3024 * the blocks will not become too small.
3026 ci_block = (iGrid.numCells()*ci_block_enum)/(ci_block_denom*iGrid.dimensions().numCells[XX]*nth);
3028 const int numAtomsPerCell = iGrid.geometry().numAtomsPerCell;
3030 /* Ensure the blocks are not too small: avoids cache invalidation */
3031 if (ci_block*numAtomsPerCell < ci_block_min_atoms)
3033 ci_block = (ci_block_min_atoms + numAtomsPerCell - 1)/numAtomsPerCell;
3036 /* Without domain decomposition,
3037  * or with fewer than 3 blocks per task, divide into nth blocks.
3039 if (!bDomDec || nth*3*ci_block > iGrid.numCells())
3041 ci_block = (iGrid.numCells() + nth - 1)/nth;
3044 if (ci_block > 1 && (nth - 1)*ci_block >= iGrid.numCells())
3046 /* Some threads have no work. Although reducing the block size
3047 * does not decrease the block count on the first few threads,
3048 * with GPUs better mixing of "upper" cells that have more empty
3049 * clusters results in a somewhat lower max load over all threads.
3050 * Without GPUs the regime of so few atoms per thread is less
3051 * performance relevant, but with 8-wide SIMD the same reasoning
3052 * applies, since the pair list uses 4 i-atom "sub-clusters".
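/* Sizing example: a grid of 4000 cells with numCells[XX] = 20 and
 * nth = 8 gives ci_block = 4000*5/(11*20*8) = 11 cells. With 4
 * atoms per cell this clears the 16-atom minimum, and with domain
 * decomposition 3*8*11 = 264 < 4000, so the block size stands.
 */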
3060 /* Returns the number of bits to right-shift a cluster index to obtain
3061 * the corresponding force buffer flag index.
3063 static int getBufferFlagShift(int numAtomsPerCluster)
3065 int bufferFlagShift = 0;
3066 while ((numAtomsPerCluster << bufferFlagShift) < NBNXN_BUFFERFLAG_SIZE)
3071 return bufferFlagShift;
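/* Shift example, assuming a flag block size NBNXN_BUFFERFLAG_SIZE
 * of 16 atoms: a 4-atom cluster needs a shift of 2 (4 << 2 == 16)
 * and an 8-atom cluster a shift of 1, so cluster index c maps to
 * flag block c >> shift.
 */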
3074 static bool pairlistIsSimple(const NbnxnPairlistCpu gmx_unused &pairlist)
3079 static bool pairlistIsSimple(const NbnxnPairlistGpu gmx_unused &pairlist)
3084 static void makeClusterListWrapper(NbnxnPairlistCpu *nbl,
3085 const Grid gmx_unused &iGrid,
3088 const int firstCell,
3090 const bool excludeSubDiagonal,
3091 const nbnxn_atomdata_t *nbat,
3094 const Nbnxm::KernelType kernelType,
3095 int *numDistanceChecks)
3099 case Nbnxm::KernelType::Cpu4x4_PlainC:
3100 makeClusterListSimple(jGrid,
3101 nbl, ci, firstCell, lastCell,
3107 #ifdef GMX_NBNXN_SIMD_4XN
3108 case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
3109 makeClusterListSimd4xn(jGrid,
3110 nbl, ci, firstCell, lastCell,
3117 #ifdef GMX_NBNXN_SIMD_2XNN
3118 case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
3119 makeClusterListSimd2xnn(jGrid,
3120 nbl, ci, firstCell, lastCell,
3128 GMX_ASSERT(false, "Unhandled kernel type");
3132 static void makeClusterListWrapper(NbnxnPairlistGpu *nbl,
3133 const Grid &gmx_unused iGrid,
3136 const int firstCell,
3138 const bool excludeSubDiagonal,
3139 const nbnxn_atomdata_t *nbat,
3142 Nbnxm::KernelType gmx_unused kernelType,
3143 int *numDistanceChecks)
3145 for (int cj = firstCell; cj <= lastCell; cj++)
3147 make_cluster_list_supersub(iGrid, jGrid,
3150 nbat->xstride, nbat->x().data(),
3156 static int getNumSimpleJClustersInList(const NbnxnPairlistCpu &nbl)
3158 return nbl.cj.size();
3161 static int getNumSimpleJClustersInList(const gmx_unused NbnxnPairlistGpu &nbl)
3166 static void incrementNumSimpleJClustersInList(NbnxnPairlistCpu *nbl,
3169 nbl->ncjInUse += nbl->cj.size() - ncj_old_j;
3172 static void incrementNumSimpleJClustersInList(NbnxnPairlistGpu gmx_unused *nbl,
3173 int gmx_unused ncj_old_j)
3177 static void checkListSizeConsistency(const NbnxnPairlistCpu &nbl,
3178 const bool haveFreeEnergy)
3180 GMX_RELEASE_ASSERT(static_cast<size_t>(nbl.ncjInUse) == nbl.cj.size() || haveFreeEnergy,
3181 "Without free-energy all cj pair-list entries should be in use. "
3182 "Note that subsequent code does not make use of the equality, "
3183 "this check is only here to catch bugs");
3186 static void checkListSizeConsistency(const NbnxnPairlistGpu gmx_unused &nbl,
3187 bool gmx_unused haveFreeEnergy)
3189 /* We currently cannot check consistency here */
3192 /* Set the buffer flags for newly added entries in the list */
3193 static void setBufferFlags(const NbnxnPairlistCpu &nbl,
3194 const int ncj_old_j,
3195 const int gridj_flag_shift,
3196 gmx_bitmask_t *gridj_flag,
3199 if (gmx::ssize(nbl.cj) > ncj_old_j)
3201 int cbFirst = nbl.cj[ncj_old_j].cj >> gridj_flag_shift;
3202 int cbLast = nbl.cj.back().cj >> gridj_flag_shift;
3203 for (int cb = cbFirst; cb <= cbLast; cb++)
3205 bitmask_init_bit(&gridj_flag[cb], th);
3210 static void setBufferFlags(const NbnxnPairlistGpu gmx_unused &nbl,
3211 int gmx_unused ncj_old_j,
3212 int gmx_unused gridj_flag_shift,
3213 gmx_bitmask_t gmx_unused *gridj_flag,
3216 GMX_ASSERT(false, "This function should never be called");
3219 /* Generates the part of pair-list nbl assigned to our thread */
3220 template <typename T>
3221 static void nbnxn_make_pairlist_part(const PairSearch &pairSearch,
3224 PairsearchWork *work,
3225 const nbnxn_atomdata_t *nbat,
3226 const t_blocka &exclusions,
3228 const Nbnxm::KernelType kernelType,
3230 gmx_bool bFBufferFlag,
3233 float nsubpair_tot_est,
3242 int ci_b, ci, ci_x, ci_y, ci_xy;
3244 real bx0, bx1, by0, by1, bz0, bz1;
3246 real d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
3247 int cxf, cxl, cyf, cyf_x, cyl;
3248 int numDistanceChecks;
3249 int gridi_flag_shift = 0, gridj_flag_shift = 0;
3250 gmx_bitmask_t *gridj_flag = nullptr;
3251 int ncj_old_i, ncj_old_j;
3253 if (jGrid.geometry().isSimple != pairlistIsSimple(*nbl) ||
3254 iGrid.geometry().isSimple != pairlistIsSimple(*nbl))
3256 gmx_incons("Grid incompatible with pair-list");
3260 GMX_ASSERT(nbl->na_ci == jGrid.geometry().numAtomsICluster,
3261 "The cluster sizes in the list and grid should match");
3262 nbl->na_cj = Nbnxm::JClusterSizePerKernelType[kernelType];
3263 na_cj_2log = get_2log(nbl->na_cj);
3269 /* Determine conversion of clusters to flag blocks */
3270 gridi_flag_shift = getBufferFlagShift(nbl->na_ci);
3271 gridj_flag_shift = getBufferFlagShift(nbl->na_cj);
3273 gridj_flag = work->buffer_flags.flag;
3276 const Nbnxm::GridSet &gridSet = pairSearch.gridSet();
3278 gridSet.getBox(box);
3280 const bool haveFep = gridSet.haveFep();
3282 const real rlist2 = nbl->rlist*nbl->rlist;
3284 if (haveFep && !pairlistIsSimple(*nbl))
3286 /* Determine an atom-pair list cut-off distance for FEP atom pairs.
3287 * We should not simply use rlist, since then we would not have
3288 * the small, effective buffering of the NxN lists.
3289 * The buffer is an overestimate, but the resulting cost for pairs
3290 * beyond rlist is negligible compared to the FEP pairs within rlist.
3292 rl_fep2 = nbl->rlist + effective_buffer_1x1_vs_MxN(iGrid, jGrid);
3296 fprintf(debug, "nbl_fep atom-pair rlist %f\n", rl_fep2);
3298 rl_fep2 = rl_fep2*rl_fep2;
3301 const Grid::Dimensions &iGridDims = iGrid.dimensions();
3302 const Grid::Dimensions &jGridDims = jGrid.dimensions();
3304 rbb2 = boundingbox_only_distance2(iGridDims, jGridDims, nbl->rlist, pairlistIsSimple(*nbl));
3308 fprintf(debug, "nbl bounding box only distance %f\n", std::sqrt(rbb2));
3311 const bool isIntraGridList = (&iGrid == &jGrid);
3313 /* Set the shift range */
3314 for (int d = 0; d < DIM; d++)
3316 /* Check if we need periodicity shifts.
3317 * Without PBC or with domain decomposition we don't need them.
3319 if (d >= ePBC2npbcdim(pairSearch.domainSetup().ePBC) ||
3320 pairSearch.domainSetup().haveDomDecPerDim[d])
3326 const real listRangeCellToCell =
3327 listRangeForGridCellToGridCell(rlist, iGrid.dimensions(), jGrid.dimensions());
3329 box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < listRangeCellToCell)
3339 const bool bSimple = pairlistIsSimple(*nbl);
3340 gmx::ArrayRef<const BoundingBox> bb_i;
3342 gmx::ArrayRef<const float> pbb_i;
3345 bb_i = iGrid.iBoundingBoxes();
3349 pbb_i = iGrid.packedBoundingBoxes();
3352 /* We use the normal bounding box format for both grid types */
3353 bb_i = iGrid.iBoundingBoxes();
3355 gmx::ArrayRef<const BoundingBox1D> bbcz_i = iGrid.zBoundingBoxes();
3356 gmx::ArrayRef<const int> flags_i = iGrid.clusterFlags();
3357 gmx::ArrayRef<const BoundingBox1D> bbcz_j = jGrid.zBoundingBoxes();
3358 int cell0_i = iGrid.cellOffset();
3362 fprintf(debug, "nbl nc_i %d col.av. %.1f ci_block %d\n",
3363 iGrid.numCells(), iGrid.numCells()/static_cast<double>(iGrid.numColumns()), ci_block);
3366 numDistanceChecks = 0;
3368 const real listRangeBBToJCell2 = gmx::square(listRangeForBoundingBoxToGridCell(rlist, jGrid.dimensions()));
3370 /* Initialize ci_b and ci to 1 before where we want them to start,
3371 * as they will both be incremented in next_ci.
3374 ci = th*ci_block - 1;
3377 while (next_ci(iGrid, nth, ci_block, &ci_x, &ci_y, &ci_b, &ci))
3379 if (bSimple && flags_i[ci] == 0)
3384 ncj_old_i = getNumSimpleJClustersInList(*nbl);
3387 if (!isIntraGridList && shp[XX] == 0)
3391 bx1 = bb_i[ci].upper.x;
3395 bx1 = iGridDims.lowerCorner[XX] + (ci_x+1)*iGridDims.cellSize[XX];
3397 if (bx1 < jGridDims.lowerCorner[XX])
3399 d2cx = gmx::square(jGridDims.lowerCorner[XX] - bx1);
3401 if (d2cx >= listRangeBBToJCell2)
3408 ci_xy = ci_x*iGridDims.numCells[YY] + ci_y;
3410 /* Loop over shift vectors in three dimensions */
3411 for (int tz = -shp[ZZ]; tz <= shp[ZZ]; tz++)
3413 const real shz = tz*box[ZZ][ZZ];
3415 bz0 = bbcz_i[ci].lower + shz;
3416 bz1 = bbcz_i[ci].upper + shz;
3424 d2z = gmx::square(bz1);
3428 d2z = gmx::square(bz0 - box[ZZ][ZZ]);
3431 d2z_cx = d2z + d2cx;
3433 if (d2z_cx >= rlist2)
3438 bz1_frac = bz1/iGrid.numCellsInColumn(ci_xy);
3443 /* The check with bz1_frac close to or larger than 1 comes later */
3445 for (int ty = -shp[YY]; ty <= shp[YY]; ty++)
3447 const real shy = ty*box[YY][YY] + tz*box[ZZ][YY];
3451 by0 = bb_i[ci].lower.y + shy;
3452 by1 = bb_i[ci].upper.y + shy;
3456 by0 = iGridDims.lowerCorner[YY] + (ci_y )*iGridDims.cellSize[YY] + shy;
3457 by1 = iGridDims.lowerCorner[YY] + (ci_y + 1)*iGridDims.cellSize[YY] + shy;
3460 get_cell_range<YY>(by0, by1,
3471 if (by1 < jGridDims.lowerCorner[YY])
3473 d2z_cy += gmx::square(jGridDims.lowerCorner[YY] - by1);
3475 else if (by0 > jGridDims.upperCorner[YY])
3477 d2z_cy += gmx::square(by0 - jGridDims.upperCorner[YY]);
3480 for (int tx = -shp[XX]; tx <= shp[XX]; tx++)
3482 const int shift = XYZ2IS(tx, ty, tz);
3484 const bool excludeSubDiagonal = (isIntraGridList && shift == CENTRAL);
3486 if (c_pbcShiftBackward && isIntraGridList && shift > CENTRAL)
3491 const real shx = tx*box[XX][XX] + ty*box[YY][XX] + tz*box[ZZ][XX];
3495 bx0 = bb_i[ci].lower.x + shx;
3496 bx1 = bb_i[ci].upper.x + shx;
3500 bx0 = iGridDims.lowerCorner[XX] + (ci_x )*iGridDims.cellSize[XX] + shx;
3501 bx1 = iGridDims.lowerCorner[XX] + (ci_x+1)*iGridDims.cellSize[XX] + shx;
3504 get_cell_range<XX>(bx0, bx1,
3514 addNewIEntry(nbl, cell0_i+ci, shift, flags_i[ci]);
3516 if ((!c_pbcShiftBackward || excludeSubDiagonal) &&
3519 /* Leave the pairs with i > j.
3520 * x is the major index, so skip half of it.
3525 set_icell_bb(iGrid, ci, shx, shy, shz,
3528 icell_set_x(cell0_i+ci, shx, shy, shz,
3529 nbat->xstride, nbat->x().data(),
3533 for (int cx = cxf; cx <= cxl; cx++)
3536 if (jGridDims.lowerCorner[XX] + cx*jGridDims.cellSize[XX] > bx1)
3538 d2zx += gmx::square(jGridDims.lowerCorner[XX] + cx*jGridDims.cellSize[XX] - bx1);
3540 else if (jGridDims.lowerCorner[XX] + (cx+1)*jGridDims.cellSize[XX] < bx0)
3542 d2zx += gmx::square(jGridDims.lowerCorner[XX] + (cx+1)*jGridDims.cellSize[XX] - bx0);
3545 if (isIntraGridList &&
3547 (!c_pbcShiftBackward || shift == CENTRAL) &&
3550 /* Leave the pairs with i > j.
3551 * Skip half of y when i and j have the same x.
3560 for (int cy = cyf_x; cy <= cyl; cy++)
3562 const int columnStart = jGrid.firstCellInColumn(cx*jGridDims.numCells[YY] + cy);
3563 const int columnEnd = jGrid.firstCellInColumn(cx*jGridDims.numCells[YY] + cy + 1);
3566 if (jGridDims.lowerCorner[YY] + cy*jGridDims.cellSize[YY] > by1)
3568 d2zxy += gmx::square(jGridDims.lowerCorner[YY] + cy*jGridDims.cellSize[YY] - by1);
3570 else if (jGridDims.lowerCorner[YY] + (cy + 1)*jGridDims.cellSize[YY] < by0)
3572 d2zxy += gmx::square(jGridDims.lowerCorner[YY] + (cy + 1)*jGridDims.cellSize[YY] - by0);
3574 if (columnStart < columnEnd && d2zxy < listRangeBBToJCell2)
3576 /* To improve efficiency in the common case
3577 * of a homogeneous particle distribution,
3578 * we estimate the index of the middle cell
3579 * in range (midCell). We search down and up
3580 * starting from this index.
3582 * Note that the bbcz_j array contains bounds
3583 * for i-clusters, thus for clusters of 4 atoms.
3584 * For the common case where the j-cluster size
3585 * is 8, we could step with a stride of 2,
3586 * but we do not do this because it would
3587 * complicate this code even more.
3589 int midCell = columnStart + static_cast<int>(bz1_frac*(columnEnd - columnStart));
3590 if (midCell >= columnEnd)
3592 midCell = columnEnd - 1;
3597 /* Find the lowest cell that can possibly
3598  * be within range.
3599 * Check if we hit the bottom of the grid,
3600 * if the j-cell is below the i-cell and if so,
3601 * if it is within range.
3603 int downTestCell = midCell;
3604 while (downTestCell >= columnStart &&
3605 (bbcz_j[downTestCell].upper >= bz0 ||
3606 d2xy + gmx::square(bbcz_j[downTestCell].upper - bz0) < rlist2))
3610 int firstCell = downTestCell + 1;
3612 /* Find the highest cell that can possibly
3613  * be within range.
3614 * Check if we hit the top of the grid,
3615 * if the j-cell is above the i-cell and if so,
3616 * if it is within range.
3618 int upTestCell = midCell + 1;
3619 while (upTestCell < columnEnd &&
3620 (bbcz_j[upTestCell].lower <= bz1 ||
3621 d2xy + gmx::square(bbcz_j[upTestCell].lower - bz1) < rlist2))
3625 int lastCell = upTestCell - 1;
3627 #define NBNXN_REFCODE 0
3630 /* Simple reference code, for debugging,
3631 * overrides the more complex code above.
3633 firstCell = columnEnd;
3635 for (int k = columnStart; k < columnEnd; k++)
3637 if (d2xy + gmx::square(bbcz_j[k*NNBSBB_D + 1] - bz0) < rlist2 &&
3642 if (d2xy + gmx::square(bbcz_j[k*NNBSBB_D] - bz1) < rlist2 &&
3651 if (isIntraGridList)
3653 /* We want each atom/cell pair only once,
3654 * only use cj >= ci.
3656 if (!c_pbcShiftBackward || shift == CENTRAL)
3658 firstCell = std::max(firstCell, ci);
3662 if (firstCell <= lastCell)
3664 GMX_ASSERT(firstCell >= columnStart && lastCell < columnEnd, "The range should reside within the current grid column");
3666 /* For f buffer flags with simple lists */
3667 ncj_old_j = getNumSimpleJClustersInList(*nbl);
3669 makeClusterListWrapper(nbl,
3671 jGrid, firstCell, lastCell,
3676 &numDistanceChecks);
3680 setBufferFlags(*nbl, ncj_old_j, gridj_flag_shift,
3684 incrementNumSimpleJClustersInList(nbl, ncj_old_j);
3690 /* Set the exclusions for this ci list */
3691 setExclusionsForIEntry(gridSet,
3695 *getOpenIEntry(nbl),
3700 make_fep_list(gridSet.atomIndices(), nbat, nbl,
3705 iGrid, jGrid, nbl_fep);
3708 /* Close this ci list */
3711 progBal, nsubpair_tot_est,
3717 if (bFBufferFlag && getNumSimpleJClustersInList(*nbl) > ncj_old_i)
3719 bitmask_init_bit(&(work->buffer_flags.flag[(iGrid.cellOffset() + ci) >> gridi_flag_shift]), th);
3723 work->ndistc = numDistanceChecks;
3725 checkListSizeConsistency(*nbl, haveFep);
3729 fprintf(debug, "number of distance checks %d\n", numDistanceChecks);
3731 print_nblist_statistics(debug, nbl, pairSearch, rlist);
3735 fprintf(debug, "nbl FEP list pairs: %d\n", nbl_fep->nrj);
3740 static void reduce_buffer_flags(const PairSearch &pairSearch,
3742 const nbnxn_buffer_flags_t *dest)
3744 for (int s = 0; s < nsrc; s++)
3746 gmx_bitmask_t * flag = pairSearch.work()[s].buffer_flags.flag;
3748 for (int b = 0; b < dest->nflag; b++)
3750 bitmask_union(&(dest->flag[b]), flag[b]);
3755 static void print_reduction_cost(const nbnxn_buffer_flags_t *flags, int nout)
3757 int nelem, nkeep, ncopy, nred, out;
3758 gmx_bitmask_t mask_0;
3764 bitmask_init_bit(&mask_0, 0);
3765 for (int b = 0; b < flags->nflag; b++)
3767 if (bitmask_is_equal(flags->flag[b], mask_0))
3769 /* Only flag 0 is set, no copy of reduction required */
3773 else if (!bitmask_is_zero(flags->flag[b]))
3776 for (out = 0; out < nout; out++)
3778 if (bitmask_is_set(flags->flag[b], out))
3795 fprintf(debug, "nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
3797 nelem/static_cast<double>(flags->nflag),
3798 nkeep/static_cast<double>(flags->nflag),
3799 ncopy/static_cast<double>(flags->nflag),
3800 nred/static_cast<double>(flags->nflag));
3803 /* Copies the list entries from src to dest when cjStart <= *cjGlobal < cjEnd.
3804 * *cjGlobal is updated with the cj count in src.
3805 * When setFlags==true, flag bit t is set in flag for all i and j clusters.
3807 template<bool setFlags>
3808 static void copySelectedListRange(const nbnxn_ci_t * gmx_restrict srcCi,
3809 const NbnxnPairlistCpu * gmx_restrict src,
3810 NbnxnPairlistCpu * gmx_restrict dest,
3811 gmx_bitmask_t *flag,
3812 int iFlagShift, int jFlagShift, int t)
3814 const int ncj = srcCi->cj_ind_end - srcCi->cj_ind_start;
3816 dest->ci.push_back(*srcCi);
3817 dest->ci.back().cj_ind_start = dest->cj.size();
3818 dest->ci.back().cj_ind_end = dest->cj.size() + ncj;
3822 bitmask_init_bit(&flag[srcCi->ci >> iFlagShift], t);
3825 for (int j = srcCi->cj_ind_start; j < srcCi->cj_ind_end; j++)
3827 dest->cj.push_back(src->cj[j]);
3831 /* NOTE: This is relatively expensive, since this
3832 * operation is done for all elements in the list,
3833 * whereas at list generation this is done only
3834 * once for each flag entry.
3836 bitmask_init_bit(&flag[src->cj[j].cj >> jFlagShift], t);
3841 /* This routine re-balances the pairlists such that all are nearly equally
3842 * sized. Only whole i-entries are moved between lists. These are moved
3843 * between the ends of the lists, such that the buffer reduction cost should
3844 * not change significantly.
3845 * Note that all original reduction flags are currently kept. This can lead
3846 * to reduction of parts of the force buffer that could be avoided. But since
3847 * the original lists are quite balanced, this will only give minor overhead.
3849 static void rebalanceSimpleLists(int numLists,
3850 NbnxnPairlistCpu * const * const srcSet,
3851 NbnxnPairlistCpu **destSet,
3852 gmx::ArrayRef<PairsearchWork> searchWork)
3855 for (int s = 0; s < numLists; s++)
3857 ncjTotal += srcSet[s]->ncjInUse;
3859 int ncjTarget = (ncjTotal + numLists - 1)/numLists;
3861 #pragma omp parallel num_threads(numLists)
3863 int t = gmx_omp_get_thread_num();
3865 int cjStart = ncjTarget* t;
3866 int cjEnd = ncjTarget*(t + 1);
3868 /* The destination pair-list for task/thread t */
3869 NbnxnPairlistCpu *dest = destSet[t];
3871 clear_pairlist(dest);
3872 dest->na_cj = srcSet[0]->na_cj;
3874 /* Note that the flags in the work struct (still) contain flags
3875 * for all entries that are present in srcSet->nbl[t].
3877 gmx_bitmask_t *flag = searchWork[t].buffer_flags.flag;
3879 int iFlagShift = getBufferFlagShift(dest->na_ci);
3880 int jFlagShift = getBufferFlagShift(dest->na_cj);
3883 for (int s = 0; s < numLists && cjGlobal < cjEnd; s++)
3885 const NbnxnPairlistCpu *src = srcSet[s];
3887 if (cjGlobal + src->ncjInUse > cjStart)
3889 for (gmx::index i = 0; i < gmx::ssize(src->ci) && cjGlobal < cjEnd; i++)
3891 const nbnxn_ci_t *srcCi = &src->ci[i];
3892 int ncj = srcCi->cj_ind_end - srcCi->cj_ind_start;
3893 if (cjGlobal >= cjStart)
3895 /* If the source list is not our own, we need to set
3896 * extra flags (the template bool parameter).
3900 copySelectedListRange
3903 flag, iFlagShift, jFlagShift, t);
3907 copySelectedListRange
3910 dest, flag, iFlagShift, jFlagShift, t);
3918 cjGlobal += src->ncjInUse;
3922 dest->ncjInUse = dest->cj.size();
3926 int ncjTotalNew = 0;
3927 for (int s = 0; s < numLists; s++)
3929 ncjTotalNew += destSet[s]->ncjInUse;
3931 GMX_RELEASE_ASSERT(ncjTotalNew == ncjTotal, "The total size of the lists before and after rebalancing should match");
3935 /* Returns if the pairlists are so imbalanced that it is worth rebalancing. */
3936 static bool checkRebalanceSimpleLists(const nbnxn_pairlist_set_t *listSet)
3938 int numLists = listSet->nnbl;
3941 for (int s = 0; s < numLists; s++)
3943 ncjMax = std::max(ncjMax, listSet->nbl[s]->ncjInUse);
3944 ncjTotal += listSet->nbl[s]->ncjInUse;
3948 fprintf(debug, "Pair-list ncjMax %d ncjTotal %d\n", ncjMax, ncjTotal);
3950 /* The rebalancing adds 3% extra time to the search. Heuristically we
3951 * determined that under common conditions the non-bonded kernel balance
3952 * improvement will outweigh this when the imbalance is more than 3%.
3953 * But this will, obviously, depend on search vs kernel time and nstlist.
3955 const real rebalanceTolerance = 1.03;
3957 return numLists*ncjMax > ncjTotal*rebalanceTolerance;
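/* Threshold example: four lists with ncjInUse 1100, 1000, 1000
 * and 900 give ncjTotal = 4000 and ncjMax = 1100, so
 * 4*1100 = 4400 > 4000*1.03 = 4120 and we rebalance; with a
 * maximum of 1020, 4080 < 4120 and the lists are left as they are.
 */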
3960 /* Perform a count (linear) sort to sort the smaller lists to the end.
3961 * This avoids load imbalance on the GPU, as large lists will be
3962 * scheduled and executed first and the smaller lists later.
3963 * Load balancing between multi-processors only happens at the end,
3964 * and there smaller lists lead to more effective load balancing.
3965 * The sorting is done on the cj4 count, not on the actual pair counts.
3966 * Not only does this make the sort faster, but it also results in
3967 * better load balancing than using a list sorted on exact load.
3968 * This function swaps the pointer in the pair list to avoid a copy operation.
3970 static void sort_sci(NbnxnPairlistGpu *nbl)
3972 if (nbl->cj4.size() <= nbl->sci.size())
3974 /* nsci = 0 or all sci have size 1, sorting won't change the order */
3978 NbnxnPairlistGpuWork &work = *nbl->work;
3980 /* We will distinguish differences up to double the average */
3981 const int m = (2*nbl->cj4.size())/nbl->sci.size();
3983 /* Resize work.sci_sort so we can sort into it */
3984 work.sci_sort.resize(nbl->sci.size());
3986 std::vector<int> &sort = work.sortBuffer;
3987 /* Set up m + 1 entries in sort, initialized at 0 */
3989 sort.resize(m + 1, 0);
3990 /* Count the entries of each size */
3991 for (const nbnxn_sci_t &sci : nbl->sci)
3993 int i = std::min(m, sci.numJClusterGroups());
3996 /* Calculate the offset for each count */
3999 for (int i = m - 1; i >= 0; i--)
4002 sort[i] = sort[i + 1] + s0;
4006 /* Sort entries directly into place */
4007 gmx::ArrayRef<nbnxn_sci_t> sci_sort = work.sci_sort;
4008 for (const nbnxn_sci_t &sci : nbl->sci)
4010 int i = std::min(m, sci.numJClusterGroups());
4011 sci_sort[sort[i]++] = sci;
4014 /* Swap the sci pointers so we use the new, sorted list */
4015 std::swap(nbl->sci, work.sci_sort);
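/* Sort example: sci entries with cj4 counts [1, 3, 2, 1] and 7
 * cj4's in total give m = 2*7/4 = 3 bins. The count and reverse
 * offset passes place bin 3 at offset 0, bin 2 at 1 and bin 1 at
 * 2, so the sorted order is [3, 2, 1, 1]: the largest lists run
 * first on the GPU.
 */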
4019 nonbonded_verlet_t::PairlistSets::construct(const InteractionLocality iLocality,
4020 PairSearch *pairSearch,
4021 nbnxn_atomdata_t *nbat,
4022 const t_blocka *excl,
4023 const Nbnxm::KernelType kernelType,
4027 nbnxn_pairlist_set_t *nbl_list = &pairlistSet(iLocality);
4029 const real rlist = nbl_list->params.rlistOuter;
4031 int nsubpair_target;
4032 float nsubpair_tot_est;
4035 gmx_bool CombineNBLists;
4037 int np_tot, np_noq, np_hlj, nap;
4039 nnbl = nbl_list->nnbl;
4040 CombineNBLists = nbl_list->bCombined;
4044 fprintf(debug, "ns making %d nblists\n", nnbl);
4047 nbat->bUseBufferFlags = (nbat->out.size() > 1);
4048 /* We should re-init the flags before making the first list */
4049 if (nbat->bUseBufferFlags && iLocality == InteractionLocality::Local)
4051 init_buffer_flags(&nbat->buffer_flags, nbat->numAtoms());
4055 if (iLocality == InteractionLocality::Local)
4057 /* Only zone (grid) 0 vs 0 */
4062 nzi = pairSearch->domainSetup().zones->nizone;
4065 if (!nbl_list->bSimple && minimumIlistCountForGpuBalancing_ > 0)
4067 get_nsubpair_target(*pairSearch, iLocality, rlist, minimumIlistCountForGpuBalancing_,
4068 &nsubpair_target, &nsubpair_tot_est);
4072 nsubpair_target = 0;
4073 nsubpair_tot_est = 0;
4076 /* Clear all pair-lists */
4077 for (int th = 0; th < nnbl; th++)
4079 if (nbl_list->bSimple)
4081 clear_pairlist(nbl_list->nbl[th]);
4085 clear_pairlist(nbl_list->nblGpu[th]);
4088 if (pairSearch->gridSet().haveFep())
4090 clear_pairlist_fep(nbl_list->nbl_fep[th]);
4094 const gmx_domdec_zones_t *ddZones = pairSearch->domainSetup().zones;
4096 for (int zi = 0; zi < nzi; zi++)
4098 const Grid &iGrid = pairSearch->gridSet().grids()[zi];
4102 if (iLocality == InteractionLocality::Local)
4109 zj0 = ddZones->izone[zi].j0;
4110 zj1 = ddZones->izone[zi].j1;
4116 for (int zj = zj0; zj < zj1; zj++)
4118 const Grid &jGrid = pairSearch->gridSet().grids()[zj];
4122 fprintf(debug, "ns search grid %d vs %d\n", zi, zj);
4125 pairSearch->cycleCounting_.start(PairSearch::enbsCCsearch);
4127 ci_block = get_ci_block_size(iGrid, pairSearch->domainSetup().haveDomDec, nnbl);
4129 /* With GPU: generate progressively smaller lists for
4130 * load balancing for local only or non-local with 2 zones.
4132 progBal = (iLocality == InteractionLocality::Local || ddZones->n <= 2);
4134 #pragma omp parallel for num_threads(nnbl) schedule(static)
4135 for (int th = 0; th < nnbl; th++)
4139 /* Re-init the thread-local work flag data before making
4140 * the first list (not an elegant conditional).
4142 if (nbat->bUseBufferFlags && (zi == 0 && zj == 0))
4144 init_buffer_flags(&pairSearch->work()[th].buffer_flags, nbat->numAtoms());
4147 if (CombineNBLists && th > 0)
4149 GMX_ASSERT(!nbl_list->bSimple, "Can only combine GPU lists");
4151 clear_pairlist(nbl_list->nblGpu[th]);
4154 auto &searchWork = pairSearch->work()[th];
4156 searchWork.cycleCounter.start();
4158 /* Divide the i super cells equally over the nblists */
4159 if (nbl_list->bSimple)
4161 nbnxn_make_pairlist_part(*pairSearch, iGrid, jGrid,
4162 &searchWork, nbat, *excl,
4166 nbat->bUseBufferFlags,
4168 progBal, nsubpair_tot_est,
4171 nbl_list->nbl_fep[th]);
4175 nbnxn_make_pairlist_part(*pairSearch, iGrid, jGrid,
4176 &searchWork, nbat, *excl,
4180 nbat->bUseBufferFlags,
4182 progBal, nsubpair_tot_est,
4184 nbl_list->nblGpu[th],
4185 nbl_list->nbl_fep[th]);
4188 searchWork.cycleCounter.stop();
4190 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
4192 pairSearch->cycleCounting_.stop(PairSearch::enbsCCsearch);
4197 for (int th = 0; th < nnbl; th++)
4199 inc_nrnb(nrnb, eNR_NBNXN_DIST2, pairSearch->work()[th].ndistc);
4201 if (nbl_list->bSimple)
4203 NbnxnPairlistCpu *nbl = nbl_list->nbl[th];
4204 np_tot += nbl->cj.size();
4205 np_noq += nbl->work->ncj_noq;
4206 np_hlj += nbl->work->ncj_hlj;
4210 NbnxnPairlistGpu *nbl = nbl_list->nblGpu[th];
4211 /* This count ignores potential subsequent pair pruning */
4212 np_tot += nbl->nci_tot;
4215 if (nbl_list->bSimple)
4217 nap = nbl_list->nbl[0]->na_ci*nbl_list->nbl[0]->na_cj;
4221 nap = gmx::square(nbl_list->nblGpu[0]->na_ci);
4223 nbl_list->natpair_ljq = (np_tot - np_noq)*nap - np_hlj*nap/2;
4224 nbl_list->natpair_lj = np_noq*nap;
4225 nbl_list->natpair_q = np_hlj*nap/2;
4227 if (CombineNBLists && nnbl > 1)
4229 GMX_ASSERT(!nbl_list->bSimple, "Can only combine GPU lists");
4230 NbnxnPairlistGpu **nbl = nbl_list->nblGpu;
4232 pairSearch->cycleCounting_.start(PairSearch::enbsCCcombine);
4234 combine_nblists(nnbl-1, nbl+1, nbl[0]);
4236 pairSearch->cycleCounting_.stop(PairSearch::enbsCCcombine);
4241 if (nbl_list->bSimple)
4243 if (nnbl > 1 && checkRebalanceSimpleLists(nbl_list))
4245 rebalanceSimpleLists(nbl_list->nnbl, nbl_list->nbl, nbl_list->nbl_work, pairSearch->work());
4247 /* Swap the pointer of the sets of pair lists */
4248 NbnxnPairlistCpu **tmp = nbl_list->nbl;
4249 nbl_list->nbl = nbl_list->nbl_work;
4250 nbl_list->nbl_work = tmp;
4255 /* Sort the entries on size, large ones first */
4256 if (CombineNBLists || nnbl == 1)
4258 sort_sci(nbl_list->nblGpu[0]);
4262 #pragma omp parallel for num_threads(nnbl) schedule(static)
4263 for (int th = 0; th < nnbl; th++)
4267 sort_sci(nbl_list->nblGpu[th]);
4269 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
4274 if (nbat->bUseBufferFlags)
4276 reduce_buffer_flags(*pairSearch, nbl_list->nnbl, &nbat->buffer_flags);
4279 if (pairSearch->gridSet().haveFep())
4281 /* Balance the free-energy lists over all the threads */
4282 balance_fep_lists(pairSearch->work(), nbl_list);
4285 if (nbl_list->bSimple)
4287 /* This is a fresh list, so not pruned, stored using ci.
4288 * ciOuter is invalid at this point.
4290 GMX_ASSERT(nbl_list->nbl[0]->ciOuter.empty(), "ciOuter is invalid so it should be empty");
4293 if (iLocality == Nbnxm::InteractionLocality::Local)
4295 outerListCreationStep_ = step;
4299 GMX_RELEASE_ASSERT(outerListCreationStep_ == step,
4300 "Outer list should be created at the same step as the inner list");
4303 /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
4304 if (iLocality == InteractionLocality::Local)
4306 pairSearch->cycleCounting_.searchCount_++;
4308 if (pairSearch->cycleCounting_.recordCycles_ &&
4309 (!pairSearch->domainSetup().haveDomDec || iLocality == InteractionLocality::NonLocal) &&
4310 pairSearch->cycleCounting_.searchCount_ % 100 == 0)
4312 pairSearch->cycleCounting_.printCycles(stderr, pairSearch->work());
4315 /* If we have more than one list, they were either rebalanced (CPU)
4316  * or combined (GPU), so we should dump the final result to debug.
4318 if (debug && nbl_list->nnbl > 1)
4320 if (nbl_list->bSimple)
4322 for (int t = 0; t < nbl_list->nnbl; t++)
4324 print_nblist_statistics(debug, nbl_list->nbl[t], *pairSearch, rlist);
4329 print_nblist_statistics(debug, nbl_list->nblGpu[0], *pairSearch, rlist);
4337 if (nbl_list->bSimple)
4339 for (int t = 0; t < nbl_list->nnbl; t++)
4341 print_nblist_ci_cj(debug, nbl_list->nbl[t]);
4346 print_nblist_sci_cj(debug, nbl_list->nblGpu[0]);
4350 if (nbat->bUseBufferFlags)
4352 print_reduction_cost(&nbat->buffer_flags, nbl_list->nnbl);
4356 if (params_.useDynamicPruning && nbl_list->bSimple)
4358 nbnxnPrepareListForDynamicPruning(nbl_list);
4363 nonbonded_verlet_t::constructPairlist(const Nbnxm::InteractionLocality iLocality,
4364 const t_blocka *excl,
4368 pairlistSets_->construct(iLocality, pairSearch_.get(), nbat.get(), excl,
4369 kernelSetup_.kernelType,
4374 /* Launch the transfer of the pairlist to the GPU.
4376 * NOTE: The launch overhead is currently not timed separately
4378 Nbnxm::gpu_init_pairlist(gpu_nbv,
4379 pairlistSets().pairlistSet(iLocality).nblGpu[0],
4384 void nbnxnPrepareListForDynamicPruning(nbnxn_pairlist_set_t *listSet)
4386 GMX_RELEASE_ASSERT(listSet->bSimple, "Should only be called for simple lists");
4388 /* TODO: Restructure the lists so we have actual outer and inner
4389 * list objects so we can set a single pointer instead of
4390 * swapping several pointers.
4393 for (int i = 0; i < listSet->nnbl; i++)
4395 NbnxnPairlistCpu &list = *listSet->nbl[i];
4397 /* The search produced a list in ci/cj.
4398  * Swap the list pointers so that the outer list ends up in ciOuter,cjOuter,
4399  * which we can then prune to get an inner list in ci/cj.
4401 GMX_RELEASE_ASSERT(list.ciOuter.empty() && list.cjOuter.empty(),
4402 "The outer lists should be empty before preparation");
4404 std::swap(list.ci, list.ciOuter);
4405 std::swap(list.cj, list.cjOuter);