/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/listoflists.h"
#include "gromacs/utility/smalloc.h"

#include "boundingboxes.h"
#include "clusterdistancekerneltype.h"
#include "nbnxm_geometry.h"
#include "nbnxm_simd.h"
#include "pairlistset.h"
#include "pairlistsets.h"
#include "pairlistwork.h"
#include "pairsearch.h"
using namespace gmx; // TODO: Remove when this file is moved into gmx namespace

using BoundingBox   = Nbnxm::BoundingBox;   // TODO: Remove when refactoring this file
using BoundingBox1D = Nbnxm::BoundingBox1D; // TODO: Remove when refactoring this file

using Grid = Nbnxm::Grid; // TODO: Remove when refactoring this file

// Convenience alias for partial Nbnxn namespace usage
using InteractionLocality = gmx::InteractionLocality;
/* We shift the i-particles backward for PBC.
 * This leads to more conditionals than shifting forward.
 * We do this to get more balanced pair lists.
 */
constexpr bool c_pbcShiftBackward = true;
/* Layout for the nonbonded NxN pair lists */
enum class NbnxnLayout
{
    NoSimd4x4, // i-cluster size 4, j-cluster size 4
    Simd4xN,   // i-cluster size 4, j-cluster size SIMD width
    Simd2xNN,  // i-cluster size 4, j-cluster size half SIMD width
    Gpu8x8x8   // i-cluster size 8, j-cluster size 8 + super-clustering
};
/* Returns the j-cluster size */
template<NbnxnLayout layout>
static constexpr int jClusterSize()
{
    static_assert(layout == NbnxnLayout::NoSimd4x4 || layout == NbnxnLayout::Simd4xN
                          || layout == NbnxnLayout::Simd2xNN,
                  "Currently jClusterSize only supports CPU layouts");

    return layout == NbnxnLayout::Simd4xN
                   ? GMX_SIMD_REAL_WIDTH
                   : (layout == NbnxnLayout::Simd2xNN ? GMX_SIMD_REAL_WIDTH / 2 : c_nbnxnCpuIClusterSize);
}
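
/* Usage sketch (illustrative; assumes a SIMD build where GMX_SIMD_REAL_WIDTH == 8,
 * e.g. AVX in single precision, and the CPU i-cluster size of 4 noted in the
 * NbnxnLayout comments above):
 *   jClusterSize<NbnxnLayout::Simd4xN>()   -> 8 (the full SIMD width)
 *   jClusterSize<NbnxnLayout::Simd2xNN>()  -> 4 (half the SIMD width)
 *   jClusterSize<NbnxnLayout::NoSimd4x4>() -> c_nbnxnCpuIClusterSize (4)
 */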
/*! \brief Returns the j-cluster index given the i-cluster index.
 *
 * \tparam    jClusterSize      The number of atoms in a j-cluster
 * \tparam    jSubClusterIndex  The j-sub-cluster index (0/1), used when size(j-cluster) < size(i-cluster)
 * \param[in] ci                The i-cluster index
 */
template<int jClusterSize, int jSubClusterIndex>
static inline int cjFromCi(int ci)
{
    static_assert(jClusterSize == c_nbnxnCpuIClusterSize / 2 || jClusterSize == c_nbnxnCpuIClusterSize
                          || jClusterSize == c_nbnxnCpuIClusterSize * 2,
                  "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    static_assert(jSubClusterIndex == 0 || jSubClusterIndex == 1,
                  "Only sub-cluster indices 0 and 1 are supported");

    if (jClusterSize == c_nbnxnCpuIClusterSize / 2)
    {
        if (jSubClusterIndex == 0)
        {
            return ci << 1;
        }
        else
        {
            return ((ci + 1) << 1) - 1;
        }
    }
    else if (jClusterSize == c_nbnxnCpuIClusterSize)
    {
        return ci;
    }
    else
    {
        return ci >> 1;
    }
}
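
/* Worked example (illustrative; assumes c_nbnxnCpuIClusterSize == 4, the
 * i-cluster size listed in the NbnxnLayout comments above):
 *   jClusterSize 2: ci = 3 maps to j-clusters 6 (sub-index 0) and 7 (sub-index 1)
 *   jClusterSize 4: cj == ci
 *   jClusterSize 8: i-clusters 6 and 7 both map to j-cluster 3
 */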
/*! \brief Returns the j-cluster index given the i-cluster index.
 *
 * \tparam    layout            The pair-list layout
 * \tparam    jSubClusterIndex  The j-sub-cluster index (0/1), used when size(j-cluster) < size(i-cluster)
 * \param[in] ci                The i-cluster index
 */
template<NbnxnLayout layout, int jSubClusterIndex>
static inline int cjFromCi(int ci)
{
    constexpr int clusterSize = jClusterSize<layout>();

    return cjFromCi<clusterSize, jSubClusterIndex>(ci);
}
/* Returns the nbnxn coordinate data index given the i-cluster index */
template<NbnxnLayout layout>
static inline int xIndexFromCi(int ci)
{
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == c_nbnxnCpuIClusterSize / 2 || clusterSize == c_nbnxnCpuIClusterSize
                          || clusterSize == c_nbnxnCpuIClusterSize * 2,
                  "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize <= c_nbnxnCpuIClusterSize)
    {
        /* Coordinates are stored packed in groups of 4 */
        return ci * STRIDE_P4;
    }
    else
    {
        /* Coordinates packed in 8, i-cluster size is half the packing width */
        return (ci >> 1) * STRIDE_P8 + (ci & 1) * (c_packX8 >> 1);
    }
}
/* Returns the nbnxn coordinate data index given the j-cluster index */
template<NbnxnLayout layout>
static inline int xIndexFromCj(int cj)
{
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == c_nbnxnCpuIClusterSize / 2 || clusterSize == c_nbnxnCpuIClusterSize
                          || clusterSize == c_nbnxnCpuIClusterSize * 2,
                  "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize == c_nbnxnCpuIClusterSize / 2)
    {
        /* Coordinates are stored packed in groups of 4 */
        return (cj >> 1) * STRIDE_P4 + (cj & 1) * (c_packX4 >> 1);
    }
    else if (clusterSize == c_nbnxnCpuIClusterSize)
    {
        /* Coordinates are stored packed in groups of 4 */
        return cj * STRIDE_P4;
    }
    else
    {
        /* Coordinates are stored packed in groups of 8 */
        return cj * STRIDE_P8;
    }
}
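
/* Worked example (illustrative; assumes c_packX4 == 4 and c_packX8 == 8, so
 * STRIDE_P4 == DIM * 4 == 12 and STRIDE_P8 == DIM * 8 == 24 reals per pack):
 *   half-width j-clusters (size 2):  cj = 5 -> (5 >> 1) * 12 + (5 & 1) * 2 = 26,
 *     i.e. the second half of the 4-wide pack holding clusters 4 and 5
 *   full-width j-clusters (size 4):  cj = 5 -> 5 * 12 = 60
 *   double-width j-clusters (size 8): cj = 5 -> 5 * 24 = 120
 */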
void nbnxn_init_pairlist_fep(t_nblist* nl)
{
    nl->type      = GMX_NBLIST_INTERACTION_FREE_ENERGY;
    nl->igeometry = GMX_NBLIST_GEOMETRY_PARTICLE_PARTICLE;
    /* The interaction functions are set in the free energy kernel function */
    nl->ivdw     = -1;
    nl->ivdwmod  = -1;
    nl->ielec    = -1;
    nl->ielecmod = -1;

    nl->maxnri   = 0;
    nl->maxnrj   = 0;
    nl->nri      = 0;
    nl->nrj      = 0;
    nl->iinr     = nullptr;
    nl->gid      = nullptr;
    nl->shift    = nullptr;
    nl->jindex   = nullptr;
    nl->jjnr     = nullptr;
    nl->excl_fep = nullptr;
}
static void init_buffer_flags(nbnxn_buffer_flags_t* flags, int natoms)
{
    flags->nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1) / NBNXN_BUFFERFLAG_SIZE;
    if (flags->nflag > flags->flag_nalloc)
    {
        flags->flag_nalloc = over_alloc_large(flags->nflag);
        srenew(flags->flag, flags->flag_nalloc);
    }
    for (int b = 0; b < flags->nflag; b++)
    {
        bitmask_clear(&(flags->flag[b]));
    }
}
/* Returns the pair-list cutoff between a bounding box and a grid cell given an atom-to-atom pair-list cutoff
 *
 * Given a cutoff distance between atoms, this function returns the cutoff
 * distance between a bounding box of a group of atoms and a grid cell.
 * Since atoms can be geometrically outside of the cell they have been
 * assigned to (when atom groups instead of individual atoms are assigned
 * to cells), the distance returned can be larger than the input.
 */
static real listRangeForBoundingBoxToGridCell(real rlist, const Grid::Dimensions& gridDims)
{
    return rlist + gridDims.maxAtomGroupRadius;
}
/* Returns the pair-list cutoff between two grid cells given an atom-to-atom pair-list cutoff
 *
 * Given a cutoff distance between atoms, this function returns the cutoff
 * distance between two grid cells.
 * Since atoms can be geometrically outside of the cell they have been
 * assigned to (when atom groups instead of individual atoms are assigned
 * to cells), the distance returned can be larger than the input.
 */
static real listRangeForGridCellToGridCell(real                    rlist,
                                           const Grid::Dimensions& iGridDims,
                                           const Grid::Dimensions& jGridDims)
{
    return rlist + iGridDims.maxAtomGroupRadius + jGridDims.maxAtomGroupRadius;
}
/* Determines the cell range along one dimension that
 * the bounding box b0 - b1 sees.
 */
template<int dim>
static void
get_cell_range(real b0, real b1, const Grid::Dimensions& jGridDims, real d2, real rlist, int* cf, int* cl)
{
    real listRangeBBToCell2 = gmx::square(listRangeForBoundingBoxToGridCell(rlist, jGridDims));
    real distanceInCells    = (b0 - jGridDims.lowerCorner[dim]) * jGridDims.invCellSize[dim];
    *cf                     = std::max(static_cast<int>(distanceInCells), 0);

    /* Extend the range downward while the cell below is in range;
     * the upper edge of cell *cf - 1 lies at *cf * cellSize.
     */
    while (*cf > 0
           && d2 + gmx::square((b0 - jGridDims.lowerCorner[dim]) - *cf * jGridDims.cellSize[dim])
                      < listRangeBBToCell2)
    {
        (*cf)--;
    }

    *cl = std::min(static_cast<int>((b1 - jGridDims.lowerCorner[dim]) * jGridDims.invCellSize[dim]),
                   jGridDims.numCells[dim] - 1);
    while (*cl < jGridDims.numCells[dim] - 1
           && d2 + gmx::square((*cl + 1) * jGridDims.cellSize[dim] - (b1 - jGridDims.lowerCorner[dim]))
                      < listRangeBBToCell2)
    {
        (*cl)++;
    }
}
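
/* Worked example (illustrative): with lowerCorner[dim] == 0, cellSize[dim] == 0.5,
 * invCellSize[dim] == 2, at least 5 cells, and a bounding box spanning b0 = 1.2 to
 * b1 = 1.7, the initial range is *cf = 2, *cl = 3. The two loops then extend the
 * range outward for as long as d2 plus the squared gap along this dimension stays
 * below the squared bounding-box-to-cell list range.
 */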
/* Reference code calculating the distance^2 between two bounding boxes */
static float box_dist2(float bx0, float bx1, float by0, float by1, float bz0, float bz1, const BoundingBox* bb)
{
    float d2 = 0;
    float dl, dh, dm, dm0;

    dl  = bx0 - bb->upper.x;
    dh  = bb->lower.x - bx1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0 * dm0;

    dl  = by0 - bb->upper.y;
    dh  = bb->lower.y - by1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0 * dm0;

    dl  = bz0 - bb->upper.z;
    dh  = bb->lower.z - bz1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0 * dm0;

    return d2;
}
#if !NBNXN_SEARCH_BB_SIMD4

/*! \brief Plain C code calculating the distance^2 between two bounding boxes in xyz0 format
 *
 * \param[in] bb_i  First bounding box
 * \param[in] bb_j  Second bounding box
 */
static float clusterBoundingBoxDistance2(const BoundingBox& bb_i, const BoundingBox& bb_j)
{
    float dl  = bb_i.lower.x - bb_j.upper.x;
    float dh  = bb_j.lower.x - bb_i.upper.x;
    float dm  = std::max(dl, dh);
    float dm0 = std::max(dm, 0.0F);
    float d2  = dm0 * dm0;

    dl  = bb_i.lower.y - bb_j.upper.y;
    dh  = bb_j.lower.y - bb_i.upper.y;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0F);
    d2 += dm0 * dm0;

    dl  = bb_i.lower.z - bb_j.upper.z;
    dh  = bb_j.lower.z - bb_i.upper.z;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0F);
    d2 += dm0 * dm0;

    return d2;
}
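
/* Worked example (illustrative): two boxes that overlap in y and z but are
 * separated by a gap of 1.0 in x, e.g. bb_i.x in [0, 1] and bb_j.x in [2, 3],
 * give dl = -3, dh = 1, dm0 = 1 and thus a distance^2 of 1.0; for overlapping
 * boxes all three dm0 terms clamp to zero and the function returns 0.
 */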
#else /* NBNXN_SEARCH_BB_SIMD4 */

/*! \brief 4-wide SIMD code calculating the distance^2 between two bounding boxes in xyz0 format
 *
 * \param[in] bb_i  First bounding box, should be aligned for 4-wide SIMD
 * \param[in] bb_j  Second bounding box, should be aligned for 4-wide SIMD
 */
static float clusterBoundingBoxDistance2(const BoundingBox& bb_i, const BoundingBox& bb_j)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    const Simd4Float bb_i_S0 = load4(bb_i.lower.ptr());
    const Simd4Float bb_i_S1 = load4(bb_i.upper.ptr());
    const Simd4Float bb_j_S0 = load4(bb_j.lower.ptr());
    const Simd4Float bb_j_S1 = load4(bb_j.upper.ptr());

    const Simd4Float dl_S = bb_i_S0 - bb_j_S1;
    const Simd4Float dh_S = bb_j_S0 - bb_i_S1;

    const Simd4Float dm_S  = max(dl_S, dh_S);
    const Simd4Float dm0_S = max(dm_S, simd4SetZeroF());

    return dotProduct(dm0_S, dm0_S);
}
/* Calculate bb bounding distances of bb_i[si,...,si+3] and store them in d2 */
template<int boundingBoxStart>
static inline void gmx_simdcall clusterBoundingBoxDistance2_xxxx_simd4_inner(const float*     bb_i,
                                                                             float*           d2,
                                                                             const Simd4Float xj_l,
                                                                             const Simd4Float yj_l,
                                                                             const Simd4Float zj_l,
                                                                             const Simd4Float xj_h,
                                                                             const Simd4Float yj_h,
                                                                             const Simd4Float zj_h)
{
    constexpr int stride = c_packedBoundingBoxesDimSize;

    const int shi = boundingBoxStart * Nbnxm::c_numBoundingBoxBounds1D * DIM;

    const Simd4Float zero = setZero();

    const Simd4Float xi_l = load4(bb_i + shi + 0 * stride);
    const Simd4Float yi_l = load4(bb_i + shi + 1 * stride);
    const Simd4Float zi_l = load4(bb_i + shi + 2 * stride);
    const Simd4Float xi_h = load4(bb_i + shi + 3 * stride);
    const Simd4Float yi_h = load4(bb_i + shi + 4 * stride);
    const Simd4Float zi_h = load4(bb_i + shi + 5 * stride);

    const Simd4Float dx_0 = xi_l - xj_h;
    const Simd4Float dy_0 = yi_l - yj_h;
    const Simd4Float dz_0 = zi_l - zj_h;

    const Simd4Float dx_1 = xj_l - xi_h;
    const Simd4Float dy_1 = yj_l - yi_h;
    const Simd4Float dz_1 = zj_l - zi_h;

    const Simd4Float mx = max(dx_0, dx_1);
    const Simd4Float my = max(dy_0, dy_1);
    const Simd4Float mz = max(dz_0, dz_1);

    const Simd4Float m0x = max(mx, zero);
    const Simd4Float m0y = max(my, zero);
    const Simd4Float m0z = max(mz, zero);

    const Simd4Float d2x = m0x * m0x;
    const Simd4Float d2y = m0y * m0y;
    const Simd4Float d2z = m0z * m0z;

    const Simd4Float d2s = d2x + d2y;
    const Simd4Float d2t = d2s + d2z;

    store4(d2 + boundingBoxStart, d2t);
}
/* 4-wide SIMD code for nsi bb distances for bb format xxxxyyyyzzzz */
static void clusterBoundingBoxDistance2_xxxx_simd4(const float* bb_j, const int nsi, const float* bb_i, float* d2)
{
    constexpr int stride = c_packedBoundingBoxesDimSize;

    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    const Simd4Float xj_l = Simd4Float(bb_j[0 * stride]);
    const Simd4Float yj_l = Simd4Float(bb_j[1 * stride]);
    const Simd4Float zj_l = Simd4Float(bb_j[2 * stride]);
    const Simd4Float xj_h = Simd4Float(bb_j[3 * stride]);
    const Simd4Float yj_h = Simd4Float(bb_j[4 * stride]);
    const Simd4Float zj_h = Simd4Float(bb_j[5 * stride]);

    /* Here we "loop" over si (0,stride) from 0 to nsi with step stride.
     * But as we know the number of iterations is 1 or 2, we unroll manually.
     */
    clusterBoundingBoxDistance2_xxxx_simd4_inner<0>(bb_i, d2, xj_l, yj_l, zj_l, xj_h, yj_h, zj_h);
    if (nsi > stride)
    {
        clusterBoundingBoxDistance2_xxxx_simd4_inner<stride>(bb_i, d2, xj_l, yj_l, zj_l, xj_h, yj_h, zj_h);
    }
}
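
/* Layout note (illustrative; assumes c_packedBoundingBoxesDimSize == 4): in the
 * packed xxxxyyyyzzzz format each group of 4 bounding boxes stores its bounds as
 * six 4-wide arrays (x-lower, y-lower, z-lower, x-upper, y-upper, z-upper). Here
 * bb_j[0 * stride] is the x lower bound of the single j-box, broadcast to all
 * SIMD lanes, while load4() in the inner routine fetches the same bound for 4
 * consecutive i-boxes at once; the two unrolled calls above thus cover up to 8
 * i-clusters per cell.
 */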
#endif /* NBNXN_SEARCH_BB_SIMD4 */
/* Returns whether any atom pair from two clusters is within distance sqrt(rlist2) */
static inline gmx_bool
clusterpair_in_range(const NbnxnPairlistGpuWork& work, int si, int csj, int stride, const real* x_j, real rlist2)
{
#if !GMX_SIMD4_HAVE_REAL

    /* Plain C version.
     * All coordinates are stored as xyzxyz...
     */

    const real* x_i = work.iSuperClusterData.x.data();

    for (int i = 0; i < c_nbnxnGpuClusterSize; i++)
    {
        int i0 = (si * c_nbnxnGpuClusterSize + i) * DIM;
        for (int j = 0; j < c_nbnxnGpuClusterSize; j++)
        {
            int j0 = (csj * c_nbnxnGpuClusterSize + j) * stride;

            real d2 = gmx::square(x_i[i0] - x_j[j0]) + gmx::square(x_i[i0 + 1] - x_j[j0 + 1])
                      + gmx::square(x_i[i0 + 2] - x_j[j0 + 2]);

            if (d2 < rlist2)
            {
                return TRUE;
            }
        }
    }

    return FALSE;
#else /* !GMX_SIMD4_HAVE_REAL */

    /* 4-wide SIMD version.
     * The coordinates x_i are stored as xxxxyyyy..., x_j is stored xyzxyz...
     * Using 8-wide AVX(2) is not faster on Intel Sandy Bridge and Haswell.
     */
    static_assert(c_nbnxnGpuClusterSize == 8 || c_nbnxnGpuClusterSize == 4,
                  "A cluster is hard-coded to 4/8 atoms.");

    Simd4Real rc2_S = Simd4Real(rlist2);

    const real* x_i = work.iSuperClusterData.xSimd.data();

    int       dim_stride = c_nbnxnGpuClusterSize * DIM;
    Simd4Real ix_S0      = load4(x_i + si * dim_stride + 0 * GMX_SIMD4_WIDTH);
    Simd4Real iy_S0      = load4(x_i + si * dim_stride + 1 * GMX_SIMD4_WIDTH);
    Simd4Real iz_S0      = load4(x_i + si * dim_stride + 2 * GMX_SIMD4_WIDTH);

    Simd4Real ix_S1, iy_S1, iz_S1;
    if (c_nbnxnGpuClusterSize == 8)
    {
        ix_S1 = load4(x_i + si * dim_stride + 3 * GMX_SIMD4_WIDTH);
        iy_S1 = load4(x_i + si * dim_stride + 4 * GMX_SIMD4_WIDTH);
        iz_S1 = load4(x_i + si * dim_stride + 5 * GMX_SIMD4_WIDTH);
    }
    /* We loop from the outer to the inner particles to maximize
     * the chance that we find a pair in range quickly and return.
     */
    int j0 = csj * c_nbnxnGpuClusterSize;
    int j1 = j0 + c_nbnxnGpuClusterSize - 1;
    while (j0 < j1)
    {
        Simd4Real jx0_S, jy0_S, jz0_S;
        Simd4Real jx1_S, jy1_S, jz1_S;

        Simd4Real dx_S0, dy_S0, dz_S0;
        Simd4Real dx_S1, dy_S1, dz_S1;
        Simd4Real dx_S2, dy_S2, dz_S2;
        Simd4Real dx_S3, dy_S3, dz_S3;

        Simd4Real rsq_S0, rsq_S1, rsq_S2, rsq_S3;

        Simd4Bool wco_S0, wco_S1, wco_S2, wco_S3;
        Simd4Bool wco_any_S01, wco_any_S23, wco_any_S;

        jx0_S = Simd4Real(x_j[j0 * stride + 0]);
        jy0_S = Simd4Real(x_j[j0 * stride + 1]);
        jz0_S = Simd4Real(x_j[j0 * stride + 2]);

        jx1_S = Simd4Real(x_j[j1 * stride + 0]);
        jy1_S = Simd4Real(x_j[j1 * stride + 1]);
        jz1_S = Simd4Real(x_j[j1 * stride + 2]);

        /* Calculate distance */
        dx_S0 = ix_S0 - jx0_S;
        dy_S0 = iy_S0 - jy0_S;
        dz_S0 = iz_S0 - jz0_S;
        dx_S2 = ix_S0 - jx1_S;
        dy_S2 = iy_S0 - jy1_S;
        dz_S2 = iz_S0 - jz1_S;
        if (c_nbnxnGpuClusterSize == 8)
        {
            dx_S1 = ix_S1 - jx0_S;
            dy_S1 = iy_S1 - jy0_S;
            dz_S1 = iz_S1 - jz0_S;
            dx_S3 = ix_S1 - jx1_S;
            dy_S3 = iy_S1 - jy1_S;
            dz_S3 = iz_S1 - jz1_S;
        }

        /* rsq = dx*dx+dy*dy+dz*dz */
        rsq_S0 = norm2(dx_S0, dy_S0, dz_S0);
        rsq_S2 = norm2(dx_S2, dy_S2, dz_S2);
        if (c_nbnxnGpuClusterSize == 8)
        {
            rsq_S1 = norm2(dx_S1, dy_S1, dz_S1);
            rsq_S3 = norm2(dx_S3, dy_S3, dz_S3);
        }

        wco_S0 = (rsq_S0 < rc2_S);
        wco_S2 = (rsq_S2 < rc2_S);
        if (c_nbnxnGpuClusterSize == 8)
        {
            wco_S1 = (rsq_S1 < rc2_S);
            wco_S3 = (rsq_S3 < rc2_S);
        }
        if (c_nbnxnGpuClusterSize == 8)
        {
            wco_any_S01 = wco_S0 || wco_S1;
            wco_any_S23 = wco_S2 || wco_S3;
            wco_any_S   = wco_any_S01 || wco_any_S23;
        }
        else
        {
            wco_any_S = wco_S0 || wco_S2;
        }

        if (anyTrue(wco_any_S))
        {
            return TRUE;
        }

        j0++;
        j1--;
    }

    return FALSE;

#endif /* !GMX_SIMD4_HAVE_REAL */
}
/* Returns the j-cluster index for index cjIndex in a cj list */
static inline int nblCj(gmx::ArrayRef<const nbnxn_cj_t> cjList, int cjIndex)
{
    return cjList[cjIndex].cj;
}

/* Returns the j-cluster index for index cjIndex in a cj4 list */
static inline int nblCj(gmx::ArrayRef<const nbnxn_cj4_t> cj4List, int cjIndex)
{
    return cj4List[cjIndex / c_nbnxnGpuJgroupSize].cj[cjIndex & (c_nbnxnGpuJgroupSize - 1)];
}
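
/* Indexing example (illustrative; assumes c_nbnxnGpuJgroupSize == 4): j-cluster
 * list index 9 lives in cj4 group 9 / 4 == 2, slot 9 & 3 == 1, i.e.
 * cj4List[2].cj[1]; the cj_t overload is a plain flat lookup.
 */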
/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
static unsigned int nbl_imask0(const NbnxnPairlistGpu* nbl, int cj_ind)
{
    return nbl->cj4[cj_ind / c_nbnxnGpuJgroupSize].imei[0].imask;
}
NbnxnPairlistCpu::NbnxnPairlistCpu() :
    na_ci(c_nbnxnCpuIClusterSize),
    na_cj(0),
    rlist(0),
    ncjInUse(0),
    nci_tot(0),
    work(std::make_unique<NbnxnPairlistCpuWork>())
{
}
NbnxnPairlistGpu::NbnxnPairlistGpu(gmx::PinningPolicy pinningPolicy) :
    na_ci(c_nbnxnGpuClusterSize),
    na_cj(c_nbnxnGpuClusterSize),
    na_sc(c_gpuNumClusterPerCell * c_nbnxnGpuClusterSize),
    rlist(0),
    sci({}, { pinningPolicy }),
    cj4({}, { pinningPolicy }),
    excl({}, { pinningPolicy }),
    nci_tot(0),
    work(std::make_unique<NbnxnPairlistGpuWork>())
{
    static_assert(c_nbnxnGpuNumClusterPerSupercluster == c_gpuNumClusterPerCell,
                  "The search code assumes that a super-cluster matches a search grid cell");

    static_assert(sizeof(cj4[0].imei[0].imask) * 8 >= c_nbnxnGpuJgroupSize * c_gpuNumClusterPerCell,
                  "The i super-cluster cluster interaction mask does not contain a sufficient "
                  "number of bits");

    static_assert(sizeof(excl[0]) * 8 >= c_nbnxnGpuJgroupSize * c_gpuNumClusterPerCell,
                  "The GPU exclusion mask does not contain a sufficient number of bits");

    // We always want a first entry without any exclusions
    excl.resize(1);
}
// TODO: Move to pairlistset.cpp
PairlistSet::PairlistSet(const InteractionLocality locality, const PairlistParams& pairlistParams) :
    locality_(locality),
    params_(pairlistParams)
{
    isCpuType_ = (params_.pairlistType == PairlistType::Simple4x2
                  || params_.pairlistType == PairlistType::Simple4x4
                  || params_.pairlistType == PairlistType::Simple4x8);
    // Currently GPU lists are always combined
    combineLists_ = !isCpuType_;

    const int numLists = gmx_omp_nthreads_get(emntNonbonded);

    if (!combineLists_ && numLists > NBNXN_BUFFERFLAG_MAX_THREADS)
    {
        gmx_fatal(FARGS,
                  "%d OpenMP threads were requested. Since the non-bonded force buffer reduction "
                  "is prohibitively slow with more than %d threads, we do not allow this. Use %d "
                  "or less OpenMP threads.",
                  numLists, NBNXN_BUFFERFLAG_MAX_THREADS, NBNXN_BUFFERFLAG_MAX_THREADS);
    }

    if (isCpuType_)
    {
        cpuLists_.resize(numLists);
        if (numLists > 1)
        {
            cpuListsWork_.resize(numLists);
        }
    }
    else
    {
        /* Only list 0 is used on the GPU, use normal allocation for i>0 */
        gpuLists_.emplace_back(gmx::PinningPolicy::PinnedIfSupported);
        /* Lists 0 to numLists are used for constructing lists in parallel
         * on the CPU using numLists threads (and then merged into list 0).
         */
        for (int i = 1; i < numLists; i++)
        {
            gpuLists_.emplace_back(gmx::PinningPolicy::CannotBePinned);
        }
    }
    if (params_.haveFep)
    {
        fepLists_.resize(numLists);

        /* Execute in order to avoid memory interleaving between threads */
#pragma omp parallel for num_threads(numLists) schedule(static)
        for (int i = 0; i < numLists; i++)
        {
            try
            {
                /* We used to allocate all normal lists locally on each thread
                 * as well. The question is if allocating the object on the
                 * master thread (but all contained list memory thread local)
                 * impacts performance.
                 */
                fepLists_[i] = std::make_unique<t_nblist>();
                nbnxn_init_pairlist_fep(fepLists_[i].get());
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
        }
    }
}
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics(FILE*                   fp,
                                    const NbnxnPairlistCpu& nbl,
                                    const Nbnxm::GridSet&   gridSet,
                                    const real              rl)
{
    const Grid&             grid = gridSet.grids()[0];
    const Grid::Dimensions& dims = grid.dimensions();

    fprintf(fp, "nbl nci %zu ncj %d\n", nbl.ci.size(), nbl.ncjInUse);
    const int    numAtomsJCluster = grid.geometry().numAtomsJCluster;
    const double numAtomsPerCell = nbl.ncjInUse / static_cast<double>(grid.numCells()) * numAtomsJCluster;
    fprintf(fp, "nbl na_cj %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n", nbl.na_cj, rl,
            nbl.ncjInUse, nbl.ncjInUse / static_cast<double>(grid.numCells()), numAtomsPerCell,
            numAtomsPerCell
                    / (0.5 * 4.0 / 3.0 * M_PI * rl * rl * rl * grid.numCells() * numAtomsJCluster
                       / (dims.gridSize[XX] * dims.gridSize[YY] * dims.gridSize[ZZ])));

    fprintf(fp, "nbl average j cell list length %.1f\n",
            0.25 * nbl.ncjInUse / std::max(static_cast<double>(nbl.ci.size()), 1.0));

    int cs[SHIFTS] = { 0 };
    int npexcl     = 0;
    for (const nbnxn_ci_t& ciEntry : nbl.ci)
    {
        cs[ciEntry.shift & NBNXN_CI_SHIFT] += ciEntry.cj_ind_end - ciEntry.cj_ind_start;

        int j = ciEntry.cj_ind_start;
        while (j < ciEntry.cj_ind_end && nbl.cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
        {
            npexcl++;
            j++;
        }
    }
    fprintf(fp, "nbl cell pairs, total: %zu excl: %d %.1f%%\n", nbl.cj.size(), npexcl,
            100 * npexcl / std::max(static_cast<double>(nbl.cj.size()), 1.0));
    for (int s = 0; s < SHIFTS; s++)
    {
        if (cs[s] > 0)
        {
            fprintf(fp, "nbl shift %2d ncj %3d\n", s, cs[s]);
        }
    }
}
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics(FILE*                   fp,
                                    const NbnxnPairlistGpu& nbl,
                                    const Nbnxm::GridSet&   gridSet,
                                    const real              rl)
{
    const Grid&             grid = gridSet.grids()[0];
    const Grid::Dimensions& dims = grid.dimensions();

    fprintf(fp, "nbl nsci %zu ncj4 %zu nsi %d excl4 %zu\n", nbl.sci.size(), nbl.cj4.size(),
            nbl.nci_tot, nbl.excl.size());
    const int    numAtomsCluster = grid.geometry().numAtomsICluster;
    const double numAtomsPerCell = nbl.nci_tot / static_cast<double>(grid.numClusters()) * numAtomsCluster;
    fprintf(fp, "nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n", nbl.na_ci, rl,
            nbl.nci_tot, nbl.nci_tot / static_cast<double>(grid.numClusters()), numAtomsPerCell,
            numAtomsPerCell
                    / (0.5 * 4.0 / 3.0 * M_PI * rl * rl * rl * grid.numClusters() * numAtomsCluster
                       / (dims.gridSize[XX] * dims.gridSize[YY] * dims.gridSize[ZZ])));

    double sum_nsp                       = 0;
    double sum_nsp2                      = 0;
    int    nsp_max                       = 0;
    int    c[c_gpuNumClusterPerCell + 1] = { 0 };
    for (const nbnxn_sci_t& sci : nbl.sci)
    {
        int nsp = 0;
        for (int j4 = sci.cj4_ind_start; j4 < sci.cj4_ind_end; j4++)
        {
            for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
            {
                int b = 0;
                for (int si = 0; si < c_gpuNumClusterPerCell; si++)
                {
                    if (nbl.cj4[j4].imei[0].imask & (1U << (j * c_gpuNumClusterPerCell + si)))
                    {
                        b++;
                    }
                }
                nsp += b;
                c[b]++;
            }
        }
        sum_nsp += nsp;
        sum_nsp2 += nsp * nsp;
        nsp_max = std::max(nsp_max, nsp);
    }
    if (!nbl.sci.empty())
    {
        sum_nsp /= nbl.sci.size();
        sum_nsp2 /= nbl.sci.size();
    }
    fprintf(fp, "nbl #cluster-pairs: av %.1f stddev %.1f max %d\n", sum_nsp,
            std::sqrt(sum_nsp2 - sum_nsp * sum_nsp), nsp_max);

    if (!nbl.cj4.empty())
    {
        for (int b = 0; b <= c_gpuNumClusterPerCell; b++)
        {
            fprintf(fp, "nbl j-list #i-subcell %d %7d %4.1f\n", b, c[b],
                    100.0 * c[b] / size_t{ nbl.cj4.size() * c_nbnxnGpuJgroupSize });
        }
    }
}
/* Returns a reference to the exclusion mask for j-cluster-group \p cj4 and warp \p warp
 *
 * Generates a new exclusion entry when the j-cluster-group uses
 * the default all-interaction mask at call time, so the returned mask
 * can be modified when needed.
 */
static nbnxn_excl_t& get_exclusion_mask(NbnxnPairlistGpu* nbl, int cj4, int warp)
{
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
    {
        /* No exclusions set, make a new list entry */
        const size_t oldSize = nbl->excl.size();
        GMX_ASSERT(oldSize >= 1, "We should always have entry [0]");
        /* Add entry with default values: no exclusions */
        nbl->excl.resize(oldSize + 1);
        nbl->cj4[cj4].imei[warp].excl_ind = oldSize;
    }

    return nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
}
/* Sets self exclusions and excludes half of the double pairs in the self cluster-pair \p nbl->cj4[cj4Index].cj[jOffsetInGroup]
 *
 * \param[in,out] nbl             The cluster pair list
 * \param[in]     cj4Index        The j-cluster group index into \p nbl->cj4
 * \param[in]     jOffsetInGroup  The j-entry offset in \p nbl->cj4[cj4Index]
 * \param[in]     iClusterInCell  The i-cluster index in the cell
 */
static void setSelfAndNewtonExclusionsGpu(NbnxnPairlistGpu* nbl,
                                          const int         cj4Index,
                                          const int         jOffsetInGroup,
                                          const int         iClusterInCell)
{
    constexpr int numJatomsPerPart = c_nbnxnGpuClusterSize / c_nbnxnGpuClusterpairSplit;

    /* The exclusions are stored separately for each part of the split */
    for (int part = 0; part < c_nbnxnGpuClusterpairSplit; part++)
    {
        const int jOffset = part * numJatomsPerPart;
        /* Make a new exclusion mask entry for each part, if we don't already have one yet */
        nbnxn_excl_t& excl = get_exclusion_mask(nbl, cj4Index, part);

        /* Set all bits with j-index <= i-index */
        for (int jIndexInPart = 0; jIndexInPart < numJatomsPerPart; jIndexInPart++)
        {
            for (int i = jOffset + jIndexInPart; i < c_nbnxnGpuClusterSize; i++)
            {
                excl.pair[jIndexInPart * c_nbnxnGpuClusterSize + i] &=
                        ~(1U << (jOffsetInGroup * c_gpuNumClusterPerCell + iClusterInCell));
            }
        }
    }
}
/* Returns a diagonal or off-diagonal interaction mask for plain C lists */
static unsigned int get_imask(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=2 */
gmx_unused static unsigned int get_imask_simd_j2(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci * 2 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_0
                                  : (rdiag && ci * 2 + 1 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_1
                                                               : NBNXN_INTERACTION_MASK_ALL));
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=4 */
gmx_unused static unsigned int get_imask_simd_j4(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=8 */
gmx_unused static unsigned int get_imask_simd_j8(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj * 2 ? NBNXN_INTERACTION_MASK_DIAG_J8_0
                                  : (rdiag && ci == cj * 2 + 1 ? NBNXN_INTERACTION_MASK_DIAG_J8_1
                                                               : NBNXN_INTERACTION_MASK_ALL));
}
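
/* Mask example (illustrative): for a self cluster-pair (the diagonal case) with
 * rdiag set, the diagonal mask removes the self pair and one of each
 * double-counted atom pair, keeping only pairs with j-atom index greater than
 * i-atom index; all other cluster pairs get the all-interactions mask. The J2/J8
 * variants do the same for the two j-clusters covering one i-cluster, or the two
 * i-clusters covering one j-cluster, respectively.
 */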
#if GMX_SIMD
#    if GMX_SIMD_REAL_WIDTH == 2
#        define get_imask_simd_4xn get_imask_simd_j2
#    endif
#    if GMX_SIMD_REAL_WIDTH == 4
#        define get_imask_simd_4xn get_imask_simd_j4
#    endif
#    if GMX_SIMD_REAL_WIDTH == 8
#        define get_imask_simd_4xn get_imask_simd_j8
#        define get_imask_simd_2xnn get_imask_simd_j4
#    endif
#    if GMX_SIMD_REAL_WIDTH == 16
#        define get_imask_simd_2xnn get_imask_simd_j8
#    endif
#endif
/* Plain C code for checking and adding cluster-pairs to the list.
 *
 * \param[in]     jGrid               The j-grid
 * \param[in,out] nbl                 The pair-list to store the cluster pairs in
 * \param[in]     icluster            The index of the i-cluster
 * \param[in]     jclusterFirst       The first cluster in the j-range
 * \param[in]     jclusterLast        The last cluster in the j-range
 * \param[in]     excludeSubDiagonal  Exclude atom pairs with i-index > j-index
 * \param[in]     x_j                 Coordinates for the j-atoms, in xyz format
 * \param[in]     rlist2              The squared list cut-off
 * \param[in]     rbb2                The squared cut-off for putting cluster-pairs in the list based on bounding box distance only
 * \param[in,out] numDistanceChecks   The number of distance checks performed
 */
static void makeClusterListSimple(const Grid&       jGrid,
                                  NbnxnPairlistCpu* nbl,
                                  int               icluster,
                                  int               jclusterFirst,
                                  int               jclusterLast,
                                  bool              excludeSubDiagonal,
                                  const real* gmx_restrict x_j,
                                  real                     rlist2,
                                  float                    rbb2,
                                  int* gmx_restrict numDistanceChecks)
{
    const BoundingBox* gmx_restrict bb_ci = nbl->work->iClusterData.bb.data();
    const real* gmx_restrict x_ci         = nbl->work->iClusterData.x.data();

    gmx_bool InRange;

    InRange = FALSE;
    while (!InRange && jclusterFirst <= jclusterLast)
    {
        real d2 = clusterBoundingBoxDistance2(bb_ci[0], jGrid.jBoundingBoxes()[jclusterFirst]);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rlist2)
        {
            int cjf_gl = jGrid.cellOffset() + jclusterFirst;
            for (int i = 0; i < c_nbnxnCpuIClusterSize && !InRange; i++)
            {
                for (int j = 0; j < c_nbnxnCpuIClusterSize; j++)
                {
                    InRange = InRange
                              || (gmx::square(x_ci[i * STRIDE_XYZ + XX]
                                              - x_j[(cjf_gl * c_nbnxnCpuIClusterSize + j) * STRIDE_XYZ + XX])
                                          + gmx::square(x_ci[i * STRIDE_XYZ + YY]
                                                        - x_j[(cjf_gl * c_nbnxnCpuIClusterSize + j) * STRIDE_XYZ + YY])
                                          + gmx::square(x_ci[i * STRIDE_XYZ + ZZ]
                                                        - x_j[(cjf_gl * c_nbnxnCpuIClusterSize + j) * STRIDE_XYZ + ZZ])
                                  < rlist2);
                }
            }
            *numDistanceChecks += c_nbnxnCpuIClusterSize * c_nbnxnCpuIClusterSize;
        }
        if (!InRange)
        {
            jclusterFirst++;
        }
    }
    if (!InRange)
    {
        return;
    }

    InRange = FALSE;
    while (!InRange && jclusterLast > jclusterFirst)
    {
        real d2 = clusterBoundingBoxDistance2(bb_ci[0], jGrid.jBoundingBoxes()[jclusterLast]);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rlist2)
        {
            int cjl_gl = jGrid.cellOffset() + jclusterLast;
            for (int i = 0; i < c_nbnxnCpuIClusterSize && !InRange; i++)
            {
                for (int j = 0; j < c_nbnxnCpuIClusterSize; j++)
                {
                    InRange = InRange
                              || (gmx::square(x_ci[i * STRIDE_XYZ + XX]
                                              - x_j[(cjl_gl * c_nbnxnCpuIClusterSize + j) * STRIDE_XYZ + XX])
                                          + gmx::square(x_ci[i * STRIDE_XYZ + YY]
                                                        - x_j[(cjl_gl * c_nbnxnCpuIClusterSize + j) * STRIDE_XYZ + YY])
                                          + gmx::square(x_ci[i * STRIDE_XYZ + ZZ]
                                                        - x_j[(cjl_gl * c_nbnxnCpuIClusterSize + j) * STRIDE_XYZ + ZZ])
                                  < rlist2);
                }
            }
            *numDistanceChecks += c_nbnxnCpuIClusterSize * c_nbnxnCpuIClusterSize;
        }
        if (!InRange)
        {
            jclusterLast--;
        }
    }

    if (jclusterFirst <= jclusterLast)
    {
        for (int jcluster = jclusterFirst; jcluster <= jclusterLast; jcluster++)
        {
            /* Store cj and the interaction mask */
            nbnxn_cj_t cjEntry;
            cjEntry.cj   = jGrid.cellOffset() + jcluster;
            cjEntry.excl = get_imask(excludeSubDiagonal, icluster, jcluster);
            nbl->cj.push_back(cjEntry);
        }
        /* Increase the closing index in the i list */
        nbl->ci.back().cj_ind_end = nbl->cj.size();
    }
}
#ifdef GMX_NBNXN_SIMD_4XN
#    include "pairlist_simd_4xm.h"
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
#    include "pairlist_simd_2xmm.h"
#endif
/* Plain C or SIMD4 code for making a pair list of super-cell sci vs scj.
 * Checks bounding box distances and possibly atom pair distances.
 */
static void make_cluster_list_supersub(const Grid&       iGrid,
                                       const Grid&       jGrid,
                                       NbnxnPairlistGpu* nbl,
                                       const int         sci,
                                       const int         scj,
                                       const bool        excludeSubDiagonal,
                                       const int         stride,
                                       const real*       x,
                                       const real        rlist2,
                                       const float       rbb2,
                                       int*              numDistanceChecks)
{
    NbnxnPairlistGpuWork& work = *nbl->work;

#if NBNXN_BBXXXX
    const float* pbb_ci = work.iSuperClusterData.bbPacked.data();
#else
    const BoundingBox* bb_ci = work.iSuperClusterData.bb.data();
#endif

    assert(c_nbnxnGpuClusterSize == iGrid.geometry().numAtomsICluster);
    assert(c_nbnxnGpuClusterSize == jGrid.geometry().numAtomsICluster);

    /* We generate the pairlist mainly based on bounding-box distances
     * and do atom pair distance based pruning on the GPU.
     * Only if a j-group contains a single cluster-pair, we try to prune
     * that pair based on atom distances on the CPU to avoid empty j-groups.
     */
#define PRUNE_LIST_CPU_ONE 1
#define PRUNE_LIST_CPU_ALL 0

#if PRUNE_LIST_CPU_ONE
    int ci_last = -1;
#endif

    float* d2l = work.distanceBuffer.data();

    for (int subc = 0; subc < jGrid.numClustersPerCell()[scj]; subc++)
    {
        const int cj4_ind   = work.cj_ind / c_nbnxnGpuJgroupSize;
        const int cj_offset = work.cj_ind - cj4_ind * c_nbnxnGpuJgroupSize;
        const int cj        = scj * c_gpuNumClusterPerCell + subc;

        const int cj_gl = jGrid.cellOffset() * c_gpuNumClusterPerCell + cj;

        int ci1;
        if (excludeSubDiagonal && sci == scj)
        {
            /* We want each atom/pair pair only once in the list */
            ci1 = subc + 1;
        }
        else
        {
            ci1 = iGrid.numClustersPerCell()[sci];
        }

#if NBNXN_BBXXXX
        /* Determine all ci1 bb distances in one call with SIMD4 */
        const int offset = packedBoundingBoxesIndex(cj) + (cj & (c_packedBoundingBoxesDimSize - 1));
        clusterBoundingBoxDistance2_xxxx_simd4(jGrid.packedBoundingBoxes().data() + offset, ci1,
                                               pbb_ci, d2l);
        *numDistanceChecks += c_nbnxnGpuClusterSize * 2;
#endif

        int          npair = 0;
        unsigned int imask = 0;
        /* We use a fixed upper-bound instead of ci1 to help optimization */
        for (int ci = 0; ci < c_gpuNumClusterPerCell; ci++)
        {
            if (ci == ci1)
            {
                break;
            }

#if !NBNXN_BBXXXX
            /* Determine the bb distance between ci and cj */
            d2l[ci] = clusterBoundingBoxDistance2(bb_ci[ci], jGrid.jBoundingBoxes()[cj]);
            *numDistanceChecks += 2;
#endif
            float d2 = d2l[ci];

#if PRUNE_LIST_CPU_ALL
            /* Check if the distance is within the distance where
             * we use only the bounding box distance rbb,
             * or within the cut-off and there is at least one atom pair
             * within the cut-off. This check is very costly.
             */
            *numDistanceChecks += c_nbnxnGpuClusterSize * c_nbnxnGpuClusterSize;
            if (d2 < rbb2 || (d2 < rlist2 && clusterpair_in_range(work, ci, cj_gl, stride, x, rlist2)))
#else
            /* Check if the distance between the two bounding boxes
             * is within the pair-list cut-off.
             */
            if (d2 < rlist2)
#endif
            {
                /* Flag this i-subcell to be taken into account */
                imask |= (1U << (cj_offset * c_gpuNumClusterPerCell + ci));

#if PRUNE_LIST_CPU_ONE
                ci_last = ci;
#endif

                npair++;
            }
        }

#if PRUNE_LIST_CPU_ONE
        /* If we only found 1 pair, check if any atoms are actually
         * within the cut-off, so we could get rid of it.
         */
        if (npair == 1 && d2l[ci_last] >= rbb2
            && !clusterpair_in_range(work, ci_last, cj_gl, stride, x, rlist2))
        {
            /* Avoid flagging the pair */
            imask &= ~(1U << (cj_offset * c_gpuNumClusterPerCell + ci_last));
            npair--;
        }
#endif

        if (npair > 0)
        {
            /* We have at least one cluster pair: add a j-entry */
            if (static_cast<size_t>(cj4_ind) == nbl->cj4.size())
            {
                nbl->cj4.resize(nbl->cj4.size() + 1);
            }
            nbnxn_cj4_t* cj4 = &nbl->cj4[cj4_ind];

            cj4->cj[cj_offset] = cj_gl;

            /* Set the exclusions for the ci==sj entry.
             * Here we don't bother to check if this entry is actually flagged,
             * as it will nearly always be in the list.
             */
            if (excludeSubDiagonal && sci == scj)
            {
                setSelfAndNewtonExclusionsGpu(nbl, cj4_ind, cj_offset, subc);
            }

            /* Copy the cluster interaction mask to the list */
            for (int w = 0; w < c_nbnxnGpuClusterpairSplit; w++)
            {
                cj4->imei[w].imask |= imask;
            }

            nbl->work->cj_ind++;

            /* Keep the count */
            nbl->nci_tot += npair;

            /* Increase the closing index in i super-cell list */
            nbl->sci.back().cj4_ind_end =
                    (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1) / c_nbnxnGpuJgroupSize;
        }
    }
}
/* Returns how many contiguous j-clusters we have starting in the i-list */
template<typename CjListType>
static int numContiguousJClusters(const int                       cjIndexStart,
                                  const int                       cjIndexEnd,
                                  gmx::ArrayRef<const CjListType> cjList)
{
    const int firstJCluster = nblCj(cjList, cjIndexStart);

    int numContiguous = 0;

    while (cjIndexStart + numContiguous < cjIndexEnd
           && nblCj(cjList, cjIndexStart + numContiguous) == firstJCluster + numContiguous)
    {
        numContiguous++;
    }

    return numContiguous;
}
/*! \internal
 * \brief Helper struct for efficient searching for excluded atoms in a j-list
 */
struct JListRanges
{
    /*! \brief Constructs a j-list range from \p cjList with the given index range */
    template<typename CjListType>
    JListRanges(int cjIndexStart, int cjIndexEnd, gmx::ArrayRef<const CjListType> cjList);

    int cjIndexStart; //!< The start index in the j-list
    int cjIndexEnd;   //!< The end index in the j-list
    int cjFirst;      //!< The j-cluster with index cjIndexStart
    int cjLast;       //!< The j-cluster with index cjIndexEnd-1
    int numDirect;    //!< Up to cjIndexStart+numDirect the j-clusters are cjFirst + the index offset
};

template<typename CjListType>
JListRanges::JListRanges(int cjIndexStart, int cjIndexEnd, gmx::ArrayRef<const CjListType> cjList) :
    cjIndexStart(cjIndexStart),
    cjIndexEnd(cjIndexEnd)
{
    GMX_ASSERT(cjIndexEnd > cjIndexStart, "JListRanges should only be called with non-empty lists");

    cjFirst = nblCj(cjList, cjIndexStart);
    cjLast  = nblCj(cjList, cjIndexEnd - 1);

    /* Determine how many contiguous j-cells we have starting
     * from the first i-cell. This number can be used to directly
     * calculate j-cell indices for excluded atoms.
     */
    numDirect = numContiguousJClusters(cjIndexStart, cjIndexEnd, cjList);
}
/* Return the index of \p jCluster in the given range or -1 when not present
 *
 * Note: This code is executed very often and therefore performance is
 *       important. It should be inlined and fully optimized.
 */
template<typename CjListType>
static inline int findJClusterInJList(int                             jCluster,
                                      const JListRanges&              ranges,
                                      gmx::ArrayRef<const CjListType> cjList)
{
    int index;

    if (jCluster < ranges.cjFirst + ranges.numDirect)
    {
        /* We can calculate the index directly using the offset */
        index = ranges.cjIndexStart + jCluster - ranges.cjFirst;
    }
    else
    {
        /* Search for jCluster using bisection */
        index          = -1;
        int rangeStart = ranges.cjIndexStart + ranges.numDirect;
        int rangeEnd   = ranges.cjIndexEnd;
        int rangeMiddle;
        while (index == -1 && rangeStart < rangeEnd)
        {
            rangeMiddle = (rangeStart + rangeEnd) >> 1;

            const int clusterMiddle = nblCj(cjList, rangeMiddle);

            if (jCluster == clusterMiddle)
            {
                index = rangeMiddle;
            }
            else if (jCluster < clusterMiddle)
            {
                rangeEnd = rangeMiddle;
            }
            else
            {
                rangeStart = rangeMiddle + 1;
            }
        }
    }

    return index;
}
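
/* Worked example (illustrative): for a j-list containing clusters
 * (10, 11, 12, 17, 20) starting at index 0, numDirect == 3, so jCluster 12 is
 * found directly at index 0 + (12 - 10) == 2, while 17 and 20 are located by
 * bisection over the remaining sorted range; a cluster not present, e.g. 13,
 * yields -1.
 */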
// TODO: Get rid of the two functions below by renaming sci to ci (or something better)

/* Return the i-entry in the list we are currently operating on */
static nbnxn_ci_t* getOpenIEntry(NbnxnPairlistCpu* nbl)
{
    return &nbl->ci.back();
}

/* Return the i-entry in the list we are currently operating on */
static nbnxn_sci_t* getOpenIEntry(NbnxnPairlistGpu* nbl)
{
    return &nbl->sci.back();
}
/* Set all atom-pair exclusions for a simple type list i-entry
 *
 * Set all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for simple list entry iEntry.
 */
static void setExclusionsForIEntry(const Nbnxm::GridSet&   gridSet,
                                   NbnxnPairlistCpu*       nbl,
                                   gmx_bool                diagRemoved,
                                   int                     na_cj_2log,
                                   const nbnxn_ci_t&       iEntry,
                                   const ListOfLists<int>& exclusions)
{
    if (iEntry.cj_ind_end == iEntry.cj_ind_start)
    {
        /* Empty list: no exclusions */
        return;
    }

    const JListRanges ranges(iEntry.cj_ind_start, iEntry.cj_ind_end, gmx::makeConstArrayRef(nbl->cj));

    const int iCluster = iEntry.ci;

    gmx::ArrayRef<const int> cell        = gridSet.cells();
    gmx::ArrayRef<const int> atomIndices = gridSet.atomIndices();

    /* Loop over the atoms in the i-cluster */
    for (int i = 0; i < nbl->na_ci; i++)
    {
        const int iIndex = iCluster * nbl->na_ci + i;
        const int iAtom  = atomIndices[iIndex];
        if (iAtom >= 0)
        {
            /* Loop over the topology-based exclusions for this i-atom */
            for (const int jAtom : exclusions[iAtom])
            {
                if (jAtom == iAtom)
                {
                    /* The self exclusions are already set, save some time */
                    continue;
                }
                /* Get the index of the j-atom in the nbnxn atom data */
                const int jIndex = cell[jAtom];
                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                if (diagRemoved && jIndex <= iIndex)
                {
                    continue;
                }

                const int jCluster = (jIndex >> na_cj_2log);

                /* Could the j-cluster be in our list? */
                if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
                {
                    const int index =
                            findJClusterInJList(jCluster, ranges, gmx::makeConstArrayRef(nbl->cj));
                    if (index >= 0)
                    {
                        /* We found an exclusion, clear the corresponding
                         * interaction bit.
                         */
                        const int innerJ = jIndex - (jCluster << na_cj_2log);

                        nbl->cj[index].excl &= ~(1U << ((i << na_cj_2log) + innerJ));
                    }
                }
            }
        }
    }
}
/* Add a new i-entry to the FEP list and copy the i-properties */
static inline void fep_list_new_nri_copy(t_nblist* nlist)
{
    /* Add a new i-entry */
    nlist->nri++;

    assert(nlist->nri < nlist->maxnri);

    /* Duplicate the last i-entry, except for jindex, which continues */
    nlist->iinr[nlist->nri]   = nlist->iinr[nlist->nri - 1];
    nlist->shift[nlist->nri]  = nlist->shift[nlist->nri - 1];
    nlist->gid[nlist->nri]    = nlist->gid[nlist->nri - 1];
    nlist->jindex[nlist->nri] = nlist->nrj;
}
/* Reallocate FEP list for size nl->maxnri, TODO: replace by C++ */
static void reallocate_nblist(t_nblist* nl)
{
    if (gmx_debug_at)
    {
        fprintf(debug,
                "reallocating neighborlist (ielec=%d, ivdw=%d, igeometry=%d, type=%d), maxnri=%d\n",
                nl->ielec, nl->ivdw, nl->igeometry, nl->type, nl->maxnri);
    }
    srenew(nl->iinr, nl->maxnri);
    srenew(nl->gid, nl->maxnri);
    srenew(nl->shift, nl->maxnri);
    srenew(nl->jindex, nl->maxnri + 1);
}
/* For load balancing of the free-energy lists over threads, we set
 * the maximum nrj size of an i-entry to 40. This leads to good
 * load balancing in the worst case scenario of a single perturbed
 * particle on 16 threads, while not introducing significant overhead.
 * Note that half of the perturbed pairs will anyhow end up in very small lists,
 * since non-perturbed i-particles will see few perturbed j-particles.
 */
const int max_nrj_fep = 40;
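
/* Illustrative arithmetic for the choice above: a single perturbed particle with
 * roughly 600 j-partners within the cut-off is split into about 15 i-entries of
 * at most 40 j-atoms each, which spreads well over 16 threads; without the cap
 * it would form one large list processed by a single thread.
 */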
/* Exclude the perturbed pairs from the Verlet list. This is only done to avoid
 * singularities for overlapping particles (0/0), since the charges and
 * LJ parameters have been zeroed in the nbnxn data structure.
 * Simultaneously make a group pair list for the perturbed pairs.
 */
static void make_fep_list(gmx::ArrayRef<const int> atomIndices,
                          const nbnxn_atomdata_t*  nbat,
                          NbnxnPairlistCpu*        nbl,
                          gmx_bool                 bDiagRemoved,
                          nbnxn_ci_t*              nbl_ci,
                          real gmx_unused          shx,
                          real gmx_unused          shy,
                          real gmx_unused          shz,
                          real gmx_unused          rlist_fep2,
                          const Grid&              iGrid,
                          const Grid&              jGrid,
                          t_nblist*                nlist)
{
    int      ci, cj_ind_start, cj_ind_end, cja, cjr;
    int      nri_max;
    int      gid_i = 0, gid_j, gid;
    int      egp_shift, egp_mask;
    int      gid_cj = 0;
    int      ind_i, ind_j, ai, aj;
    int      nri;
    gmx_bool bFEP_i, bFEP_i_all;
    if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
    {
        return;
    }

    ci = nbl_ci->ci;

    cj_ind_start = nbl_ci->cj_ind_start;
    cj_ind_end   = nbl_ci->cj_ind_end;

    /* In worst case we have alternating energy groups
     * and create #atom-pair lists, which means we need the size
     * of a cluster pair (na_ci*na_cj) times the number of cj's.
     */
    nri_max = nbl->na_ci * nbl->na_cj * (cj_ind_end - cj_ind_start);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }

    const int numAtomsJCluster = jGrid.geometry().numAtomsJCluster;

    const nbnxn_atomdata_t::Params& nbatParams = nbat->params();

    const int ngid = nbatParams.nenergrp;

    /* TODO: Consider adding a check in grompp and changing this to an assert */
    const int numBitsInEnergyGroupIdsForAtomsInJCluster = sizeof(gid_cj) * 8;
    if (ngid * numAtomsJCluster > numBitsInEnergyGroupIdsForAtomsInJCluster)
    {
        gmx_fatal(FARGS,
                  "The Verlet scheme with %dx%d kernels and free-energy only supports up to %zu "
                  "energy groups",
                  iGrid.geometry().numAtomsICluster, numAtomsJCluster,
                  (sizeof(gid_cj) * 8) / numAtomsJCluster);
    }

    egp_shift = nbatParams.neg_2log;
    egp_mask  = (1 << egp_shift) - 1;
    /* Loop over the atoms in the i sub-cell */
    bFEP_i_all = TRUE;
    for (int i = 0; i < nbl->na_ci; i++)
    {
        ind_i = ci * nbl->na_ci + i;
        ai    = atomIndices[ind_i];
        if (ai >= 0)
        {
            nri                    = nlist->nri;
            nlist->jindex[nri + 1] = nlist->jindex[nri];
            nlist->iinr[nri]       = ai;
            /* The actual energy group pair index is set later */
            nlist->gid[nri]   = 0;
            nlist->shift[nri] = nbl_ci->shift & NBNXN_CI_SHIFT;

            bFEP_i = iGrid.atomIsPerturbed(ci - iGrid.cellOffset(), i);

            bFEP_i_all = bFEP_i_all && bFEP_i;

            if (nlist->nrj + (cj_ind_end - cj_ind_start) * nbl->na_cj > nlist->maxnrj)
            {
                nlist->maxnrj = over_alloc_small(nlist->nrj + (cj_ind_end - cj_ind_start) * nbl->na_cj);
                srenew(nlist->jjnr, nlist->maxnrj);
                srenew(nlist->excl_fep, nlist->maxnrj);
            }

            if (ngid > 1)
            {
                gid_i = (nbatParams.energrp[ci] >> (egp_shift * i)) & egp_mask;
            }
            for (int cj_ind = cj_ind_start; cj_ind < cj_ind_end; cj_ind++)
            {
                unsigned int fep_cj;

                cja = nbl->cj[cj_ind].cj;

                if (numAtomsJCluster == jGrid.geometry().numAtomsICluster)
                {
                    cjr    = cja - jGrid.cellOffset();
                    fep_cj = jGrid.fepBits(cjr);
                    if (ngid > 1)
                    {
                        gid_cj = nbatParams.energrp[cja];
                    }
                }
                else if (2 * numAtomsJCluster == jGrid.geometry().numAtomsICluster)
                {
                    cjr = cja - jGrid.cellOffset() * 2;
                    /* Extract half of the ci fep/energrp mask */
                    fep_cj = (jGrid.fepBits(cjr >> 1) >> ((cjr & 1) * numAtomsJCluster))
                             & ((1 << numAtomsJCluster) - 1);
                    if (ngid > 1)
                    {
                        gid_cj = nbatParams.energrp[cja >> 1] >> ((cja & 1) * numAtomsJCluster * egp_shift)
                                 & ((1 << (numAtomsJCluster * egp_shift)) - 1);
                    }
                }
                else
                {
                    cjr = cja - (jGrid.cellOffset() >> 1);
                    /* Combine two ci fep masks/energrp */
                    fep_cj = jGrid.fepBits(cjr * 2)
                             + (jGrid.fepBits(cjr * 2 + 1) << jGrid.geometry().numAtomsICluster);
                    if (ngid > 1)
                    {
                        gid_cj = nbatParams.energrp[cja * 2]
                                 + (nbatParams.energrp[cja * 2 + 1]
                                    << (jGrid.geometry().numAtomsICluster * egp_shift));
                    }
                }
                if (bFEP_i || fep_cj != 0)
                {
                    for (int j = 0; j < nbl->na_cj; j++)
                    {
                        /* Is this interaction perturbed and not excluded? */
                        ind_j = cja * nbl->na_cj + j;
                        aj    = atomIndices[ind_j];
                        if (aj >= 0 && (bFEP_i || (fep_cj & (1 << j))) && (!bDiagRemoved || ind_j >= ind_i))
                        {
                            if (ngid > 1)
                            {
                                gid_j = (gid_cj >> (j * egp_shift)) & egp_mask;
                                gid   = GID(gid_i, gid_j, ngid);

                                if (nlist->nrj > nlist->jindex[nri] && nlist->gid[nri] != gid)
                                {
                                    /* Energy group pair changed: new list */
                                    fep_list_new_nri_copy(nlist);
                                    nri = nlist->nri;
                                }
                                nlist->gid[nri] = gid;
                            }

                            if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                            {
                                fep_list_new_nri_copy(nlist);
                                nri = nlist->nri;
                            }

                            /* Add it to the FEP list */
                            nlist->jjnr[nlist->nrj] = aj;
                            nlist->excl_fep[nlist->nrj] =
                                    (nbl->cj[cj_ind].excl >> (i * nbl->na_cj + j)) & 1;
                            nlist->nrj++;

                            /* Exclude it from the normal list.
                             * Note that the charge has been set to zero,
                             * but we need to avoid 0/0, as perturbed atoms
                             * can be on top of each other.
                             */
                            nbl->cj[cj_ind].excl &= ~(1U << (i * nbl->na_cj + j));
                        }
                    }
                }
            }

            if (nlist->nrj > nlist->jindex[nri])
            {
                /* Actually add this new, non-empty, list */
                nlist->nri++;
                nlist->jindex[nlist->nri] = nlist->nrj;
            }
        }
    }

    if (bFEP_i_all)
    {
        /* All interactions are perturbed, we can skip this entry */
        nbl_ci->cj_ind_end = cj_ind_start;
        nbl->ncjInUse -= cj_ind_end - cj_ind_start;
    }
}
/* Return the index of j-cluster cj within its cj4 group */
static inline int cj_mod_cj4(int cj)
{
    return cj & (c_nbnxnGpuJgroupSize - 1);
}

/* Convert a j-cluster index to a cj4 group index */
static inline int cj_to_cj4(int cj)
{
    return cj / c_nbnxnGpuJgroupSize;
}

/* Return the index of a j-atom within a warp */
static inline int a_mod_wj(int a)
{
    return a & (c_nbnxnGpuClusterSize / c_nbnxnGpuClusterpairSplit - 1);
}
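
/* Worked example (illustrative; assumes c_nbnxnGpuJgroupSize == 4,
 * c_nbnxnGpuClusterSize == 8 and c_nbnxnGpuClusterpairSplit == 2):
 *   cj_mod_cj4(cj) == cj & 3, cj_to_cj4(cj) == cj / 4, and
 *   a_mod_wj(a) == a & 3, i.e. the j-atom's index within its 4-atom warp half.
 */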
/* As make_fep_list above, but for super/sub lists. */
static void make_fep_list(gmx::ArrayRef<const int> atomIndices,
                          const nbnxn_atomdata_t*  nbat,
                          NbnxnPairlistGpu*        nbl,
                          gmx_bool                 bDiagRemoved,
                          const nbnxn_sci_t*       nbl_sci,
                          real                     shx,
                          real                     shy,
                          real                     shz,
                          real                     rlist_fep2,
                          const Grid&              iGrid,
                          const Grid&              jGrid,
                          t_nblist*                nlist)
{
    int      nri_max;
    int      c_abs;
    int      ind_i, ind_j, ai, aj;
    int      nri;
    gmx_bool bFEP_i;
    real     xi, yi, zi;

    const nbnxn_cj4_t* cj4;

    const int numJClusterGroups = nbl_sci->numJClusterGroups();
    if (numJClusterGroups == 0)
    {
        /* Empty list */
        return;
    }

    const int sci = nbl_sci->sci;

    const int cj4_ind_start = nbl_sci->cj4_ind_start;
    const int cj4_ind_end   = nbl_sci->cj4_ind_end;

    /* Here we process one super-cell, max #atoms na_sc, versus a list of
     * cj4 entries, each with max c_nbnxnGpuJgroupSize cj's, each
     * of size na_cj atoms.
     * On the GPU we don't support energy groups (yet).
     * So for each of the na_sc i-atoms, we need max one FEP list
     * for each max_nrj_fep j-atoms.
     */
    nri_max = nbl->na_sc * nbl->na_cj * (1 + (numJClusterGroups * c_nbnxnGpuJgroupSize) / max_nrj_fep);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }
    /* Loop over the atoms in the i super-cluster */
    for (int c = 0; c < c_gpuNumClusterPerCell; c++)
    {
        c_abs = sci * c_gpuNumClusterPerCell + c;

        for (int i = 0; i < nbl->na_ci; i++)
        {
            ind_i = c_abs * nbl->na_ci + i;
            ai    = atomIndices[ind_i];
            if (ai >= 0)
            {
                nri                    = nlist->nri;
                nlist->jindex[nri + 1] = nlist->jindex[nri];
                nlist->iinr[nri]       = ai;
                /* With GPUs, energy groups are not supported */
                nlist->gid[nri]   = 0;
                nlist->shift[nri] = nbl_sci->shift & NBNXN_CI_SHIFT;

                bFEP_i = iGrid.atomIsPerturbed(c_abs - iGrid.cellOffset() * c_gpuNumClusterPerCell, i);

                xi = nbat->x()[ind_i * nbat->xstride + XX] + shx;
                yi = nbat->x()[ind_i * nbat->xstride + YY] + shy;
                zi = nbat->x()[ind_i * nbat->xstride + ZZ] + shz;

                const int nrjMax = nlist->nrj + numJClusterGroups * c_nbnxnGpuJgroupSize * nbl->na_cj;
                if (nrjMax > nlist->maxnrj)
                {
                    nlist->maxnrj = over_alloc_small(nrjMax);
                    srenew(nlist->jjnr, nlist->maxnrj);
                    srenew(nlist->excl_fep, nlist->maxnrj);
                }
                for (int cj4_ind = cj4_ind_start; cj4_ind < cj4_ind_end; cj4_ind++)
                {
                    cj4 = &nbl->cj4[cj4_ind];

                    for (int gcj = 0; gcj < c_nbnxnGpuJgroupSize; gcj++)
                    {
                        if ((cj4->imei[0].imask & (1U << (gcj * c_gpuNumClusterPerCell + c))) == 0)
                        {
                            /* Skip this ci for this cj */
                            continue;
                        }

                        const int cjr = cj4->cj[gcj] - jGrid.cellOffset() * c_gpuNumClusterPerCell;

                        if (bFEP_i || jGrid.clusterIsPerturbed(cjr))
                        {
                            for (int j = 0; j < nbl->na_cj; j++)
                            {
                                /* Is this interaction perturbed and not excluded? */
                                ind_j = (jGrid.cellOffset() * c_gpuNumClusterPerCell + cjr) * nbl->na_cj + j;
                                aj    = atomIndices[ind_j];
                                if (aj >= 0 && (bFEP_i || jGrid.atomIsPerturbed(cjr, j))
                                    && (!bDiagRemoved || ind_j >= ind_i))
                                {
                                    int          excl_pair;
                                    unsigned int excl_bit;
                                    real         dx, dy, dz;

                                    const int jHalf =
                                            j / (c_nbnxnGpuClusterSize / c_nbnxnGpuClusterpairSplit);
                                    nbnxn_excl_t& excl = get_exclusion_mask(nbl, cj4_ind, jHalf);

                                    excl_pair = a_mod_wj(j) * nbl->na_ci + i;
                                    excl_bit  = (1U << (gcj * c_gpuNumClusterPerCell + c));

                                    dx = nbat->x()[ind_j * nbat->xstride + XX] - xi;
                                    dy = nbat->x()[ind_j * nbat->xstride + YY] - yi;
                                    dz = nbat->x()[ind_j * nbat->xstride + ZZ] - zi;

                                    /* The unpruned GPU list has more than 2/3
                                     * of the atom pairs beyond rlist. Using
                                     * this list will cause a lot of overhead
                                     * in the CPU FEP kernels, especially
                                     * relative to the fast GPU kernels.
                                     * So we prune the FEP list here.
                                     */
                                    if (dx * dx + dy * dy + dz * dz < rlist_fep2)
                                    {
                                        if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                                        {
                                            fep_list_new_nri_copy(nlist);
                                            nri = nlist->nri;
                                        }

                                        /* Add it to the FEP list */
                                        nlist->jjnr[nlist->nrj] = aj;
                                        nlist->excl_fep[nlist->nrj] =
                                                (excl.pair[excl_pair] & excl_bit) ? 1 : 0;
                                        nlist->nrj++;
                                    }

                                    /* Exclude it from the normal list.
                                     * Note that the charge and LJ parameters have
                                     * been set to zero, but we need to avoid 0/0,
                                     * as perturbed atoms can be on top of each other.
                                     */
                                    excl.pair[excl_pair] &= ~excl_bit;
                                }
                            }

                            /* Note that we could mask out this pair in imask
                             * if all i- and/or all j-particles are perturbed.
                             * But since the perturbed pairs on the CPU will
                             * take an order of magnitude more time, the GPU
                             * will finish before the CPU and there is no gain.
                             */
                        }
                    }
                }

                if (nlist->nrj > nlist->jindex[nri])
                {
                    /* Actually add this new, non-empty, list */
                    nlist->nri++;
                    nlist->jindex[nlist->nri] = nlist->nrj;
                }
            }
        }
    }
}
/* Set all atom-pair exclusions for a GPU type list i-entry
 *
 * Sets all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for i-super-cluster list entry iEntry.
 */
static void setExclusionsForIEntry(const Nbnxm::GridSet&   gridSet,
                                   NbnxnPairlistGpu*       nbl,
                                   gmx_bool                diagRemoved,
                                   int gmx_unused          na_cj_2log,
                                   const nbnxn_sci_t&      iEntry,
                                   const ListOfLists<int>& exclusions)
{
    if (iEntry.numJClusterGroups() == 0)
    {
        /* Empty list */
        return;
    }

    /* Set the search ranges using start and end j-cluster indices.
     * Note that here we can not use cj4_ind_end, since the last cj4
     * can be only partially filled, so we use cj_ind.
     */
    const JListRanges ranges(iEntry.cj4_ind_start * c_nbnxnGpuJgroupSize, nbl->work->cj_ind,
                             gmx::makeConstArrayRef(nbl->cj4));

    GMX_ASSERT(nbl->na_ci == c_nbnxnGpuClusterSize, "na_ci should match the GPU cluster size");
    constexpr int c_clusterSize      = c_nbnxnGpuClusterSize;
    constexpr int c_superClusterSize = c_nbnxnGpuNumClusterPerSupercluster * c_nbnxnGpuClusterSize;

    const int iSuperCluster = iEntry.sci;

    gmx::ArrayRef<const int> atomIndices = gridSet.atomIndices();
    gmx::ArrayRef<const int> cell        = gridSet.cells();

    /* Loop over the atoms in the i super-cluster */
    for (int i = 0; i < c_superClusterSize; i++)
    {
        const int iIndex = iSuperCluster * c_superClusterSize + i;
        const int iAtom  = atomIndices[iIndex];
        if (iAtom >= 0)
        {
            const int iCluster = i / c_clusterSize;

            /* Loop over the topology-based exclusions for this i-atom */
            for (const int jAtom : exclusions[iAtom])
            {
                if (jAtom == iAtom)
                {
                    /* The self exclusions are already set, save some time */
                    continue;
                }
                /* Get the index of the j-atom in the nbnxn atom data */
                const int jIndex = cell[jAtom];
                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                /* NOTE: We would like to use iIndex on the right hand side,
                 * but that makes this routine 25% slower with gcc6/7.
                 * Even using c_superClusterSize makes it slower.
                 * Either of these changes triggers peeling of the exclIndex
                 * loop, which apparently leads to far less efficient code.
                 */
                if (diagRemoved && jIndex <= iSuperCluster * nbl->na_sc + i)
                {
                    continue;
                }

                const int jCluster = jIndex / c_clusterSize;

                /* Check whether the cluster is in our list */
                if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
                {
                    const int index =
                            findJClusterInJList(jCluster, ranges, gmx::makeConstArrayRef(nbl->cj4));
                    if (index >= 0)
                    {
                        /* We found an exclusion, clear the corresponding
                         * interaction bit.
                         */
                        const unsigned int pairMask =
                                (1U << (cj_mod_cj4(index) * c_gpuNumClusterPerCell + iCluster));
                        /* Check if the i-cluster interacts with the j-cluster */
                        if (nbl_imask0(nbl, index) & pairMask)
                        {
                            const int innerI = (i & (c_clusterSize - 1));
                            const int innerJ = (jIndex & (c_clusterSize - 1));

                            /* Determine which j-half (CUDA warp) we are in */
                            const int jHalf = innerJ / (c_clusterSize / c_nbnxnGpuClusterpairSplit);

                            nbnxn_excl_t& interactionMask =
                                    get_exclusion_mask(nbl, cj_to_cj4(index), jHalf);

                            interactionMask.pair[a_mod_wj(innerJ) * c_clusterSize + innerI] &= ~pairMask;
                        }
                    }
                }
            }
        }
    }
}
/* Make a new ci entry at the back of nbl->ci */
static void addNewIEntry(NbnxnPairlistCpu* nbl, int ci, int shift, int flags)
{
    nbnxn_ci_t ciEntry;
    ciEntry.ci    = ci;
    ciEntry.shift = shift;
    /* Store the interaction flags along with the shift */
    ciEntry.shift |= flags;
    ciEntry.cj_ind_start = nbl->cj.size();
    ciEntry.cj_ind_end   = nbl->cj.size();
    nbl->ci.push_back(ciEntry);
}

/* Make a new sci entry at the back of nbl->sci */
static void addNewIEntry(NbnxnPairlistGpu* nbl, int sci, int shift, int gmx_unused flags)
{
    nbnxn_sci_t sciEntry;
    sciEntry.sci           = sci;
    sciEntry.shift         = shift;
    sciEntry.cj4_ind_start = nbl->cj4.size();
    sciEntry.cj4_ind_end   = nbl->cj4.size();

    nbl->sci.push_back(sciEntry);
}
1998 /* Sort the simple j-list cj on exclusions.
1999 * Entries with exclusions will all be sorted to the beginning of the list.
2001 static void sort_cj_excl(nbnxn_cj_t* cj, int ncj, NbnxnPairlistCpuWork* work)
2003 work->cj.resize(ncj);
2005 /* Make a list of the j-cells involving exclusions */
2007 for (int j = 0; j < ncj; j++)
2009 if (cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
2011 work->cj[jnew++] = cj[j];
2014 /* Skip the sort if there are no exclusions, or if the only entry with exclusions is already first */
2015 if (!((jnew == 0) || (jnew == 1 && cj[0].excl != NBNXN_INTERACTION_MASK_ALL)))
2017 for (int j = 0; j < ncj; j++)
2019 if (cj[j].excl == NBNXN_INTERACTION_MASK_ALL)
2021 work->cj[jnew++] = cj[j];
2024 for (int j = 0; j < ncj; j++)
2026 cj[j] = work->cj[j];
2031 /* Close this simple list i entry */
2032 static void closeIEntry(NbnxnPairlistCpu* nbl,
2033 int gmx_unused sp_max_av,
2034 gmx_bool gmx_unused progBal,
2035 float gmx_unused nsp_tot_est,
2036 int gmx_unused thread,
2037 int gmx_unused nthread)
2039 nbnxn_ci_t& ciEntry = nbl->ci.back();
2041 /* All content of the new ci entry has already been filled correctly;
2042 * we only need to sort and increase the counts, or remove the entry when empty.
2044 const int jlen = ciEntry.cj_ind_end - ciEntry.cj_ind_start;
2047 sort_cj_excl(nbl->cj.data() + ciEntry.cj_ind_start, jlen, nbl->work.get());
2049 /* The counts below are used for non-bonded pair/flop counts
2050 * and should therefore match the available kernel setups.
2052 if (!(ciEntry.shift & NBNXN_CI_DO_COUL(0)))
2054 nbl->work->ncj_noq += jlen;
2056 else if ((ciEntry.shift & NBNXN_CI_HALF_LJ(0)) || !(ciEntry.shift & NBNXN_CI_DO_LJ(0)))
2058 nbl->work->ncj_hlj += jlen;
2063 /* Entry is empty: remove it */
2068 /* Split sci entry for load balancing on the GPU.
2069 * Splitting ensures we have enough lists to fully utilize the whole GPU.
2070 * With progBal we generate progressively smaller lists, which improves
2071 * load balancing. As we only know the current count on our own thread,
2072 * we will need to estimate the current total number of i-entries.
2073 * As the lists get concatenated later, this estimate depends
2074 * both on nthread and our own thread index.
2076 static void split_sci_entry(NbnxnPairlistGpu* nbl,
2089 /* Estimate the total number of ci's of the nblist combined
2090 * over all threads using the target number of ci's.
2092 nsp_est = (nsp_tot_est * thread) / nthread + nbl->nci_tot;
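/* Example with assumed numbers: thread 2 of 4 with nsp_tot_est = 1000
 * and nci_tot = 80 gives nsp_est = 1000 * 2 / 4 + 80 = 580.
 */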
2094 /* The first ci blocks should be larger, to avoid overhead.
2095 * The last ci blocks should be smaller, to improve load balancing.
2096 * The factor 3/2 makes the first block 3/2 times the target average
2097 * and ensures that the total number of blocks ends up equal to
2098 * that of equally sized blocks of size nsp_target_av.
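* Evaluating the formula below at the extremes: the first block
* (nsp_est ~ 0) gets nsp_max ~ 1.5 * nsp_target_av, while the last
* block (nsp_est ~ nsp_tot_est) gets nsp_max ~ 0.75 * nsp_target_av.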
2100 nsp_max = static_cast<int>(nsp_target_av * (nsp_tot_est * 1.5 / (nsp_est + nsp_tot_est)));
2104 nsp_max = nsp_target_av;
2107 const int cj4_start = nbl->sci.back().cj4_ind_start;
2108 const int cj4_end = nbl->sci.back().cj4_ind_end;
2109 const int j4len = cj4_end - cj4_start;
2111 if (j4len > 1 && j4len * c_gpuNumClusterPerCell * c_nbnxnGpuJgroupSize > nsp_max)
2113 /* Modify the last ci entry and process the cj4's again */
2119 for (int cj4 = cj4_start; cj4 < cj4_end; cj4++)
2121 int nsp_cj4_p = nsp_cj4;
2122 /* Count the number of cluster pairs in this cj4 group */
2124 for (int p = 0; p < c_gpuNumClusterPerCell * c_nbnxnGpuJgroupSize; p++)
2126 nsp_cj4 += (nbl->cj4[cj4].imei[0].imask >> p) & 1;
2129 /* If adding the current cj4 with nsp_cj4 pairs gets us further
2130 * away from our target nsp_max, split the list before this cj4.
2132 if (nsp > 0 && nsp_max - nsp < nsp + nsp_cj4 - nsp_max)
2134 /* Split the list at cj4 */
2135 nbl->sci.back().cj4_ind_end = cj4;
2136 /* Create a new sci entry */
2138 sciNew.sci = nbl->sci.back().sci;
2139 sciNew.shift = nbl->sci.back().shift;
2140 sciNew.cj4_ind_start = cj4;
2141 nbl->sci.push_back(sciNew);
2144 nsp_cj4_e = nsp_cj4_p;
2150 /* Put the remaining cj4's in the last sci entry */
2151 nbl->sci.back().cj4_ind_end = cj4_end;
2153 /* Possibly balance out the last two sci's
2154 * by moving the last cj4 of the second-to-last sci.
2156 if (nsp_sci - nsp_cj4_e >= nsp + nsp_cj4_e)
2158 GMX_ASSERT(nbl->sci.size() >= 2, "We expect at least two elements");
2159 nbl->sci[nbl->sci.size() - 2].cj4_ind_end--;
2160 nbl->sci[nbl->sci.size() - 1].cj4_ind_start--;
2165 /* Close this super/sub list i entry */
2166 static void closeIEntry(NbnxnPairlistGpu* nbl, int nsp_max_av, gmx_bool progBal, float nsp_tot_est, int thread, int nthread)
2168 nbnxn_sci_t& sciEntry = *getOpenIEntry(nbl);
2170 /* All content of the new sci entry has already been filled correctly;
2171 * we only need to potentially split it, or remove it when empty.
2173 int j4len = sciEntry.numJClusterGroups();
2176 /* We can only have complete blocks of 4 j-entries in a list,
2177 * so round the count up before closing.
2179 int ncj4 = (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1) / c_nbnxnGpuJgroupSize;
2180 nbl->work->cj_ind = ncj4 * c_nbnxnGpuJgroupSize;
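/* E.g., with c_nbnxnGpuJgroupSize == 4, 10 j-clusters round up to
 * ncj4 == 3 groups (12 slots), with the last group only partially filled.
 */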
2184 /* Measure the size of the new entry and potentially split it */
2185 split_sci_entry(nbl, nsp_max_av, progBal, nsp_tot_est, thread, nthread);
2190 /* Entry is empty: remove it */
2191 nbl->sci.pop_back();
2195 /* Syncs the working array before adding another grid pair to the list (no-op for CPU lists) */
2196 static void sync_work(NbnxnPairlistCpu gmx_unused* nbl) {}
2198 /* Syncs the working array before adding another grid pair to the GPU list */
2199 static void sync_work(NbnxnPairlistGpu* nbl)
2201 nbl->work->cj_ind = nbl->cj4.size() * c_nbnxnGpuJgroupSize;
2204 /* Clears an NbnxnPairlistCpu data structure */
2205 static void clear_pairlist(NbnxnPairlistCpu* nbl)
2211 nbl->ciOuter.clear();
2212 nbl->cjOuter.clear();
2214 nbl->work->ncj_noq = 0;
2215 nbl->work->ncj_hlj = 0;
2218 /* Clears an NbnxnPairlistGpu data structure */
2219 static void clear_pairlist(NbnxnPairlistGpu* nbl)
2223 nbl->excl.resize(1);
2227 /* Clears an atom-atom-style pair list */
2228 static void clear_pairlist_fep(t_nblist* nl)
2232 if (nl->jindex == nullptr)
2234 snew(nl->jindex, 1);
2239 /* Sets a simple list i-cell bounding box, including PBC shift */
2241 set_icell_bb_simple(gmx::ArrayRef<const BoundingBox> bb, int ci, real shx, real shy, real shz, BoundingBox* bb_ci)
2243 bb_ci->lower.x = bb[ci].lower.x + shx;
2244 bb_ci->lower.y = bb[ci].lower.y + shy;
2245 bb_ci->lower.z = bb[ci].lower.z + shz;
2246 bb_ci->upper.x = bb[ci].upper.x + shx;
2247 bb_ci->upper.y = bb[ci].upper.y + shy;
2248 bb_ci->upper.z = bb[ci].upper.z + shz;
2251 /* Sets a simple list i-cell bounding box, including PBC shift */
2252 static inline void set_icell_bb(const Grid& iGrid, int ci, real shx, real shy, real shz, NbnxnPairlistCpuWork* work)
2254 set_icell_bb_simple(iGrid.iBoundingBoxes(), ci, shx, shy, shz, &work->iClusterData.bb[0]);
2258 /* Sets the packed super-cell and sub-cell bounding boxes, including PBC shift */
2259 static void set_icell_bbxxxx_supersub(gmx::ArrayRef<const float> bb, int ci, real shx, real shy, real shz, float* bb_ci)
2261 constexpr int cellBBStride = packedBoundingBoxesIndex(c_gpuNumClusterPerCell);
2262 constexpr int pbbStride = c_packedBoundingBoxesDimSize;
2263 const int ia = ci * cellBBStride;
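/* The packed format stores, per group of pbbStride boxes, all lower-x
 * coordinates, then lower-y, lower-z, upper-x, upper-y and upper-z,
 * hence the x,y,z,x,y,z pattern of the shifts applied below.
 */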
2264 for (int m = 0; m < cellBBStride; m += c_packedBoundingBoxesSize)
2266 for (int i = 0; i < pbbStride; i++)
2268 bb_ci[m + 0 * pbbStride + i] = bb[ia + m + 0 * pbbStride + i] + shx;
2269 bb_ci[m + 1 * pbbStride + i] = bb[ia + m + 1 * pbbStride + i] + shy;
2270 bb_ci[m + 2 * pbbStride + i] = bb[ia + m + 2 * pbbStride + i] + shz;
2271 bb_ci[m + 3 * pbbStride + i] = bb[ia + m + 3 * pbbStride + i] + shx;
2272 bb_ci[m + 4 * pbbStride + i] = bb[ia + m + 4 * pbbStride + i] + shy;
2273 bb_ci[m + 5 * pbbStride + i] = bb[ia + m + 5 * pbbStride + i] + shz;
2279 /* Sets the super-cell and sub-cell bounding boxes, including PBC shift */
2280 gmx_unused static void set_icell_bb_supersub(gmx::ArrayRef<const BoundingBox> bb,
2287 for (int i = 0; i < c_gpuNumClusterPerCell; i++)
2289 set_icell_bb_simple(bb, ci * c_gpuNumClusterPerCell + i, shx, shy, shz, &bb_ci[i]);
2293 /* Sets the super-cell and sub-cell bounding boxes, including PBC shift */
2294 gmx_unused static void set_icell_bb(const Grid& iGrid, int ci, real shx, real shy, real shz, NbnxnPairlistGpuWork* work)
2297 set_icell_bbxxxx_supersub(iGrid.packedBoundingBoxes(), ci, shx, shy, shz,
2298 work->iSuperClusterData.bbPacked.data());
2300 set_icell_bb_supersub(iGrid.iBoundingBoxes(), ci, shx, shy, shz, work->iSuperClusterData.bb.data());
2304 /* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
2305 static void icell_set_x_simple(int ci,
2311 NbnxnPairlistCpuWork::IClusterData* iClusterData)
2313 const int ia = ci * c_nbnxnCpuIClusterSize;
2315 for (int i = 0; i < c_nbnxnCpuIClusterSize; i++)
2317 iClusterData->x[i * STRIDE_XYZ + XX] = x[(ia + i) * stride + XX] + shx;
2318 iClusterData->x[i * STRIDE_XYZ + YY] = x[(ia + i) * stride + YY] + shy;
2319 iClusterData->x[i * STRIDE_XYZ + ZZ] = x[(ia + i) * stride + ZZ] + shz;
2323 static void icell_set_x(int ci,
2329 const ClusterDistanceKernelType kernelType,
2330 NbnxnPairlistCpuWork* work)
2335 # ifdef GMX_NBNXN_SIMD_4XN
2336 case ClusterDistanceKernelType::CpuSimd_4xM:
2337 icell_set_x_simd_4xn(ci, shx, shy, shz, stride, x, work);
2340 # ifdef GMX_NBNXN_SIMD_2XNN
2341 case ClusterDistanceKernelType::CpuSimd_2xMM:
2342 icell_set_x_simd_2xnn(ci, shx, shy, shz, stride, x, work);
2346 case ClusterDistanceKernelType::CpuPlainC:
2347 icell_set_x_simple(ci, shx, shy, shz, stride, x, &work->iClusterData);
2349 default: GMX_ASSERT(false, "Unhandled case"); break;
2353 /* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
2354 static void icell_set_x(int ci,
2360 ClusterDistanceKernelType gmx_unused kernelType,
2361 NbnxnPairlistGpuWork* work)
2363 #if !GMX_SIMD4_HAVE_REAL
2365 real* x_ci = work->iSuperClusterData.x.data();
2367 int ia = ci * c_gpuNumClusterPerCell * c_nbnxnGpuClusterSize;
2368 for (int i = 0; i < c_gpuNumClusterPerCell * c_nbnxnGpuClusterSize; i++)
2370 x_ci[i * DIM + XX] = x[(ia + i) * stride + XX] + shx;
2371 x_ci[i * DIM + YY] = x[(ia + i) * stride + YY] + shy;
2372 x_ci[i * DIM + ZZ] = x[(ia + i) * stride + ZZ] + shz;
2375 #else /* !GMX_SIMD4_HAVE_REAL */
2377 real* x_ci = work->iSuperClusterData.xSimd.data();
2379 for (int si = 0; si < c_gpuNumClusterPerCell; si++)
2381 for (int i = 0; i < c_nbnxnGpuClusterSize; i += GMX_SIMD4_WIDTH)
2383 int io = si * c_nbnxnGpuClusterSize + i;
2384 int ia = ci * c_gpuNumClusterPerCell * c_nbnxnGpuClusterSize + io;
2385 for (int j = 0; j < GMX_SIMD4_WIDTH; j++)
2387 x_ci[io * DIM + j + XX * GMX_SIMD4_WIDTH] = x[(ia + j) * stride + XX] + shx;
2388 x_ci[io * DIM + j + YY * GMX_SIMD4_WIDTH] = x[(ia + j) * stride + YY] + shy;
2389 x_ci[io * DIM + j + ZZ * GMX_SIMD4_WIDTH] = x[(ia + j) * stride + ZZ] + shz;
2394 #endif /* !GMX_SIMD4_HAVE_REAL */
2397 static real minimum_subgrid_size_xy(const Grid& grid)
2399 const Grid::Dimensions& dims = grid.dimensions();
2401 if (grid.geometry().isSimple)
2403 return std::min(dims.cellSize[XX], dims.cellSize[YY]);
2407 return std::min(dims.cellSize[XX] / c_gpuNumClusterPerCellX,
2408 dims.cellSize[YY] / c_gpuNumClusterPerCellY);
2412 static real effective_buffer_1x1_vs_MxN(const Grid& iGrid, const Grid& jGrid)
2414 const real eff_1x1_buffer_fac_overest = 0.1;
2416 /* Determine an atom-pair list cut-off buffer size for atom pairs,
2417 * to be added to rlist (including buffer) used for MxN.
2418 * This is for converting an MxN list to a 1x1 list. This means we can't
2419 * use the normal buffer estimate, as we have an MxN list in which
2420 * some atom pairs beyond rlist are missing. We want to capture
2421 * the beneficial effect of buffering by extra pairs just outside rlist,
2422 * while removing the useless pairs that are further away from rlist.
2423 * (Also the buffer could have been set manually not using the estimate.)
2424 * This buffer size is an overestimate.
2425 * We add 10% of the smallest grid sub-cell dimensions.
2426 * Note that the z-size differs per cell and we don't use this,
2427 * so we overestimate.
2428 * With PME, the 10% value gives a buffer that is somewhat larger
2429 * than the effective buffer with a tolerance of 0.005 kJ/mol/ps.
2430 * Smaller tolerances or using RF lead to a smaller effective buffer,
2431 * so 10% gives a safe overestimate.
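* As an illustration with assumed numbers: sub-cell x/y sizes of 0.3 nm
* on both grids give a buffer increment of 0.1 * (0.3 + 0.3) = 0.06 nm.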
2433 return eff_1x1_buffer_fac_overest * (minimum_subgrid_size_xy(iGrid) + minimum_subgrid_size_xy(jGrid));
2436 /* Estimates the interaction volume^2 for non-local interactions */
2437 static real nonlocal_vol2(const struct gmx_domdec_zones_t* zones, const rvec ls, real r)
2445 /* Here we simply add up the non-home interaction volume^2 of the
2446 * 1, 2 or 3 1D decomposition zones. As these volumes are not additive,
2447 * this is an overestimate, but it would only be significant in the limit
2448 * of small cells, where we anyhow need to split the lists into
2449 * parts that are as small as possible.
2452 for (int z = 0; z < zones->n; z++)
2454 if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
2459 for (int d = 0; d < DIM; d++)
2461 if (zones->shift[z][d] == 0)
2465 za *= zones->size[z].x1[d] - zones->size[z].x0[d];
2469 /* 4 octants of a sphere */
2470 vold_est = 0.25 * M_PI * r * r * r * r;
2471 /* 4 quarter pie slices on the edges */
2472 vold_est += 4 * cl * M_PI / 6.0 * r * r * r;
2473 /* One rectangular volume on a face */
2474 vold_est += ca * 0.5 * r * r;
2476 vol2_est_tot += vold_est * za;
2480 return vol2_est_tot;
2483 /* Estimates the average size of a full j-list for super/sub setup */
2484 static void get_nsubpair_target(const Nbnxm::GridSet& gridSet,
2485 const InteractionLocality iloc,
2487 const int min_ci_balanced,
2488 int* nsubpair_target,
2489 float* nsubpair_tot_est)
2491 /* The target value of 36 seems to be the optimum for Kepler.
2492 * Maxwell is less sensitive to the exact value.
2494 const int nsubpair_target_min = 36;
2495 real r_eff_sup, vol_est, nsp_est, nsp_est_nl;
2497 const Grid& grid = gridSet.grids()[0];
2499 /* We don't need to balance list sizes if:
2500 * - We didn't request balancing.
2501 * - The number of grid cells >= the number of lists requested,
2502 * since we will always generate at least #cells lists.
2503 * - We don't have any cells, since then there won't be any lists.
2505 if (min_ci_balanced <= 0 || grid.numCells() >= min_ci_balanced || grid.numCells() == 0)
2507 /* nsubpair_target==0 signals no balancing */
2508 *nsubpair_target = 0;
2509 *nsubpair_tot_est = 0;
2515 const int numAtomsCluster = grid.geometry().numAtomsICluster;
2516 const Grid::Dimensions& dims = grid.dimensions();
2518 ls[XX] = dims.cellSize[XX] / c_gpuNumClusterPerCellX;
2519 ls[YY] = dims.cellSize[YY] / c_gpuNumClusterPerCellY;
2520 ls[ZZ] = numAtomsCluster / (dims.atomDensity * ls[XX] * ls[YY]);
2522 /* The formulas below are a heuristic estimate of the average nsj per si */
2523 r_eff_sup = rlist + nbnxn_get_rlist_effective_inc(numAtomsCluster, ls);
2525 if (!gridSet.domainSetup().haveMultipleDomains || gridSet.domainSetup().zones->n == 1)
2531 nsp_est_nl = gmx::square(dims.atomDensity / numAtomsCluster)
2532 * nonlocal_vol2(gridSet.domainSetup().zones, ls, r_eff_sup);
2535 if (iloc == InteractionLocality::Local)
2537 /* Sub-cell interacts with itself */
2538 vol_est = ls[XX] * ls[YY] * ls[ZZ];
2539 /* 6/2 rectangular volume on the faces */
2540 vol_est += (ls[XX] * ls[YY] + ls[XX] * ls[ZZ] + ls[YY] * ls[ZZ]) * r_eff_sup;
2541 /* 12/2 quarter pie slices on the edges */
2542 vol_est += 2 * (ls[XX] + ls[YY] + ls[ZZ]) * 0.25 * M_PI * gmx::square(r_eff_sup);
2543 /* 4 octants of a sphere */
2544 vol_est += 0.5 * 4.0 / 3.0 * M_PI * gmx::power3(r_eff_sup);
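/* Altogether: vol_est = lx*ly*lz + (lx*ly + lx*lz + ly*lz)*r
 * + (pi/2)*(lx + ly + lz)*r^2 + (2*pi/3)*r^3 with r = r_eff_sup,
 * i.e. the sub-cell volume plus half of the surrounding interaction
 * shell, as each sub-cell pair should be counted only once.
 */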
2546 /* Estimate the number of cluster pairs as the local number of
2547 * clusters times the volume they interact with times the density.
2549 nsp_est = grid.numClusters() * vol_est * dims.atomDensity / numAtomsCluster;
2551 /* Subtract the non-local pair count */
2552 nsp_est -= nsp_est_nl;
2554 /* For small cut-offs nsp_est will be an underestimate.
2555 * With DD nsp_est_nl is an overestimate so nsp_est can get negative.
2556 * So to avoid too small or negative nsp_est we set a minimum of
2557 * all cells interacting with all 3^3 direct neighbors (3^3-1)/2+1=14.
2558 * This might be a slight overestimate for small non-periodic groups of
2559 * atoms as will occur for a local domain with DD, but for small
2560 * groups of atoms we'll anyhow be limited by nsubpair_target_min,
2561 * so this overestimation will not matter.
2563 nsp_est = std::max(nsp_est, grid.numClusters() * 14._real);
2567 fprintf(debug, "nsp_est local %5.1f non-local %5.1f\n", nsp_est, nsp_est_nl);
2572 nsp_est = nsp_est_nl;
2575 /* Thus the (average) maximum j-list size should be as follows.
2576 * Since there is overhead, we shouldn't make the lists too small
2577 * (and we can't chop up j-groups) so we use a minimum target size of 36.
2579 *nsubpair_target = std::max(nsubpair_target_min, roundToInt(nsp_est / min_ci_balanced));
2580 *nsubpair_tot_est = nsp_est;
2584 fprintf(debug, "nbl nsp estimate %.1f, nsubpair_target %d\n", nsp_est, *nsubpair_target);
2588 /* Debug list print function */
2589 static void print_nblist_ci_cj(FILE* fp, const NbnxnPairlistCpu& nbl)
2591 for (const nbnxn_ci_t& ciEntry : nbl.ci)
2593 fprintf(fp, "ci %4d shift %2d ncj %3d\n", ciEntry.ci, ciEntry.shift,
2594 ciEntry.cj_ind_end - ciEntry.cj_ind_start);
2596 for (int j = ciEntry.cj_ind_start; j < ciEntry.cj_ind_end; j++)
2598 fprintf(fp, " cj %5d imask %x\n", nbl.cj[j].cj, nbl.cj[j].excl);
2603 /* Debug list print function */
2604 static void print_nblist_sci_cj(FILE* fp, const NbnxnPairlistGpu& nbl)
2606 for (const nbnxn_sci_t& sci : nbl.sci)
2608 fprintf(fp, "ci %4d shift %2d ncj4 %2d\n", sci.sci, sci.shift, sci.numJClusterGroups());
2611 for (int j4 = sci.cj4_ind_start; j4 < sci.cj4_ind_end; j4++)
2613 for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
2615 fprintf(fp, " sj %5d imask %x\n", nbl.cj4[j4].cj[j], nbl.cj4[j4].imei[0].imask);
2616 for (int si = 0; si < c_gpuNumClusterPerCell; si++)
2618 if (nbl.cj4[j4].imei[0].imask & (1U << (j * c_gpuNumClusterPerCell + si)))
2625 fprintf(fp, "ci %4d shift %2d ncj4 %2d ncp %3d\n", sci.sci, sci.shift,
2626 sci.numJClusterGroups(), ncp);
2630 /* Combine the pair lists nbls generated on multiple threads into nblc */
2631 static void combine_nblists(gmx::ArrayRef<const NbnxnPairlistGpu> nbls, NbnxnPairlistGpu* nblc)
2633 int nsci = nblc->sci.size();
2634 int ncj4 = nblc->cj4.size();
2635 int nexcl = nblc->excl.size();
2636 for (auto& nbl : nbls)
2638 nsci += nbl.sci.size();
2639 ncj4 += nbl.cj4.size();
2640 nexcl += nbl.excl.size();
2643 /* Resize with the final, combined size, so we can fill in parallel */
2644 /* NOTE: For better performance we should use default initialization */
2645 nblc->sci.resize(nsci);
2646 nblc->cj4.resize(ncj4);
2647 nblc->excl.resize(nexcl);
2649 /* Each thread should copy its own data to the combined arrays,
2650 * as otherwise data will go back and forth between different caches.
2652 const int gmx_unused nthreads = gmx_omp_nthreads_get(emntPairsearch);
2654 #pragma omp parallel for num_threads(nthreads) schedule(static)
2655 for (gmx::index n = 0; n < nbls.ssize(); n++)
2659 /* Determine the offset in the combined data for our thread.
2660 * Note that the original sizes in nblc are lost.
2662 int sci_offset = nsci;
2663 int cj4_offset = ncj4;
2664 int excl_offset = nexcl;
2666 for (gmx::index i = n; i < nbls.ssize(); i++)
2668 sci_offset -= nbls[i].sci.size();
2669 cj4_offset -= nbls[i].cj4.size();
2670 excl_offset -= nbls[i].excl.size();
2673 const NbnxnPairlistGpu& nbli = nbls[n];
2675 for (size_t i = 0; i < nbli.sci.size(); i++)
2677 nblc->sci[sci_offset + i] = nbli.sci[i];
2678 nblc->sci[sci_offset + i].cj4_ind_start += cj4_offset;
2679 nblc->sci[sci_offset + i].cj4_ind_end += cj4_offset;
2682 for (size_t j4 = 0; j4 < nbli.cj4.size(); j4++)
2684 nblc->cj4[cj4_offset + j4] = nbli.cj4[j4];
2685 nblc->cj4[cj4_offset + j4].imei[0].excl_ind += excl_offset;
2686 nblc->cj4[cj4_offset + j4].imei[1].excl_ind += excl_offset;
2689 for (size_t j4 = 0; j4 < nbli.excl.size(); j4++)
2691 nblc->excl[excl_offset + j4] = nbli.excl[j4];
2694 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
2697 for (auto& nbl : nbls)
2699 nblc->nci_tot += nbl.nci_tot;
2703 static void balance_fep_lists(gmx::ArrayRef<std::unique_ptr<t_nblist>> fepLists,
2704 gmx::ArrayRef<PairsearchWork> work)
2706 const int numLists = fepLists.ssize();
2710 /* Nothing to balance */
2714 /* Count the total i-lists and pairs */
2717 for (const auto& list : fepLists)
2719 nri_tot += list->nri;
2720 nrj_tot += list->nrj;
2723 const int nrj_target = (nrj_tot + numLists - 1) / numLists;
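/* Note that this is a ceiling division, so numLists * nrj_target >= nrj_tot
 * and all pairs can be assigned while staying near the target per list.
 */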
2725 GMX_ASSERT(gmx_omp_nthreads_get(emntNonbonded) == numLists,
2726 "We should have as many work objects as FEP lists");
2728 #pragma omp parallel for schedule(static) num_threads(numLists)
2729 for (int th = 0; th < numLists; th++)
2733 t_nblist* nbl = work[th].nbl_fep.get();
2735 /* Note that here we allocate for the total size, instead of
2736 * a per-thread estimate (which is hard to obtain).
2738 if (nri_tot > nbl->maxnri)
2740 nbl->maxnri = over_alloc_large(nri_tot);
2741 reallocate_nblist(nbl);
2743 if (nri_tot > nbl->maxnri || nrj_tot > nbl->maxnrj)
2745 nbl->maxnrj = over_alloc_small(nrj_tot);
2746 srenew(nbl->jjnr, nbl->maxnrj);
2747 srenew(nbl->excl_fep, nbl->maxnrj);
2750 clear_pairlist_fep(nbl);
2752 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
2755 /* Loop over the source lists and assign and copy i-entries */
2757 t_nblist* nbld = work[th_dest].nbl_fep.get();
2758 for (int th = 0; th < numLists; th++)
2760 const t_nblist* nbls = fepLists[th].get();
2762 for (int i = 0; i < nbls->nri; i++)
2766 /* The number of pairs in this i-entry */
2767 nrj = nbls->jindex[i + 1] - nbls->jindex[i];
2769 /* Decide if list th_dest is too large and we should proceed
2770 * to the next destination list.
2772 if (th_dest + 1 < numLists && nbld->nrj > 0
2773 && nbld->nrj + nrj - nrj_target > nrj_target - nbld->nrj)
2776 nbld = work[th_dest].nbl_fep.get();
2779 nbld->iinr[nbld->nri] = nbls->iinr[i];
2780 nbld->gid[nbld->nri] = nbls->gid[i];
2781 nbld->shift[nbld->nri] = nbls->shift[i];
2783 for (int j = nbls->jindex[i]; j < nbls->jindex[i + 1]; j++)
2785 nbld->jjnr[nbld->nrj] = nbls->jjnr[j];
2786 nbld->excl_fep[nbld->nrj] = nbls->excl_fep[j];
2790 nbld->jindex[nbld->nri] = nbld->nrj;
2794 /* Swap the list pointers */
2795 for (int th = 0; th < numLists; th++)
2797 fepLists[th].swap(work[th].nbl_fep);
2801 fprintf(debug, "nbl_fep[%d] nri %4d nrj %4d\n", th, fepLists[th]->nri, fepLists[th]->nrj);
2806 /* Returns the next ci to be processed by our thread */
2807 static gmx_bool next_ci(const Grid& grid, int nth, int ci_block, int* ci_x, int* ci_y, int* ci_b, int* ci)
2812 if (*ci_b == ci_block)
2814 /* Jump to the next block assigned to this task */
2815 *ci += (nth - 1) * ci_block;
2819 if (*ci >= grid.numCells())
2824 while (*ci >= grid.firstCellInColumn(*ci_x * grid.dimensions().numCells[YY] + *ci_y + 1))
2827 if (*ci_y == grid.dimensions().numCells[YY])
2837 /* Returns the distance^2 for which we put cell pairs in the list
2838 * without checking atom pair distances. This is usually < rlist^2.
2840 static float boundingbox_only_distance2(const Grid::Dimensions& iGridDims,
2841 const Grid::Dimensions& jGridDims,
2845 /* If the distance between two sub-cell bounding boxes is less
2846 * than this distance, do not check the distance between
2847 * all particle pairs in the sub-cell, since then it is likely
2848 * that the box pair has atom pairs within the cut-off.
2849 * We use the nblist cut-off minus 0.5 times the average x/y diagonal
2850 * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
2851 * Using more than 0.5 gains at most 0.5%.
2852 * If forces are calculated more than twice, the performance gain
2853 * in the force calculation outweighs the cost of checking.
2854 * Note that with sub-cell lists, the atom-pair distance check
2855 * is only performed when only 1 out of 8 sub-cells is within range;
2856 * this is because the GPU is much faster than the CPU.
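* As an illustration with assumed numbers: rlist = 1.0 nm and average
* sub-cell x/y sizes of 0.4 nm give a bounding-box-only distance of about
* 1.0 - 0.5*sqrt(0.4^2 + 0.4^2) ~ 0.72 nm (returned as its square).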
2861 bbx = 0.5 * (iGridDims.cellSize[XX] + jGridDims.cellSize[XX]);
2862 bby = 0.5 * (iGridDims.cellSize[YY] + jGridDims.cellSize[YY]);
2865 bbx /= c_gpuNumClusterPerCellX;
2866 bby /= c_gpuNumClusterPerCellY;
2869 rbb2 = std::max(0.0, rlist - 0.5 * std::sqrt(bbx * bbx + bby * bby));
2875 return static_cast<float>((1 + GMX_FLOAT_EPS) * rbb2);
2879 static int get_ci_block_size(const Grid& iGrid, const bool haveMultipleDomains, const int numLists)
2881 const int ci_block_enum = 5;
2882 const int ci_block_denom = 11;
2883 const int ci_block_min_atoms = 16;
2886 /* Here we decide how to distribute the blocks over the threads.
2887 * We use prime numbers to try to avoid that the grid size becomes
2888 * a multiple of the number of threads, which would lead to some
2889 * threads getting "inner" pairs and others getting boundary pairs,
2890 * which in turn will lead to load imbalance between threads.
2891 * Set the block size as 5/11/ntask times the average number of cells
2892 * in a y,z slab. This should ensure a quite uniform distribution
2893 * of the grid parts of the different threads along all three grid
2894 * zone boundaries with 3D domain decomposition. At the same time
2895 * the blocks will not become too small.
2897 GMX_ASSERT(iGrid.dimensions().numCells[XX] > 0, "Grid can't be empty");
2898 GMX_ASSERT(numLists > 0, "We need at least one list");
2899 ci_block = (iGrid.numCells() * ci_block_enum)
2900 / (ci_block_denom * iGrid.dimensions().numCells[XX] * numLists);
2902 const int numAtomsPerCell = iGrid.geometry().numAtomsPerCell;
2904 /* Ensure the blocks are not too small: avoids cache invalidation */
2905 if (ci_block * numAtomsPerCell < ci_block_min_atoms)
2907 ci_block = (ci_block_min_atoms + numAtomsPerCell - 1) / numAtomsPerCell;
2910 /* Without domain decomposition
2911 * or with fewer than 3 blocks per task, divide into nth blocks.
2913 if (!haveMultipleDomains || numLists * 3 * ci_block > iGrid.numCells())
2915 ci_block = (iGrid.numCells() + numLists - 1) / numLists;
2918 if (ci_block > 1 && (numLists - 1) * ci_block >= iGrid.numCells())
2920 /* Some threads have no work. Although reducing the block size
2921 * does not decrease the block count on the first few threads,
2922 * with GPUs better mixing of "upper" cells that have more empty
2923 * clusters results in a somewhat lower max load over all threads.
2924 * Without GPUs the regime of so few atoms per thread is less
2925 * performance relevant, but with 8-wide SIMD the same reasoning
2926 * applies, since the pair list uses 4 i-atom "sub-clusters".
2934 /* Returns the number of bits to right-shift a cluster index to obtain
2935 * the corresponding force buffer flag index.
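* Example, assuming NBNXN_BUFFERFLAG_SIZE is 16: with 4 atoms per cluster
* the shift is 2 (4 << 2 == 16), so cluster index >> 2 gives the flag index.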
2937 static int getBufferFlagShift(int numAtomsPerCluster)
2939 int bufferFlagShift = 0;
2940 while ((numAtomsPerCluster << bufferFlagShift) < NBNXN_BUFFERFLAG_SIZE)
2945 return bufferFlagShift;
2948 static bool pairlistIsSimple(const NbnxnPairlistCpu gmx_unused& pairlist)
2953 static bool pairlistIsSimple(const NbnxnPairlistGpu gmx_unused& pairlist)
2958 static void makeClusterListWrapper(NbnxnPairlistCpu* nbl,
2959 const Grid gmx_unused& iGrid,
2962 const int firstCell,
2964 const bool excludeSubDiagonal,
2965 const nbnxn_atomdata_t* nbat,
2968 const ClusterDistanceKernelType kernelType,
2969 int* numDistanceChecks)
2973 case ClusterDistanceKernelType::CpuPlainC:
2974 makeClusterListSimple(jGrid, nbl, ci, firstCell, lastCell, excludeSubDiagonal,
2975 nbat->x().data(), rlist2, rbb2, numDistanceChecks);
2977 #ifdef GMX_NBNXN_SIMD_4XN
2978 case ClusterDistanceKernelType::CpuSimd_4xM:
2979 makeClusterListSimd4xn(jGrid, nbl, ci, firstCell, lastCell, excludeSubDiagonal,
2980 nbat->x().data(), rlist2, rbb2, numDistanceChecks);
2983 #ifdef GMX_NBNXN_SIMD_2XNN
2984 case ClusterDistanceKernelType::CpuSimd_2xMM:
2985 makeClusterListSimd2xnn(jGrid, nbl, ci, firstCell, lastCell, excludeSubDiagonal,
2986 nbat->x().data(), rlist2, rbb2, numDistanceChecks);
2989 default: GMX_ASSERT(false, "Unhandled kernel type");
2993 static void makeClusterListWrapper(NbnxnPairlistGpu* nbl,
2994 const Grid& gmx_unused iGrid,
2997 const int firstCell,
2999 const bool excludeSubDiagonal,
3000 const nbnxn_atomdata_t* nbat,
3003 ClusterDistanceKernelType gmx_unused kernelType,
3004 int* numDistanceChecks)
3006 for (int cj = firstCell; cj <= lastCell; cj++)
3008 make_cluster_list_supersub(iGrid, jGrid, nbl, ci, cj, excludeSubDiagonal, nbat->xstride,
3009 nbat->x().data(), rlist2, rbb2, numDistanceChecks);
3013 static int getNumSimpleJClustersInList(const NbnxnPairlistCpu& nbl)
3015 return nbl.cj.size();
3018 static int getNumSimpleJClustersInList(const gmx_unused NbnxnPairlistGpu& nbl)
3023 static void incrementNumSimpleJClustersInList(NbnxnPairlistCpu* nbl, int ncj_old_j)
3025 nbl->ncjInUse += nbl->cj.size();
3026 nbl->ncjInUse -= ncj_old_j;
3029 static void incrementNumSimpleJClustersInList(NbnxnPairlistGpu gmx_unused* nbl, int gmx_unused ncj_old_j)
3033 static void checkListSizeConsistency(const NbnxnPairlistCpu& nbl, const bool haveFreeEnergy)
3035 GMX_RELEASE_ASSERT(static_cast<size_t>(nbl.ncjInUse) == nbl.cj.size() || haveFreeEnergy,
3036 "Without free-energy all cj pair-list entries should be in use. "
3037 "Note that subsequent code does not make use of the equality, "
3038 "this check is only here to catch bugs");
3041 static void checkListSizeConsistency(const NbnxnPairlistGpu gmx_unused& nbl, bool gmx_unused haveFreeEnergy)
3043 /* We currently cannot check consistency here */
3046 /* Set the buffer flags for newly added entries in the list */
3047 static void setBufferFlags(const NbnxnPairlistCpu& nbl,
3048 const int ncj_old_j,
3049 const int gridj_flag_shift,
3050 gmx_bitmask_t* gridj_flag,
3053 if (gmx::ssize(nbl.cj) > ncj_old_j)
3055 int cbFirst = nbl.cj[ncj_old_j].cj >> gridj_flag_shift;
3056 int cbLast = nbl.cj.back().cj >> gridj_flag_shift;
3057 for (int cb = cbFirst; cb <= cbLast; cb++)
3059 bitmask_init_bit(&gridj_flag[cb], th);
3064 static void setBufferFlags(const NbnxnPairlistGpu gmx_unused& nbl,
3065 int gmx_unused ncj_old_j,
3066 int gmx_unused gridj_flag_shift,
3067 gmx_bitmask_t gmx_unused* gridj_flag,
3070 GMX_ASSERT(false, "This function should never be called");
3073 /* Generates the part of pair-list nbl assigned to our thread */
3074 template<typename T>
3075 static void nbnxn_make_pairlist_part(const Nbnxm::GridSet& gridSet,
3078 PairsearchWork* work,
3079 const nbnxn_atomdata_t* nbat,
3080 const ListOfLists<int>& exclusions,
3082 const PairlistType pairlistType,
3084 gmx_bool bFBufferFlag,
3087 float nsubpair_tot_est,
3097 int ci_b, ci, ci_x, ci_y, ci_xy;
3099 real bx0, bx1, by0, by1, bz0, bz1;
3101 real d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
3102 int cxf, cxl, cyf, cyf_x, cyl;
3103 int numDistanceChecks;
3104 int gridi_flag_shift = 0, gridj_flag_shift = 0;
3105 gmx_bitmask_t* gridj_flag = nullptr;
3106 int ncj_old_i, ncj_old_j;
3108 if (jGrid.geometry().isSimple != pairlistIsSimple(*nbl)
3109 || iGrid.geometry().isSimple != pairlistIsSimple(*nbl))
3111 gmx_incons("Grid incompatible with pair-list");
3115 GMX_ASSERT(nbl->na_ci == jGrid.geometry().numAtomsICluster,
3116 "The cluster sizes in the list and grid should match");
3117 nbl->na_cj = JClusterSizePerListType[pairlistType];
3118 na_cj_2log = get_2log(nbl->na_cj);
3124 /* Determine conversion of clusters to flag blocks */
3125 gridi_flag_shift = getBufferFlagShift(nbl->na_ci);
3126 gridj_flag_shift = getBufferFlagShift(nbl->na_cj);
3128 gridj_flag = work->buffer_flags.flag;
3131 gridSet.getBox(box);
3133 const bool haveFep = gridSet.haveFep();
3135 const real rlist2 = nbl->rlist * nbl->rlist;
3137 // Select the cluster pair distance kernel type
3138 const ClusterDistanceKernelType kernelType = getClusterDistanceKernelType(pairlistType, *nbat);
3140 if (haveFep && !pairlistIsSimple(*nbl))
3142 /* Determine an atom-pair list cut-off distance for FEP atom pairs.
3143 * We should not simply use rlist, since then we would not have
3144 * the small, effective buffering of the NxN lists.
3145 * The buffer is an overestimate, but the resulting cost for pairs
3146 * beyond rlist is negligible compared to the FEP pairs within rlist.
3148 rl_fep2 = nbl->rlist + effective_buffer_1x1_vs_MxN(iGrid, jGrid);
3152 fprintf(debug, "nbl_fep atom-pair rlist %f\n", rl_fep2);
3154 rl_fep2 = rl_fep2 * rl_fep2;
3157 const Grid::Dimensions& iGridDims = iGrid.dimensions();
3158 const Grid::Dimensions& jGridDims = jGrid.dimensions();
3160 rbb2 = boundingbox_only_distance2(iGridDims, jGridDims, nbl->rlist, pairlistIsSimple(*nbl));
3164 fprintf(debug, "nbl bounding box only distance %f\n", std::sqrt(rbb2));
3167 const bool isIntraGridList = (&iGrid == &jGrid);
3169 /* Set the shift range */
3170 for (int d = 0; d < DIM; d++)
3172 /* Check if we need periodicity shifts.
3173 * Without PBC or with domain decomposition we don't need them.
3175 if (d >= ePBC2npbcdim(gridSet.domainSetup().ePBC)
3176 || gridSet.domainSetup().haveMultipleDomainsPerDim[d])
3182 const real listRangeCellToCell =
3183 listRangeForGridCellToGridCell(rlist, iGrid.dimensions(), jGrid.dimensions());
3184 if (d == XX && box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < listRangeCellToCell)
3194 const bool bSimple = pairlistIsSimple(*nbl);
3195 gmx::ArrayRef<const BoundingBox> bb_i;
3197 gmx::ArrayRef<const float> pbb_i;
3200 bb_i = iGrid.iBoundingBoxes();
3204 pbb_i = iGrid.packedBoundingBoxes();
3207 /* We use the normal bounding box format for both grid types */
3208 bb_i = iGrid.iBoundingBoxes();
3210 gmx::ArrayRef<const BoundingBox1D> bbcz_i = iGrid.zBoundingBoxes();
3211 gmx::ArrayRef<const int> flags_i = iGrid.clusterFlags();
3212 gmx::ArrayRef<const BoundingBox1D> bbcz_j = jGrid.zBoundingBoxes();
3213 int cell0_i = iGrid.cellOffset();
3217 fprintf(debug, "nbl nc_i %d col.av. %.1f ci_block %d\n", iGrid.numCells(),
3218 iGrid.numCells() / static_cast<double>(iGrid.numColumns()), ci_block);
3221 numDistanceChecks = 0;
3223 const real listRangeBBToJCell2 =
3224 gmx::square(listRangeForBoundingBoxToGridCell(rlist, jGrid.dimensions()));
3226 /* Initialize ci_b and ci to 1 before where we want them to start,
3227 * as they will both be incremented in next_ci.
3230 ci = th * ci_block - 1;
3233 while (next_ci(iGrid, nth, ci_block, &ci_x, &ci_y, &ci_b, &ci))
3235 if (bSimple && flags_i[ci] == 0)
3239 ncj_old_i = getNumSimpleJClustersInList(*nbl);
3242 if (!isIntraGridList && shp[XX] == 0)
3246 bx1 = bb_i[ci].upper.x;
3250 bx1 = iGridDims.lowerCorner[XX] + (real(ci_x) + 1) * iGridDims.cellSize[XX];
3252 if (bx1 < jGridDims.lowerCorner[XX])
3254 d2cx = gmx::square(jGridDims.lowerCorner[XX] - bx1);
3256 if (d2cx >= listRangeBBToJCell2)
3263 ci_xy = ci_x * iGridDims.numCells[YY] + ci_y;
3265 /* Loop over shift vectors in three dimensions */
3266 for (int tz = -shp[ZZ]; tz <= shp[ZZ]; tz++)
3268 const real shz = real(tz) * box[ZZ][ZZ];
3270 bz0 = bbcz_i[ci].lower + shz;
3271 bz1 = bbcz_i[ci].upper + shz;
3279 d2z = gmx::square(bz1);
3283 d2z = gmx::square(bz0 - box[ZZ][ZZ]);
3286 d2z_cx = d2z + d2cx;
3288 if (d2z_cx >= rlist2)
3293 bz1_frac = bz1 / real(iGrid.numCellsInColumn(ci_xy));
3298 /* The check with bz1_frac close to or larger than 1 comes later */
3300 for (int ty = -shp[YY]; ty <= shp[YY]; ty++)
3302 const real shy = real(ty) * box[YY][YY] + real(tz) * box[ZZ][YY];
3306 by0 = bb_i[ci].lower.y + shy;
3307 by1 = bb_i[ci].upper.y + shy;
3311 by0 = iGridDims.lowerCorner[YY] + (real(ci_y)) * iGridDims.cellSize[YY] + shy;
3312 by1 = iGridDims.lowerCorner[YY] + (real(ci_y) + 1) * iGridDims.cellSize[YY] + shy;
3315 get_cell_range<YY>(by0, by1, jGridDims, d2z_cx, rlist, &cyf, &cyl);
3323 if (by1 < jGridDims.lowerCorner[YY])
3325 d2z_cy += gmx::square(jGridDims.lowerCorner[YY] - by1);
3327 else if (by0 > jGridDims.upperCorner[YY])
3329 d2z_cy += gmx::square(by0 - jGridDims.upperCorner[YY]);
3332 for (int tx = -shp[XX]; tx <= shp[XX]; tx++)
3334 const int shift = XYZ2IS(tx, ty, tz);
3336 const bool excludeSubDiagonal = (isIntraGridList && shift == CENTRAL);
3338 if (c_pbcShiftBackward && isIntraGridList && shift > CENTRAL)
3344 real(tx) * box[XX][XX] + real(ty) * box[YY][XX] + real(tz) * box[ZZ][XX];
3348 bx0 = bb_i[ci].lower.x + shx;
3349 bx1 = bb_i[ci].upper.x + shx;
3353 bx0 = iGridDims.lowerCorner[XX] + (real(ci_x)) * iGridDims.cellSize[XX] + shx;
3354 bx1 = iGridDims.lowerCorner[XX] + (real(ci_x) + 1) * iGridDims.cellSize[XX] + shx;
3357 get_cell_range<XX>(bx0, bx1, jGridDims, d2z_cy, rlist, &cxf, &cxl);
3364 addNewIEntry(nbl, cell0_i + ci, shift, flags_i[ci]);
3366 if ((!c_pbcShiftBackward || excludeSubDiagonal) && cxf < ci_x)
3368 /* Leave the pairs with i > j.
3369 * x is the major index, so skip half of it.
3374 set_icell_bb(iGrid, ci, shx, shy, shz, nbl->work.get());
3376 icell_set_x(cell0_i + ci, shx, shy, shz, nbat->xstride, nbat->x().data(),
3377 kernelType, nbl->work.get());
3379 for (int cx = cxf; cx <= cxl; cx++)
3381 const real cx_real = cx;
3383 if (jGridDims.lowerCorner[XX] + cx_real * jGridDims.cellSize[XX] > bx1)
3385 d2zx += gmx::square(jGridDims.lowerCorner[XX]
3386 + cx_real * jGridDims.cellSize[XX] - bx1);
3388 else if (jGridDims.lowerCorner[XX] + (cx_real + 1) * jGridDims.cellSize[XX] < bx0)
3390 d2zx += gmx::square(jGridDims.lowerCorner[XX]
3391 + (cx_real + 1) * jGridDims.cellSize[XX] - bx0);
3394 if (isIntraGridList && cx == 0 && (!c_pbcShiftBackward || shift == CENTRAL)
3397 /* Leave the pairs with i > j.
3398 * Skip half of y when i and j have the same x.
3407 for (int cy = cyf_x; cy <= cyl; cy++)
3409 const int columnStart =
3410 jGrid.firstCellInColumn(cx * jGridDims.numCells[YY] + cy);
3411 const int columnEnd =
3412 jGrid.firstCellInColumn(cx * jGridDims.numCells[YY] + cy + 1);
3414 const real cy_real = cy;
3416 if (jGridDims.lowerCorner[YY] + cy_real * jGridDims.cellSize[YY] > by1)
3418 d2zxy += gmx::square(jGridDims.lowerCorner[YY]
3419 + cy_real * jGridDims.cellSize[YY] - by1);
3421 else if (jGridDims.lowerCorner[YY] + (cy_real + 1) * jGridDims.cellSize[YY] < by0)
3423 d2zxy += gmx::square(jGridDims.lowerCorner[YY]
3424 + (cy_real + 1) * jGridDims.cellSize[YY] - by0);
3426 if (columnStart < columnEnd && d2zxy < listRangeBBToJCell2)
3428 /* To improve efficiency in the common case
3429 * of a homogeneous particle distribution,
3430 * we estimate the index of the middle cell
3431 * in range (midCell). We search down and up
3432 * starting from this index.
3434 * Note that the bbcz_j array contains bounds
3435 * for i-clusters, thus for clusters of 4 atoms.
3436 * For the common case where the j-cluster size
3437 * is 8, we could step with a stride of 2,
3438 * but we do not do this because it would
3439 * complicate this code even more.
3443 + static_cast<int>(bz1_frac
3444 * static_cast<real>(columnEnd - columnStart));
3445 if (midCell >= columnEnd)
3447 midCell = columnEnd - 1;
3452 /* Find the lowest cell that can possibly
3454 * Check if we hit the bottom of the grid,
3455 * if the j-cell is below the i-cell and if so,
3456 * if it is within range.
3458 int downTestCell = midCell;
3459 while (downTestCell >= columnStart
3460 && (bbcz_j[downTestCell].upper >= bz0
3461 || d2xy + gmx::square(bbcz_j[downTestCell].upper - bz0) < rlist2))
3465 int firstCell = downTestCell + 1;
3467 /* Find the highest cell that can possibly
3469 * Check if we hit the top of the grid,
3470 * if the j-cell is above the i-cell and if so,
3471 * if it is within range.
3473 int upTestCell = midCell + 1;
3474 while (upTestCell < columnEnd
3475 && (bbcz_j[upTestCell].lower <= bz1
3476 || d2xy + gmx::square(bbcz_j[upTestCell].lower - bz1) < rlist2))
3480 int lastCell = upTestCell - 1;
3482 #define NBNXN_REFCODE 0
3485 /* Simple reference code, for debugging,
3486 * overrides the more complex code above.
3488 firstCell = columnEnd;
3490 for (int k = columnStart; k < columnEnd; k++)
3492 if (d2xy + gmx::square(bbcz_j[k * NNBSBB_D + 1] - bz0) < rlist2
3497 if (d2xy + gmx::square(bbcz_j[k * NNBSBB_D] - bz1) < rlist2
3506 if (isIntraGridList)
3508 /* We want each atom/cell pair only once,
3509 * only use cj >= ci.
3511 if (!c_pbcShiftBackward || shift == CENTRAL)
3513 firstCell = std::max(firstCell, ci);
3517 if (firstCell <= lastCell)
3519 GMX_ASSERT(firstCell >= columnStart && lastCell < columnEnd,
3520 "The range should reside within the current grid "
3523 /* For f buffer flags with simple lists */
3524 ncj_old_j = getNumSimpleJClustersInList(*nbl);
3526 makeClusterListWrapper(nbl, iGrid, ci, jGrid, firstCell, lastCell,
3527 excludeSubDiagonal, nbat, rlist2, rbb2,
3528 kernelType, &numDistanceChecks);
3532 setBufferFlags(*nbl, ncj_old_j, gridj_flag_shift, gridj_flag, th);
3535 incrementNumSimpleJClustersInList(nbl, ncj_old_j);
3541 /* Set the exclusions for this ci list */
3542 setExclusionsForIEntry(gridSet, nbl, excludeSubDiagonal, na_cj_2log,
3543 *getOpenIEntry(nbl), exclusions);
3547 make_fep_list(gridSet.atomIndices(), nbat, nbl, excludeSubDiagonal,
3548 getOpenIEntry(nbl), shx, shy, shz, rl_fep2, iGrid, jGrid, nbl_fep);
3551 /* Close this ci list */
3552 closeIEntry(nbl, nsubpair_max, progBal, nsubpair_tot_est, th, nth);
3557 if (bFBufferFlag && getNumSimpleJClustersInList(*nbl) > ncj_old_i)
3559 bitmask_init_bit(&(work->buffer_flags.flag[(iGrid.cellOffset() + ci) >> gridi_flag_shift]), th);
3563 work->ndistc = numDistanceChecks;
3565 checkListSizeConsistency(*nbl, haveFep);
3569 fprintf(debug, "number of distance checks %d\n", numDistanceChecks);
3571 print_nblist_statistics(debug, *nbl, gridSet, rlist);
3575 fprintf(debug, "nbl FEP list pairs: %d\n", nbl_fep->nrj);
3580 static void reduce_buffer_flags(gmx::ArrayRef<PairsearchWork> searchWork,
3582 const nbnxn_buffer_flags_t* dest)
3584 for (int s = 0; s < nsrc; s++)
3586 gmx_bitmask_t* flag = searchWork[s].buffer_flags.flag;
3588 for (int b = 0; b < dest->nflag; b++)
3590 bitmask_union(&(dest->flag[b]), flag[b]);
3595 static void print_reduction_cost(const nbnxn_buffer_flags_t* flags, int nout)
3597 int nelem, nkeep, ncopy, nred, out;
3598 gmx_bitmask_t mask_0;
3604 bitmask_init_bit(&mask_0, 0);
3605 for (int b = 0; b < flags->nflag; b++)
3607 if (bitmask_is_equal(flags->flag[b], mask_0))
3609 /* Only flag 0 is set, no copy of reduction required */
3613 else if (!bitmask_is_zero(flags->flag[b]))
3616 for (out = 0; out < nout; out++)
3618 if (bitmask_is_set(flags->flag[b], out))
3636 "nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
3637 flags->nflag, nout, nelem / static_cast<double>(flags->nflag),
3638 nkeep / static_cast<double>(flags->nflag), ncopy / static_cast<double>(flags->nflag),
3639 nred / static_cast<double>(flags->nflag));
3642 /* Copies the list entries from src to dest when cjStart <= *cjGlobal < cjEnd.
3643 * *cjGlobal is updated with the cj count in src.
3644 * When setFlags==true, flag bit t is set in flag for all i and j clusters.
3646 template<bool setFlags>
3647 static void copySelectedListRange(const nbnxn_ci_t* gmx_restrict srcCi,
3648 const NbnxnPairlistCpu* gmx_restrict src,
3649 NbnxnPairlistCpu* gmx_restrict dest,
3650 gmx_bitmask_t* flag,
3655 const int ncj = srcCi->cj_ind_end - srcCi->cj_ind_start;
3657 dest->ci.push_back(*srcCi);
3658 dest->ci.back().cj_ind_start = dest->cj.size();
3659 dest->ci.back().cj_ind_end = dest->ci.back().cj_ind_start + ncj;
3663 bitmask_init_bit(&flag[srcCi->ci >> iFlagShift], t);
3666 for (int j = srcCi->cj_ind_start; j < srcCi->cj_ind_end; j++)
3668 dest->cj.push_back(src->cj[j]);
3672 /* NOTE: This is relatively expensive, since this
3673 * operation is done for all elements in the list,
3674 * whereas at list generation this is done only
3675 * once for each flag entry.
3677 bitmask_init_bit(&flag[src->cj[j].cj >> jFlagShift], t);
3682 #if defined(__GNUC__) && !defined(__clang__) && !defined(__ICC) && __GNUC__ == 7
3683 /* Avoid gcc 7 avx512 loop vectorization bug (actually only needed with -mavx512f) */
3684 # pragma GCC push_options
3685 # pragma GCC optimize("no-tree-vectorize")
3688 /* Returns the number of cluster pairs that are in use summed over all lists */
3689 static int countClusterpairs(gmx::ArrayRef<const NbnxnPairlistCpu> pairlists)
3691 /* gcc 7 with -mavx512f can miss the contributions of 16 consecutive
3692 * elements to the sum calculated in this loop. Above we have disabled
3693 * loop vectorization to avoid this bug.
3696 for (const auto& pairlist : pairlists)
3698 ncjTotal += pairlist.ncjInUse;
3703 #if defined(__GNUC__) && !defined(__clang__) && !defined(__ICC) && __GNUC__ == 7
3704 # pragma GCC pop_options
3707 /* This routine re-balances the pairlists such that all are nearly equally
3708 * sized. Only whole i-entries are moved between lists. These are moved
3709 * between the ends of the lists, such that the buffer reduction cost should
3710 * not change significantly.
3711 * Note that all original reduction flags are currently kept. This can lead
3712 * to reduction of parts of the force buffer that could be avoided. But since
3713 * the original lists are quite balanced, this will only give minor overhead.
3715 static void rebalanceSimpleLists(gmx::ArrayRef<const NbnxnPairlistCpu> srcSet,
3716 gmx::ArrayRef<NbnxnPairlistCpu> destSet,
3717 gmx::ArrayRef<PairsearchWork> searchWork)
3719 const int ncjTotal = countClusterpairs(srcSet);
3720 const int numLists = srcSet.ssize();
3721 const int ncjTarget = (ncjTotal + numLists - 1) / numLists;
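/* Ceiling division: e.g., 10 cluster pairs over 4 lists give a target
 * of 3 per list, so the last list can end up slightly shorter.
 */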
3723 #pragma omp parallel num_threads(numLists)
3725 int t = gmx_omp_get_thread_num();
3727 int cjStart = ncjTarget * t;
3728 int cjEnd = ncjTarget * (t + 1);
3730 /* The destination pair-list for task/thread t */
3731 NbnxnPairlistCpu& dest = destSet[t];
3733 clear_pairlist(&dest);
3734 dest.na_cj = srcSet[0].na_cj;
3736 /* Note that the flags in the work struct (still) contain flags
3737 * for all entries that are present in srcSet->nbl[t].
3739 gmx_bitmask_t* flag = searchWork[t].buffer_flags.flag;
3741 int iFlagShift = getBufferFlagShift(dest.na_ci);
3742 int jFlagShift = getBufferFlagShift(dest.na_cj);
3745 for (int s = 0; s < numLists && cjGlobal < cjEnd; s++)
3747 const NbnxnPairlistCpu* src = &srcSet[s];
3749 if (cjGlobal + src->ncjInUse > cjStart)
3751 for (gmx::index i = 0; i < gmx::ssize(src->ci) && cjGlobal < cjEnd; i++)
3753 const nbnxn_ci_t* srcCi = &src->ci[i];
3754 int ncj = srcCi->cj_ind_end - srcCi->cj_ind_start;
3755 if (cjGlobal >= cjStart)
3757 /* If the source list is not our own, we need to set
3758 * extra flags (the template bool parameter).
3762 copySelectedListRange<true>(srcCi, src, &dest, flag, iFlagShift, jFlagShift, t);
3766 copySelectedListRange<false>(srcCi, src, &dest, flag, iFlagShift,
3775 cjGlobal += src->ncjInUse;
3779 dest.ncjInUse = dest.cj.size();
3783 const int ncjTotalNew = countClusterpairs(destSet);
3784 GMX_RELEASE_ASSERT(ncjTotalNew == ncjTotal,
3785 "The total size of the lists before and after rebalancing should match");
3789 /* Returns whether the pairlists are so imbalanced that it is worth rebalancing. */
3790 static bool checkRebalanceSimpleLists(gmx::ArrayRef<const NbnxnPairlistCpu> lists)
3792 int numLists = lists.ssize();
3795 for (int s = 0; s < numLists; s++)
3797 ncjMax = std::max(ncjMax, lists[s].ncjInUse);
3798 ncjTotal += lists[s].ncjInUse;
3802 fprintf(debug, "Pair-list ncjMax %d ncjTotal %d\n", ncjMax, ncjTotal);
3804 /* The rebalancing adds 3% extra time to the search. Heuristically we
3805 * determined that under common conditions the non-bonded kernel balance
3806 * improvement will outweigh this when the imbalance is more than 3%.
3807 * But this will, obviously, depend on search vs kernel time and nstlist.
3809 const real rebalanceTolerance = 1.03;
3811 return real(numLists * ncjMax) > real(ncjTotal) * rebalanceTolerance;
3814 /* Perform a count (linear) sort to sort the smaller lists to the end.
3815 * This avoids load imbalance on the GPU, as large lists will be
3816 * scheduled and executed first and the smaller lists later.
3817 * Load balancing between multi-processors only happens at the end
3818 * and there smaller lists lead to more effective load balancing.
3819 * The sorting is done on the cj4 count, not on the actual pair counts.
3820 * Not only does this make the sort faster, but it also results in
3821 * better load balancing than using a list sorted on exact load.
3822 * This function swaps the pointer in the pair list to avoid a copy operation.
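* Worked example with assumed sizes: three sci entries with 3, 1 and 2
* cj4 groups give m = (2*6)/3 = 4 and counts sort[1] = sort[2] = sort[3] = 1.
* Accumulating offsets from large to small sizes gives sort = {3, 2, 1, 0, 0},
* so the entries are placed in order of decreasing size: 3, 2, 1.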
3824 static void sort_sci(NbnxnPairlistGpu* nbl)
3826 if (nbl->cj4.size() <= nbl->sci.size())
3828 /* nsci = 0 or all sci have size 1, sorting won't change the order */
3832 NbnxnPairlistGpuWork& work = *nbl->work;
3834 /* We will distinguish differences up to double the average */
3835 const int m = static_cast<int>((2 * ssize(nbl->cj4)) / ssize(nbl->sci));
3837 /* Resize work.sci_sort so we can sort into it */
3838 work.sci_sort.resize(nbl->sci.size());
3840 std::vector<int>& sort = work.sortBuffer;
3841 /* Set up m + 1 entries in sort, initialized at 0 */
3843 sort.resize(m + 1, 0);
3844 /* Count the entries of each size */
3845 for (const nbnxn_sci_t& sci : nbl->sci)
3847 int i = std::min(m, sci.numJClusterGroups());
3850 /* Calculate the offset for each count */
3853 for (gmx::index i = m - 1; i >= 0; i--)
3856 sort[i] = sort[i + 1] + s0;
3860 /* Sort entries directly into place */
3861 gmx::ArrayRef<nbnxn_sci_t> sci_sort = work.sci_sort;
3862 for (const nbnxn_sci_t& sci : nbl->sci)
3864 int i = std::min(m, sci.numJClusterGroups());
3865 sci_sort[sort[i]++] = sci;
3868 /* Swap the sci pointers so we use the new, sorted list */
3869 std::swap(nbl->sci, work.sci_sort);
3872 /* Returns the i-zone range for pairlist construction for the given locality */
3873 static Range<int> getIZoneRange(const Nbnxm::GridSet::DomainSetup& domainSetup,
3874 const InteractionLocality locality)
3876 if (domainSetup.doTestParticleInsertion)
3878 /* With TPI we do grid 1, the inserted molecule, versus grid 0, the rest */
3881 else if (locality == InteractionLocality::Local)
3883 /* Local: only zone (grid) 0 vs 0 */
3888 /* Non-local: we need all i-zones */
3889 return { 0, int(domainSetup.zones->iZones.size()) };
3893 /* Returns the j-zone range for pairlist construction for the given locality and i-zone */
3894 static Range<int> getJZoneRange(const gmx_domdec_zones_t& ddZones,
3895 const InteractionLocality locality,
3898 if (locality == InteractionLocality::Local)
3900 /* Local: zone 0 vs 0 or with TPI 1 vs 0 */
3903 else if (iZone == 0)
3905 /* Non-local: we need to avoid the local (zone 0 vs 0) interactions */
3906 return { 1, *ddZones.iZones[iZone].jZoneRange.end() };
3910 /* Non-local with non-local i-zone: use all j-zones */
3911 return ddZones.iZones[iZone].jZoneRange;
3915 //! Prepares CPU lists produced by the search for dynamic pruning
3916 static void prepareListsForDynamicPruning(gmx::ArrayRef<NbnxnPairlistCpu> lists);
3918 void PairlistSet::constructPairlists(const Nbnxm::GridSet& gridSet,
3919 gmx::ArrayRef<PairsearchWork> searchWork,
3920 nbnxn_atomdata_t* nbat,
3921 const ListOfLists<int>& exclusions,
3922 const int minimumIlistCountForGpuBalancing,
3924 SearchCycleCounting* searchCycleCounting)
3926 const real rlist = params_.rlistOuter;
3928 int nsubpair_target;
3929 float nsubpair_tot_est;
3932 int np_tot, np_noq, np_hlj, nap;
3934 const int numLists = (isCpuType_ ? cpuLists_.size() : gpuLists_.size());
3938 fprintf(debug, "ns making %d nblists\n", numLists);
3941 nbat->bUseBufferFlags = (nbat->out.size() > 1);
3942 /* We should re-init the flags before making the first list */
3943 if (nbat->bUseBufferFlags && locality_ == InteractionLocality::Local)
3945 init_buffer_flags(&nbat->buffer_flags, nbat->numAtoms());
3948 if (!isCpuType_ && minimumIlistCountForGpuBalancing > 0)
3950 get_nsubpair_target(gridSet, locality_, rlist, minimumIlistCountForGpuBalancing,
3951 &nsubpair_target, &nsubpair_tot_est);
3955 nsubpair_target = 0;
3956 nsubpair_tot_est = 0;
3959 /* Clear all pair-lists */
3960 for (int th = 0; th < numLists; th++)
3964 clear_pairlist(&cpuLists_[th]);
3968 clear_pairlist(&gpuLists_[th]);
3971 if (params_.haveFep)
3973 clear_pairlist_fep(fepLists_[th].get());
3977 const gmx_domdec_zones_t& ddZones = *gridSet.domainSetup().zones;
3979 const auto iZoneRange = getIZoneRange(gridSet.domainSetup(), locality_);
3981 for (const int iZone : iZoneRange)
3983 const Grid& iGrid = gridSet.grids()[iZone];
3985 const auto jZoneRange = getJZoneRange(ddZones, locality_, iZone);
3987 for (int jZone : jZoneRange)
3989 const Grid& jGrid = gridSet.grids()[jZone];
3993 fprintf(debug, "ns search grid %d vs %d\n", iZone, jZone);
3996 searchCycleCounting->start(enbsCCsearch);
3998 ci_block = get_ci_block_size(iGrid, gridSet.domainSetup().haveMultipleDomains, numLists);
4000 /* With GPU: generate progressively smaller lists for
4001 * load balancing for local only or non-local with 2 zones.
4003 progBal = (locality_ == InteractionLocality::Local || ddZones.n <= 2);
4005 #pragma omp parallel for num_threads(numLists) schedule(static)
4006 for (int th = 0; th < numLists; th++)
4010 /* Re-init the thread-local work flag data before making
4011 * the first list (not an elegant conditional).
4013 if (nbat->bUseBufferFlags && (iZone == 0 && jZone == 0))
4015 init_buffer_flags(&searchWork[th].buffer_flags, nbat->numAtoms());
4018 if (combineLists_ && th > 0)
4020 GMX_ASSERT(!isCpuType_, "Can only combine GPU lists");
4022 clear_pairlist(&gpuLists_[th]);
4025 PairsearchWork& work = searchWork[th];
4027 work.cycleCounter.start();
4029 t_nblist* fepListPtr = (fepLists_.empty() ? nullptr : fepLists_[th].get());
4031 /* Divide the i cells equally over the pairlists */
4034 nbnxn_make_pairlist_part(gridSet, iGrid, jGrid, &work, nbat, exclusions, rlist,
4035 params_.pairlistType, ci_block, nbat->bUseBufferFlags,
4036 nsubpair_target, progBal, nsubpair_tot_est, th,
4037 numLists, &cpuLists_[th], fepListPtr);
4041 nbnxn_make_pairlist_part(gridSet, iGrid, jGrid, &work, nbat, exclusions, rlist,
4042 params_.pairlistType, ci_block, nbat->bUseBufferFlags,
4043 nsubpair_target, progBal, nsubpair_tot_est, th,
4044 numLists, &gpuLists_[th], fepListPtr);
4047 work.cycleCounter.stop();
4049 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
4051 searchCycleCounting->stop(enbsCCsearch);
4056 for (int th = 0; th < numLists; th++)
4058 inc_nrnb(nrnb, eNR_NBNXN_DIST2, searchWork[th].ndistc);
4062 const NbnxnPairlistCpu& nbl = cpuLists_[th];
4063 np_tot += nbl.cj.size();
4064 np_noq += nbl.work->ncj_noq;
4065 np_hlj += nbl.work->ncj_hlj;
4069 const NbnxnPairlistGpu& nbl = gpuLists_[th];
4070 /* This count ignores potential subsequent pair pruning */
4071 np_tot += nbl.nci_tot;
4076 nap = cpuLists_[0].na_ci * cpuLists_[0].na_cj;
4080 nap = gmx::square(gpuLists_[0].na_ci);
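/* np_tot counts all cluster pairs, np_noq those without Coulomb and
 * np_hlj those with only half LJ; the atom-pair counts below follow
 * as LJ+Q, LJ-only and Q-only pairs, where each half-LJ cluster pair
 * contributes nap/2 Q-only atom pairs.
 */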
4082 natpair_ljq_ = (np_tot - np_noq) * nap - np_hlj * nap / 2;
4083 natpair_lj_ = np_noq * nap;
4084 natpair_q_ = np_hlj * nap / 2;
4086 if (combineLists_ && numLists > 1)
4088 GMX_ASSERT(!isCpuType_, "Can only combine GPU lists");
4090 searchCycleCounting->start(enbsCCcombine);
4092 combine_nblists(gmx::constArrayRefFromArray(&gpuLists_[1], numLists - 1), &gpuLists_[0]);
4094 searchCycleCounting->stop(enbsCCcombine);
4101 if (numLists > 1 && checkRebalanceSimpleLists(cpuLists_))
4103 rebalanceSimpleLists(cpuLists_, cpuListsWork_, searchWork);
4105 /* Swap the sets of pair lists */
4106 cpuLists_.swap(cpuListsWork_);
4111 /* Sort the entries on size, large ones first */
4112 if (combineLists_ || gpuLists_.size() == 1)
4114 sort_sci(&gpuLists_[0]);
4118 #pragma omp parallel for num_threads(numLists) schedule(static)
4119 for (int th = 0; th < numLists; th++)
4123 sort_sci(&gpuLists_[th]);
4125 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
4130 if (nbat->bUseBufferFlags)
4132 reduce_buffer_flags(searchWork, numLists, &nbat->buffer_flags);
4135 if (gridSet.haveFep())
4137 /* Balance the free-energy lists over all the threads */
4138 balance_fep_lists(fepLists_, searchWork);
4143 /* This is a fresh list, so not pruned, stored using ci.
4144 * ciOuter is invalid at this point.
4146 GMX_ASSERT(cpuLists_[0].ciOuter.empty(), "ciOuter is invalid so it should be empty");
4149 /* If we have more than one list, they were either rebalanced (CPU)
4150 * or combined (GPU), so we should dump the final result to debug.
4154 if (isCpuType_ && cpuLists_.size() > 1)
4156 for (auto& cpuList : cpuLists_)
4158 print_nblist_statistics(debug, cpuList, gridSet, rlist);
4161 else if (!isCpuType_ && gpuLists_.size() > 1)
4163 print_nblist_statistics(debug, gpuLists_[0], gridSet, rlist);
4173 for (auto& cpuList : cpuLists_)
4175 print_nblist_ci_cj(debug, cpuList);
4180 print_nblist_sci_cj(debug, gpuLists_[0]);
4184 if (nbat->bUseBufferFlags)
4186 print_reduction_cost(&nbat->buffer_flags, numLists);
4190 if (params_.useDynamicPruning && isCpuType_)
4192 prepareListsForDynamicPruning(cpuLists_);
4196 void PairlistSets::construct(const InteractionLocality iLocality,
4197 PairSearch* pairSearch,
4198 nbnxn_atomdata_t* nbat,
4199 const ListOfLists<int>& exclusions,
4203 pairlistSet(iLocality).constructPairlists(pairSearch->gridSet(), pairSearch->work(), nbat,
4204 exclusions, minimumIlistCountForGpuBalancing_, nrnb,
4205 &pairSearch->cycleCounting_);
4207 if (iLocality == InteractionLocality::Local)
4209 outerListCreationStep_ = step;
4213 GMX_RELEASE_ASSERT(outerListCreationStep_ == step,
4214 "Outer list should be created at the same step as the inner list");
4217 /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
4218 if (iLocality == InteractionLocality::Local)
4220 pairSearch->cycleCounting_.searchCount_++;
4222 if (pairSearch->cycleCounting_.recordCycles_
4223 && (!pairSearch->gridSet().domainSetup().haveMultipleDomains || iLocality == InteractionLocality::NonLocal)
4224 && pairSearch->cycleCounting_.searchCount_ % 100 == 0)
4226 pairSearch->cycleCounting_.printCycles(stderr, pairSearch->work());
4230 void nonbonded_verlet_t::constructPairlist(const InteractionLocality iLocality,
4231 const ListOfLists<int>& exclusions,
4235 pairlistSets_->construct(iLocality, pairSearch_.get(), nbat.get(), exclusions, step, nrnb);
4239 /* Launch the transfer of the pairlist to the GPU.
4241 * NOTE: The launch overhead is currently not timed separately
4243 Nbnxm::gpu_init_pairlist(gpu_nbv, pairlistSets().pairlistSet(iLocality).gpuList(), iLocality);
4247 static void prepareListsForDynamicPruning(gmx::ArrayRef<NbnxnPairlistCpu> lists)
4249 /* TODO: Restructure the lists so we have actual outer and inner
4250 * list objects so we can set a single pointer instead of
4251 * swapping several pointers.
4254 for (auto& list : lists)
4256 /* The search produced a list in ci/cj.
4257 * Swap the list pointers so that the outer list ends up in ciOuter,cjOuter
4258 * and we can prune that to get an inner list in ci/cj.
4260 GMX_RELEASE_ASSERT(list.ciOuter.empty() && list.cjOuter.empty(),
4261 "The outer lists should be empty before preparation");
4263 std::swap(list.ci, list.ciOuter);
4264 std::swap(list.cj, list.cjOuter);