/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief
 * Implements the Grid class.
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_nbnxm
 */
#include "gmxpre.h"

#include "grid.h"

#include <climits>
#include <cmath>

#include <algorithm>
#include <array>

#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/updategroupscog.h"
#include "gromacs/mdtypes/forcerec.h" // only for GET_CGINFO_*
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "boundingboxes.h"
#include "gridsetdata.h"
#include "nbnxm_geometry.h"
#include "pairlistparams.h"
namespace Nbnxm
{

Grid::Geometry::Geometry(const PairlistType pairlistType) :
    isSimple(pairlistType != PairlistType::HierarchicalNxN),
    numAtomsICluster(IClusterSizePerListType[pairlistType]),
    numAtomsJCluster(JClusterSizePerListType[pairlistType]),
    numAtomsPerCell((isSimple ? 1 : c_gpuNumClusterPerCell) * numAtomsICluster),
    numAtomsICluster2Log(get_2log(numAtomsICluster))
{
}

Grid::Grid(const PairlistType pairlistType, const bool& haveFep) :
    geometry_(pairlistType), haveFep_(haveFep)
{
}
/*! \brief Returns the atom density (> 0) of a rectangular grid */
static real gridAtomDensity(int numAtoms, const rvec lowerCorner, const rvec upperCorner)
{
    rvec size;

    if (numAtoms == 0)
    {
        /* To avoid zero density we use a minimum of 1 atom */
        numAtoms = 1;
    }

    rvec_sub(upperCorner, lowerCorner, size);

    return static_cast<real>(numAtoms) / (size[XX] * size[YY] * size[ZZ]);
}
// Get approximate dimensions of each cell. Returns the length along X and Y.
static std::array<real, DIM - 1> getTargetCellLength(const Grid::Geometry& geometry, const real atomDensity)
{
    if (geometry.isSimple)
    {
        /* To minimize the number of zero interactions, we should make
         * the cell for the largest of the i/j cluster sizes cubic.
         */
        int numAtomsInCell = std::max(geometry.numAtomsICluster, geometry.numAtomsJCluster);

        /* Approximately cubic cells */
        real tlen = std::cbrt(numAtomsInCell / atomDensity);
        return { tlen, tlen };
    }
    else
    {
        /* Approximately cubic sub cells */
        real tlen = std::cbrt(geometry.numAtomsICluster / atomDensity);
        return { tlen * c_gpuNumClusterPerCellX, tlen * c_gpuNumClusterPerCellY };
    }
}
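
/* A worked example of the formula above (illustrative numbers, not a
 * configured value): at a typical liquid density of ~100 atoms/nm^3 with
 * 8-atom cells, the target edge length is cbrt(8/100) ~= 0.43 nm, which
 * gives approximately cubic cells of one cell's worth of atoms.
 */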
static int getMaxNumCells(const Grid::Geometry& geometry, const int numAtoms, const int numColumns)
{
    if (geometry.numAtomsJCluster <= geometry.numAtomsICluster)
    {
        return numAtoms / geometry.numAtomsPerCell + numColumns;
    }
    else
    {
        return numAtoms / geometry.numAtomsPerCell
               + numColumns * geometry.numAtomsJCluster / geometry.numAtomsICluster;
    }
}
void Grid::setDimensions(const int          ddZone,
                         const int          numAtoms,
                         gmx::RVec          lowerCorner,
                         gmx::RVec          upperCorner,
                         real               atomDensity,
                         const real         maxAtomGroupRadius,
                         const bool         haveFep,
                         gmx::PinningPolicy pinningPolicy)
{
    /* We allow passing lowerCorner=upperCorner, in which case we need to
     * create a finite sized bounding box to avoid division by zero.
     * We use a minimum size such that the volume fits in float with some
     * margin for computing and using the atom number density.
     */
    constexpr real c_minimumGridSize = 1e-10;
    for (int d = 0; d < DIM; d++)
    {
        GMX_ASSERT(upperCorner[d] >= lowerCorner[d],
                   "Upper corner should not be smaller than the lower corner");
        if (upperCorner[d] - lowerCorner[d] < c_minimumGridSize)
        {
            /* Ensure we apply a correction to the bounding box */
            const real correction =
                    std::max(std::abs(lowerCorner[d]) * GMX_REAL_EPS, 0.5_real * c_minimumGridSize);
            lowerCorner[d] -= correction;
            upperCorner[d] += correction;
        }
    }

    /* For the home zone we compute the density when it is not set (-1) or when it is 0 */
    if (ddZone == 0 && atomDensity <= 0)
    {
        atomDensity = gridAtomDensity(numAtoms, lowerCorner, upperCorner);
    }

    dimensions_.atomDensity        = atomDensity;
    dimensions_.maxAtomGroupRadius = maxAtomGroupRadius;

    rvec size;
    rvec_sub(upperCorner, lowerCorner, size);

    if (numAtoms > geometry_.numAtomsPerCell)
    {
        GMX_ASSERT(atomDensity > 0, "With one or more atoms, the density should be positive");

        /* target cell length */
        const std::array<real, DIM - 1> tlen = getTargetCellLength(geometry_, atomDensity);

        /* We round ncx and ncy down, because we get fewer cell pairs
         * in the pairlist when the fixed cell dimensions (x,y) are
         * larger than the variable one (z) than the other way around.
         */
        dimensions_.numCells[XX] = std::max(1, static_cast<int>(size[XX] / tlen[XX]));
        dimensions_.numCells[YY] = std::max(1, static_cast<int>(size[YY] / tlen[YY]));
    }
    else
    {
        dimensions_.numCells[XX] = 1;
        dimensions_.numCells[YY] = 1;
    }

    for (int d = 0; d < DIM - 1; d++)
    {
        dimensions_.cellSize[d]    = size[d] / dimensions_.numCells[d];
        dimensions_.invCellSize[d] = 1 / dimensions_.cellSize[d];
    }

    if (ddZone > 0)
    {
        /* This is a non-home zone, add an extra row of cells
         * for particles communicated for bonded interactions.
         * These can be beyond the cut-off. It doesn't matter where
         * they end up on the grid, but for performance it's better
         * if they don't end up in cells that can be within cut-off range.
         */
        dimensions_.numCells[XX]++;
        dimensions_.numCells[YY]++;
    }

    /* We need one additional cell entry for particles moved by DD */
    cxy_na_.resize(numColumns() + 1);
    cxy_ind_.resize(numColumns() + 2);
    changePinningPolicy(&cxy_na_, pinningPolicy);
    changePinningPolicy(&cxy_ind_, pinningPolicy);

    /* Worst case scenario of 1 atom in each last cell */
    const int maxNumCells = getMaxNumCells(geometry_, numAtoms, numColumns());

    if (!geometry_.isSimple)
    {
        numClusters_.resize(maxNumCells);
    }
    bbcz_.resize(maxNumCells);

    /* This resize also zeros the contents, which avoids possible
     * floating-point exceptions in SIMD with the unused bb elements.
     */
    if (geometry_.isSimple)
    {
        bb_.resize(maxNumCells);
    }
    else
    {
#if NBNXN_BBXXXX
        pbb_.resize(packedBoundingBoxesIndex(maxNumCells * c_gpuNumClusterPerCell));
#else
        bb_.resize(maxNumCells * c_gpuNumClusterPerCell);
#endif
    }

    if (geometry_.numAtomsJCluster == geometry_.numAtomsICluster)
    {
        bbj_ = bb_;
    }
    else
    {
        GMX_ASSERT(geometry_.isSimple, "Only CPU lists should have different i/j cluster sizes");

        bbjStorage_.resize(maxNumCells * geometry_.numAtomsICluster / geometry_.numAtomsJCluster);
        bbj_ = bbjStorage_;
    }

    flags_.resize(maxNumCells);
    if (haveFep)
    {
        fep_.resize(maxNumCells * geometry_.numAtomsPerCell / geometry_.numAtomsICluster);
    }

    copy_rvec(lowerCorner, dimensions_.lowerCorner);
    copy_rvec(upperCorner, dimensions_.upperCorner);
    copy_rvec(size, dimensions_.gridSize);
}
/* We need to sort particles in grid columns on z-coordinate.
 * As particles are very often distributed homogeneously, we use a sorting
 * algorithm similar to pigeonhole sort. We multiply the z-coordinate
 * by a factor, cast to an int and try to store in that hole. If the hole
 * is full, we move this or another particle. A second pass is needed to make
 * contiguous elements. c_sortGridRatio is the ratio of holes to particles.
 * 4 is the optimal value for a homogeneous particle distribution and allows
 * for an O(#particles) sort up to distributions where all particles are
 * concentrated in 1/4 of the space. No O(N log N) fallback is implemented,
 * as it can be expensive to detect inhomogeneous particle distributions.
 */

/*! \brief Ratio of grid cells to atoms */
static constexpr int c_sortGridRatio = 4;
/*! \brief Maximum ratio of holes used: in the worst case all particles
 * end up in the last hole and we need num. atoms extra holes at the end.
 */
static constexpr int c_sortGridMaxSizeFactor = c_sortGridRatio + 1;
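
/* A worked example of the hole mapping above (illustrative numbers only):
 * with n_per_h = 16 expected atoms in a sorting range of height h and
 * c_sortGridRatio = 4, there are 16*4 = 64 holes, so an atom at fractional
 * height f = (z - h0)/h is first tried in hole (int)(f*64). Allocating
 * n*c_sortGridMaxSizeFactor = n*5 work elements is always sufficient,
 * since at most n extra holes past the last one can be used.
 */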
/*! \brief Sorts particle index a on coordinates x along dim.
 *
 * Backwards tells if we want decreasing instead of increasing coordinates.
 * h0 is the minimum of the coordinate range.
 * invh is the 1/length of the sorting range.
 * n_per_h (>=n) is the expected average number of particles per 1/invh.
 * sort is the sorting work array.
 * sort should have a size of at least n_per_h*c_sortGridRatio + n,
 * or easier, allocate at least n*c_sortGridMaxSizeFactor elements.
 */
static void sort_atoms(int      dim,
                       gmx_bool Backwards,
                       int gmx_unused dd_zone,
                       bool gmx_unused                relevantAtomsAreWithinGridBounds,
                       int*                           a,
                       int                            n,
                       gmx::ArrayRef<const gmx::RVec> x,
                       real                           h0,
                       real                           invh,
                       int                            n_per_h,
                       gmx::ArrayRef<int>             sort)
{
    if (n <= 1)
    {
        /* Nothing to do */
        return;
    }

    GMX_ASSERT(n <= n_per_h, "We require n <= n_per_h");

    /* Transform the inverse range height into the inverse hole height */
    invh *= n_per_h * c_sortGridRatio;

    /* Set nsort to the maximum possible number of holes used.
     * In the worst case all n elements end up in the last bin.
     */
    int nsort = n_per_h * c_sortGridRatio + n;

    /* Determine the index range used, so we can limit it for the second pass */
    int zi_min = INT_MAX;
    int zi_max = -1;

    /* Sort the particles using a simple index sort */
    for (int i = 0; i < n; i++)
    {
        /* The cast takes care of floating-point rounding effects below zero.
         * This code assumes particles are less than 1/c_sortGridRatio
         * times the box height out of the box.
         */
        int zi = static_cast<int>((x[a[i]][dim] - h0) * invh);

        /* As we can have rounding effects, we use > instead of >= here */
        if (relevantAtomsAreWithinGridBounds && (zi < 0 || (dd_zone == 0 && zi > n_per_h * c_sortGridRatio)))
        {
            gmx_fatal(FARGS,
                      "(int)((x[%d][%c]=%f - %f)*%f) = %d, not in 0 - %d*%d\n",
                      a[i],
                      'x' + dim,
                      x[a[i]][dim],
                      h0,
                      invh,
                      zi,
                      n_per_h,
                      c_sortGridRatio);
        }
        if (zi < 0)
        {
            zi = 0;
        }

        /* In a non-local domain, particles communicated for bonded interactions
         * can be far beyond the grid size, which is set by the non-bonded
         * cut-off distance. We sort such particles into the last cell.
         */
        if (zi > n_per_h * c_sortGridRatio)
        {
            zi = n_per_h * c_sortGridRatio;
        }

        /* Ideally this particle should go in sort cell zi,
         * but that might already be in use,
         * in that case find the first empty cell higher up.
         */
        if (sort[zi] < 0)
        {
            sort[zi] = a[i];
            zi_min   = std::min(zi_min, zi);
            zi_max   = std::max(zi_max, zi);
        }
        else
        {
            /* We have multiple atoms in the same sorting slot.
             * Sort on real z for minimal bounding box size.
             * There is an extra check for identical z to ensure
             * well-defined output order, independent of input order,
             * to ensure binary reproducibility after restarts.
             */
            while (sort[zi] >= 0
                   && (x[a[i]][dim] > x[sort[zi]][dim]
                       || (x[a[i]][dim] == x[sort[zi]][dim] && a[i] > sort[zi])))
            {
                zi++;
            }

            if (sort[zi] >= 0)
            {
                /* Shift all elements by one slot until we find an empty slot */
                int cp  = sort[zi];
                int zim = zi + 1;
                while (sort[zim] >= 0)
                {
                    int tmp   = sort[zim];
                    sort[zim] = cp;
                    cp        = tmp;
                    zim++;
                }
                sort[zim] = cp;
                zi_max    = std::max(zi_max, zim);
            }
            sort[zi] = a[i];
            zi_max   = std::max(zi_max, zi);
        }
    }

    /* Second pass: make the sorted indices contiguous again */
    int cp = 0;
    if (!Backwards)
    {
        for (int zi = 0; zi < nsort; zi++)
        {
            if (sort[zi] >= 0)
            {
                a[cp++]  = sort[zi];
                sort[zi] = -1;
            }
        }
    }
    else
    {
        for (int zi = zi_max; zi >= zi_min; zi--)
        {
            if (sort[zi] >= 0)
            {
                a[cp++]  = sort[zi];
                sort[zi] = -1;
            }
        }
    }
    if (cp != n)
    {
        gmx_incons("Lost particles while sorting");
    }
}
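
/* A usage sketch (mirroring the per-column z sort performed further below;
 * the variable names are those used there): sort the numAtoms indices of
 * one grid column in increasing z over numCellsZ*numAtomsPerCell holes:
 *
 *   sort_atoms(ZZ, FALSE, ddZone, relevantAtomsAreWithinGridBounds,
 *              atomIndices.data() + atomOffset, numAtoms, x,
 *              dimensions_.lowerCorner[ZZ], 1.0 / dimensions_.gridSize[ZZ],
 *              numCellsZ * numAtomsPerCell, sort_work);
 */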
#if GMX_DOUBLE
//! Returns a value up to one least significant float bit smaller than x
static double R2F_D(const float x)
{
    return static_cast<float>(x >= 0 ? (1 - GMX_FLOAT_EPS) * x : (1 + GMX_FLOAT_EPS) * x);
}
//! Returns a value up to one least significant float bit larger than x
static double R2F_U(const float x)
{
    return static_cast<float>(x >= 0 ? (1 + GMX_FLOAT_EPS) * x : (1 - GMX_FLOAT_EPS) * x);
}
#else
//! Returns x
static float R2F_D(const float x)
{
    return x;
}
//! Returns x
static float R2F_U(const float x)
{
    return x;
}
#endif
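
/* Why the directed rounding above: bounding boxes are stored in single
 * precision even in double-precision builds. Rounding each lower bound
 * down and each upper bound up by one float ulp keeps the stored box
 * conservative: a distance check against it can give a false positive,
 * which only costs performance, but never a false negative.
 */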
//! Computes the bounding box for na coordinates in order x,y,z, bb order xyz0
static void calc_bounding_box(int na, int stride, const real* x, BoundingBox* bb)
{
    int i = 0;

    real xl = x[i + XX];
    real xh = x[i + XX];
    real yl = x[i + YY];
    real yh = x[i + YY];
    real zl = x[i + ZZ];
    real zh = x[i + ZZ];
    i += stride;
    for (int j = 1; j < na; j++)
    {
        xl = std::min(xl, x[i + XX]);
        xh = std::max(xh, x[i + XX]);
        yl = std::min(yl, x[i + YY]);
        yh = std::max(yh, x[i + YY]);
        zl = std::min(zl, x[i + ZZ]);
        zh = std::max(zh, x[i + ZZ]);
        i += stride;
    }
    /* Note: possible double to float conversion here */
    bb->lower.x = R2F_D(xl);
    bb->lower.y = R2F_D(yl);
    bb->lower.z = R2F_D(zl);
    bb->upper.x = R2F_U(xh);
    bb->upper.y = R2F_U(yh);
    bb->upper.z = R2F_U(zh);
}
/*! \brief Computes the bounding box for na packed coordinates, bb order xyz0 */
static void calc_bounding_box_x_x4(int na, const real* x, BoundingBox* bb)
{
    real xl = x[XX * c_packX4];
    real xh = x[XX * c_packX4];
    real yl = x[YY * c_packX4];
    real yh = x[YY * c_packX4];
    real zl = x[ZZ * c_packX4];
    real zh = x[ZZ * c_packX4];
    for (int j = 1; j < na; j++)
    {
        xl = std::min(xl, x[j + XX * c_packX4]);
        xh = std::max(xh, x[j + XX * c_packX4]);
        yl = std::min(yl, x[j + YY * c_packX4]);
        yh = std::max(yh, x[j + YY * c_packX4]);
        zl = std::min(zl, x[j + ZZ * c_packX4]);
        zh = std::max(zh, x[j + ZZ * c_packX4]);
    }
    /* Note: possible double to float conversion here */
    bb->lower.x = R2F_D(xl);
    bb->lower.y = R2F_D(yl);
    bb->lower.z = R2F_D(zl);
    bb->upper.x = R2F_U(xh);
    bb->upper.y = R2F_U(yh);
    bb->upper.z = R2F_U(zh);
}
/*! \brief Computes the bounding box for na packed coordinates, bb order xyz0 */
static void calc_bounding_box_x_x8(int na, const real* x, BoundingBox* bb)
{
    real xl = x[XX * c_packX8];
    real xh = x[XX * c_packX8];
    real yl = x[YY * c_packX8];
    real yh = x[YY * c_packX8];
    real zl = x[ZZ * c_packX8];
    real zh = x[ZZ * c_packX8];
    for (int j = 1; j < na; j++)
    {
        xl = std::min(xl, x[j + XX * c_packX8]);
        xh = std::max(xh, x[j + XX * c_packX8]);
        yl = std::min(yl, x[j + YY * c_packX8]);
        yh = std::max(yh, x[j + YY * c_packX8]);
        zl = std::min(zl, x[j + ZZ * c_packX8]);
        zh = std::max(zh, x[j + ZZ * c_packX8]);
    }
    /* Note: possible double to float conversion here */
    bb->lower.x = R2F_D(xl);
    bb->lower.y = R2F_D(yl);
    bb->lower.z = R2F_D(zl);
    bb->upper.x = R2F_U(xh);
    bb->upper.y = R2F_U(yh);
    bb->upper.z = R2F_U(zh);
}
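
/* Memory layout sketch for the packed coordinate formats above: with
 * c_packX4 = 4, coordinates are stored per group of 4 atoms as
 *   x0 x1 x2 x3  y0 y1 y2 y3  z0 z1 z2 z3  x4 x5 ...
 * so within a pack, component d of atom j is at index j + d*c_packX4,
 * and one aligned 4-wide SIMD load fetches one component of 4 atoms.
 */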
/*! \brief Computes the bounding box for na packed coordinates, bb order xyz0 */
gmx_unused static void calc_bounding_box_x_x4_halves(int na, const real* x, BoundingBox* bb, BoundingBox* bbj)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    calc_bounding_box_x_x4(std::min(na, 2), x, bbj);

    if (na > 2)
    {
        calc_bounding_box_x_x4(std::min(na - 2, 2), x + (c_packX4 >> 1), bbj + 1);
    }
    else
    {
        /* Set the "empty" bounding box to the same as the first one,
         * so we don't need to treat special cases in the rest of the code.
         */
#if NBNXN_SEARCH_BB_SIMD4
        store4(bbj[1].lower.ptr(), load4(bbj[0].lower.ptr()));
        store4(bbj[1].upper.ptr(), load4(bbj[0].upper.ptr()));
#else
        bbj[1] = bbj[0];
#endif
    }

#if NBNXN_SEARCH_BB_SIMD4
    store4(bb->lower.ptr(), min(load4(bbj[0].lower.ptr()), load4(bbj[1].lower.ptr())));
    store4(bb->upper.ptr(), max(load4(bbj[0].upper.ptr()), load4(bbj[1].upper.ptr())));
#else
    bb->lower = BoundingBox::Corner::min(bbj[0].lower, bbj[1].lower);
    bb->upper = BoundingBox::Corner::max(bbj[0].upper, bbj[1].upper);
#endif
}
#if NBNXN_BBXXXX

/*! \brief Computes the bounding box for na coordinates in order xyz, bb order xxxxyyyyzzzz */
static void calc_bounding_box_xxxx(int na, int stride, const real* x, float* bb)
{
    int i = 0;

    real xl = x[i + XX];
    real xh = x[i + XX];
    real yl = x[i + YY];
    real yh = x[i + YY];
    real zl = x[i + ZZ];
    real zh = x[i + ZZ];
    i += stride;
    for (int j = 1; j < na; j++)
    {
        xl = std::min(xl, x[i + XX]);
        xh = std::max(xh, x[i + XX]);
        yl = std::min(yl, x[i + YY]);
        yh = std::max(yh, x[i + YY]);
        zl = std::min(zl, x[i + ZZ]);
        zh = std::max(zh, x[i + ZZ]);
        i += stride;
    }
    /* Note: possible double to float conversion here */
    bb[0 * c_packedBoundingBoxesDimSize] = R2F_D(xl);
    bb[1 * c_packedBoundingBoxesDimSize] = R2F_D(yl);
    bb[2 * c_packedBoundingBoxesDimSize] = R2F_D(zl);
    bb[3 * c_packedBoundingBoxesDimSize] = R2F_U(xh);
    bb[4 * c_packedBoundingBoxesDimSize] = R2F_U(yh);
    bb[5 * c_packedBoundingBoxesDimSize] = R2F_U(zh);
}

#endif /* NBNXN_BBXXXX */
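
/* Layout sketch for the packed "xxxx" bounding-box format used above:
 * boxes are stored in packs of c_packedBoundingBoxesDimSize boxes as
 *   xlo... ylo... zlo... xhi... yhi... zhi...
 * (one run per corner component), so a single SIMD4 load fetches the
 * same component of several boxes at once in the pairlist kernels.
 */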
#if NBNXN_SEARCH_SIMD4_FLOAT_X_BB

/*! \brief Computes the bounding box for na coordinates in order xyz?, bb order xyz0 */
static void calc_bounding_box_simd4(int na, const float* x, BoundingBox* bb)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    static_assert(sizeof(BoundingBox::Corner) == GMX_SIMD4_WIDTH * sizeof(float),
                  "The Corner struct should hold exactly 4 floats");

    Simd4Float bb_0_S = load4(x);
    Simd4Float bb_1_S = bb_0_S;

    for (int i = 1; i < na; i++)
    {
        const Simd4Float x_S = load4(x + i * GMX_SIMD4_WIDTH);
        bb_0_S               = min(bb_0_S, x_S);
        bb_1_S               = max(bb_1_S, x_S);
    }

    store4(bb->lower.ptr(), bb_0_S);
    store4(bb->upper.ptr(), bb_1_S);
}

#    if NBNXN_BBXXXX

/*! \brief Computes the bounding box for na coordinates in order xyz?, bb order xxxxyyyyzzzz */
static void calc_bounding_box_xxxx_simd4(int na, const float* x, BoundingBox* bb_work_aligned, real* bb)
{
    calc_bounding_box_simd4(na, x, bb_work_aligned);

    bb[0 * c_packedBoundingBoxesDimSize] = bb_work_aligned->lower.x;
    bb[1 * c_packedBoundingBoxesDimSize] = bb_work_aligned->lower.y;
    bb[2 * c_packedBoundingBoxesDimSize] = bb_work_aligned->lower.z;
    bb[3 * c_packedBoundingBoxesDimSize] = bb_work_aligned->upper.x;
    bb[4 * c_packedBoundingBoxesDimSize] = bb_work_aligned->upper.y;
    bb[5 * c_packedBoundingBoxesDimSize] = bb_work_aligned->upper.z;
}

#    endif /* NBNXN_BBXXXX */

#endif /* NBNXN_SEARCH_SIMD4_FLOAT_X_BB */
/*! \brief Combines pairs of consecutive bounding boxes */
static void combine_bounding_box_pairs(const Grid&                      grid,
                                       gmx::ArrayRef<const BoundingBox> bb,
                                       gmx::ArrayRef<BoundingBox>       bbj)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    for (int i = 0; i < grid.numColumns(); i++)
    {
        /* Starting bb in a column is expected to be 2-aligned */
        const int sc2 = grid.firstCellInColumn(i) >> 1;
        /* For odd numbers skip the last bb here */
        const int nc2 = (grid.numAtomsInColumn(i) + 3) >> (2 + 1);
        for (int c2 = sc2; c2 < sc2 + nc2; c2++)
        {
#if NBNXN_SEARCH_BB_SIMD4
            Simd4Float min_S, max_S;

            min_S = min(load4(bb[c2 * 2 + 0].lower.ptr()), load4(bb[c2 * 2 + 1].lower.ptr()));
            max_S = max(load4(bb[c2 * 2 + 0].upper.ptr()), load4(bb[c2 * 2 + 1].upper.ptr()));
            store4(bbj[c2].lower.ptr(), min_S);
            store4(bbj[c2].upper.ptr(), max_S);
#else
            bbj[c2].lower = BoundingBox::Corner::min(bb[c2 * 2 + 0].lower, bb[c2 * 2 + 1].lower);
            bbj[c2].upper = BoundingBox::Corner::max(bb[c2 * 2 + 0].upper, bb[c2 * 2 + 1].upper);
#endif
        }
        if (((grid.numAtomsInColumn(i) + 3) >> 2) & 1)
        {
            /* The bb count in this column is odd: duplicate the last bb */
            const int c2 = sc2 + nc2;

            bbj[c2].lower = bb[c2 * 2].lower;
            bbj[c2].upper = bb[c2 * 2].upper;
        }
    }
}
/*! \brief Prints the average bb size, used for debug output */
static void print_bbsizes_simple(FILE* fp, const Grid& grid)
{
    dvec ba;
    clear_dvec(ba);
    for (int c = 0; c < grid.numCells(); c++)
    {
        const BoundingBox& bb = grid.iBoundingBoxes()[c];
        ba[XX] += bb.upper.x - bb.lower.x;
        ba[YY] += bb.upper.y - bb.lower.y;
        ba[ZZ] += bb.upper.z - bb.lower.z;
    }
    dsvmul(1.0 / grid.numCells(), ba, ba);

    const Grid::Dimensions& dims = grid.dimensions();

    const real avgCellSizeZ =
            (dims.atomDensity > 0 ? grid.geometry().numAtomsICluster
                                            / (dims.atomDensity * dims.cellSize[XX] * dims.cellSize[YY])
                                  : 0.0);

    fprintf(fp,
            "ns bb: grid %4.2f %4.2f %4.2f abs %4.2f %4.2f %4.2f rel %4.2f %4.2f %4.2f\n",
            dims.cellSize[XX],
            dims.cellSize[YY],
            avgCellSizeZ,
            ba[XX],
            ba[YY],
            ba[ZZ],
            ba[XX] * dims.invCellSize[XX],
            ba[YY] * dims.invCellSize[YY],
            dims.atomDensity > 0 ? ba[ZZ] / avgCellSizeZ : 0.0);
}
/*! \brief Prints the average bb size, used for debug output */
static void print_bbsizes_supersub(FILE* fp, const Grid& grid)
{
    int  ns = 0;
    dvec ba;
    clear_dvec(ba);

    for (int c = 0; c < grid.numCells(); c++)
    {
#if NBNXN_BBXXXX
        for (int s = 0; s < grid.numClustersPerCell()[c]; s += c_packedBoundingBoxesDimSize)
        {
            int  cs_w          = (c * c_gpuNumClusterPerCell + s) / c_packedBoundingBoxesDimSize;
            auto boundingBoxes = grid.packedBoundingBoxes().subArray(
                    cs_w * c_packedBoundingBoxesSize, c_packedBoundingBoxesSize);
            for (int i = 0; i < c_packedBoundingBoxesDimSize; i++)
            {
                for (int d = 0; d < DIM; d++)
                {
                    ba[d] += boundingBoxes[(DIM + d) * c_packedBoundingBoxesDimSize + i]
                             - boundingBoxes[(0 + d) * c_packedBoundingBoxesDimSize + i];
                }
            }
        }
#else
        for (int s = 0; s < grid.numClustersPerCell()[c]; s++)
        {
            const BoundingBox& bb = grid.iBoundingBoxes()[c * c_gpuNumClusterPerCell + s];
            ba[XX] += bb.upper.x - bb.lower.x;
            ba[YY] += bb.upper.y - bb.lower.y;
            ba[ZZ] += bb.upper.z - bb.lower.z;
        }
#endif
        ns += grid.numClustersPerCell()[c];
    }
    dsvmul(1.0 / ns, ba, ba);

    const Grid::Dimensions& dims = grid.dimensions();
    const real              avgClusterSizeZ =
            (dims.atomDensity > 0 ? grid.geometry().numAtomsPerCell
                                            / (dims.atomDensity * dims.cellSize[XX]
                                               * dims.cellSize[YY] * c_gpuNumClusterPerCellZ)
                                  : 0.0);

    fprintf(fp,
            "ns bb: grid %4.2f %4.2f %4.2f abs %4.2f %4.2f %4.2f rel %4.2f %4.2f %4.2f\n",
            dims.cellSize[XX] / c_gpuNumClusterPerCellX,
            dims.cellSize[YY] / c_gpuNumClusterPerCellY,
            avgClusterSizeZ,
            ba[XX],
            ba[YY],
            ba[ZZ],
            ba[XX] * c_gpuNumClusterPerCellX * dims.invCellSize[XX],
            ba[YY] * c_gpuNumClusterPerCellY * dims.invCellSize[YY],
            dims.atomDensity > 0 ? ba[ZZ] / avgClusterSizeZ : 0.0);
}
/*! \brief Set non-bonded interaction flags for the current cluster.
 *
 * Sorts atoms on LJ coefficients: !=0 first, ==0 at the end.
 */
static void sort_cluster_on_flag(int                numAtomsInCluster,
                                 int                atomStart,
                                 int                atomEnd,
                                 const int*         atinfo,
                                 gmx::ArrayRef<int> order,
                                 int*               flags)
{
    constexpr int c_maxNumAtomsInCluster = 8;
    int           sort1[c_maxNumAtomsInCluster];
    int           sort2[c_maxNumAtomsInCluster];

    GMX_ASSERT(numAtomsInCluster <= c_maxNumAtomsInCluster,
               "Need to increase c_maxNumAtomsInCluster to support larger clusters");

    *flags = 0;

    int subc = 0;
    for (int s = atomStart; s < atomEnd; s += numAtomsInCluster)
    {
        /* Make lists for this (sub-)cell on atoms with and without LJ */
        int      n1       = 0;
        int      n2       = 0;
        gmx_bool haveQ    = FALSE;
        int      a_lj_max = -1;
        for (int a = s; a < std::min(s + numAtomsInCluster, atomEnd); a++)
        {
            haveQ = haveQ || GET_CGINFO_HAS_Q(atinfo[order[a]]);

            if (GET_CGINFO_HAS_VDW(atinfo[order[a]]))
            {
                sort1[n1++] = order[a];
                a_lj_max    = a;
            }
            else
            {
                sort2[n2++] = order[a];
            }
        }

        /* If we don't have atoms with LJ, there's nothing to sort */
        if (n1 > 0)
        {
            *flags |= NBNXN_CI_DO_LJ(subc);

            if (2 * n1 <= numAtomsInCluster)
            {
                /* Only sort when strictly necessary. Ordering particles
                 * can lead to less accurate summation due to rounding,
                 * both for LJ and Coulomb interactions.
                 */
                if (2 * (a_lj_max - s) >= numAtomsInCluster)
                {
                    for (int i = 0; i < n1; i++)
                    {
                        order[atomStart + i] = sort1[i];
                    }
                    for (int j = 0; j < n2; j++)
                    {
                        order[atomStart + n1 + j] = sort2[j];
                    }
                }

                *flags |= NBNXN_CI_HALF_LJ(subc);
            }
        }
        if (haveQ)
        {
            *flags |= NBNXN_CI_DO_COUL(subc);
        }
        subc++;
    }
}
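
/* A reading sketch for the flags set above (hypothetical kernel-side use;
 * each macro expands to a bit mask for i-cluster subc):
 *
 *   if (flags & NBNXN_CI_DO_LJ(subc))   { ... }  // cluster has LJ atoms
 *   if (flags & NBNXN_CI_HALF_LJ(subc)) { ... }  // only the first half has LJ
 *   if (flags & NBNXN_CI_DO_COUL(subc)) { ... }  // cluster has charges
 *
 * This lets a pair kernel branch once per cluster instead of per atom.
 */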
/*! \brief Fill a pair search cell with atoms.
 *
 * Potentially sorts atoms and sets the interaction flags.
 */
void Grid::fillCell(GridSetData*                   gridSetData,
                    nbnxn_atomdata_t*              nbat,
                    int                            atomStart,
                    int                            atomEnd,
                    const int*                     atinfo,
                    gmx::ArrayRef<const gmx::RVec> x,
                    BoundingBox gmx_unused* bb_work_aligned)
{
    const int numAtoms = atomEnd - atomStart;

    const gmx::ArrayRef<int>& cells       = gridSetData->cells;
    const gmx::ArrayRef<int>& atomIndices = gridSetData->atomIndices;

    if (geometry_.isSimple)
    {
        /* Note that non-local grids are already sorted.
         * Then sort_cluster_on_flag will only set the flags and the sorting
         * will not affect the atom order.
         */
        sort_cluster_on_flag(geometry_.numAtomsICluster,
                             atomStart,
                             atomEnd,
                             atinfo,
                             atomIndices,
                             flags_.data() + atomToCluster(atomStart) - cellOffset_);
    }

    if (haveFep_)
    {
        /* Set the fep flag for perturbed atoms in this (sub-)cell */

        /* The grid-local cluster/(sub-)cell index */
        int cell = atomToCluster(atomStart)
                   - cellOffset_ * (geometry_.isSimple ? 1 : c_gpuNumClusterPerCell);
        fep_[cell] = 0;
        for (int at = atomStart; at < atomEnd; at++)
        {
            if (atomIndices[at] >= 0 && GET_CGINFO_FEP(atinfo[atomIndices[at]]))
            {
                fep_[cell] |= (1 << (at - atomStart));
            }
        }
    }

    /* Now we have sorted the atoms, set the cell indices */
    for (int at = atomStart; at < atomEnd; at++)
    {
        cells[atomIndices[at]] = at;
    }

    copy_rvec_to_nbat_real(atomIndices.data() + atomStart,
                           numAtoms,
                           geometry_.numAtomsICluster,
                           as_rvec_array(x.data()),
                           nbat->XFormat,
                           nbat->x().data(),
                           atomStart);

    if (nbat->XFormat == nbatX4)
    {
        /* Store the bounding boxes as xyz.xyz. */
        size_t       offset = atomToCluster(atomStart - cellOffset_ * geometry_.numAtomsICluster);
        BoundingBox* bb_ptr = bb_.data() + offset;

#if GMX_SIMD && GMX_SIMD_REAL_WIDTH == 2
        if (2 * geometry_.numAtomsJCluster == geometry_.numAtomsICluster)
        {
            calc_bounding_box_x_x4_halves(numAtoms,
                                          nbat->x().data() + atom_to_x_index<c_packX4>(atomStart),
                                          bb_ptr,
                                          bbj_.data() + offset * 2);
        }
        else
#endif
        {
            calc_bounding_box_x_x4(numAtoms, nbat->x().data() + atom_to_x_index<c_packX4>(atomStart), bb_ptr);
        }
    }
    else if (nbat->XFormat == nbatX8)
    {
        /* Store the bounding boxes as xyz.xyz. */
        size_t       offset = atomToCluster(atomStart - cellOffset_ * geometry_.numAtomsICluster);
        BoundingBox* bb_ptr = bb_.data() + offset;

        calc_bounding_box_x_x8(numAtoms, nbat->x().data() + atom_to_x_index<c_packX8>(atomStart), bb_ptr);
    }
#if NBNXN_BBXXXX
    else if (!geometry_.isSimple)
    {
        /* Store the bounding boxes in a format convenient
         * for SIMD4 calculations: xxxxyyyyzzzz...
         */
        const int clusterIndex = ((atomStart - cellOffset_ * geometry_.numAtomsPerCell)
                                  >> geometry_.numAtomsICluster2Log);
        float*    pbb_ptr      = pbb_.data() + packedBoundingBoxesIndex(clusterIndex)
                        + (clusterIndex & (c_packedBoundingBoxesDimSize - 1));

#    if NBNXN_SEARCH_SIMD4_FLOAT_X_BB
        if (nbat->XFormat == nbatXYZQ)
        {
            GMX_ASSERT(bb_work_aligned != nullptr, "Must have valid aligned work structure");
            calc_bounding_box_xxxx_simd4(
                    numAtoms, nbat->x().data() + atomStart * nbat->xstride, bb_work_aligned, pbb_ptr);
        }
        else
#    endif
        {
            calc_bounding_box_xxxx(
                    numAtoms, nbat->xstride, nbat->x().data() + atomStart * nbat->xstride, pbb_ptr);
        }
        if (gmx_debug_at)
        {
            fprintf(debug,
                    "cell %4d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
                    atomToCluster(atomStart),
                    pbb_ptr[0 * c_packedBoundingBoxesDimSize],
                    pbb_ptr[3 * c_packedBoundingBoxesDimSize],
                    pbb_ptr[1 * c_packedBoundingBoxesDimSize],
                    pbb_ptr[4 * c_packedBoundingBoxesDimSize],
                    pbb_ptr[2 * c_packedBoundingBoxesDimSize],
                    pbb_ptr[5 * c_packedBoundingBoxesDimSize]);
        }
    }
#endif
    else
    {
        /* Store the bounding boxes as xyz.xyz. */
        BoundingBox* bb_ptr =
                bb_.data() + atomToCluster(atomStart - cellOffset_ * geometry_.numAtomsPerCell);

        calc_bounding_box(numAtoms, nbat->xstride, nbat->x().data() + atomStart * nbat->xstride, bb_ptr);

        if (gmx_debug_at)
        {
            int bbo = atomToCluster(atomStart - cellOffset_ * geometry_.numAtomsPerCell);
            fprintf(debug,
                    "cell %4d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
                    atomToCluster(atomStart),
                    bb_[bbo].lower.x,
                    bb_[bbo].lower.y,
                    bb_[bbo].lower.z,
                    bb_[bbo].upper.x,
                    bb_[bbo].upper.y,
                    bb_[bbo].upper.z);
        }
    }
}
void Grid::sortColumnsCpuGeometry(GridSetData*                   gridSetData,
                                  int                            ddZone,
                                  const int*                     atinfo,
                                  gmx::ArrayRef<const gmx::RVec> x,
                                  nbnxn_atomdata_t*              nbat,
                                  const gmx::Range<int>          columnRange,
                                  gmx::ArrayRef<int>             sort_work)
{
    if (debug)
    {
        fprintf(debug,
                "cell_offset %d sorting columns %d - %d\n",
                cellOffset_,
                *columnRange.begin(),
                *columnRange.end());
    }

    const bool relevantAtomsAreWithinGridBounds = (dimensions_.maxAtomGroupRadius == 0);

    const int numAtomsPerCell = geometry_.numAtomsPerCell;

    /* Sort the atoms within each x,y column in 3 dimensions */
    for (int cxy : columnRange)
    {
        const int numAtoms   = numAtomsInColumn(cxy);
        const int numCellsZ  = cxy_ind_[cxy + 1] - cxy_ind_[cxy];
        const int atomOffset = firstAtomInColumn(cxy);

        /* Sort the atoms within each x,y column on z coordinate */
        sort_atoms(ZZ,
                   FALSE,
                   ddZone,
                   relevantAtomsAreWithinGridBounds,
                   gridSetData->atomIndices.data() + atomOffset,
                   numAtoms,
                   x,
                   dimensions_.lowerCorner[ZZ],
                   1.0 / dimensions_.gridSize[ZZ],
                   numCellsZ * numAtomsPerCell,
                   sort_work);

        /* Fill the ncz cells in this column */
        const int firstCell  = firstCellInColumn(cxy);
        int       cellFilled = firstCell;
        for (int cellZ = 0; cellZ < numCellsZ; cellZ++)
        {
            const int cell = firstCell + cellZ;

            const int atomOffsetCell       = atomOffset + cellZ * numAtomsPerCell;
            const int numAtomsLeftInColumn = std::max(numAtoms - (atomOffsetCell - atomOffset), 0);
            const int numAtomsCell         = std::min(numAtomsPerCell, numAtomsLeftInColumn);

            fillCell(gridSetData, nbat, atomOffsetCell, atomOffsetCell + numAtomsCell, atinfo, x, nullptr);

            /* This copy to bbcz is not really necessary.
             * But it allows us to use the same grid search code
             * for the simple and supersub cell setups.
             */
            if (numAtomsCell > 0)
            {
                cellFilled = cell;
            }
            bbcz_[cell].lower = bb_[cellFilled].lower.z;
            bbcz_[cell].upper = bb_[cellFilled].upper.z;
        }

        /* Set the unused atom indices to -1 */
        for (int ind = numAtoms; ind < numCellsZ * numAtomsPerCell; ind++)
        {
            gridSetData->atomIndices[atomOffset + ind] = -1;
        }
    }
}
/* Spatially sort the atoms within the given range of grid columns */
void Grid::sortColumnsGpuGeometry(GridSetData*                   gridSetData,
                                  int                            ddZone,
                                  const int*                     atinfo,
                                  gmx::ArrayRef<const gmx::RVec> x,
                                  nbnxn_atomdata_t*              nbat,
                                  const gmx::Range<int>          columnRange,
                                  gmx::ArrayRef<int>             sort_work)
{
    BoundingBox bb_work_array[2];
    auto*       bb_work_aligned = reinterpret_cast<BoundingBox*>(
            (reinterpret_cast<std::size_t>(bb_work_array + 1)) & (~(static_cast<std::size_t>(15))));

    if (debug)
    {
        fprintf(debug,
                "cell_offset %d sorting columns %d - %d\n",
                cellOffset_,
                *columnRange.begin(),
                *columnRange.end());
    }

    const bool relevantAtomsAreWithinGridBounds = (dimensions_.maxAtomGroupRadius == 0);

    const int numAtomsPerCell = geometry_.numAtomsPerCell;

    const int subdiv_x = geometry_.numAtomsICluster;
    const int subdiv_y = c_gpuNumClusterPerCellX * subdiv_x;
    const int subdiv_z = c_gpuNumClusterPerCellY * subdiv_y;
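
    /* Illustrative numbers, assuming 8-atom i-clusters and the common
     * 2x2x2 clusters per cell: subdiv_x = 8 atoms per cluster,
     * subdiv_y = 2*8 = 16 atoms per x-row of clusters, and
     * subdiv_z = 2*16 = 32 atoms per z-layer, half of the 64-atom cell.
     */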
    /* Extract the atom index array that will be filled here */
    const gmx::ArrayRef<int>& atomIndices = gridSetData->atomIndices;

    /* Sort the atoms within each x,y column in 3 dimensions.
     * Loop over all columns on the x/y grid.
     */
    for (int cxy : columnRange)
    {
        const int gridX = cxy / dimensions_.numCells[YY];
        const int gridY = cxy - gridX * dimensions_.numCells[YY];

        const int numAtomsInColumn = cxy_na_[cxy];
        const int numCellsInColumn = cxy_ind_[cxy + 1] - cxy_ind_[cxy];
        const int atomOffset       = firstAtomInColumn(cxy);

        /* Sort the atoms within each x,y column on z coordinate */
        sort_atoms(ZZ,
                   FALSE,
                   ddZone,
                   relevantAtomsAreWithinGridBounds,
                   atomIndices.data() + atomOffset,
                   numAtomsInColumn,
                   x,
                   dimensions_.lowerCorner[ZZ],
                   1.0 / dimensions_.gridSize[ZZ],
                   numCellsInColumn * numAtomsPerCell,
                   sort_work);

        /* This loop goes over the cells and clusters along z at once */
        for (int sub_z = 0; sub_z < numCellsInColumn * c_gpuNumClusterPerCellZ; sub_z++)
        {
            const int atomOffsetZ = atomOffset + sub_z * subdiv_z;
            const int numAtomsZ = std::min(subdiv_z, numAtomsInColumn - (atomOffsetZ - atomOffset));
            int       cz        = -1;
            /* We have already sorted on z */

            if (sub_z % c_gpuNumClusterPerCellZ == 0)
            {
                cz             = sub_z / c_gpuNumClusterPerCellZ;
                const int cell = cxy_ind_[cxy] + cz;

                /* The number of atoms in this cell/super-cluster */
                const int numAtoms =
                        std::min(numAtomsPerCell, numAtomsInColumn - (atomOffsetZ - atomOffset));

                numClusters_[cell] = std::min(
                        c_gpuNumClusterPerCell,
                        (numAtoms + geometry_.numAtomsICluster - 1) / geometry_.numAtomsICluster);

                /* Store the z-boundaries of the bounding box of the cell */
                bbcz_[cell].lower = x[atomIndices[atomOffsetZ]][ZZ];
                bbcz_[cell].upper = x[atomIndices[atomOffsetZ + numAtoms - 1]][ZZ];
            }

            if (c_gpuNumClusterPerCellY > 1)
            {
                /* Sort the atoms along y */
                sort_atoms(YY,
                           (sub_z & 1) != 0,
                           ddZone,
                           relevantAtomsAreWithinGridBounds,
                           atomIndices.data() + atomOffsetZ,
                           numAtomsZ,
                           x,
                           dimensions_.lowerCorner[YY] + gridY * dimensions_.cellSize[YY],
                           dimensions_.invCellSize[YY],
                           subdiv_z,
                           sort_work);
            }

            for (int sub_y = 0; sub_y < c_gpuNumClusterPerCellY; sub_y++)
            {
                const int atomOffsetY = atomOffsetZ + sub_y * subdiv_y;
                const int numAtomsY = std::min(subdiv_y, numAtomsInColumn - (atomOffsetY - atomOffset));

                if (c_gpuNumClusterPerCellX > 1)
                {
                    /* Sort the atoms along x */
                    sort_atoms(XX,
                               ((cz * c_gpuNumClusterPerCellY + sub_y) & 1) != 0,
                               ddZone,
                               relevantAtomsAreWithinGridBounds,
                               atomIndices.data() + atomOffsetY,
                               numAtomsY,
                               x,
                               dimensions_.lowerCorner[XX] + gridX * dimensions_.cellSize[XX],
                               dimensions_.invCellSize[XX],
                               subdiv_y,
                               sort_work);
                }

                for (int sub_x = 0; sub_x < c_gpuNumClusterPerCellX; sub_x++)
                {
                    const int atomOffsetX = atomOffsetY + sub_x * subdiv_x;
                    const int numAtomsX =
                            std::min(subdiv_x, numAtomsInColumn - (atomOffsetX - atomOffset));

                    fillCell(gridSetData, nbat, atomOffsetX, atomOffsetX + numAtomsX, atinfo, x, bb_work_aligned);
                }
            }
        }

        /* Set the unused atom indices to -1 */
        for (int ind = numAtomsInColumn; ind < numCellsInColumn * numAtomsPerCell; ind++)
        {
            atomIndices[atomOffset + ind] = -1;
        }
    }
}
/*! \brief Sets the cell index in the cell array for atom \p atomIndex and increments the atom count for the grid column */
static void setCellAndAtomCount(gmx::ArrayRef<int> cell, int cellIndex, gmx::ArrayRef<int> cxy_na, int atomIndex)
{
    cell[atomIndex] = cellIndex;
    cxy_na[cellIndex] += 1;
}
void Grid::calcColumnIndices(const Grid::Dimensions&        gridDims,
                             const gmx::UpdateGroupsCog*    updateGroupsCog,
                             const gmx::Range<int>          atomRange,
                             gmx::ArrayRef<const gmx::RVec> x,
                             const int                      ddZone,
                             const int*                     move,
                             const int                      thread,
                             const int                      nthread,
                             gmx::ArrayRef<int>             cell,
                             gmx::ArrayRef<int>             cxy_na)
{
    const int numColumns = gridDims.numCells[XX] * gridDims.numCells[YY];

    /* We add one extra cell for particles which moved during DD */
    for (int i = 0; i < numColumns + 1; i++)
    {
        cxy_na[i] = 0;
    }

    int taskAtomStart = *atomRange.begin() + static_cast<int>((thread + 0) * atomRange.size()) / nthread;
    int taskAtomEnd = *atomRange.begin() + static_cast<int>((thread + 1) * atomRange.size()) / nthread;

    if (ddZone == 0)
    {
        /* Home zone */
        for (int i = taskAtomStart; i < taskAtomEnd; i++)
        {
            if (move == nullptr || move[i] >= 0)
            {
                const gmx::RVec& coord = (updateGroupsCog ? updateGroupsCog->cogForAtom(i) : x[i]);

                /* We need to be careful with rounding,
                 * particles might be a few bits outside the local zone.
                 * The int cast takes care of the lower bound,
                 * we will explicitly take care of the upper bound.
                 */
                int cx = static_cast<int>((coord[XX] - gridDims.lowerCorner[XX])
                                          * gridDims.invCellSize[XX]);
                int cy = static_cast<int>((coord[YY] - gridDims.lowerCorner[YY])
                                          * gridDims.invCellSize[YY]);

                if (cx < 0 || cx > gridDims.numCells[XX] || cy < 0 || cy > gridDims.numCells[YY])
                {
                    gmx_fatal(FARGS,
                              "grid cell cx %d cy %d out of range (max %d %d)\n"
                              "atom %f %f %f, grid->c0 %f %f",
                              cx,
                              cy,
                              gridDims.numCells[XX],
                              gridDims.numCells[YY],
                              x[i][XX],
                              x[i][YY],
                              x[i][ZZ],
                              gridDims.lowerCorner[XX],
                              gridDims.lowerCorner[YY]);
                }

                /* Take care of potential rounding issues */
                cx = std::min(cx, gridDims.numCells[XX] - 1);
                cy = std::min(cy, gridDims.numCells[YY] - 1);

                /* For the moment cell will contain only the grid-local
                 * x and y indices, not z.
                 */
                setCellAndAtomCount(cell, cx * gridDims.numCells[YY] + cy, cxy_na, i);
            }
            else
            {
                /* Put this moved particle after the end of the grid,
                 * so we can process it later without using conditionals.
                 */
                setCellAndAtomCount(cell, numColumns, cxy_na, i);
            }
        }
    }
    else
    {
        /* Non-home zone */
        for (int i = taskAtomStart; i < taskAtomEnd; i++)
        {
            int cx = static_cast<int>((x[i][XX] - gridDims.lowerCorner[XX]) * gridDims.invCellSize[XX]);
            int cy = static_cast<int>((x[i][YY] - gridDims.lowerCorner[YY]) * gridDims.invCellSize[YY]);

            /* For non-home zones there could be particles outside
             * the non-bonded cut-off range, which have been communicated
             * for bonded interactions only. For the result it doesn't
             * matter where these end up on the grid. For performance
             * we put them in an extra row at the border.
             */
            cx = std::max(cx, 0);
            cx = std::min(cx, gridDims.numCells[XX] - 1);
            cy = std::max(cy, 0);
            cy = std::min(cy, gridDims.numCells[YY] - 1);

            /* For the moment cell will contain only the grid-local
             * x and y indices, not z.
             */
            setCellAndAtomCount(cell, cx * gridDims.numCells[YY] + cy, cxy_na, i);
        }
    }
}
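
/* Column indexing sketch: a grid column is identified by its x,y cell pair
 * through
 *   columnIndex = cx * gridDims.numCells[YY] + cy
 * with y running fastest; index numColumns (one past the last column) is
 * the overflow slot used above for particles moved away by DD.
 */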
/*! \brief Resizes grid and atom data which depend on the number of cells */
static void resizeForNumberOfCells(const int         numNbnxnAtoms,
                                   const int         numAtomsMoved,
                                   GridSetData*      gridSetData,
                                   nbnxn_atomdata_t* nbat)
{
    /* Note: gridSetData->cellIndices was already resized before */

    /* To avoid conditionals we store the moved particles at the end of
     * atomIndices, so make sure we have enough space.
     */
    gridSetData->atomIndices.resize(numNbnxnAtoms + numAtomsMoved);

    /* Make space in nbat for storing the atom coordinates */
    nbat->resizeCoordinateBuffer(numNbnxnAtoms);
}
void Grid::setCellIndices(int                            ddZone,
                          int                            cellOffset,
                          GridSetData*                   gridSetData,
                          gmx::ArrayRef<GridWork>        gridWork,
                          const gmx::Range<int>          atomRange,
                          const int*                     atinfo,
                          gmx::ArrayRef<const gmx::RVec> x,
                          const int                      numAtomsMoved,
                          nbnxn_atomdata_t*              nbat)
{
    cellOffset_ = cellOffset;

    srcAtomBegin_ = *atomRange.begin();
    srcAtomEnd_   = *atomRange.end();

    const int nthread = gmx_omp_nthreads_get(emntPairsearch);

    const int numAtomsPerCell = geometry_.numAtomsPerCell;

    /* Make the cell index as a function of x and y */
    int ncz_max = 0;
    int ncz     = 0;
    cxy_ind_[0] = 0;
    for (int i = 0; i < numColumns() + 1; i++)
    {
        /* We set ncz_max at the beginning of the loop instead of at the end
         * to skip i=grid->ncx*grid->numCells[YY] which are moved particles
         * that do not need to be ordered on the grid.
         */
        if (ncz > ncz_max)
        {
            ncz_max = ncz;
        }
        int cxy_na_i = gridWork[0].numAtomsPerColumn[i];
        for (int thread = 1; thread < nthread; thread++)
        {
            cxy_na_i += gridWork[thread].numAtomsPerColumn[i];
        }
        ncz = (cxy_na_i + numAtomsPerCell - 1) / numAtomsPerCell;
        if (nbat->XFormat == nbatX8)
        {
            /* Make the number of cells a multiple of 2 */
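            /* Bit trick: (ncz + 1) & ~1 rounds up to the next even number,
             * e.g. 3 -> 4 while 4 stays 4, by adding one and then clearing
             * the lowest bit.
             */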
            ncz = (ncz + 1) & ~1;
        }
        cxy_ind_[i + 1] = cxy_ind_[i] + ncz;
        /* Clear cxy_na_, so we can reuse the array below */
        cxy_na_[i] = 0;
    }
    numCellsTotal_     = cxy_ind_[numColumns()] - cxy_ind_[0];
    numCellsColumnMax_ = ncz_max;

    /* Resize grid and atom data which depend on the number of cells */
    resizeForNumberOfCells(atomIndexEnd(), numAtomsMoved, gridSetData, nbat);

    if (debug)
    {
        fprintf(debug,
                "ns na_sc %d na_c %d super-cells: %d x %d y %d z %.1f maxz %d\n",
                numAtomsPerCell,
                geometry_.numAtomsICluster,
                numCellsTotal_,
                dimensions_.numCells[XX],
                dimensions_.numCells[YY],
                numCellsTotal_ / (static_cast<double>(numColumns())),
                ncz_max);
        if (gmx_debug_at)
        {
            int i = 0;
            for (int cy = 0; cy < dimensions_.numCells[YY]; cy++)
            {
                for (int cx = 0; cx < dimensions_.numCells[XX]; cx++)
                {
                    fprintf(debug, " %2d", cxy_ind_[i + 1] - cxy_ind_[i]);
                    i++;
                }
                fprintf(debug, "\n");
            }
        }
    }

    /* Make sure the work array for sorting is large enough */
    const int worstCaseSortBufferSize = ncz_max * numAtomsPerCell * c_sortGridMaxSizeFactor;
    if (worstCaseSortBufferSize > gmx::index(gridWork[0].sortBuffer.size()))
    {
        for (GridWork& work : gridWork)
        {
            /* Elements not in use should be -1 */
            work.sortBuffer.resize(worstCaseSortBufferSize, -1);
        }
    }

    /* Now we know the dimensions we can fill the grid.
     * This is the first, unsorted fill. We sort the columns after this.
     */
    gmx::ArrayRef<int> cells       = gridSetData->cells;
    gmx::ArrayRef<int> atomIndices = gridSetData->atomIndices;
    for (int i : atomRange)
    {
        /* At this point nbs->cell contains the local grid x,y indices */
        const int cxy = cells[i];
        atomIndices[firstAtomInColumn(cxy) + cxy_na_[cxy]++] = i;
    }

    if (ddZone == 0)
    {
        /* Set the cell indices for the moved particles */
        int n0 = numCellsTotal_ * numAtomsPerCell;
        int n1 = numCellsTotal_ * numAtomsPerCell + cxy_na_[numColumns()];
        for (int i = n0; i < n1; i++)
        {
            cells[atomIndices[i]] = i;
        }
    }

    /* Sort the super-cell columns along z into the sub-cells. */
#pragma omp parallel for num_threads(nthread) schedule(static)
    for (int thread = 0; thread < nthread; thread++)
    {
        try
        {
            gmx::Range<int> columnRange(((thread + 0) * numColumns()) / nthread,
                                        ((thread + 1) * numColumns()) / nthread);
            if (geometry_.isSimple)
            {
                sortColumnsCpuGeometry(
                        gridSetData, ddZone, atinfo, x, nbat, columnRange, gridWork[thread].sortBuffer);
            }
            else
            {
                sortColumnsGpuGeometry(
                        gridSetData, ddZone, atinfo, x, nbat, columnRange, gridWork[thread].sortBuffer);
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
    }

    if (geometry_.isSimple && nbat->XFormat == nbatX8)
    {
        combine_bounding_box_pairs(*this, bb_, bbj_);
    }

    if (!geometry_.isSimple)
    {
        numClustersTotal_ = 0;
        for (int i = 0; i < numCellsTotal_; i++)
        {
            numClustersTotal_ += numClusters_[i];
        }
    }

    if (debug)
    {
        if (geometry_.isSimple)
        {
            print_bbsizes_simple(debug, *this);
        }
        else
        {
            fprintf(debug,
                    "ns non-zero sub-cells: %d average atoms %.2f\n",
                    numClustersTotal_,
                    atomRange.size() / static_cast<double>(numClustersTotal_));

            print_bbsizes_supersub(debug, *this);
        }
    }
}

} // namespace Nbnxm