/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief
 * Implements the Grid class.
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_nbnxm
 */
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/updategroupscog.h"
#include "gromacs/mdtypes/forcerec.h" // only for GET_CGINFO_*
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/nbnxm_geometry.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"

#include "boundingboxes.h"
#include "gridsetdata.h"
#include "pairlistparams.h"

namespace Nbnxm
{
Grid::Geometry::Geometry(const PairlistType pairlistType) :
    isSimple(pairlistType != PairlistType::HierarchicalNxN),
    numAtomsICluster(IClusterSizePerListType[pairlistType]),
    numAtomsJCluster(JClusterSizePerListType[pairlistType]),
    numAtomsPerCell((isSimple ? 1 : c_gpuNumClusterPerCell)*numAtomsICluster),
    numAtomsICluster2Log(get_2log(numAtomsICluster))
{
}
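/* Illustrative sketch, not part of GROMACS: because the i-cluster size is a
 * power of two, numAtomsICluster2Log lets cluster indices be computed with a
 * shift instead of a division. A hypothetical helper showing the idiom used
 * throughout this file (e.g. in atomToCluster()):
 */
static inline int atomIndexToClusterIndexSketch(const int atomIndex,
                                                const int numAtomsICluster2Log)
{
    /* e.g. with 8 atoms per i-cluster, 2log = 3: atoms 0-7 -> cluster 0,
     * atoms 8-15 -> cluster 1, ... */
    return atomIndex >> numAtomsICluster2Log;
}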
Grid::Grid(const PairlistType  pairlistType,
           const bool         &haveFep) :
    geometry_(pairlistType),
    haveFep_(haveFep)
{
}
/*! \brief Returns the atom density (> 0) of a rectangular grid */
static real gridAtomDensity(int        numAtoms,
                            const rvec lowerCorner,
                            const rvec upperCorner)
{
    rvec size;
    if (numAtoms == 0)
    {
        /* To avoid zero density we use a minimum of 1 atom */
        numAtoms = 1;
    }
    rvec_sub(upperCorner, lowerCorner, size);

    return static_cast<real>(numAtoms)/(size[XX]*size[YY]*size[ZZ]);
}
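/* Worked example (illustrative, values chosen for round numbers): 1000 atoms
 * in a 5 x 5 x 4 nm box give gridAtomDensity() = 1000/(5*5*4) = 10 atoms/nm^3;
 * setDimensions() below then derives target cell lengths from the cube root
 * of atoms-per-cell divided by this density.
 */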
void Grid::setDimensions(const int          ddZone,
                         const int          numAtoms,
                         const rvec         lowerCorner,
                         const rvec         upperCorner,
                         real               atomDensity,
                         const real         maxAtomGroupRadius,
                         gmx::PinningPolicy pinningPolicy)
{
    /* For the home zone we compute the density when not set (=-1) or when =0 */
    if (ddZone == 0 && atomDensity <= 0)
    {
        atomDensity = gridAtomDensity(numAtoms, lowerCorner, upperCorner);
    }

    dimensions_.atomDensity        = atomDensity;
    dimensions_.maxAtomGroupRadius = maxAtomGroupRadius;

    rvec size;
    rvec_sub(upperCorner, lowerCorner, size);

    if (numAtoms > geometry_.numAtomsPerCell)
    {
        GMX_ASSERT(atomDensity > 0, "With one or more atoms, the density should be positive");

        /* target cell length */
        real tlen_x;
        real tlen_y;
        if (geometry_.isSimple)
        {
            /* To minimize the number of zero interactions, we should make
             * the largest of the i/j cells cubic.
             */
            int numAtomsInCell = std::max(geometry_.numAtomsICluster,
                                          geometry_.numAtomsJCluster);

            /* Approximately cubic cells */
            real tlen = std::cbrt(numAtomsInCell/atomDensity);
            tlen_x    = tlen;
            tlen_y    = tlen;
        }
        else
        {
            /* Approximately cubic sub cells */
            real tlen = std::cbrt(geometry_.numAtomsICluster/atomDensity);
            tlen_x    = tlen*c_gpuNumClusterPerCellX;
            tlen_y    = tlen*c_gpuNumClusterPerCellY;
        }
        /* We round ncx and ncy down, because we get fewer cell pairs
         * in the pair list when the fixed cell dimensions (x,y) are
         * larger than the variable one (z) than the other way around.
         */
        dimensions_.numCells[XX] = std::max(1, static_cast<int>(size[XX]/tlen_x));
        dimensions_.numCells[YY] = std::max(1, static_cast<int>(size[YY]/tlen_y));
    }
    else
    {
        dimensions_.numCells[XX] = 1;
        dimensions_.numCells[YY] = 1;
    }

    for (int d = 0; d < DIM - 1; d++)
    {
        dimensions_.cellSize[d]    = size[d]/dimensions_.numCells[d];
        dimensions_.invCellSize[d] = 1/dimensions_.cellSize[d];
    }

    if (ddZone > 0)
    {
        /* This is a non-home zone, add an extra row of cells
         * for particles communicated for bonded interactions.
         * These can be beyond the cut-off. It doesn't matter where
         * they end up on the grid, but for performance it's better
         * if they don't end up in cells that can be within cut-off range.
         */
        dimensions_.numCells[XX]++;
        dimensions_.numCells[YY]++;
    }

    /* We need one additional cell entry for particles moved by DD */
    cxy_na_.resize(numColumns() + 1);
    cxy_ind_.resize(numColumns() + 2);
    changePinningPolicy(&cxy_na_, pinningPolicy);
    changePinningPolicy(&cxy_ind_, pinningPolicy);

    /* Worst-case scenario: 1 atom in each last cell */
    int maxNumCells;
    if (geometry_.numAtomsJCluster <= geometry_.numAtomsICluster)
    {
        maxNumCells = numAtoms/geometry_.numAtomsPerCell + numColumns();
    }
    else
    {
        maxNumCells = numAtoms/geometry_.numAtomsPerCell + numColumns()*geometry_.numAtomsJCluster/geometry_.numAtomsICluster;
    }

    if (!geometry_.isSimple)
    {
        numClusters_.resize(maxNumCells);
    }
    bbcz_.resize(maxNumCells);

    /* This resize also zeroes the contents, which avoids possible
     * floating exceptions in SIMD with the unused bb elements.
     */
    if (geometry_.isSimple)
    {
        bb_.resize(maxNumCells);
    }
    else
    {
#if NBNXN_BBXXXX
        pbb_.resize(packedBoundingBoxesIndex(maxNumCells*c_gpuNumClusterPerCell));
#else
        bb_.resize(maxNumCells*c_gpuNumClusterPerCell);
#endif
    }

    if (geometry_.numAtomsJCluster == geometry_.numAtomsICluster)
    {
        bbj_ = bb_;
    }
    else
    {
        GMX_ASSERT(geometry_.isSimple, "Only CPU lists should have different i/j cluster sizes");

        bbjStorage_.resize(maxNumCells*geometry_.numAtomsICluster/geometry_.numAtomsJCluster);
        bbj_ = bbjStorage_;
    }

    flags_.resize(maxNumCells);
    fep_.resize(maxNumCells*geometry_.numAtomsPerCell/geometry_.numAtomsICluster);

    copy_rvec(lowerCorner, dimensions_.lowerCorner);
    copy_rvec(upperCorner, dimensions_.upperCorner);
    copy_rvec(size,        dimensions_.gridSize);
}
/* We need to sort particles in grid columns on z-coordinate.
 * As particles are very often distributed homogeneously, we use a sorting
 * algorithm similar to pigeonhole sort. We multiply the z-coordinate
 * by a factor, cast to an int and try to store in that hole. If the hole
 * is full, we move this or another particle. A second pass is needed to make
 * contiguous elements. c_sortGridRatio is the ratio of holes to particles.
 * 4 is the optimal value for homogeneous particle distributions and allows
 * for an O(#particles) sort up to distributions where all particles are
 * concentrated in 1/4 of the space. No NlogN fallback is implemented,
 * as it can be expensive to detect inhomogeneous particle distributions.
 */

/*! \brief Ratio of grid cells to atoms */
static constexpr int c_sortGridRatio         = 4;
/*! \brief Maximum ratio of holes used; in the worst case all particles
 * end up in the last hole and we need #atoms extra holes at the end.
 */
static constexpr int c_sortGridMaxSizeFactor = c_sortGridRatio + 1;
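/* Illustrative sketch, not part of GROMACS: the hole-based sort described
 * above in its simplest form, for n values in [0, 1). Each value gets
 * c_sortGridRatio holes, so collisions are rare for near-homogeneous
 * distributions and the sort runs in O(n). sort_atoms() below implements
 * the same idea on atom indices along one coordinate dimension. Assumes
 * <vector> is available through the headers above.
 */
gmx_unused static void pigeonholeSortSketch(std::vector<real> *values)
{
    const int n      = static_cast<int>(values->size());
    const int nHoles = n*c_sortGridMaxSizeFactor; // worst case: all values pile up at the end
    std::vector<int> hole(nHoles, -1);

    /* First pass: scatter each value into its hole, keeping holes ordered */
    for (int i = 0; i < n; i++)
    {
        int h = static_cast<int>((*values)[i]*n*c_sortGridRatio);
        /* Walk up past smaller occupants, as sort_atoms() does */
        while (hole[h] >= 0 && (*values)[i] > (*values)[hole[h]])
        {
            h++;
        }
        /* Insert here; shift any occupants up by one to make room */
        int carried = hole[h];
        hole[h]     = i;
        while (carried >= 0)
        {
            h++;
            const int next = hole[h];
            hole[h]        = carried;
            carried        = next;
        }
    }

    /* Second pass: compact the sparse holes into contiguous, sorted output */
    std::vector<real> sorted;
    sorted.reserve(n);
    for (int h = 0; h < nHoles; h++)
    {
        if (hole[h] >= 0)
        {
            sorted.push_back((*values)[hole[h]]);
        }
    }
    *values = sorted;
}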
/*! \brief Sorts particle index a on coordinates x along dim.
 *
 * Backwards tells if we want decreasing instead of increasing coordinates.
 * h0 is the minimum of the coordinate range.
 * invh is 1/length of the sorting range.
 * n_per_h (>=n) is the expected average number of particles per 1/invh.
 * sort is the sorting work array.
 * sort should have a size of at least n_per_h*c_sortGridRatio + n,
 * or, easier, allocate at least n*c_sortGridMaxSizeFactor elements.
 */
static void sort_atoms(int dim, gmx_bool Backwards,
                       int gmx_unused dd_zone,
                       bool gmx_unused relevantAtomsAreWithinGridBounds,
                       int *a, int n,
                       gmx::ArrayRef<const gmx::RVec> x,
                       real h0, real invh, int n_per_h,
                       gmx::ArrayRef<int> sort)
{
    GMX_ASSERT(n <= n_per_h, "We require n <= n_per_h");

    /* Transform the inverse range height into the inverse hole height */
    invh *= n_per_h*c_sortGridRatio;

    /* Set nsort to the maximum possible number of holes used.
     * In the worst case all n elements end up in the last bin.
     */
    int nsort = n_per_h*c_sortGridRatio + n;

    /* Determine the index range used, so we can limit it for the second pass */
    int zi_min = INT_MAX;
    int zi_max = -1;

    /* Sort the particles using a simple index sort */
    for (int i = 0; i < n; i++)
    {
        /* The cast takes care of floating-point rounding effects below zero.
         * This code assumes particles are less than 1/c_sortGridRatio
         * times the box height out of the box.
         */
        int zi = static_cast<int>((x[a[i]][dim] - h0)*invh);

        /* Since we can have rounding effects, we use > instead of >= here */
        if (relevantAtomsAreWithinGridBounds &&
            (zi < 0 || (dd_zone == 0 && zi > n_per_h*c_sortGridRatio)))
        {
            gmx_fatal(FARGS, "(int)((x[%d][%c]=%f - %f)*%f) = %d, not in 0 - %d*%d\n",
                      a[i], 'x'+dim, x[a[i]][dim], h0, invh, zi,
                      n_per_h, c_sortGridRatio);
        }
        /* In a non-local domain, particles communicated for bonded interactions
         * can be far beyond the grid size, which is set by the non-bonded
         * cut-off distance. We sort such particles into the last cell.
         */
        if (zi > n_per_h*c_sortGridRatio)
        {
            zi = n_per_h*c_sortGridRatio;
        }
        /* Ideally this particle should go in sort cell zi,
         * but that might already be in use,
         * in which case we find the first empty cell higher up.
         */
        if (sort[zi] < 0)
        {
            sort[zi] = a[i];
            zi_min   = std::min(zi_min, zi);
            zi_max   = std::max(zi_max, zi);
        }
        else
        {
            /* We have multiple atoms in the same sorting slot.
             * Sort on real z for minimal bounding box size.
             * There is an extra check for identical z to ensure
             * well-defined output order, independent of input order,
             * to ensure binary reproducibility after restarts.
             */
            while (sort[zi] >= 0 && ( x[a[i]][dim] >  x[sort[zi]][dim] ||
                                      (x[a[i]][dim] == x[sort[zi]][dim] &&
                                       a[i] > sort[zi])))
            {
                zi++;
            }
            if (sort[zi] >= 0)
            {
                /* Shift all elements by one slot until we find an empty slot */
                int cp  = sort[zi];
                int zim = zi + 1;
                while (sort[zim] >= 0)
                {
                    const int tmp = sort[zim];
                    sort[zim]     = cp;
                    cp            = tmp;
                    zim++;
                }
                sort[zim] = cp;
                zi_max    = std::max(zi_max, zim);
            }
            sort[zi] = a[i];
            zi_max   = std::max(zi_max, zi);
        }
    }

    int c = 0;
    if (!Backwards)
    {
        for (int zi = 0; zi < nsort; zi++)
        {
            if (sort[zi] >= 0) { a[c++] = sort[zi]; sort[zi] = -1; }
        }
    }
    else
    {
        for (int zi = zi_max; zi >= zi_min; zi--)
        {
            if (sort[zi] >= 0) { a[c++] = sort[zi]; sort[zi] = -1; }
        }
    }
    if (c < n)
    {
        gmx_incons("Lost particles while sorting");
    }
}
#if GMX_DOUBLE
//! Returns double up to one least significant float bit smaller than x
static double R2F_D(const float x)
{
    return static_cast<float>(x >= 0 ? (1 - GMX_FLOAT_EPS)*x : (1 + GMX_FLOAT_EPS)*x);
}
//! Returns double up to one least significant float bit larger than x
static double R2F_U(const float x)
{
    return static_cast<float>(x >= 0 ? (1 + GMX_FLOAT_EPS)*x : (1 - GMX_FLOAT_EPS)*x);
}
#else
//! Returns x
static float R2F_D(const float x) { return x; }
//! Returns x
static float R2F_U(const float x) { return x; }
#endif
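/* Illustrative note, not part of GROMACS: the double-precision variants above
 * give conservative float bounds. E.g. for x = 0.1, whose nearest float is
 * slightly larger than 0.1, R2F_D(0.1) first rounds to that float and then
 * scales by (1 - GMX_FLOAT_EPS), moving the result back below 0.1, while
 * R2F_U(0.1) scales up by (1 + GMX_FLOAT_EPS). A box stored as
 * [R2F_D(lo), R2F_U(hi)] therefore still contains the original [lo, hi]
 * after the double-to-float conversion noted in calc_bounding_box() below.
 */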
//! Computes the bounding box for na coordinates in order x,y,z, bb order xyz0
static void calc_bounding_box(int na, int stride, const real *x,
                              BoundingBox *bb)
{
    int  i = 0;
    real xl, xh, yl, yh, zl, zh;

    xl = x[i+XX];
    xh = x[i+XX];
    yl = x[i+YY];
    yh = x[i+YY];
    zl = x[i+ZZ];
    zh = x[i+ZZ];
    i += stride;
    for (int j = 1; j < na; j++)
    {
        xl = std::min(xl, x[i+XX]);
        xh = std::max(xh, x[i+XX]);
        yl = std::min(yl, x[i+YY]);
        yh = std::max(yh, x[i+YY]);
        zl = std::min(zl, x[i+ZZ]);
        zh = std::max(zh, x[i+ZZ]);
        i += stride;
    }
    /* Note: possible double to float conversion here */
    bb->lower.x = R2F_D(xl);
    bb->lower.y = R2F_D(yl);
    bb->lower.z = R2F_D(zl);
    bb->upper.x = R2F_U(xh);
    bb->upper.y = R2F_U(yh);
    bb->upper.z = R2F_U(zh);
}
/*! \brief Computes the bounding box for na packed coordinates, bb order xyz0 */
static void calc_bounding_box_x_x4(int na, const real *x,
                                   BoundingBox *bb)
{
    real xl, xh, yl, yh, zl, zh;

    xl = x[XX*c_packX4];
    xh = x[XX*c_packX4];
    yl = x[YY*c_packX4];
    yh = x[YY*c_packX4];
    zl = x[ZZ*c_packX4];
    zh = x[ZZ*c_packX4];
    for (int j = 1; j < na; j++)
    {
        xl = std::min(xl, x[j+XX*c_packX4]);
        xh = std::max(xh, x[j+XX*c_packX4]);
        yl = std::min(yl, x[j+YY*c_packX4]);
        yh = std::max(yh, x[j+YY*c_packX4]);
        zl = std::min(zl, x[j+ZZ*c_packX4]);
        zh = std::max(zh, x[j+ZZ*c_packX4]);
    }
    /* Note: possible double to float conversion here */
    bb->lower.x = R2F_D(xl);
    bb->lower.y = R2F_D(yl);
    bb->lower.z = R2F_D(zl);
    bb->upper.x = R2F_U(xh);
    bb->upper.y = R2F_U(yh);
    bb->upper.z = R2F_U(zh);
}
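/* Illustrative sketch, not part of GROMACS: in the packed X4 layout,
 * coordinates of c_packX4 (= 4) consecutive atoms are stored transposed as
 * x0 x1 x2 x3  y0 y1 y2 y3  z0 z1 z2 z3, which is why component d of atom j
 * within a pack is addressed as x[j + d*c_packX4] above. A hypothetical
 * helper computing the flat index of one component:
 */
static inline int packedX4IndexSketch(const int atomIndex, const int dim)
{
    const int pack       = atomIndex/c_packX4;          // which pack of 4 atoms
    const int withinPack = atomIndex & (c_packX4 - 1);  // position inside the pack
    return (pack*DIM + dim)*c_packX4 + withinPack;      // pack base + transposed offset
}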
/*! \brief Computes the bounding box for na coordinates, bb order xyz0 */
static void calc_bounding_box_x_x8(int na, const real *x,
                                   BoundingBox *bb)
{
    real xl, xh, yl, yh, zl, zh;

    xl = x[XX*c_packX8];
    xh = x[XX*c_packX8];
    yl = x[YY*c_packX8];
    yh = x[YY*c_packX8];
    zl = x[ZZ*c_packX8];
    zh = x[ZZ*c_packX8];
    for (int j = 1; j < na; j++)
    {
        xl = std::min(xl, x[j+XX*c_packX8]);
        xh = std::max(xh, x[j+XX*c_packX8]);
        yl = std::min(yl, x[j+YY*c_packX8]);
        yh = std::max(yh, x[j+YY*c_packX8]);
        zl = std::min(zl, x[j+ZZ*c_packX8]);
        zh = std::max(zh, x[j+ZZ*c_packX8]);
    }
    /* Note: possible double to float conversion here */
    bb->lower.x = R2F_D(xl);
    bb->lower.y = R2F_D(yl);
    bb->lower.z = R2F_D(zl);
    bb->upper.x = R2F_U(xh);
    bb->upper.y = R2F_U(yh);
    bb->upper.z = R2F_U(zh);
}
/*! \brief Computes the bounding box for na packed coordinates, bb order xyz0 */
gmx_unused static void calc_bounding_box_x_x4_halves(int na, const real *x,
                                                     BoundingBox *bb,
                                                     BoundingBox *bbj)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    calc_bounding_box_x_x4(std::min(na, 2), x, bbj);

    if (na > 2)
    {
        calc_bounding_box_x_x4(std::min(na-2, 2), x+(c_packX4>>1), bbj+1);
    }
    else
    {
        /* Set the "empty" bounding box to the same as the first one,
         * so we don't need to treat special cases in the rest of the code.
         */
#if NBNXN_SEARCH_BB_SIMD4
        store4(bbj[1].lower.ptr(), load4(bbj[0].lower.ptr()));
        store4(bbj[1].upper.ptr(), load4(bbj[0].upper.ptr()));
#else
        bbj[1] = bbj[0];
#endif
    }

#if NBNXN_SEARCH_BB_SIMD4
    store4(bb->lower.ptr(),
           min(load4(bbj[0].lower.ptr()), load4(bbj[1].lower.ptr())));
    store4(bb->upper.ptr(),
           max(load4(bbj[0].upper.ptr()), load4(bbj[1].upper.ptr())));
#else
    bb->lower = BoundingBox::Corner::min(bbj[0].lower, bbj[1].lower);
    bb->upper = BoundingBox::Corner::max(bbj[0].upper, bbj[1].upper);
#endif
}
#if NBNXN_BBXXXX

/*! \brief Computes the bounding box for na coordinates in order xyz, bb order xxxxyyyyzzzz */
static void calc_bounding_box_xxxx(int na, int stride, const real *x, float *bb)
{
    int  i = 0;
    real xl, xh, yl, yh, zl, zh;

    xl = x[i+XX];
    xh = x[i+XX];
    yl = x[i+YY];
    yh = x[i+YY];
    zl = x[i+ZZ];
    zh = x[i+ZZ];
    i += stride;
    for (int j = 1; j < na; j++)
    {
        xl = std::min(xl, x[i+XX]);
        xh = std::max(xh, x[i+XX]);
        yl = std::min(yl, x[i+YY]);
        yh = std::max(yh, x[i+YY]);
        zl = std::min(zl, x[i+ZZ]);
        zh = std::max(zh, x[i+ZZ]);
        i += stride;
    }
    /* Note: possible double to float conversion here */
    bb[0*c_packedBoundingBoxesDimSize] = R2F_D(xl);
    bb[1*c_packedBoundingBoxesDimSize] = R2F_D(yl);
    bb[2*c_packedBoundingBoxesDimSize] = R2F_D(zl);
    bb[3*c_packedBoundingBoxesDimSize] = R2F_U(xh);
    bb[4*c_packedBoundingBoxesDimSize] = R2F_U(yh);
    bb[5*c_packedBoundingBoxesDimSize] = R2F_U(zh);
}

#endif /* NBNXN_BBXXXX */
#if NBNXN_SEARCH_SIMD4_FLOAT_X_BB

/*! \brief Computes the bounding box for na coordinates in order xyz?, bb order xyz0 */
static void calc_bounding_box_simd4(int na, const float *x,
                                    BoundingBox *bb)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    static_assert(sizeof(BoundingBox::Corner) == GMX_SIMD4_WIDTH*sizeof(float),
                  "The Corner struct should hold exactly 4 floats");

    Simd4Float bb_0_S = load4(x);
    Simd4Float bb_1_S = bb_0_S;

    for (int i = 1; i < na; i++)
    {
        Simd4Float x_S = load4(x + i*GMX_SIMD4_WIDTH);
        bb_0_S         = min(bb_0_S, x_S);
        bb_1_S         = max(bb_1_S, x_S);
    }

    store4(bb->lower.ptr(), bb_0_S);
    store4(bb->upper.ptr(), bb_1_S);
}

#if NBNXN_BBXXXX

/*! \brief Computes the bounding box for na coordinates in order xyz?, bb order xxxxyyyyzzzz */
static void calc_bounding_box_xxxx_simd4(int na, const float *x,
                                         BoundingBox *bb_work_aligned,
                                         float *bb)
{
    calc_bounding_box_simd4(na, x, bb_work_aligned);

    bb[0*c_packedBoundingBoxesDimSize] = bb_work_aligned->lower.x;
    bb[1*c_packedBoundingBoxesDimSize] = bb_work_aligned->lower.y;
    bb[2*c_packedBoundingBoxesDimSize] = bb_work_aligned->lower.z;
    bb[3*c_packedBoundingBoxesDimSize] = bb_work_aligned->upper.x;
    bb[4*c_packedBoundingBoxesDimSize] = bb_work_aligned->upper.y;
    bb[5*c_packedBoundingBoxesDimSize] = bb_work_aligned->upper.z;
}

#endif /* NBNXN_BBXXXX */

#endif /* NBNXN_SEARCH_SIMD4_FLOAT_X_BB */
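/* Illustrative sketch, not part of GROMACS: in the packed xxxxyyyyzzzz
 * layout, c_packedBoundingBoxesDimSize bounding boxes are stored component-
 * wise (all lower x, then all lower y, ..., then all upper z), so a single
 * SIMD4 load fetches the same component of four boxes at once. A scalar
 * sketch of reading box i of a pack back out of that layout:
 */
gmx_unused static BoundingBox unpackBoundingBoxSketch(const float *pack, const int i)
{
    BoundingBox bb;

    bb.lower.x = pack[0*c_packedBoundingBoxesDimSize + i];
    bb.lower.y = pack[1*c_packedBoundingBoxesDimSize + i];
    bb.lower.z = pack[2*c_packedBoundingBoxesDimSize + i];
    bb.upper.x = pack[3*c_packedBoundingBoxesDimSize + i];
    bb.upper.y = pack[4*c_packedBoundingBoxesDimSize + i];
    bb.upper.z = pack[5*c_packedBoundingBoxesDimSize + i];

    return bb;
}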
/*! \brief Combines pairs of consecutive bounding boxes */
static void combine_bounding_box_pairs(const Grid                       &grid,
                                       gmx::ArrayRef<const BoundingBox>  bb,
                                       gmx::ArrayRef<BoundingBox>        bbj)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    for (int i = 0; i < grid.numColumns(); i++)
    {
        /* Starting bb in a column is expected to be 2-aligned */
        const int sc2 = grid.firstCellInColumn(i) >> 1;
        /* For odd numbers skip the last bb here */
        const int nc2 = (grid.numAtomsInColumn(i) + 3) >> (2 + 1);
        int       c2;
        for (c2 = sc2; c2 < sc2 + nc2; c2++)
        {
#if NBNXN_SEARCH_BB_SIMD4
            Simd4Float min_S, max_S;

            min_S = min(load4(bb[c2*2+0].lower.ptr()),
                        load4(bb[c2*2+1].lower.ptr()));
            max_S = max(load4(bb[c2*2+0].upper.ptr()),
                        load4(bb[c2*2+1].upper.ptr()));
            store4(bbj[c2].lower.ptr(), min_S);
            store4(bbj[c2].upper.ptr(), max_S);
#else
            bbj[c2].lower = BoundingBox::Corner::min(bb[c2*2 + 0].lower,
                                                     bb[c2*2 + 1].lower);
            bbj[c2].upper = BoundingBox::Corner::max(bb[c2*2 + 0].upper,
                                                     bb[c2*2 + 1].upper);
#endif
        }
        if (((grid.numAtomsInColumn(i) + 3) >> 2) & 1)
        {
            /* The bb count in this column is odd: duplicate the last bb */
            bbj[c2].lower = bb[c2*2].lower;
            bbj[c2].upper = bb[c2*2].upper;
        }
    }
}
/*! \brief Prints the average bb size, used for debug output */
static void print_bbsizes_simple(FILE       *fp,
                                 const Grid &grid)
{
    dvec ba;

    clear_dvec(ba);
    for (int c = 0; c < grid.numCells(); c++)
    {
        const BoundingBox &bb = grid.iBoundingBoxes()[c];
        ba[XX] += bb.upper.x - bb.lower.x;
        ba[YY] += bb.upper.y - bb.lower.y;
        ba[ZZ] += bb.upper.z - bb.lower.z;
    }
    dsvmul(1.0/grid.numCells(), ba, ba);

    const Grid::Dimensions &dims = grid.dimensions();

    real avgCellSizeZ =
        (dims.atomDensity > 0 ?
         grid.geometry().numAtomsICluster/(dims.atomDensity*dims.cellSize[XX]*dims.cellSize[YY]) : 0.0);

    fprintf(fp, "ns bb: grid %4.2f %4.2f %4.2f abs %4.2f %4.2f %4.2f rel %4.2f %4.2f %4.2f\n",
            dims.cellSize[XX],
            dims.cellSize[YY],
            avgCellSizeZ,
            ba[XX], ba[YY], ba[ZZ],
            ba[XX]*dims.invCellSize[XX],
            ba[YY]*dims.invCellSize[YY],
            dims.atomDensity > 0 ? ba[ZZ]/avgCellSizeZ : 0.0);
}
/*! \brief Prints the average bb size, used for debug output */
static void print_bbsizes_supersub(FILE       *fp,
                                   const Grid &grid)
{
    int  ns = 0;
    dvec ba;

    clear_dvec(ba);
    for (int c = 0; c < grid.numCells(); c++)
    {
#if NBNXN_BBXXXX
        for (int s = 0; s < grid.numClustersPerCell()[c]; s += c_packedBoundingBoxesDimSize)
        {
            int  cs_w          = (c*c_gpuNumClusterPerCell + s)/c_packedBoundingBoxesDimSize;
            auto boundingBoxes = grid.packedBoundingBoxes().subArray(cs_w*c_packedBoundingBoxesSize, c_packedBoundingBoxesSize);
            for (int i = 0; i < c_packedBoundingBoxesDimSize; i++)
            {
                for (int d = 0; d < DIM; d++)
                {
                    ba[d] +=
                        boundingBoxes[(DIM + d)*c_packedBoundingBoxesDimSize + i] -
                        boundingBoxes[(0 + d)*c_packedBoundingBoxesDimSize + i];
                }
            }
        }
#else
        for (int s = 0; s < grid.numClustersPerCell()[c]; s++)
        {
            const BoundingBox &bb = grid.iBoundingBoxes()[c*c_gpuNumClusterPerCell + s];
            ba[XX] += bb.upper.x - bb.lower.x;
            ba[YY] += bb.upper.y - bb.lower.y;
            ba[ZZ] += bb.upper.z - bb.lower.z;
        }
#endif
        ns += grid.numClustersPerCell()[c];
    }
    dsvmul(1.0/ns, ba, ba);

    const Grid::Dimensions &dims = grid.dimensions();

    const real avgClusterSizeZ =
        (dims.atomDensity > 0 ? grid.geometry().numAtomsPerCell/(dims.atomDensity*dims.cellSize[XX]*dims.cellSize[YY]*c_gpuNumClusterPerCellZ) : 0.0);

    fprintf(fp, "ns bb: grid %4.2f %4.2f %4.2f abs %4.2f %4.2f %4.2f rel %4.2f %4.2f %4.2f\n",
            dims.cellSize[XX]/c_gpuNumClusterPerCellX,
            dims.cellSize[YY]/c_gpuNumClusterPerCellY,
            avgClusterSizeZ,
            ba[XX], ba[YY], ba[ZZ],
            ba[XX]*c_gpuNumClusterPerCellX*dims.invCellSize[XX],
            ba[YY]*c_gpuNumClusterPerCellY*dims.invCellSize[YY],
            dims.atomDensity > 0 ? ba[ZZ]/avgClusterSizeZ : 0.0);
}
/*! \brief Set non-bonded interaction flags for the current cluster.
 *
 * Sorts atoms on LJ coefficients: !=0 first, ==0 at the end.
 */
static void sort_cluster_on_flag(int numAtomsInCluster, int atomStart, int atomEnd,
                                 const int *atinfo, gmx::ArrayRef<int> order, int *flags)
{
    constexpr int c_maxNumAtomsInCluster = 8;
    int           sort1[c_maxNumAtomsInCluster];
    int           sort2[c_maxNumAtomsInCluster];

    GMX_ASSERT(numAtomsInCluster <= c_maxNumAtomsInCluster, "Need to increase c_maxNumAtomsInCluster to support larger clusters");

    *flags   = 0;
    int subc = 0;
    for (int s = atomStart; s < atomEnd; s += numAtomsInCluster)
    {
        /* Make lists for this (sub-)cell on atoms with and without LJ */
        int      n1       = 0;
        int      n2       = 0;
        int      a_lj_max = -1;
        gmx_bool haveQ    = FALSE;
        for (int a = s; a < std::min(s + numAtomsInCluster, atomEnd); a++)
        {
            haveQ = haveQ || GET_CGINFO_HAS_Q(atinfo[order[a]]);
            if (GET_CGINFO_HAS_VDW(atinfo[order[a]]))
            {
                sort1[n1++] = order[a];
                a_lj_max    = a;
            }
            else
            {
                sort2[n2++] = order[a];
            }
        }

        /* If we don't have atoms with LJ, there's nothing to sort */
        if (n1 > 0)
        {
            *flags |= NBNXN_CI_DO_LJ(subc);

            if (2*n1 <= numAtomsInCluster)
            {
                /* Only sort when strictly necessary. Ordering particles
                 * can lead to less accurate summation due to rounding,
                 * both for LJ and Coulomb interactions.
                 */
                if (2*(a_lj_max - s) >= numAtomsInCluster)
                {
                    for (int i = 0; i < n1; i++) { order[atomStart + i] = sort1[i]; }
                    for (int j = 0; j < n2; j++) { order[atomStart + n1 + j] = sort2[j]; }
                }

                *flags |= NBNXN_CI_HALF_LJ(subc);
            }
        }
        if (haveQ)
        {
            *flags |= NBNXN_CI_DO_COUL(subc);
        }
        subc++;
    }
}
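/* Illustrative sketch, not part of GROMACS: a consumer of the flags set above
 * might read them like this per cluster; the NBNXN_CI_* accessors are the real
 * macros, while this helper and its output parameters are hypothetical.
 */
gmx_unused static void readClusterFlagsSketch(const int flags, const int subc,
                                              bool *doLJ, bool *halfLJ, bool *doCoulomb)
{
    *doLJ      = (flags & NBNXN_CI_DO_LJ(subc)) != 0;
    /* With half-LJ, only the first half of the cluster's atoms have
     * non-zero LJ parameters, thanks to the sorting above */
    *halfLJ    = (flags & NBNXN_CI_HALF_LJ(subc)) != 0 && *doLJ;
    *doCoulomb = (flags & NBNXN_CI_DO_COUL(subc)) != 0;
}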
/*! \brief Fill a pair search cell with atoms.
 *
 * Potentially sorts atoms and sets the interaction flags.
 */
void Grid::fillCell(GridSetData                    *gridSetData,
                    nbnxn_atomdata_t               *nbat,
                    int                             atomStart,
                    int                             atomEnd,
                    const int                      *atinfo,
                    gmx::ArrayRef<const gmx::RVec>  x,
                    BoundingBox gmx_unused         *bb_work_aligned)
{
    const int                 numAtoms    = atomEnd - atomStart;

    const gmx::ArrayRef<int> &cells       = gridSetData->cells;
    const gmx::ArrayRef<int> &atomIndices = gridSetData->atomIndices;

    if (geometry_.isSimple)
    {
        /* Note that non-local grids are already sorted.
         * Then sort_cluster_on_flag will only set the flags and the sorting
         * will not affect the atom order.
         */
        sort_cluster_on_flag(geometry_.numAtomsICluster, atomStart, atomEnd, atinfo, atomIndices,
                             flags_.data() + atomToCluster(atomStart) - cellOffset_);
    }

    if (haveFep_)
    {
        /* Set the fep flag for perturbed atoms in this (sub-)cell */

        /* The grid-local cluster/(sub-)cell index */
        int cell = atomToCluster(atomStart) - cellOffset_*(geometry_.isSimple ? 1 : c_gpuNumClusterPerCell);
        fep_[cell] = 0;
        for (int at = atomStart; at < atomEnd; at++)
        {
            if (atomIndices[at] >= 0 && GET_CGINFO_FEP(atinfo[atomIndices[at]]))
            {
                fep_[cell] |= (1 << (at - atomStart));
            }
        }
    }

    /* Now that we have sorted the atoms, set the cell indices */
    for (int at = atomStart; at < atomEnd; at++)
    {
        cells[atomIndices[at]] = at;
    }

    copy_rvec_to_nbat_real(atomIndices.data() + atomStart, numAtoms, geometry_.numAtomsICluster,
                           as_rvec_array(x.data()),
                           nbat->XFormat, nbat->x().data(), atomStart);

    if (nbat->XFormat == nbatX4)
    {
        /* Store the bounding boxes as xyz.xyz. */
        size_t       offset = atomToCluster(atomStart - cellOffset_*geometry_.numAtomsICluster);
        BoundingBox *bb_ptr = bb_.data() + offset;

#if GMX_SIMD && GMX_SIMD_REAL_WIDTH == 2
        if (2*geometry_.numAtomsJCluster == geometry_.numAtomsICluster)
        {
            calc_bounding_box_x_x4_halves(numAtoms, nbat->x().data() + atom_to_x_index<c_packX4>(atomStart), bb_ptr,
                                          bbj_.data() + offset*2);
        }
        else
#endif
        {
            calc_bounding_box_x_x4(numAtoms, nbat->x().data() + atom_to_x_index<c_packX4>(atomStart), bb_ptr);
        }
    }
    else if (nbat->XFormat == nbatX8)
    {
        /* Store the bounding boxes as xyz.xyz. */
        size_t       offset = atomToCluster(atomStart - cellOffset_*geometry_.numAtomsICluster);
        BoundingBox *bb_ptr = bb_.data() + offset;

        calc_bounding_box_x_x8(numAtoms, nbat->x().data() + atom_to_x_index<c_packX8>(atomStart), bb_ptr);
    }
#if NBNXN_BBXXXX
    else if (!geometry_.isSimple)
    {
        /* Store the bounding boxes in a format convenient
         * for SIMD4 calculations: xxxxyyyyzzzz...
         */
        const int clusterIndex = ((atomStart - cellOffset_*geometry_.numAtomsPerCell) >> geometry_.numAtomsICluster2Log);
        float    *pbb_ptr      =
            pbb_.data() +
            packedBoundingBoxesIndex(clusterIndex) +
            (clusterIndex & (c_packedBoundingBoxesDimSize - 1));

#if NBNXN_SEARCH_SIMD4_FLOAT_X_BB
        if (nbat->XFormat == nbatXYZQ)
        {
            GMX_ASSERT(bb_work_aligned != nullptr, "Must have valid aligned work structure");
            calc_bounding_box_xxxx_simd4(numAtoms, nbat->x().data() + atomStart*nbat->xstride,
                                         bb_work_aligned, pbb_ptr);
        }
        else
#endif
        {
            calc_bounding_box_xxxx(numAtoms, nbat->xstride, nbat->x().data() + atomStart*nbat->xstride,
                                   pbb_ptr);
        }
        if (gmx_debug_at)
        {
            fprintf(debug, "cell %4d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
                    atomToCluster(atomStart),
                    pbb_ptr[0*c_packedBoundingBoxesDimSize], pbb_ptr[3*c_packedBoundingBoxesDimSize],
                    pbb_ptr[1*c_packedBoundingBoxesDimSize], pbb_ptr[4*c_packedBoundingBoxesDimSize],
                    pbb_ptr[2*c_packedBoundingBoxesDimSize], pbb_ptr[5*c_packedBoundingBoxesDimSize]);
        }
    }
#endif
    else
    {
        /* Store the bounding boxes as xyz.xyz. */
        BoundingBox *bb_ptr = bb_.data() + atomToCluster(atomStart - cellOffset_*geometry_.numAtomsPerCell);

        calc_bounding_box(numAtoms, nbat->xstride, nbat->x().data() + atomStart*nbat->xstride,
                          bb_ptr);

        if (gmx_debug_at)
        {
            int bbo = atomToCluster(atomStart - cellOffset_*geometry_.numAtomsPerCell);
            fprintf(debug, "cell %4d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
                    atomToCluster(atomStart),
                    bb_[bbo].lower.x, bb_[bbo].lower.y, bb_[bbo].lower.z,
                    bb_[bbo].upper.x, bb_[bbo].upper.y, bb_[bbo].upper.z);
        }
    }
}
void Grid::sortColumnsCpuGeometry(GridSetData *gridSetData,
                                  int dd_zone,
                                  int atomStart, int atomEnd,
                                  const int *atinfo,
                                  gmx::ArrayRef<const gmx::RVec> x,
                                  nbnxn_atomdata_t *nbat,
                                  int cxy_start, int cxy_end,
                                  gmx::ArrayRef<int> sort_work)
{
    if (debug)
    {
        fprintf(debug, "cell_offset %d sorting columns %d - %d, atoms %d - %d\n",
                cellOffset_, cxy_start, cxy_end, atomStart, atomEnd);
    }

    const bool relevantAtomsAreWithinGridBounds = (dimensions_.maxAtomGroupRadius == 0);

    const int  numAtomsPerCell = geometry_.numAtomsPerCell;

    /* Sort the atoms within each x,y column in 3 dimensions */
    for (int cxy = cxy_start; cxy < cxy_end; cxy++)
    {
        const int numAtoms   = numAtomsInColumn(cxy);
        const int numCellsZ  = cxy_ind_[cxy + 1] - cxy_ind_[cxy];
        const int atomOffset = firstAtomInColumn(cxy);

        /* Sort the atoms within each x,y column on z coordinate */
        sort_atoms(ZZ, FALSE, dd_zone,
                   relevantAtomsAreWithinGridBounds,
                   gridSetData->atomIndices.data() + atomOffset, numAtoms, x,
                   dimensions_.lowerCorner[ZZ],
                   1.0/dimensions_.gridSize[ZZ], numCellsZ*numAtomsPerCell,
                   sort_work);

        /* Fill the ncz cells in this column */
        const int firstCell  = firstCellInColumn(cxy);
        int       cellFilled = firstCell;
        for (int cellZ = 0; cellZ < numCellsZ; cellZ++)
        {
            const int cell = firstCell + cellZ;

            const int atomOffsetCell = atomOffset + cellZ*numAtomsPerCell;
            const int numAtomsCell   = std::min(numAtomsPerCell, numAtoms - (atomOffsetCell - atomOffset));

            fillCell(gridSetData, nbat,
                     atomOffsetCell, atomOffsetCell + numAtomsCell, atinfo, x,
                     nullptr);

            /* This copy to bbcz is not really necessary.
             * But it allows using the same grid search code
             * for the simple and supersub cell setups.
             */
            if (numAtomsCell > 0)
            {
                cellFilled = cell;
            }
            bbcz_[cell].lower = bb_[cellFilled].lower.z;
            bbcz_[cell].upper = bb_[cellFilled].upper.z;
        }

        /* Set the unused atom indices to -1 */
        for (int ind = numAtoms; ind < numCellsZ*numAtomsPerCell; ind++)
        {
            gridSetData->atomIndices[atomOffset + ind] = -1;
        }
    }
}
/* Spatially sort the atoms within one grid column */
void Grid::sortColumnsGpuGeometry(GridSetData *gridSetData,
                                  int dd_zone,
                                  int atomStart, int atomEnd,
                                  const int *atinfo,
                                  gmx::ArrayRef<const gmx::RVec> x,
                                  nbnxn_atomdata_t *nbat,
                                  int cxy_start, int cxy_end,
                                  gmx::ArrayRef<int> sort_work)
{
    BoundingBox  bb_work_array[2];
    BoundingBox *bb_work_aligned = reinterpret_cast<BoundingBox *>((reinterpret_cast<std::size_t>(bb_work_array + 1)) & (~(static_cast<std::size_t>(15))));

    if (debug)
    {
        fprintf(debug, "cell_offset %d sorting columns %d - %d, atoms %d - %d\n",
                cellOffset_, cxy_start, cxy_end, atomStart, atomEnd);
    }

    const bool relevantAtomsAreWithinGridBounds = (dimensions_.maxAtomGroupRadius == 0);

    const int  numAtomsPerCell = geometry_.numAtomsPerCell;

    const int  subdiv_x = geometry_.numAtomsICluster;
    const int  subdiv_y = c_gpuNumClusterPerCellX*subdiv_x;
    const int  subdiv_z = c_gpuNumClusterPerCellY*subdiv_y;
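
    /* Worked example (illustrative, assuming the common GPU setup of
     * numAtomsICluster = 8 and 2x2x2 clusters per cell): subdiv_x = 8 atoms
     * per cluster row along x, subdiv_y = 2*8 = 16 atoms per xy-slab, and
     * subdiv_z = 2*16 = 32 atoms per z-slab, so one cell holds
     * c_gpuNumClusterPerCellZ*subdiv_z = 64 atoms. The nested sorts below
     * split each column first into z-slabs, then y-slabs, then x-clusters.
     */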
    /* Extract the atom index array that will be filled here */
    const gmx::ArrayRef<int> &atomIndices = gridSetData->atomIndices;

    /* Sort the atoms within each x,y column in 3 dimensions.
     * Loop over all columns on the x/y grid.
     */
    for (int cxy = cxy_start; cxy < cxy_end; cxy++)
    {
        const int gridX = cxy/dimensions_.numCells[YY];
        const int gridY = cxy - gridX*dimensions_.numCells[YY];

        const int numAtomsInColumn = cxy_na_[cxy];
        const int numCellsInColumn = cxy_ind_[cxy + 1] - cxy_ind_[cxy];
        const int atomOffset       = firstAtomInColumn(cxy);

        /* Sort the atoms within each x,y column on z coordinate */
        sort_atoms(ZZ, FALSE, dd_zone,
                   relevantAtomsAreWithinGridBounds,
                   atomIndices.data() + atomOffset, numAtomsInColumn, x,
                   dimensions_.lowerCorner[ZZ],
                   1.0/dimensions_.gridSize[ZZ], numCellsInColumn*numAtomsPerCell,
                   sort_work);

        /* This loop goes over the cells and clusters along z at once */
        for (int sub_z = 0; sub_z < numCellsInColumn*c_gpuNumClusterPerCellZ; sub_z++)
        {
            const int atomOffsetZ = atomOffset + sub_z*subdiv_z;
            const int numAtomsZ   = std::min(subdiv_z, numAtomsInColumn - (atomOffsetZ - atomOffset));
            int       cz          = -1;
            /* We have already sorted on z */
            if (sub_z % c_gpuNumClusterPerCellZ == 0)
            {
                cz = sub_z/c_gpuNumClusterPerCellZ;
                const int cell = cxy_ind_[cxy] + cz;

                /* The number of atoms in this cell/super-cluster */
                const int numAtoms = std::min(numAtomsPerCell, numAtomsInColumn - (atomOffsetZ - atomOffset));

                numClusters_[cell] = std::min(c_gpuNumClusterPerCell,
                                              (numAtoms + geometry_.numAtomsICluster - 1)/ geometry_.numAtomsICluster);

                /* Store the z-boundaries of the bounding box of the cell */
                bbcz_[cell].lower = x[atomIndices[atomOffsetZ]][ZZ];
                bbcz_[cell].upper = x[atomIndices[atomOffsetZ + numAtoms - 1]][ZZ];
            }

            if (c_gpuNumClusterPerCellY > 1)
            {
                /* Sort the atoms along y */
                sort_atoms(YY, (sub_z & 1) != 0, dd_zone,
                           relevantAtomsAreWithinGridBounds,
                           atomIndices.data() + atomOffsetZ, numAtomsZ, x,
                           dimensions_.lowerCorner[YY] + gridY*dimensions_.cellSize[YY],
                           dimensions_.invCellSize[YY], subdiv_z,
                           sort_work);
            }

            for (int sub_y = 0; sub_y < c_gpuNumClusterPerCellY; sub_y++)
            {
                const int atomOffsetY = atomOffsetZ + sub_y*subdiv_y;
                const int numAtomsY   = std::min(subdiv_y, numAtomsInColumn - (atomOffsetY - atomOffset));

                if (c_gpuNumClusterPerCellX > 1)
                {
                    /* Sort the atoms along x */
                    sort_atoms(XX, ((cz*c_gpuNumClusterPerCellY + sub_y) & 1) != 0, dd_zone,
                               relevantAtomsAreWithinGridBounds,
                               atomIndices.data() + atomOffsetY, numAtomsY, x,
                               dimensions_.lowerCorner[XX] + gridX*dimensions_.cellSize[XX],
                               dimensions_.invCellSize[XX], subdiv_y,
                               sort_work);
                }

                for (int sub_x = 0; sub_x < c_gpuNumClusterPerCellX; sub_x++)
                {
                    const int atomOffsetX = atomOffsetY + sub_x*subdiv_x;
                    const int numAtomsX   = std::min(subdiv_x, numAtomsInColumn - (atomOffsetX - atomOffset));

                    fillCell(gridSetData, nbat,
                             atomOffsetX, atomOffsetX + numAtomsX, atinfo, x,
                             bb_work_aligned);
                }
            }
        }

        /* Set the unused atom indices to -1 */
        for (int ind = numAtomsInColumn; ind < numCellsInColumn*numAtomsPerCell; ind++)
        {
            atomIndices[atomOffset + ind] = -1;
        }
    }
}
/*! \brief Sets the cell index in the cell array for atom \p atomIndex and increments the atom count for the grid column */
static void setCellAndAtomCount(gmx::ArrayRef<int> cell,
                                int                cellIndex,
                                gmx::ArrayRef<int> cxy_na,
                                int                atomIndex)
{
    cell[atomIndex]    = cellIndex;
    cxy_na[cellIndex] += 1;
}
void Grid::calcColumnIndices(const Grid::Dimensions         &gridDims,
                             const gmx::UpdateGroupsCog     *updateGroupsCog,
                             const int                       atomStart,
                             const int                       atomEnd,
                             gmx::ArrayRef<const gmx::RVec>  x,
                             const int                       dd_zone,
                             const int                      *move,
                             const int                       thread,
                             const int                       nthread,
                             gmx::ArrayRef<int>              cell,
                             gmx::ArrayRef<int>              cxy_na)
{
    const int numColumns = gridDims.numCells[XX]*gridDims.numCells[YY];

    /* We add one extra cell for particles which moved during DD */
    for (int i = 0; i < numColumns + 1; i++)
    {
        cxy_na[i] = 0;
    }

    int taskAtomStart = atomStart + static_cast<int>((thread + 0)*(atomEnd - atomStart))/nthread;
    int taskAtomEnd   = atomStart + static_cast<int>((thread + 1)*(atomEnd - atomStart))/nthread;

    if (dd_zone == 0)
    {
        /* Home zone */
        for (int i = taskAtomStart; i < taskAtomEnd; i++)
        {
            if (move == nullptr || move[i] >= 0)
            {
                const gmx::RVec &coord = (updateGroupsCog ? updateGroupsCog->cogForAtom(i) : x[i]);

                /* We need to be careful with rounding,
                 * particles might be a few bits outside the local zone.
                 * The int cast takes care of the lower bound,
                 * we will explicitly take care of the upper bound.
                 */
                int cx = static_cast<int>((coord[XX] - gridDims.lowerCorner[XX])*gridDims.invCellSize[XX]);
                int cy = static_cast<int>((coord[YY] - gridDims.lowerCorner[YY])*gridDims.invCellSize[YY]);

                if (cx < 0 || cx > gridDims.numCells[XX] ||
                    cy < 0 || cy > gridDims.numCells[YY])
                {
                    gmx_fatal(FARGS,
                              "grid cell cx %d cy %d out of range (max %d %d)\n"
                              "atom %f %f %f, grid->c0 %f %f",
                              cx, cy,
                              gridDims.numCells[XX],
                              gridDims.numCells[YY],
                              x[i][XX], x[i][YY], x[i][ZZ],
                              gridDims.lowerCorner[XX],
                              gridDims.lowerCorner[YY]);
                }
                /* Take care of potential rounding issues */
                cx = std::min(cx, gridDims.numCells[XX] - 1);
                cy = std::min(cy, gridDims.numCells[YY] - 1);

                /* For the moment, cell will contain only the grid-local
                 * x and y indices, not z.
                 */
                setCellAndAtomCount(cell, cx*gridDims.numCells[YY] + cy, cxy_na, i);
            }
            else
            {
                /* Put this moved particle after the end of the grid,
                 * so we can process it later without using conditionals.
                 */
                setCellAndAtomCount(cell, numColumns, cxy_na, i);
            }
        }
    }
    else
    {
        /* Non-home zone */
        for (int i = taskAtomStart; i < taskAtomEnd; i++)
        {
            int cx = static_cast<int>((x[i][XX] - gridDims.lowerCorner[XX])*gridDims.invCellSize[XX]);
            int cy = static_cast<int>((x[i][YY] - gridDims.lowerCorner[YY])*gridDims.invCellSize[YY]);

            /* For non-home zones there could be particles outside
             * the non-bonded cut-off range, which have been communicated
             * for bonded interactions only. For the result it doesn't
             * matter where these end up on the grid. For performance
             * we put them in an extra row at the border.
             */
            cx = std::max(cx, 0);
            cx = std::min(cx, gridDims.numCells[XX] - 1);
            cy = std::max(cy, 0);
            cy = std::min(cy, gridDims.numCells[YY] - 1);

            /* For the moment, cell will contain only the grid-local
             * x and y indices, not z.
             */
            setCellAndAtomCount(cell, cx*gridDims.numCells[YY] + cy, cxy_na, i);
        }
    }
}
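/* Illustrative sketch, not part of GROMACS: calcColumnIndices() flattens the
 * 2D column coordinates as cxy = cx*numCells[YY] + cy, and the sorting code
 * above recovers them with a division and a remainder. A hypothetical
 * round-trip of that mapping, valid for 0 <= cy < numCellsY:
 */
gmx_unused static bool columnIndexRoundTripSketch(const int numCellsY,
                                                  const int cx, const int cy)
{
    const int cxy    = cx*numCellsY + cy;      // flattened column index
    const int backCx = cxy/numCellsY;          // recovers cx (integer division)
    const int backCy = cxy - backCx*numCellsY; // recovers cy (remainder)

    return backCx == cx && backCy == cy;       // always true in the valid range
}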
/*! \brief Resizes grid and atom data which depend on the number of cells */
static void resizeForNumberOfCells(const int         numNbnxnAtoms,
                                   const int         numAtomsMoved,
                                   GridSetData      *gridSetData,
                                   nbnxn_atomdata_t *nbat)
{
    /* Note: gridSetData->cellIndices was already resized before */

    /* To avoid conditionals we store the moved particles at the end of the
     * atom index array; make sure we have enough space.
     */
    gridSetData->atomIndices.resize(numNbnxnAtoms + numAtomsMoved);

    /* Make space in nbat for storing the atom coordinates */
    nbat->resizeCoordinateBuffer(numNbnxnAtoms);
}
void Grid::setCellIndices(int                             ddZone,
                          int                             cellOffset,
                          GridSetData                    *gridSetData,
                          gmx::ArrayRef<GridWork>         gridWork,
                          int                             atomStart,
                          int                             atomEnd,
                          const int                      *atinfo,
                          gmx::ArrayRef<const gmx::RVec>  x,
                          const int                       numAtomsMoved,
                          nbnxn_atomdata_t               *nbat)
{
    cellOffset_ = cellOffset;

    srcAtomBegin_ = atomStart;
    srcAtomEnd_   = atomEnd;

    const int nthread = gmx_omp_nthreads_get(emntPairsearch);

    const int numAtomsPerCell = geometry_.numAtomsPerCell;

    /* Make the cell index as a function of x and y */
    int ncz_max = 0;
    int ncz     = 0;
    cxy_ind_[0] = 0;
    for (int i = 0; i < numColumns() + 1; i++)
    {
        /* We set ncz_max at the beginning of the loop instead of at the end
         * to skip i == numColumns(), the extra column holding particles
         * moved between domains, which do not need to be ordered on the grid.
         */
        if (ncz > ncz_max)
        {
            ncz_max = ncz;
        }
        int cxy_na_i = gridWork[0].numAtomsPerColumn[i];
        for (int thread = 1; thread < nthread; thread++)
        {
            cxy_na_i += gridWork[thread].numAtomsPerColumn[i];
        }
        ncz = (cxy_na_i + numAtomsPerCell - 1)/numAtomsPerCell;
        if (nbat->XFormat == nbatX8)
        {
            /* Make the number of cells a multiple of 2 */
            ncz = (ncz + 1) & ~1;
        }
        cxy_ind_[i+1] = cxy_ind_[i] + ncz;
        /* Clear cxy_na_, so we can reuse the array below */
        cxy_na_[i] = 0;
    }
    numCellsTotal_     = cxy_ind_[numColumns()] - cxy_ind_[0];
    numCellsColumnMax_ = ncz_max;

    /* Resize grid and atom data which depend on the number of cells */
    resizeForNumberOfCells(atomIndexEnd(), numAtomsMoved, gridSetData, nbat);

    if (debug)
    {
        fprintf(debug, "ns na_sc %d na_c %d super-cells: %d x %d y %d z %.1f maxz %d\n",
                numAtomsPerCell, geometry_.numAtomsICluster, numCellsTotal_,
                dimensions_.numCells[XX], dimensions_.numCells[YY],
                numCellsTotal_/(static_cast<double>(numColumns())),
                ncz_max);
        if (gmx_debug_at)
        {
            int i = 0;
            for (int cy = 0; cy < dimensions_.numCells[YY]; cy++)
            {
                for (int cx = 0; cx < dimensions_.numCells[XX]; cx++)
                {
                    fprintf(debug, " %2d", cxy_ind_[i + 1] - cxy_ind_[i]);
                    i++;
                }
                fprintf(debug, "\n");
            }
        }
    }

    /* Make sure the work array for sorting is large enough */
    const int worstCaseSortBufferSize = ncz_max*numAtomsPerCell*c_sortGridMaxSizeFactor;
    if (worstCaseSortBufferSize > gmx::index(gridWork[0].sortBuffer.size()))
    {
        for (GridWork &work : gridWork)
        {
            /* Elements not in use should be -1 */
            work.sortBuffer.resize(worstCaseSortBufferSize, -1);
        }
    }

    /* Now that we know the dimensions we can fill the grid.
     * This is the first, unsorted fill. We sort the columns after this.
     */
    gmx::ArrayRef<int> cells       = gridSetData->cells;
    gmx::ArrayRef<int> atomIndices = gridSetData->atomIndices;
    for (int i = atomStart; i < atomEnd; i++)
    {
        /* At this point nbs->cell contains the local grid x,y indices */
        const int cxy = cells[i];
        atomIndices[firstAtomInColumn(cxy) + cxy_na_[cxy]++] = i;
    }

    if (ddZone == 0)
    {
        /* Set the cell indices for the moved particles */
        int n0 = numCellsTotal_*numAtomsPerCell;
        int n1 = numCellsTotal_*numAtomsPerCell + cxy_na_[numColumns()];

        for (int i = n0; i < n1; i++)
        {
            cells[atomIndices[i]] = i;
        }
    }

    /* Sort the super-cell columns along z into the sub-cells. */
#pragma omp parallel for num_threads(nthread) schedule(static)
    for (int thread = 0; thread < nthread; thread++)
    {
        try
        {
            int columnStart = ((thread + 0)*numColumns())/nthread;
            int columnEnd   = ((thread + 1)*numColumns())/nthread;
            if (geometry_.isSimple)
            {
                sortColumnsCpuGeometry(gridSetData, ddZone,
                                       atomStart, atomEnd, atinfo, x, nbat,
                                       columnStart, columnEnd,
                                       gridWork[thread].sortBuffer);
            }
            else
            {
                sortColumnsGpuGeometry(gridSetData, ddZone,
                                       atomStart, atomEnd, atinfo, x, nbat,
                                       columnStart, columnEnd,
                                       gridWork[thread].sortBuffer);
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    if (geometry_.isSimple && nbat->XFormat == nbatX8)
    {
        combine_bounding_box_pairs(*this, bb_, bbj_);
    }

    if (!geometry_.isSimple)
    {
        numClustersTotal_ = 0;
        for (int i = 0; i < numCellsTotal_; i++)
        {
            numClustersTotal_ += numClusters_[i];
        }
    }

    if (debug)
    {
        if (geometry_.isSimple)
        {
            print_bbsizes_simple(debug, *this);
        }
        else
        {
            fprintf(debug, "ns non-zero sub-cells: %d average atoms %.2f\n",
                    numClustersTotal_,
                    (atomEnd - atomStart)/static_cast<double>(numClustersTotal_));

            print_bbsizes_supersub(debug, *this);
        }
    }
}

} // namespace Nbnxm