/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "nbnxn_grid.h"

/* Standard headers required by the code below (INT_MAX, std::cbrt,
 * std::min/std::max); the original include block was truncated here.
 */
#include <climits>
#include <cmath>

#include <algorithm>

#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_internal.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/nbnxn_util.h"
#include "gromacs/mdtypes/forcerec.h" // only for GET_CGINFO_*
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/smalloc.h"

struct gmx_domdec_zones_t;
static real grid_atom_density(int        numAtoms,
                              const rvec lowerCorner,
                              const rvec upperCorner)
{
    rvec size;

    if (numAtoms == 0)
    {
        /* To avoid zero density we use a minimum of 1 atom */
        numAtoms = 1;
    }

    rvec_sub(upperCorner, lowerCorner, size);

    return numAtoms/(size[XX]*size[YY]*size[ZZ]);
}
static void set_grid_size_xy(const nbnxn_search *nbs,
                             nbnxn_grid_t       *grid,
                             int                 dd_zone,
                             int                 numAtoms,
                             const rvec          lowerCorner,
                             const rvec          upperCorner,
                             real                atomDensity)
{
    rvec size;
    real tlen, tlen_x, tlen_y;

    rvec_sub(upperCorner, lowerCorner, size);

    if (numAtoms > grid->na_sc)
    {
        GMX_ASSERT(atomDensity > 0, "With one or more atoms, the density should be positive");

        /* target cell length */
        if (grid->bSimple)
        {
            /* To minimize the number of zero interactions, we should make
             * the largest of the i/j cells cubic.
             */
            int numAtomsInCell = std::max(grid->na_c, grid->na_cj);

            /* Approximately cubic cells */
            tlen = std::cbrt(numAtomsInCell/atomDensity);
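            /* For illustration: with numAtomsInCell = 8 and an atom density
             * of 100 atoms/nm^3, tlen = cbrt(8/100) ~= 0.43 nm, i.e. the
             * target cell edge holds one cluster at the observed density.
             */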
            tlen_x = tlen;
            tlen_y = tlen;
        }
        else
        {
            /* Approximately cubic sub cells */
            tlen   = std::cbrt(grid->na_c/atomDensity);
            tlen_x = tlen*c_gpuNumClusterPerCellX;
            tlen_y = tlen*c_gpuNumClusterPerCellY;
        }
        /* We round ncx and ncy down, because we get fewer cell pairs
         * in the pair list when the fixed cell dimensions (x,y) are
         * larger than the variable one (z) than the other way around.
         */
        grid->numCells[XX] = std::max(1, static_cast<int>(size[XX]/tlen_x));
        grid->numCells[YY] = std::max(1, static_cast<int>(size[YY]/tlen_y));
    }
    else
    {
        grid->numCells[XX] = 1;
        grid->numCells[YY] = 1;
    }

    for (int d = 0; d < DIM - 1; d++)
    {
        grid->cellSize[d]    = size[d]/grid->numCells[d];
        grid->invCellSize[d] = 1/grid->cellSize[d];
    }

    if (dd_zone > 0)
    {
        /* This is a non-home zone, add an extra row of cells
         * for particles communicated for bonded interactions.
         * These can be beyond the cut-off. It doesn't matter where
         * they end up on the grid, but for performance it's better
         * if they don't end up in cells that can be within cut-off range.
         */
        grid->numCells[XX]++;
        grid->numCells[YY]++;
    }

    /* We need one additional cell entry for particles moved by DD */
    int numCellsXY = grid->numCells[XX]*grid->numCells[YY];
    grid->cxy_na.resize(numCellsXY + 1);
    grid->cxy_ind.resize(numCellsXY + 2);

    for (nbnxn_search_work_t &work : nbs->work)
    {
        work.cxy_na.resize(numCellsXY + 1);
    }

    /* Worst case scenario of 1 atom in each last cell */
    int maxNumCells;
    if (grid->na_cj <= grid->na_c)
    {
        maxNumCells = numAtoms/grid->na_sc + numCellsXY;
    }
    else
    {
        maxNumCells = numAtoms/grid->na_sc + numCellsXY*grid->na_cj/grid->na_c;
    }

    grid->nsubc.resize(maxNumCells);
    grid->bbcz.resize(maxNumCells*NNBSBB_D);

    /* This resize also zeros the contents, which avoids possible
     * floating exceptions in SIMD with the unused bb elements.
     */
    if (grid->bSimple)
    {
        grid->bb.resize(maxNumCells);
    }
    else
    {
#if NBNXN_BBXXXX
        grid->pbb.resize(maxNumCells*c_gpuNumClusterPerCell/STRIDE_PBB*NNBSBB_XXXX);
#else
        grid->bb.resize(maxNumCells*c_gpuNumClusterPerCell);
#endif
    }

    if (grid->bSimple)
    {
        if (grid->na_cj == grid->na_c)
        {
            grid->bbj = grid->bb;
        }
        else
        {
            grid->bbjStorage.resize(maxNumCells*grid->na_c/grid->na_cj);
            grid->bbj = grid->bbjStorage;
        }
    }

    grid->flags.resize(maxNumCells);
    if (nbs->bFEP)
    {
        grid->fep.resize(maxNumCells*grid->na_sc/grid->na_c);
    }

    copy_rvec(lowerCorner, grid->c0);
    copy_rvec(upperCorner, grid->c1);
    copy_rvec(size,        grid->size);
}
/* We need to sort particles in grid columns on the z-coordinate.
 * As particles are very often distributed homogeneously, we use a
 * sorting algorithm similar to pigeonhole sort. We multiply the
 * z-coordinate by a factor, cast to an int and try to store in that
 * hole. If the hole is full, we move this or another particle. A
 * second pass is needed to make contiguous elements.
 * SORT_GRID_OVERSIZE is the ratio of holes to particles. 4 is the
 * optimal value for homogeneous particle distributions and allows for
 * an O(#particles) sort up to distributions where all particles are
 * concentrated in 1/4 of the space. No NlogN fallback is implemented,
 * as it can be expensive to detect inhomogeneous particle distributions.
 * SGSF is the maximum ratio of holes used: in the worst case all
 * particles end up in the last hole and we need #particles extra holes
 * at the end.
 */
#define SORT_GRID_OVERSIZE 4
#define SGSF (SORT_GRID_OVERSIZE + 1)
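
/* For illustration: sorting a column of n = 100 atoms with n_per_h = 100
 * uses n_per_h*SORT_GRID_OVERSIZE = 400 holes, and in the worst case all
 * 100 atoms spill past the last hole, so n_per_h*SORT_GRID_OVERSIZE + n =
 * 500 = n*SGSF slots are sufficient.
 */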
/* Sort particle index a on coordinates x along dim.
 * Backwards tells if we want decreasing instead of increasing coordinates.
 * h0 is the minimum of the coordinate range.
 * invh is the 1/length of the sorting range.
 * n_per_h (>=n) is the expected average number of particles per 1/invh.
 * sort is the sorting work array.
 * sort should have a size of at least n_per_h*SORT_GRID_OVERSIZE + n,
 * or easier, allocate at least n*SGSF elements.
 */
static void sort_atoms(int dim, gmx_bool Backwards,
                       int gmx_unused dd_zone,
                       int *a, int n, const rvec *x,
                       real h0, real invh, int n_per_h,
                       gmx::ArrayRef<int> sort)
{
    if (n <= 1)
    {
        /* Nothing to do */
        return;
    }

    if (n > n_per_h)
    {
        gmx_incons("n > n_per_h");
    }

    /* Transform the inverse range height into the inverse hole height */
    invh *= n_per_h*SORT_GRID_OVERSIZE;
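    /* For illustration: with a sorting range of height h and n_per_h = 100,
     * each of the 100*SORT_GRID_OVERSIZE = 400 holes spans h/400, and a
     * coordinate z maps to hole (z - h0)*invh after this rescaling.
     */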
    /* Set nsort to the maximum possible number of holes used.
     * In worst case all n elements end up in the last bin.
     */
    int nsort = n_per_h*SORT_GRID_OVERSIZE + n;

    /* Determine the index range used, so we can limit it for the second pass */
    int zi_min = INT_MAX;
    int zi_max = -1;

    /* Sort the particles using a simple index sort */
    for (int i = 0; i < n; i++)
    {
        /* The cast takes care of floating-point rounding effects below zero.
         * This code assumes particles are less than 1/SORT_GRID_OVERSIZE
         * times the box height out of the box.
         */
        int zi = static_cast<int>((x[a[i]][dim] - h0)*invh);

#ifndef NDEBUG
        /* As we can have rounding effects, we use > instead of >= here */
        if (zi < 0 || (dd_zone == 0 && zi > n_per_h*SORT_GRID_OVERSIZE))
        {
            gmx_fatal(FARGS, "(int)((x[%d][%c]=%f - %f)*%f) = %d, not in 0 - %d*%d\n",
                      a[i], 'x'+dim, x[a[i]][dim], h0, invh, zi,
                      n_per_h, SORT_GRID_OVERSIZE);
        }
#endif

        /* In a non-local domain, particles communicated for bonded interactions
         * can be far beyond the grid size, which is set by the non-bonded
         * cut-off distance. We sort such particles into the last cell.
         */
        if (zi > n_per_h*SORT_GRID_OVERSIZE)
        {
            zi = n_per_h*SORT_GRID_OVERSIZE;
        }

        /* Ideally this particle should go in sort cell zi,
         * but that might already be in use,
         * in that case find the first empty cell higher up
         */
        if (sort[zi] < 0)
        {
            sort[zi] = a[i];
            zi_min   = std::min(zi_min, zi);
            zi_max   = std::max(zi_max, zi);
        }
        else
        {
            /* We have multiple atoms in the same sorting slot.
             * Sort on real z for minimal bounding box size.
             * There is an extra check for identical z to ensure
             * a well-defined output order, independent of input order,
             * to ensure binary reproducibility after restarts.
             */
            while (sort[zi] >= 0 && ( x[a[i]][dim] >  x[sort[zi]][dim] ||
                                      (x[a[i]][dim] == x[sort[zi]][dim] &&
                                       a[i] > sort[zi])))
            {
                zi++;
            }

            if (sort[zi] >= 0)
            {
                /* Shift all elements by one slot until we find an empty slot */
                int cp  = sort[zi];
                int zim = zi + 1;
                while (sort[zim] >= 0)
                {
                    int tmp   = sort[zim];
                    sort[zim] = cp;
                    cp        = tmp;
                    zim++;
                }
                sort[zim] = cp;
                zi_max    = std::max(zi_max, zim);
            }
            sort[zi] = a[i];
            zi_max   = std::max(zi_max, zi);
        }
    }

    /* Pull the particles in sorted order from the holes in sort */
    int c = 0;
    if (!Backwards)
    {
        for (int zi = 0; zi < nsort; zi++)
        {
            if (sort[zi] >= 0)
            {
                a[c++]   = sort[zi];
                sort[zi] = -1;
            }
        }
    }
    else
    {
        for (int zi = zi_max; zi >= zi_min; zi--)
        {
            if (sort[zi] >= 0)
            {
                a[c++]   = sort[zi];
                sort[zi] = -1;
            }
        }
    }
    if (c < n)
    {
        gmx_incons("Lost particles while sorting");
    }
}
#define R2F_D(x) ((float)((x) >= 0 ? ((1-GMX_FLOAT_EPS)*(x)) : ((1+GMX_FLOAT_EPS)*(x))))
#define R2F_U(x) ((float)((x) >= 0 ? ((1+GMX_FLOAT_EPS)*(x)) : ((1-GMX_FLOAT_EPS)*(x))))
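
/* For illustration: R2F_D(1.0) is slightly below 1.0 and R2F_U(1.0) is
 * slightly above, so lower bounds round down and upper bounds round up.
 * This guarantees the float bounding box still encloses the original
 * real-precision coordinates after a possible double-to-float conversion.
 */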
/* Coordinate order x,y,z, bb order xyz0 */
static void calc_bounding_box(int na, int stride, const real *x, nbnxn_bb_t *bb)
{
    int  i;
    real xl, xh, yl, yh, zl, zh;

    i  = 0;
    xl = x[i+XX];
    xh = x[i+XX];
    yl = x[i+YY];
    yh = x[i+YY];
    zl = x[i+ZZ];
    zh = x[i+ZZ];
    i += stride;
    for (int j = 1; j < na; j++)
    {
        xl = std::min(xl, x[i+XX]);
        xh = std::max(xh, x[i+XX]);
        yl = std::min(yl, x[i+YY]);
        yh = std::max(yh, x[i+YY]);
        zl = std::min(zl, x[i+ZZ]);
        zh = std::max(zh, x[i+ZZ]);
        i += stride;
    }
    /* Note: possible double to float conversion here */
    bb->lower[BB_X] = R2F_D(xl);
    bb->lower[BB_Y] = R2F_D(yl);
    bb->lower[BB_Z] = R2F_D(zl);
    bb->upper[BB_X] = R2F_U(xh);
    bb->upper[BB_Y] = R2F_U(yh);
    bb->upper[BB_Z] = R2F_U(zh);
}
/* Packed coordinates, bb order xyz0 */
static void calc_bounding_box_x_x4(int na, const real *x, nbnxn_bb_t *bb)
{
    real xl, xh, yl, yh, zl, zh;

    xl = x[XX*c_packX4];
    xh = x[XX*c_packX4];
    yl = x[YY*c_packX4];
    yh = x[YY*c_packX4];
    zl = x[ZZ*c_packX4];
    zh = x[ZZ*c_packX4];
    for (int j = 1; j < na; j++)
    {
        xl = std::min(xl, x[j+XX*c_packX4]);
        xh = std::max(xh, x[j+XX*c_packX4]);
        yl = std::min(yl, x[j+YY*c_packX4]);
        yh = std::max(yh, x[j+YY*c_packX4]);
        zl = std::min(zl, x[j+ZZ*c_packX4]);
        zh = std::max(zh, x[j+ZZ*c_packX4]);
    }
    /* Note: possible double to float conversion here */
    bb->lower[BB_X] = R2F_D(xl);
    bb->lower[BB_Y] = R2F_D(yl);
    bb->lower[BB_Z] = R2F_D(zl);
    bb->upper[BB_X] = R2F_U(xh);
    bb->upper[BB_Y] = R2F_U(yh);
    bb->upper[BB_Z] = R2F_U(zh);
}

/* Packed coordinates, bb order xyz0 */
static void calc_bounding_box_x_x8(int na, const real *x, nbnxn_bb_t *bb)
{
    real xl, xh, yl, yh, zl, zh;

    xl = x[XX*c_packX8];
    xh = x[XX*c_packX8];
    yl = x[YY*c_packX8];
    yh = x[YY*c_packX8];
    zl = x[ZZ*c_packX8];
    zh = x[ZZ*c_packX8];
    for (int j = 1; j < na; j++)
    {
        xl = std::min(xl, x[j+XX*c_packX8]);
        xh = std::max(xh, x[j+XX*c_packX8]);
        yl = std::min(yl, x[j+YY*c_packX8]);
        yh = std::max(yh, x[j+YY*c_packX8]);
        zl = std::min(zl, x[j+ZZ*c_packX8]);
        zh = std::max(zh, x[j+ZZ*c_packX8]);
    }
    /* Note: possible double to float conversion here */
    bb->lower[BB_X] = R2F_D(xl);
    bb->lower[BB_Y] = R2F_D(yl);
    bb->lower[BB_Z] = R2F_D(zl);
    bb->upper[BB_X] = R2F_U(xh);
    bb->upper[BB_Y] = R2F_U(yh);
    bb->upper[BB_Z] = R2F_U(zh);
}
/* Packed coordinates, bb order xyz0 */
gmx_unused static void calc_bounding_box_x_x4_halves(int na, const real *x,
                                                     nbnxn_bb_t *bb, nbnxn_bb_t *bbj)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    calc_bounding_box_x_x4(std::min(na, 2), x, bbj);

    if (na > 2)
    {
        calc_bounding_box_x_x4(std::min(na-2, 2), x+(c_packX4>>1), bbj+1);
    }
    else
    {
        /* Set the "empty" bounding box to the same as the first one,
         * so we don't need to treat special cases in the rest of the code.
         */
#if NBNXN_SEARCH_BB_SIMD4
        store4(&bbj[1].lower[0], load4(&bbj[0].lower[0]));
        store4(&bbj[1].upper[0], load4(&bbj[0].upper[0]));
#else
        bbj[1] = bbj[0];
#endif
    }

#if NBNXN_SEARCH_BB_SIMD4
    store4(&bb->lower[0], min(load4(&bbj[0].lower[0]), load4(&bbj[1].lower[0])));
    store4(&bb->upper[0], max(load4(&bbj[0].upper[0]), load4(&bbj[1].upper[0])));
#else
    for (int i = 0; i < NNBSBB_C; i++)
    {
        bb->lower[i] = std::min(bbj[0].lower[i], bbj[1].lower[i]);
        bb->upper[i] = std::max(bbj[0].upper[i], bbj[1].upper[i]);
    }
#endif
}
#if NBNXN_SEARCH_BB_SIMD4

/* Coordinate order xyz, bb order xxxxyyyyzzzz */
static void calc_bounding_box_xxxx(int na, int stride, const real *x, float *bb)
{
    int  i;
    real xl, xh, yl, yh, zl, zh;

    i  = 0;
    xl = x[i+XX];
    xh = x[i+XX];
    yl = x[i+YY];
    yh = x[i+YY];
    zl = x[i+ZZ];
    zh = x[i+ZZ];
    i += stride;
    for (int j = 1; j < na; j++)
    {
        xl = std::min(xl, x[i+XX]);
        xh = std::max(xh, x[i+XX]);
        yl = std::min(yl, x[i+YY]);
        yh = std::max(yh, x[i+YY]);
        zl = std::min(zl, x[i+ZZ]);
        zh = std::max(zh, x[i+ZZ]);
        i += stride;
    }
    /* Note: possible double to float conversion here */
    bb[0*STRIDE_PBB] = R2F_D(xl);
    bb[1*STRIDE_PBB] = R2F_D(yl);
    bb[2*STRIDE_PBB] = R2F_D(zl);
    bb[3*STRIDE_PBB] = R2F_U(xh);
    bb[4*STRIDE_PBB] = R2F_U(yh);
    bb[5*STRIDE_PBB] = R2F_U(zh);
}

#endif /* NBNXN_SEARCH_BB_SIMD4 */
#if NBNXN_SEARCH_SIMD4_FLOAT_X_BB

/* Coordinate order xyz?, bb order xyz0 */
static void calc_bounding_box_simd4(int na, const float *x, nbnxn_bb_t *bb)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    Simd4Float bb_0_S, bb_1_S;
    Simd4Float x_S;

    bb_0_S = load4(x);
    bb_1_S = bb_0_S;

    for (int i = 1; i < na; i++)
    {
        x_S    = load4(x+i*NNBSBB_C);
        bb_0_S = min(bb_0_S, x_S);
        bb_1_S = max(bb_1_S, x_S);
    }

    store4(&bb->lower[0], bb_0_S);
    store4(&bb->upper[0], bb_1_S);
}

/* Coordinate order xyz?, bb order xxxxyyyyzzzz */
static void calc_bounding_box_xxxx_simd4(int na, const float *x,
                                         nbnxn_bb_t *bb_work_aligned,
                                         float *bb)
{
    calc_bounding_box_simd4(na, x, bb_work_aligned);

    bb[0*STRIDE_PBB] = bb_work_aligned->lower[BB_X];
    bb[1*STRIDE_PBB] = bb_work_aligned->lower[BB_Y];
    bb[2*STRIDE_PBB] = bb_work_aligned->lower[BB_Z];
    bb[3*STRIDE_PBB] = bb_work_aligned->upper[BB_X];
    bb[4*STRIDE_PBB] = bb_work_aligned->upper[BB_Y];
    bb[5*STRIDE_PBB] = bb_work_aligned->upper[BB_Z];
}

#endif /* NBNXN_SEARCH_SIMD4_FLOAT_X_BB */
/* Combines pairs of consecutive bounding boxes */
static void combine_bounding_box_pairs(nbnxn_grid_t                    *grid,
                                       gmx::ArrayRef<const nbnxn_bb_t>  bb)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    for (int i = 0; i < grid->numCells[XX]*grid->numCells[YY]; i++)
    {
        /* Starting bb in a column is expected to be 2-aligned */
        int sc2 = grid->cxy_ind[i]>>1;
        /* For odd numbers skip the last bb here */
        int nc2 = (grid->cxy_na[i]+3)>>(2+1);
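        /* For illustration: a column of 10 atoms in 4-atom cells has
         * (10+3)>>2 = 3 i-cluster bounding boxes, giving nc2 = (10+3)>>3 = 1
         * complete pair; the leftover odd bb is duplicated below.
         */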
        int c2;
        for (c2 = sc2; c2 < sc2+nc2; c2++)
        {
#if NBNXN_SEARCH_BB_SIMD4
            Simd4Float min_S, max_S;

            min_S = min(load4(&bb[c2*2+0].lower[0]),
                        load4(&bb[c2*2+1].lower[0]));
            max_S = max(load4(&bb[c2*2+0].upper[0]),
                        load4(&bb[c2*2+1].upper[0]));
            store4(&grid->bbj[c2].lower[0], min_S);
            store4(&grid->bbj[c2].upper[0], max_S);
#else
            for (int j = 0; j < NNBSBB_C; j++)
            {
                grid->bbj[c2].lower[j] = std::min(bb[c2*2+0].lower[j],
                                                  bb[c2*2+1].lower[j]);
                grid->bbj[c2].upper[j] = std::max(bb[c2*2+0].upper[j],
                                                  bb[c2*2+1].upper[j]);
            }
#endif
        }
        if (((grid->cxy_na[i]+3)>>2) & 1)
        {
            /* The bb count in this column is odd: duplicate the last bb */
            for (int j = 0; j < NNBSBB_C; j++)
            {
                grid->bbj[c2].lower[j] = bb[c2*2].lower[j];
                grid->bbj[c2].upper[j] = bb[c2*2].upper[j];
            }
        }
    }
}
/* Prints the average bb size, used for debug output */
static void print_bbsizes_simple(FILE               *fp,
                                 const nbnxn_grid_t *grid)
{
    dvec ba;

    clear_dvec(ba);
    for (int c = 0; c < grid->nc; c++)
    {
        for (int d = 0; d < DIM; d++)
        {
            ba[d] += grid->bb[c].upper[d] - grid->bb[c].lower[d];
        }
    }
    dsvmul(1.0/grid->nc, ba, ba);

    real avgCellSizeZ = (grid->atom_density > 0 ? grid->na_c/(grid->atom_density*grid->cellSize[XX]*grid->cellSize[YY]) : 0.0);

    fprintf(fp, "ns bb: grid %4.2f %4.2f %4.2f abs %4.2f %4.2f %4.2f rel %4.2f %4.2f %4.2f\n",
            grid->cellSize[XX],
            grid->cellSize[YY],
            avgCellSizeZ,
            ba[XX], ba[YY], ba[ZZ],
            ba[XX]*grid->invCellSize[XX],
            ba[YY]*grid->invCellSize[YY],
            grid->atom_density > 0 ? ba[ZZ]/avgCellSizeZ : 0.0);
}
/* Prints the average bb size, used for debug output */
static void print_bbsizes_supersub(FILE               *fp,
                                   const nbnxn_grid_t *grid)
{
    int  ns;
    dvec ba;

    clear_dvec(ba);
    ns = 0;
    for (int c = 0; c < grid->nc; c++)
    {
#if NBNXN_BBXXXX
        for (int s = 0; s < grid->nsubc[c]; s += STRIDE_PBB)
        {
            int cs_w = (c*c_gpuNumClusterPerCell + s)/STRIDE_PBB;
            for (int i = 0; i < STRIDE_PBB; i++)
            {
                for (int d = 0; d < DIM; d++)
                {
                    ba[d] +=
                        grid->pbb[cs_w*NNBSBB_XXXX+(DIM+d)*STRIDE_PBB+i] -
                        grid->pbb[cs_w*NNBSBB_XXXX+ d *STRIDE_PBB+i];
                }
            }
        }
#else
        for (int s = 0; s < grid->nsubc[c]; s++)
        {
            int cs = c*c_gpuNumClusterPerCell + s;
            for (int d = 0; d < DIM; d++)
            {
                ba[d] += grid->bb[cs].upper[d] - grid->bb[cs].lower[d];
            }
        }
#endif
        ns += grid->nsubc[c];
    }
    dsvmul(1.0/ns, ba, ba);

    real avgClusterSizeZ = (grid->atom_density > 0 ? grid->na_sc/(grid->atom_density*grid->cellSize[XX]*grid->cellSize[YY]*c_gpuNumClusterPerCellZ) : 0.0);

    fprintf(fp, "ns bb: grid %4.2f %4.2f %4.2f abs %4.2f %4.2f %4.2f rel %4.2f %4.2f %4.2f\n",
            grid->cellSize[XX]/c_gpuNumClusterPerCellX,
            grid->cellSize[YY]/c_gpuNumClusterPerCellY,
            avgClusterSizeZ,
            ba[XX], ba[YY], ba[ZZ],
            ba[XX]*c_gpuNumClusterPerCellX*grid->invCellSize[XX],
            ba[YY]*c_gpuNumClusterPerCellY*grid->invCellSize[YY],
            grid->atom_density > 0 ? ba[ZZ]/avgClusterSizeZ : 0.0);
}
/* Set non-bonded interaction flags for the current cluster.
 * Sorts atoms on LJ coefficients: !=0 first, ==0 at the end.
 */
static void sort_cluster_on_flag(int                 numAtomsInCluster,
                                 int                 atomStart,
                                 int                 atomEnd,
                                 const int          *atinfo,
                                 gmx::ArrayRef<int>  order,
                                 int                *flags)
{
    constexpr int c_maxNumAtomsInCluster = 8;
    int           sort1[c_maxNumAtomsInCluster];
    int           sort2[c_maxNumAtomsInCluster];

    GMX_ASSERT(numAtomsInCluster <= c_maxNumAtomsInCluster, "Need to increase c_maxNumAtomsInCluster to support larger clusters");

    *flags = 0;

    int subc = 0;
    for (int s = atomStart; s < atomEnd; s += numAtomsInCluster)
    {
        /* Make lists for this (sub-)cell on atoms with and without LJ */
        int      n1       = 0;
        int      n2       = 0;
        gmx_bool haveQ    = FALSE;
        int      a_lj_max = -1;
        for (int a = s; a < std::min(s + numAtomsInCluster, atomEnd); a++)
        {
            haveQ = haveQ || GET_CGINFO_HAS_Q(atinfo[order[a]]);

            if (GET_CGINFO_HAS_VDW(atinfo[order[a]]))
            {
                sort1[n1++] = order[a];
                a_lj_max    = a;
            }
            else
            {
                sort2[n2++] = order[a];
            }
        }

        /* If we don't have atoms with LJ, there's nothing to sort */
        if (n1 > 0)
        {
            *flags |= NBNXN_CI_DO_LJ(subc);

            if (2*n1 <= numAtomsInCluster)
            {
                /* Only sort when strictly necessary. Ordering particles
                 * can lead to less accurate summation due to rounding,
                 * both for LJ and Coulomb interactions.
                 */
                if (2*(a_lj_max - s) >= numAtomsInCluster)
                {
                    for (int i = 0; i < n1; i++)
                    {
                        order[atomStart + i] = sort1[i];
                    }
                    for (int j = 0; j < n2; j++)
                    {
                        order[atomStart + n1 + j] = sort2[j];
                    }
                }

                *flags |= NBNXN_CI_HALF_LJ(subc);
            }
        }
        if (haveQ)
        {
            *flags |= NBNXN_CI_DO_COUL(subc);
        }
        subc++;
    }
}
/* Fill a pair search cell with atoms.
 * Potentially sorts atoms and sets the interaction flags.
 */
static void fill_cell(nbnxn_search          *nbs,
                      nbnxn_grid_t          *grid,
                      nbnxn_atomdata_t      *nbat,
                      int                    atomStart,
                      int                    atomEnd,
                      const int             *atinfo,
                      const rvec            *x,
                      nbnxn_bb_t gmx_unused *bb_work_aligned)
{
    const int numAtoms = atomEnd - atomStart;

    if (grid->bSimple)
    {
        /* Note that non-local grids are already sorted.
         * sort_cluster_on_flag will then only set the flags and the sorting
         * will not affect the atom order.
         */
        sort_cluster_on_flag(grid->na_c, atomStart, atomEnd, atinfo, nbs->a,
                             grid->flags.data() + (atomStart >> grid->na_c_2log) - grid->cell0);
    }

    if (nbs->bFEP)
    {
        /* Set the fep flag for perturbed atoms in this (sub-)cell */

        /* The grid-local cluster/(sub-)cell index */
        int cell = (atomStart >> grid->na_c_2log) - grid->cell0*(grid->bSimple ? 1 : c_gpuNumClusterPerCell);
        grid->fep[cell] = 0;
        for (int at = atomStart; at < atomEnd; at++)
        {
            if (nbs->a[at] >= 0 && GET_CGINFO_FEP(atinfo[nbs->a[at]]))
            {
                grid->fep[cell] |= (1 << (at - atomStart));
            }
        }
    }

    /* Now we have sorted the atoms, set the cell indices */
    for (int at = atomStart; at < atomEnd; at++)
    {
        nbs->cell[nbs->a[at]] = at;
    }

    copy_rvec_to_nbat_real(nbs->a.data() + atomStart, numAtoms, grid->na_c, x,
                           nbat->XFormat, nbat->x, atomStart);

    if (nbat->XFormat == nbatX4)
    {
        /* Store the bounding boxes as xyz.xyz. */
        size_t      offset = (atomStart - grid->cell0*grid->na_sc) >> grid->na_c_2log;
        nbnxn_bb_t *bb_ptr = grid->bb.data() + offset;

#if GMX_SIMD && GMX_SIMD_REAL_WIDTH == 2
        if (2*grid->na_cj == grid->na_c)
        {
            calc_bounding_box_x_x4_halves(numAtoms, nbat->x + atom_to_x_index<c_packX4>(atomStart), bb_ptr,
                                          grid->bbj.data() + offset*2);
        }
        else
#endif
        {
            calc_bounding_box_x_x4(numAtoms, nbat->x + atom_to_x_index<c_packX4>(atomStart), bb_ptr);
        }
    }
    else if (nbat->XFormat == nbatX8)
    {
        /* Store the bounding boxes as xyz.xyz. */
        size_t      offset = (atomStart - grid->cell0*grid->na_sc) >> grid->na_c_2log;
        nbnxn_bb_t *bb_ptr = grid->bb.data() + offset;

        calc_bounding_box_x_x8(numAtoms, nbat->x + atom_to_x_index<c_packX8>(atomStart), bb_ptr);
    }
#if NBNXN_BBXXXX
    else if (!grid->bSimple)
    {
        /* Store the bounding boxes in a format convenient
         * for SIMD4 calculations: xxxxyyyyzzzz...
         */
        float *pbb_ptr =
            grid->pbb.data() +
            ((atomStart - grid->cell0*grid->na_sc) >> (grid->na_c_2log + STRIDE_PBB_2LOG))*NNBSBB_XXXX +
            (((atomStart - grid->cell0*grid->na_sc) >> grid->na_c_2log) & (STRIDE_PBB - 1));

#if NBNXN_SEARCH_SIMD4_FLOAT_X_BB
        if (nbat->XFormat == nbatXYZQ)
        {
            calc_bounding_box_xxxx_simd4(numAtoms, nbat->x + atomStart*nbat->xstride,
                                         bb_work_aligned, pbb_ptr);
        }
        else
#endif
        {
            calc_bounding_box_xxxx(numAtoms, nbat->xstride, nbat->x + atomStart*nbat->xstride,
                                   pbb_ptr);
        }
        if (gmx_debug_at)
        {
            fprintf(debug, "cell %4d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
                    atomStart >> grid->na_c_2log,
                    pbb_ptr[0*STRIDE_PBB], pbb_ptr[3*STRIDE_PBB],
                    pbb_ptr[1*STRIDE_PBB], pbb_ptr[4*STRIDE_PBB],
                    pbb_ptr[2*STRIDE_PBB], pbb_ptr[5*STRIDE_PBB]);
        }
    }
#endif
    else
    {
        /* Store the bounding boxes as xyz.xyz. */
        nbnxn_bb_t *bb_ptr = grid->bb.data() + ((atomStart - grid->cell0*grid->na_sc) >> grid->na_c_2log);

        calc_bounding_box(numAtoms, nbat->xstride, nbat->x + atomStart*nbat->xstride,
                          bb_ptr);

        if (gmx_debug_at)
        {
            int bbo = (atomStart - grid->cell0*grid->na_sc)/grid->na_c;
            fprintf(debug, "cell %4d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
                    atomStart >> grid->na_c_2log,
                    grid->bb[bbo].lower[BB_X],
                    grid->bb[bbo].lower[BB_Y],
                    grid->bb[bbo].lower[BB_Z],
                    grid->bb[bbo].upper[BB_X],
                    grid->bb[bbo].upper[BB_Y],
                    grid->bb[bbo].upper[BB_Z]);
        }
    }
}
/* Spatially sort the atoms within one grid column */
static void sort_columns_simple(nbnxn_search *nbs,
                                int dd_zone,
                                nbnxn_grid_t *grid,
                                int atomStart, int atomEnd,
                                const int *atinfo,
                                const rvec *x,
                                nbnxn_atomdata_t *nbat,
                                int cxy_start, int cxy_end,
                                gmx::ArrayRef<int> sort_work)
{
    if (debug)
    {
        fprintf(debug, "cell0 %d sorting columns %d - %d, atoms %d - %d\n",
                grid->cell0, cxy_start, cxy_end, atomStart, atomEnd);
    }

    /* Sort the atoms within each x,y column in 3 dimensions */
    for (int cxy = cxy_start; cxy < cxy_end; cxy++)
    {
        int na  = grid->cxy_na[cxy];
        int ncz = grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy];
        int ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;

        /* Sort the atoms within each x,y column on z coordinate */
        sort_atoms(ZZ, FALSE, dd_zone,
                   nbs->a.data() + ash, na, x,
                   grid->c0[ZZ],
                   1.0/grid->size[ZZ], ncz*grid->na_sc,
                   sort_work);

        /* Fill the ncz cells in this column */
        int cfilled = grid->cxy_ind[cxy];
        for (int cz = 0; cz < ncz; cz++)
        {
            int c     = grid->cxy_ind[cxy] + cz;

            int ash_c = ash + cz*grid->na_sc;
            int na_c  = std::min(grid->na_sc, na-(ash_c-ash));

            fill_cell(nbs, grid, nbat,
                      ash_c, ash_c+na_c, atinfo, x,
                      nullptr);

            /* This copy to bbcz is not really necessary.
             * But it allows us to use the same grid search code
             * for the simple and supersub cell setups.
             */
            if (na_c > 0)
            {
                cfilled = c;
            }
            grid->bbcz[c*NNBSBB_D  ] = grid->bb[cfilled].lower[BB_Z];
            grid->bbcz[c*NNBSBB_D+1] = grid->bb[cfilled].upper[BB_Z];
        }

        /* Set the unused atom indices to -1 */
        for (int ind = na; ind < ncz*grid->na_sc; ind++)
        {
            nbs->a[ash+ind] = -1;
        }
    }
}
/* Spatially sort the atoms within one grid column */
static void sort_columns_supersub(nbnxn_search *nbs,
                                  int dd_zone,
                                  nbnxn_grid_t *grid,
                                  int atomStart, int atomEnd,
                                  const int *atinfo,
                                  const rvec *x,
                                  nbnxn_atomdata_t *nbat,
                                  int cxy_start, int cxy_end,
                                  gmx::ArrayRef<int> sort_work)
{
    nbnxn_bb_t  bb_work_array[2];
    nbnxn_bb_t *bb_work_aligned = reinterpret_cast<nbnxn_bb_t *>((reinterpret_cast<std::size_t>(bb_work_array + 1)) & (~(static_cast<std::size_t>(15))));
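    /* The cast above rounds the address of the second array element down
     * to a multiple of 16 bytes. Since sizeof(nbnxn_bb_t) >= 16, the result
     * is a 16-byte aligned bb that still lies within the two-element work
     * array, as required for SIMD4 loads and stores.
     */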
    if (debug)
    {
        fprintf(debug, "cell0 %d sorting columns %d - %d, atoms %d - %d\n",
                grid->cell0, cxy_start, cxy_end, atomStart, atomEnd);
    }

    int subdiv_x = grid->na_c;
    int subdiv_y = c_gpuNumClusterPerCellX*subdiv_x;
    int subdiv_z = c_gpuNumClusterPerCellY*subdiv_y;

    /* Sort the atoms within each x,y column in 3 dimensions.
     * Loop over all columns on the x/y grid.
     */
    for (int cxy = cxy_start; cxy < cxy_end; cxy++)
    {
        int gridX            = cxy/grid->numCells[YY];
        int gridY            = cxy - gridX*grid->numCells[YY];

        int numAtomsInColumn = grid->cxy_na[cxy];
        int numCellsInColumn = grid->cxy_ind[cxy + 1] - grid->cxy_ind[cxy];
        int ash              = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;

        /* Sort the atoms within each x,y column on z coordinate */
        sort_atoms(ZZ, FALSE, dd_zone,
                   nbs->a.data() + ash, numAtomsInColumn, x,
                   grid->c0[ZZ],
                   1.0/grid->size[ZZ], numCellsInColumn*grid->na_sc,
                   sort_work);

        /* This loop goes over the cells and clusters along z at once */
        for (int sub_z = 0; sub_z < numCellsInColumn*c_gpuNumClusterPerCellZ; sub_z++)
        {
            int ash_z = ash + sub_z*subdiv_z;
            int na_z  = std::min(subdiv_z, numAtomsInColumn - (ash_z - ash));

            /* We have already sorted on z */
            int cz = -1;
            if (sub_z % c_gpuNumClusterPerCellZ == 0)
            {
                cz = sub_z/c_gpuNumClusterPerCellZ;
                int cell = grid->cxy_ind[cxy] + cz;

                /* The number of atoms in this cell/super-cluster */
                int numAtoms = std::min(grid->na_sc, numAtomsInColumn - (ash_z - ash));

                grid->nsubc[cell] = std::min(c_gpuNumClusterPerCell,
                                             (numAtoms + grid->na_c - 1)/grid->na_c);

                /* Store the z-boundaries of the bounding box of the cell */
                grid->bbcz[cell*NNBSBB_D  ] = x[nbs->a[ash_z]][ZZ];
                grid->bbcz[cell*NNBSBB_D+1] = x[nbs->a[ash_z + numAtoms - 1]][ZZ];
            }

            if (c_gpuNumClusterPerCellY > 1)
            {
                /* Sort the atoms along y */
                sort_atoms(YY, (sub_z & 1), dd_zone,
                           nbs->a.data() + ash_z, na_z, x,
                           grid->c0[YY] + gridY*grid->cellSize[YY],
                           grid->invCellSize[YY], subdiv_z,
                           sort_work);
            }

            for (int sub_y = 0; sub_y < c_gpuNumClusterPerCellY; sub_y++)
            {
                int ash_y = ash_z + sub_y*subdiv_y;
                int na_y  = std::min(subdiv_y, numAtomsInColumn - (ash_y - ash));

                if (c_gpuNumClusterPerCellX > 1)
                {
                    /* Sort the atoms along x */
                    sort_atoms(XX, ((cz*c_gpuNumClusterPerCellY + sub_y) & 1), dd_zone,
                               nbs->a.data() + ash_y, na_y, x,
                               grid->c0[XX] + gridX*grid->cellSize[XX],
                               grid->invCellSize[XX], subdiv_y,
                               sort_work);
                }

                for (int sub_x = 0; sub_x < c_gpuNumClusterPerCellX; sub_x++)
                {
                    int ash_x = ash_y + sub_x*subdiv_x;
                    int na_x  = std::min(subdiv_x, numAtomsInColumn - (ash_x - ash));

                    fill_cell(nbs, grid, nbat,
                              ash_x, ash_x + na_x, atinfo, x,
                              bb_work_aligned);
                }
            }
        }

        /* Set the unused atom indices to -1 */
        for (int ind = numAtomsInColumn; ind < numCellsInColumn*grid->na_sc; ind++)
        {
            nbs->a[ash + ind] = -1;
        }
    }
}
/* Sets the cell index in the cell array for atom \p atomIndex and increments the atom count for the grid column */
static void setCellAndAtomCount(gmx::ArrayRef<int> cell,
                                int                cellIndex,
                                gmx::ArrayRef<int> cxy_na,
                                int                atomIndex)
{
    cell[atomIndex]    = cellIndex;
    cxy_na[cellIndex] += 1;
}
/* Determine in which grid column atoms should go */
static void calc_column_indices(nbnxn_grid_t *grid,
                                int atomStart, int atomEnd,
                                const rvec *x,
                                int dd_zone, const int *move,
                                int thread, int nthread,
                                gmx::ArrayRef<int> cell,
                                gmx::ArrayRef<int> cxy_na)
{
    /* We add one extra cell for particles which moved during DD */
    for (int i = 0; i < grid->numCells[XX]*grid->numCells[YY] + 1; i++)
    {
        cxy_na[i] = 0;
    }

    int taskAtomStart = atomStart + static_cast<int>((thread + 0)*(atomEnd - atomStart))/nthread;
    int taskAtomEnd   = atomStart + static_cast<int>((thread + 1)*(atomEnd - atomStart))/nthread;
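    /* For illustration: with 1000 atoms and nthread = 4, thread 1 gets the
     * half-open range [250, 500); multiplying before dividing spreads any
     * remainder evenly over the threads.
     */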
    if (dd_zone == 0)
    {
        /* Home zone */
        for (int i = taskAtomStart; i < taskAtomEnd; i++)
        {
            if (move == nullptr || move[i] >= 0)
            {
                /* We need to be careful with rounding,
                 * particles might be a few bits outside the local zone.
                 * The int cast takes care of the lower bound,
                 * we will explicitly take care of the upper bound.
                 */
                int cx = static_cast<int>((x[i][XX] - grid->c0[XX])*grid->invCellSize[XX]);
                int cy = static_cast<int>((x[i][YY] - grid->c0[YY])*grid->invCellSize[YY]);

#ifndef NDEBUG
                if (cx < 0 || cx > grid->numCells[XX] ||
                    cy < 0 || cy > grid->numCells[YY])
                {
                    gmx_fatal(FARGS,
                              "grid cell cx %d cy %d out of range (max %d %d)\n"
                              "atom %f %f %f, grid->c0 %f %f",
                              cx, cy, grid->numCells[XX], grid->numCells[YY],
                              x[i][XX], x[i][YY], x[i][ZZ], grid->c0[XX], grid->c0[YY]);
                }
#endif
                /* Take care of potential rounding issues */
                cx = std::min(cx, grid->numCells[XX] - 1);
                cy = std::min(cy, grid->numCells[YY] - 1);

                /* For the moment cell contains only the grid-local
                 * x and y indices, not z.
                 */
                setCellAndAtomCount(cell, cx*grid->numCells[YY] + cy, cxy_na, i);
            }
            else
            {
                /* Put this moved particle after the end of the grid,
                 * so we can process it later without using conditionals.
                 */
                setCellAndAtomCount(cell, grid->numCells[XX]*grid->numCells[YY], cxy_na, i);
            }
        }
    }
    else
    {
        /* Non-home zone */
        for (int i = taskAtomStart; i < taskAtomEnd; i++)
        {
            int cx = static_cast<int>((x[i][XX] - grid->c0[XX])*grid->invCellSize[XX]);
            int cy = static_cast<int>((x[i][YY] - grid->c0[YY])*grid->invCellSize[YY]);

            /* For non-home zones there could be particles outside
             * the non-bonded cut-off range, which have been communicated
             * for bonded interactions only. For the result it doesn't
             * matter where these end up on the grid. For performance
             * we put them in an extra row at the border.
             */
            cx = std::max(cx, 0);
            cx = std::min(cx, grid->numCells[XX] - 1);
            cy = std::max(cy, 0);
            cy = std::min(cy, grid->numCells[YY] - 1);

            /* For the moment cell contains only the grid-local
             * x and y indices, not z.
             */
            setCellAndAtomCount(cell, cx*grid->numCells[YY] + cy, cxy_na, i);
        }
    }
}
/* Resizes grid and atom data which depend on the number of cells */
static void resizeForNumberOfCells(const nbnxn_grid_t &grid,
                                   int                 numAtomsMoved,
                                   nbnxn_search       *nbs,
                                   nbnxn_atomdata_t   *nbat)
{
    int numNbnxnAtoms = (grid.cell0 + grid.nc)*grid.na_sc;

    /* Note: nbs->cell was already resized before */

    /* To avoid conditionals we store the moved particles at the end of a,
     * so make sure we have enough space.
     */
    nbs->a.resize(numNbnxnAtoms + numAtomsMoved);

    /* We need padding up to a multiple of the buffer flag size: simply add */
    if (numNbnxnAtoms + NBNXN_BUFFERFLAG_SIZE > nbat->nalloc)
    {
        nbnxn_atomdata_realloc(nbat, numNbnxnAtoms + NBNXN_BUFFERFLAG_SIZE);
    }

    nbat->natoms = numNbnxnAtoms;
}
/* Determine in which grid cells the atoms should go */
static void calc_cell_indices(nbnxn_search     *nbs,
                              int               ddZone,
                              nbnxn_grid_t     *grid,
                              int               atomStart,
                              int               atomEnd,
                              const int        *atinfo,
                              const rvec       *x,
                              int               numAtomsMoved,
                              const int        *move,
                              nbnxn_atomdata_t *nbat)
{
    /* First compute all grid/column indices and store them in nbs->cell */
    nbs->cell.resize(atomEnd);

    const int nthread = gmx_omp_nthreads_get(emntPairsearch);

#pragma omp parallel for num_threads(nthread) schedule(static)
    for (int thread = 0; thread < nthread; thread++)
    {
        try
        {
            calc_column_indices(grid, atomStart, atomEnd, x, ddZone, move, thread, nthread,
                                nbs->cell, nbs->work[thread].cxy_na);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    /* Make the cell index as a function of x and y */
    int ncz_max      = 0;
    int ncz          = 0;
    grid->cxy_ind[0] = 0;
    for (int i = 0; i < grid->numCells[XX]*grid->numCells[YY] + 1; i++)
    {
        /* We set ncz_max at the beginning of the loop instead of at the end
         * to skip i = grid->numCells[XX]*grid->numCells[YY], which holds the
         * moved particles that do not need to be ordered on the grid.
         */
        if (ncz > ncz_max)
        {
            ncz_max = ncz;
        }
        int cxy_na_i = nbs->work[0].cxy_na[i];
        for (int thread = 1; thread < nthread; thread++)
        {
            cxy_na_i += nbs->work[thread].cxy_na[i];
        }
        ncz = (cxy_na_i + grid->na_sc - 1)/grid->na_sc;
        if (nbat->XFormat == nbatX8)
        {
            /* Make the number of cells a multiple of 2 */
            ncz = (ncz + 1) & ~1;
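            /* For illustration: ncz = 5 becomes (5 + 1) & ~1 = 6, while an
             * even count is left unchanged; columns thus always hold an even
             * number of cells, so bounding boxes can be combined pairwise.
             */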
        }
        grid->cxy_ind[i+1] = grid->cxy_ind[i] + ncz;
        /* Clear cxy_na, so we can reuse the array below */
        grid->cxy_na[i] = 0;
    }
    grid->nc = grid->cxy_ind[grid->numCells[XX]*grid->numCells[YY]] - grid->cxy_ind[0];

    resizeForNumberOfCells(*grid, numAtomsMoved, nbs, nbat);

    if (debug)
    {
        fprintf(debug, "ns na_sc %d na_c %d super-cells: %d x %d y %d z %.1f maxz %d\n",
                grid->na_sc, grid->na_c, grid->nc,
                grid->numCells[XX], grid->numCells[YY], grid->nc/(static_cast<double>(grid->numCells[XX]*grid->numCells[YY])),
                ncz_max);
        if (gmx_debug_at)
        {
            int i = 0;
            for (int cy = 0; cy < grid->numCells[YY]; cy++)
            {
                for (int cx = 0; cx < grid->numCells[XX]; cx++)
                {
                    fprintf(debug, " %2d", grid->cxy_ind[i+1]-grid->cxy_ind[i]);
                    i++;
                }
                fprintf(debug, "\n");
            }
        }
    }

    /* Make sure the work array for sorting is large enough */
    if (ncz_max*grid->na_sc*SGSF > gmx::index(nbs->work[0].sortBuffer.size()))
    {
        for (nbnxn_search_work_t &work : nbs->work)
        {
            /* Elements not in use should be -1 */
            work.sortBuffer.resize(ncz_max*grid->na_sc*SGSF, -1);
        }
    }

    /* Now we know the dimensions we can fill the grid.
     * This is the first, unsorted fill. We sort the columns after this.
     */
    for (int i = atomStart; i < atomEnd; i++)
    {
        /* At this point nbs->cell contains the local grid x,y indices */
        int cxy = nbs->cell[i];
        nbs->a[(grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc + grid->cxy_na[cxy]++] = i;
    }

    if (ddZone == 0)
    {
        /* Set the cell indices for the moved particles */
        int n0 = grid->nc*grid->na_sc;
        int n1 = grid->nc*grid->na_sc + grid->cxy_na[grid->numCells[XX]*grid->numCells[YY]];
        for (int i = n0; i < n1; i++)
        {
            nbs->cell[nbs->a[i]] = i;
        }
    }

    /* Sort the super-cell columns along z into the sub-cells. */
#pragma omp parallel for num_threads(nthread) schedule(static)
    for (int thread = 0; thread < nthread; thread++)
    {
        try
        {
            int columnStart = ((thread + 0)*grid->numCells[XX]*grid->numCells[YY])/nthread;
            int columnEnd   = ((thread + 1)*grid->numCells[XX]*grid->numCells[YY])/nthread;
            if (grid->bSimple)
            {
                sort_columns_simple(nbs, ddZone, grid, atomStart, atomEnd, atinfo, x, nbat,
                                    columnStart, columnEnd,
                                    nbs->work[thread].sortBuffer);
            }
            else
            {
                sort_columns_supersub(nbs, ddZone, grid, atomStart, atomEnd, atinfo, x, nbat,
                                      columnStart, columnEnd,
                                      nbs->work[thread].sortBuffer);
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    if (grid->bSimple && nbat->XFormat == nbatX8)
    {
        combine_bounding_box_pairs(grid, grid->bb);
    }

    if (!grid->bSimple)
    {
        grid->nsubc_tot = 0;
        for (int i = 0; i < grid->nc; i++)
        {
            grid->nsubc_tot += grid->nsubc[i];
        }
    }

    if (debug)
    {
        if (grid->bSimple)
        {
            print_bbsizes_simple(debug, grid);
        }
        else
        {
            fprintf(debug, "ns non-zero sub-cells: %d average atoms %.2f\n",
                    grid->nsubc_tot, (atomEnd - atomStart)/static_cast<double>(grid->nsubc_tot));

            print_bbsizes_supersub(debug, grid);
        }
    }
}
/* Sets up a grid and puts the atoms on the grid.
 * This function only operates on one domain of the domain decomposition.
 * Note that without domain decomposition there is only one domain.
 */
void nbnxn_put_on_grid(nbnxn_search_t    nbs,
                       int               ePBC,
                       const matrix      box,
                       int               ddZone,
                       const rvec        lowerCorner,
                       const rvec        upperCorner,
                       int               atomStart,
                       int               atomEnd,
                       real              atomDensity,
                       const int        *atinfo,
                       const rvec       *x,
                       int               numAtomsMoved,
                       const int        *move,
                       int               nb_kernel_type,
                       nbnxn_atomdata_t *nbat)
{
    nbnxn_grid_t *grid = &nbs->grid[ddZone];

    nbs_cycle_start(&nbs->cc[enbsCCgrid]);

    grid->bSimple = nbnxn_kernel_pairlist_simple(nb_kernel_type);

    grid->na_c      = nbnxn_kernel_to_cluster_i_size(nb_kernel_type);
    grid->na_cj     = nbnxn_kernel_to_cluster_j_size(nb_kernel_type);
    grid->na_sc     = (grid->bSimple ? 1 : c_gpuNumClusterPerCell)*grid->na_c;
    grid->na_c_2log = get_2log(grid->na_c);

    nbat->na_c = grid->na_c;

    if (ddZone == 0)
    {
        grid->cell0 = 0;
    }
    else
    {
        grid->cell0 =
            (nbs->grid[ddZone - 1].cell0 + nbs->grid[ddZone - 1].nc)*
            nbs->grid[ddZone - 1].na_sc/grid->na_sc;
    }

    const int n = atomEnd - atomStart;

    if (ddZone == 0)
    {
        nbs->ePBC = ePBC;
        copy_mat(box, nbs->box);

        /* Avoid zero density */
        if (atomDensity > 0)
        {
            grid->atom_density = atomDensity;
        }
        else
        {
            grid->atom_density = grid_atom_density(n - numAtomsMoved, lowerCorner, upperCorner);
        }

        nbs->natoms_local = atomEnd - numAtomsMoved;
        /* We assume that nbnxn_put_on_grid is called first
         * for the local atoms (ddZone=0).
         */
        nbs->natoms_nonlocal = atomEnd - numAtomsMoved;

        if (debug)
        {
            fprintf(debug, "natoms_local = %5d atom_density = %5.1f\n",
                    nbs->natoms_local, grid->atom_density);
        }
    }
    else
    {
        nbs->natoms_nonlocal = std::max(nbs->natoms_nonlocal, atomEnd);
    }

    /* We always use the home zone (grid[0]) for setting the cell size,
     * since determining densities for non-local zones is difficult.
     */
    set_grid_size_xy(nbs, grid,
                     ddZone, n - numAtomsMoved,
                     lowerCorner, upperCorner,
                     nbs->grid[0].atom_density);

    calc_cell_indices(nbs, ddZone, grid, atomStart, atomEnd, atinfo, x, numAtomsMoved, move, nbat);

    if (ddZone == 0)
    {
        nbat->natoms_local = nbat->natoms;
    }

    nbs_cycle_stop(&nbs->cc[enbsCCgrid]);
}
/* Calls nbnxn_put_on_grid for all non-local domains */
void nbnxn_put_on_grid_nonlocal(nbnxn_search_t                   nbs,
                                const struct gmx_domdec_zones_t *zones,
                                const int                       *atinfo,
                                const rvec                      *x,
                                int                              nb_kernel_type,
                                nbnxn_atomdata_t                *nbat)
{
    for (int zone = 1; zone < zones->n; zone++)
    {
        rvec c0, c1;
        for (int d = 0; d < DIM; d++)
        {
            c0[d] = zones->size[zone].bb_x0[d];
            c1[d] = zones->size[zone].bb_x1[d];
        }

        nbnxn_put_on_grid(nbs, nbs->ePBC, nullptr,
                          zone, c0, c1,
                          zones->cg_range[zone],
                          zones->cg_range[zone+1],
                          -1, atinfo, x,
                          0, nullptr,
                          nb_kernel_type, nbat);
    }
}
void nbnxn_get_ncells(nbnxn_search_t nbs, int *ncx, int *ncy)
{
    *ncx = nbs->grid[0].numCells[XX];
    *ncy = nbs->grid[0].numCells[YY];
}

gmx::ArrayRef<const int> nbnxn_get_atomorder(const nbnxn_search *nbs)
{
    /* Return the atom order for the home cell (index 0) */
    const nbnxn_grid_t &grid = nbs->grid[0];

    int numIndices = grid.cxy_ind[grid.numCells[XX]*grid.numCells[YY]]*grid.na_sc;

    return gmx::constArrayRefFromArray(nbs->a.data(), numIndices);
}

void nbnxn_set_atomorder(nbnxn_search_t nbs)
{
    /* Set the atom order for the home cell (index 0) */
    nbnxn_grid_t *grid = &nbs->grid[0];

    for (int cx = 0; cx < grid->numCells[XX]; cx++)
    {
        for (int cy = 0; cy < grid->numCells[YY]; cy++)
        {
            int cxy = cx*grid->numCells[YY] + cy;
            int j   = grid->cxy_ind[cxy]*grid->na_sc;
            for (int cz = 0; cz < grid->cxy_na[cxy]; cz++)