/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "nbnxn_search.h"

#include "gromacs/legacyheaders/gmx_omp_nthreads.h"
#include "gromacs/legacyheaders/macros.h"
#include "gromacs/legacyheaders/nrnb.h"
#include "gromacs/legacyheaders/ns.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/utility/smalloc.h"

/* nbnxn_internal.h includes gromacs/simd/macros.h */
#include "gromacs/mdlib/nbnxn_internal.h"

#include "gromacs/simd/vector_operations.h"
#ifdef NBNXN_SEARCH_BB_SIMD4
/* Always use 4-wide SIMD for bounding box calculations */

/* Single precision BBs + coordinates, we can also load coordinates with SIMD */
# define NBNXN_SEARCH_SIMD4_FLOAT_X_BB

# if defined NBNXN_SEARCH_SIMD4_FLOAT_X_BB && (GPU_NSUBCELL == 4 || GPU_NSUBCELL == 8)
/* Store bounding boxes with x, y and z coordinates in packs of 4 */
#  define NBNXN_PBB_SIMD4
# endif

/* The packed bounding box coordinate stride is always set to 4.
 * With AVX we could use 8, but that turns out not to be faster.
 */
# define STRIDE_PBB      4
# define STRIDE_PBB_2LOG 2

#endif /* NBNXN_SEARCH_BB_SIMD4 */
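
/* Illustrative note (not from the original sources): with STRIDE_PBB == 4
 * the packed format stores the corners of 4 bounding boxes transposed as
 *   xl xl xl xl  yl yl yl yl  zl zl zl zl  xh xh xh xh  yh yh yh yh  zh zh zh zh
 * so a single 4-wide SIMD load fetches the same corner component of 4 boxes.
 */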
#ifdef GMX_NBNXN_SIMD

/* The functions below are macros as they are performance sensitive */

/* 4x4 list, pack=4: no complex conversion required */
/* i-cluster to j-cluster conversion */
#define CI_TO_CJ_J4(ci) (ci)
/* cluster index to coordinate array index conversion */
#define X_IND_CI_J4(ci) ((ci)*STRIDE_P4)
#define X_IND_CJ_J4(cj) ((cj)*STRIDE_P4)

/* 4x2 list, pack=4: j-cluster size is half the packing width */
/* i-cluster to j-cluster conversion */
#define CI_TO_CJ_J2(ci) ((ci)<<1)
/* cluster index to coordinate array index conversion */
#define X_IND_CI_J2(ci) ((ci)*STRIDE_P4)
#define X_IND_CJ_J2(cj) (((cj)>>1)*STRIDE_P4 + ((cj) & 1)*(PACK_X4>>1))

/* 4x8 list, pack=8: i-cluster size is half the packing width */
/* i-cluster to j-cluster conversion */
#define CI_TO_CJ_J8(ci) ((ci)>>1)
/* cluster index to coordinate array index conversion */
#define X_IND_CI_J8(ci) (((ci)>>1)*STRIDE_P8 + ((ci) & 1)*(PACK_X8>>1))
#define X_IND_CJ_J8(cj) ((cj)*STRIDE_P8)
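
/* Worked example (illustrative; assumes PACK_X4 == 4 and
 * STRIDE_P4 == DIM*PACK_X4 == 12): the 2-atom j-cluster 5 starts at
 *   X_IND_CJ_J2(5) = (5>>1)*12 + (5&1)*(4>>1) = 24 + 2 = 26,
 * i.e. the second half of the third 4-atom coordinate pack, since two
 * 2-atom j-clusters share one pack of 4 coordinates.
 */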
/* The j-cluster size is matched to the SIMD width */
#if GMX_SIMD_REAL_WIDTH == 2
#define CI_TO_CJ_SIMD_4XN(ci) CI_TO_CJ_J2(ci)
#define X_IND_CI_SIMD_4XN(ci) X_IND_CI_J2(ci)
#define X_IND_CJ_SIMD_4XN(cj) X_IND_CJ_J2(cj)
#else
#if GMX_SIMD_REAL_WIDTH == 4
#define CI_TO_CJ_SIMD_4XN(ci) CI_TO_CJ_J4(ci)
#define X_IND_CI_SIMD_4XN(ci) X_IND_CI_J4(ci)
#define X_IND_CJ_SIMD_4XN(cj) X_IND_CJ_J4(cj)
#else
#if GMX_SIMD_REAL_WIDTH == 8
#define CI_TO_CJ_SIMD_4XN(ci) CI_TO_CJ_J8(ci)
#define X_IND_CI_SIMD_4XN(ci) X_IND_CI_J8(ci)
#define X_IND_CJ_SIMD_4XN(cj) X_IND_CJ_J8(cj)
/* Half SIMD with j-cluster size */
#define CI_TO_CJ_SIMD_2XNN(ci) CI_TO_CJ_J4(ci)
#define X_IND_CI_SIMD_2XNN(ci) X_IND_CI_J4(ci)
#define X_IND_CJ_SIMD_2XNN(cj) X_IND_CJ_J4(cj)
#else
#if GMX_SIMD_REAL_WIDTH == 16
#define CI_TO_CJ_SIMD_2XNN(ci) CI_TO_CJ_J8(ci)
#define X_IND_CI_SIMD_2XNN(ci) X_IND_CI_J8(ci)
#define X_IND_CJ_SIMD_2XNN(cj) X_IND_CJ_J8(cj)
#else
#error "unsupported GMX_SIMD_REAL_WIDTH"
#endif
#endif
#endif
#endif

#endif /* GMX_NBNXN_SIMD */
#ifdef NBNXN_SEARCH_BB_SIMD4
/* Store bounding boxes corners as quadruplets: xxxxyyyyzzzz */
#define NBNXN_BBXXXX
/* Size of bounding box corners quadruplet */
#define NNBSBB_XXXX (NNBSBB_D*DIM*STRIDE_PBB)
#endif
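
/* Size sketch (assuming NNBSBB_D == 2 for the lower/upper corner sets):
 * NNBSBB_XXXX = 2*3*4 = 24 floats for each quadruplet of bounding boxes.
 */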
/* We shift the i-particles backward for PBC.
 * This leads to more conditionals than shifting forward.
 * We do this to get more balanced pair lists.
 */
#define NBNXN_SHIFT_BACKWARD

/* This define is a lazy way to avoid interdependence of the grid
 * and searching data structures.
 */
#define NBNXN_NA_SC_MAX (GPU_NSUBCELL*NBNXN_GPU_CLUSTER_SIZE)
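
/* E.g. with the usual GPU setup of GPU_NSUBCELL == 8 sub-cells of
 * NBNXN_GPU_CLUSTER_SIZE == 8 atoms this allows 64 atoms per super-cell.
 */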
static void nbs_cycle_clear(nbnxn_cycle_t *cc)
    for (i = 0; i < enbsCCnr; i++)

static double Mcyc_av(const nbnxn_cycle_t *cc)
    return (double)cc->c*1e-6/cc->count;

static void nbs_cycle_print(FILE *fp, const nbnxn_search_t nbs)
    fprintf(fp, "ns %4d grid %4.1f search %4.1f red.f %5.3f",
            nbs->cc[enbsCCgrid].count,
            Mcyc_av(&nbs->cc[enbsCCgrid]),
            Mcyc_av(&nbs->cc[enbsCCsearch]),
            Mcyc_av(&nbs->cc[enbsCCreducef]));

    if (nbs->nthread_max > 1)
        if (nbs->cc[enbsCCcombine].count > 0)
            fprintf(fp, " comb %5.2f",
                    Mcyc_av(&nbs->cc[enbsCCcombine]));

        fprintf(fp, " s. th");
        for (t = 0; t < nbs->nthread_max; t++)
            fprintf(fp, " %4.1f",
                    Mcyc_av(&nbs->work[t].cc[enbsCCsearch]));
static void nbnxn_grid_init(nbnxn_grid_t * grid)
    grid->cxy_ind    = NULL;
    grid->cxy_nalloc = 0;

static int get_2log(int n)
    while ((1<<log2) < n)

        gmx_fatal(FARGS, "nbnxn na_c (%d) is not a power of 2", n);
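
/* Illustrative: get_2log(1) == 0, get_2log(4) == 2, get_2log(8) == 3;
 * any cluster size that is not a power of 2 triggers the fatal error above.
 */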
static int nbnxn_kernel_to_ci_size(int nb_kernel_type)
    switch (nb_kernel_type)
        case nbnxnk4x4_PlainC:
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:
            return NBNXN_CPU_CLUSTER_I_SIZE;
        case nbnxnk8x8x8_CUDA:
        case nbnxnk8x8x8_PlainC:
            /* The cluster size for super/sub lists is only set here.
             * Any value should work for the pair-search and atomdata code.
             * The kernels, of course, might require a particular value.
             */
            return NBNXN_GPU_CLUSTER_SIZE;

            gmx_incons("unknown kernel type");

int nbnxn_kernel_to_cj_size(int nb_kernel_type)
    int nbnxn_simd_width = 0;

#ifdef GMX_NBNXN_SIMD
    nbnxn_simd_width = GMX_SIMD_REAL_WIDTH;
#endif

    switch (nb_kernel_type)
        case nbnxnk4x4_PlainC:
            cj_size = NBNXN_CPU_CLUSTER_I_SIZE;
        case nbnxnk4xN_SIMD_4xN:
            cj_size = nbnxn_simd_width;
        case nbnxnk4xN_SIMD_2xNN:
            cj_size = nbnxn_simd_width/2;
        case nbnxnk8x8x8_CUDA:
        case nbnxnk8x8x8_PlainC:
            cj_size = nbnxn_kernel_to_ci_size(nb_kernel_type);

            gmx_incons("unknown kernel type");

static int ci_to_cj(int na_cj_2log, int ci)
    switch (na_cj_2log)
        case 2: return ci;      break;
        case 1: return (ci<<1); break;
        case 3: return (ci>>1); break;
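
/* Sketch of the mapping: na_cj_2log == 2 keeps the index (4-atom j-clusters
 * match the i-cluster size), 1 doubles it (two 2-atom j-clusters per
 * i-cluster) and 3 halves it (one 8-atom j-cluster per two i-clusters).
 * E.g. ci = 6 maps to cj = 6, 12 or 3, respectively.
 */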
gmx_bool nbnxn_kernel_pairlist_simple(int nb_kernel_type)
    if (nb_kernel_type == nbnxnkNotSet)
        gmx_fatal(FARGS, "Non-bonded kernel type not set for Verlet-style pair-list.");

    switch (nb_kernel_type)
        case nbnxnk8x8x8_CUDA:
        case nbnxnk8x8x8_PlainC:

        case nbnxnk4x4_PlainC:
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:

            gmx_incons("Invalid nonbonded kernel type passed!");

/* Initializes a single t_nblist data structure for free-energy interactions */
static void nbnxn_init_pairlist_fep(t_nblist *nl)
    nl->type      = GMX_NBLIST_INTERACTION_FREE_ENERGY;
    nl->igeometry = GMX_NBLIST_GEOMETRY_PARTICLE_PARTICLE;
    /* The interaction functions are set in the free energy kernel function */
void nbnxn_init_search(nbnxn_search_t     * nbs_ptr,
                       gmx_domdec_zones_t *zones,
    nbs->DomDec = (n_dd_cells != NULL);

    clear_ivec(nbs->dd_dim);

        for (d = 0; d < DIM; d++)
            if ((*n_dd_cells)[d] > 1)

                /* Each grid matches a DD zone */

    snew(nbs->grid, nbs->ngrid);
    for (g = 0; g < nbs->ngrid; g++)
        nbnxn_grid_init(&nbs->grid[g]);

    nbs->cell_nalloc = 0;

    nbs->nthread_max = nthread_max;

    /* Initialize the work data structures for each thread */
    snew(nbs->work, nbs->nthread_max);
    for (t = 0; t < nbs->nthread_max; t++)
        nbs->work[t].cxy_na           = NULL;
        nbs->work[t].cxy_na_nalloc    = 0;
        nbs->work[t].sort_work        = NULL;
        nbs->work[t].sort_work_nalloc = 0;

        snew(nbs->work[t].nbl_fep, 1);
        nbnxn_init_pairlist_fep(nbs->work[t].nbl_fep);

    /* Initialize detailed nbsearch cycle counting */
    nbs->print_cycles = (getenv("GMX_NBNXN_CYCLE") != 0);
    nbs->search_count = 0;
    nbs_cycle_clear(nbs->cc);
    for (t = 0; t < nbs->nthread_max; t++)
        nbs_cycle_clear(nbs->work[t].cc);
static real grid_atom_density(int n, rvec corner0, rvec corner1)
    /* To avoid zero density we use a minimum of 1 atom */

    rvec_sub(corner1, corner0, size);

    return n/(size[XX]*size[YY]*size[ZZ]);
static int set_grid_size_xy(const nbnxn_search_t nbs,
                            int n, rvec corner0, rvec corner1,
    real adens, tlen, tlen_x, tlen_y, nc_max;

    rvec_sub(corner1, corner0, size);

    assert(atom_density > 0);

    /* target cell length */

        /* To minimize the zero interactions, we should make
         * the largest of the i/j cell cubic.
         */
        na_c = max(grid->na_c, grid->na_cj);

        /* Approximately cubic cells */
        tlen = pow(na_c/atom_density, 1.0/3.0);

        /* Approximately cubic sub cells */
        tlen   = pow(grid->na_c/atom_density, 1.0/3.0);
        tlen_x = tlen*GPU_NSUBCELL_X;
        tlen_y = tlen*GPU_NSUBCELL_Y;

    /* We round ncx and ncy down, because we get fewer cell pairs
     * in the pair list when the fixed cell dimensions (x,y) are
     * larger than the variable one (z) than the other way around.
     */
    grid->ncx = max(1, (int)(size[XX]/tlen_x));
    grid->ncy = max(1, (int)(size[YY]/tlen_y));

    grid->sx     = size[XX]/grid->ncx;
    grid->sy     = size[YY]/grid->ncy;
    grid->inv_sx = 1/grid->sx;
    grid->inv_sy = 1/grid->sy;

        /* This is a non-home zone, add an extra row of cells
         * for particles communicated for bonded interactions.
         * These can be beyond the cut-off. It doesn't matter where
         * they end up on the grid, but for performance it's better
         * if they don't end up in cells that can be within cut-off range.
         */

    /* We need one additional cell entry for particles moved by DD */
    if (grid->ncx*grid->ncy+1 > grid->cxy_nalloc)
        grid->cxy_nalloc = over_alloc_large(grid->ncx*grid->ncy+1);
        srenew(grid->cxy_na, grid->cxy_nalloc);
        srenew(grid->cxy_ind, grid->cxy_nalloc+1);

    for (t = 0; t < nbs->nthread_max; t++)
        if (grid->ncx*grid->ncy+1 > nbs->work[t].cxy_na_nalloc)
            nbs->work[t].cxy_na_nalloc = over_alloc_large(grid->ncx*grid->ncy+1);
            srenew(nbs->work[t].cxy_na, nbs->work[t].cxy_na_nalloc);

    /* Worst case scenario of 1 atom in each last cell */
    if (grid->na_cj <= grid->na_c)
        nc_max = n/grid->na_sc + grid->ncx*grid->ncy;

        nc_max = n/grid->na_sc + grid->ncx*grid->ncy*grid->na_cj/grid->na_c;

    if (nc_max > grid->nc_nalloc)
        grid->nc_nalloc = over_alloc_large(nc_max);
        srenew(grid->nsubc, grid->nc_nalloc);
        srenew(grid->bbcz, grid->nc_nalloc*NNBSBB_D);

            sfree_aligned(grid->bb);
            /* This snew also zeros the contents, which avoids possible
             * floating exceptions in SIMD with the unused bb elements.
             */
            snew_aligned(grid->bb, grid->nc_nalloc, 16);

                pbb_nalloc = grid->nc_nalloc*GPU_NSUBCELL/STRIDE_PBB*NNBSBB_XXXX;
                snew_aligned(grid->pbb, pbb_nalloc, 16);

                snew_aligned(grid->bb, grid->nc_nalloc*GPU_NSUBCELL, 16);

        if (grid->na_cj == grid->na_c)
            grid->bbj = grid->bb;

            sfree_aligned(grid->bbj);
            snew_aligned(grid->bbj, grid->nc_nalloc*grid->na_c/grid->na_cj, 16);

        srenew(grid->flags, grid->nc_nalloc);

            srenew(grid->fep, grid->nc_nalloc*grid->na_sc/grid->na_c);

    copy_rvec(corner0, grid->c0);
    copy_rvec(corner1, grid->c1);
    copy_rvec(size, grid->size);
/* We need to sort particles in grid columns on z-coordinate.
 * As particles are very often distributed homogeneously, we use a sorting
 * algorithm similar to pigeonhole sort. We multiply the z-coordinate
 * by a factor, cast to an int and try to store in that hole. If the hole
 * is full, we move this or another particle. A second pass is needed to make
 * contiguous elements. SORT_GRID_OVERSIZE is the ratio of holes to particles.
 * 4 is the optimal value for homogeneous particle distribution and allows
 * for an O(#particles) sort up to distributions where all particles are
 * concentrated in 1/4 of the space. No NlogN fallback is implemented,
 * as it can be expensive to detect inhomogeneous particle distributions.
 * SGSF is the maximum ratio of holes used, in the worst case all particles
 * end up in the last hole and we need #particles extra holes at the end.
 */
#define SORT_GRID_OVERSIZE 4
#define SGSF (SORT_GRID_OVERSIZE + 1)
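
#if 0
/* Stand-alone sketch of the pigeonhole scheme described above (illustrative
 * only, excluded from the build): sorts n values assumed to lie in [0,1).
 * The real implementation is sort_atoms() below, which sorts atom indices
 * instead and breaks ties on the actual coordinate for reproducibility.
 */
static void pigeonhole_sort_sketch(real *v, int n)
{
    int   nholes = n*SORT_GRID_OVERSIZE + n; /* worst case size, cf. SGSF */
    real *hole;
    real  tmp, t2;
    int   h, i, zi;

    snew(hole, nholes);
    for (h = 0; h < nholes; h++)
    {
        hole[h] = -1; /* mark all holes empty; the values are >= 0 */
    }
    for (i = 0; i < n; i++)
    {
        /* Scale the value into a hole index */
        zi = (int)(v[i]*n*SORT_GRID_OVERSIZE);
        /* Walk up past occupied holes with smaller values */
        while (hole[zi] >= 0 && v[i] > hole[zi])
        {
            zi++;
        }
        /* Insert here; shift any equal-or-larger run up by one hole */
        tmp = v[i];
        while (hole[zi] >= 0)
        {
            t2       = hole[zi];
            hole[zi] = tmp;
            tmp      = t2;
            zi++;
        }
        hole[zi] = tmp;
    }
    /* Second pass: compact the occupied holes back into v, in order */
    i = 0;
    for (h = 0; h < nholes; h++)
    {
        if (hole[h] >= 0)
        {
            v[i++] = hole[h];
        }
    }
    sfree(hole);
}
#endif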
/* Sort particle index a on coordinates x along dim.
 * Backwards tells if we want decreasing instead of increasing coordinates.
 * h0 is the minimum of the coordinate range.
 * invh is the 1/length of the sorting range.
 * n_per_h (>=n) is the expected average number of particles per 1/invh
 * sort is the sorting work array.
 * sort should have a size of at least n_per_h*SORT_GRID_OVERSIZE + n,
 * or easier, allocate at least n*SGSF elements.
 */
static void sort_atoms(int dim, gmx_bool Backwards,
                       int gmx_unused dd_zone,
                       int *a, int n, rvec *x,
                       real h0, real invh, int n_per_h,
                       int *sort)
    int zi, zim, zi_min, zi_max;

        gmx_incons("n > n_per_h");

    /* Transform the inverse range height into the inverse hole height */
    invh *= n_per_h*SORT_GRID_OVERSIZE;

    /* Set nsort to the maximum possible number of holes used.
     * In worst case all n elements end up in the last bin.
     */
    nsort = n_per_h*SORT_GRID_OVERSIZE + n;

    /* Determine the index range used, so we can limit it for the second pass */
    /* Sort the particles using a simple index sort */
    for (i = 0; i < n; i++)
        /* The cast takes care of float-point rounding effects below zero.
         * This code assumes particles are less than 1/SORT_GRID_OVERSIZE
         * times the box height out of the box.
         */
        zi = (int)((x[a[i]][dim] - h0)*invh);

        /* As we can have rounding effects, we use > instead of >= here */
        if (zi < 0 || (dd_zone == 0 && zi > n_per_h*SORT_GRID_OVERSIZE))
            gmx_fatal(FARGS, "(int)((x[%d][%c]=%f - %f)*%f) = %d, not in 0 - %d*%d\n",
                      a[i], 'x'+dim, x[a[i]][dim], h0, invh, zi,
                      n_per_h, SORT_GRID_OVERSIZE);

        /* In a non-local domain, particles communicated for bonded interactions
         * can be far beyond the grid size, which is set by the non-bonded
         * cut-off distance. We sort such particles into the last cell.
         */
        if (zi > n_per_h*SORT_GRID_OVERSIZE)
            zi = n_per_h*SORT_GRID_OVERSIZE;

        /* Ideally this particle should go in sort cell zi,
         * but that might already be in use,
         * in that case find the first empty cell higher up
         */
            zi_min = min(zi_min, zi);
            zi_max = max(zi_max, zi);

            /* We have multiple atoms in the same sorting slot.
             * Sort on real z for minimal bounding box size.
             * There is an extra check for identical z to ensure
             * well-defined output order, independent of input order,
             * for binary reproducibility after restarts.
             */
            while (sort[zi] >= 0 && ( x[a[i]][dim] > x[sort[zi]][dim] ||
                                      (x[a[i]][dim] == x[sort[zi]][dim] &&

                /* Shift all elements by one slot until we find an empty slot */
                while (sort[zim] >= 0)

                zi_max = max(zi_max, zim);

            zi_max = max(zi_max, zi);

    for (zi = 0; zi < nsort; zi++)

    for (zi = zi_max; zi >= zi_min; zi--)

            gmx_incons("Lost particles while sorting");

#define R2F_D(x) ((float)((x) >= 0 ? ((1-GMX_FLOAT_EPS)*(x)) : ((1+GMX_FLOAT_EPS)*(x))))
#define R2F_U(x) ((float)((x) >= 0 ? ((1+GMX_FLOAT_EPS)*(x)) : ((1-GMX_FLOAT_EPS)*(x))))
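
/* R2F_D/R2F_U scale a real down/up by a relative float epsilon, so that in
 * double precision the single-precision bounding box is guaranteed to contain
 * the coordinates: e.g. R2F_D(1.0) yields a float <= 1.0 for a lower corner,
 * while R2F_U(-1.0) yields a float >= -1.0 for an upper corner.
 */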
/* Coordinate order x,y,z, bb order xyz0 */
static void calc_bounding_box(int na, int stride, const real *x, nbnxn_bb_t *bb)
    real xl, xh, yl, yh, zl, zh;

    for (j = 1; j < na; j++)
        xl = min(xl, x[i+XX]);
        xh = max(xh, x[i+XX]);
        yl = min(yl, x[i+YY]);
        yh = max(yh, x[i+YY]);
        zl = min(zl, x[i+ZZ]);
        zh = max(zh, x[i+ZZ]);

    /* Note: possible double to float conversion here */
    bb->lower[BB_X] = R2F_D(xl);
    bb->lower[BB_Y] = R2F_D(yl);
    bb->lower[BB_Z] = R2F_D(zl);
    bb->upper[BB_X] = R2F_U(xh);
    bb->upper[BB_Y] = R2F_U(yh);
    bb->upper[BB_Z] = R2F_U(zh);

/* Packed coordinates, bb order xyz0 */
static void calc_bounding_box_x_x4(int na, const real *x, nbnxn_bb_t *bb)
    real xl, xh, yl, yh, zl, zh;

    for (j = 1; j < na; j++)
        xl = min(xl, x[j+XX*PACK_X4]);
        xh = max(xh, x[j+XX*PACK_X4]);
        yl = min(yl, x[j+YY*PACK_X4]);
        yh = max(yh, x[j+YY*PACK_X4]);
        zl = min(zl, x[j+ZZ*PACK_X4]);
        zh = max(zh, x[j+ZZ*PACK_X4]);

    /* Note: possible double to float conversion here */
    bb->lower[BB_X] = R2F_D(xl);
    bb->lower[BB_Y] = R2F_D(yl);
    bb->lower[BB_Z] = R2F_D(zl);
    bb->upper[BB_X] = R2F_U(xh);
    bb->upper[BB_Y] = R2F_U(yh);
    bb->upper[BB_Z] = R2F_U(zh);

/* Packed coordinates, bb order xyz0 */
static void calc_bounding_box_x_x8(int na, const real *x, nbnxn_bb_t *bb)
    real xl, xh, yl, yh, zl, zh;

    for (j = 1; j < na; j++)
        xl = min(xl, x[j+XX*PACK_X8]);
        xh = max(xh, x[j+XX*PACK_X8]);
        yl = min(yl, x[j+YY*PACK_X8]);
        yh = max(yh, x[j+YY*PACK_X8]);
        zl = min(zl, x[j+ZZ*PACK_X8]);
        zh = max(zh, x[j+ZZ*PACK_X8]);

    /* Note: possible double to float conversion here */
    bb->lower[BB_X] = R2F_D(xl);
    bb->lower[BB_Y] = R2F_D(yl);
    bb->lower[BB_Z] = R2F_D(zl);
    bb->upper[BB_X] = R2F_U(xh);
    bb->upper[BB_Y] = R2F_U(yh);
    bb->upper[BB_Z] = R2F_U(zh);

/* Packed coordinates, bb order xyz0 */
static void calc_bounding_box_x_x4_halves(int na, const real *x,
                                          nbnxn_bb_t *bb, nbnxn_bb_t *bbj)
    calc_bounding_box_x_x4(min(na, 2), x, bbj);

        calc_bounding_box_x_x4(min(na-2, 2), x+(PACK_X4>>1), bbj+1);

        /* Set the "empty" bounding box to the same as the first one,
         * so we don't need to treat special cases in the rest of the code.
         */
#ifdef NBNXN_SEARCH_BB_SIMD4
        gmx_simd4_store_f(&bbj[1].lower[0], gmx_simd4_load_f(&bbj[0].lower[0]));
        gmx_simd4_store_f(&bbj[1].upper[0], gmx_simd4_load_f(&bbj[0].upper[0]));
#endif

#ifdef NBNXN_SEARCH_BB_SIMD4
    gmx_simd4_store_f(&bb->lower[0],
                      gmx_simd4_min_f(gmx_simd4_load_f(&bbj[0].lower[0]),
                                      gmx_simd4_load_f(&bbj[1].lower[0])));
    gmx_simd4_store_f(&bb->upper[0],
                      gmx_simd4_max_f(gmx_simd4_load_f(&bbj[0].upper[0]),
                                      gmx_simd4_load_f(&bbj[1].upper[0])));
#else
    for (i = 0; i < NNBSBB_C; i++)
        bb->lower[i] = min(bbj[0].lower[i], bbj[1].lower[i]);
        bb->upper[i] = max(bbj[0].upper[i], bbj[1].upper[i]);
#endif
#ifdef NBNXN_SEARCH_BB_SIMD4

/* Coordinate order xyz, bb order xxxxyyyyzzzz */
static void calc_bounding_box_xxxx(int na, int stride, const real *x, float *bb)
    real xl, xh, yl, yh, zl, zh;

    for (j = 1; j < na; j++)
        xl = min(xl, x[i+XX]);
        xh = max(xh, x[i+XX]);
        yl = min(yl, x[i+YY]);
        yh = max(yh, x[i+YY]);
        zl = min(zl, x[i+ZZ]);
        zh = max(zh, x[i+ZZ]);

    /* Note: possible double to float conversion here */
    bb[0*STRIDE_PBB] = R2F_D(xl);
    bb[1*STRIDE_PBB] = R2F_D(yl);
    bb[2*STRIDE_PBB] = R2F_D(zl);
    bb[3*STRIDE_PBB] = R2F_U(xh);
    bb[4*STRIDE_PBB] = R2F_U(yh);
    bb[5*STRIDE_PBB] = R2F_U(zh);

#endif /* NBNXN_SEARCH_BB_SIMD4 */

#ifdef NBNXN_SEARCH_SIMD4_FLOAT_X_BB

/* Coordinate order xyz?, bb order xyz0 */
static void calc_bounding_box_simd4(int na, const float *x, nbnxn_bb_t *bb)
    gmx_simd4_float_t bb_0_S, bb_1_S;
    gmx_simd4_float_t x_S;

    bb_0_S = gmx_simd4_load_f(x);

    for (i = 1; i < na; i++)
        x_S    = gmx_simd4_load_f(x+i*NNBSBB_C);
        bb_0_S = gmx_simd4_min_f(bb_0_S, x_S);
        bb_1_S = gmx_simd4_max_f(bb_1_S, x_S);

    gmx_simd4_store_f(&bb->lower[0], bb_0_S);
    gmx_simd4_store_f(&bb->upper[0], bb_1_S);

/* Coordinate order xyz?, bb order xxxxyyyyzzzz */
static void calc_bounding_box_xxxx_simd4(int na, const float *x,
                                         nbnxn_bb_t *bb_work_aligned,
                                         float *bb)
    calc_bounding_box_simd4(na, x, bb_work_aligned);

    bb[0*STRIDE_PBB] = bb_work_aligned->lower[BB_X];
    bb[1*STRIDE_PBB] = bb_work_aligned->lower[BB_Y];
    bb[2*STRIDE_PBB] = bb_work_aligned->lower[BB_Z];
    bb[3*STRIDE_PBB] = bb_work_aligned->upper[BB_X];
    bb[4*STRIDE_PBB] = bb_work_aligned->upper[BB_Y];
    bb[5*STRIDE_PBB] = bb_work_aligned->upper[BB_Z];

#endif /* NBNXN_SEARCH_SIMD4_FLOAT_X_BB */
/* Combines pairs of consecutive bounding boxes */
static void combine_bounding_box_pairs(nbnxn_grid_t *grid, const nbnxn_bb_t *bb)
    int i, j, sc2, nc2, c2;

    for (i = 0; i < grid->ncx*grid->ncy; i++)
        /* Starting bb in a column is expected to be 2-aligned */
        sc2 = grid->cxy_ind[i]>>1;
        /* For odd numbers skip the last bb here */
        nc2 = (grid->cxy_na[i]+3)>>(2+1);
        for (c2 = sc2; c2 < sc2+nc2; c2++)
#ifdef NBNXN_SEARCH_BB_SIMD4
            gmx_simd4_float_t min_S, max_S;

            min_S = gmx_simd4_min_f(gmx_simd4_load_f(&bb[c2*2+0].lower[0]),
                                    gmx_simd4_load_f(&bb[c2*2+1].lower[0]));
            max_S = gmx_simd4_max_f(gmx_simd4_load_f(&bb[c2*2+0].upper[0]),
                                    gmx_simd4_load_f(&bb[c2*2+1].upper[0]));
            gmx_simd4_store_f(&grid->bbj[c2].lower[0], min_S);
            gmx_simd4_store_f(&grid->bbj[c2].upper[0], max_S);
#else
            for (j = 0; j < NNBSBB_C; j++)
                grid->bbj[c2].lower[j] = min(bb[c2*2+0].lower[j],
                                             bb[c2*2+1].lower[j]);
                grid->bbj[c2].upper[j] = max(bb[c2*2+0].upper[j],
                                             bb[c2*2+1].upper[j]);
#endif

        if (((grid->cxy_na[i]+3)>>2) & 1)
            /* The bb count in this column is odd: duplicate the last bb */
            for (j = 0; j < NNBSBB_C; j++)
                grid->bbj[c2].lower[j] = bb[c2*2].lower[j];
                grid->bbj[c2].upper[j] = bb[c2*2].upper[j];
/* Prints the average bb size, used for debug output */
static void print_bbsizes_simple(FILE *fp,
                                 const nbnxn_grid_t *grid)
    for (c = 0; c < grid->nc; c++)
        for (d = 0; d < DIM; d++)
            ba[d] += grid->bb[c].upper[d] - grid->bb[c].lower[d];

    dsvmul(1.0/grid->nc, ba, ba);

    fprintf(fp, "ns bb: grid %4.2f %4.2f %4.2f abs %4.2f %4.2f %4.2f rel %4.2f %4.2f %4.2f\n",
            grid->na_c/(grid->atom_density*grid->sx*grid->sy),
            ba[XX], ba[YY], ba[ZZ],
            ba[ZZ]/(grid->na_c/(grid->atom_density*grid->sx*grid->sy)));

/* Prints the average bb size, used for debug output */
static void print_bbsizes_supersub(FILE *fp,
                                   const nbnxn_grid_t *grid)
    for (c = 0; c < grid->nc; c++)
        for (s = 0; s < grid->nsubc[c]; s += STRIDE_PBB)
            cs_w = (c*GPU_NSUBCELL + s)/STRIDE_PBB;
            for (i = 0; i < STRIDE_PBB; i++)
                for (d = 0; d < DIM; d++)
                        grid->pbb[cs_w*NNBSBB_XXXX+(DIM+d)*STRIDE_PBB+i] -
                        grid->pbb[cs_w*NNBSBB_XXXX+ d  *STRIDE_PBB+i];

        for (s = 0; s < grid->nsubc[c]; s++)
            cs = c*GPU_NSUBCELL + s;
            for (d = 0; d < DIM; d++)
                ba[d] += grid->bb[cs].upper[d] - grid->bb[cs].lower[d];

        ns += grid->nsubc[c];

    dsvmul(1.0/ns, ba, ba);

    fprintf(fp, "ns bb: grid %4.2f %4.2f %4.2f abs %4.2f %4.2f %4.2f rel %4.2f %4.2f %4.2f\n",
            grid->sx/GPU_NSUBCELL_X,
            grid->sy/GPU_NSUBCELL_Y,
            grid->na_sc/(grid->atom_density*grid->sx*grid->sy*GPU_NSUBCELL_Z),
            ba[XX], ba[YY], ba[ZZ],
            ba[XX]*GPU_NSUBCELL_X/grid->sx,
            ba[YY]*GPU_NSUBCELL_Y/grid->sy,
            ba[ZZ]/(grid->na_sc/(grid->atom_density*grid->sx*grid->sy*GPU_NSUBCELL_Z)));
/* Potentially sorts atoms on LJ coefficients !=0 and ==0.
 * Also sets interaction flags.
 */
void sort_on_lj(int na_c,
                int a0, int a1, const int *atinfo,
    int      subc, s, a, n1, n2, a_lj_max, i, j;
    int      sort1[NBNXN_NA_SC_MAX/GPU_NSUBCELL];
    int      sort2[NBNXN_NA_SC_MAX/GPU_NSUBCELL];
    gmx_bool haveQ, bFEP;

    for (s = a0; s < a1; s += na_c)
        /* Make lists for this (sub-)cell on atoms with and without LJ */

        for (a = s; a < min(s+na_c, a1); a++)
            haveQ = haveQ || GET_CGINFO_HAS_Q(atinfo[order[a]]);

            if (GET_CGINFO_HAS_VDW(atinfo[order[a]]))
                sort1[n1++] = order[a];

                sort2[n2++] = order[a];

        /* If we don't have atoms with LJ, there's nothing to sort */
            *flags |= NBNXN_CI_DO_LJ(subc);

            /* Only sort when strictly necessary. Ordering particles
             * can lead to less accurate summation due to rounding,
             * both for LJ and Coulomb interactions.
             */
            if (2*(a_lj_max - s) >= na_c)
                for (i = 0; i < n1; i++)
                    order[a0+i] = sort1[i];

                for (j = 0; j < n2; j++)
                    order[a0+n1+j] = sort2[j];

                *flags |= NBNXN_CI_HALF_LJ(subc);

        *flags |= NBNXN_CI_DO_COUL(subc);
/* Fill a pair search cell with atoms.
 * Potentially sorts atoms and sets the interaction flags.
 */
void fill_cell(const nbnxn_search_t nbs,
               nbnxn_atomdata_t *nbat,
               int sx, int sy, int sz,
               nbnxn_bb_t gmx_unused *bb_work_aligned)
        sort_on_lj(grid->na_c, a0, a1, atinfo, nbs->a,
                   grid->flags+(a0>>grid->na_c_2log)-grid->cell0);

        /* Set the fep flag for perturbed atoms in this (sub-)cell */

        /* The grid-local cluster/(sub-)cell index */
        c = (a0 >> grid->na_c_2log) - grid->cell0*(grid->bSimple ? 1 : GPU_NSUBCELL);

        for (at = a0; at < a1; at++)
            if (nbs->a[at] >= 0 && GET_CGINFO_FEP(atinfo[nbs->a[at]]))
                grid->fep[c] |= (1 << (at - a0));

    /* Now we have sorted the atoms, set the cell indices */
    for (a = a0; a < a1; a++)
        nbs->cell[nbs->a[a]] = a;

    copy_rvec_to_nbat_real(nbs->a+a0, a1-a0, grid->na_c, x,
                           nbat->XFormat, nbat->x, a0,

    if (nbat->XFormat == nbatX4)
        /* Store the bounding boxes as xyz.xyz. */
        offset = (a0 - grid->cell0*grid->na_sc) >> grid->na_c_2log;
        bb_ptr = grid->bb + offset;

#if defined GMX_NBNXN_SIMD && GMX_SIMD_REAL_WIDTH == 2
        if (2*grid->na_cj == grid->na_c)
            calc_bounding_box_x_x4_halves(na, nbat->x+X4_IND_A(a0), bb_ptr,
                                          grid->bbj+offset*2);
        else
#endif
            calc_bounding_box_x_x4(na, nbat->x+X4_IND_A(a0), bb_ptr);

    else if (nbat->XFormat == nbatX8)
        /* Store the bounding boxes as xyz.xyz. */
        offset = (a0 - grid->cell0*grid->na_sc) >> grid->na_c_2log;
        bb_ptr = grid->bb + offset;

        calc_bounding_box_x_x8(na, nbat->x+X8_IND_A(a0), bb_ptr);

    else if (!grid->bSimple)
        /* Store the bounding boxes in a format convenient
         * for SIMD4 calculations: xxxxyyyyzzzz...
         */
            ((a0-grid->cell0*grid->na_sc)>>(grid->na_c_2log+STRIDE_PBB_2LOG))*NNBSBB_XXXX +
            (((a0-grid->cell0*grid->na_sc)>>grid->na_c_2log) & (STRIDE_PBB-1));

#ifdef NBNXN_SEARCH_SIMD4_FLOAT_X_BB
        if (nbat->XFormat == nbatXYZQ)
            calc_bounding_box_xxxx_simd4(na, nbat->x+a0*nbat->xstride,
                                         bb_work_aligned, pbb_ptr);
        else
#endif
            calc_bounding_box_xxxx(na, nbat->xstride, nbat->x+a0*nbat->xstride,
                                   pbb_ptr);

            fprintf(debug, "%2d %2d %2d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
                    pbb_ptr[0*STRIDE_PBB], pbb_ptr[3*STRIDE_PBB],
                    pbb_ptr[1*STRIDE_PBB], pbb_ptr[4*STRIDE_PBB],
                    pbb_ptr[2*STRIDE_PBB], pbb_ptr[5*STRIDE_PBB]);

        /* Store the bounding boxes as xyz.xyz. */
        bb_ptr = grid->bb+((a0-grid->cell0*grid->na_sc)>>grid->na_c_2log);

        calc_bounding_box(na, nbat->xstride, nbat->x+a0*nbat->xstride,

            bbo = (a0 - grid->cell0*grid->na_sc)/grid->na_c;
            fprintf(debug, "%2d %2d %2d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
                    grid->bb[bbo].lower[BB_X],
                    grid->bb[bbo].lower[BB_Y],
                    grid->bb[bbo].lower[BB_Z],
                    grid->bb[bbo].upper[BB_X],
                    grid->bb[bbo].upper[BB_Y],
                    grid->bb[bbo].upper[BB_Z]);
/* Spatially sort the atoms within one grid column */
static void sort_columns_simple(const nbnxn_search_t nbs,
                                nbnxn_atomdata_t *nbat,
                                int cxy_start, int cxy_end,
    int cx, cy, cz, ncz, cfilled, c;
    int na, ash, ind, a;

        fprintf(debug, "cell0 %d sorting columns %d - %d, atoms %d - %d\n",
                grid->cell0, cxy_start, cxy_end, a0, a1);

    /* Sort the atoms within each x,y column in 3 dimensions */
    for (cxy = cxy_start; cxy < cxy_end; cxy++)
        cy = cxy - cx*grid->ncy;

        na  = grid->cxy_na[cxy];
        ncz = grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy];
        ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;

        /* Sort the atoms within each x,y column on z coordinate */
        sort_atoms(ZZ, FALSE, dd_zone,
                   1.0/grid->size[ZZ], ncz*grid->na_sc,

        /* Fill the ncz cells in this column */
        cfilled = grid->cxy_ind[cxy];
        for (cz = 0; cz < ncz; cz++)
            c = grid->cxy_ind[cxy] + cz;

            ash_c = ash + cz*grid->na_sc;
            na_c  = min(grid->na_sc, na-(ash_c-ash));

            fill_cell(nbs, grid, nbat,
                      ash_c, ash_c+na_c, atinfo, x,
                      grid->na_sc*cx + (dd_zone >> 2),
                      grid->na_sc*cy + (dd_zone & 3),

            /* This copy to bbcz is not really necessary.
             * But it allows us to use the same grid search code
             * for the simple and supersub cell setups.
             */
            grid->bbcz[c*NNBSBB_D  ] = grid->bb[cfilled].lower[BB_Z];
            grid->bbcz[c*NNBSBB_D+1] = grid->bb[cfilled].upper[BB_Z];

        /* Set the unused atom indices to -1 */
        for (ind = na; ind < ncz*grid->na_sc; ind++)
            nbs->a[ash+ind] = -1;
/* Spatially sort the atoms within one grid column */
static void sort_columns_supersub(const nbnxn_search_t nbs,
                                  nbnxn_atomdata_t *nbat,
                                  int cxy_start, int cxy_end,
    int        cx, cy, cz = -1, c = -1, ncz;
    int        na, ash, na_c, ind, a;
    int        subdiv_z, sub_z, na_z, ash_z;
    int        subdiv_y, sub_y, na_y, ash_y;
    int        subdiv_x, sub_x, na_x, ash_x;

    nbnxn_bb_t bb_work_array[2], *bb_work_aligned;

    bb_work_aligned = (nbnxn_bb_t *)(((size_t)(bb_work_array+1)) & (~((size_t)15)));

        fprintf(debug, "cell0 %d sorting columns %d - %d, atoms %d - %d\n",
                grid->cell0, cxy_start, cxy_end, a0, a1);

    subdiv_x = grid->na_c;
    subdiv_y = GPU_NSUBCELL_X*subdiv_x;
    subdiv_z = GPU_NSUBCELL_Y*subdiv_y;

    /* Sort the atoms within each x,y column in 3 dimensions */
    for (cxy = cxy_start; cxy < cxy_end; cxy++)
        cy = cxy - cx*grid->ncy;

        na  = grid->cxy_na[cxy];
        ncz = grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy];
        ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;

        /* Sort the atoms within each x,y column on z coordinate */
        sort_atoms(ZZ, FALSE, dd_zone,
                   1.0/grid->size[ZZ], ncz*grid->na_sc,

        /* This loop goes over the supercells and subcells along z at once */
        for (sub_z = 0; sub_z < ncz*GPU_NSUBCELL_Z; sub_z++)
            ash_z = ash + sub_z*subdiv_z;
            na_z  = min(subdiv_z, na-(ash_z-ash));

            /* We have already sorted on z */

            if (sub_z % GPU_NSUBCELL_Z == 0)
                cz = sub_z/GPU_NSUBCELL_Z;
                c  = grid->cxy_ind[cxy] + cz;

                /* The number of atoms in this supercell */
                na_c = min(grid->na_sc, na-(ash_z-ash));

                grid->nsubc[c] = min(GPU_NSUBCELL, (na_c+grid->na_c-1)/grid->na_c);

                /* Store the z-boundaries of the super cell */
                grid->bbcz[c*NNBSBB_D  ] = x[nbs->a[ash_z]][ZZ];
                grid->bbcz[c*NNBSBB_D+1] = x[nbs->a[ash_z+na_c-1]][ZZ];

#if GPU_NSUBCELL_Y > 1
            /* Sort the atoms along y */
            sort_atoms(YY, (sub_z & 1), dd_zone,
                       nbs->a+ash_z, na_z, x,
                       grid->c0[YY]+cy*grid->sy,
                       grid->inv_sy, subdiv_z,
#endif

            for (sub_y = 0; sub_y < GPU_NSUBCELL_Y; sub_y++)
                ash_y = ash_z + sub_y*subdiv_y;
                na_y  = min(subdiv_y, na-(ash_y-ash));

#if GPU_NSUBCELL_X > 1
                /* Sort the atoms along x */
                sort_atoms(XX, ((cz*GPU_NSUBCELL_Y + sub_y) & 1), dd_zone,
                           nbs->a+ash_y, na_y, x,
                           grid->c0[XX]+cx*grid->sx,
                           grid->inv_sx, subdiv_y,
#endif

                for (sub_x = 0; sub_x < GPU_NSUBCELL_X; sub_x++)
                    ash_x = ash_y + sub_x*subdiv_x;
                    na_x  = min(subdiv_x, na-(ash_x-ash));

                    fill_cell(nbs, grid, nbat,
                              ash_x, ash_x+na_x, atinfo, x,
                              grid->na_c*(cx*GPU_NSUBCELL_X+sub_x) + (dd_zone >> 2),
                              grid->na_c*(cy*GPU_NSUBCELL_Y+sub_y) + (dd_zone & 3),

        /* Set the unused atom indices to -1 */
        for (ind = na; ind < ncz*grid->na_sc; ind++)
            nbs->a[ash+ind] = -1;
/* Determine in which grid column atoms should go */
static void calc_column_indices(nbnxn_grid_t *grid,
                                int dd_zone, const int *move,
                                int thread, int nthread,
    /* We add one extra cell for particles which moved during DD */
    for (i = 0; i < grid->ncx*grid->ncy+1; i++)

    n0 = a0 + (int)((thread+0)*(a1 - a0))/nthread;
    n1 = a0 + (int)((thread+1)*(a1 - a0))/nthread;

        for (i = n0; i < n1; i++)
            if (move == NULL || move[i] >= 0)
                /* We need to be careful with rounding,
                 * particles might be a few bits outside the local zone.
                 * The int cast takes care of the lower bound,
                 * we will explicitly take care of the upper bound.
                 */
                cx = (int)((x[i][XX] - grid->c0[XX])*grid->inv_sx);
                cy = (int)((x[i][YY] - grid->c0[YY])*grid->inv_sy);

                if (cx < 0 || cx > grid->ncx ||
                    cy < 0 || cy > grid->ncy)
                              "grid cell cx %d cy %d out of range (max %d %d)\n"
                              "atom %f %f %f, grid->c0 %f %f",
                              cx, cy, grid->ncx, grid->ncy,
                              x[i][XX], x[i][YY], x[i][ZZ], grid->c0[XX], grid->c0[YY]);

                /* Take care of potential rounding issues */
                cx = min(cx, grid->ncx - 1);
                cy = min(cy, grid->ncy - 1);

                /* For the moment cell will contain only the grid-local
                 * x and y indices, not z.
                 */
                cell[i] = cx*grid->ncy + cy;

                /* Put this moved particle after the end of the grid,
                 * so we can process it later without using conditionals.
                 */
                cell[i] = grid->ncx*grid->ncy;

        for (i = n0; i < n1; i++)
            cx = (int)((x[i][XX] - grid->c0[XX])*grid->inv_sx);
            cy = (int)((x[i][YY] - grid->c0[YY])*grid->inv_sy);

            /* For non-home zones there could be particles outside
             * the non-bonded cut-off range, which have been communicated
             * for bonded interactions only. For the result it doesn't
             * matter where these end up on the grid. For performance
             * we put them in an extra row at the border.
             */
            cx = min(cx, grid->ncx - 1);

            cy = min(cy, grid->ncy - 1);

            /* For the moment cell will contain only the grid-local
             * x and y indices, not z.
             */
            cell[i] = cx*grid->ncy + cy;
/* Determine in which grid cells the atoms should go */
static void calc_cell_indices(const nbnxn_search_t nbs,
                              nbnxn_atomdata_t *nbat)
    int   cx, cy, cxy, ncz_max, ncz;
    int   nthread, thread;
    int  *cxy_na, cxy_na_i;

    nthread = gmx_omp_nthreads_get(emntPairsearch);

#pragma omp parallel for num_threads(nthread) schedule(static)
    for (thread = 0; thread < nthread; thread++)
        calc_column_indices(grid, a0, a1, x, dd_zone, move, thread, nthread,
                            nbs->cell, nbs->work[thread].cxy_na);

    /* Make the cell index as a function of x and y */
    grid->cxy_ind[0] = 0;
    for (i = 0; i < grid->ncx*grid->ncy+1; i++)
        /* We set ncz_max at the beginning of the loop instead of at the end
         * to skip i=grid->ncx*grid->ncy, which holds the moved particles
         * that do not need to be ordered on the grid.
         */

        cxy_na_i = nbs->work[0].cxy_na[i];
        for (thread = 1; thread < nthread; thread++)
            cxy_na_i += nbs->work[thread].cxy_na[i];

        ncz = (cxy_na_i + grid->na_sc - 1)/grid->na_sc;
        if (nbat->XFormat == nbatX8)
            /* Make the number of cells a multiple of 2 */
            ncz = (ncz + 1) & ~1;
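            /* E.g. ncz = 5 becomes 6, so that pairs of cells can share
             * one 8-atom coordinate pack in the X8 layout.
             */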
        grid->cxy_ind[i+1] = grid->cxy_ind[i] + ncz;
        /* Clear cxy_na, so we can reuse the array below */
        grid->cxy_na[i] = 0;

    grid->nc = grid->cxy_ind[grid->ncx*grid->ncy] - grid->cxy_ind[0];

    nbat->natoms = (grid->cell0 + grid->nc)*grid->na_sc;

        fprintf(debug, "ns na_sc %d na_c %d super-cells: %d x %d y %d z %.1f maxz %d\n",
                grid->na_sc, grid->na_c, grid->nc,
                grid->ncx, grid->ncy, grid->nc/((double)(grid->ncx*grid->ncy)),

        for (cy = 0; cy < grid->ncy; cy++)
            for (cx = 0; cx < grid->ncx; cx++)
                fprintf(debug, " %2d", grid->cxy_ind[i+1]-grid->cxy_ind[i]);

            fprintf(debug, "\n");

    /* Make sure the work array for sorting is large enough */
    if (ncz_max*grid->na_sc*SGSF > nbs->work[0].sort_work_nalloc)
        for (thread = 0; thread < nbs->nthread_max; thread++)
            nbs->work[thread].sort_work_nalloc =
                over_alloc_large(ncz_max*grid->na_sc*SGSF);
            srenew(nbs->work[thread].sort_work,
                   nbs->work[thread].sort_work_nalloc);
            /* When not in use, all elements should be -1 */
            for (i = 0; i < nbs->work[thread].sort_work_nalloc; i++)
                nbs->work[thread].sort_work[i] = -1;

    /* Now that we know the dimensions we can fill the grid.
     * This is the first, unsorted fill. We sort the columns after this.
     */
    for (i = a0; i < a1; i++)
        /* At this point nbs->cell contains the local grid x,y indices */
        nbs->a[(grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc + grid->cxy_na[cxy]++] = i;

        /* Set the cell indices for the moved particles */
        n0 = grid->nc*grid->na_sc;
        n1 = grid->nc*grid->na_sc+grid->cxy_na[grid->ncx*grid->ncy];

            for (i = n0; i < n1; i++)
                nbs->cell[nbs->a[i]] = i;

    /* Sort the super-cell columns along z into the sub-cells. */
#pragma omp parallel for num_threads(nthread) schedule(static)
    for (thread = 0; thread < nthread; thread++)
        if (grid->bSimple)
            sort_columns_simple(nbs, dd_zone, grid, a0, a1, atinfo, x, nbat,
                                ((thread+0)*grid->ncx*grid->ncy)/nthread,
                                ((thread+1)*grid->ncx*grid->ncy)/nthread,
                                nbs->work[thread].sort_work);
        else
            sort_columns_supersub(nbs, dd_zone, grid, a0, a1, atinfo, x, nbat,
                                  ((thread+0)*grid->ncx*grid->ncy)/nthread,
                                  ((thread+1)*grid->ncx*grid->ncy)/nthread,
                                  nbs->work[thread].sort_work);

    if (grid->bSimple && nbat->XFormat == nbatX8)
        combine_bounding_box_pairs(grid, grid->bb);

        grid->nsubc_tot = 0;
        for (i = 0; i < grid->nc; i++)
            grid->nsubc_tot += grid->nsubc[i];

            print_bbsizes_simple(debug, grid);

            fprintf(debug, "ns non-zero sub-cells: %d average atoms %.2f\n",
                    grid->nsubc_tot, (a1-a0)/(double)grid->nsubc_tot);

            print_bbsizes_supersub(debug, grid);
static void init_buffer_flags(nbnxn_buffer_flags_t *flags,
                              int                   natoms)
    flags->nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE;
    if (flags->nflag > flags->flag_nalloc)
        flags->flag_nalloc = over_alloc_large(flags->nflag);
        srenew(flags->flag, flags->flag_nalloc);

    for (b = 0; b < flags->nflag; b++)
        bitmask_clear(&(flags->flag[b]));
/* Sets up a grid and puts the atoms on the grid.
 * This function only operates on one domain of the domain decomposition.
 * Note that without domain decomposition there is only one domain.
 */
void nbnxn_put_on_grid(nbnxn_search_t nbs,
                       int ePBC, matrix box,
                       rvec corner0, rvec corner1,
                       int nmoved, int *move,
                       nbnxn_atomdata_t *nbat)
    int nc_max_grid, nc_max;

    grid = &nbs->grid[dd_zone];

    nbs_cycle_start(&nbs->cc[enbsCCgrid]);

    grid->bSimple = nbnxn_kernel_pairlist_simple(nb_kernel_type);

    grid->na_c      = nbnxn_kernel_to_ci_size(nb_kernel_type);
    grid->na_cj     = nbnxn_kernel_to_cj_size(nb_kernel_type);
    grid->na_sc     = (grid->bSimple ? 1 : GPU_NSUBCELL)*grid->na_c;
    grid->na_c_2log = get_2log(grid->na_c);

    nbat->na_c = grid->na_c;

            (nbs->grid[dd_zone-1].cell0 + nbs->grid[dd_zone-1].nc)*
            nbs->grid[dd_zone-1].na_sc/grid->na_sc;

        copy_mat(box, nbs->box);

        /* Avoid zero density */
        if (atom_density > 0)
            grid->atom_density = atom_density;

            grid->atom_density = grid_atom_density(n-nmoved, corner0, corner1);

        nbs->natoms_local = a1 - nmoved;
        /* We assume that nbnxn_put_on_grid is called first
         * for the local atoms (dd_zone=0).
         */
        nbs->natoms_nonlocal = a1 - nmoved;

            fprintf(debug, "natoms_local = %5d atom_density = %5.1f\n",
                    nbs->natoms_local, grid->atom_density);

        nbs->natoms_nonlocal = max(nbs->natoms_nonlocal, a1);

    /* We always use the home zone (grid[0]) for setting the cell size,
     * since determining densities for non-local zones is difficult.
     */
    nc_max_grid = set_grid_size_xy(nbs, grid,
                                   dd_zone, n-nmoved, corner0, corner1,
                                   nbs->grid[0].atom_density);

    nc_max = grid->cell0 + nc_max_grid;

    if (a1 > nbs->cell_nalloc)
        nbs->cell_nalloc = over_alloc_large(a1);
        srenew(nbs->cell, nbs->cell_nalloc);

    /* To avoid conditionals we store the moved particles at the end of a;
     * make sure we have enough space.
     */
    if (nc_max*grid->na_sc + nmoved > nbs->a_nalloc)
        nbs->a_nalloc = over_alloc_large(nc_max*grid->na_sc + nmoved);
        srenew(nbs->a, nbs->a_nalloc);

    /* We need padding up to a multiple of the buffer flag size: simply add */
    if (nc_max*grid->na_sc + NBNXN_BUFFERFLAG_SIZE > nbat->nalloc)
        nbnxn_atomdata_realloc(nbat, nc_max*grid->na_sc+NBNXN_BUFFERFLAG_SIZE);

    calc_cell_indices(nbs, dd_zone, grid, a0, a1, atinfo, x, move, nbat);

        nbat->natoms_local = nbat->natoms;

    nbs_cycle_stop(&nbs->cc[enbsCCgrid]);
/* Calls nbnxn_put_on_grid for all non-local domains */
void nbnxn_put_on_grid_nonlocal(nbnxn_search_t            nbs,
                                const gmx_domdec_zones_t *zones,
                                nbnxn_atomdata_t         *nbat)
    for (zone = 1; zone < zones->n; zone++)
        for (d = 0; d < DIM; d++)
            c0[d] = zones->size[zone].bb_x0[d];
            c1[d] = zones->size[zone].bb_x1[d];

        nbnxn_put_on_grid(nbs, nbs->ePBC, NULL,
                          zones->cg_range[zone],
                          zones->cg_range[zone+1],
/* Add simple grid type information to the local super/sub grid */
void nbnxn_grid_add_simple(nbnxn_search_t    nbs,
                           nbnxn_atomdata_t *nbat)
    int nthreads gmx_unused;

    grid = &nbs->grid[0];

        gmx_incons("nbnxn_grid_simple called with a simple grid");

    ncd = grid->na_sc/NBNXN_CPU_CLUSTER_I_SIZE;

    if (grid->nc*ncd > grid->nc_nalloc_simple)
        grid->nc_nalloc_simple = over_alloc_large(grid->nc*ncd);
        srenew(grid->bbcz_simple, grid->nc_nalloc_simple*NNBSBB_D);
        srenew(grid->bb_simple, grid->nc_nalloc_simple);
        srenew(grid->flags_simple, grid->nc_nalloc_simple);

            sfree_aligned(grid->bbj);
            snew_aligned(grid->bbj, grid->nc_nalloc_simple/2, 16);

    bbcz = grid->bbcz_simple;
    bb   = grid->bb_simple;

    nthreads = gmx_omp_nthreads_get(emntPairsearch);
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (sc = 0; sc < grid->nc; sc++)
        for (c = 0; c < ncd; c++)
            na = NBNXN_CPU_CLUSTER_I_SIZE;
                   nbat->type[tx*NBNXN_CPU_CLUSTER_I_SIZE+na-1] == nbat->ntype-1)

            switch (nbat->XFormat)
                    /* PACK_X4==NBNXN_CPU_CLUSTER_I_SIZE, so this is simple */
                    calc_bounding_box_x_x4(na, nbat->x+tx*STRIDE_P4,
                    /* PACK_X8>NBNXN_CPU_CLUSTER_I_SIZE, more complicated */
                    calc_bounding_box_x_x8(na, nbat->x+X8_IND_A(tx*NBNXN_CPU_CLUSTER_I_SIZE),
                    calc_bounding_box(na, nbat->xstride,
                                      nbat->x+tx*NBNXN_CPU_CLUSTER_I_SIZE*nbat->xstride,

            bbcz[tx*NNBSBB_D+0] = bb[tx].lower[BB_Z];
            bbcz[tx*NNBSBB_D+1] = bb[tx].upper[BB_Z];

            /* No interaction optimization yet here */
            grid->flags_simple[tx] = NBNXN_CI_DO_LJ(0) | NBNXN_CI_DO_COUL(0);

            grid->flags_simple[tx] = 0;

    if (grid->bSimple && nbat->XFormat == nbatX8)
        combine_bounding_box_pairs(grid, grid->bb_simple);
void nbnxn_get_ncells(nbnxn_search_t nbs, int *ncx, int *ncy)
    *ncx = nbs->grid[0].ncx;
    *ncy = nbs->grid[0].ncy;

void nbnxn_get_atomorder(nbnxn_search_t nbs, int **a, int *n)
    const nbnxn_grid_t *grid;

    grid = &nbs->grid[0];

    /* Return the atom order for the home cell (index 0) */
    *a = nbs->a;

    *n = grid->cxy_ind[grid->ncx*grid->ncy]*grid->na_sc;

void nbnxn_set_atomorder(nbnxn_search_t nbs)
    int ao, cx, cy, cxy, cz, j;

    /* Set the atom order for the home cell (index 0) */
    grid = &nbs->grid[0];

    for (cx = 0; cx < grid->ncx; cx++)
        for (cy = 0; cy < grid->ncy; cy++)
            cxy = cx*grid->ncy + cy;
            j   = grid->cxy_ind[cxy]*grid->na_sc;
            for (cz = 0; cz < grid->cxy_na[cxy]; cz++)
/* Determines the cell range along one dimension that
 * the bounding box b0 - b1 sees.
 */
static void get_cell_range(real b0, real b1,
                           int nc, real c0, real s, real invs,
                           real d2, real r2, int *cf, int *cl)
    *cf = max((int)((b0 - c0)*invs), 0);

    while (*cf > 0 && d2 + sqr((b0 - c0) - (*cf-1+1)*s) < r2)

    *cl = min((int)((b1 - c0)*invs), nc-1);
    while (*cl < nc-1 && d2 + sqr((*cl+1)*s - (b1 - c0)) < r2)
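
/* Sketch (illustrative): the initial guesses are the cells containing b0 and
 * b1; each while loop then extends the range as long as d2, the distance^2
 * contribution from the other dimensions, plus the gap to the next cell edge
 * is still within the cut-off r2. E.g. with c0 = 0 and s = invs = 1, b0 = 2.3
 * gives *cf = 2, which is lowered to 1 when d2 + sqr(2.3 - 2) < r2.
 */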
/* Reference code calculating the distance^2 between two bounding boxes */
static float box_dist2(float bx0, float bx1, float by0,
                       float by1, float bz0, float bz1,
                       const nbnxn_bb_t *bb)
    float dl, dh, dm, dm0;

    dl = bx0 - bb->upper[BB_X];
    dh = bb->lower[BB_X] - bx1;

    dl = by0 - bb->upper[BB_Y];
    dh = bb->lower[BB_Y] - by1;

    dl = bz0 - bb->upper[BB_Z];
    dh = bb->lower[BB_Z] - bz1;

/* Plain C code calculating the distance^2 between two bounding boxes */
static float subc_bb_dist2(int si, const nbnxn_bb_t *bb_i_ci,
                           int csj, const nbnxn_bb_t *bb_j_all)
    const nbnxn_bb_t *bb_i, *bb_j;
    float             dl, dh, dm, dm0;

    bb_i = bb_i_ci  + si;
    bb_j = bb_j_all + csj;

    dl = bb_i->lower[BB_X] - bb_j->upper[BB_X];
    dh = bb_j->lower[BB_X] - bb_i->upper[BB_X];

    dl = bb_i->lower[BB_Y] - bb_j->upper[BB_Y];
    dh = bb_j->lower[BB_Y] - bb_i->upper[BB_Y];

    dl = bb_i->lower[BB_Z] - bb_j->upper[BB_Z];
    dh = bb_j->lower[BB_Z] - bb_i->upper[BB_Z];
#ifdef NBNXN_SEARCH_BB_SIMD4

/* 4-wide SIMD code for bb distance for bb format xyz0 */
static float subc_bb_dist2_simd4(int si, const nbnxn_bb_t *bb_i_ci,
                                 int csj, const nbnxn_bb_t *bb_j_all)
    gmx_simd4_float_t bb_i_S0, bb_i_S1;
    gmx_simd4_float_t bb_j_S0, bb_j_S1;
    gmx_simd4_float_t dl_S;
    gmx_simd4_float_t dh_S;
    gmx_simd4_float_t dm_S;
    gmx_simd4_float_t dm0_S;

    bb_i_S0 = gmx_simd4_load_f(&bb_i_ci[si].lower[0]);
    bb_i_S1 = gmx_simd4_load_f(&bb_i_ci[si].upper[0]);
    bb_j_S0 = gmx_simd4_load_f(&bb_j_all[csj].lower[0]);
    bb_j_S1 = gmx_simd4_load_f(&bb_j_all[csj].upper[0]);

    dl_S  = gmx_simd4_sub_f(bb_i_S0, bb_j_S1);
    dh_S  = gmx_simd4_sub_f(bb_j_S0, bb_i_S1);

    dm_S  = gmx_simd4_max_f(dl_S, dh_S);
    dm0_S = gmx_simd4_max_f(dm_S, gmx_simd4_setzero_f());

    return gmx_simd4_dotproduct3_f(dm0_S, dm0_S);

/* Calculate bb bounding distances of bb_i[si,...,si+3] and store them in d2 */
#define SUBC_BB_DIST2_SIMD4_XXXX_INNER(si, bb_i, d2)     \
        gmx_simd4_float_t dx_0, dy_0, dz_0;              \
        gmx_simd4_float_t dx_1, dy_1, dz_1;              \
                                                         \
        gmx_simd4_float_t mx, my, mz;                    \
        gmx_simd4_float_t m0x, m0y, m0z;                 \
                                                         \
        gmx_simd4_float_t d2x, d2y, d2z;                 \
        gmx_simd4_float_t d2s, d2t;                      \
                                                         \
        shi = si*NNBSBB_D*DIM;                           \
                                                         \
        xi_l = gmx_simd4_load_f(bb_i+shi+0*STRIDE_PBB);  \
        yi_l = gmx_simd4_load_f(bb_i+shi+1*STRIDE_PBB);  \
        zi_l = gmx_simd4_load_f(bb_i+shi+2*STRIDE_PBB);  \
        xi_h = gmx_simd4_load_f(bb_i+shi+3*STRIDE_PBB);  \
        yi_h = gmx_simd4_load_f(bb_i+shi+4*STRIDE_PBB);  \
        zi_h = gmx_simd4_load_f(bb_i+shi+5*STRIDE_PBB);  \
                                                         \
        dx_0 = gmx_simd4_sub_f(xi_l, xj_h);              \
        dy_0 = gmx_simd4_sub_f(yi_l, yj_h);              \
        dz_0 = gmx_simd4_sub_f(zi_l, zj_h);              \
                                                         \
        dx_1 = gmx_simd4_sub_f(xj_l, xi_h);              \
        dy_1 = gmx_simd4_sub_f(yj_l, yi_h);              \
        dz_1 = gmx_simd4_sub_f(zj_l, zi_h);              \
                                                         \
        mx   = gmx_simd4_max_f(dx_0, dx_1);              \
        my   = gmx_simd4_max_f(dy_0, dy_1);              \
        mz   = gmx_simd4_max_f(dz_0, dz_1);              \
                                                         \
        m0x  = gmx_simd4_max_f(mx, zero);                \
        m0y  = gmx_simd4_max_f(my, zero);                \
        m0z  = gmx_simd4_max_f(mz, zero);                \
                                                         \
        d2x  = gmx_simd4_mul_f(m0x, m0x);                \
        d2y  = gmx_simd4_mul_f(m0y, m0y);                \
        d2z  = gmx_simd4_mul_f(m0z, m0z);                \
                                                         \
        d2s  = gmx_simd4_add_f(d2x, d2y);                \
        d2t  = gmx_simd4_add_f(d2s, d2z);                \
                                                         \
        gmx_simd4_store_f(d2+si, d2t);                   \
/* 4-wide SIMD code for nsi bb distances for bb format xxxxyyyyzzzz */
static void subc_bb_dist2_simd4_xxxx(const float *bb_j,
                                     int nsi, const float *bb_i,
                                     float *d2)
    gmx_simd4_float_t xj_l, yj_l, zj_l;
    gmx_simd4_float_t xj_h, yj_h, zj_h;
    gmx_simd4_float_t xi_l, yi_l, zi_l;
    gmx_simd4_float_t xi_h, yi_h, zi_h;

    gmx_simd4_float_t zero;

    zero = gmx_simd4_setzero_f();

    xj_l = gmx_simd4_set1_f(bb_j[0*STRIDE_PBB]);
    yj_l = gmx_simd4_set1_f(bb_j[1*STRIDE_PBB]);
    zj_l = gmx_simd4_set1_f(bb_j[2*STRIDE_PBB]);
    xj_h = gmx_simd4_set1_f(bb_j[3*STRIDE_PBB]);
    yj_h = gmx_simd4_set1_f(bb_j[4*STRIDE_PBB]);
    zj_h = gmx_simd4_set1_f(bb_j[5*STRIDE_PBB]);

    /* Here we "loop" over si (0,STRIDE_PBB) from 0 to nsi with step STRIDE_PBB.
     * But as we know the number of iterations is 1 or 2, we unroll manually.
     */
    SUBC_BB_DIST2_SIMD4_XXXX_INNER(0, bb_i, d2);
    if (STRIDE_PBB < nsi)
        SUBC_BB_DIST2_SIMD4_XXXX_INNER(STRIDE_PBB, bb_i, d2);

#endif /* NBNXN_SEARCH_BB_SIMD4 */
/* Plain C function which determines if any atom pair between two cells
 * is within distance sqrt(rl2).
 */
static gmx_bool subc_in_range_x(int na_c,
                                int si, const real *x_i,
                                int csj, int stride, const real *x_j,
                                real rl2)
    for (i = 0; i < na_c; i++)
        i0 = (si*na_c + i)*DIM;
        for (j = 0; j < na_c; j++)
            j0 = (csj*na_c + j)*stride;

            d2 = sqr(x_i[i0  ] - x_j[j0  ]) +
                 sqr(x_i[i0+1] - x_j[j0+1]) +
                 sqr(x_i[i0+2] - x_j[j0+2]);
#ifdef NBNXN_SEARCH_SIMD4_FLOAT_X_BB

/* 4-wide SIMD function which determines if any atom pair between two cells,
 * both with 8 atoms, is within distance sqrt(rl2).
 * Using 8-wide AVX is not faster on Intel Sandy Bridge.
 */
static gmx_bool subc_in_range_simd4(int na_c,
                                    int si, const real *x_i,
                                    int csj, int stride, const real *x_j,
                                    real rl2)
    gmx_simd4_real_t ix_S0, iy_S0, iz_S0;
    gmx_simd4_real_t ix_S1, iy_S1, iz_S1;

    gmx_simd4_real_t rc2_S;

    rc2_S = gmx_simd4_set1_r(rl2);

    dim_stride = NBNXN_GPU_CLUSTER_SIZE/STRIDE_PBB*DIM;
    ix_S0      = gmx_simd4_load_r(x_i+(si*dim_stride+0)*STRIDE_PBB);
    iy_S0      = gmx_simd4_load_r(x_i+(si*dim_stride+1)*STRIDE_PBB);
    iz_S0      = gmx_simd4_load_r(x_i+(si*dim_stride+2)*STRIDE_PBB);
    ix_S1      = gmx_simd4_load_r(x_i+(si*dim_stride+3)*STRIDE_PBB);
    iy_S1      = gmx_simd4_load_r(x_i+(si*dim_stride+4)*STRIDE_PBB);
    iz_S1      = gmx_simd4_load_r(x_i+(si*dim_stride+5)*STRIDE_PBB);

    /* We loop from the outer to the inner particles to maximize
     * the chance that we find a pair in range quickly and return.
     */
        gmx_simd4_real_t jx0_S, jy0_S, jz0_S;
        gmx_simd4_real_t jx1_S, jy1_S, jz1_S;

        gmx_simd4_real_t dx_S0, dy_S0, dz_S0;
        gmx_simd4_real_t dx_S1, dy_S1, dz_S1;
        gmx_simd4_real_t dx_S2, dy_S2, dz_S2;
        gmx_simd4_real_t dx_S3, dy_S3, dz_S3;

        gmx_simd4_real_t rsq_S0;
        gmx_simd4_real_t rsq_S1;
        gmx_simd4_real_t rsq_S2;
        gmx_simd4_real_t rsq_S3;

        gmx_simd4_bool_t wco_S0;
        gmx_simd4_bool_t wco_S1;
        gmx_simd4_bool_t wco_S2;
        gmx_simd4_bool_t wco_S3;
        gmx_simd4_bool_t wco_any_S01, wco_any_S23, wco_any_S;

        jx0_S = gmx_simd4_set1_r(x_j[j0*stride+0]);
        jy0_S = gmx_simd4_set1_r(x_j[j0*stride+1]);
        jz0_S = gmx_simd4_set1_r(x_j[j0*stride+2]);

        jx1_S = gmx_simd4_set1_r(x_j[j1*stride+0]);
        jy1_S = gmx_simd4_set1_r(x_j[j1*stride+1]);
        jz1_S = gmx_simd4_set1_r(x_j[j1*stride+2]);

        /* Calculate distance */
        dx_S0 = gmx_simd4_sub_r(ix_S0, jx0_S);
        dy_S0 = gmx_simd4_sub_r(iy_S0, jy0_S);
        dz_S0 = gmx_simd4_sub_r(iz_S0, jz0_S);
        dx_S1 = gmx_simd4_sub_r(ix_S1, jx0_S);
        dy_S1 = gmx_simd4_sub_r(iy_S1, jy0_S);
        dz_S1 = gmx_simd4_sub_r(iz_S1, jz0_S);
        dx_S2 = gmx_simd4_sub_r(ix_S0, jx1_S);
        dy_S2 = gmx_simd4_sub_r(iy_S0, jy1_S);
        dz_S2 = gmx_simd4_sub_r(iz_S0, jz1_S);
        dx_S3 = gmx_simd4_sub_r(ix_S1, jx1_S);
        dy_S3 = gmx_simd4_sub_r(iy_S1, jy1_S);
        dz_S3 = gmx_simd4_sub_r(iz_S1, jz1_S);

        /* rsq = dx*dx+dy*dy+dz*dz */
        rsq_S0 = gmx_simd4_calc_rsq_r(dx_S0, dy_S0, dz_S0);
        rsq_S1 = gmx_simd4_calc_rsq_r(dx_S1, dy_S1, dz_S1);
        rsq_S2 = gmx_simd4_calc_rsq_r(dx_S2, dy_S2, dz_S2);
        rsq_S3 = gmx_simd4_calc_rsq_r(dx_S3, dy_S3, dz_S3);

        wco_S0 = gmx_simd4_cmplt_r(rsq_S0, rc2_S);
        wco_S1 = gmx_simd4_cmplt_r(rsq_S1, rc2_S);
        wco_S2 = gmx_simd4_cmplt_r(rsq_S2, rc2_S);
        wco_S3 = gmx_simd4_cmplt_r(rsq_S3, rc2_S);

        wco_any_S01 = gmx_simd4_or_b(wco_S0, wco_S1);
        wco_any_S23 = gmx_simd4_or_b(wco_S2, wco_S3);
        wco_any_S   = gmx_simd4_or_b(wco_any_S01, wco_any_S23);

        if (gmx_simd4_anytrue_b(wco_any_S))
/* Returns the j sub-cell for index cj_ind */
static int nbl_cj(const nbnxn_pairlist_t *nbl, int cj_ind)
    return nbl->cj4[cj_ind >> NBNXN_GPU_JGROUP_SIZE_2LOG].cj[cj_ind & (NBNXN_GPU_JGROUP_SIZE - 1)];
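
/* Packing sketch (assuming NBNXN_GPU_JGROUP_SIZE == 4, i.e. 2LOG == 2):
 * j-clusters are stored in groups of 4, so cj_ind 11 lives in cj4 entry
 * 11>>2 == 2 at slot 11&3 == 3.
 */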
/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
static unsigned int nbl_imask0(const nbnxn_pairlist_t *nbl, int cj_ind)
    return nbl->cj4[cj_ind >> NBNXN_GPU_JGROUP_SIZE_2LOG].imei[0].imask;

/* Ensures there is enough space for extra exclusion masks */
static void check_excl_space(nbnxn_pairlist_t *nbl, int extra)
    if (nbl->nexcl+extra > nbl->excl_nalloc)
        nbl->excl_nalloc = over_alloc_small(nbl->nexcl+extra);
        nbnxn_realloc_void((void **)&nbl->excl,
                           nbl->nexcl*sizeof(*nbl->excl),
                           nbl->excl_nalloc*sizeof(*nbl->excl),
                           nbl->alloc, nbl->free);

/* Ensures there is enough space for ncell extra j-cells in the list */
static void check_subcell_list_space_simple(nbnxn_pairlist_t *nbl,
                                            int               ncell)
    cj_max = nbl->ncj + ncell;

    if (cj_max > nbl->cj_nalloc)
        nbl->cj_nalloc = over_alloc_small(cj_max);
        nbnxn_realloc_void((void **)&nbl->cj,
                           nbl->ncj*sizeof(*nbl->cj),
                           nbl->cj_nalloc*sizeof(*nbl->cj),
                           nbl->alloc, nbl->free);
/* Ensures there is enough space for ncell extra j-subcells in the list */
static void check_subcell_list_space_supersub(nbnxn_pairlist_t *nbl,
                                              int               nsupercell)
    int ncj4_max, j4, j, w, t;

#define NWARP       2
#define WARP_SIZE  32

    /* We can have maximally nsupercell*GPU_NSUBCELL sj lists */
    /* We can store 4 j-subcell - i-supercell pairs in one struct.
     * Since we round down, we need one extra entry.
     */
    ncj4_max = ((nbl->work->cj_ind + nsupercell*GPU_NSUBCELL + NBNXN_GPU_JGROUP_SIZE - 1) >> NBNXN_GPU_JGROUP_SIZE_2LOG);

    if (ncj4_max > nbl->cj4_nalloc)
        nbl->cj4_nalloc = over_alloc_small(ncj4_max);
        nbnxn_realloc_void((void **)&nbl->cj4,
                           nbl->work->cj4_init*sizeof(*nbl->cj4),
                           nbl->cj4_nalloc*sizeof(*nbl->cj4),
                           nbl->alloc, nbl->free);

    if (ncj4_max > nbl->work->cj4_init)
        for (j4 = nbl->work->cj4_init; j4 < ncj4_max; j4++)
            /* No i-subcells and no excl's in the list initially */
            for (w = 0; w < NWARP; w++)
                nbl->cj4[j4].imei[w].imask    = 0U;
                nbl->cj4[j4].imei[w].excl_ind = 0;

        nbl->work->cj4_init = ncj4_max;

/* Set all excl masks for one GPU warp to no exclusions */
static void set_no_excls(nbnxn_excl_t *excl)
    for (t = 0; t < WARP_SIZE; t++)
        /* Turn all interaction bits on */
        excl->pair[t] = NBNXN_INTERACTION_MASK_ALL;
/* Initializes a single nbnxn_pairlist_t data structure */
static void nbnxn_init_pairlist(nbnxn_pairlist_t *nbl,
                                gmx_bool          bSimple,
                                nbnxn_alloc_t    *alloc,
                                nbnxn_free_t     *free)
        nbl->alloc = nbnxn_alloc_aligned;

        nbl->free = nbnxn_free_aligned;

    nbl->bSimple = bSimple;

    /* We need one element extra in sj, so alloc initially with 1 */
    nbl->cj4_nalloc = 0;

    nbl->excl_nalloc = 0;

    check_excl_space(nbl, 1);

    set_no_excls(&nbl->excl[0]);

        snew_aligned(nbl->work->bb_ci, 1, NBNXN_SEARCH_BB_MEM_ALIGN);

        snew_aligned(nbl->work->pbb_ci, GPU_NSUBCELL/STRIDE_PBB*NNBSBB_XXXX, NBNXN_SEARCH_BB_MEM_ALIGN);

        snew_aligned(nbl->work->bb_ci, GPU_NSUBCELL, NBNXN_SEARCH_BB_MEM_ALIGN);

    snew_aligned(nbl->work->x_ci, NBNXN_NA_SC_MAX*DIM, NBNXN_SEARCH_BB_MEM_ALIGN);
#ifdef GMX_NBNXN_SIMD
    snew_aligned(nbl->work->x_ci_simd_4xn, 1, NBNXN_MEM_ALIGN);
    snew_aligned(nbl->work->x_ci_simd_2xnn, 1, NBNXN_MEM_ALIGN);
#endif
    snew_aligned(nbl->work->d2, GPU_NSUBCELL, NBNXN_SEARCH_BB_MEM_ALIGN);

    nbl->work->sort            = NULL;
    nbl->work->sort_nalloc     = 0;
    nbl->work->sci_sort        = NULL;
    nbl->work->sci_sort_nalloc = 0;
2561 void nbnxn_init_pairlist_set(nbnxn_pairlist_set_t *nbl_list,
2562 gmx_bool bSimple, gmx_bool bCombined,
2563 nbnxn_alloc_t *alloc,
2568 nbl_list->bSimple = bSimple;
2569 nbl_list->bCombined = bCombined;
2571 nbl_list->nnbl = gmx_omp_nthreads_get(emntNonbonded);
2573 if (!nbl_list->bCombined &&
2574 nbl_list->nnbl > NBNXN_BUFFERFLAG_MAX_THREADS)
2576 gmx_fatal(FARGS, "%d OpenMP threads were requested. Since the non-bonded force buffer reduction is prohibitively slow with more than %d threads, we do not allow this. Use %d or less OpenMP threads.",
2577 nbl_list->nnbl, NBNXN_BUFFERFLAG_MAX_THREADS, NBNXN_BUFFERFLAG_MAX_THREADS);
2580 snew(nbl_list->nbl, nbl_list->nnbl);
2581 snew(nbl_list->nbl_fep, nbl_list->nnbl);
2582 /* Execute in order to avoid memory interleaving between threads */
2583 #pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
2584 for (i = 0; i < nbl_list->nnbl; i++)
2586 /* Allocate the nblist data structure locally on each thread
2587 * to optimize memory access for NUMA architectures.
2589 snew(nbl_list->nbl[i], 1);
2591 /* Only list 0 is used on the GPU, use normal allocation for i>0 */
2594 nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, alloc, free);
2598 nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, NULL, NULL);
2601 snew(nbl_list->nbl_fep[i], 1);
2602 nbnxn_init_pairlist_fep(nbl_list->nbl_fep[i]);
2606 /* Print statistics of a pair list, used for debug output */
2607 static void print_nblist_statistics_simple(FILE *fp, const nbnxn_pairlist_t *nbl,
2608 const nbnxn_search_t nbs, real rl)
2610 const nbnxn_grid_t *grid;
2615 /* This code only produces correct statistics with domain decomposition */
2616 grid = &nbs->grid[0];
2618 fprintf(fp, "nbl nci %d ncj %d\n",
2619 nbl->nci, nbl->ncj);
2620 fprintf(fp, "nbl na_sc %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
2621 nbl->na_sc, rl, nbl->ncj, nbl->ncj/(double)grid->nc,
2622 nbl->ncj/(double)grid->nc*grid->na_sc,
2623 nbl->ncj/(double)grid->nc*grid->na_sc/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nc*grid->na_sc/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));
2625 fprintf(fp, "nbl average j cell list length %.1f\n",
2626 0.25*nbl->ncj/(double)nbl->nci);
2628 for (s = 0; s < SHIFTS; s++)
2633 for (i = 0; i < nbl->nci; i++)
2635 cs[nbl->ci[i].shift & NBNXN_CI_SHIFT] +=
2636 nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start;
2638 j = nbl->ci[i].cj_ind_start;
2639 while (j < nbl->ci[i].cj_ind_end &&
2640 nbl->cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
2646 fprintf(fp, "nbl cell pairs, total: %d excl: %d %.1f%%\n",
2647 nbl->ncj, npexcl, 100*npexcl/(double)nbl->ncj);
2648 for (s = 0; s < SHIFTS; s++)
2652 fprintf(fp, "nbl shift %2d ncj %3d\n", s, cs[s]);
2657 /* Print statistics of a pair list, used for debug output */
2658 static void print_nblist_statistics_supersub(FILE *fp, const nbnxn_pairlist_t *nbl,
2659 const nbnxn_search_t nbs, real rl)
2661 const nbnxn_grid_t *grid;
2662 int i, j4, j, si, b;
2663 int c[GPU_NSUBCELL+1];
2665 /* This code only produces correct statistics with domain decomposition */
2666 grid = &nbs->grid[0];
2668 fprintf(fp, "nbl nsci %d ncj4 %d nsi %d excl4 %d\n",
2669 nbl->nsci, nbl->ncj4, nbl->nci_tot, nbl->nexcl);
2670 fprintf(fp, "nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
2671 nbl->na_ci, rl, nbl->nci_tot, nbl->nci_tot/(double)grid->nsubc_tot,
2672 nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c,
2673 nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nsubc_tot*grid->na_c/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));
2675 fprintf(fp, "nbl average j super cell list length %.1f\n",
2676 0.25*nbl->ncj4/(double)nbl->nsci);
2677 fprintf(fp, "nbl average i sub cell list length %.1f\n",
2678 nbl->nci_tot/((double)nbl->ncj4));
2680 for (si = 0; si <= GPU_NSUBCELL; si++)
2684 for (i = 0; i < nbl->nsci; i++)
2686 for (j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
2688 for (j = 0; j < NBNXN_GPU_JGROUP_SIZE; j++)
2691 for (si = 0; si < GPU_NSUBCELL; si++)
2693 if (nbl->cj4[j4].imei[0].imask & (1U << (j*GPU_NSUBCELL + si)))
2702 for (b = 0; b <= GPU_NSUBCELL; b++)
2704 fprintf(fp, "nbl j-list #i-subcell %d %7d %4.1f\n",
2705 b, c[b], 100.0*c[b]/(double)(nbl->ncj4*NBNXN_GPU_JGROUP_SIZE));
2709 /* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp */
2710 static void low_get_nbl_exclusions(nbnxn_pairlist_t *nbl, int cj4,
2711 int warp, nbnxn_excl_t **excl)
2713 if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
2715 /* No exclusions set, make a new list entry */
2716 nbl->cj4[cj4].imei[warp].excl_ind = nbl->nexcl;
2718 *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
2719 set_no_excls(*excl);
2723 /* We already have some exclusions, new ones can be added to the list */
2724 *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
2728 /* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp,
2729 * generates a new element and allocates extra memory, if necessary.
2731 static void get_nbl_exclusions_1(nbnxn_pairlist_t *nbl, int cj4,
2732 int warp, nbnxn_excl_t **excl)
2734 if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
2736 /* We need to make a new list entry, check if we have space */
2737 check_excl_space(nbl, 1);
2739 low_get_nbl_exclusions(nbl, cj4, warp, excl);
2742 /* Returns pointers to the exclusion mask for cj4-unit cj4 for both warps,
2743 * generates a new element and allocates extra memory, if necessary.
2745 static void get_nbl_exclusions_2(nbnxn_pairlist_t *nbl, int cj4,
2746 nbnxn_excl_t **excl_w0,
2747 nbnxn_excl_t **excl_w1)
2749 /* Check for space we might need */
2750 check_excl_space(nbl, 2);
2752 low_get_nbl_exclusions(nbl, cj4, 0, excl_w0);
2753 low_get_nbl_exclusions(nbl, cj4, 1, excl_w1);
2756 /* Sets the self exclusions i=j and pair exclusions i>j */
2757 static void set_self_and_newton_excls_supersub(nbnxn_pairlist_t *nbl,
2758 int cj4_ind, int sj_offset,
2761 nbnxn_excl_t *excl[2];
2764 /* Here we only set the self and double pair exclusions */
2766 get_nbl_exclusions_2(nbl, cj4_ind, &excl[0], &excl[1]);
2768 /* Only minor < major bits set */
2769 for (ej = 0; ej < nbl->na_ci; ej++)
2772 for (ei = ej; ei < nbl->na_ci; ei++)
2774 excl[w]->pair[(ej & (NBNXN_GPU_JGROUP_SIZE-1))*nbl->na_ci + ei] &=
2775 ~(1U << (sj_offset*GPU_NSUBCELL + si));
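/* Worked example (illustrative, assuming na_ci = 8, NBNXN_GPU_JGROUP_SIZE = 4
 * and a warp index w = ej >> 2, which is elided above): for ej = 5, ei = 6
 * the entry addressed is excl[1]->pair[(5 & 3)*8 + 6] = pair[14], and the
 * cleared bit within it is bit sj_offset*GPU_NSUBCELL + si.
 */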
2780 /* Returns a diagonal or off-diagonal interaction mask for plain C lists */
2781 static unsigned int get_imask(gmx_bool rdiag, int ci, int cj)
2783 return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
2786 /* Returns a diagonal or off-diagonal interaction mask for cj-size=2 */
2787 static unsigned int get_imask_simd_j2(gmx_bool rdiag, int ci, int cj)
2789 return (rdiag && ci*2 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_0 :
2790 (rdiag && ci*2+1 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_1 :
2791 NBNXN_INTERACTION_MASK_ALL));
2794 /* Returns a diagonal or off-diagonal interaction mask for cj-size=4 */
2795 static unsigned int get_imask_simd_j4(gmx_bool rdiag, int ci, int cj)
2797 return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
2800 /* Returns a diagonal or off-diagonal interaction mask for cj-size=8 */
2801 static unsigned int get_imask_simd_j8(gmx_bool rdiag, int ci, int cj)
2803 return (rdiag && ci == cj*2 ? NBNXN_INTERACTION_MASK_DIAG_J8_0 :
2804 (rdiag && ci == cj*2+1 ? NBNXN_INTERACTION_MASK_DIAG_J8_1 :
2805 NBNXN_INTERACTION_MASK_ALL));
2808 #ifdef GMX_NBNXN_SIMD
2809 #if GMX_SIMD_REAL_WIDTH == 2
2810 #define get_imask_simd_4xn get_imask_simd_j2
2812 #if GMX_SIMD_REAL_WIDTH == 4
2813 #define get_imask_simd_4xn get_imask_simd_j4
2815 #if GMX_SIMD_REAL_WIDTH == 8
2816 #define get_imask_simd_4xn get_imask_simd_j8
2817 #define get_imask_simd_2xnn get_imask_simd_j4
2819 #if GMX_SIMD_REAL_WIDTH == 16
2820 #define get_imask_simd_2xnn get_imask_simd_j8
2824 /* Plain C code for making a pair list of cell ci vs cell cjf-cjl.
2825 * Checks bounding box distances and possibly atom pair distances.
2827 static void make_cluster_list_simple(const nbnxn_grid_t *gridj,
2828 nbnxn_pairlist_t *nbl,
2829 int ci, int cjf, int cjl,
2830 gmx_bool remove_sub_diag,
2832 real rl2, float rbb2,
2835 const nbnxn_list_work_t *work;
2837 const nbnxn_bb_t *bb_ci;
2842 int cjf_gl, cjl_gl, cj;
2846 bb_ci = nbl->work->bb_ci;
2847 x_ci = nbl->work->x_ci;
2850 while (!InRange && cjf <= cjl)
2852 d2 = subc_bb_dist2(0, bb_ci, cjf, gridj->bb);
2855 /* Check if the distance is within the distance where
2856 * we use only the bounding box distance rbb,
2857 * or within the cut-off and there is at least one atom pair
2858 * within the cut-off.
2868 cjf_gl = gridj->cell0 + cjf;
2869 for (i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
2871 for (j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
2873 InRange = InRange ||
2874 (sqr(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
2875 sqr(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
2876 sqr(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rl2);
2879 *ndistc += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
2892 while (!InRange && cjl > cjf)
2894 d2 = subc_bb_dist2(0, bb_ci, cjl, gridj->bb);
2897 /* Check if the distance is within the distance where
2898 * we use only the bounding box distance rbb,
2899 * or within the cut-off and there is at least one atom pair
2900 * within the cut-off.
2910 cjl_gl = gridj->cell0 + cjl;
2911 for (i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
2913 for (j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
2915 InRange = InRange ||
2916 (sqr(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
2917 sqr(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
2918 sqr(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rl2);
2921 *ndistc += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
2931 for (cj = cjf; cj <= cjl; cj++)
2933 /* Store cj and the interaction mask */
2934 nbl->cj[nbl->ncj].cj = gridj->cell0 + cj;
2935 nbl->cj[nbl->ncj].excl = get_imask(remove_sub_diag, ci, cj);
2938 /* Increase the closing index in i super-cell list */
2939 nbl->ci[nbl->nci].cj_ind_end = nbl->ncj;
2943 #ifdef GMX_NBNXN_SIMD_4XN
2944 #include "gromacs/mdlib/nbnxn_search_simd_4xn.h"
2946 #ifdef GMX_NBNXN_SIMD_2XNN
2947 #include "gromacs/mdlib/nbnxn_search_simd_2xnn.h"
2950 /* Plain C or SIMD4 code for making a pair list of super-cell sci vs scj.
2951 * Checks bounding box distances and possibly atom pair distances.
2953 static void make_cluster_list_supersub(const nbnxn_grid_t *gridi,
2954 const nbnxn_grid_t *gridj,
2955 nbnxn_pairlist_t *nbl,
2957 gmx_bool sci_equals_scj,
2958 int stride, const real *x,
2959 real rl2, float rbb2,
2964 int cjo, ci1, ci, cj, cj_gl;
2965 int cj4_ind, cj_offset;
2969 const float *pbb_ci;
2971 const nbnxn_bb_t *bb_ci;
2976 #define PRUNE_LIST_CPU_ONE
2977 #ifdef PRUNE_LIST_CPU_ONE
2981 d2l = nbl->work->d2;
2984 pbb_ci = nbl->work->pbb_ci;
2986 bb_ci = nbl->work->bb_ci;
2988 x_ci = nbl->work->x_ci;
2992 for (cjo = 0; cjo < gridj->nsubc[scj]; cjo++)
2994 cj4_ind = (nbl->work->cj_ind >> NBNXN_GPU_JGROUP_SIZE_2LOG);
2995 cj_offset = nbl->work->cj_ind - cj4_ind*NBNXN_GPU_JGROUP_SIZE;
2996 cj4 = &nbl->cj4[cj4_ind];
2998 cj = scj*GPU_NSUBCELL + cjo;
3000 cj_gl = gridj->cell0*GPU_NSUBCELL + cj;
3002 /* Initialize this j-subcell i-subcell list */
3003 cj4->cj[cj_offset] = cj_gl;
3012 ci1 = gridi->nsubc[sci];
3016 /* Determine all ci1 bb distances in one call with SIMD4 */
3017 subc_bb_dist2_simd4_xxxx(gridj->pbb+(cj>>STRIDE_PBB_2LOG)*NNBSBB_XXXX+(cj & (STRIDE_PBB-1)),
3023 /* We use a fixed upper-bound instead of ci1 to help optimization */
3024 for (ci = 0; ci < GPU_NSUBCELL; ci++)
3031 #ifndef NBNXN_BBXXXX
3032 /* Determine the bb distance between ci and cj */
3033 d2l[ci] = subc_bb_dist2(ci, bb_ci, cj, gridj->bb);
3038 #ifdef PRUNE_LIST_CPU_ALL
3039 /* Check if the distance is within the distance where
3040 * we use only the bounding box distance rbb,
3041 * or within the cut-off and there is at least one atom pair
3042 * within the cut-off. This check is very costly.
3044 *ndistc += na_c*na_c;
3047 #ifdef NBNXN_PBB_SIMD4
3052 (na_c, ci, x_ci, cj_gl, stride, x, rl2)))
3054 /* Check if the distance between the two bounding boxes
3055 * is within the pair-list cut-off.
3060 /* Flag this i-subcell to be taken into account */
3061 imask |= (1U << (cj_offset*GPU_NSUBCELL+ci));
3063 #ifdef PRUNE_LIST_CPU_ONE
3071 #ifdef PRUNE_LIST_CPU_ONE
3072 /* If we only found 1 pair, check if any atoms are actually
3073 * within the cut-off, so we could get rid of it.
3075 if (npair == 1 && d2l[ci_last] >= rbb2)
3077 /* Avoid using function pointers here, as it's slower */
3079 #ifdef NBNXN_PBB_SIMD4
3080 !subc_in_range_simd4
3084 (na_c, ci_last, x_ci, cj_gl, stride, x, rl2))
3086 imask &= ~(1U << (cj_offset*GPU_NSUBCELL+ci_last));
3094 /* We have a useful sj entry, close it now */
3096 /* Set the exclusions for the ci == sj entry.
3097 * Here we don't bother to check if this entry is actually flagged,
3098 * as it will nearly always be in the list.
3102 set_self_and_newton_excls_supersub(nbl, cj4_ind, cj_offset, cjo);
3105 /* Copy the cluster interaction mask to the list */
3106 for (w = 0; w < NWARP; w++)
3108 cj4->imei[w].imask |= imask;
3111 nbl->work->cj_ind++;
3113 /* Keep the count */
3114 nbl->nci_tot += npair;
3116 /* Increase the closing index in i super-cell list */
3117 nbl->sci[nbl->nsci].cj4_ind_end =
3118 ((nbl->work->cj_ind+NBNXN_GPU_JGROUP_SIZE-1) >> NBNXN_GPU_JGROUP_SIZE_2LOG);
3123 /* Set all atom-pair exclusions from the topology stored in excl
3124 * as masks in the pair-list for simple list i-entry nbl_ci
3126 static void set_ci_top_excls(const nbnxn_search_t nbs,
3127 nbnxn_pairlist_t *nbl,
3128 gmx_bool diagRemoved,
3131 const nbnxn_ci_t *nbl_ci,
3132 const t_blocka *excl)
3136 int cj_ind_first, cj_ind_last;
3137 int cj_first, cj_last;
3139 int i, ai, aj, si, eind, ge, se;
3140 int found, cj_ind_0, cj_ind_1, cj_ind_m;
3144 nbnxn_excl_t *nbl_excl;
3145 int inner_i, inner_e;
3149 if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
3157 cj_ind_first = nbl_ci->cj_ind_start;
3158 cj_ind_last = nbl->ncj - 1;
3160 cj_first = nbl->cj[cj_ind_first].cj;
3161 cj_last = nbl->cj[cj_ind_last].cj;
3163 /* Determine how many contiguous j-cells we have starting
3164 * from the first i-cell. This number can be used to directly
3165 * calculate j-cell indices for excluded atoms.
3168 if (na_ci_2log == na_cj_2log)
3170 while (cj_ind_first + ndirect <= cj_ind_last &&
3171 nbl->cj[cj_ind_first+ndirect].cj == ci + ndirect)
3176 #ifdef NBNXN_SEARCH_BB_SIMD4
3179 while (cj_ind_first + ndirect <= cj_ind_last &&
3180 nbl->cj[cj_ind_first+ndirect].cj == ci_to_cj(na_cj_2log, ci) + ndirect)
3187 /* Loop over the atoms in the i super-cell */
3188 for (i = 0; i < nbl->na_sc; i++)
3190 ai = nbs->a[ci*nbl->na_sc+i];
3193 si = (i>>na_ci_2log);
3195 /* Loop over the topology-based exclusions for this i-atom */
3196 for (eind = excl->index[ai]; eind < excl->index[ai+1]; eind++)
3202 /* The self exclusions are already set, which saves some time */
3208 /* Without shifts we only calculate interactions j>i
3209 * for one-way pair-lists.
3211 if (diagRemoved && ge <= ci*nbl->na_sc + i)
3216 se = (ge >> na_cj_2log);
3218 /* Could the cluster se be in our list? */
3219 if (se >= cj_first && se <= cj_last)
3221 if (se < cj_first + ndirect)
3223 /* We can calculate cj_ind directly from se */
3224 found = cj_ind_first + se - cj_first;
3228 /* Search for se using bisection */
3230 cj_ind_0 = cj_ind_first + ndirect;
3231 cj_ind_1 = cj_ind_last + 1;
3232 while (found == -1 && cj_ind_0 < cj_ind_1)
3234 cj_ind_m = (cj_ind_0 + cj_ind_1)>>1;
3236 cj_m = nbl->cj[cj_ind_m].cj;
3244 cj_ind_1 = cj_ind_m;
3248 cj_ind_0 = cj_ind_m + 1;
3255 inner_i = i - (si << na_ci_2log);
3256 inner_e = ge - (se << na_cj_2log);
3258 nbl->cj[found].excl &= ~(1U<<((inner_i<<na_cj_2log) + inner_e));
3266 /* Add a new i-entry to the FEP list and copy the i-properties */
3267 static gmx_inline void fep_list_new_nri_copy(t_nblist *nlist)
3269 /* Add a new i-entry */
3272 assert(nlist->nri < nlist->maxnri);
3274 /* Duplicate the last i-entry, except for jindex, which continues */
3275 nlist->iinr[nlist->nri] = nlist->iinr[nlist->nri-1];
3276 nlist->shift[nlist->nri] = nlist->shift[nlist->nri-1];
3277 nlist->gid[nlist->nri] = nlist->gid[nlist->nri-1];
3278 nlist->jindex[nlist->nri] = nlist->nrj;
3281 /* For load balancing of the free-energy lists over threads, we set
3282 * the maximum nrj size of an i-entry to 40. This leads to good
3283 * load balancing in the worst case scenario of a single perturbed
3284 * particle on 16 threads, while not introducing significant overhead.
3285 * Note that half of the perturbed pairs will anyhow end up in very small lists,
3286 * since non-perturbed i-particles will see few perturbed j-particles.
3288 const int max_nrj_fep = 40;
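/* Worked example (illustrative numbers): a single perturbed i-particle
 * with 640 in-range j-pairs is split into 640/40 = 16 i-entries of
 * max_nrj_fep pairs each, which balance_fep_lists() below can then
 * distribute as one entry per thread on 16 threads.
 */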
3290 /* Exclude the perturbed pairs from the Verlet list. This is only done to avoid
3291 * singularities for overlapping particles (0/0), since the charges and
3292 * LJ parameters have been zeroed in the nbnxn data structure.
3293 * Simultaneously make a group pair list for the perturbed pairs.
3295 static void make_fep_list(const nbnxn_search_t nbs,
3296 const nbnxn_atomdata_t *nbat,
3297 nbnxn_pairlist_t *nbl,
3298 gmx_bool bDiagRemoved,
3300 const nbnxn_grid_t *gridi,
3301 const nbnxn_grid_t *gridj,
3304 int ci, cj_ind_start, cj_ind_end, cj_ind, cja, cjr;
3306 int ngid, gid_i = 0, gid_j, gid;
3307 int egp_shift, egp_mask;
3309 int i, j, ind_i, ind_j, ai, aj;
3311 gmx_bool bFEP_i, bFEP_i_all;
3313 if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
3321 cj_ind_start = nbl_ci->cj_ind_start;
3322 cj_ind_end = nbl_ci->cj_ind_end;
3324 /* In worst case we have alternating energy groups
3325 * and create #atom-pair lists, which means we need the size
3326 * of a cluster pair (na_ci*na_cj) times the number of cj's.
3328 nri_max = nbl->na_ci*nbl->na_cj*(cj_ind_end - cj_ind_start);
3329 if (nlist->nri + nri_max > nlist->maxnri)
3331 nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
3332 reallocate_nblist(nlist);
3335 ngid = nbat->nenergrp;
3337 if (ngid*gridj->na_cj > sizeof(gid_cj)*8)
3339 gmx_fatal(FARGS, "The Verlet scheme with %dx%d kernels and free-energy only supports up to %d energy groups",
3340 gridi->na_c, gridj->na_cj, (sizeof(gid_cj)*8)/gridj->na_cj);
3343 egp_shift = nbat->neg_2log;
3344 egp_mask = (1<<nbat->neg_2log) - 1;
3346 /* Loop over the atoms in the i sub-cell */
3348 for (i = 0; i < nbl->na_ci; i++)
3350 ind_i = ci*nbl->na_ci + i;
3355 nlist->jindex[nri+1] = nlist->jindex[nri];
3356 nlist->iinr[nri] = ai;
3357 /* The actual energy group pair index is set later */
3358 nlist->gid[nri] = 0;
3359 nlist->shift[nri] = nbl_ci->shift & NBNXN_CI_SHIFT;
3361 bFEP_i = gridi->fep[ci - gridi->cell0] & (1 << i);
3363 bFEP_i_all = bFEP_i_all && bFEP_i;
3365 if ((nlist->nrj + cj_ind_end - cj_ind_start)*nbl->na_cj > nlist->maxnrj)
3367 nlist->maxnrj = over_alloc_small((nlist->nrj + cj_ind_end - cj_ind_start)*nbl->na_cj);
3368 srenew(nlist->jjnr, nlist->maxnrj);
3369 srenew(nlist->excl_fep, nlist->maxnrj);
3374 gid_i = (nbat->energrp[ci] >> (egp_shift*i)) & egp_mask;
3377 for (cj_ind = cj_ind_start; cj_ind < cj_ind_end; cj_ind++)
3379 unsigned int fep_cj;
3381 cja = nbl->cj[cj_ind].cj;
3383 if (gridj->na_cj == gridj->na_c)
3385 cjr = cja - gridj->cell0;
3386 fep_cj = gridj->fep[cjr];
3389 gid_cj = nbat->energrp[cja];
3392 else if (2*gridj->na_cj == gridj->na_c)
3394 cjr = cja - gridj->cell0*2;
3395 /* Extract half of the ci fep/energrp mask */
3396 fep_cj = (gridj->fep[cjr>>1] >> ((cjr&1)*gridj->na_cj)) & ((1<<gridj->na_cj) - 1);
3399 gid_cj = nbat->energrp[cja>>1] >> ((cja&1)*gridj->na_cj*egp_shift) & ((1<<(gridj->na_cj*egp_shift)) - 1);
3404 cjr = cja - (gridj->cell0>>1);
3405 /* Combine two ci fep masks/energrp */
3406 fep_cj = gridj->fep[cjr*2] + (gridj->fep[cjr*2+1] << gridj->na_c);
3409 gid_cj = nbat->energrp[cja*2] + (nbat->energrp[cja*2+1] << (gridj->na_c*egp_shift));
3413 if (bFEP_i || fep_cj != 0)
3415 for (j = 0; j < nbl->na_cj; j++)
3417 /* Is this interaction perturbed and not excluded? */
3418 ind_j = cja*nbl->na_cj + j;
3421 (bFEP_i || (fep_cj & (1 << j))) &&
3422 (!bDiagRemoved || ind_j >= ind_i))
3426 gid_j = (gid_cj >> (j*egp_shift)) & egp_mask;
3427 gid = GID(gid_i, gid_j, ngid);
3429 if (nlist->nrj > nlist->jindex[nri] &&
3430 nlist->gid[nri] != gid)
3432 /* Energy group pair changed: new list */
3433 fep_list_new_nri_copy(nlist);
3436 nlist->gid[nri] = gid;
3439 if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
3441 fep_list_new_nri_copy(nlist);
3445 /* Add it to the FEP list */
3446 nlist->jjnr[nlist->nrj] = aj;
3447 nlist->excl_fep[nlist->nrj] = (nbl->cj[cj_ind].excl >> (i*nbl->na_cj + j)) & 1;
3450 /* Exclude it from the normal list.
3451 * Note that the charge has been set to zero,
3452 * but we need to avoid 0/0, as perturbed atoms
3453 * can be on top of each other.
3455 nbl->cj[cj_ind].excl &= ~(1U << (i*nbl->na_cj + j));
3461 if (nlist->nrj > nlist->jindex[nri])
3463 /* Actually add this new, non-empty, list */
3465 nlist->jindex[nlist->nri] = nlist->nrj;
3472 /* All interactions are perturbed, we can skip this entry */
3473 nbl_ci->cj_ind_end = cj_ind_start;
3477 /* Returns the index of a j-cluster within its cj4 group */
3478 static gmx_inline int cj_mod_cj4(int cj)
3480 return cj & (NBNXN_GPU_JGROUP_SIZE - 1);
3483 /* Convert a j-cluster to a cj4 group */
3484 static gmx_inline int cj_to_cj4(int cj)
3486 return cj >> NBNXN_GPU_JGROUP_SIZE_2LOG;
3489 /* Returns the index of a j-atom within a warp */
3490 static gmx_inline int a_mod_wj(int a)
3492 return a & (NBNXN_GPU_CLUSTER_SIZE/2 - 1);
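/* Illustrative note (not part of the original source), assuming
 * NBNXN_GPU_JGROUP_SIZE = 4 and NBNXN_GPU_CLUSTER_SIZE = 8:
 * cj = 13 gives cj_to_cj4(13) = 3 and cj_mod_cj4(13) = 1, while j-atom
 * a = 5 gives a_mod_wj(5) = 5 & 3 = 1, matching the j>>2 warp index
 * used in make_fep_list_supersub() below.
 */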
3495 /* As make_fep_list above, but for super/sub lists. */
3496 static void make_fep_list_supersub(const nbnxn_search_t nbs,
3497 const nbnxn_atomdata_t *nbat,
3498 nbnxn_pairlist_t *nbl,
3499 gmx_bool bDiagRemoved,
3500 const nbnxn_sci_t *nbl_sci,
3505 const nbnxn_grid_t *gridi,
3506 const nbnxn_grid_t *gridj,
3509 int sci, cj4_ind_start, cj4_ind_end, cj4_ind, gcj, cjr;
3512 int i, j, ind_i, ind_j, ai, aj;
3516 const nbnxn_cj4_t *cj4;
3518 if (nbl_sci->cj4_ind_end == nbl_sci->cj4_ind_start)
3526 cj4_ind_start = nbl_sci->cj4_ind_start;
3527 cj4_ind_end = nbl_sci->cj4_ind_end;
3529 /* Here we process one super-cell, with at most na_sc atoms, versus a list
3530 * of cj4 entries, each with at most NBNXN_GPU_JGROUP_SIZE cj's, each
3531 * of size na_cj atoms.
3532 * On the GPU we don't support energy groups (yet).
3533 * So for each of the na_sc i-atoms, we need at most one FEP list
3534 * per max_nrj_fep j-atoms.
3536 nri_max = nbl->na_sc*nbl->na_cj*(1 + ((cj4_ind_end - cj4_ind_start)*NBNXN_GPU_JGROUP_SIZE)/max_nrj_fep);
3537 if (nlist->nri + nri_max > nlist->maxnri)
3539 nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
3540 reallocate_nblist(nlist);
3543 /* Loop over the atoms in the i super-cluster */
3544 for (c = 0; c < GPU_NSUBCELL; c++)
3546 c_abs = sci*GPU_NSUBCELL + c;
3548 for (i = 0; i < nbl->na_ci; i++)
3550 ind_i = c_abs*nbl->na_ci + i;
3555 nlist->jindex[nri+1] = nlist->jindex[nri];
3556 nlist->iinr[nri] = ai;
3557 /* With GPUs, energy groups are not supported */
3558 nlist->gid[nri] = 0;
3559 nlist->shift[nri] = nbl_sci->shift & NBNXN_CI_SHIFT;
3561 bFEP_i = (gridi->fep[c_abs - gridi->cell0] & (1 << i));
3563 xi = nbat->x[ind_i*nbat->xstride+XX] + shx;
3564 yi = nbat->x[ind_i*nbat->xstride+YY] + shy;
3565 zi = nbat->x[ind_i*nbat->xstride+ZZ] + shz;
3567 if ((nlist->nrj + cj4_ind_end - cj4_ind_start)*NBNXN_GPU_JGROUP_SIZE*nbl->na_cj > nlist->maxnrj)
3569 nlist->maxnrj = over_alloc_small((nlist->nrj + cj4_ind_end - cj4_ind_start)*NBNXN_GPU_JGROUP_SIZE*nbl->na_cj);
3570 srenew(nlist->jjnr, nlist->maxnrj);
3571 srenew(nlist->excl_fep, nlist->maxnrj);
3574 for (cj4_ind = cj4_ind_start; cj4_ind < cj4_ind_end; cj4_ind++)
3576 cj4 = &nbl->cj4[cj4_ind];
3578 for (gcj = 0; gcj < NBNXN_GPU_JGROUP_SIZE; gcj++)
3580 unsigned int fep_cj;
3582 if ((cj4->imei[0].imask & (1U << (gcj*GPU_NSUBCELL + c))) == 0)
3584 /* Skip this ci for this cj */
3588 cjr = cj4->cj[gcj] - gridj->cell0*GPU_NSUBCELL;
3590 fep_cj = gridj->fep[cjr];
3592 if (bFEP_i || fep_cj != 0)
3594 for (j = 0; j < nbl->na_cj; j++)
3596 /* Is this interaction perturbed and not excluded? */
3597 ind_j = (gridj->cell0*GPU_NSUBCELL + cjr)*nbl->na_cj + j;
3600 (bFEP_i || (fep_cj & (1 << j))) &&
3601 (!bDiagRemoved || ind_j >= ind_i))
3605 unsigned int excl_bit;
3608 get_nbl_exclusions_1(nbl, cj4_ind, j>>2, &excl);
3610 excl_pair = a_mod_wj(j)*nbl->na_ci + i;
3611 excl_bit = (1U << (gcj*GPU_NSUBCELL + c));
3613 dx = nbat->x[ind_j*nbat->xstride+XX] - xi;
3614 dy = nbat->x[ind_j*nbat->xstride+YY] - yi;
3615 dz = nbat->x[ind_j*nbat->xstride+ZZ] - zi;
3617 /* The unpruned GPU list has more than 2/3
3618 * of the atom pairs beyond rlist. Using
3619 * this list will cause a lot of overhead
3620 * in the CPU FEP kernels, especially
3621 * relative to the fast GPU kernels.
3622 * So we prune the FEP list here.
3624 if (dx*dx + dy*dy + dz*dz < rlist_fep2)
3626 if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
3628 fep_list_new_nri_copy(nlist);
3632 /* Add it to the FEP list */
3633 nlist->jjnr[nlist->nrj] = aj;
3634 nlist->excl_fep[nlist->nrj] = (excl->pair[excl_pair] & excl_bit) ? 1 : 0;
3638 /* Exclude it from the normal list.
3639 * Note that the charge and LJ parameters have
3640 * been set to zero, but we need to avoid 0/0,
3641 * as perturbed atoms can be on top of each other.
3643 excl->pair[excl_pair] &= ~excl_bit;
3647 /* Note that we could mask out this pair in imask
3648 * if all i- and/or all j-particles are perturbed.
3649 * But since the perturbed pairs on the CPU will
3650 * take an order of magnitude more time, the GPU
3651 * will finish before the CPU and there is no gain.
3657 if (nlist->nrj > nlist->jindex[nri])
3659 /* Actually add this new, non-empty, list */
3661 nlist->jindex[nlist->nri] = nlist->nrj;
3668 /* Set all atom-pair exclusions from the topology stored in excl
3669 * as masks in the pair-list for i-super-cell entry nbl_sci
3671 static void set_sci_top_excls(const nbnxn_search_t nbs,
3672 nbnxn_pairlist_t *nbl,
3673 gmx_bool diagRemoved,
3675 const nbnxn_sci_t *nbl_sci,
3676 const t_blocka *excl)
3681 int cj_ind_first, cj_ind_last;
3682 int cj_first, cj_last;
3684 int i, ai, aj, si, eind, ge, se;
3685 int found, cj_ind_0, cj_ind_1, cj_ind_m;
3689 nbnxn_excl_t *nbl_excl;
3690 int inner_i, inner_e, w;
3696 if (nbl_sci->cj4_ind_end == nbl_sci->cj4_ind_start)
3704 cj_ind_first = nbl_sci->cj4_ind_start*NBNXN_GPU_JGROUP_SIZE;
3705 cj_ind_last = nbl->work->cj_ind - 1;
3707 cj_first = nbl->cj4[nbl_sci->cj4_ind_start].cj[0];
3708 cj_last = nbl_cj(nbl, cj_ind_last);
3710 /* Determine how many contiguous j-clusters we have starting
3711 * from the first i-cluster. This number can be used to directly
3712 * calculate j-cluster indices for excluded atoms.
3715 while (cj_ind_first + ndirect <= cj_ind_last &&
3716 nbl_cj(nbl, cj_ind_first+ndirect) == sci*GPU_NSUBCELL + ndirect)
3721 /* Loop over the atoms in the i super-cell */
3722 for (i = 0; i < nbl->na_sc; i++)
3724 ai = nbs->a[sci*nbl->na_sc+i];
3727 si = (i>>na_c_2log);
3729 /* Loop over the topology-based exclusions for this i-atom */
3730 for (eind = excl->index[ai]; eind < excl->index[ai+1]; eind++)
3736 /* The self exclusions are already set, which saves some time */
3742 /* Without shifts we only calculate interactions j>i
3743 * for one-way pair-lists.
3745 if (diagRemoved && ge <= sci*nbl->na_sc + i)
3751 /* Could the cluster se be in our list? */
3752 if (se >= cj_first && se <= cj_last)
3754 if (se < cj_first + ndirect)
3756 /* We can calculate cj_ind directly from se */
3757 found = cj_ind_first + se - cj_first;
3761 /* Search for se using bisection */
3763 cj_ind_0 = cj_ind_first + ndirect;
3764 cj_ind_1 = cj_ind_last + 1;
3765 while (found == -1 && cj_ind_0 < cj_ind_1)
3767 cj_ind_m = (cj_ind_0 + cj_ind_1)>>1;
3769 cj_m = nbl_cj(nbl, cj_ind_m);
3777 cj_ind_1 = cj_ind_m;
3781 cj_ind_0 = cj_ind_m + 1;
3788 inner_i = i - si*na_c;
3789 inner_e = ge - se*na_c;
3791 if (nbl_imask0(nbl, found) & (1U << (cj_mod_cj4(found)*GPU_NSUBCELL + si)))
3795 get_nbl_exclusions_1(nbl, cj_to_cj4(found), w, &nbl_excl);
3797 nbl_excl->pair[a_mod_wj(inner_e)*nbl->na_ci+inner_i] &=
3798 ~(1U << (cj_mod_cj4(found)*GPU_NSUBCELL + si));
3807 /* Reallocate the simple ci list for at least n entries */
3808 static void nb_realloc_ci(nbnxn_pairlist_t *nbl, int n)
3810 nbl->ci_nalloc = over_alloc_small(n);
3811 nbnxn_realloc_void((void **)&nbl->ci,
3812 nbl->nci*sizeof(*nbl->ci),
3813 nbl->ci_nalloc*sizeof(*nbl->ci),
3814 nbl->alloc, nbl->free);
3817 /* Reallocate the super-cell sci list for at least n entries */
3818 static void nb_realloc_sci(nbnxn_pairlist_t *nbl, int n)
3820 nbl->sci_nalloc = over_alloc_small(n);
3821 nbnxn_realloc_void((void **)&nbl->sci,
3822 nbl->nsci*sizeof(*nbl->sci),
3823 nbl->sci_nalloc*sizeof(*nbl->sci),
3824 nbl->alloc, nbl->free);
3827 /* Make a new ci entry at index nbl->nci */
3828 static void new_ci_entry(nbnxn_pairlist_t *nbl, int ci, int shift, int flags)
3830 if (nbl->nci + 1 > nbl->ci_nalloc)
3832 nb_realloc_ci(nbl, nbl->nci+1);
3834 nbl->ci[nbl->nci].ci = ci;
3835 nbl->ci[nbl->nci].shift = shift;
3836 /* Store the interaction flags along with the shift */
3837 nbl->ci[nbl->nci].shift |= flags;
3838 nbl->ci[nbl->nci].cj_ind_start = nbl->ncj;
3839 nbl->ci[nbl->nci].cj_ind_end = nbl->ncj;
3842 /* Make a new sci entry at index nbl->nsci */
3843 static void new_sci_entry(nbnxn_pairlist_t *nbl, int sci, int shift)
3845 if (nbl->nsci + 1 > nbl->sci_nalloc)
3847 nb_realloc_sci(nbl, nbl->nsci+1);
3849 nbl->sci[nbl->nsci].sci = sci;
3850 nbl->sci[nbl->nsci].shift = shift;
3851 nbl->sci[nbl->nsci].cj4_ind_start = nbl->ncj4;
3852 nbl->sci[nbl->nsci].cj4_ind_end = nbl->ncj4;
3855 /* Sort the simple j-list cj on exclusions.
3856 * Entries with exclusions will all be sorted to the beginning of the list.
3858 static void sort_cj_excl(nbnxn_cj_t *cj, int ncj,
3859 nbnxn_list_work_t *work)
3863 if (ncj > work->cj_nalloc)
3865 work->cj_nalloc = over_alloc_large(ncj);
3866 srenew(work->cj, work->cj_nalloc);
3869 /* Make a list of the j-cells involving exclusions */
3871 for (j = 0; j < ncj; j++)
3873 if (cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
3875 work->cj[jnew++] = cj[j];
3878 /* Only reorder when there are exclusions and they are not already confined to the first entry */
3879 if (!((jnew == 0) ||
3880 (jnew == 1 && cj[0].excl != NBNXN_INTERACTION_MASK_ALL)))
3882 for (j = 0; j < ncj; j++)
3884 if (cj[j].excl == NBNXN_INTERACTION_MASK_ALL)
3886 work->cj[jnew++] = cj[j];
3889 for (j = 0; j < ncj; j++)
3891 cj[j] = work->cj[j];
3896 /* Close this simple list i entry */
3897 static void close_ci_entry_simple(nbnxn_pairlist_t *nbl)
3901 /* All content of the new ci entry has already been filled correctly;
3902 * we only need to increase the count here (for non-empty lists).
3904 jlen = nbl->ci[nbl->nci].cj_ind_end - nbl->ci[nbl->nci].cj_ind_start;
3907 sort_cj_excl(nbl->cj+nbl->ci[nbl->nci].cj_ind_start, jlen, nbl->work);
3909 /* The counts below are used for non-bonded pair/flop counts
3910 * and should therefore match the available kernel setups.
3912 if (!(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_COUL(0)))
3914 nbl->work->ncj_noq += jlen;
3916 else if ((nbl->ci[nbl->nci].shift & NBNXN_CI_HALF_LJ(0)) ||
3917 !(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_LJ(0)))
3919 nbl->work->ncj_hlj += jlen;
3926 /* Split sci entry for load balancing on the GPU.
3927 * Splitting ensures we have enough lists to fully utilize the whole GPU.
3928 * With progBal we generate progressively smaller lists, which improves
3929 * load balancing. As we only know the current count on our own thread,
3930 * we will need to estimate the current total amount of i-entries.
3931 * As the lists get concatenated later, this estimate depends
3932 * both on nthread and our own thread index.
3934 static void split_sci_entry(nbnxn_pairlist_t *nbl,
3935 int nsp_max_av, gmx_bool progBal, int nc_bal,
3936 int thread, int nthread)
3940 int cj4_start, cj4_end, j4len, cj4;
3942 int nsp, nsp_sci, nsp_cj4, nsp_cj4_e, nsp_cj4_p;
3947 /* Estimate the total number of ci's of the nblist combined
3948 * over all threads, using the target number of ci's.
3950 nsci_est = nc_bal*thread/nthread + nbl->nsci;
3952 /* The first ci blocks should be larger, to avoid overhead.
3953 * The last ci blocks should be smaller, to improve load balancing.
3956 nsp_max_av*nc_bal*3/(2*(nsci_est - 1 + nc_bal)));
3960 nsp_max = nsp_max_av;
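/* Illustrative consequence of the progBal formula above: for the first
 * i-entry on thread 0, nsci_est is about 1 and nsp_max is about
 * 1.5*nsp_max_av; as nsci_est approaches nc_bal it drops towards
 * 0.75*nsp_max_av, so early lists are larger and later ones smaller.
 */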
3963 cj4_start = nbl->sci[nbl->nsci-1].cj4_ind_start;
3964 cj4_end = nbl->sci[nbl->nsci-1].cj4_ind_end;
3965 j4len = cj4_end - cj4_start;
3967 if (j4len > 1 && j4len*GPU_NSUBCELL*NBNXN_GPU_JGROUP_SIZE > nsp_max)
3969 /* Remove the last ci entry and process the cj4's again */
3977 for (cj4 = cj4_start; cj4 < cj4_end; cj4++)
3979 nsp_cj4_p = nsp_cj4;
3980 /* Count the number of cluster pairs in this cj4 group */
3982 for (p = 0; p < GPU_NSUBCELL*NBNXN_GPU_JGROUP_SIZE; p++)
3984 nsp_cj4 += (nbl->cj4[cj4].imei[0].imask >> p) & 1;
3987 if (nsp_cj4 > 0 && nsp + nsp_cj4 > nsp_max)
3989 /* Split the list at cj4 */
3990 nbl->sci[sci].cj4_ind_end = cj4;
3991 /* Create a new sci entry */
3994 if (nbl->nsci+1 > nbl->sci_nalloc)
3996 nb_realloc_sci(nbl, nbl->nsci+1);
3998 nbl->sci[sci].sci = nbl->sci[nbl->nsci-1].sci;
3999 nbl->sci[sci].shift = nbl->sci[nbl->nsci-1].shift;
4000 nbl->sci[sci].cj4_ind_start = cj4;
4002 nsp_cj4_e = nsp_cj4_p;
4008 /* Put the remaining cj4's in the last sci entry */
4009 nbl->sci[sci].cj4_ind_end = cj4_end;
4011 /* Possibly balance out the last two sci's
4012 * by moving the last cj4 of the second last sci.
4014 if (nsp_sci - nsp_cj4_e >= nsp + nsp_cj4_e)
4016 nbl->sci[sci-1].cj4_ind_end--;
4017 nbl->sci[sci].cj4_ind_start--;
4024 /* Close this super/sub list i entry */
4025 static void close_ci_entry_supersub(nbnxn_pairlist_t *nbl,
4027 gmx_bool progBal, int nc_bal,
4028 int thread, int nthread)
4033 /* All content of the new ci entry has already been filled correctly;
4034 * we only need to increase the count here (for non-empty lists).
4036 j4len = nbl->sci[nbl->nsci].cj4_ind_end - nbl->sci[nbl->nsci].cj4_ind_start;
4039 /* We can only have complete blocks of 4 j-entries in a list,
4040 * so round the count up before closing.
4042 nbl->ncj4 = ((nbl->work->cj_ind + NBNXN_GPU_JGROUP_SIZE - 1) >> NBNXN_GPU_JGROUP_SIZE_2LOG);
4043 nbl->work->cj_ind = nbl->ncj4*NBNXN_GPU_JGROUP_SIZE;
4049 /* Measure the size of the new entry and potentially split it */
4050 split_sci_entry(nbl, nsp_max_av, progBal, nc_bal, thread, nthread);
4055 /* Syncs the working array before adding another grid pair to the list */
4056 static void sync_work(nbnxn_pairlist_t *nbl)
4060 nbl->work->cj_ind = nbl->ncj4*NBNXN_GPU_JGROUP_SIZE;
4061 nbl->work->cj4_init = nbl->ncj4;
4065 /* Clears an nbnxn_pairlist_t data structure */
4066 static void clear_pairlist(nbnxn_pairlist_t *nbl)
4075 nbl->work->ncj_noq = 0;
4076 nbl->work->ncj_hlj = 0;
4079 /* Clears a group scheme pair list */
4080 static void clear_pairlist_fep(t_nblist *nl)
4084 if (nl->jindex == NULL)
4086 snew(nl->jindex, 1);
4091 /* Sets a simple list i-cell bounding box, including PBC shift */
4092 static gmx_inline void set_icell_bb_simple(const nbnxn_bb_t *bb, int ci,
4093 real shx, real shy, real shz,
4096 bb_ci->lower[BB_X] = bb[ci].lower[BB_X] + shx;
4097 bb_ci->lower[BB_Y] = bb[ci].lower[BB_Y] + shy;
4098 bb_ci->lower[BB_Z] = bb[ci].lower[BB_Z] + shz;
4099 bb_ci->upper[BB_X] = bb[ci].upper[BB_X] + shx;
4100 bb_ci->upper[BB_Y] = bb[ci].upper[BB_Y] + shy;
4101 bb_ci->upper[BB_Z] = bb[ci].upper[BB_Z] + shz;
4105 /* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
4106 static void set_icell_bbxxxx_supersub(const float *bb, int ci,
4107 real shx, real shy, real shz,
4112 ia = ci*(GPU_NSUBCELL>>STRIDE_PBB_2LOG)*NNBSBB_XXXX;
4113 for (m = 0; m < (GPU_NSUBCELL>>STRIDE_PBB_2LOG)*NNBSBB_XXXX; m += NNBSBB_XXXX)
4115 for (i = 0; i < STRIDE_PBB; i++)
4117 bb_ci[m+0*STRIDE_PBB+i] = bb[ia+m+0*STRIDE_PBB+i] + shx;
4118 bb_ci[m+1*STRIDE_PBB+i] = bb[ia+m+1*STRIDE_PBB+i] + shy;
4119 bb_ci[m+2*STRIDE_PBB+i] = bb[ia+m+2*STRIDE_PBB+i] + shz;
4120 bb_ci[m+3*STRIDE_PBB+i] = bb[ia+m+3*STRIDE_PBB+i] + shx;
4121 bb_ci[m+4*STRIDE_PBB+i] = bb[ia+m+4*STRIDE_PBB+i] + shy;
4122 bb_ci[m+5*STRIDE_PBB+i] = bb[ia+m+5*STRIDE_PBB+i] + shz;
4128 /* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
4129 static void set_icell_bb_supersub(const nbnxn_bb_t *bb, int ci,
4130 real shx, real shy, real shz,
4135 for (i = 0; i < GPU_NSUBCELL; i++)
4137 set_icell_bb_simple(bb, ci*GPU_NSUBCELL+i,
4143 /* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
4144 static void icell_set_x_simple(int ci,
4145 real shx, real shy, real shz,
4146 int gmx_unused na_c,
4147 int stride, const real *x,
4148 nbnxn_list_work_t *work)
4152 ia = ci*NBNXN_CPU_CLUSTER_I_SIZE;
4154 for (i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE; i++)
4156 work->x_ci[i*STRIDE_XYZ+XX] = x[(ia+i)*stride+XX] + shx;
4157 work->x_ci[i*STRIDE_XYZ+YY] = x[(ia+i)*stride+YY] + shy;
4158 work->x_ci[i*STRIDE_XYZ+ZZ] = x[(ia+i)*stride+ZZ] + shz;
4162 /* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
4163 static void icell_set_x_supersub(int ci,
4164 real shx, real shy, real shz,
4166 int stride, const real *x,
4167 nbnxn_list_work_t *work)
4174 ia = ci*GPU_NSUBCELL*na_c;
4175 for (i = 0; i < GPU_NSUBCELL*na_c; i++)
4177 x_ci[i*DIM + XX] = x[(ia+i)*stride + XX] + shx;
4178 x_ci[i*DIM + YY] = x[(ia+i)*stride + YY] + shy;
4179 x_ci[i*DIM + ZZ] = x[(ia+i)*stride + ZZ] + shz;
4183 #ifdef NBNXN_SEARCH_BB_SIMD4
4184 /* Copies PBC shifted super-cell packed atom coordinates to working array */
4185 static void icell_set_x_supersub_simd4(int ci,
4186 real shx, real shy, real shz,
4188 int stride, const real *x,
4189 nbnxn_list_work_t *work)
4191 int si, io, ia, i, j;
4196 for (si = 0; si < GPU_NSUBCELL; si++)
4198 for (i = 0; i < na_c; i += STRIDE_PBB)
4201 ia = ci*GPU_NSUBCELL*na_c + io;
4202 for (j = 0; j < STRIDE_PBB; j++)
4204 x_ci[io*DIM + j + XX*STRIDE_PBB] = x[(ia+j)*stride+XX] + shx;
4205 x_ci[io*DIM + j + YY*STRIDE_PBB] = x[(ia+j)*stride+YY] + shy;
4206 x_ci[io*DIM + j + ZZ*STRIDE_PBB] = x[(ia+j)*stride+ZZ] + shz;
4213 static real minimum_subgrid_size_xy(const nbnxn_grid_t *grid)
4217 return min(grid->sx, grid->sy);
4221 return min(grid->sx/GPU_NSUBCELL_X, grid->sy/GPU_NSUBCELL_Y);
4225 static real effective_buffer_1x1_vs_MxN(const nbnxn_grid_t *gridi,
4226 const nbnxn_grid_t *gridj)
4228 const real eff_1x1_buffer_fac_overest = 0.1;
4230 /* Determine an atom-pair list cut-off buffer size for atom pairs,
4231 * to be added to rlist (including buffer) used for MxN.
4232 * This is for converting an MxN list to a 1x1 list. This means we can't
4233 * use the normal buffer estimate, as we have an MxN list in which
4234 * some atom pairs beyond rlist are missing. We want to capture
4235 * the beneficial effect of buffering by extra pairs just outside rlist,
4236 * while removing the useless pairs that are further away from rlist.
4237 * (Also the buffer could have been set manually not using the estimate.)
4238 * This buffer size is an overestimate.
4239 * We add 10% of the smallest grid sub-cell dimensions.
4240 * Note that the z-size differs per cell and we don't use this,
4241 * so we overestimate.
4242 * With PME, the 10% value gives a buffer that is somewhat larger
4243 * than the effective buffer with a tolerance of 0.005 kJ/mol/ps.
4244 * Smaller tolerances or using RF lead to a smaller effective buffer,
4245 * so 10% gives a safe overestimate.
4247 return eff_1x1_buffer_fac_overest*(minimum_subgrid_size_xy(gridi) +
4248 minimum_subgrid_size_xy(gridj));
4251 /* Clusters at the cut-off only increase rlist by 60% of their size */
4252 static real nbnxn_rlist_inc_outside_fac = 0.6;
4254 /* Due to the cluster size the effective pair-list is longer than
4255 * that of a simple atom pair-list. This function gives the extra distance.
4257 real nbnxn_get_rlist_effective_inc(int cluster_size_j, real atom_density)
4260 real vol_inc_i, vol_inc_j;
4262 /* We should get this from the setup, but currently it's the same for
4263 * all setups, including GPUs.
4265 cluster_size_i = NBNXN_CPU_CLUSTER_I_SIZE;
4267 vol_inc_i = (cluster_size_i - 1)/atom_density;
4268 vol_inc_j = (cluster_size_j - 1)/atom_density;
4270 return nbnxn_rlist_inc_outside_fac*pow(vol_inc_i + vol_inc_j, 1.0/3.0);
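/* Worked example (illustrative, assuming 4-atom clusters and an atom
 * density of ~100 nm^-3, roughly that of water): vol_inc_i = vol_inc_j
 * = 3/100 = 0.03 nm^3, so the increment is 0.6*(0.06)^(1/3), about
 * 0.23 nm on top of the atom-pair cut-off.
 */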
4273 /* Estimates the interaction volume^2 for non-local interactions */
4274 static real nonlocal_vol2(const gmx_domdec_zones_t *zones, rvec ls, real r)
4283 /* Here we simply add up the interaction volumes^2 of the 1, 2 or 3
4284 * non-home 1D decomposition zones. As these volumes are not additive,
4285 * this is an overestimate, but it would only be significant in the limit
4286 * of small cells, where we anyhow need to split the lists into
4287 * as small parts as possible.
4290 for (z = 0; z < zones->n; z++)
4292 if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
4297 for (d = 0; d < DIM; d++)
4299 if (zones->shift[z][d] == 0)
4303 za *= zones->size[z].x1[d] - zones->size[z].x0[d];
4307 /* 4 octants of a sphere */
4308 vold_est = 0.25*M_PI*r*r*r*r;
4309 /* 4 quarter pie slices on the edges */
4310 vold_est += 4*cl*M_PI/6.0*r*r*r;
4311 /* One rectangular volume on a face */
4312 vold_est += ca*0.5*r*r;
4314 vol2_est_tot += vold_est*za;
4318 return vol2_est_tot;
4321 /* Estimates the average size of a full j-list for super/sub setup */
4322 static int get_nsubpair_max(const nbnxn_search_t nbs,
4325 int min_ci_balanced)
4327 const nbnxn_grid_t *grid;
4329 real xy_diag2, r_eff_sup, vol_est, nsp_est, nsp_est_nl;
4332 grid = &nbs->grid[0];
4334 if (min_ci_balanced <= 0 || grid->nc >= min_ci_balanced || grid->nc == 0)
4336 /* We don't need to worry */
4340 ls[XX] = (grid->c1[XX] - grid->c0[XX])/(grid->ncx*GPU_NSUBCELL_X);
4341 ls[YY] = (grid->c1[YY] - grid->c0[YY])/(grid->ncy*GPU_NSUBCELL_Y);
4342 ls[ZZ] = (grid->c1[ZZ] - grid->c0[ZZ])*grid->ncx*grid->ncy/(grid->nc*GPU_NSUBCELL_Z);
4344 /* The average squared length of the diagonal of a sub cell */
4345 xy_diag2 = ls[XX]*ls[XX] + ls[YY]*ls[YY] + ls[ZZ]*ls[ZZ];
4347 /* The formulas below are a heuristic estimate of the average nsj per si */
4348 r_eff_sup = rlist + nbnxn_rlist_inc_outside_fac*sqr((grid->na_c - 1.0)/grid->na_c)*sqrt(xy_diag2/3);
4350 if (!nbs->DomDec || nbs->zones->n == 1)
4357 sqr(grid->atom_density/grid->na_c)*
4358 nonlocal_vol2(nbs->zones, ls, r_eff_sup);
4363 /* Sub-cell interacts with itself */
4364 vol_est = ls[XX]*ls[YY]*ls[ZZ];
4365 /* 6/2 rectangular volume on the faces */
4366 vol_est += (ls[XX]*ls[YY] + ls[XX]*ls[ZZ] + ls[YY]*ls[ZZ])*r_eff_sup;
4367 /* 12/2 quarter pie slices on the edges */
4368 vol_est += 2*(ls[XX] + ls[YY] + ls[ZZ])*0.25*M_PI*sqr(r_eff_sup);
4369 /* 4 octants of a sphere */
4370 vol_est += 0.5*4.0/3.0*M_PI*pow(r_eff_sup, 3);
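/* Reading of the four terms above (added note): vol_est is the sub-cell
 * volume plus half of the face, edge and corner contributions of the
 * sub-cell expanded by r_eff_sup; the halving reflects that each
 * sub-cell pair should be counted only once.
 */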
4372 nsp_est = grid->nsubc_tot*vol_est*grid->atom_density/grid->na_c;
4374 /* Subtract the non-local pair count */
4375 nsp_est -= nsp_est_nl;
4379 fprintf(debug, "nsp_est local %5.1f non-local %5.1f\n",
4380 nsp_est, nsp_est_nl);
4385 nsp_est = nsp_est_nl;
4388 /* Thus the (average) maximum j-list size should be as follows */
4389 nsubpair_max = max(1, (int)(nsp_est/min_ci_balanced+0.5));
4391 /* Since the target value is a maximum (this avoids high outliers,
4392 * which lead to load imbalance), not average, we add half the
4393 * number of pairs in a cj4 block to get the average about right.
4395 nsubpair_max += GPU_NSUBCELL*NBNXN_GPU_JGROUP_SIZE/2;
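/* For example (illustrative): with GPU_NSUBCELL = 8 and
 * NBNXN_GPU_JGROUP_SIZE = 4 this adds 8*4/2 = 16 cluster pairs,
 * i.e. half of a completely filled cj4 block.
 */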
4399 fprintf(debug, "nbl nsp estimate %.1f, nsubpair_max %d\n",
4400 nsp_est, nsubpair_max);
4403 return nsubpair_max;
4406 /* Debug list print function */
4407 static void print_nblist_ci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
4411 for (i = 0; i < nbl->nci; i++)
4413 fprintf(fp, "ci %4d shift %2d ncj %3d\n",
4414 nbl->ci[i].ci, nbl->ci[i].shift,
4415 nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start);
4417 for (j = nbl->ci[i].cj_ind_start; j < nbl->ci[i].cj_ind_end; j++)
4419 fprintf(fp, " cj %5d imask %x\n",
4426 /* Debug list print function */
4427 static void print_nblist_sci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
4429 int i, j4, j, ncp, si;
4431 for (i = 0; i < nbl->nsci; i++)
4433 fprintf(fp, "ci %4d shift %2d ncj4 %2d\n",
4434 nbl->sci[i].sci, nbl->sci[i].shift,
4435 nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start);
4438 for (j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
4440 for (j = 0; j < NBNXN_GPU_JGROUP_SIZE; j++)
4442 fprintf(fp, " sj %5d imask %x\n",
4444 nbl->cj4[j4].imei[0].imask);
4445 for (si = 0; si < GPU_NSUBCELL; si++)
4447 if (nbl->cj4[j4].imei[0].imask & (1U << (j*GPU_NSUBCELL + si)))
4454 fprintf(fp, "ci %4d shift %2d ncj4 %2d ncp %3d\n",
4455 nbl->sci[i].sci, nbl->sci[i].shift,
4456 nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start,
4461 /* Combines the pair lists *nbl generated on multiple threads into nblc */
4462 static void combine_nblists(int nnbl, nbnxn_pairlist_t **nbl,
4463 nbnxn_pairlist_t *nblc)
4465 int nsci, ncj4, nexcl;
4467 int nthreads gmx_unused;
4471 gmx_incons("combine_nblists does not support simple lists");
4476 nexcl = nblc->nexcl;
4477 for (i = 0; i < nnbl; i++)
4479 nsci += nbl[i]->nsci;
4480 ncj4 += nbl[i]->ncj4;
4481 nexcl += nbl[i]->nexcl;
4484 if (nsci > nblc->sci_nalloc)
4486 nb_realloc_sci(nblc, nsci);
4488 if (ncj4 > nblc->cj4_nalloc)
4490 nblc->cj4_nalloc = over_alloc_small(ncj4);
4491 nbnxn_realloc_void((void **)&nblc->cj4,
4492 nblc->ncj4*sizeof(*nblc->cj4),
4493 nblc->cj4_nalloc*sizeof(*nblc->cj4),
4494 nblc->alloc, nblc->free);
4496 if (nexcl > nblc->excl_nalloc)
4498 nblc->excl_nalloc = over_alloc_small(nexcl);
4499 nbnxn_realloc_void((void **)&nblc->excl,
4500 nblc->nexcl*sizeof(*nblc->excl),
4501 nblc->excl_nalloc*sizeof(*nblc->excl),
4502 nblc->alloc, nblc->free);
4505 /* Each thread should copy its own data to the combined arrays,
4506 * as otherwise data will go back and forth between different caches.
4508 nthreads = gmx_omp_nthreads_get(emntPairsearch);
4509 #pragma omp parallel for num_threads(nthreads) schedule(static)
4510 for (n = 0; n < nnbl; n++)
4517 const nbnxn_pairlist_t *nbli;
4519 /* Determine the offset in the combined data for our thread */
4520 sci_offset = nblc->nsci;
4521 cj4_offset = nblc->ncj4;
4522 ci_offset = nblc->nci_tot;
4523 excl_offset = nblc->nexcl;
4525 for (i = 0; i < n; i++)
4527 sci_offset += nbl[i]->nsci;
4528 cj4_offset += nbl[i]->ncj4;
4529 ci_offset += nbl[i]->nci_tot;
4530 excl_offset += nbl[i]->nexcl;
4535 for (i = 0; i < nbli->nsci; i++)
4537 nblc->sci[sci_offset+i] = nbli->sci[i];
4538 nblc->sci[sci_offset+i].cj4_ind_start += cj4_offset;
4539 nblc->sci[sci_offset+i].cj4_ind_end += cj4_offset;
4542 for (j4 = 0; j4 < nbli->ncj4; j4++)
4544 nblc->cj4[cj4_offset+j4] = nbli->cj4[j4];
4545 nblc->cj4[cj4_offset+j4].imei[0].excl_ind += excl_offset;
4546 nblc->cj4[cj4_offset+j4].imei[1].excl_ind += excl_offset;
4549 for (j4 = 0; j4 < nbli->nexcl; j4++)
4551 nblc->excl[excl_offset+j4] = nbli->excl[j4];
4555 for (n = 0; n < nnbl; n++)
4557 nblc->nsci += nbl[n]->nsci;
4558 nblc->ncj4 += nbl[n]->ncj4;
4559 nblc->nci_tot += nbl[n]->nci_tot;
4560 nblc->nexcl += nbl[n]->nexcl;
4564 static void balance_fep_lists(const nbnxn_search_t nbs,
4565 nbnxn_pairlist_set_t *nbl_lists)
4568 int nri_tot, nrj_tot, nrj_target;
4572 nnbl = nbl_lists->nnbl;
4576 /* Nothing to balance */
4580 /* Count the total i-lists and pairs */
4583 for (th = 0; th < nnbl; th++)
4585 nri_tot += nbl_lists->nbl_fep[th]->nri;
4586 nrj_tot += nbl_lists->nbl_fep[th]->nrj;
4589 nrj_target = (nrj_tot + nnbl - 1)/nnbl;
4591 assert(gmx_omp_nthreads_get(emntNonbonded) == nnbl);
4593 #pragma omp parallel for schedule(static) num_threads(nnbl)
4594 for (th = 0; th < nnbl; th++)
4598 nbl = nbs->work[th].nbl_fep;
4600 /* Note that here we allocate for the total size, instead of
4601 * a per-thread estimate (which is hard to obtain).
4603 if (nri_tot > nbl->maxnri)
4605 nbl->maxnri = over_alloc_large(nri_tot);
4606 reallocate_nblist(nbl);
4608 if (nri_tot > nbl->maxnri || nrj_tot > nbl->maxnrj)
4610 nbl->maxnrj = over_alloc_small(nrj_tot);
4611 srenew(nbl->jjnr, nbl->maxnrj);
4612 srenew(nbl->excl_fep, nbl->maxnrj);
4615 clear_pairlist_fep(nbl);
4618 /* Loop over the source lists and assign and copy i-entries */
4620 nbld = nbs->work[th_dest].nbl_fep;
4621 for (th = 0; th < nnbl; th++)
4626 nbls = nbl_lists->nbl_fep[th];
4628 for (i = 0; i < nbls->nri; i++)
4632 /* The number of pairs in this i-entry */
4633 nrj = nbls->jindex[i+1] - nbls->jindex[i];
4635 /* Decide if list th_dest is too large and we should proceed
4636 * to the next destination list.
4638 if (th_dest+1 < nnbl && nbld->nrj > 0 &&
4639 nbld->nrj + nrj - nrj_target > nrj_target - nbld->nrj)
4642 nbld = nbs->work[th_dest].nbl_fep;
4645 nbld->iinr[nbld->nri] = nbls->iinr[i];
4646 nbld->gid[nbld->nri] = nbls->gid[i];
4647 nbld->shift[nbld->nri] = nbls->shift[i];
4649 for (j = nbls->jindex[i]; j < nbls->jindex[i+1]; j++)
4651 nbld->jjnr[nbld->nrj] = nbls->jjnr[j];
4652 nbld->excl_fep[nbld->nrj] = nbls->excl_fep[j];
4656 nbld->jindex[nbld->nri] = nbld->nrj;
4660 /* Swap the list pointers */
4661 for (th = 0; th < nnbl; th++)
4665 nbl_tmp = nbl_lists->nbl_fep[th];
4666 nbl_lists->nbl_fep[th] = nbs->work[th].nbl_fep;
4667 nbs->work[th].nbl_fep = nbl_tmp;
4671 fprintf(debug, "nbl_fep[%d] nri %4d nrj %4d\n",
4673 nbl_lists->nbl_fep[th]->nri,
4674 nbl_lists->nbl_fep[th]->nrj);
4679 /* Returns the next ci to be processed by our thread */
4680 static gmx_bool next_ci(const nbnxn_grid_t *grid,
4682 int nth, int ci_block,
4683 int *ci_x, int *ci_y,
4689 if (*ci_b == ci_block)
4691 /* Jump to the next block assigned to this task */
4692 *ci += (nth - 1)*ci_block;
4696 if (*ci >= grid->nc*conv)
4701 while (*ci >= grid->cxy_ind[*ci_x*grid->ncy + *ci_y + 1]*conv)
4704 if (*ci_y == grid->ncy)
4714 /* Returns the distance^2 for which we put cell pairs in the list
4715 * without checking atom pair distances. This is usually < rlist^2.
4717 static float boundingbox_only_distance2(const nbnxn_grid_t *gridi,
4718 const nbnxn_grid_t *gridj,
4722 /* If the distance between two sub-cell bounding boxes is less
4723 * than this distance, do not check the distance between
4724 * all particle pairs in the sub-cell, since then it is likely
4725 * that the box pair has atom pairs within the cut-off.
4726 * We use the nblist cut-off minus 0.5 times the average x/y diagonal
4727 * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
4728 * Using more than 0.5 gains at most 0.5%.
4729 * If forces are calculated more than twice, the performance gain
4730 * in the force calculation outweighs the cost of checking.
4731 * Note that with sub-cell lists, the atom-pair distance check
4732 * is only performed when only 1 out of 8 sub-cells is within range;
4733 * this is because the GPU is much faster than the CPU.
4738 bbx = 0.5*(gridi->sx + gridj->sx);
4739 bby = 0.5*(gridi->sy + gridj->sy);
4742 bbx /= GPU_NSUBCELL_X;
4743 bby /= GPU_NSUBCELL_Y;
4746 rbb2 = sqr(max(0, rlist - 0.5*sqrt(bbx*bbx + bby*bby)));
4751 return (float)((1+GMX_FLOAT_EPS)*rbb2);
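/* Worked example (illustrative numbers): for a simple list with
 * rlist = 1.0 nm and sub-cell sizes sx = sy = 0.3 nm, bbx = bby = 0.3,
 * so rbb2 = (1.0 - 0.5*sqrt(0.18))^2, about 0.62 nm^2: bounding boxes
 * closer than ~0.79 nm skip the per-atom distance check.
 */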
4755 static int get_ci_block_size(const nbnxn_grid_t *gridi,
4756 gmx_bool bDomDec, int nth)
4758 const int ci_block_enum = 5;
4759 const int ci_block_denom = 11;
4760 const int ci_block_min_atoms = 16;
4763 /* Here we decide how to distribute the blocks over the threads.
4764 * We use prime numbers to try to avoid that the grid size becomes
4765 * a multiple of the number of threads, which would lead to some
4766 * threads getting "inner" pairs and others getting boundary pairs,
4767 * which in turn will lead to load imbalance between threads.
4768 * Set the block size as 5/11/ntask times the average number of cells
4769 * in a y,z slab. This should ensure a quite uniform distribution
4770 * of the grid parts of the different threads along all three grid
4771 * zone boundaries with 3D domain decomposition. At the same time
4772 * the blocks will not become too small.
4774 ci_block = (gridi->nc*ci_block_enum)/(ci_block_denom*gridi->ncx*nth);
4776 /* Ensure the blocks are not too small: avoids cache invalidation */
4777 if (ci_block*gridi->na_sc < ci_block_min_atoms)
4779 ci_block = (ci_block_min_atoms + gridi->na_sc - 1)/gridi->na_sc;
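/* Worked example (illustrative): for gridi->nc = 4400, gridi->ncx = 20
 * and nth = 8, ci_block = 4400*5/(11*20*8) = 12 cells per block; with
 * na_sc = 4 that is 48 atoms, so the minimum-size clamp above does not
 * trigger.
 */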
4782 /* Without domain decomposition,
4783 * or with fewer than 3 blocks per task, divide into nth blocks.
4785 if (!bDomDec || ci_block*3*nth > gridi->nc)
4787 ci_block = (gridi->nc + nth - 1)/nth;
4793 /* Generates the part of pair-list nbl assigned to our thread */
4794 static void nbnxn_make_pairlist_part(const nbnxn_search_t nbs,
4795 const nbnxn_grid_t *gridi,
4796 const nbnxn_grid_t *gridj,
4797 nbnxn_search_work_t *work,
4798 const nbnxn_atomdata_t *nbat,
4799 const t_blocka *excl,
4803 gmx_bool bFBufferFlag,
4806 int min_ci_balanced,
4808 nbnxn_pairlist_t *nbl,
4813 real rl2, rl_fep2 = 0;
4816 int ci_b, ci, ci_x, ci_y, ci_xy, cj;
4822 int conv_i, cell0_i;
4823 const nbnxn_bb_t *bb_i = NULL;
4825 const float *pbb_i = NULL;
4827 const float *bbcz_i, *bbcz_j;
4829 real bx0, bx1, by0, by1, bz0, bz1;
4831 real d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
4832 int cxf, cxl, cyf, cyf_x, cyl;
4834 int c0, c1, cs, cf, cl;
4837 int gridi_flag_shift = 0, gridj_flag_shift = 0;
4838 gmx_bitmask_t *gridj_flag = NULL;
4839 int ncj_old_i, ncj_old_j;
4841 nbs_cycle_start(&work->cc[enbsCCsearch]);
4843 if (gridj->bSimple != nbl->bSimple)
4845 gmx_incons("Grid incompatible with pair-list");
4849 nbl->na_sc = gridj->na_sc;
4850 nbl->na_ci = gridj->na_c;
4851 nbl->na_cj = nbnxn_kernel_to_cj_size(nb_kernel_type);
4852 na_cj_2log = get_2log(nbl->na_cj);
4858 /* Determine conversion of clusters to flag blocks */
4859 gridi_flag_shift = 0;
4860 while ((nbl->na_ci<<gridi_flag_shift) < NBNXN_BUFFERFLAG_SIZE)
4864 gridj_flag_shift = 0;
4865 while ((nbl->na_cj<<gridj_flag_shift) < NBNXN_BUFFERFLAG_SIZE)
4870 gridj_flag = work->buffer_flags.flag;
4873 copy_mat(nbs->box, box);
4875 rl2 = nbl->rlist*nbl->rlist;
4877 if (nbs->bFEP && !nbl->bSimple)
4879 /* Determine an atom-pair list cut-off distance for FEP atom pairs.
4880 * We should not simply use rlist, since then we would not have
4881 * the small, effective buffering of the NxN lists.
4882 * The buffer is an overestimate, but the resulting cost for pairs
4883 * beyond rlist is negligible compared to the FEP pairs within rlist.
4885 rl_fep2 = nbl->rlist + effective_buffer_1x1_vs_MxN(gridi, gridj);
4889 fprintf(debug, "nbl_fep atom-pair rlist %f\n", rl_fep2);
4891 rl_fep2 = rl_fep2*rl_fep2;
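/* Worked example (illustrative numbers): with sub-cell x/y sizes of
 * about 0.15 nm on both grids, effective_buffer_1x1_vs_MxN() returns
 * roughly 0.1*(0.15 + 0.15) = 0.03 nm, so rlist = 1.0 nm gives
 * rl_fep2 = 1.03^2, about 1.06 nm^2.
 */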
4894 rbb2 = boundingbox_only_distance2(gridi, gridj, nbl->rlist, nbl->bSimple);
4898 fprintf(debug, "nbl bounding box only distance %f\n", sqrt(rbb2));
4901 /* Set the shift range */
4902 for (d = 0; d < DIM; d++)
4904 /* Check if we need periodicity shifts.
4905 * Without PBC or with domain decomposition we don't need them.
4907 if (d >= ePBC2npbcdim(nbs->ePBC) || nbs->dd_dim[d])
4914 box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < sqrt(rl2))
4925 if (nbl->bSimple && !gridi->bSimple)
4927 conv_i = gridi->na_sc/gridj->na_sc;
4928 bb_i = gridi->bb_simple;
4929 bbcz_i = gridi->bbcz_simple;
4930 flags_i = gridi->flags_simple;
4945 /* We use the normal bounding box format for both grid types */
4948 bbcz_i = gridi->bbcz;
4949 flags_i = gridi->flags;
4951 cell0_i = gridi->cell0*conv_i;
4953 bbcz_j = gridj->bbcz;
4957 /* Blocks of the conversion factor - 1 give a large repeat count
4958 * combined with a small block size. This should result in good
4959 * load balancing for both small and large domains.
4961 ci_block = conv_i - 1;
4965 fprintf(debug, "nbl nc_i %d col.av. %.1f ci_block %d\n",
4966 gridi->nc, gridi->nc/(double)(gridi->ncx*gridi->ncy), ci_block);
4972 /* Initially set ci_b and ci to 1 before where we want them to start,
4973 * as they will both be incremented in next_ci.
4976 ci = th*ci_block - 1;
4979 while (next_ci(gridi, conv_i, nth, ci_block, &ci_x, &ci_y, &ci_b, &ci))
4981 if (nbl->bSimple && flags_i[ci] == 0)
4986 ncj_old_i = nbl->ncj;
4989 if (gridj != gridi && shp[XX] == 0)
4993 bx1 = bb_i[ci].upper[BB_X];
4997 bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx;
4999 if (bx1 < gridj->c0[XX])
5001 d2cx = sqr(gridj->c0[XX] - bx1);
5010 ci_xy = ci_x*gridi->ncy + ci_y;
5012 /* Loop over shift vectors in three dimensions */
5013 for (tz = -shp[ZZ]; tz <= shp[ZZ]; tz++)
5015 shz = tz*box[ZZ][ZZ];
5017 bz0 = bbcz_i[ci*NNBSBB_D ] + shz;
5018 bz1 = bbcz_i[ci*NNBSBB_D+1] + shz;
5030 d2z = sqr(bz0 - box[ZZ][ZZ]);
5033 d2z_cx = d2z + d2cx;
5041 bz1/((real)(gridi->cxy_ind[ci_xy+1] - gridi->cxy_ind[ci_xy]));
5046 /* The check with bz1_frac close to or larger than 1 comes later */
            for (ty = -shp[YY]; ty <= shp[YY]; ty++)
            {
                shy = ty*box[YY][YY] + tz*box[ZZ][YY];

                if (nbl->bSimple)
                {
                    by0 = bb_i[ci].lower[BB_Y] + shy;
                    by1 = bb_i[ci].upper[BB_Y] + shy;
                }
                else
                {
                    by0 = gridi->c0[YY] + (ci_y  )*gridi->sy + shy;
                    by1 = gridi->c0[YY] + (ci_y+1)*gridi->sy + shy;
                }

                get_cell_range(by0, by1,
                               gridj->ncy, gridj->c0[YY], gridj->sy, gridj->inv_sy,
                               d2z_cx, rl2,
                               &cyf, &cyl);

                if (cyf > cyl)
                {
                    continue;
                }

                d2z_cy = d2z;
                if (by1 < gridj->c0[YY])
                {
                    d2z_cy += sqr(gridj->c0[YY] - by1);
                }
                else if (by0 > gridj->c1[YY])
                {
                    d2z_cy += sqr(by0 - gridj->c1[YY]);
                }
                for (tx = -shp[XX]; tx <= shp[XX]; tx++)
                {
                    shift = XYZ2IS(tx, ty, tz);

#ifdef NBNXN_SHIFT_BACKWARD
                    if (gridi == gridj && shift > CENTRAL)
                    {
                        continue;
                    }
#endif

                    shx = tx*box[XX][XX] + ty*box[YY][XX] + tz*box[ZZ][XX];

                    if (nbl->bSimple)
                    {
                        bx0 = bb_i[ci].lower[BB_X] + shx;
                        bx1 = bb_i[ci].upper[BB_X] + shx;
                    }
                    else
                    {
                        bx0 = gridi->c0[XX] + (ci_x  )*gridi->sx + shx;
                        bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx + shx;
                    }

                    get_cell_range(bx0, bx1,
                                   gridj->ncx, gridj->c0[XX], gridj->sx, gridj->inv_sx,
                                   d2z_cy, rl2,
                                   &cxf, &cxl);

                    if (cxf > cxl)
                    {
                        continue;
                    }

                    if (nbl->bSimple)
                    {
                        new_ci_entry(nbl, cell0_i+ci, shift, flags_i[ci]);
                    }
                    else
                    {
                        new_sci_entry(nbl, cell0_i+ci, shift);
                    }
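                    /* Worked example (illustrative only, hypothetical
                     * numbers): for a cubic 3 nm box only the diagonal
                     * box elements are non-zero, so the shift triplet
                     * (tx,ty,tz) = (1,0,-1) displaces the i-cell by
                     * shx = 3 nm, shy = 0, shz = -3 nm; XYZ2IS packs
                     * the triplet into the single shift index stored
                     * with each list entry.
                     */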
#ifndef NBNXN_SHIFT_BACKWARD
                    if (cxf < ci_x)
#else
                    if (shift == CENTRAL && gridi == gridj &&
                        cxf < ci_x)
#endif
                    {
                        /* Leave the pairs with i > j.
                         * x is the major index, so skip half of it.
                         */
                        cxf = ci_x;
                    }

                    if (nbl->bSimple)
                    {
                        set_icell_bb_simple(bb_i, ci, shx, shy, shz,
                                            nbl->work->bb_ci);
                    }
                    else
                    {
#ifdef NBNXN_PBB_SIMD4
                        set_icell_bbxxxx_supersub(pbb_i, ci, shx, shy, shz,
                                                  nbl->work->pbb_ci);
#else
                        set_icell_bb_supersub(bb_i, ci, shx, shy, shz,
                                              nbl->work->bb_ci);
#endif
                    }

                    nbs->icell_set_x(cell0_i+ci, shx, shy, shz,
                                     gridi->na_c, nbat->xstride, nbat->x,
                                     nbl->work);
                    for (cx = cxf; cx <= cxl; cx++)
                    {
                        d2zx = d2z;
                        if (gridj->c0[XX] + cx*gridj->sx > bx1)
                        {
                            d2zx += sqr(gridj->c0[XX] + cx*gridj->sx - bx1);
                        }
                        else if (gridj->c0[XX] + (cx+1)*gridj->sx < bx0)
                        {
                            d2zx += sqr(gridj->c0[XX] + (cx+1)*gridj->sx - bx0);
                        }

#ifndef NBNXN_SHIFT_BACKWARD
                        if (gridi == gridj &&
                            cx == 0 && cyf < ci_y)
#else
                        if (gridi == gridj &&
                            cx == 0 && shift == CENTRAL && cyf < ci_y)
#endif
                        {
                            /* Leave the pairs with i > j.
                             * Skip half of y when i and j have the same x.
                             */
                            cyf_x = ci_y;
                        }
                        else
                        {
                            cyf_x = cyf;
                        }

                        for (cy = cyf_x; cy <= cyl; cy++)
                        {
                            c0 = gridj->cxy_ind[cx*gridj->ncy+cy];
                            c1 = gridj->cxy_ind[cx*gridj->ncy+cy+1];
#ifdef NBNXN_SHIFT_BACKWARD
                            if (gridi == gridj &&
                                shift == CENTRAL && c0 < ci)
                            {
                                c0 = ci;
                            }
#endif

                            d2zxy = d2zx;
                            if (gridj->c0[YY] + cy*gridj->sy > by1)
                            {
                                d2zxy += sqr(gridj->c0[YY] + cy*gridj->sy - by1);
                            }
                            else if (gridj->c0[YY] + (cy+1)*gridj->sy < by0)
                            {
                                d2zxy += sqr(gridj->c0[YY] + (cy+1)*gridj->sy - by0);
                            }
                            if (c1 > c0 && d2zxy < rl2)
                            {
                                cs = c0 + (int)(bz1_frac*(c1 - c0));
                                if (cs >= c1)
                                {
                                    cs = c1 - 1;
                                }

                                d2xy = d2zxy - d2z;

                                /* Find the lowest cell that can possibly
                                 * be within range.
                                 */
                                cf = cs;
                                while (cf > c0 &&
                                       (bbcz_j[cf*NNBSBB_D+1] >= bz0 ||
                                        d2xy + sqr(bbcz_j[cf*NNBSBB_D+1] - bz0) < rl2))
                                {
                                    cf--;
                                }

                                /* Find the highest cell that can possibly
                                 * be within range.
                                 */
                                cl = cs;
                                while (cl < c1-1 &&
                                       (bbcz_j[cl*NNBSBB_D] <= bz1 ||
                                        d2xy + sqr(bbcz_j[cl*NNBSBB_D] - bz1) < rl2))
                                {
                                    cl++;
                                }
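                                /* Worked example (illustrative only,
                                 * hypothetical numbers): with c0 = 100,
                                 * c1 = 110 and bz1_frac = 0.3, both
                                 * scans start at cs = 103 and walk down
                                 * (cf) or up (cl) only while cells can
                                 * still be within range, instead of
                                 * testing all 10 cells in the column.
                                 */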
#ifdef NBNXN_REFCODE
                                {
                                    /* Simple reference code, for debugging,
                                     * overrides the more complex code above.
                                     */
                                    int k;

                                    cf = c1;
                                    cl = -1;
                                    for (k = c0; k < c1; k++)
                                    {
                                        if (box_dist2(bx0, bx1, by0, by1, bz0, bz1, bb+k) < rl2 &&
                                            k < cf)
                                        {
                                            cf = k;
                                        }
                                        if (box_dist2(bx0, bx1, by0, by1, bz0, bz1, bb+k) < rl2 &&
                                            k > cl)
                                        {
                                            cl = k;
                                        }
                                    }
                                }
#endif

                                if (gridi == gridj)
                                {
                                    /* We want each atom/cell pair only once,
                                     * only use cj >= ci.
                                     */
#ifndef NBNXN_SHIFT_BACKWARD
                                    cf = max(cf, ci);
#else
                                    if (shift == CENTRAL)
                                    {
                                        cf = max(cf, ci);
                                    }
#endif
                                }
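                                /* Worked example (illustrative only):
                                 * for ci = 5 on the same grid with the
                                 * central shift and a column range of
                                 * [3,9], cf is raised to 5, so pair
                                 * (5,4) is not stored here; it was
                                 * already stored as (4,5) when ci was 4.
                                 */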
                                if (cf <= cl)
                                {
                                    /* For f buffer flags with simple lists */
                                    ncj_old_j = nbl->ncj;

                                    switch (nb_kernel_type)
                                    {
                                        case nbnxnk4x4_PlainC:
                                            check_subcell_list_space_simple(nbl, cl-cf+1);

                                            make_cluster_list_simple(gridj,
                                                                     nbl, ci, cf, cl,
                                                                     (gridi == gridj && shift == CENTRAL),
                                                                     nbat->x,
                                                                     rl2, rbb2,
                                                                     &ndistc);
                                            break;
#ifdef GMX_NBNXN_SIMD_4XN
                                        case nbnxnk4xN_SIMD_4xN:
                                            check_subcell_list_space_simple(nbl, ci_to_cj(na_cj_2log, cl-cf)+2);
                                            make_cluster_list_simd_4xn(gridj,
                                                                       nbl, ci, cf, cl,
                                                                       (gridi == gridj && shift == CENTRAL),
                                                                       nbat->x,
                                                                       rl2, rbb2,
                                                                       &ndistc);
                                            break;
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
                                        case nbnxnk4xN_SIMD_2xNN:
                                            check_subcell_list_space_simple(nbl, ci_to_cj(na_cj_2log, cl-cf)+2);
                                            make_cluster_list_simd_2xnn(gridj,
                                                                        nbl, ci, cf, cl,
                                                                        (gridi == gridj && shift == CENTRAL),
                                                                        nbat->x,
                                                                        rl2, rbb2,
                                                                        &ndistc);
                                            break;
#endif
                                        case nbnxnk8x8x8_PlainC:
                                        case nbnxnk8x8x8_CUDA:
                                            check_subcell_list_space_supersub(nbl, cl-cf+1);
                                            for (cj = cf; cj <= cl; cj++)
                                            {
                                                make_cluster_list_supersub(gridi, gridj,
                                                                           nbl, ci, cj,
                                                                           (gridi == gridj && shift == CENTRAL && ci == cj),
                                                                           nbat->xstride, nbat->x,
                                                                           rl2, rbb2,
                                                                           &ndistc);
                                            }
                                            break;
                                    }
                                    ncpcheck += cl - cf + 1;
                                    if (bFBufferFlag && nbl->ncj > ncj_old_j)
                                    {
                                        int cbf, cbl, cb;

                                        cbf = nbl->cj[ncj_old_j].cj >> gridj_flag_shift;
                                        cbl = nbl->cj[nbl->ncj-1].cj >> gridj_flag_shift;
                                        for (cb = cbf; cb <= cbl; cb++)
                                        {
                                            bitmask_init_bit(&gridj_flag[cb], th);
                                        }
                                    }
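                                    /* Worked example (illustrative
                                     * only): with gridj_flag_shift = 2,
                                     * newly added j-clusters 8-13 span
                                     * flag blocks 2 and 3; setting bit
                                     * th there records which force
                                     * buffer regions this thread wrote,
                                     * for the reduction afterwards.
                                     */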
                                }
                            }
                        }
                    }

                    /* Set the exclusions for this ci list */
                    if (nbl->bSimple)
                    {
                        set_ci_top_excls(nbs,
                                         nbl,
                                         shift == CENTRAL && gridi == gridj,
                                         gridj->na_c_2log,
                                         na_cj_2log,
                                         &(nbl->ci[nbl->nci]),
                                         excl);

                        if (nbs->bFEP)
                        {
                            make_fep_list(nbs, nbat, nbl,
                                          shift == CENTRAL && gridi == gridj,
                                          &(nbl->ci[nbl->nci]),
                                          gridi, gridj, nbl_fep);
                        }
                    }
                    else
                    {
                        set_sci_top_excls(nbs,
                                          nbl,
                                          shift == CENTRAL && gridi == gridj,
                                          gridj->na_c_2log,
                                          &(nbl->sci[nbl->nsci]),
                                          excl);

                        if (nbs->bFEP)
                        {
                            make_fep_list_supersub(nbs, nbat, nbl,
                                                   shift == CENTRAL && gridi == gridj,
                                                   &(nbl->sci[nbl->nsci]),
                                                   shx, shy, shz,
                                                   rl_fep2,
                                                   gridi, gridj, nbl_fep);
                        }
                    }
                    /* Close this ci list */
                    if (nbl->bSimple)
                    {
                        close_ci_entry_simple(nbl);
                    }
                    else
                    {
                        close_ci_entry_supersub(nbl,
                                                nsubpair_max,
                                                progBal, min_ci_balanced,
                                                th, nth);
                    }
                }
            }
        }

        if (bFBufferFlag && nbl->ncj > ncj_old_i)
        {
            bitmask_init_bit(&(work->buffer_flags.flag[(gridi->cell0+ci)>>gridi_flag_shift]), th);
        }
    }
    work->ndistc = ndistc;

    nbs_cycle_stop(&work->cc[enbsCCsearch]);

    if (debug)
    {
        fprintf(debug, "number of distance checks %d\n", ndistc);
        fprintf(debug, "ncpcheck %s %d\n", gridi == gridj ? "local" : "non-local",
                ncpcheck);

        if (nbl->bSimple)
        {
            print_nblist_statistics_simple(debug, nbl, nbs, rlist);
        }
        else
        {
            print_nblist_statistics_supersub(debug, nbl, nbs, rlist);
        }

        if (nbs->bFEP)
        {
            fprintf(debug, "nbl FEP list pairs: %d\n", nbl_fep->nrj);
        }
    }
}
static void reduce_buffer_flags(const nbnxn_search_t        nbs,
                                int                         nsrc,
                                const nbnxn_buffer_flags_t *dest)
{
    int            s, b;
    gmx_bitmask_t *flag;

    for (s = 0; s < nsrc; s++)
    {
        flag = nbs->work[s].buffer_flags.flag;

        for (b = 0; b < dest->nflag; b++)
        {
            bitmask_union(&(dest->flag[b]), flag[b]);
        }
    }
}
static void print_reduction_cost(const nbnxn_buffer_flags_t *flags, int nout)
{
    int           nelem, nkeep, ncopy, nred, b, c, out;
    gmx_bitmask_t mask_0;

    nelem = 0;
    nkeep = 0;
    ncopy = 0;
    nred  = 0;
    bitmask_init_bit(&mask_0, 0);
    for (b = 0; b < flags->nflag; b++)
    {
        if (bitmask_is_equal(flags->flag[b], mask_0))
        {
            /* Only flag 0 is set, no copy or reduction required */
            nelem++;
            nkeep++;
        }
        else if (!bitmask_is_zero(flags->flag[b]))
        {
            c = 0;
            for (out = 0; out < nout; out++)
            {
                if (bitmask_is_set(flags->flag[b], out))
                {
                    c++;
                }
            }
            nelem += c;
            if (c == 1)
            {
                ncopy++;
            }
            else
            {
                nred += c;
            }
        }
    }

    fprintf(debug, "nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
            flags->nflag, nout,
            nelem/(double)(flags->nflag),
            nkeep/(double)(flags->nflag),
            ncopy/(double)(flags->nflag),
            nred/(double)(flags->nflag));
}
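/* Worked example (illustrative only): with nout = 4 output buffers, a
 * flag block written only by thread 0 counts as "keep" (no work), a
 * block written by exactly one thread counts as "copy", and a block
 * written by c > 1 threads adds c to "red", since its c partial force
 * outputs must be summed during the buffer reduction.
 */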
/* Perform a count (linear) sort to sort the smaller lists to the end.
 * This avoids load imbalance on the GPU, as large lists will be
 * scheduled and executed first and the smaller lists later.
 * Load balancing between multi-processors only happens at the end,
 * where smaller lists lead to more effective load balancing.
 * The sorting is done on the cj4 count, not on the actual pair counts.
 * Not only does this make the sort faster, it also results in better
 * load balancing than a sort on the exact load.
 * This function swaps the sci pointers in the pair list to avoid a copy
 * operation.
 */
static void sort_sci(nbnxn_pairlist_t *nbl)
{
    nbnxn_list_work_t *work;
    int                m, i, s, s0, s1;
    nbnxn_sci_t       *sci_sort;

    if (nbl->ncj4 <= nbl->nsci)
    {
        /* nsci = 0 or all sci have size 1, sorting won't change the order */
        return;
    }

    work = nbl->work;

    /* We will distinguish differences up to double the average */
    m = (2*nbl->ncj4)/nbl->nsci;

    if (m + 1 > work->sort_nalloc)
    {
        work->sort_nalloc = over_alloc_large(m + 1);
        srenew(work->sort, work->sort_nalloc);
    }

    if (work->sci_sort_nalloc != nbl->sci_nalloc)
    {
        work->sci_sort_nalloc = nbl->sci_nalloc;
        nbnxn_realloc_void((void **)&work->sci_sort,
                           0,
                           work->sci_sort_nalloc*sizeof(*work->sci_sort),
                           nbl->alloc, nbl->free);
    }

    /* Count the entries of each size */
    for (i = 0; i <= m; i++)
    {
        work->sort[i] = 0;
    }
    for (s = 0; s < nbl->nsci; s++)
    {
        i = min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
        work->sort[i]++;
    }
    /* Calculate the offset for each count */
    s0            = work->sort[m];
    work->sort[m] = 0;
    for (i = m - 1; i >= 0; i--)
    {
        s1            = work->sort[i];
        work->sort[i] = work->sort[i + 1] + s0;
        s0            = s1;
    }

    /* Sort entries directly into place */
    sci_sort = work->sci_sort;
    for (s = 0; s < nbl->nsci; s++)
    {
        i = min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
        sci_sort[work->sort[i]++] = nbl->sci[s];
    }

    /* Swap the sci pointers so we use the new, sorted list */
    work->sci_sort = nbl->sci;
    nbl->sci       = sci_sort;
}
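/* Worked example (illustrative only): for nsci = 4 entries with cj4
 * counts {1, 3, 1, 2}, ncj4 = 7 and m = (2*7)/4 = 3. Counting gives
 * sort[] = {0, 2, 1, 1}; the descending offset pass converts this to
 * start positions sort[3] = 0, sort[2] = 1, sort[1] = 2, so entries are
 * placed in size order {3, 2, 1, 1}: largest lists first, equal sizes
 * kept in stable order.
 */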
/* Make a local or non-local pair-list, depending on iloc */
void nbnxn_make_pairlist(const nbnxn_search_t  nbs,
                         nbnxn_atomdata_t     *nbat,
                         const t_blocka       *excl,
                         real                  rlist,
                         int                   min_ci_balanced,
                         nbnxn_pairlist_set_t *nbl_list,
                         int                   iloc,
                         int                   nb_kernel_type,
                         t_nrnb               *nrnb)
{
    nbnxn_grid_t      *gridi, *gridj;
    gmx_bool           bGPUCPU;
    int                nzi, zi, zj0, zj1, zj;
    int                nsubpair_max;
    int                th;
    int                nnbl;
    nbnxn_pairlist_t **nbl;
    int                ci_block;
    gmx_bool           CombineNBLists;
    gmx_bool           progBal;
    int                np_tot, np_noq, np_hlj, nap;
    /* Check if we are running hybrid GPU + CPU nbnxn mode */
    bGPUCPU = (!nbs->grid[0].bSimple && nbl_list->bSimple);

    nnbl           = nbl_list->nnbl;
    nbl            = nbl_list->nbl;
    CombineNBLists = nbl_list->bCombined;

    if (debug)
    {
        fprintf(debug, "ns making %d nblists\n", nnbl);
    }

    nbat->bUseBufferFlags = (nbat->nout > 1);
    /* We should re-init the flags before making the first list */
    if (nbat->bUseBufferFlags && (LOCAL_I(iloc) || bGPUCPU))
    {
        init_buffer_flags(&nbat->buffer_flags, nbat->natoms);
    }
    if (nbl_list->bSimple)
    {
        switch (nb_kernel_type)
        {
#ifdef GMX_NBNXN_SIMD_4XN
            case nbnxnk4xN_SIMD_4xN:
                nbs->icell_set_x = icell_set_x_simd_4xn;
                break;
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
            case nbnxnk4xN_SIMD_2xNN:
                nbs->icell_set_x = icell_set_x_simd_2xnn;
                break;
#endif
            default:
                nbs->icell_set_x = icell_set_x_simple;
                break;
        }
    }
    else
    {
#ifdef NBNXN_SEARCH_BB_SIMD4
        nbs->icell_set_x = icell_set_x_supersub_simd4;
#else
        nbs->icell_set_x = icell_set_x_supersub;
#endif
    }
    if (LOCAL_I(iloc))
    {
        /* Only zone (grid) 0 vs 0 */
        nzi = 1;
        zj0 = 0;
        zj1 = 1;
    }
    else
    {
        nzi = nbs->zones->nizone;
    }

    if (!nbl_list->bSimple && min_ci_balanced > 0)
    {
        nsubpair_max = get_nsubpair_max(nbs, iloc, rlist, min_ci_balanced);
    }
    else
    {
        nsubpair_max = 0;
    }
    /* Clear all pair-lists */
    for (th = 0; th < nnbl; th++)
    {
        clear_pairlist(nbl[th]);

        if (nbs->bFEP)
        {
            clear_pairlist_fep(nbl_list->nbl_fep[th]);
        }
    }
    for (zi = 0; zi < nzi; zi++)
    {
        gridi = &nbs->grid[zi];

        if (NONLOCAL_I(iloc))
        {
            zj0 = nbs->zones->izone[zi].j0;
            zj1 = nbs->zones->izone[zi].j1;
            if (zi == 0)
            {
                zj0++;
            }
        }
        for (zj = zj0; zj < zj1; zj++)
        {
            gridj = &nbs->grid[zj];

            if (debug)
            {
                fprintf(debug, "ns search grid %d vs %d\n", zi, zj);
            }
            nbs_cycle_start(&nbs->cc[enbsCCsearch]);

            if (nbl[0]->bSimple && !gridi->bSimple)
            {
                /* Hybrid list, determine blocking later */
                ci_block = 0;
            }
            else
            {
                ci_block = get_ci_block_size(gridi, nbs->DomDec, nnbl);
            }

            /* With GPU: generate progressively smaller lists for
             * load balancing for local only or non-local with 2 zones.
             */
            progBal = (LOCAL_I(iloc) || nbs->zones->n <= 2);
#pragma omp parallel for num_threads(nnbl) schedule(static)
            for (th = 0; th < nnbl; th++)
            {
                /* Re-init the thread-local work flag data before making
                 * the first list (not an elegant conditional).
                 */
                if (nbat->bUseBufferFlags && ((zi == 0 && zj == 0) ||
                                              (bGPUCPU && zi == 0 && zj == 1)))
                {
                    init_buffer_flags(&nbs->work[th].buffer_flags, nbat->natoms);
                }

                if (CombineNBLists && th > 0)
                {
                    clear_pairlist(nbl[th]);
                }

                /* Divide the i super cell equally over the nblists */
                nbnxn_make_pairlist_part(nbs, gridi, gridj,
                                         &nbs->work[th], nbat, excl,
                                         rlist,
                                         nb_kernel_type,
                                         ci_block,
                                         nbat->bUseBufferFlags,
                                         nsubpair_max,
                                         progBal, min_ci_balanced,
                                         th, nnbl,
                                         nbl[th],
                                         nbl_list->nbl_fep[th]);
            }
            nbs_cycle_stop(&nbs->cc[enbsCCsearch]);
            np_tot = 0;
            np_noq = 0;
            np_hlj = 0;
            for (th = 0; th < nnbl; th++)
            {
                inc_nrnb(nrnb, eNR_NBNXN_DIST2, nbs->work[th].ndistc);

                if (nbl_list->bSimple)
                {
                    np_tot += nbl[th]->ncj;
                    np_noq += nbl[th]->work->ncj_noq;
                    np_hlj += nbl[th]->work->ncj_hlj;
                }
                else
                {
                    /* This count ignores potential subsequent pair pruning */
                    np_tot += nbl[th]->nci_tot;
                }
            }
            nap                   = nbl[0]->na_ci*nbl[0]->na_cj;
            nbl_list->natpair_ljq = (np_tot - np_noq)*nap - np_hlj*nap/2;
            nbl_list->natpair_lj  = np_noq*nap;
            nbl_list->natpair_q   = np_hlj*nap/2;
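            /* Worked example (illustrative only, hypothetical numbers):
             * with 4x4 clusters nap = 16. If np_tot = 1000 cluster
             * pairs, of which np_noq = 100 are LJ-only and np_hlj = 50
             * have LJ on only half their atoms, then
             * natpair_ljq = 900*16 - 50*8 = 14000, natpair_lj = 1600
             * and natpair_q = 400; these feed the nrnb flop accounting.
             */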
            if (CombineNBLists && nnbl > 1)
            {
                nbs_cycle_start(&nbs->cc[enbsCCcombine]);

                combine_nblists(nnbl-1, nbl+1, nbl[0]);

                nbs_cycle_stop(&nbs->cc[enbsCCcombine]);
            }
        }
    }
    if (!nbl_list->bSimple)
    {
        /* Sort the entries on size, large ones first */
        if (CombineNBLists || nnbl == 1)
        {
            sort_sci(nbl[0]);
        }
        else
        {
#pragma omp parallel for num_threads(nnbl) schedule(static)
            for (th = 0; th < nnbl; th++)
            {
                sort_sci(nbl[th]);
            }
        }
    }

    if (nbat->bUseBufferFlags)
    {
        reduce_buffer_flags(nbs, nnbl, &nbat->buffer_flags);
    }
    if (nbs->bFEP)
    {
        /* Balance the free-energy lists over all the threads */
        balance_fep_lists(nbs, nbl_list);
    }
    /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
    if (LOCAL_I(iloc))
    {
        nbs->search_count++;
    }
    if (nbs->print_cycles &&
        (!nbs->DomDec || (nbs->DomDec && !LOCAL_I(iloc))) &&
        nbs->search_count % 100 == 0)
    {
        nbs_cycle_print(stderr, nbs);
    }
    if (debug && (CombineNBLists && nnbl > 1))
    {
        if (nbl[0]->bSimple)
        {
            print_nblist_statistics_simple(debug, nbl[0], nbs, rlist);
        }
        else
        {
            print_nblist_statistics_supersub(debug, nbl[0], nbs, rlist);
        }
    }

    if (debug)
    {
        if (gmx_debug_at)
        {
            if (nbl[0]->bSimple)
            {
                print_nblist_ci_cj(debug, nbl[0]);
            }
            else
            {
                print_nblist_sci_cj(debug, nbl[0]);
            }
        }

        if (nbat->bUseBufferFlags)
        {
            print_reduction_cost(&nbat->buffer_flags, nnbl);
        }
    }
}