/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "nbnxn_consts.h"
#include "nbnxn_internal.h"
#include "nbnxn_atomdata.h"
#include "nbnxn_search.h"
#include "gmx_cyclecounter.h"
#include "gmx_omp_nthreads.h"
/* Pair search box lower and upper corner in x,y,z.
 * Store this in 4 instead of 3 reals, which is useful with SSE.
 * To avoid complicating the code we also use 4 without SSE.
 */
#define NNBSBB_B    (2*NNBSBB_C)
/* Pair search box lower and upper bound in z only. */
/* Pair search box lower and upper corner x,y,z indices */

#ifdef NBNXN_SEARCH_BB_SSE
/* We use SSE or AVX-128bit for bounding box calculations */

/* Single precision BBs + coordinates, we can also load coordinates with SSE */
#define NBNXN_SEARCH_SSE_SINGLE

/* Include basic SSE2 stuff */
#include <emmintrin.h>

#if defined NBNXN_SEARCH_SSE_SINGLE && (GPU_NSUBCELL == 4 || GPU_NSUBCELL == 8)
/* Store bounding boxes with x, y and z coordinates in packs of 4 */
#define NBNXN_BBXXXX
#endif

/* The width of SSE/AVX128 with single precision for bounding boxes with GPU.
 * Here AVX-256 turns out to be slightly slower than AVX-128.
 */
#define STRIDE_PBB      4
#define STRIDE_PBB_2LOG 2

#endif /* NBNXN_SEARCH_BB_SSE */
#ifdef GMX_NBNXN_SIMD

/* The functions below are macros as they are performance sensitive */

/* 4x4 list, pack=4: no complex conversion required */
/* i-cluster to j-cluster conversion */
#define CI_TO_CJ_J4(ci)  (ci)
/* cluster index to coordinate array index conversion */
#define X_IND_CI_J4(ci)  ((ci)*STRIDE_P4)
#define X_IND_CJ_J4(cj)  ((cj)*STRIDE_P4)

/* 4x2 list, pack=4: j-cluster size is half the packing width */
/* i-cluster to j-cluster conversion */
#define CI_TO_CJ_J2(ci)  ((ci)<<1)
/* cluster index to coordinate array index conversion */
#define X_IND_CI_J2(ci)  ((ci)*STRIDE_P4)
#define X_IND_CJ_J2(cj)  (((cj)>>1)*STRIDE_P4 + ((cj) & 1)*(PACK_X4>>1))

/* 4x8 list, pack=8: i-cluster size is half the packing width */
/* i-cluster to j-cluster conversion */
#define CI_TO_CJ_J8(ci)  ((ci)>>1)
/* cluster index to coordinate array index conversion */
#define X_IND_CI_J8(ci)  (((ci)>>1)*STRIDE_P8 + ((ci) & 1)*(PACK_X8>>1))
#define X_IND_CJ_J8(cj)  ((cj)*STRIDE_P8)
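
/* Worked example of the index math above (illustrative comment only;
 * it assumes the usual packed-layout constants PACK_X4 == 4 and
 * STRIDE_P4 == DIM*PACK_X4 == 12). With x4 packing, coordinates are
 * stored as xxxx yyyy zzzz per group of 4 atoms. A 2-atom j-cluster is
 * either the lower or the upper half of such a pack, so for e.g. cj = 5:
 *
 *   X_IND_CJ_J2(5) = (5>>1)*12 + (5&1)*(4>>1) = 2*12 + 1*2 = 26
 *
 * i.e. j-cluster 5 starts at the third pack (index 24), offset by 2
 * atoms into the x-subarray because cj is odd.
 */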
/* The j-cluster size is matched to the SIMD width */
#if GMX_NBNXN_SIMD_BITWIDTH == 128
#ifdef GMX_DOUBLE
#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J2(ci)
#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J2(ci)
#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J2(cj)
#else
#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J4(ci)
#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J4(ci)
#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J4(cj)
#endif
#else
#if GMX_NBNXN_SIMD_BITWIDTH == 256
#ifdef GMX_DOUBLE
#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J4(ci)
#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J4(ci)
#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J4(cj)
#else
#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J8(ci)
#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J8(ci)
#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J8(cj)
/* Half SIMD with j-cluster size */
#define CI_TO_CJ_SIMD_2XNN(ci) CI_TO_CJ_J4(ci)
#define X_IND_CI_SIMD_2XNN(ci) X_IND_CI_J4(ci)
#define X_IND_CJ_SIMD_2XNN(cj) X_IND_CJ_J4(cj)
#endif
#else
#error "unsupported GMX_NBNXN_SIMD_BITWIDTH"
#endif
#endif

#endif /* GMX_NBNXN_SIMD */
/* Interaction masks for 4xN atom interactions.
 * Bit i*CJ_SIZE + j tells if atom i and j interact.
 */
/* The "all interactions" mask is the same for all kernels */
#define NBNXN_INT_MASK_ALL        0xffffffff
/* 4x4 kernel diagonal mask */
#define NBNXN_INT_MASK_DIAG       0x08ce
/* 4x2 kernel diagonal masks */
#define NBNXN_INT_MASK_DIAG_J2_0  0x0002
#define NBNXN_INT_MASK_DIAG_J2_1  0x002F
/* 4x8 kernel diagonal masks */
#define NBNXN_INT_MASK_DIAG_J8_0  0xf0f8fcfe
#define NBNXN_INT_MASK_DIAG_J8_1  0x0080c0e0
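
/* How the diagonal masks above come about (illustrative sketch, not
 * part of the search code): a diagonal mask keeps only pairs with
 * j > i, so self interactions and double counting are excluded when an
 * i-cluster meets its own atom range. For the 4x4 case (CJ_SIZE == 4):
 *
 *   unsigned int m = 0;
 *   int          i, j;
 *   for (i = 0; i < 4; i++)
 *   {
 *       for (j = i + 1; j < 4; j++)
 *       {
 *           m |= 1U << (i*4 + j);
 *       }
 *   }
 *
 * yields m == 0x08ce, i.e. NBNXN_INT_MASK_DIAG. The J2/J8 masks follow
 * the same j > i rule with the j-cluster covering half resp. twice the
 * i-cluster atom range, hence the two variants (_0/_1) per case.
 */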
#ifdef NBNXN_SEARCH_BB_SSE
/* Store bounding box corners as quadruplets: xxxxyyyyzzzz */

/* Size of a quadruplet of bounding box corners */
#define NNBSBB_XXXX  (NNBSBB_D*DIM*STRIDE_PBB)
#endif

/* We shift the i-particles backward for PBC.
 * This leads to more conditionals than shifting forward.
 * We do this to get more balanced pair lists.
 */
#define NBNXN_SHIFT_BACKWARD

/* This define is a lazy way to avoid interdependence of the grid
 * and searching data structures.
 */
#define NBNXN_NA_SC_MAX (GPU_NSUBCELL*NBNXN_GPU_CLUSTER_SIZE)

/* Resets all the pair search cycle counters to zero */
static void nbs_cycle_clear(nbnxn_cycle_t *cc)
{
    int i;

    for (i = 0; i < enbsCCnr; i++)
    {
        cc[i].count = 0;
        cc[i].c     = 0;
    }
}

/* Returns the average cycle count per call in millions of cycles */
static double Mcyc_av(const nbnxn_cycle_t *cc)
{
    return (double)cc->c*1e-6/cc->count;
}

/* Prints the pair search cycle counting summary */
static void nbs_cycle_print(FILE *fp, const nbnxn_search_t nbs)
{
    int t;

    fprintf(fp, "ns %4d grid %4.1f search %4.1f red.f %5.3f",
            nbs->cc[enbsCCgrid].count,
            Mcyc_av(&nbs->cc[enbsCCgrid]),
            Mcyc_av(&nbs->cc[enbsCCsearch]),
            Mcyc_av(&nbs->cc[enbsCCreducef]));

    if (nbs->nthread_max > 1)
    {
        if (nbs->cc[enbsCCcombine].count > 0)
        {
            fprintf(fp, " comb %5.2f",
                    Mcyc_av(&nbs->cc[enbsCCcombine]));
        }
        fprintf(fp, " s. th");
        for (t = 0; t < nbs->nthread_max; t++)
        {
            fprintf(fp, " %4.1f",
                    Mcyc_av(&nbs->work[t].cc[enbsCCsearch]));
        }
    }
}
/* Initializes an nbnxn_grid_t struct to an empty, unallocated state */
static void nbnxn_grid_init(nbnxn_grid_t * grid)
{
    grid->cxy_na     = NULL;
    grid->cxy_ind    = NULL;
    grid->cxy_nalloc = 0;
    grid->bb         = NULL;
    grid->bbj        = NULL;
    grid->nc_nalloc  = 0;
}

/* Returns the base-2 log of n, which has to be a power of 2 */
static int get_2log(int n)
{
    int log2;

    log2 = 0;
    while ((1<<log2) < n)
    {
        log2++;
    }
    if ((1<<log2) != n)
    {
        gmx_fatal(FARGS, "nbnxn na_c (%d) is not a power of 2", n);
    }

    return log2;
}
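
/* Example of the helper above (illustrative comment only):
 * get_2log(1) == 0, get_2log(4) == 2, get_2log(8) == 3, while
 * get_2log(6) aborts with gmx_fatal, since cluster sizes that are not
 * powers of 2 would break the shift-based index conversions
 * (X_IND_*, ci_to_cj) used throughout this file.
 */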
/* Returns the i-cluster size for the given kernel type */
static int nbnxn_kernel_to_ci_size(int nb_kernel_type)
{
    switch (nb_kernel_type)
    {
        case nbnxnk4x4_PlainC:
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:
            return NBNXN_CPU_CLUSTER_I_SIZE;
        case nbnxnk8x8x8_CUDA:
        case nbnxnk8x8x8_PlainC:
            /* The cluster size for super/sub lists is only set here.
             * Any value should work for the pair-search and atomdata code.
             * The kernels, of course, might require a particular value.
             */
            return NBNXN_GPU_CLUSTER_SIZE;
        default:
            gmx_incons("unknown kernel type");
    }

    return 0;
}

/* Returns the j-cluster size for the given kernel type */
int nbnxn_kernel_to_cj_size(int nb_kernel_type)
{
    int nbnxn_simd_width = 0;
    int cj_size          = 0;

#ifdef GMX_NBNXN_SIMD
    nbnxn_simd_width = GMX_NBNXN_SIMD_BITWIDTH/(sizeof(real)*8);
#endif

    switch (nb_kernel_type)
    {
        case nbnxnk4x4_PlainC:
            cj_size = NBNXN_CPU_CLUSTER_I_SIZE;
            break;
        case nbnxnk4xN_SIMD_4xN:
            cj_size = nbnxn_simd_width;
            break;
        case nbnxnk4xN_SIMD_2xNN:
            cj_size = nbnxn_simd_width/2;
            break;
        case nbnxnk8x8x8_CUDA:
        case nbnxnk8x8x8_PlainC:
            cj_size = nbnxn_kernel_to_ci_size(nb_kernel_type);
            break;
        default:
            gmx_incons("unknown kernel type");
    }

    return cj_size;
}
/* Converts an i-cluster index to a j-cluster index for 4-atom
 * i-clusters and j-clusters of size 2^na_cj_2log.
 */
static int ci_to_cj(int na_cj_2log, int ci)
{
    switch (na_cj_2log)
    {
        case 2: return ci;        break;
        case 1: return (ci<<1);   break;
        case 3: return (ci>>1);   break;
    }

    return -1;
}
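
/* Examples for the conversion above (illustrative comment only):
 * with 4-atom i-clusters, na_cj_2log == 2 means equal sizes, so the
 * mapping is the identity; na_cj_2log == 1 (2-atom j-clusters) means
 * i-cluster 3 covers j-clusters 6 and 7, and ci_to_cj returns the
 * first one (3<<1 == 6); na_cj_2log == 3 (8-atom j-clusters) means
 * i-clusters 6 and 7 both lie in j-cluster 3 (6>>1 == 7>>1 == 3).
 */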
/* Returns whether the pair list for nb_kernel_type is of the simple type */
gmx_bool nbnxn_kernel_pairlist_simple(int nb_kernel_type)
{
    if (nb_kernel_type == nbnxnkNotSet)
    {
        gmx_fatal(FARGS, "Non-bonded kernel type not set for Verlet-style pair-list.");
    }

    switch (nb_kernel_type)
    {
        case nbnxnk8x8x8_CUDA:
        case nbnxnk8x8x8_PlainC:
            return FALSE;

        case nbnxnk4x4_PlainC:
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:
            return TRUE;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
            return FALSE;
    }
}

void nbnxn_init_search(nbnxn_search_t    * nbs_ptr,
                       ivec               *n_dd_cells,
                       gmx_domdec_zones_t *zones,
                       int                 nthread_max)
{
    nbnxn_search_t nbs;
    int            d, g, t;

    snew(nbs, 1);
    *nbs_ptr = nbs;

    nbs->DomDec = (n_dd_cells != NULL);

    clear_ivec(nbs->dd_dim);
    nbs->ngrid = 1;
    if (nbs->DomDec)
    {
        nbs->zones = zones;

        for (d = 0; d < DIM; d++)
        {
            if ((*n_dd_cells)[d] > 1)
            {
                nbs->dd_dim[d] = 1;
                /* Each grid matches a DD zone */
                nbs->ngrid *= 2;
            }
        }
    }

    snew(nbs->grid, nbs->ngrid);
    for (g = 0; g < nbs->ngrid; g++)
    {
        nbnxn_grid_init(&nbs->grid[g]);
    }
    nbs->cell        = NULL;
    nbs->cell_nalloc = 0;
    nbs->a           = NULL;
    nbs->a_nalloc    = 0;

    nbs->nthread_max = nthread_max;

    /* Initialize the work data structures for each thread */
    snew(nbs->work, nbs->nthread_max);
    for (t = 0; t < nbs->nthread_max; t++)
    {
        nbs->work[t].cxy_na           = NULL;
        nbs->work[t].cxy_na_nalloc    = 0;
        nbs->work[t].sort_work        = NULL;
        nbs->work[t].sort_work_nalloc = 0;
    }

    /* Initialize detailed nbsearch cycle counting */
    nbs->print_cycles = (getenv("GMX_NBNXN_CYCLE") != 0);
    nbs->search_count = 0;
    nbs_cycle_clear(nbs->cc);
    for (t = 0; t < nbs->nthread_max; t++)
    {
        nbs_cycle_clear(nbs->work[t].cc);
    }
}

/* Returns the average atom density in the volume spanned by the corners */
static real grid_atom_density(int n, rvec corner0, rvec corner1)
{
    rvec size;

    rvec_sub(corner1, corner0, size);

    return n/(size[XX]*size[YY]*size[ZZ]);
}
/* Sets the grid dimensions in x and y and (re)allocates the grid data;
 * returns the maximum number of cells the grid can hold.
 */
static int set_grid_size_xy(const nbnxn_search_t nbs,
                            nbnxn_grid_t *grid,
                            int dd_zone,
                            int n, rvec corner0, rvec corner1,
                            real atom_density,
                            int XFormat)
{
    rvec size;
    int  na_c;
    real adens, tlen, tlen_x, tlen_y, nc_max;
    int  t, bb_nalloc;

    rvec_sub(corner1, corner0, size);

    if (n > grid->na_sc)
    {
        /* target cell length */
        if (grid->bSimple)
        {
            /* To minimize the zero interactions, we should make
             * the largest of the i/j cell cubic.
             */
            na_c = max(grid->na_c, grid->na_cj);

            /* Approximately cubic cells */
            tlen   = pow(na_c/atom_density, 1.0/3.0);
            tlen_x = tlen;
            tlen_y = tlen;
        }
        else
        {
            /* Approximately cubic sub cells */
            tlen   = pow(grid->na_c/atom_density, 1.0/3.0);
            tlen_x = tlen*GPU_NSUBCELL_X;
            tlen_y = tlen*GPU_NSUBCELL_Y;
        }
        /* We round ncx and ncy down, because we get fewer cell pairs
         * in the pair list when the fixed cell dimensions (x,y) are
         * larger than the variable one (z) than the other way around.
         */
        grid->ncx = max(1, (int)(size[XX]/tlen_x));
        grid->ncy = max(1, (int)(size[YY]/tlen_y));
    }

    grid->sx     = size[XX]/grid->ncx;
    grid->sy     = size[YY]/grid->ncy;
    grid->inv_sx = 1/grid->sx;
    grid->inv_sy = 1/grid->sy;

    if (dd_zone > 0)
    {
        /* This is a non-home zone, add an extra row of cells
         * for particles communicated for bonded interactions.
         * These can be beyond the cut-off. It doesn't matter where
         * they end up on the grid, but for performance it's better
         * if they don't end up in cells that can be within cut-off range.
         */
        grid->ncx++;
        grid->ncy++;
    }

    /* We need one additional cell entry for particles moved by DD */
    if (grid->ncx*grid->ncy+1 > grid->cxy_nalloc)
    {
        grid->cxy_nalloc = over_alloc_large(grid->ncx*grid->ncy+1);
        srenew(grid->cxy_na, grid->cxy_nalloc);
        srenew(grid->cxy_ind, grid->cxy_nalloc+1);
    }
    for (t = 0; t < nbs->nthread_max; t++)
    {
        if (grid->ncx*grid->ncy+1 > nbs->work[t].cxy_na_nalloc)
        {
            nbs->work[t].cxy_na_nalloc = over_alloc_large(grid->ncx*grid->ncy+1);
            srenew(nbs->work[t].cxy_na, nbs->work[t].cxy_na_nalloc);
        }
    }

    /* Worst case scenario of 1 atom in each last cell */
    if (grid->na_cj <= grid->na_c)
    {
        nc_max = n/grid->na_sc + grid->ncx*grid->ncy;
    }
    else
    {
        nc_max = n/grid->na_sc + grid->ncx*grid->ncy*grid->na_cj/grid->na_c;
    }

    if (nc_max > grid->nc_nalloc)
    {
        grid->nc_nalloc = over_alloc_large(nc_max);
        srenew(grid->nsubc, grid->nc_nalloc);
        srenew(grid->bbcz, grid->nc_nalloc*NNBSBB_D);
#ifdef NBNXN_BBXXXX
        bb_nalloc = grid->nc_nalloc*GPU_NSUBCELL/STRIDE_PBB*NNBSBB_XXXX;
#else
        bb_nalloc = grid->nc_nalloc*GPU_NSUBCELL*NNBSBB_B;
#endif
        sfree_aligned(grid->bb);
        /* This snew also zeros the contents, this avoids possible
         * floating exceptions in SSE with the unused bb elements.
         */
        snew_aligned(grid->bb, bb_nalloc, 16);

        if (grid->bSimple)
        {
            if (grid->na_cj == grid->na_c)
            {
                grid->bbj = grid->bb;
            }
            else
            {
                sfree_aligned(grid->bbj);
                snew_aligned(grid->bbj, bb_nalloc*grid->na_c/grid->na_cj, 16);
            }
        }

        srenew(grid->flags, grid->nc_nalloc);
    }

    copy_rvec(corner0, grid->c0);
    copy_rvec(corner1, grid->c1);

    return nc_max;
}
/* We need to sort particles in grid columns on the z-coordinate.
 * As particles are very often distributed homogeneously, we use a
 * sorting algorithm similar to pigeonhole sort. We multiply the
 * z-coordinate by a factor, cast to an int and try to store in that
 * hole. If the hole is full, we move this or another particle. A
 * second pass is needed to make contiguous elements.
 * SORT_GRID_OVERSIZE is the ratio of holes to particles. 4 is the
 * optimal value for homogeneous particle distribution and allows for
 * an O(#particles) sort up to distributions where all particles are
 * concentrated in 1/4 of the space. No NlogN fallback is implemented,
 * as it can be expensive to detect inhomogeneous particle
 * distributions.
 * SGSF is the maximum ratio of holes used, in the worst case all
 * particles end up in the last hole and we need #particles extra
 * holes at the end.
 */
#define SORT_GRID_OVERSIZE 4
#define SGSF (SORT_GRID_OVERSIZE + 1)
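
/* A minimal, self-contained sketch of the pigeonhole idea described
 * above (illustrative only, it is NOT the sort used here): it omits
 * the in-hole ordering on the actual z-value and the Backwards mode
 * that sort_atoms() below implements, and it assumes the hole array
 * has the #particles slack at the end (the SGSF factor) so that
 * bumping past nsort is safe.
 */
#if 0
static void pigeonhole_sort_sketch(int *a, int n, const real *z,
                                   real h0, real invh, int nsort,
                                   int *hole)
{
    int i, zi, k;

    /* Mark all holes, including the slack at the end, as empty */
    for (zi = 0; zi < nsort + n; zi++)
    {
        hole[zi] = -1;
    }
    /* First pass: drop each particle in its hole, bump up on collision */
    for (i = 0; i < n; i++)
    {
        zi = (int)((z[a[i]] - h0)*invh);
        while (hole[zi] >= 0)
        {
            zi++;
        }
        hole[zi] = a[i];
    }
    /* Second pass: compact the occupied holes back into a[] in order */
    k = 0;
    for (zi = 0; zi < nsort + n; zi++)
    {
        if (hole[zi] >= 0)
        {
            a[k++] = hole[zi];
        }
    }
}
#endif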
/* Sort particle index a on coordinates x along dim.
 * Backwards tells if we want decreasing instead of increasing coordinates.
 * h0 is the minimum of the coordinate range.
 * invh is the inverse hole spacing.
 * nsort, the theoretical hole limit, is only used for debugging.
 * sort is the sorting work array.
 */
static void sort_atoms(int dim, gmx_bool Backwards,
                       int *a, int n, rvec *x,
                       real h0, real invh, int nsort, int *sort)
{
    int i, c;
    int zi, zim, zi_min, zi_max;
    int cp, tmp;

    if (n <= 1)
    {
        /* Nothing to do */
        return;
    }

    /* Determine the index range used, so we can limit it for the second pass */
    zi_min = INT_MAX;
    zi_max = -1;

    /* Sort the particles using a simple index sort */
    for (i = 0; i < n; i++)
    {
        /* The cast takes care of floating-point rounding effects below zero.
         * This code assumes particles are less than 1/SORT_GRID_OVERSIZE
         * times the box height out of the box.
         */
        zi = (int)((x[a[i]][dim] - h0)*invh);

#ifdef DEBUG_NBNXN_GRIDDING
        if (zi < 0 || zi >= nsort)
        {
            gmx_fatal(FARGS, "(int)((x[%d][%c]=%f - %f)*%f) = %d, not in 0 - %d\n",
                      a[i], 'x'+dim, x[a[i]][dim], h0, invh, zi, nsort);
        }
#endif

        /* Ideally this particle should go in sort cell zi,
         * but that might already be in use,
         * in that case find the first empty cell higher up
         */
        if (sort[zi] < 0)
        {
            sort[zi] = a[i];
            zi_min = min(zi_min, zi);
            zi_max = max(zi_max, zi);
        }
        else
        {
            /* We have multiple atoms in the same sorting slot.
             * Sort on real z for minimal bounding box size.
             * There is an extra check for identical z to ensure
             * well-defined output order, independent of input order
             * to ensure binary reproducibility after restarts.
             */
            while (sort[zi] >= 0 && ( x[a[i]][dim] >  x[sort[zi]][dim] ||
                                      (x[a[i]][dim] == x[sort[zi]][dim] &&
                                       a[i] > sort[zi])))
            {
                zi++;
            }

            if (sort[zi] >= 0)
            {
                /* Shift all elements by one slot until we find an empty slot */
                cp  = sort[zi];
                zim = zi + 1;
                while (sort[zim] >= 0)
                {
                    tmp       = sort[zim];
                    sort[zim] = cp;
                    cp        = tmp;
                    zim++;
                }
                sort[zim] = cp;
                zi_max = max(zi_max, zim);
            }
            sort[zi] = a[i];
            zi_max = max(zi_max, zi);
        }
    }

    c = 0;
    if (!Backwards)
    {
        for (zi = 0; zi < nsort; zi++)
        {
            if (sort[zi] >= 0)
            {
                a[c++]   = sort[zi];
                sort[zi] = -1;
            }
        }
    }
    else
    {
        for (zi = zi_max; zi >= zi_min; zi--)
        {
            if (sort[zi] >= 0)
            {
                a[c++]   = sort[zi];
                sort[zi] = -1;
            }
        }
    }
    if (c < n)
    {
        gmx_incons("Lost particles while sorting");
    }
}
#ifdef GMX_DOUBLE
#define R2F_D(x) ((float)((x) >= 0 ? ((1-GMX_FLOAT_EPS)*(x)) : ((1+GMX_FLOAT_EPS)*(x))))
#define R2F_U(x) ((float)((x) >= 0 ? ((1+GMX_FLOAT_EPS)*(x)) : ((1-GMX_FLOAT_EPS)*(x))))
#else
#define R2F_D(x) (x)
#define R2F_U(x) (x)
#endif
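
/* Why R2F_D/R2F_U scale instead of just casting (explanatory note):
 * in double precision builds the bounding boxes are kept in single
 * precision, and a plain (float) cast rounds to nearest, which can
 * shrink a box below the extent of its atoms. Scaling a lower corner
 * by (1-GMX_FLOAT_EPS) biases it down and an upper corner by
 * (1+GMX_FLOAT_EPS) biases it up (signs flipped for negative values),
 * so the float box is guaranteed to enclose the double precision
 * coordinates. E.g. a lower corner at 1.0 becomes (1-GMX_FLOAT_EPS)*1.0,
 * slightly below, rather than possibly slightly above.
 */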
/* Coordinate order x,y,z, bb order xyz0 */
static void calc_bounding_box(int na, int stride, const real *x, float *bb)
{
    int  i, j;
    real xl, xh, yl, yh, zl, zh;

    i  = 0;
    xl = x[i+XX];
    xh = x[i+XX];
    yl = x[i+YY];
    yh = x[i+YY];
    zl = x[i+ZZ];
    zh = x[i+ZZ];
    i += stride;
    for (j = 1; j < na; j++)
    {
        xl = min(xl, x[i+XX]);
        xh = max(xh, x[i+XX]);
        yl = min(yl, x[i+YY]);
        yh = max(yh, x[i+YY]);
        zl = min(zl, x[i+ZZ]);
        zh = max(zh, x[i+ZZ]);
        i += stride;
    }
    /* Note: possible double to float conversion here */
    bb[BBL_X] = R2F_D(xl);
    bb[BBL_Y] = R2F_D(yl);
    bb[BBL_Z] = R2F_D(zl);
    bb[BBU_X] = R2F_U(xh);
    bb[BBU_Y] = R2F_U(yh);
    bb[BBU_Z] = R2F_U(zh);
}

/* Packed coordinates, bb order xyz0 */
static void calc_bounding_box_x_x4(int na, const real *x, float *bb)
{
    int  j;
    real xl, xh, yl, yh, zl, zh;

    xl = x[XX*PACK_X4];
    xh = x[XX*PACK_X4];
    yl = x[YY*PACK_X4];
    yh = x[YY*PACK_X4];
    zl = x[ZZ*PACK_X4];
    zh = x[ZZ*PACK_X4];
    for (j = 1; j < na; j++)
    {
        xl = min(xl, x[j+XX*PACK_X4]);
        xh = max(xh, x[j+XX*PACK_X4]);
        yl = min(yl, x[j+YY*PACK_X4]);
        yh = max(yh, x[j+YY*PACK_X4]);
        zl = min(zl, x[j+ZZ*PACK_X4]);
        zh = max(zh, x[j+ZZ*PACK_X4]);
    }
    /* Note: possible double to float conversion here */
    bb[BBL_X] = R2F_D(xl);
    bb[BBL_Y] = R2F_D(yl);
    bb[BBL_Z] = R2F_D(zl);
    bb[BBU_X] = R2F_U(xh);
    bb[BBU_Y] = R2F_U(yh);
    bb[BBU_Z] = R2F_U(zh);
}

/* Packed coordinates, bb order xyz0 */
static void calc_bounding_box_x_x8(int na, const real *x, float *bb)
{
    int  j;
    real xl, xh, yl, yh, zl, zh;

    xl = x[XX*PACK_X8];
    xh = x[XX*PACK_X8];
    yl = x[YY*PACK_X8];
    yh = x[YY*PACK_X8];
    zl = x[ZZ*PACK_X8];
    zh = x[ZZ*PACK_X8];
    for (j = 1; j < na; j++)
    {
        xl = min(xl, x[j+XX*PACK_X8]);
        xh = max(xh, x[j+XX*PACK_X8]);
        yl = min(yl, x[j+YY*PACK_X8]);
        yh = max(yh, x[j+YY*PACK_X8]);
        zl = min(zl, x[j+ZZ*PACK_X8]);
        zh = max(zh, x[j+ZZ*PACK_X8]);
    }
    /* Note: possible double to float conversion here */
    bb[BBL_X] = R2F_D(xl);
    bb[BBL_Y] = R2F_D(yl);
    bb[BBL_Z] = R2F_D(zl);
    bb[BBU_X] = R2F_U(xh);
    bb[BBU_Y] = R2F_U(yh);
    bb[BBU_Z] = R2F_U(zh);
}
#ifdef NBNXN_SEARCH_BB_SSE

/* Packed coordinates, bb order xyz0 */
static void calc_bounding_box_x_x4_halves(int na, const real *x,
                                          float *bb, float *bbj)
{
    calc_bounding_box_x_x4(min(na, 2), x, bbj);

    if (na > 2)
    {
        calc_bounding_box_x_x4(min(na-2, 2), x+(PACK_X4>>1), bbj+NNBSBB_B);
    }
    else
    {
        /* Set the "empty" bounding box to the same as the first one,
         * so we don't need to treat special cases in the rest of the code.
         */
        _mm_store_ps(bbj+NNBSBB_B,          _mm_load_ps(bbj));
        _mm_store_ps(bbj+NNBSBB_B+NNBSBB_C, _mm_load_ps(bbj+NNBSBB_C));
    }

    _mm_store_ps(bb,          _mm_min_ps(_mm_load_ps(bbj),
                                         _mm_load_ps(bbj+NNBSBB_B)));
    _mm_store_ps(bb+NNBSBB_C, _mm_max_ps(_mm_load_ps(bbj+NNBSBB_C),
                                         _mm_load_ps(bbj+NNBSBB_B+NNBSBB_C)));
}

/* Coordinate order xyz, bb order xxxxyyyyzzzz */
static void calc_bounding_box_xxxx(int na, int stride, const real *x, float *bb)
{
    int  i, j;
    real xl, xh, yl, yh, zl, zh;

    i  = 0;
    xl = x[i+XX];
    xh = x[i+XX];
    yl = x[i+YY];
    yh = x[i+YY];
    zl = x[i+ZZ];
    zh = x[i+ZZ];
    i += stride;
    for (j = 1; j < na; j++)
    {
        xl = min(xl, x[i+XX]);
        xh = max(xh, x[i+XX]);
        yl = min(yl, x[i+YY]);
        yh = max(yh, x[i+YY]);
        zl = min(zl, x[i+ZZ]);
        zh = max(zh, x[i+ZZ]);
        i += stride;
    }
    /* Note: possible double to float conversion here */
    bb[0*STRIDE_PBB] = R2F_D(xl);
    bb[1*STRIDE_PBB] = R2F_D(yl);
    bb[2*STRIDE_PBB] = R2F_D(zl);
    bb[3*STRIDE_PBB] = R2F_U(xh);
    bb[4*STRIDE_PBB] = R2F_U(yh);
    bb[5*STRIDE_PBB] = R2F_U(zh);
}

#endif /* NBNXN_SEARCH_BB_SSE */
#ifdef NBNXN_SEARCH_SSE_SINGLE

/* Coordinate order xyz?, bb order xyz0 */
static void calc_bounding_box_sse(int na, const float *x, float *bb)
{
    __m128 bb_0_SSE, bb_1_SSE;
    __m128 x_SSE;
    int    i;

    bb_0_SSE = _mm_load_ps(x);
    bb_1_SSE = bb_0_SSE;

    for (i = 1; i < na; i++)
    {
        x_SSE    = _mm_load_ps(x+i*NNBSBB_C);
        bb_0_SSE = _mm_min_ps(bb_0_SSE, x_SSE);
        bb_1_SSE = _mm_max_ps(bb_1_SSE, x_SSE);
    }

    _mm_store_ps(bb,   bb_0_SSE);
    _mm_store_ps(bb+4, bb_1_SSE);
}

/* Coordinate order xyz?, bb order xxxxyyyyzzzz */
static void calc_bounding_box_xxxx_sse(int na, const float *x,
                                       float *bb_work,
                                       real *bb)
{
    calc_bounding_box_sse(na, x, bb_work);

    bb[0*STRIDE_PBB] = bb_work[BBL_X];
    bb[1*STRIDE_PBB] = bb_work[BBL_Y];
    bb[2*STRIDE_PBB] = bb_work[BBL_Z];
    bb[3*STRIDE_PBB] = bb_work[BBU_X];
    bb[4*STRIDE_PBB] = bb_work[BBU_Y];
    bb[5*STRIDE_PBB] = bb_work[BBU_Z];
}

#endif /* NBNXN_SEARCH_SSE_SINGLE */
#ifdef NBNXN_SEARCH_BB_SSE

/* Combines pairs of consecutive bounding boxes */
static void combine_bounding_box_pairs(nbnxn_grid_t *grid, const float *bb)
{
    int    i, j, sc2, nc2, c2;
    __m128 min_SSE, max_SSE;

    for (i = 0; i < grid->ncx*grid->ncy; i++)
    {
        /* Starting bb in a column is expected to be 2-aligned */
        sc2 = grid->cxy_ind[i]>>1;
        /* For odd numbers skip the last bb here */
        nc2 = (grid->cxy_na[i]+3)>>(2+1);
        for (c2 = sc2; c2 < sc2+nc2; c2++)
        {
            min_SSE = _mm_min_ps(_mm_load_ps(bb+(c2*4+0)*NNBSBB_C),
                                 _mm_load_ps(bb+(c2*4+2)*NNBSBB_C));
            max_SSE = _mm_max_ps(_mm_load_ps(bb+(c2*4+1)*NNBSBB_C),
                                 _mm_load_ps(bb+(c2*4+3)*NNBSBB_C));
            _mm_store_ps(grid->bbj+(c2*2+0)*NNBSBB_C, min_SSE);
            _mm_store_ps(grid->bbj+(c2*2+1)*NNBSBB_C, max_SSE);
        }
        if (((grid->cxy_na[i]+3)>>2) & 1)
        {
            /* Copy the last bb for odd bb count in this column */
            for (j = 0; j < NNBSBB_C; j++)
            {
                grid->bbj[(c2*2+0)*NNBSBB_C+j] = bb[(c2*4+0)*NNBSBB_C+j];
                grid->bbj[(c2*2+1)*NNBSBB_C+j] = bb[(c2*4+1)*NNBSBB_C+j];
            }
        }
    }
}

#endif
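
/* Layout note for the pair combination above (explanatory comment):
 * each simple bb is stored as two NNBSBB_C quadruplets, the lower
 * corner at index (2*c+0)*NNBSBB_C and the upper at (2*c+1)*NNBSBB_C.
 * For a pair of cells 2*c2 and 2*c2+1, the loads at c2*4+0 and c2*4+2
 * are therefore the two lower corners (combined with min), and those
 * at c2*4+1 and c2*4+3 the two upper corners (combined with max),
 * producing the combined j-cluster boxes in grid->bbj.
 */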
/* Prints the average bb size, used for debug output */
static void print_bbsizes_simple(FILE *fp,
                                 const nbnxn_search_t nbs,
                                 const nbnxn_grid_t *grid)
{
    int  c, d;
    dvec ba;

    clear_dvec(ba);
    for (c = 0; c < grid->nc; c++)
    {
        for (d = 0; d < DIM; d++)
        {
            ba[d] += grid->bb[c*NNBSBB_B+NNBSBB_C+d] - grid->bb[c*NNBSBB_B+d];
        }
    }
    dsvmul(1.0/grid->nc, ba, ba);

    fprintf(fp, "ns bb: %4.2f %4.2f %4.2f %4.2f %4.2f %4.2f rel %4.2f %4.2f %4.2f\n",
            nbs->box[XX][XX]/grid->ncx,
            nbs->box[YY][YY]/grid->ncy,
            nbs->box[ZZ][ZZ]*grid->ncx*grid->ncy/grid->nc,
            ba[XX], ba[YY], ba[ZZ],
            ba[XX]*grid->ncx/nbs->box[XX][XX],
            ba[YY]*grid->ncy/nbs->box[YY][YY],
            ba[ZZ]*grid->nc/(grid->ncx*grid->ncy*nbs->box[ZZ][ZZ]));
}

/* Prints the average bb size, used for debug output */
static void print_bbsizes_supersub(FILE *fp,
                                   const nbnxn_search_t nbs,
                                   const nbnxn_grid_t *grid)
{
    int  ns, c, s, i, d;
    dvec ba;

    clear_dvec(ba);
    ns = 0;
    for (c = 0; c < grid->nc; c++)
    {
#ifdef NBNXN_BBXXXX
        for (s = 0; s < grid->nsubc[c]; s += STRIDE_PBB)
        {
            int cs_w;

            cs_w = (c*GPU_NSUBCELL + s)/STRIDE_PBB;
            for (i = 0; i < STRIDE_PBB; i++)
            {
                for (d = 0; d < DIM; d++)
                {
                    ba[d] +=
                        grid->bb[cs_w*NNBSBB_XXXX+(DIM+d)*STRIDE_PBB+i] -
                        grid->bb[cs_w*NNBSBB_XXXX+     d*STRIDE_PBB+i];
                }
            }
        }
#else
        for (s = 0; s < grid->nsubc[c]; s++)
        {
            int cs;

            cs = c*GPU_NSUBCELL + s;
            for (d = 0; d < DIM; d++)
            {
                ba[d] +=
                    grid->bb[cs*NNBSBB_B+NNBSBB_C+d] -
                    grid->bb[cs*NNBSBB_B         +d];
            }
        }
#endif
        ns += grid->nsubc[c];
    }
    dsvmul(1.0/ns, ba, ba);

    fprintf(fp, "ns bb: %4.2f %4.2f %4.2f %4.2f %4.2f %4.2f rel %4.2f %4.2f %4.2f\n",
            nbs->box[XX][XX]/(grid->ncx*GPU_NSUBCELL_X),
            nbs->box[YY][YY]/(grid->ncy*GPU_NSUBCELL_Y),
            nbs->box[ZZ][ZZ]*grid->ncx*grid->ncy/(grid->nc*GPU_NSUBCELL_Z),
            ba[XX], ba[YY], ba[ZZ],
            ba[XX]*grid->ncx*GPU_NSUBCELL_X/nbs->box[XX][XX],
            ba[YY]*grid->ncy*GPU_NSUBCELL_Y/nbs->box[YY][YY],
            ba[ZZ]*grid->nc*GPU_NSUBCELL_Z/(grid->ncx*grid->ncy*nbs->box[ZZ][ZZ]));
}
/* Potentially sorts atoms on LJ coefficients !=0 and ==0.
 * Also sets interaction flags.
 */
void sort_on_lj(nbnxn_atomdata_t *nbat, int na_c,
                int a0, int a1, const int *atinfo,
                int *order,
                int *flags)
{
    int      subc, s, a, n1, n2, a_lj_max, i, j;
    int      sort1[NBNXN_NA_SC_MAX/GPU_NSUBCELL];
    int      sort2[NBNXN_NA_SC_MAX/GPU_NSUBCELL];
    gmx_bool haveQ;

    *flags = 0;

    subc = 0;
    for (s = a0; s < a1; s += na_c)
    {
        /* Make lists for this (sub-)cell on atoms with and without LJ */
        n1       = 0;
        n2       = 0;
        haveQ    = FALSE;
        a_lj_max = -1;
        for (a = s; a < min(s+na_c, a1); a++)
        {
            haveQ = haveQ || GET_CGINFO_HAS_Q(atinfo[order[a]]);

            if (GET_CGINFO_HAS_VDW(atinfo[order[a]]))
            {
                sort1[n1++] = order[a];
                a_lj_max    = a;
            }
            else
            {
                sort2[n2++] = order[a];
            }
        }

        /* If we don't have atoms with LJ, there's nothing to sort */
        if (n1 > 0)
        {
            *flags |= NBNXN_CI_DO_LJ(subc);

            if (2*n1 <= na_c)
            {
                /* Only sort when strictly necessary. Ordering particles
                 * can lead to less accurate summation due to rounding,
                 * both for LJ and Coulomb interactions.
                 */
                if (2*(a_lj_max - s) >= na_c)
                {
                    for (i = 0; i < n1; i++)
                    {
                        order[a0+i] = sort1[i];
                    }
                    for (j = 0; j < n2; j++)
                    {
                        order[a0+n1+j] = sort2[j];
                    }
                }

                *flags |= NBNXN_CI_HALF_LJ(subc);
            }
        }
        if (haveQ)
        {
            *flags |= NBNXN_CI_DO_COUL(subc);
        }
        subc++;
    }
}
/* Fill a pair search cell with atoms.
 * Potentially sorts atoms and sets the interaction flags.
 */
void fill_cell(const nbnxn_search_t nbs,
               nbnxn_grid_t *grid,
               nbnxn_atomdata_t *nbat,
               int a0, int a1,
               const int *atinfo,
               rvec *x,
               int sx, int sy, int sz,
               float *bb_work)
{
    int    na, a;
    int    offset, bbo;
    float *bb_ptr;

    na = a1 - a0;

    if (grid->bSimple)
    {
        sort_on_lj(nbat, grid->na_c, a0, a1, atinfo, nbs->a,
                   grid->flags+(a0>>grid->na_c_2log)-grid->cell0);
    }

    /* Now we have sorted the atoms, set the cell indices */
    for (a = a0; a < a1; a++)
    {
        nbs->cell[nbs->a[a]] = a;
    }

    copy_rvec_to_nbat_real(nbs->a+a0, a1-a0, grid->na_c, x,
                           nbat->XFormat, nbat->x, a0,
                           sx, sy, sz);

    if (nbat->XFormat == nbatX4)
    {
        /* Store the bounding boxes as xyz.xyz. */
        offset = ((a0 - grid->cell0*grid->na_sc)>>grid->na_c_2log)*NNBSBB_B;
        bb_ptr = grid->bb + offset;

#if defined GMX_DOUBLE && defined NBNXN_SEARCH_BB_SSE
        if (2*grid->na_cj == grid->na_c)
        {
            calc_bounding_box_x_x4_halves(na, nbat->x+X4_IND_A(a0), bb_ptr,
                                          grid->bbj+offset*2);
        }
        else
#endif
        {
            calc_bounding_box_x_x4(na, nbat->x+X4_IND_A(a0), bb_ptr);
        }
    }
    else if (nbat->XFormat == nbatX8)
    {
        /* Store the bounding boxes as xyz.xyz. */
        offset = ((a0 - grid->cell0*grid->na_sc)>>grid->na_c_2log)*NNBSBB_B;
        bb_ptr = grid->bb + offset;

        calc_bounding_box_x_x8(na, nbat->x+X8_IND_A(a0), bb_ptr);
    }
    else if (!grid->bSimple)
    {
        /* Store the bounding boxes in a format convenient
         * for SSE calculations: xxxxyyyyzzzz...
         */
        bb_ptr =
            grid->bb +
            ((a0-grid->cell0*grid->na_sc)>>(grid->na_c_2log+STRIDE_PBB_2LOG))*NNBSBB_XXXX +
            (((a0-grid->cell0*grid->na_sc)>>grid->na_c_2log) & (STRIDE_PBB-1));

#ifdef NBNXN_SEARCH_SSE_SINGLE
        if (nbat->XFormat == nbatXYZQ)
        {
            calc_bounding_box_xxxx_sse(na, nbat->x+a0*nbat->xstride,
                                       bb_work, bb_ptr);
        }
        else
#endif
        {
            calc_bounding_box_xxxx(na, nbat->xstride, nbat->x+a0*nbat->xstride,
                                   bb_ptr);
        }
        if (gmx_debug_at)
        {
            fprintf(debug, "%2d %2d %2d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
                    sx, sy, sz,
                    bb_ptr[0*STRIDE_PBB], bb_ptr[3*STRIDE_PBB],
                    bb_ptr[1*STRIDE_PBB], bb_ptr[4*STRIDE_PBB],
                    bb_ptr[2*STRIDE_PBB], bb_ptr[5*STRIDE_PBB]);
        }
    }
    else
    {
        /* Store the bounding boxes as xyz.xyz. */
        bb_ptr = grid->bb+((a0-grid->cell0*grid->na_sc)>>grid->na_c_2log)*NNBSBB_B;

        calc_bounding_box(na, nbat->xstride, nbat->x+a0*nbat->xstride,
                          bb_ptr);

        if (gmx_debug_at)
        {
            bbo = (a0 - grid->cell0*grid->na_sc)/grid->na_c;
            fprintf(debug, "%2d %2d %2d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
                    sx, sy, sz,
                    (grid->bb+bbo*NNBSBB_B)[BBL_X],
                    (grid->bb+bbo*NNBSBB_B)[BBU_X],
                    (grid->bb+bbo*NNBSBB_B)[BBL_Y],
                    (grid->bb+bbo*NNBSBB_B)[BBU_Y],
                    (grid->bb+bbo*NNBSBB_B)[BBL_Z],
                    (grid->bb+bbo*NNBSBB_B)[BBU_Z]);
        }
    }
}
/* Spatially sort the atoms within one grid column */
static void sort_columns_simple(const nbnxn_search_t nbs,
                                int dd_zone,
                                nbnxn_grid_t *grid,
                                int a0, int a1,
                                const int *atinfo,
                                rvec *x,
                                nbnxn_atomdata_t *nbat,
                                int cxy_start, int cxy_end,
                                int *sort_work)
{
    int cxy;
    int cx, cy, cz, ncz, cfilled, c;
    int na, ash, ind, a;
    int na_c, ash_c;

    if (debug)
    {
        fprintf(debug, "cell0 %d sorting columns %d - %d, atoms %d - %d\n",
                grid->cell0, cxy_start, cxy_end, a0, a1);
    }

    /* Sort the atoms within each x,y column in 3 dimensions */
    for (cxy = cxy_start; cxy < cxy_end; cxy++)
    {
        cx = cxy/grid->ncy;
        cy = cxy - cx*grid->ncy;

        na  = grid->cxy_na[cxy];
        ncz = grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy];
        ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;

        /* Sort the atoms within each x,y column on z coordinate */
        sort_atoms(ZZ, FALSE,
                   nbs->a+ash, na, x,
                   grid->c0[ZZ],
                   ncz*grid->na_sc*SORT_GRID_OVERSIZE/nbs->box[ZZ][ZZ],
                   ncz*grid->na_sc*SGSF, sort_work);

        /* Fill the ncz cells in this column */
        cfilled = grid->cxy_ind[cxy];
        for (cz = 0; cz < ncz; cz++)
        {
            c = grid->cxy_ind[cxy] + cz;

            ash_c = ash + cz*grid->na_sc;
            na_c  = min(grid->na_sc, na-(ash_c-ash));

            fill_cell(nbs, grid, nbat,
                      ash_c, ash_c+na_c, atinfo, x,
                      grid->na_sc*cx + (dd_zone >> 2),
                      grid->na_sc*cy + (dd_zone & 3),
                      cz*grid->na_sc,
                      NULL);

            /* This copy to bbcz is not really necessary.
             * But it allows us to use the same grid search code
             * for the simple and supersub cell setups.
             */
            if (na_c > 0)
            {
                cfilled = c;
            }
            grid->bbcz[c*NNBSBB_D  ] = grid->bb[cfilled*NNBSBB_B+2];
            grid->bbcz[c*NNBSBB_D+1] = grid->bb[cfilled*NNBSBB_B+6];
        }

        /* Set the unused atom indices to -1 */
        for (ind = na; ind < ncz*grid->na_sc; ind++)
        {
            nbs->a[ash+ind] = -1;
        }
    }
}
/* Spatially sort the atoms within one grid column */
static void sort_columns_supersub(const nbnxn_search_t nbs,
                                  int dd_zone,
                                  nbnxn_grid_t *grid,
                                  int a0, int a1,
                                  const int *atinfo,
                                  rvec *x,
                                  nbnxn_atomdata_t *nbat,
                                  int cxy_start, int cxy_end,
                                  int *sort_work)
{
    int cxy;
    int cx, cy, cz = -1, c = -1, ncz;
    int na, ash, na_c, ind, a;
    int subdiv_z, sub_z, na_z, ash_z;
    int subdiv_y, sub_y, na_y, ash_y;
    int subdiv_x, sub_x, na_x, ash_x;

    /* cppcheck-suppress unassignedVariable */
    float bb_work_array[NNBSBB_B+3], *bb_work_align;

    bb_work_align = (float *)(((size_t)(bb_work_array+3)) & (~((size_t)15)));

    if (debug)
    {
        fprintf(debug, "cell0 %d sorting columns %d - %d, atoms %d - %d\n",
                grid->cell0, cxy_start, cxy_end, a0, a1);
    }

    subdiv_x = grid->na_c;
    subdiv_y = GPU_NSUBCELL_X*subdiv_x;
    subdiv_z = GPU_NSUBCELL_Y*subdiv_y;

    /* Sort the atoms within each x,y column in 3 dimensions */
    for (cxy = cxy_start; cxy < cxy_end; cxy++)
    {
        cx = cxy/grid->ncy;
        cy = cxy - cx*grid->ncy;

        na  = grid->cxy_na[cxy];
        ncz = grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy];
        ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;

        /* Sort the atoms within each x,y column on z coordinate */
        sort_atoms(ZZ, FALSE,
                   nbs->a+ash, na, x,
                   grid->c0[ZZ],
                   ncz*grid->na_sc*SORT_GRID_OVERSIZE/nbs->box[ZZ][ZZ],
                   ncz*grid->na_sc*SGSF, sort_work);

        /* This loop goes over the supercells and subcells along z at once */
        for (sub_z = 0; sub_z < ncz*GPU_NSUBCELL_Z; sub_z++)
        {
            ash_z = ash + sub_z*subdiv_z;
            na_z  = min(subdiv_z, na-(ash_z-ash));

            /* We have already sorted on z */

            if (sub_z % GPU_NSUBCELL_Z == 0)
            {
                cz = sub_z/GPU_NSUBCELL_Z;
                c  = grid->cxy_ind[cxy] + cz;

                /* The number of atoms in this supercell */
                na_c = min(grid->na_sc, na-(ash_z-ash));

                grid->nsubc[c] = min(GPU_NSUBCELL, (na_c+grid->na_c-1)/grid->na_c);

                /* Store the z-boundaries of the super cell */
                grid->bbcz[c*NNBSBB_D  ] = x[nbs->a[ash_z]][ZZ];
                grid->bbcz[c*NNBSBB_D+1] = x[nbs->a[ash_z+na_c-1]][ZZ];
            }

#if GPU_NSUBCELL_Y > 1
            /* Sort the atoms along y */
            sort_atoms(YY, (sub_z & 1),
                       nbs->a+ash_z, na_z, x,
                       grid->c0[YY]+cy*grid->sy, grid->inv_sy,
                       subdiv_y*SGSF, sort_work);
#endif

            for (sub_y = 0; sub_y < GPU_NSUBCELL_Y; sub_y++)
            {
                ash_y = ash_z + sub_y*subdiv_y;
                na_y  = min(subdiv_y, na-(ash_y-ash));

#if GPU_NSUBCELL_X > 1
                /* Sort the atoms along x */
                sort_atoms(XX, ((cz*GPU_NSUBCELL_Y + sub_y) & 1),
                           nbs->a+ash_y, na_y, x,
                           grid->c0[XX]+cx*grid->sx, grid->inv_sx,
                           subdiv_x*SGSF, sort_work);
#endif

                for (sub_x = 0; sub_x < GPU_NSUBCELL_X; sub_x++)
                {
                    ash_x = ash_y + sub_x*subdiv_x;
                    na_x  = min(subdiv_x, na-(ash_x-ash));

                    fill_cell(nbs, grid, nbat,
                              ash_x, ash_x+na_x, atinfo, x,
                              grid->na_c*(cx*GPU_NSUBCELL_X+sub_x) + (dd_zone >> 2),
                              grid->na_c*(cy*GPU_NSUBCELL_Y+sub_y) + (dd_zone & 3),
                              grid->na_c*sub_z,
                              bb_work_align);
                }
            }
        }

        /* Set the unused atom indices to -1 */
        for (ind = na; ind < ncz*grid->na_sc; ind++)
        {
            nbs->a[ash+ind] = -1;
        }
    }
}
/* Determine in which grid column atoms should go */
static void calc_column_indices(nbnxn_grid_t *grid,
                                int a0, int a1,
                                rvec *x,
                                int dd_zone, const int *move,
                                int thread, int nthread,
                                int *cell,
                                int *cxy_na)
{
    int n0, n1, i;
    int cx, cy;

    /* We add one extra cell for particles which moved during DD */
    for (i = 0; i < grid->ncx*grid->ncy+1; i++)
    {
        cxy_na[i] = 0;
    }

    n0 = a0 + (int)((thread+0)*(a1 - a0))/nthread;
    n1 = a0 + (int)((thread+1)*(a1 - a0))/nthread;

    if (dd_zone == 0)
    {
        /* Home zone */
        for (i = n0; i < n1; i++)
        {
            if (move == NULL || move[i] >= 0)
            {
                /* We need to be careful with rounding,
                 * particles might be a few bits outside the local zone.
                 * The int cast takes care of the lower bound,
                 * we will explicitly take care of the upper bound.
                 */
                cx = (int)((x[i][XX] - grid->c0[XX])*grid->inv_sx);
                cy = (int)((x[i][YY] - grid->c0[YY])*grid->inv_sy);

#ifdef DEBUG_NBNXN_GRIDDING
                if (cx < 0 || cx >= grid->ncx ||
                    cy < 0 || cy >= grid->ncy)
                {
                    gmx_fatal(FARGS,
                              "grid cell cx %d cy %d out of range (max %d %d)\n"
                              "atom %f %f %f, grid->c0 %f %f",
                              cx, cy, grid->ncx, grid->ncy,
                              x[i][XX], x[i][YY], x[i][ZZ], grid->c0[XX], grid->c0[YY]);
                }
#endif
                /* Take care of potential rounding issues */
                cx = min(cx, grid->ncx - 1);
                cy = min(cy, grid->ncy - 1);

                /* For the moment cell will contain only the, grid local,
                 * x and y indices, not z.
                 */
                cell[i] = cx*grid->ncy + cy;
            }
            else
            {
                /* Put this moved particle after the end of the grid,
                 * so we can process it later without using conditionals.
                 */
                cell[i] = grid->ncx*grid->ncy;
            }

            cxy_na[cell[i]]++;
        }
    }
    else
    {
        /* Non-home zone */
        for (i = n0; i < n1; i++)
        {
            cx = (int)((x[i][XX] - grid->c0[XX])*grid->inv_sx);
            cy = (int)((x[i][YY] - grid->c0[YY])*grid->inv_sy);

            /* For non-home zones there could be particles outside
             * the non-bonded cut-off range, which have been communicated
             * for bonded interactions only. For the result it doesn't
             * matter where these end up on the grid. For performance
             * we put them in an extra row at the border.
             */
            cx = max(cx, 0);
            cx = min(cx, grid->ncx - 1);
            cy = max(cy, 0);
            cy = min(cy, grid->ncy - 1);

            /* For the moment cell will contain only the, grid local,
             * x and y indices, not z.
             */
            cell[i] = cx*grid->ncy + cy;

            cxy_na[cell[i]]++;
        }
    }
}
/* Determine in which grid cells the atoms should go */
static void calc_cell_indices(const nbnxn_search_t nbs,
                              int dd_zone,
                              nbnxn_grid_t *grid,
                              int a0, int a1,
                              const int *atinfo,
                              rvec *x,
                              const int *move,
                              nbnxn_atomdata_t *nbat)
{
    int  n0, n1, i;
    int  cx, cy, cxy, ncz_max, ncz;
    int  nthread, thread;
    int *cxy_na, cxy_na_i;

    nthread = gmx_omp_nthreads_get(emntPairsearch);

#pragma omp parallel for num_threads(nthread) schedule(static)
    for (thread = 0; thread < nthread; thread++)
    {
        calc_column_indices(grid, a0, a1, x, dd_zone, move, thread, nthread,
                            nbs->cell, nbs->work[thread].cxy_na);
    }

    /* Make the cell index as a function of x and y */
    ncz_max          = 0;
    ncz              = 0;
    grid->cxy_ind[0] = 0;
    for (i = 0; i < grid->ncx*grid->ncy+1; i++)
    {
        /* We set ncz_max at the beginning of the loop instead of at the end
         * to skip i=grid->ncx*grid->ncy, which holds the moved particles
         * that do not need to be ordered on the grid.
         */
        if (ncz > ncz_max)
        {
            ncz_max = ncz;
        }
        cxy_na_i = nbs->work[0].cxy_na[i];
        for (thread = 1; thread < nthread; thread++)
        {
            cxy_na_i += nbs->work[thread].cxy_na[i];
        }
        ncz = (cxy_na_i + grid->na_sc - 1)/grid->na_sc;
        if (nbat->XFormat == nbatX8)
        {
            /* Make the number of cells a multiple of 2 */
            ncz = (ncz + 1) & ~1;
        }
        grid->cxy_ind[i+1] = grid->cxy_ind[i] + ncz;
        /* Clear cxy_na, so we can reuse the array below */
        grid->cxy_na[i] = 0;
    }
    grid->nc = grid->cxy_ind[grid->ncx*grid->ncy] - grid->cxy_ind[0];

    nbat->natoms = (grid->cell0 + grid->nc)*grid->na_sc;

    if (debug)
    {
        fprintf(debug, "ns na_sc %d na_c %d super-cells: %d x %d y %d z %.1f maxz %d\n",
                grid->na_sc, grid->na_c, grid->nc,
                grid->ncx, grid->ncy, grid->nc/((double)(grid->ncx*grid->ncy)),
                ncz_max);
        if (gmx_debug_at)
        {
            i = 0;
            for (cy = 0; cy < grid->ncy; cy++)
            {
                for (cx = 0; cx < grid->ncx; cx++)
                {
                    fprintf(debug, " %2d", grid->cxy_ind[i+1]-grid->cxy_ind[i]);
                    i++;
                }
                fprintf(debug, "\n");
            }
        }
    }

    /* Make sure the work array for sorting is large enough */
    if (ncz_max*grid->na_sc*SGSF > nbs->work[0].sort_work_nalloc)
    {
        for (thread = 0; thread < nbs->nthread_max; thread++)
        {
            nbs->work[thread].sort_work_nalloc =
                over_alloc_large(ncz_max*grid->na_sc*SGSF);
            srenew(nbs->work[thread].sort_work,
                   nbs->work[thread].sort_work_nalloc);
            /* When not in use, all elements should be -1 */
            for (i = 0; i < nbs->work[thread].sort_work_nalloc; i++)
            {
                nbs->work[thread].sort_work[i] = -1;
            }
        }
    }

    /* Now we know the dimensions we can fill the grid.
     * This is the first, unsorted fill. We sort the columns after this.
     */
    for (i = a0; i < a1; i++)
    {
        /* At this point nbs->cell contains the local grid x,y indices */
        cxy = nbs->cell[i];
        nbs->a[(grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc + grid->cxy_na[cxy]++] = i;
    }

    if (dd_zone == 0)
    {
        /* Set the cell indices for the moved particles */
        n0 = grid->nc*grid->na_sc;
        n1 = grid->nc*grid->na_sc+grid->cxy_na[grid->ncx*grid->ncy];
        for (i = n0; i < n1; i++)
        {
            nbs->cell[nbs->a[i]] = i;
        }
    }

    /* Sort the super-cell columns along z into the sub-cells. */
#pragma omp parallel for num_threads(nbs->nthread_max) schedule(static)
    for (thread = 0; thread < nbs->nthread_max; thread++)
    {
        if (grid->bSimple)
        {
            sort_columns_simple(nbs, dd_zone, grid, a0, a1, atinfo, x, nbat,
                                ((thread+0)*grid->ncx*grid->ncy)/nthread,
                                ((thread+1)*grid->ncx*grid->ncy)/nthread,
                                nbs->work[thread].sort_work);
        }
        else
        {
            sort_columns_supersub(nbs, dd_zone, grid, a0, a1, atinfo, x, nbat,
                                  ((thread+0)*grid->ncx*grid->ncy)/nthread,
                                  ((thread+1)*grid->ncx*grid->ncy)/nthread,
                                  nbs->work[thread].sort_work);
        }
    }

#ifdef NBNXN_SEARCH_BB_SSE
    if (grid->bSimple && nbat->XFormat == nbatX8)
    {
        combine_bounding_box_pairs(grid, grid->bb);
    }
#endif

    if (!grid->bSimple)
    {
        grid->nsubc_tot = 0;
        for (i = 0; i < grid->nc; i++)
        {
            grid->nsubc_tot += grid->nsubc[i];
        }
    }

    if (debug)
    {
        if (grid->bSimple)
        {
            print_bbsizes_simple(debug, nbs, grid);
        }
        else
        {
            fprintf(debug, "ns non-zero sub-cells: %d average atoms %.2f\n",
                    grid->nsubc_tot, (a1-a0)/(double)grid->nsubc_tot);

            print_bbsizes_supersub(debug, nbs, grid);
        }
    }
}
/* Resets the flag for every buffer block covering natoms atoms */
static void init_buffer_flags(nbnxn_buffer_flags_t *flags,
                              int natoms)
{
    int b;

    flags->nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE;
    if (flags->nflag > flags->flag_nalloc)
    {
        flags->flag_nalloc = over_alloc_large(flags->nflag);
        srenew(flags->flag, flags->flag_nalloc);
    }
    for (b = 0; b < flags->nflag; b++)
    {
        flags->flag[b] = 0;
    }
}
/* Sets up a grid and puts the atoms on the grid.
 * This function only operates on one domain of the domain decomposition.
 * Note that without domain decomposition there is only one domain.
 */
void nbnxn_put_on_grid(nbnxn_search_t nbs,
                       int ePBC, matrix box,
                       int dd_zone,
                       rvec corner0, rvec corner1,
                       int a0, int a1,
                       real atom_density,
                       const int *atinfo,
                       rvec *x,
                       int nmoved, int *move,
                       int nb_kernel_type,
                       nbnxn_atomdata_t *nbat)
{
    nbnxn_grid_t *grid;
    int           n;
    int           nc_max_grid, nc_max;

    grid = &nbs->grid[dd_zone];

    nbs_cycle_start(&nbs->cc[enbsCCgrid]);

    grid->bSimple = nbnxn_kernel_pairlist_simple(nb_kernel_type);

    grid->na_c      = nbnxn_kernel_to_ci_size(nb_kernel_type);
    grid->na_cj     = nbnxn_kernel_to_cj_size(nb_kernel_type);
    grid->na_sc     = (grid->bSimple ? 1 : GPU_NSUBCELL)*grid->na_c;
    grid->na_c_2log = get_2log(grid->na_c);

    nbat->na_c = grid->na_c;

    if (dd_zone == 0)
    {
        grid->cell0 = 0;
    }
    else
    {
        grid->cell0 =
            (nbs->grid[dd_zone-1].cell0 + nbs->grid[dd_zone-1].nc)*
            nbs->grid[dd_zone-1].na_sc/grid->na_sc;
    }

    n = a1 - a0;

    if (dd_zone == 0)
    {
        nbs->ePBC = ePBC;
        copy_mat(box, nbs->box);

        if (atom_density >= 0)
        {
            grid->atom_density = atom_density;
        }
        else
        {
            grid->atom_density = grid_atom_density(n-nmoved, corner0, corner1);
        }

        nbs->natoms_local = a1 - nmoved;
        /* We assume that nbnxn_put_on_grid is called first
         * for the local atoms (dd_zone=0).
         */
        nbs->natoms_nonlocal = a1 - nmoved;
    }
    else
    {
        nbs->natoms_nonlocal = max(nbs->natoms_nonlocal, a1);
    }

    nc_max_grid = set_grid_size_xy(nbs, grid,
                                   dd_zone, n-nmoved, corner0, corner1,
                                   nbs->grid[0].atom_density,
                                   nbat->XFormat);

    nc_max = grid->cell0 + nc_max_grid;

    if (a1 > nbs->cell_nalloc)
    {
        nbs->cell_nalloc = over_alloc_large(a1);
        srenew(nbs->cell, nbs->cell_nalloc);
    }

    /* To avoid conditionals we store the moved particles at the end of a,
     * make sure we have enough space.
     */
    if (nc_max*grid->na_sc + nmoved > nbs->a_nalloc)
    {
        nbs->a_nalloc = over_alloc_large(nc_max*grid->na_sc + nmoved);
        srenew(nbs->a, nbs->a_nalloc);
    }

    /* We need padding up to a multiple of the buffer flag size: simply add */
    if (nc_max*grid->na_sc + NBNXN_BUFFERFLAG_SIZE > nbat->nalloc)
    {
        nbnxn_atomdata_realloc(nbat, nc_max*grid->na_sc+NBNXN_BUFFERFLAG_SIZE);
    }

    calc_cell_indices(nbs, dd_zone, grid, a0, a1, atinfo, x, move, nbat);

    if (dd_zone == 0)
    {
        nbat->natoms_local = nbat->natoms;
    }

    nbs_cycle_stop(&nbs->cc[enbsCCgrid]);
}
/* Calls nbnxn_put_on_grid for all non-local domains */
void nbnxn_put_on_grid_nonlocal(nbnxn_search_t nbs,
                                const gmx_domdec_zones_t *zones,
                                const int *atinfo,
                                rvec *x,
                                int nb_kernel_type,
                                nbnxn_atomdata_t *nbat)
{
    int  zone, d;
    rvec c0, c1;

    for (zone = 1; zone < zones->n; zone++)
    {
        for (d = 0; d < DIM; d++)
        {
            c0[d] = zones->size[zone].bb_x0[d];
            c1[d] = zones->size[zone].bb_x1[d];
        }

        nbnxn_put_on_grid(nbs, nbs->ePBC, NULL,
                          zone, c0, c1,
                          zones->cg_range[zone],
                          zones->cg_range[zone+1],
                          -1,
                          atinfo,
                          x,
                          0, NULL,
                          nb_kernel_type,
                          nbat);
    }
}
/* Add simple grid type information to the local super/sub grid */
void nbnxn_grid_add_simple(nbnxn_search_t nbs,
                           nbnxn_atomdata_t *nbat)
{
    nbnxn_grid_t *grid;
    float        *bbcz, *bb;
    int           ncd, sc;

    grid = &nbs->grid[0];

    if (grid->bSimple)
    {
        gmx_incons("nbnxn_grid_add_simple called with a simple grid");
    }

    ncd = grid->na_sc/NBNXN_CPU_CLUSTER_I_SIZE;

    if (grid->nc*ncd > grid->nc_nalloc_simple)
    {
        grid->nc_nalloc_simple = over_alloc_large(grid->nc*ncd);
        srenew(grid->bbcz_simple, grid->nc_nalloc_simple*NNBSBB_D);
        srenew(grid->bb_simple, grid->nc_nalloc_simple*NNBSBB_B);
        srenew(grid->flags_simple, grid->nc_nalloc_simple);
        if (nbat->XFormat == nbatX8)
        {
            sfree_aligned(grid->bbj);
            snew_aligned(grid->bbj, grid->nc_nalloc_simple/2, 16);
        }
    }

    bbcz = grid->bbcz_simple;
    bb   = grid->bb_simple;

#pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntPairsearch)) schedule(static)
    for (sc = 0; sc < grid->nc; sc++)
    {
        int c, tx, na;

        for (c = 0; c < ncd; c++)
        {
            tx = sc*ncd + c;

            na = NBNXN_CPU_CLUSTER_I_SIZE;
            while (na > 0 &&
                   nbat->type[tx*NBNXN_CPU_CLUSTER_I_SIZE+na-1] == nbat->ntype-1)
            {
                na--;
            }

            if (na > 0)
            {
                switch (nbat->XFormat)
                {
                    case nbatX4:
                        /* PACK_X4==NBNXN_CPU_CLUSTER_I_SIZE, so this is simple */
                        calc_bounding_box_x_x4(na, nbat->x+tx*STRIDE_P4,
                                               bb+tx*NNBSBB_B);
                        break;
                    case nbatX8:
                        /* PACK_X8>NBNXN_CPU_CLUSTER_I_SIZE, more complicated */
                        calc_bounding_box_x_x8(na, nbat->x+X8_IND_A(tx*NBNXN_CPU_CLUSTER_I_SIZE),
                                               bb+tx*NNBSBB_B);
                        break;
                    default:
                        calc_bounding_box(na, nbat->xstride,
                                          nbat->x+tx*NBNXN_CPU_CLUSTER_I_SIZE*nbat->xstride,
                                          bb+tx*NNBSBB_B);
                        break;
                }
                bbcz[tx*NNBSBB_D+0] = bb[tx*NNBSBB_B         +ZZ];
                bbcz[tx*NNBSBB_D+1] = bb[tx*NNBSBB_B+NNBSBB_C+ZZ];

                /* No interaction optimization yet here */
                grid->flags_simple[tx] = NBNXN_CI_DO_LJ(0) | NBNXN_CI_DO_COUL(0);
            }
            else
            {
                grid->flags_simple[tx] = 0;
            }
        }
    }

#ifdef NBNXN_SEARCH_BB_SSE
    if (grid->bSimple && nbat->XFormat == nbatX8)
    {
        combine_bounding_box_pairs(grid, grid->bb_simple);
    }
#endif
}
void nbnxn_get_ncells(nbnxn_search_t nbs, int *ncx, int *ncy)
{
    *ncx = nbs->grid[0].ncx;
    *ncy = nbs->grid[0].ncy;
}

void nbnxn_get_atomorder(nbnxn_search_t nbs, int **a, int *n)
{
    const nbnxn_grid_t *grid;

    grid = &nbs->grid[0];

    /* Return the atom order for the home cell (index 0) */
    *a = nbs->a;

    *n = grid->cxy_ind[grid->ncx*grid->ncy]*grid->na_sc;
}

void nbnxn_set_atomorder(nbnxn_search_t nbs)
{
    nbnxn_grid_t *grid;
    int           ao, cx, cy, cxy, cz, j;

    /* Set the atom order for the home cell (index 0) */
    grid = &nbs->grid[0];

    ao = 0;
    for (cx = 0; cx < grid->ncx; cx++)
    {
        for (cy = 0; cy < grid->ncy; cy++)
        {
            cxy = cx*grid->ncy + cy;
            j   = grid->cxy_ind[cxy]*grid->na_sc;
            for (cz = 0; cz < grid->cxy_na[cxy]; cz++)
            {
                nbs->a[j]     = ao;
                nbs->cell[ao] = j;
                ao++;
                j++;
            }
        }
    }
}
/* Determines the cell range along one dimension that
 * the bounding box b0 - b1 sees.
 */
static void get_cell_range(real b0, real b1,
                           int nc, real c0, real s, real invs,
                           real d2, real r2, int *cf, int *cl)
{
    *cf = max((int)((b0 - c0)*invs), 0);

    while (*cf > 0 && d2 + sqr((b0 - c0) - (*cf-1+1)*s) < r2)
    {
        (*cf)--;
    }

    *cl = min((int)((b1 - c0)*invs), nc-1);
    while (*cl < nc-1 && d2 + sqr((*cl+1)*s - (b1 - c0)) < r2)
    {
        (*cl)++;
    }
}
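
/* Worked example for get_cell_range() (explanatory comment): with cell
 * size s = 1, c0 = 0, cut-off r2 = 4 (r = 2) and no offset in the other
 * dimensions (d2 = 0), a bb spanning b0 = 3.5 to b1 = 4.5 starts with
 * cf = 3. The first while-loop then extends cf downwards as long as the
 * gap between b0 and the upper edge of cell cf-1 (at (*cf-1+1)*s) is
 * within the cut-off: cells 2 and 1 qualify (gaps 0.5 and 1.5 < 2),
 * cell 0 does not (gap 2.5), giving cf = 1. The second loop extends cl
 * upwards symmetrically from the upper bound b1.
 */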
/* Reference code calculating the distance^2 between two bounding boxes */
static float box_dist2(float bx0, float bx1, float by0,
                       float by1, float bz0, float bz1,
                       const float *bb)
{
    float d2;
    float dl, dh, dm, dm0;

    d2 = 0;

    dl  = bx0 - bb[BBU_X];
    dh  = bb[BBL_X] - bx1;
    dm  = max(dl, dh);
    dm0 = max(dm, 0);
    d2 += dm0*dm0;

    dl  = by0 - bb[BBU_Y];
    dh  = bb[BBL_Y] - by1;
    dm  = max(dl, dh);
    dm0 = max(dm, 0);
    d2 += dm0*dm0;

    dl  = bz0 - bb[BBU_Z];
    dh  = bb[BBL_Z] - bz1;
    dm  = max(dl, dh);
    dm0 = max(dm, 0);
    d2 += dm0*dm0;

    return d2;
}

/* Plain C code calculating the distance^2 between two bounding boxes */
static float subc_bb_dist2(int si, const float *bb_i_ci,
                           int csj, const float *bb_j_all)
{
    const float *bb_i, *bb_j;
    float        d2;
    float        dl, dh, dm, dm0;

    bb_i = bb_i_ci  +  si*NNBSBB_B;
    bb_j = bb_j_all + csj*NNBSBB_B;

    d2 = 0;

    dl  = bb_i[BBL_X] - bb_j[BBU_X];
    dh  = bb_j[BBL_X] - bb_i[BBU_X];
    dm  = max(dl, dh);
    dm0 = max(dm, 0);
    d2 += dm0*dm0;

    dl  = bb_i[BBL_Y] - bb_j[BBU_Y];
    dh  = bb_j[BBL_Y] - bb_i[BBU_Y];
    dm  = max(dl, dh);
    dm0 = max(dm, 0);
    d2 += dm0*dm0;

    dl  = bb_i[BBL_Z] - bb_j[BBU_Z];
    dh  = bb_j[BBL_Z] - bb_i[BBU_Z];
    dm  = max(dl, dh);
    dm0 = max(dm, 0);
    d2 += dm0*dm0;

    return d2;
}
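
/* Numeric example for the clamped box distance above (explanatory
 * comment): per dimension, dl and dh are the two possible gaps between
 * the boxes, and at most one of them is positive. Take x-intervals
 * [0,1] for bb_i and [3,4] for bb_j: dl = 0 - 4 = -4, dh = 3 - 1 = 2,
 * so max(dl,dh,0) = 2 and the x-contribution to d2 is 4. For
 * overlapping intervals both dl and dh are negative and the
 * contribution is 0, which is exactly what the max with 0 enforces.
 */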
#ifdef NBNXN_SEARCH_BB_SSE

/* SSE code for bb distance for bb format xyz0 */
static float subc_bb_dist2_sse(int na_c,
                               int si, const float *bb_i_ci,
                               int csj, const float *bb_j_all)
{
    const float *bb_i, *bb_j;

    __m128 bb_i_SSE0, bb_i_SSE1;
    __m128 bb_j_SSE0, bb_j_SSE1;
    __m128 dl_SSE;
    __m128 dh_SSE;
    __m128 dm_SSE;
    __m128 dm0_SSE;
    __m128 d2_SSE;
#ifndef GMX_X86_SSE4_1
    float d2_array[7], *d2_align;

    d2_align = (float *)(((size_t)(d2_array+3)) & (~((size_t)15)));
#else
    float d2;
#endif

    bb_i = bb_i_ci  +  si*NNBSBB_B;
    bb_j = bb_j_all + csj*NNBSBB_B;

    bb_i_SSE0 = _mm_load_ps(bb_i);
    bb_i_SSE1 = _mm_load_ps(bb_i+NNBSBB_C);
    bb_j_SSE0 = _mm_load_ps(bb_j);
    bb_j_SSE1 = _mm_load_ps(bb_j+NNBSBB_C);

    dl_SSE  = _mm_sub_ps(bb_i_SSE0, bb_j_SSE1);
    dh_SSE  = _mm_sub_ps(bb_j_SSE0, bb_i_SSE1);

    dm_SSE  = _mm_max_ps(dl_SSE, dh_SSE);
    dm0_SSE = _mm_max_ps(dm_SSE, _mm_setzero_ps());
#ifndef GMX_X86_SSE4_1
    d2_SSE  = _mm_mul_ps(dm0_SSE, dm0_SSE);

    _mm_store_ps(d2_align, d2_SSE);

    return d2_align[0] + d2_align[1] + d2_align[2];
#else
    /* SSE4.1 dot product of components 0,1,2 */
    d2_SSE  = _mm_dp_ps(dm0_SSE, dm0_SSE, 0x71);

    _mm_store_ss(&d2, d2_SSE);

    return d2;
#endif
}
/* Calculate bb bounding distances of bb_i[si,...,si+3] and store them in d2 */
#define SUBC_BB_DIST2_SSE_XXXX_INNER(si, bb_i, d2) \
    {                                              \
        int    shi;                                \
                                                   \
        __m128 dx_0, dy_0, dz_0;                   \
        __m128 dx_1, dy_1, dz_1;                   \
                                                   \
        __m128 mx, my, mz;                         \
        __m128 m0x, m0y, m0z;                      \
                                                   \
        __m128 d2x, d2y, d2z;                      \
        __m128 d2s, d2t;                           \
                                                   \
        shi = si*NNBSBB_D*DIM;                     \
                                                   \
        xi_l = _mm_load_ps(bb_i+shi+0*STRIDE_PBB); \
        yi_l = _mm_load_ps(bb_i+shi+1*STRIDE_PBB); \
        zi_l = _mm_load_ps(bb_i+shi+2*STRIDE_PBB); \
        xi_h = _mm_load_ps(bb_i+shi+3*STRIDE_PBB); \
        yi_h = _mm_load_ps(bb_i+shi+4*STRIDE_PBB); \
        zi_h = _mm_load_ps(bb_i+shi+5*STRIDE_PBB); \
                                                   \
        dx_0 = _mm_sub_ps(xi_l, xj_h);             \
        dy_0 = _mm_sub_ps(yi_l, yj_h);             \
        dz_0 = _mm_sub_ps(zi_l, zj_h);             \
                                                   \
        dx_1 = _mm_sub_ps(xj_l, xi_h);             \
        dy_1 = _mm_sub_ps(yj_l, yi_h);             \
        dz_1 = _mm_sub_ps(zj_l, zi_h);             \
                                                   \
        mx   = _mm_max_ps(dx_0, dx_1);             \
        my   = _mm_max_ps(dy_0, dy_1);             \
        mz   = _mm_max_ps(dz_0, dz_1);             \
                                                   \
        m0x  = _mm_max_ps(mx, zero);               \
        m0y  = _mm_max_ps(my, zero);               \
        m0z  = _mm_max_ps(mz, zero);               \
                                                   \
        d2x  = _mm_mul_ps(m0x, m0x);               \
        d2y  = _mm_mul_ps(m0y, m0y);               \
        d2z  = _mm_mul_ps(m0z, m0z);               \
                                                   \
        d2s  = _mm_add_ps(d2x, d2y);               \
        d2t  = _mm_add_ps(d2s, d2z);               \
                                                   \
        _mm_store_ps(d2+si, d2t);                  \
    }
/* SSE code for nsi bb distances for bb format xxxxyyyyzzzz */
static void subc_bb_dist2_sse_xxxx(const float *bb_j,
                                   int nsi, const float *bb_i,
                                   float *d2)
{
    __m128 xj_l, yj_l, zj_l;
    __m128 xj_h, yj_h, zj_h;
    __m128 xi_l, yi_l, zi_l;
    __m128 xi_h, yi_h, zi_h;

    __m128 zero;

    zero = _mm_setzero_ps();

    xj_l = _mm_set1_ps(bb_j[0*STRIDE_PBB]);
    yj_l = _mm_set1_ps(bb_j[1*STRIDE_PBB]);
    zj_l = _mm_set1_ps(bb_j[2*STRIDE_PBB]);
    xj_h = _mm_set1_ps(bb_j[3*STRIDE_PBB]);
    yj_h = _mm_set1_ps(bb_j[4*STRIDE_PBB]);
    zj_h = _mm_set1_ps(bb_j[5*STRIDE_PBB]);

    /* Here we "loop" over si (0,STRIDE_PBB) from 0 to nsi with step STRIDE_PBB.
     * But as we know the number of iterations is 1 or 2, we unroll manually.
     */
    SUBC_BB_DIST2_SSE_XXXX_INNER(0, bb_i, d2);
    if (STRIDE_PBB < nsi)
    {
        SUBC_BB_DIST2_SSE_XXXX_INNER(STRIDE_PBB, bb_i, d2);
    }
}

#endif /* NBNXN_SEARCH_BB_SSE */
/* Plain C function which determines if any atom pair between two cells
 * is within distance sqrt(rl2).
 */
static gmx_bool subc_in_range_x(int na_c,
                                int si, const real *x_i,
                                int csj, int stride, const real *x_j,
                                real rl2)
{
    int  i, j, i0, j0;
    real d2;

    for (i = 0; i < na_c; i++)
    {
        i0 = (si*na_c + i)*DIM;
        for (j = 0; j < na_c; j++)
        {
            j0 = (csj*na_c + j)*stride;

            d2 = sqr(x_i[i0  ] - x_j[j0  ]) +
                 sqr(x_i[i0+1] - x_j[j0+1]) +
                 sqr(x_i[i0+2] - x_j[j0+2]);

            if (d2 < rl2)
            {
                return TRUE;
            }
        }
    }

    return FALSE;
}
/* SSE function which determines if any atom pair between two cells,
 * both with 8 atoms, is within distance sqrt(rl2).
 */
static gmx_bool subc_in_range_sse8(int na_c,
                                   int si, const real *x_i,
                                   int csj, int stride, const real *x_j,
                                   real rl2)
{
#ifdef NBNXN_SEARCH_SSE_SINGLE
    __m128 ix_SSE0, iy_SSE0, iz_SSE0;
    __m128 ix_SSE1, iy_SSE1, iz_SSE1;

    __m128 rc2_SSE;

    int na_c_sse;
    int j0, j1;

    rc2_SSE = _mm_set1_ps(rl2);

    na_c_sse = NBNXN_GPU_CLUSTER_SIZE/STRIDE_PBB;
    ix_SSE0  = _mm_load_ps(x_i+(si*na_c_sse*DIM+0)*STRIDE_PBB);
    iy_SSE0  = _mm_load_ps(x_i+(si*na_c_sse*DIM+1)*STRIDE_PBB);
    iz_SSE0  = _mm_load_ps(x_i+(si*na_c_sse*DIM+2)*STRIDE_PBB);
    ix_SSE1  = _mm_load_ps(x_i+(si*na_c_sse*DIM+3)*STRIDE_PBB);
    iy_SSE1  = _mm_load_ps(x_i+(si*na_c_sse*DIM+4)*STRIDE_PBB);
    iz_SSE1  = _mm_load_ps(x_i+(si*na_c_sse*DIM+5)*STRIDE_PBB);

    /* We loop from the outer to the inner particles to maximize
     * the chance that we find a pair in range quickly and return.
     */
    j0 = csj*na_c;
    j1 = j0 + na_c - 1;
    while (j0 < j1)
    {
        __m128 jx0_SSE, jy0_SSE, jz0_SSE;
        __m128 jx1_SSE, jy1_SSE, jz1_SSE;

        __m128 dx_SSE0, dy_SSE0, dz_SSE0;
        __m128 dx_SSE1, dy_SSE1, dz_SSE1;
        __m128 dx_SSE2, dy_SSE2, dz_SSE2;
        __m128 dx_SSE3, dy_SSE3, dz_SSE3;

        __m128 rsq_SSE0;
        __m128 rsq_SSE1;
        __m128 rsq_SSE2;
        __m128 rsq_SSE3;

        __m128 wco_SSE0;
        __m128 wco_SSE1;
        __m128 wco_SSE2;
        __m128 wco_SSE3;
        __m128 wco_any_SSE01, wco_any_SSE23, wco_any_SSE;

        jx0_SSE = _mm_load1_ps(x_j+j0*stride+0);
        jy0_SSE = _mm_load1_ps(x_j+j0*stride+1);
        jz0_SSE = _mm_load1_ps(x_j+j0*stride+2);

        jx1_SSE = _mm_load1_ps(x_j+j1*stride+0);
        jy1_SSE = _mm_load1_ps(x_j+j1*stride+1);
        jz1_SSE = _mm_load1_ps(x_j+j1*stride+2);

        /* Calculate distance */
        dx_SSE0 = _mm_sub_ps(ix_SSE0, jx0_SSE);
        dy_SSE0 = _mm_sub_ps(iy_SSE0, jy0_SSE);
        dz_SSE0 = _mm_sub_ps(iz_SSE0, jz0_SSE);
        dx_SSE1 = _mm_sub_ps(ix_SSE1, jx0_SSE);
        dy_SSE1 = _mm_sub_ps(iy_SSE1, jy0_SSE);
        dz_SSE1 = _mm_sub_ps(iz_SSE1, jz0_SSE);
        dx_SSE2 = _mm_sub_ps(ix_SSE0, jx1_SSE);
        dy_SSE2 = _mm_sub_ps(iy_SSE0, jy1_SSE);
        dz_SSE2 = _mm_sub_ps(iz_SSE0, jz1_SSE);
        dx_SSE3 = _mm_sub_ps(ix_SSE1, jx1_SSE);
        dy_SSE3 = _mm_sub_ps(iy_SSE1, jy1_SSE);
        dz_SSE3 = _mm_sub_ps(iz_SSE1, jz1_SSE);

        /* rsq = dx*dx+dy*dy+dz*dz */
        rsq_SSE0 = gmx_mm_calc_rsq_ps(dx_SSE0, dy_SSE0, dz_SSE0);
        rsq_SSE1 = gmx_mm_calc_rsq_ps(dx_SSE1, dy_SSE1, dz_SSE1);
        rsq_SSE2 = gmx_mm_calc_rsq_ps(dx_SSE2, dy_SSE2, dz_SSE2);
        rsq_SSE3 = gmx_mm_calc_rsq_ps(dx_SSE3, dy_SSE3, dz_SSE3);

        wco_SSE0 = _mm_cmplt_ps(rsq_SSE0, rc2_SSE);
        wco_SSE1 = _mm_cmplt_ps(rsq_SSE1, rc2_SSE);
        wco_SSE2 = _mm_cmplt_ps(rsq_SSE2, rc2_SSE);
        wco_SSE3 = _mm_cmplt_ps(rsq_SSE3, rc2_SSE);

        wco_any_SSE01 = _mm_or_ps(wco_SSE0, wco_SSE1);
        wco_any_SSE23 = _mm_or_ps(wco_SSE2, wco_SSE3);
        wco_any_SSE   = _mm_or_ps(wco_any_SSE01, wco_any_SSE23);

        if (_mm_movemask_ps(wco_any_SSE))
        {
            return TRUE;
        }

        j0++;
        j1--;
    }
    return FALSE;

#else
    gmx_incons("SSE function called without SSE support");

    return TRUE;
#endif
}
/* Returns the j sub-cell for index cj_ind */
static int nbl_cj(const nbnxn_pairlist_t *nbl, int cj_ind)
{
    return nbl->cj4[cj_ind >> NBNXN_GPU_JGROUP_SIZE_2LOG].cj[cj_ind & (NBNXN_GPU_JGROUP_SIZE - 1)];
}
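
/* Indexing example for the cj4 packing above (explanatory comment,
 * assuming NBNXN_GPU_JGROUP_SIZE == 4, i.e. _2LOG == 2): j-clusters
 * are stored four per cj4 unit, so a linear index cj_ind = 10 resolves
 * to group 10>>2 == 2 and slot 10 & 3 == 2, i.e. nbl->cj4[2].cj[2].
 * The same group index selects the interaction masks in imei[].
 */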
/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
static unsigned nbl_imask0(const nbnxn_pairlist_t *nbl, int cj_ind)
{
    return nbl->cj4[cj_ind >> NBNXN_GPU_JGROUP_SIZE_2LOG].imei[0].imask;
}

/* Ensures there is enough space for extra exclusion masks */
static void check_excl_space(nbnxn_pairlist_t *nbl, int extra)
{
    if (nbl->nexcl+extra > nbl->excl_nalloc)
    {
        nbl->excl_nalloc = over_alloc_small(nbl->nexcl+extra);
        nbnxn_realloc_void((void **)&nbl->excl,
                           nbl->nexcl*sizeof(*nbl->excl),
                           nbl->excl_nalloc*sizeof(*nbl->excl),
                           nbl->alloc, nbl->free);
    }
}
/* Ensures there is enough space for ncell extra j-cells in the list */
static void check_subcell_list_space_simple(nbnxn_pairlist_t *nbl,
                                            int ncell)
{
    int cj_max;

    cj_max = nbl->ncj + ncell;

    if (cj_max > nbl->cj_nalloc)
    {
        nbl->cj_nalloc = over_alloc_small(cj_max);
        nbnxn_realloc_void((void **)&nbl->cj,
                           nbl->ncj*sizeof(*nbl->cj),
                           nbl->cj_nalloc*sizeof(*nbl->cj),
                           nbl->alloc, nbl->free);
    }
}

/* Ensures there is enough space for ncell extra j-subcells in the list */
static void check_subcell_list_space_supersub(nbnxn_pairlist_t *nbl,
                                              int nsupercell)
{
    int ncj4_max, j4, j, w, t;

#define NWARP 2
#define WARP_SIZE 32

    /* We can have maximally nsupercell*GPU_NSUBCELL sj lists */
    /* We can store 4 j-subcell - i-supercell pairs in one struct.
     * Since we round down, we need one extra entry.
     */
    ncj4_max = ((nbl->work->cj_ind + nsupercell*GPU_NSUBCELL + NBNXN_GPU_JGROUP_SIZE - 1) >> NBNXN_GPU_JGROUP_SIZE_2LOG);

    if (ncj4_max > nbl->cj4_nalloc)
    {
        nbl->cj4_nalloc = over_alloc_small(ncj4_max);
        nbnxn_realloc_void((void **)&nbl->cj4,
                           nbl->work->cj4_init*sizeof(*nbl->cj4),
                           nbl->cj4_nalloc*sizeof(*nbl->cj4),
                           nbl->alloc, nbl->free);
    }

    if (ncj4_max > nbl->work->cj4_init)
    {
        for (j4 = nbl->work->cj4_init; j4 < ncj4_max; j4++)
        {
            /* No i-subcells and no excl's in the list initially */
            for (w = 0; w < NWARP; w++)
            {
                nbl->cj4[j4].imei[w].imask    = 0U;
                nbl->cj4[j4].imei[w].excl_ind = 0;
            }
        }
        nbl->work->cj4_init = ncj4_max;
    }
}
/* Sets all excl masks for one GPU warp to "no exclusions" */
static void set_no_excls(nbnxn_excl_t *excl)
{
    int t;

    for (t = 0; t < WARP_SIZE; t++)
    {
        /* Turn all interaction bits on */
        excl->pair[t] = NBNXN_INT_MASK_ALL;
    }
}

/* Initializes a single nbnxn_pairlist_t data structure */
static void nbnxn_init_pairlist(nbnxn_pairlist_t *nbl,
                                gmx_bool bSimple,
                                nbnxn_alloc_t *alloc,
                                nbnxn_free_t  *free)
{
    if (alloc == NULL)
    {
        nbl->alloc = nbnxn_alloc_aligned;
    }
    else
    {
        nbl->alloc = alloc;
    }
    if (free == NULL)
    {
        nbl->free = nbnxn_free_aligned;
    }
    else
    {
        nbl->free = free;
    }

    nbl->bSimple = bSimple;

    /* We need one element extra in sj, so alloc initially with 1 */
    nbl->cj4_nalloc = 0;

    nbl->excl_nalloc = 0;
    nbl->nexcl       = 0;
    check_excl_space(nbl, 1);
    nbl->nexcl       = 1;
    set_no_excls(&nbl->excl[0]);

    snew(nbl->work, 1);
#ifdef NBNXN_BBXXXX
    snew_aligned(nbl->work->bb_ci, GPU_NSUBCELL/STRIDE_PBB*NNBSBB_XXXX, NBNXN_MEM_ALIGN);
#else
    snew_aligned(nbl->work->bb_ci, GPU_NSUBCELL*NNBSBB_B, NBNXN_MEM_ALIGN);
#endif
    snew_aligned(nbl->work->x_ci, NBNXN_NA_SC_MAX*DIM, NBNXN_MEM_ALIGN);
#ifdef GMX_NBNXN_SIMD
    snew_aligned(nbl->work->x_ci_simd_4xn, 1, NBNXN_MEM_ALIGN);
    snew_aligned(nbl->work->x_ci_simd_2xnn, 1, NBNXN_MEM_ALIGN);
#endif
    snew_aligned(nbl->work->d2, GPU_NSUBCELL, NBNXN_MEM_ALIGN);
}
void nbnxn_init_pairlist_set(nbnxn_pairlist_set_t *nbl_list,
                             gmx_bool bSimple, gmx_bool bCombined,
                             nbnxn_alloc_t *alloc,
                             nbnxn_free_t  *free)
{
    int i;

    nbl_list->bSimple   = bSimple;
    nbl_list->bCombined = bCombined;

    nbl_list->nnbl = gmx_omp_nthreads_get(emntNonbonded);

    if (!nbl_list->bCombined &&
        nbl_list->nnbl > NBNXN_BUFFERFLAG_MAX_THREADS)
    {
        gmx_fatal(FARGS, "%d OpenMP threads were requested. Since the non-bonded force buffer reduction is prohibitively slow with more than %d threads, we do not allow this. Use %d or less OpenMP threads.",
                  nbl_list->nnbl, NBNXN_BUFFERFLAG_MAX_THREADS, NBNXN_BUFFERFLAG_MAX_THREADS);
    }

    snew(nbl_list->nbl, nbl_list->nnbl);
    /* Execute in order to avoid memory interleaving between threads */
#pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
    for (i = 0; i < nbl_list->nnbl; i++)
    {
        /* Allocate the nblist data structure locally on each thread
         * to optimize memory access for NUMA architectures.
         */
        snew(nbl_list->nbl[i], 1);

        /* Only list 0 is used on the GPU, use normal allocation for i>0 */
        if (i == 0)
        {
            nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, alloc, free);
        }
        else
        {
            nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, NULL, NULL);
        }
    }
}
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics_simple(FILE *fp, const nbnxn_pairlist_t *nbl,
                                           const nbnxn_search_t nbs, real rl)
{
    const nbnxn_grid_t *grid;
    int                 cs[SHIFTS];
    int                 s, i, j;
    int                 npexcl;

    /* This code only produces correct statistics with domain decomposition */
    grid = &nbs->grid[0];

    fprintf(fp, "nbl nci %d ncj %d\n",
            nbl->nci, nbl->ncj);
    fprintf(fp, "nbl na_sc %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_sc, rl, nbl->ncj, nbl->ncj/(double)grid->nc,
            nbl->ncj/(double)grid->nc*grid->na_sc,
            nbl->ncj/(double)grid->nc*grid->na_sc/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nc*grid->na_sc/det(nbs->box)));

    fprintf(fp, "nbl average j cell list length %.1f\n",
            0.25*nbl->ncj/(double)nbl->nci);

    for (s = 0; s < SHIFTS; s++)
    {
        cs[s] = 0;
    }
    npexcl = 0;
    for (i = 0; i < nbl->nci; i++)
    {
        cs[nbl->ci[i].shift & NBNXN_CI_SHIFT] +=
            nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start;

        j = nbl->ci[i].cj_ind_start;
        while (j < nbl->ci[i].cj_ind_end &&
               nbl->cj[j].excl != NBNXN_INT_MASK_ALL)
        {
            npexcl++;
            j++;
        }
    }
    fprintf(fp, "nbl cell pairs, total: %d excl: %d %.1f%%\n",
            nbl->ncj, npexcl, 100*npexcl/(double)nbl->ncj);
    for (s = 0; s < SHIFTS; s++)
    {
        if (cs[s] > 0)
        {
            fprintf(fp, "nbl shift %2d ncj %3d\n", s, cs[s]);
        }
    }
}
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics_supersub(FILE *fp, const nbnxn_pairlist_t *nbl,
                                             const nbnxn_search_t nbs, real rl)
{
    const nbnxn_grid_t *grid;
    int                 i, j4, j, si, b;
    int                 c[GPU_NSUBCELL+1];

    /* This code only produces correct statistics with domain decomposition */
    grid = &nbs->grid[0];

    fprintf(fp, "nbl nsci %d ncj4 %d nsi %d excl4 %d\n",
            nbl->nsci, nbl->ncj4, nbl->nci_tot, nbl->nexcl);
    fprintf(fp, "nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_ci, rl, nbl->nci_tot, nbl->nci_tot/(double)grid->nsubc_tot,
            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c,
            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nsubc_tot*grid->na_c/det(nbs->box)));

    fprintf(fp, "nbl average j super cell list length %.1f\n",
            0.25*nbl->ncj4/(double)nbl->nsci);
    fprintf(fp, "nbl average i sub cell list length %.1f\n",
            nbl->nci_tot/((double)nbl->ncj4));

    for (si = 0; si <= GPU_NSUBCELL; si++)
    {
        c[si] = 0;
    }
    for (i = 0; i < nbl->nsci; i++)
    {
        for (j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
        {
            for (j = 0; j < NBNXN_GPU_JGROUP_SIZE; j++)
            {
                b = 0;
                for (si = 0; si < GPU_NSUBCELL; si++)
                {
                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*GPU_NSUBCELL + si)))
                    {
                        b++;
                    }
                }
                c[b]++;
            }
        }
    }
    for (b = 0; b <= GPU_NSUBCELL; b++)
    {
        fprintf(fp, "nbl j-list #i-subcell %d %7d %4.1f\n",
                b, c[b], 100.0*c[b]/(double)(nbl->ncj4*NBNXN_GPU_JGROUP_SIZE));
    }
}
2622 /* Print the full pair list, used for debug output */
2623 static void print_supersub_nsp(const char *fn,
2624 const nbnxn_pairlist_t *nbl,
2631 sprintf(buf,"%s_%s.xvg",fn,NONLOCAL_I(iloc) ? "nl" : "l");
2632 fp = ffopen(buf,"w");
2634 for(i=0; i<nbl->nsci; i++)
2637 for(j4=nbl->sci[i].cj4_ind_start; j4<nbl->sci[i].cj4_ind_end; j4++)
2639 for(p=0; p<NBNXN_GPU_JGROUP_SIZE*GPU_NSUBCELL; p++)
2641 nsp += (nbl->cj4[j4].imei[0].imask >> p) & 1;
2644 fprintf(fp,"%4d %3d %3d\n",
2647 nbl->sci[i].cj4_ind_end-nbl->sci[i].cj4_ind_start);
2653 /* Sets *excl to point to the exclusion mask for cj4-unit cj4, warp warp */
2654 static void low_get_nbl_exclusions(nbnxn_pairlist_t *nbl,int cj4,
2655 int warp,nbnxn_excl_t **excl)
2657 if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
2659 /* No exclusions set, make a new list entry */
2660 nbl->cj4[cj4].imei[warp].excl_ind = nbl->nexcl;
2662 *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
2663 set_no_excls(*excl);
2667 /* We already have some exclusions, new ones can be added to the list */
2668 *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
2672 /* Sets *excl to point to the exclusion mask for cj4-unit cj4, warp warp;
2673 * allocates extra memory, if necessary.
2675 static void get_nbl_exclusions_1(nbnxn_pairlist_t *nbl,int cj4,
2676 int warp,nbnxn_excl_t **excl)
2678 if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
2680 /* We need to make a new list entry, check if we have space */
2681 check_excl_space(nbl,1);
2683 low_get_nbl_exclusions(nbl,cj4,warp,excl);
2686 /* Sets pointers to the exclusion masks of both warps for cj4-unit cj4;
2687 * allocates extra memory, if necessary.
2689 static void get_nbl_exclusions_2(nbnxn_pairlist_t *nbl,int cj4,
2690 nbnxn_excl_t **excl_w0,
2691 nbnxn_excl_t **excl_w1)
2693 /* Check for space we might need */
2694 check_excl_space(nbl,2);
2696 low_get_nbl_exclusions(nbl,cj4,0,excl_w0);
2697 low_get_nbl_exclusions(nbl,cj4,1,excl_w1);
2700 /* Sets the self exclusions i=j and pair exclusions i>j */
2701 static void set_self_and_newton_excls_supersub(nbnxn_pairlist_t *nbl,
2702 int cj4_ind,int sj_offset,
2705 nbnxn_excl_t *excl[2];
2708 /* Here we only set the self and double pair exclusions */
2710 get_nbl_exclusions_2(nbl,cj4_ind,&excl[0],&excl[1]);
2712 /* Clear all bits with ei >= ej, so that only minor < major bits stay set */
2713 for(ej=0; ej<nbl->na_ci; ej++)
2716 for(ei=ej; ei<nbl->na_ci; ei++)
2718 excl[w]->pair[(ej & (NBNXN_GPU_JGROUP_SIZE-1))*nbl->na_ci + ei] &=
2719 ~(1U << (sj_offset*GPU_NSUBCELL + si));
2724 /* Returns a diagonal or off-diagonal interaction mask for plain C lists */
2725 static unsigned int get_imask(gmx_bool rdiag,int ci,int cj)
2727 return (rdiag && ci == cj ? NBNXN_INT_MASK_DIAG : NBNXN_INT_MASK_ALL);
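/* Illustrative sketch (not part of the build): how a diagonal interaction
 * mask could be constructed for a 4x4 cluster pair, assuming bit i*4+j
 * flags the pair of i-atom i and j-atom j and that only pairs with j > i
 * are kept on the diagonal. The real mask constants and their exact bit
 * conventions live in nbnxn_consts.h; the value printed here is for
 * illustration only.
 */
#if 0
#include <stdio.h>

int main(void)
{
    unsigned int mask = 0;
    int          i,j;

    /* Keep only pairs with j > i: Newton's third law makes the
     * other half of the self cluster pair redundant.
     */
    for(i=0; i<4; i++)
    {
        for(j=i+1; j<4; j++)
        {
            mask |= 1U << (i*4 + j);
        }
    }
    printf("diagonal mask: 0x%x\n",mask); /* 0x8ce with this layout */

    return 0;
}
#endif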
2730 /* Returns a diagonal or off-diagonal interaction mask for SIMD128 lists */
2731 static unsigned int get_imask_x86_simd128(gmx_bool rdiag,int ci,int cj)
2733 #ifndef GMX_DOUBLE /* cj-size = 4 */
2734 return (rdiag && ci == cj ? NBNXN_INT_MASK_DIAG : NBNXN_INT_MASK_ALL);
2735 #else /* cj-size = 2 */
2736 return (rdiag && ci*2 == cj ? NBNXN_INT_MASK_DIAG_J2_0 :
2737 (rdiag && ci*2+1 == cj ? NBNXN_INT_MASK_DIAG_J2_1 :
2738 NBNXN_INT_MASK_ALL));
2742 /* Returns a diagonal or off-diagonal interaction mask for SIMD256 lists */
2743 static unsigned int get_imask_x86_simd256(gmx_bool rdiag,int ci,int cj)
2745 #ifndef GMX_DOUBLE /* cj-size = 8 */
2746 return (rdiag && ci == cj*2 ? NBNXN_INT_MASK_DIAG_J8_0 :
2747 (rdiag && ci == cj*2+1 ? NBNXN_INT_MASK_DIAG_J8_1 :
2748 NBNXN_INT_MASK_ALL));
2749 #else /* cj-size = 4 */
2750 return (rdiag && ci == cj ? NBNXN_INT_MASK_DIAG : NBNXN_INT_MASK_ALL);
2754 #ifdef GMX_NBNXN_SIMD
2755 #if GMX_NBNXN_SIMD_BITWIDTH == 128
2756 #define get_imask_x86_simd_4xn get_imask_x86_simd128
2758 #if GMX_NBNXN_SIMD_BITWIDTH == 256
2759 #define get_imask_x86_simd_4xn get_imask_x86_simd256
2760 #define get_imask_x86_simd_2xnn get_imask_x86_simd128
2762 #error "unsupported GMX_NBNXN_SIMD_BITWIDTH"
2767 /* Plain C code for making a pair list of cell ci vs cell cjf-cjl.
2768 * Checks bounding box distances and possibly atom pair distances.
2770 static void make_cluster_list_simple(const nbnxn_grid_t *gridj,
2771 nbnxn_pairlist_t *nbl,
2772 int ci,int cjf,int cjl,
2773 gmx_bool remove_sub_diag,
2775 real rl2,float rbb2,
2778 const nbnxn_list_work_t *work;
2785 int cjf_gl,cjl_gl,cj;
2789 bb_ci = nbl->work->bb_ci;
2790 x_ci = nbl->work->x_ci;
2793 while (!InRange && cjf <= cjl)
2795 d2 = subc_bb_dist2(0,bb_ci,cjf,gridj->bb);
2798 /* Check if the distance is within rbb, where we rely on
2799 * the bounding-box distance alone, or within the cut-off
2800 * and there is at least one atom pair
2801 * within the cut-off.
2811 cjf_gl = gridj->cell0 + cjf;
2812 for(i=0; i<NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
2814 for(j=0; j<NBNXN_CPU_CLUSTER_I_SIZE; j++)
2816 InRange = InRange ||
2817 (sqr(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
2818 sqr(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
2819 sqr(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rl2);
2822 *ndistc += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
2835 while (!InRange && cjl > cjf)
2837 d2 = subc_bb_dist2(0,bb_ci,cjl,gridj->bb);
2840 /* Check if the distance is within rbb, where we rely on
2841 * the bounding-box distance alone, or within the cut-off
2842 * and there is at least one atom pair
2843 * within the cut-off.
2853 cjl_gl = gridj->cell0 + cjl;
2854 for(i=0; i<NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
2856 for(j=0; j<NBNXN_CPU_CLUSTER_I_SIZE; j++)
2858 InRange = InRange ||
2859 (sqr(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
2860 sqr(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
2861 sqr(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rl2);
2864 *ndistc += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
2874 for(cj=cjf; cj<=cjl; cj++)
2876 /* Store cj and the interaction mask */
2877 nbl->cj[nbl->ncj].cj = gridj->cell0 + cj;
2878 nbl->cj[nbl->ncj].excl = get_imask(remove_sub_diag,ci,cj);
2881 /* Increase the closing index in the i cell list */
2882 nbl->ci[nbl->nci].cj_ind_end = nbl->ncj;
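/* Illustrative sketch (not part of the build) of the two-level acceptance
 * test used above, with hypothetical inputs: a cluster pair enters the list
 * when the bounding-box distance^2 is below rbb2 (certainly in range) or,
 * when below rlist^2, at least one atom pair is actually within the cut-off.
 */
#if 0
#include <stdio.h>

/* Hypothetical stand-in for the checks in make_cluster_list_simple:
 * d2bb is the bounding-box distance^2, d2min the smallest atom-pair
 * distance^2 in the cluster pair.
 */
static int cluster_pair_in_range(double d2bb,double d2min,
                                 double rbb2,double rl2)
{
    return (d2bb < rbb2) || (d2bb < rl2 && d2min < rl2);
}

int main(void)
{
    double rl2 = 1.0,rbb2 = 0.49;

    printf("%d\n",cluster_pair_in_range(0.25,0.90,rbb2,rl2)); /* 1: bb distance suffices */
    printf("%d\n",cluster_pair_in_range(0.64,0.90,rbb2,rl2)); /* 1: atom pair in range   */
    printf("%d\n",cluster_pair_in_range(0.64,1.10,rbb2,rl2)); /* 0: pruned               */

    return 0;
}
#endif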
2886 #ifdef GMX_NBNXN_SIMD_4XN
2887 #include "nbnxn_search_simd_4xn.h"
2889 #ifdef GMX_NBNXN_SIMD_2XNN
2890 #include "nbnxn_search_simd_2xnn.h"
2893 /* Plain C or SSE code for making a pair list of super-cell sci vs scj.
2894 * Checks bounding box distances and possibly atom pair distances.
2896 static void make_cluster_list_supersub(const nbnxn_search_t nbs,
2897 const nbnxn_grid_t *gridi,
2898 const nbnxn_grid_t *gridj,
2899 nbnxn_pairlist_t *nbl,
2901 gmx_bool sci_equals_scj,
2902 int stride,const real *x,
2903 real rl2,float rbb2,
2908 int cjo,ci1,ci,cj,cj_gl;
2909 int cj4_ind,cj_offset;
2916 #define PRUNE_LIST_CPU_ONE
2917 #ifdef PRUNE_LIST_CPU_ONE
2921 d2l = nbl->work->d2;
2923 bb_ci = nbl->work->bb_ci;
2924 x_ci = nbl->work->x_ci;
2928 for(cjo=0; cjo<gridj->nsubc[scj]; cjo++)
2930 cj4_ind = (nbl->work->cj_ind >> NBNXN_GPU_JGROUP_SIZE_2LOG);
2931 cj_offset = nbl->work->cj_ind - cj4_ind*NBNXN_GPU_JGROUP_SIZE;
2932 cj4 = &nbl->cj4[cj4_ind];
2934 cj = scj*GPU_NSUBCELL + cjo;
2936 cj_gl = gridj->cell0*GPU_NSUBCELL + cj;
2938 /* Initialize this j-subcell i-subcell list */
2939 cj4->cj[cj_offset] = cj_gl;
2948 ci1 = gridi->nsubc[sci];
2952 /* Determine all ci1 bb distances in one call with SSE */
2953 subc_bb_dist2_sse_xxxx(gridj->bb+(cj>>STRIDE_PBB_2LOG)*NNBSBB_XXXX+(cj & (STRIDE_PBB-1)),
2959 /* We use a fixed upper-bound instead of ci1 to help optimization */
2960 for(ci=0; ci<GPU_NSUBCELL; ci++)
2967 #ifndef NBNXN_BBXXXX
2968 /* Determine the bb distance between ci and cj */
2969 d2l[ci] = subc_bb_dist2(ci,bb_ci,cj,gridj->bb);
2974 #ifdef PRUNE_LIST_CPU_ALL
2975 /* Check if the distance is within rbb, where we rely on
2976 * the bounding-box distance alone, or within the cut-off
2977 * and there is at least one atom pair within the cut-off.
2978 * This check is very costly.
2980 *ndistc += na_c*na_c;
2982 (d2 < rl2 && subc_in_range_x(na_c,ci,x_ci,cj_gl,stride,x,rl2)))
2984 /* Check if the distance between the two bounding boxes
2985 * is within the pair-list cut-off.
2990 /* Flag this i-subcell to be taken into account */
2991 imask |= (1U << (cj_offset*GPU_NSUBCELL+ci));
2993 #ifdef PRUNE_LIST_CPU_ONE
3001 #ifdef PRUNE_LIST_CPU_ONE
3002 /* If we only found 1 pair, check if any atoms are actually
3003 * within the cut-off, so we can get rid of it.
3005 if (npair == 1 && d2l[ci_last] >= rbb2)
3007 /* Avoid using function pointers here, as it's slower */
3009 #ifdef NBNXN_PBB_SSE
3014 (na_c,ci_last,x_ci,cj_gl,stride,x,rl2))
3016 imask &= ~(1U << (cj_offset*GPU_NSUBCELL+ci_last));
3024 /* We have a useful sj entry, close it now */
3026 /* Set the exclusions for the ci == sj entry.
3027 * Here we don't bother to check if this entry is actually flagged,
3028 * as it will nearly always be in the list.
3032 set_self_and_newton_excls_supersub(nbl,cj4_ind,cj_offset,cjo);
3035 /* Copy the cluster interaction mask to the list */
3036 for(w=0; w<NWARP; w++)
3038 cj4->imei[w].imask |= imask;
3041 nbl->work->cj_ind++;
3043 /* Keep the count */
3044 nbl->nci_tot += npair;
3046 /* Increase the closing index in the i super-cell list */
3047 nbl->sci[nbl->nsci].cj4_ind_end =
3048 ((nbl->work->cj_ind+NBNXN_GPU_JGROUP_SIZE-1) >> NBNXN_GPU_JGROUP_SIZE_2LOG);
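/* Illustrative sketch (not part of the build) of the cj4 index arithmetic
 * used in make_cluster_list_supersub, assuming NBNXN_GPU_JGROUP_SIZE=4
 * (2log=2) and GPU_NSUBCELL=8: a running j-cluster index cj_ind splits
 * into a cj4 unit and an offset within it, and i-subcell ci of that
 * j-cluster maps to bit cj_offset*GPU_NSUBCELL+ci of the interaction mask.
 */
#if 0
#include <stdio.h>

int main(void)
{
    const int jgroup_2log = 2;               /* assumed NBNXN_GPU_JGROUP_SIZE_2LOG */
    const int jgroup_size = 1 << jgroup_2log;
    const int nsubcell    = 8;               /* assumed GPU_NSUBCELL */
    int cj_ind    = 13;                      /* running j-cluster index */
    int ci        = 5;                       /* i-subcell found in range */
    int cj4_ind   = cj_ind >> jgroup_2log;
    int cj_offset = cj_ind - cj4_ind*jgroup_size;

    printf("cj4 %d offset %d imask bit %d\n",
           cj4_ind,cj_offset,cj_offset*nsubcell + ci); /* 3 1 13 */

    return 0;
}
#endif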
3053 /* Set all atom-pair exclusions from the topology stored in excl
3054 * as masks in the pair-list for simple list i-entry nbl_ci
3056 static void set_ci_top_excls(const nbnxn_search_t nbs,
3057 nbnxn_pairlist_t *nbl,
3058 gmx_bool diagRemoved,
3061 const nbnxn_ci_t *nbl_ci,
3062 const t_blocka *excl)
3066 int cj_ind_first,cj_ind_last;
3067 int cj_first,cj_last;
3069 int i,ai,aj,si,eind,ge,se;
3070 int found,cj_ind_0,cj_ind_1,cj_ind_m;
3074 nbnxn_excl_t *nbl_excl;
3075 int inner_i,inner_e;
3079 if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
3087 cj_ind_first = nbl_ci->cj_ind_start;
3088 cj_ind_last = nbl->ncj - 1;
3090 cj_first = nbl->cj[cj_ind_first].cj;
3091 cj_last = nbl->cj[cj_ind_last].cj;
3093 /* Determine how many contiguous j-cells we have starting
3094 * from the first i-cell. This number can be used to directly
3095 * calculate j-cell indices for excluded atoms.
3098 if (na_ci_2log == na_cj_2log)
3100 while (cj_ind_first + ndirect <= cj_ind_last &&
3101 nbl->cj[cj_ind_first+ndirect].cj == ci + ndirect)
3106 #ifdef NBNXN_SEARCH_BB_SSE
3109 while (cj_ind_first + ndirect <= cj_ind_last &&
3110 nbl->cj[cj_ind_first+ndirect].cj == ci_to_cj(na_cj_2log,ci) + ndirect)
3117 /* Loop over the atoms in the i super-cell */
3118 for(i=0; i<nbl->na_sc; i++)
3120 ai = nbs->a[ci*nbl->na_sc+i];
3123 si = (i>>na_ci_2log);
3125 /* Loop over the topology-based exclusions for this i-atom */
3126 for(eind=excl->index[ai]; eind<excl->index[ai+1]; eind++)
3132 /* The self exclusions are already set, which saves some time */
3138 /* Without shifts we only calculate interactions j>i
3139 * for one-way pair-lists.
3141 if (diagRemoved && ge <= ci*nbl->na_sc + i)
3146 se = (ge >> na_cj_2log);
3148 /* Could the cluster se be in our list? */
3149 if (se >= cj_first && se <= cj_last)
3151 if (se < cj_first + ndirect)
3153 /* We can calculate cj_ind directly from se */
3154 found = cj_ind_first + se - cj_first;
3158 /* Search for se using bisection */
3160 cj_ind_0 = cj_ind_first + ndirect;
3161 cj_ind_1 = cj_ind_last + 1;
3162 while (found == -1 && cj_ind_0 < cj_ind_1)
3164 cj_ind_m = (cj_ind_0 + cj_ind_1)>>1;
3166 cj_m = nbl->cj[cj_ind_m].cj;
3174 cj_ind_1 = cj_ind_m;
3178 cj_ind_0 = cj_ind_m + 1;
3185 inner_i = i - (si << na_ci_2log);
3186 inner_e = ge - (se << na_cj_2log);
3188 nbl->cj[found].excl &= ~(1U<<((inner_i<<na_cj_2log) + inner_e));
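/* Illustrative sketch (not part of the build) of the exclusion lookup
 * strategy above, on hypothetical data: the first ndirect j-clusters are
 * contiguous, so their list position follows from simple offset arithmetic;
 * anything beyond that is found by bisection in the sorted remainder.
 */
#if 0
#include <stdio.h>

/* cj is a hypothetical sorted j-cluster array standing in for nbl->cj[].cj */
static int find_cj(const int *cj,int ind_first,int ind_last,
                   int ndirect,int se)
{
    int i0,i1;

    if (se >= cj[ind_first] && se < cj[ind_first] + ndirect)
    {
        /* Contiguous block: compute the index directly */
        return ind_first + se - cj[ind_first];
    }
    /* Bisection over the remaining, still sorted, entries */
    i0 = ind_first + ndirect;
    i1 = ind_last + 1;
    while (i0 < i1)
    {
        int im = (i0 + i1) >> 1;

        if (se == cj[im])
        {
            return im;
        }
        else if (se < cj[im])
        {
            i1 = im;
        }
        else
        {
            i0 = im + 1;
        }
    }
    return -1;
}

int main(void)
{
    int cj[] = { 10, 11, 12, 17, 21, 30 };

    printf("%d %d %d\n",
           find_cj(cj,0,5,3,11),  /*  1: direct   */
           find_cj(cj,0,5,3,21),  /*  4: bisected */
           find_cj(cj,0,5,3,19)); /* -1: absent   */

    return 0;
}
#endif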
3196 /* Set all atom-pair exclusions from the topology stored in excl
3197 * as masks in the pair-list for i-super-cell entry nbl_sci
3199 static void set_sci_top_excls(const nbnxn_search_t nbs,
3200 nbnxn_pairlist_t *nbl,
3201 gmx_bool diagRemoved,
3203 const nbnxn_sci_t *nbl_sci,
3204 const t_blocka *excl)
3209 int cj_ind_first,cj_ind_last;
3210 int cj_first,cj_last;
3212 int i,ai,aj,si,eind,ge,se;
3213 int found,cj_ind_0,cj_ind_1,cj_ind_m;
3217 nbnxn_excl_t *nbl_excl;
3218 int inner_i,inner_e,w;
3224 if (nbl_sci->cj4_ind_end == nbl_sci->cj4_ind_start)
3232 cj_ind_first = nbl_sci->cj4_ind_start*NBNXN_GPU_JGROUP_SIZE;
3233 cj_ind_last = nbl->work->cj_ind - 1;
3235 cj_first = nbl->cj4[nbl_sci->cj4_ind_start].cj[0];
3236 cj_last = nbl_cj(nbl,cj_ind_last);
3238 /* Determine how many contiguous j-clusters we have starting
3239 * from the first i-cluster. This number can be used to directly
3240 * calculate j-cluster indices for excluded atoms.
3243 while (cj_ind_first + ndirect <= cj_ind_last &&
3244 nbl_cj(nbl,cj_ind_first+ndirect) == sci*GPU_NSUBCELL + ndirect)
3249 /* Loop over the atoms in the i super-cell */
3250 for(i=0; i<nbl->na_sc; i++)
3252 ai = nbs->a[sci*nbl->na_sc+i];
3255 si = (i>>na_c_2log);
3257 /* Loop over the topology-based exclusions for this i-atom */
3258 for(eind=excl->index[ai]; eind<excl->index[ai+1]; eind++)
3264 /* The self exclusions are already set, which saves some time */
3270 /* Without shifts we only calculate interactions j>i
3271 * for one-way pair-lists.
3273 if (diagRemoved && ge <= sci*nbl->na_sc + i)
3279 /* Could the cluster se be in our list? */
3280 if (se >= cj_first && se <= cj_last)
3282 if (se < cj_first + ndirect)
3284 /* We can calculate cj_ind directly from se */
3285 found = cj_ind_first + se - cj_first;
3289 /* Search for se using bisection */
3291 cj_ind_0 = cj_ind_first + ndirect;
3292 cj_ind_1 = cj_ind_last + 1;
3293 while (found == -1 && cj_ind_0 < cj_ind_1)
3295 cj_ind_m = (cj_ind_0 + cj_ind_1)>>1;
3297 cj_m = nbl_cj(nbl,cj_ind_m);
3305 cj_ind_1 = cj_ind_m;
3309 cj_ind_0 = cj_ind_m + 1;
3316 inner_i = i - si*na_c;
3317 inner_e = ge - se*na_c;
3319 /* Macro for getting the position of a j-cluster within its cj4 group */
3320 #define AMODCJ4(a) ((a) & (NBNXN_GPU_JGROUP_SIZE - 1))
3321 /* Macro for converting a j-cluster index to a cj4 group index */
3322 #define A2CJ4(a) ((a) >> NBNXN_GPU_JGROUP_SIZE_2LOG)
3323 /* Macro for getting the index of an atom within a warp half of a cluster */
3324 #define AMODWI(a) ((a) & (NBNXN_GPU_CLUSTER_SIZE/2 - 1))
3326 if (nbl_imask0(nbl,found) & (1U << (AMODCJ4(found)*GPU_NSUBCELL + si)))
3330 get_nbl_exclusions_1(nbl,A2CJ4(found),w,&nbl_excl);
3332 nbl_excl->pair[AMODWI(inner_e)*nbl->na_ci+inner_i] &=
3333 ~(1U << (AMODCJ4(found)*GPU_NSUBCELL + si));
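/* Illustrative sketch (not part of the build) of the index macros above,
 * assuming NBNXN_GPU_JGROUP_SIZE=4 and NBNXN_GPU_CLUSTER_SIZE=8: for a
 * j-cluster list position found=13, A2CJ4 gives the cj4 unit, AMODCJ4 the
 * position within it, and AMODWI folds a j-atom index onto the cluster
 * half handled by one warp.
 */
#if 0
#include <stdio.h>

#define EX_JGROUP_2LOG  2                    /* assumed NBNXN_GPU_JGROUP_SIZE_2LOG */
#define EX_JGROUP_SIZE  (1 << EX_JGROUP_2LOG)
#define EX_CLUSTER_SIZE 8                    /* assumed NBNXN_GPU_CLUSTER_SIZE */

#define EX_AMODCJ4(a) ((a) & (EX_JGROUP_SIZE - 1))
#define EX_A2CJ4(a)   ((a) >> EX_JGROUP_2LOG)
#define EX_AMODWI(a)  ((a) & (EX_CLUSTER_SIZE/2 - 1))

int main(void)
{
    int found   = 13; /* position of a j-cluster in the flat cj list */
    int inner_e = 6;  /* j-atom index within its cluster */

    printf("cj4 %d pos %d warp %d atom %d\n",
           EX_A2CJ4(found),EX_AMODCJ4(found),
           inner_e/(EX_CLUSTER_SIZE/2),EX_AMODWI(inner_e)); /* 3 1 1 2 */

    return 0;
}
#endif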
3346 /* Reallocate the simple ci list for at least n entries */
3347 static void nb_realloc_ci(nbnxn_pairlist_t *nbl,int n)
3349 nbl->ci_nalloc = over_alloc_small(n);
3350 nbnxn_realloc_void((void **)&nbl->ci,
3351 nbl->nci*sizeof(*nbl->ci),
3352 nbl->ci_nalloc*sizeof(*nbl->ci),
3353 nbl->alloc,nbl->free);
3356 /* Reallocate the super-cell sci list for at least n entries */
3357 static void nb_realloc_sci(nbnxn_pairlist_t *nbl,int n)
3359 nbl->sci_nalloc = over_alloc_small(n);
3360 nbnxn_realloc_void((void **)&nbl->sci,
3361 nbl->nsci*sizeof(*nbl->sci),
3362 nbl->sci_nalloc*sizeof(*nbl->sci),
3363 nbl->alloc,nbl->free);
3366 /* Make a new ci entry at index nbl->nci */
3367 static void new_ci_entry(nbnxn_pairlist_t *nbl,int ci,int shift,int flags,
3368 nbnxn_list_work_t *work)
3370 if (nbl->nci + 1 > nbl->ci_nalloc)
3372 nb_realloc_ci(nbl,nbl->nci+1);
3374 nbl->ci[nbl->nci].ci = ci;
3375 nbl->ci[nbl->nci].shift = shift;
3376 /* Store the interaction flags along with the shift */
3377 nbl->ci[nbl->nci].shift |= flags;
3378 nbl->ci[nbl->nci].cj_ind_start = nbl->ncj;
3379 nbl->ci[nbl->nci].cj_ind_end = nbl->ncj;
3382 /* Make a new sci entry at index nbl->nsci */
3383 static void new_sci_entry(nbnxn_pairlist_t *nbl,int sci,int shift,int flags,
3384 nbnxn_list_work_t *work)
3386 if (nbl->nsci + 1 > nbl->sci_nalloc)
3388 nb_realloc_sci(nbl,nbl->nsci+1);
3390 nbl->sci[nbl->nsci].sci = sci;
3391 nbl->sci[nbl->nsci].shift = shift;
3392 nbl->sci[nbl->nsci].cj4_ind_start = nbl->ncj4;
3393 nbl->sci[nbl->nsci].cj4_ind_end = nbl->ncj4;
3396 /* Sort the simple j-list cj on exclusions.
3397 * Entries with exclusions will all be sorted to the beginning of the list.
3399 static void sort_cj_excl(nbnxn_cj_t *cj,int ncj,
3400 nbnxn_list_work_t *work)
3404 if (ncj > work->cj_nalloc)
3406 work->cj_nalloc = over_alloc_large(ncj);
3407 srenew(work->cj,work->cj_nalloc);
3410 /* Make a list of the j-cells involving exclusions */
3412 for(j=0; j<ncj; j++)
3414 if (cj[j].excl != NBNXN_INT_MASK_ALL)
3416 work->cj[jnew++] = cj[j];
3419 /* Skip the reorder if there are no exclusions, or only the first entry has them */
3420 if (!((jnew == 0) ||
3421 (jnew == 1 && cj[0].excl != NBNXN_INT_MASK_ALL)))
3423 for(j=0; j<ncj; j++)
3425 if (cj[j].excl == NBNXN_INT_MASK_ALL)
3427 work->cj[jnew++] = cj[j];
3430 for(j=0; j<ncj; j++)
3432 cj[j] = work->cj[j];
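/* Illustrative sketch (not part of the build) of the stable two-pass
 * partition performed by sort_cj_excl, on a hypothetical array: entries
 * carrying exclusions are copied first, the fully interacting ones after,
 * preserving the relative order within each group.
 */
#if 0
#include <stdio.h>

#define EX_MASK_ALL 0xffffffffU /* stands in for NBNXN_INT_MASK_ALL */

int main(void)
{
    unsigned int excl[6] = { EX_MASK_ALL, 0x7fffffffU, EX_MASK_ALL,
                             0x3fffffffU, EX_MASK_ALL, EX_MASK_ALL };
    unsigned int tmp[6];
    int          j,jnew = 0;

    /* Pass 1: entries with exclusions go to the front */
    for(j=0; j<6; j++)
    {
        if (excl[j] != EX_MASK_ALL)
        {
            tmp[jnew++] = excl[j];
        }
    }
    /* Pass 2: append the fully interacting entries */
    for(j=0; j<6; j++)
    {
        if (excl[j] == EX_MASK_ALL)
        {
            tmp[jnew++] = excl[j];
        }
    }
    for(j=0; j<6; j++)
    {
        printf("%d: 0x%x\n",j,tmp[j]);
    }

    return 0;
}
#endif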
3437 /* Close this simple list i entry */
3438 static void close_ci_entry_simple(nbnxn_pairlist_t *nbl)
3442 /* All content of the new ci entry has already been filled correctly;
3443 * we only need to increase the count here (for non-empty lists).
3445 jlen = nbl->ci[nbl->nci].cj_ind_end - nbl->ci[nbl->nci].cj_ind_start;
3448 sort_cj_excl(nbl->cj+nbl->ci[nbl->nci].cj_ind_start,jlen,nbl->work);
3450 /* The counts below are used for non-bonded pair/flop counts
3451 * and should therefore match the available kernel setups.
3453 if (!(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_COUL(0)))
3455 nbl->work->ncj_noq += jlen;
3457 else if ((nbl->ci[nbl->nci].shift & NBNXN_CI_HALF_LJ(0)) ||
3458 !(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_LJ(0)))
3460 nbl->work->ncj_hlj += jlen;
3467 /* Split sci entry for load balancing on the GPU.
3468 * As we only know the current count on our own thread,
3469 * we need to estimate the current total number of i-entries.
3470 * As the lists get concatenated later, this estimate depends
3471 * both on nthread and our own thread index thread.
3473 static void split_sci_entry(nbnxn_pairlist_t *nbl,
3474 int nsp_max_av,gmx_bool progBal,int nc_bal,
3475 int thread,int nthread)
3479 int cj4_start,cj4_end,j4len,cj4;
3481 int nsp,nsp_sci,nsp_cj4,nsp_cj4_e,nsp_cj4_p;
3484 /* Estimate the total numbers of ci's of the nblist combined
3485 * over all threads using the target number of ci's.
3487 nsci_est = nc_bal*thread/nthread + nbl->nsci;
3490 /* The first ci blocks should be larger, to avoid overhead.
3491 * The last ci blocks should be smaller, to improve load balancing.
3494 nsp_max_av*nc_bal*3/(2*(nsci_est - 1 + nc_bal)));
3498 nsp_max = nsp_max_av;
3501 cj4_start = nbl->sci[nbl->nsci-1].cj4_ind_start;
3502 cj4_end = nbl->sci[nbl->nsci-1].cj4_ind_end;
3503 j4len = cj4_end - cj4_start;
3505 if (j4len > 1 && j4len*GPU_NSUBCELL*NBNXN_GPU_JGROUP_SIZE > nsp_max)
3507 /* Remove the last ci entry and process the cj4's again */
3516 while (cj4 < cj4_end)
3518 nsp_cj4_p = nsp_cj4;
3520 for(p=0; p<GPU_NSUBCELL*NBNXN_GPU_JGROUP_SIZE; p++)
3522 nsp_cj4 += (nbl->cj4[cj4].imei[0].imask >> p) & 1;
3526 if (nsp > nsp_max && nsp > nsp_cj4)
3528 nbl->sci[sci].cj4_ind_end = cj4;
3531 if (nbl->nsci+1 > nbl->sci_nalloc)
3533 nb_realloc_sci(nbl,nbl->nsci+1);
3535 nbl->sci[sci].sci = nbl->sci[nbl->nsci-1].sci;
3536 nbl->sci[sci].shift = nbl->sci[nbl->nsci-1].shift;
3537 nbl->sci[sci].cj4_ind_start = cj4;
3538 nsp_sci = nsp - nsp_cj4;
3539 nsp_cj4_e = nsp_cj4_p;
3546 /* Put the remaining cj4's in a new ci entry */
3547 nbl->sci[sci].cj4_ind_end = cj4_end;
3549 /* Possibly balance out the last two ci's
3550 * by moving the last cj4 of the second last ci.
3552 if (nsp_sci - nsp_cj4_e >= nsp + nsp_cj4_e)
3554 nbl->sci[sci-1].cj4_ind_end--;
3555 nbl->sci[sci].cj4_ind_start--;
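/* Worked example (not part of the build) of the progressive balancing
 * target in split_sci_entry, with hypothetical numbers: nsp_max_av=160,
 * nc_bal=1000, thread 2 of 8 with 50 sci entries so far gives
 * nsci_est = 1000*2/8 + 50 = 300 and
 * nsp_max = 160*1000*3/(2*(300 - 1 + 1000)) = 184, so early entries are
 * allowed to be larger than the average target.
 */
#if 0
#include <stdio.h>

int main(void)
{
    int nsp_max_av = 160;  /* target average super-cell pairs per entry */
    int nc_bal     = 1000; /* target entry count over all threads */
    int thread     = 2,nthread = 8;
    int nsci       = 50;   /* entries generated so far on this thread */
    int nsci_est,nsp_max;

    /* Estimate the combined entry count, as in split_sci_entry */
    nsci_est = nc_bal*thread/nthread + nsci;
    nsp_max  = nsp_max_av*nc_bal*3/(2*(nsci_est - 1 + nc_bal));

    printf("nsci_est %d nsp_max %d\n",nsci_est,nsp_max); /* 300 184 */

    return 0;
}
#endif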
3563 /* Close this super/sub list i entry */
3564 static void close_ci_entry_supersub(nbnxn_pairlist_t *nbl,
3566 gmx_bool progBal,int nc_bal,
3567 int thread,int nthread)
3572 /* All content of the new ci entry has already been filled correctly;
3573 * we only need to increase the count here (for non-empty lists).
3575 j4len = nbl->sci[nbl->nsci].cj4_ind_end - nbl->sci[nbl->nsci].cj4_ind_start;
3578 /* We can only have complete blocks of 4 j-entries in a list,
3579 * so round the count up before closing.
3581 nbl->ncj4 = ((nbl->work->cj_ind + NBNXN_GPU_JGROUP_SIZE - 1) >> NBNXN_GPU_JGROUP_SIZE_2LOG);
3582 nbl->work->cj_ind = nbl->ncj4*NBNXN_GPU_JGROUP_SIZE;
3588 split_sci_entry(nbl,nsp_max_av,progBal,nc_bal,thread,nthread);
3593 /* Syncs the working array before adding another grid pair to the list */
3594 static void sync_work(nbnxn_pairlist_t *nbl)
3598 nbl->work->cj_ind = nbl->ncj4*NBNXN_GPU_JGROUP_SIZE;
3599 nbl->work->cj4_init = nbl->ncj4;
3603 /* Clears an nbnxn_pairlist_t data structure */
3604 static void clear_pairlist(nbnxn_pairlist_t *nbl)
3613 nbl->work->ncj_noq = 0;
3614 nbl->work->ncj_hlj = 0;
3617 /* Sets a simple list i-cell bounding box, including PBC shift */
3618 static void set_icell_bb_simple(const float *bb,int ci,
3619 real shx,real shy,real shz,
3625 bb_ci[BBL_X] = bb[ia+BBL_X] + shx;
3626 bb_ci[BBL_Y] = bb[ia+BBL_Y] + shy;
3627 bb_ci[BBL_Z] = bb[ia+BBL_Z] + shz;
3628 bb_ci[BBU_X] = bb[ia+BBU_X] + shx;
3629 bb_ci[BBU_Y] = bb[ia+BBU_Y] + shy;
3630 bb_ci[BBU_Z] = bb[ia+BBU_Z] + shz;
3633 /* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
3634 static void set_icell_bb_supersub(const float *bb,int ci,
3635 real shx,real shy,real shz,
3641 ia = ci*(GPU_NSUBCELL>>STRIDE_PBB_2LOG)*NNBSBB_XXXX;
3642 for(m=0; m<(GPU_NSUBCELL>>STRIDE_PBB_2LOG)*NNBSBB_XXXX; m+=NNBSBB_XXXX)
3644 for(i=0; i<STRIDE_PBB; i++)
3646 bb_ci[m+0*STRIDE_PBB+i] = bb[ia+m+0*STRIDE_PBB+i] + shx;
3647 bb_ci[m+1*STRIDE_PBB+i] = bb[ia+m+1*STRIDE_PBB+i] + shy;
3648 bb_ci[m+2*STRIDE_PBB+i] = bb[ia+m+2*STRIDE_PBB+i] + shz;
3649 bb_ci[m+3*STRIDE_PBB+i] = bb[ia+m+3*STRIDE_PBB+i] + shx;
3650 bb_ci[m+4*STRIDE_PBB+i] = bb[ia+m+4*STRIDE_PBB+i] + shy;
3651 bb_ci[m+5*STRIDE_PBB+i] = bb[ia+m+5*STRIDE_PBB+i] + shz;
3655 ia = ci*GPU_NSUBCELL*NNBSBB_B;
3656 for(i=0; i<GPU_NSUBCELL*NNBSBB_B; i+=NNBSBB_B)
3658 bb_ci[i+BBL_X] = bb[ia+i+BBL_X] + shx;
3659 bb_ci[i+BBL_Y] = bb[ia+i+BBL_Y] + shy;
3660 bb_ci[i+BBL_Z] = bb[ia+i+BBL_Z] + shz;
3661 bb_ci[i+BBU_X] = bb[ia+i+BBU_X] + shx;
3662 bb_ci[i+BBU_Y] = bb[ia+i+BBU_Y] + shy;
3663 bb_ci[i+BBU_Z] = bb[ia+i+BBU_Z] + shz;
3668 /* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
3669 static void icell_set_x_simple(int ci,
3670 real shx,real shy,real shz,
3672 int stride,const real *x,
3673 nbnxn_list_work_t *work)
3677 ia = ci*NBNXN_CPU_CLUSTER_I_SIZE;
3679 for(i=0; i<NBNXN_CPU_CLUSTER_I_SIZE; i++)
3681 work->x_ci[i*STRIDE_XYZ+XX] = x[(ia+i)*stride+XX] + shx;
3682 work->x_ci[i*STRIDE_XYZ+YY] = x[(ia+i)*stride+YY] + shy;
3683 work->x_ci[i*STRIDE_XYZ+ZZ] = x[(ia+i)*stride+ZZ] + shz;
3687 /* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
3688 static void icell_set_x_supersub(int ci,
3689 real shx,real shy,real shz,
3691 int stride,const real *x,
3692 nbnxn_list_work_t *work)
3699 ia = ci*GPU_NSUBCELL*na_c;
3700 for(i=0; i<GPU_NSUBCELL*na_c; i++)
3702 x_ci[i*DIM + XX] = x[(ia+i)*stride + XX] + shx;
3703 x_ci[i*DIM + YY] = x[(ia+i)*stride + YY] + shy;
3704 x_ci[i*DIM + ZZ] = x[(ia+i)*stride + ZZ] + shz;
3708 #ifdef NBNXN_SEARCH_BB_SSE
3709 /* Copies PBC shifted super-cell packed atom coordinates to working array */
3710 static void icell_set_x_supersub_sse8(int ci,
3711 real shx,real shy,real shz,
3713 int stride,const real *x,
3714 nbnxn_list_work_t *work)
3721 for(si=0; si<GPU_NSUBCELL; si++)
3723 for(i=0; i<na_c; i+=STRIDE_PBB)
3726 ia = ci*GPU_NSUBCELL*na_c + io;
3727 for(j=0; j<STRIDE_PBB; j++)
3729 x_ci[io*DIM + j + XX*STRIDE_PBB] = x[(ia+j)*stride+XX] + shx;
3730 x_ci[io*DIM + j + YY*STRIDE_PBB] = x[(ia+j)*stride+YY] + shy;
3731 x_ci[io*DIM + j + ZZ*STRIDE_PBB] = x[(ia+j)*stride+ZZ] + shz;
3738 static real nbnxn_rlist_inc_nonloc_fac = 0.6;
3740 /* Due to the cluster size, the effective pair-list cut-off is longer than
3741 * that of a simple atom pair-list. This function gives the extra distance.
3743 real nbnxn_get_rlist_effective_inc(int cluster_size,real atom_density)
3745 return ((0.5 + nbnxn_rlist_inc_nonloc_fac)*sqr(((cluster_size) - 1.0)/(cluster_size))*pow((cluster_size)/(atom_density),1.0/3.0));
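/* Worked example (not part of the build) of the formula above, under
 * assumed conditions: a cluster size of 4 and a water-like atom density
 * of 100 nm^-3 give an increase of 1.1*(3/4)^2*(4/100)^(1/3), roughly
 * 0.21 nm on top of the simple pair-list cut-off.
 */
#if 0
#include <math.h>
#include <stdio.h>

int main(void)
{
    double fac          = 0.6;   /* nbnxn_rlist_inc_nonloc_fac */
    double cluster_size = 4.0;
    double atom_density = 100.0; /* roughly water, in nm^-3 */
    double rinc;

    rinc = (0.5 + fac)
        *pow((cluster_size - 1.0)/cluster_size,2.0)
        *pow(cluster_size/atom_density,1.0/3.0);

    printf("effective rlist increase: %.3f nm\n",rinc); /* ~0.212 */

    return 0;
}
#endif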
3748 /* Estimates the interaction volume^2 for non-local interactions */
3749 static real nonlocal_vol2(const gmx_domdec_zones_t *zones,rvec ls,real r)
3758 /* Here we simply add up the non-home interaction volume^2 of the
3759 * 1, 2 or 3 one-dimensional decomposition zones. As these volumes are not additive,
3760 * this is an overestimate, but it would only be significant in the limit
3761 * of small cells, where we anyhow need to split the lists into
3762 * as small parts as possible.
3765 for(z=0; z<zones->n; z++)
3767 if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
3772 for(d=0; d<DIM; d++)
3774 if (zones->shift[z][d] == 0)
3778 za *= zones->size[z].x1[d] - zones->size[z].x0[d];
3782 /* 4 octants of a sphere */
3783 vold_est = 0.25*M_PI*r*r*r*r;
3784 /* 4 quarter pie slices on the edges */
3785 vold_est += 4*cl*M_PI/6.0*r*r*r;
3786 /* One rectangular volume on a face */
3787 vold_est += ca*0.5*r*r;
3789 vol2_est_tot += vold_est*za;
3793 return vol2_est_tot;
3796 /* Estimates the average size of a full j-list for super/sub setup */
3797 static int get_nsubpair_max(const nbnxn_search_t nbs,
3800 int min_ci_balanced)
3802 const nbnxn_grid_t *grid;
3804 real xy_diag2,r_eff_sup,vol_est,nsp_est,nsp_est_nl;
3807 grid = &nbs->grid[0];
3809 ls[XX] = (grid->c1[XX] - grid->c0[XX])/(grid->ncx*GPU_NSUBCELL_X);
3810 ls[YY] = (grid->c1[YY] - grid->c0[YY])/(grid->ncy*GPU_NSUBCELL_Y);
3811 ls[ZZ] = (grid->c1[ZZ] - grid->c0[ZZ])*grid->ncx*grid->ncy/(grid->nc*GPU_NSUBCELL_Z);
3813 /* The average squared length of the diagonal of a sub cell */
3814 xy_diag2 = ls[XX]*ls[XX] + ls[YY]*ls[YY] + ls[ZZ]*ls[ZZ];
3816 /* The formulas below are a heuristic estimate of the average nsj per si */
3817 r_eff_sup = rlist + nbnxn_rlist_inc_nonloc_fac*sqr((grid->na_c - 1.0)/grid->na_c)*sqrt(xy_diag2/3);
3819 if (!nbs->DomDec || nbs->zones->n == 1)
3826 sqr(grid->atom_density/grid->na_c)*
3827 nonlocal_vol2(nbs->zones,ls,r_eff_sup);
3832 /* Sub-cell interacts with itself */
3833 vol_est = ls[XX]*ls[YY]*ls[ZZ];
3834 /* 6/2 rectangular volume on the faces */
3835 vol_est += (ls[XX]*ls[YY] + ls[XX]*ls[ZZ] + ls[YY]*ls[ZZ])*r_eff_sup;
3836 /* 12/2 quarter pie slices on the edges */
3837 vol_est += 2*(ls[XX] + ls[YY] + ls[ZZ])*0.25*M_PI*sqr(r_eff_sup);
3838 /* 4 octants of a sphere */
3839 vol_est += 0.5*4.0/3.0*M_PI*pow(r_eff_sup,3);
3841 nsp_est = grid->nsubc_tot*vol_est*grid->atom_density/grid->na_c;
3843 /* Subtract the non-local pair count */
3844 nsp_est -= nsp_est_nl;
3848 fprintf(debug,"nsp_est local %5.1f non-local %5.1f\n",
3849 nsp_est,nsp_est_nl);
3854 nsp_est = nsp_est_nl;
3857 if (min_ci_balanced <= 0 || grid->nc >= min_ci_balanced || grid->nc == 0)
3859 /* We don't need to worry */
3864 /* Thus the (average) maximum j-list size should be as follows */
3865 nsubpair_max = max(1,(int)(nsp_est/min_ci_balanced+0.5));
3867 /* Since the target value is a maximum (this avoids high outliers,
3868 * which lead to load imbalance), not an average, we get more lists
3869 * than we ask for (to compensate we need to add GPU_NSUBCELL*4/4).
3870 * But more importantly, the optimal GPU performance moves
3871 * to a lower number of blocks for very small blocks.
3872 * To compensate we add the maximum pair count per cj4.
3874 nsubpair_max += GPU_NSUBCELL*NBNXN_CPU_CLUSTER_I_SIZE;
3879 fprintf(debug,"nbl nsp estimate %.1f, nsubpair_max %d\n",
3880 nsp_est,nsubpair_max);
3883 return nsubpair_max;
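/* Worked example (not part of the build) of the balancing target above,
 * with hypothetical numbers and assuming GPU_NSUBCELL=8 and
 * NBNXN_CPU_CLUSTER_I_SIZE=4: nsp_est=2.0e5 super-cell pairs and
 * min_ci_balanced=1600 give nsubpair_max = 125 + 8*4 = 157.
 */
#if 0
#include <stdio.h>

int main(void)
{
    double nsp_est         = 2.0e5; /* estimated super-cell pair count */
    int    min_ci_balanced = 1600;  /* minimum list count for balancing */
    int    gpu_nsubcell    = 8;     /* assumed GPU_NSUBCELL */
    int    cpu_cluster_i   = 4;     /* assumed NBNXN_CPU_CLUSTER_I_SIZE */
    int    nsubpair_max;

    nsubpair_max = (int)(nsp_est/min_ci_balanced + 0.5);
    /* Compensate for the maximum-vs-average target, as explained above */
    nsubpair_max += gpu_nsubcell*cpu_cluster_i;

    printf("nsubpair_max %d\n",nsubpair_max); /* 157 */

    return 0;
}
#endif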
3886 /* Debug list print function */
3887 static void print_nblist_ci_cj(FILE *fp,const nbnxn_pairlist_t *nbl)
3891 for(i=0; i<nbl->nci; i++)
3893 fprintf(fp,"ci %4d shift %2d ncj %3d\n",
3894 nbl->ci[i].ci,nbl->ci[i].shift,
3895 nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start);
3897 for(j=nbl->ci[i].cj_ind_start; j<nbl->ci[i].cj_ind_end; j++)
3899 fprintf(fp," cj %5d imask %x\n",
3906 /* Debug list print function */
3907 static void print_nblist_sci_cj(FILE *fp,const nbnxn_pairlist_t *nbl)
3911 for(i=0; i<nbl->nsci; i++)
3913 fprintf(fp,"ci %4d shift %2d ncj4 %2d\n",
3914 nbl->sci[i].sci,nbl->sci[i].shift,
3915 nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start);
3917 for(j4=nbl->sci[i].cj4_ind_start; j4<nbl->sci[i].cj4_ind_end; j4++)
3919 for(j=0; j<NBNXN_GPU_JGROUP_SIZE; j++)
3921 fprintf(fp," sj %5d imask %x\n",
3923 nbl->cj4[j4].imei[0].imask);
3929 /* Combine the pair lists *nbl generated on multiple threads into nblc */
3930 static void combine_nblists(int nnbl,nbnxn_pairlist_t **nbl,
3931 nbnxn_pairlist_t *nblc)
3933 int nsci,ncj4,nexcl;
3938 gmx_incons("combine_nblists does not support simple lists");
3943 nexcl = nblc->nexcl;
3944 for(i=0; i<nnbl; i++)
3946 nsci += nbl[i]->nsci;
3947 ncj4 += nbl[i]->ncj4;
3948 nexcl += nbl[i]->nexcl;
3951 if (nsci > nblc->sci_nalloc)
3953 nb_realloc_sci(nblc,nsci);
3955 if (ncj4 > nblc->cj4_nalloc)
3957 nblc->cj4_nalloc = over_alloc_small(ncj4);
3958 nbnxn_realloc_void((void **)&nblc->cj4,
3959 nblc->ncj4*sizeof(*nblc->cj4),
3960 nblc->cj4_nalloc*sizeof(*nblc->cj4),
3961 nblc->alloc,nblc->free);
3963 if (nexcl > nblc->excl_nalloc)
3965 nblc->excl_nalloc = over_alloc_small(nexcl);
3966 nbnxn_realloc_void((void **)&nblc->excl,
3967 nblc->nexcl*sizeof(*nblc->excl),
3968 nblc->excl_nalloc*sizeof(*nblc->excl),
3969 nblc->alloc,nblc->free);
3972 /* Each thread should copy its own data to the combined arrays,
3973 * as otherwise data will go back and forth between different caches.
3975 #pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntPairsearch)) schedule(static)
3976 for(n=0; n<nnbl; n++)
3983 const nbnxn_pairlist_t *nbli;
3985 /* Determine the offset in the combined data for our thread */
3986 sci_offset = nblc->nsci;
3987 cj4_offset = nblc->ncj4;
3988 ci_offset = nblc->nci_tot;
3989 excl_offset = nblc->nexcl;
3993 sci_offset += nbl[i]->nsci;
3994 cj4_offset += nbl[i]->ncj4;
3995 ci_offset += nbl[i]->nci_tot;
3996 excl_offset += nbl[i]->nexcl;
4001 for(i=0; i<nbli->nsci; i++)
4003 nblc->sci[sci_offset+i] = nbli->sci[i];
4004 nblc->sci[sci_offset+i].cj4_ind_start += cj4_offset;
4005 nblc->sci[sci_offset+i].cj4_ind_end += cj4_offset;
4008 for(j4=0; j4<nbli->ncj4; j4++)
4010 nblc->cj4[cj4_offset+j4] = nbli->cj4[j4];
4011 nblc->cj4[cj4_offset+j4].imei[0].excl_ind += excl_offset;
4012 nblc->cj4[cj4_offset+j4].imei[1].excl_ind += excl_offset;
4015 for(j4=0; j4<nbli->nexcl; j4++)
4017 nblc->excl[excl_offset+j4] = nbli->excl[j4];
4021 for(n=0; n<nnbl; n++)
4023 nblc->nsci += nbl[n]->nsci;
4024 nblc->ncj4 += nbl[n]->ncj4;
4025 nblc->nci_tot += nbl[n]->nci_tot;
4026 nblc->nexcl += nbl[n]->nexcl;
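/* Illustrative sketch (not part of the build) of the per-thread offset
 * computation in combine_nblists, with hypothetical counts: each thread
 * sums the sizes of all lists before its own (an exclusive prefix sum),
 * so the copies into the combined arrays never overlap.
 */
#if 0
#include <stdio.h>

int main(void)
{
    int nsci[4] = { 120, 95, 110, 88 }; /* hypothetical per-thread sci counts */
    int n,i,offset;

    for(n=0; n<4; n++)
    {
        /* Exclusive prefix sum: where thread n's block starts */
        offset = 0;
        for(i=0; i<n; i++)
        {
            offset += nsci[i];
        }
        /* Prints offsets 0, 120, 215 and 325 */
        printf("thread %d copies to sci offset %d\n",n,offset);
    }

    return 0;
}
#endif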
4030 /* Returns the next ci to be processed by our thread */
4031 static gmx_bool next_ci(const nbnxn_grid_t *grid,
4033 int nth,int ci_block,
4034 int *ci_x,int *ci_y,
4040 if (*ci_b == ci_block)
4042 /* Jump to the next block assigned to this task */
4043 *ci += (nth - 1)*ci_block;
4047 if (*ci >= grid->nc*conv)
4052 while (*ci >= grid->cxy_ind[*ci_x*grid->ncy + *ci_y + 1]*conv)
4055 if (*ci_y == grid->ncy)
4065 /* Returns the distance^2 for which we put cell pairs in the list
4066 * without checking atom pair distances. This is usually < rlist^2.
4068 static float boundingbox_only_distance2(const nbnxn_grid_t *gridi,
4069 const nbnxn_grid_t *gridj,
4073 /* If the distance between two sub-cell bounding boxes is less
4074 * than this distance, do not check the distance between
4075 * all particle pairs in the sub-cell, since then it is likely
4076 * that the box pair has atom pairs within the cut-off.
4077 * We use the nblist cut-off minus 0.5 times the average x/y diagonal
4078 * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
4079 * Using more than 0.5 gains at most 0.5%.
4080 * If forces are calculated more than twice, the performance gain
4081 * in the force calculation outweighs the cost of checking.
4082 * Note that with sub-cell lists, the atom-pair distance check
4083 * is only performed when only 1 out of 8 sub-cells is within range,
4084 * because the GPU is much faster than the CPU.
4089 bbx = 0.5*(gridi->sx + gridj->sx);
4090 bby = 0.5*(gridi->sy + gridj->sy);
4093 bbx /= GPU_NSUBCELL_X;
4094 bby /= GPU_NSUBCELL_Y;
4097 rbb2 = sqr(max(0,rlist - 0.5*sqrt(bbx*bbx + bby*bby)));
4102 return (float)((1+GMX_FLOAT_EPS)*rbb2);
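/* Worked example (not part of the build) of the bounding-box-only
 * distance for a simple list, under assumed grid spacings: rlist=1.0 nm
 * and sub-cell sizes sx=sy=0.3 nm give rbb = 1.0 - 0.5*sqrt(0.18),
 * roughly 0.79 nm, so rbb2 is about 0.62 nm^2.
 */
#if 0
#include <math.h>
#include <stdio.h>

int main(void)
{
    double rlist = 1.0;           /* pair-list cut-off, nm */
    double bbx   = 0.3,bby = 0.3; /* assumed average sub-cell x/y sizes */
    double rbb,rbb2;

    rbb  = rlist - 0.5*sqrt(bbx*bbx + bby*bby);
    rbb2 = (rbb > 0 ? rbb*rbb : 0);

    printf("rbb2 = %.3f nm^2\n",rbb2); /* ~0.621 */

    return 0;
}
#endif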
4106 static int get_ci_block_size(const nbnxn_grid_t *gridi,
4107 gmx_bool bDomDec, int nth)
4109 const int ci_block_enum = 5;
4110 const int ci_block_denom = 11;
4111 const int ci_block_min_atoms = 16;
4114 /* Here we decide how to distribute the blocks over the threads.
4115 * We use prime numbers to try to avoid the grid size becoming
4116 * a multiple of the number of threads, which would lead to some
4117 * threads getting "inner" pairs and others getting boundary pairs,
4118 * which in turn leads to load imbalance between threads.
4119 * Set the block size as 5/11/ntask times the average number of cells
4120 * in a y,z slab. This should ensure a quite uniform distribution
4121 * of the grid parts over the different threads along all three grid
4122 * zone boundaries with 3D domain decomposition. At the same time
4123 * the blocks will not become too small.
4125 ci_block = (gridi->nc*ci_block_enum)/(ci_block_denom*gridi->ncx*nth);
4127 /* Ensure the blocks are not too small: avoids cache invalidation */
4128 if (ci_block*gridi->na_sc < ci_block_min_atoms)
4130 ci_block = (ci_block_min_atoms + gridi->na_sc - 1)/gridi->na_sc;
4133 /* Without domain decomposition,
4134 * or with fewer than 3 blocks per task, divide into nth blocks.
4136 if (!bDomDec || ci_block*3*nth > gridi->nc)
4138 ci_block = (gridi->nc + nth - 1)/nth;
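/* Worked example (not part of the build) of the block-size heuristic in
 * get_ci_block_size, with hypothetical grid numbers: nc=3300 cells on a
 * grid with ncx=20 columns in x, na_sc=4 and 8 threads give
 * ci_block = 3300*5/(11*20*8) = 9, which already satisfies the
 * 16-atom minimum (9*4 = 36 atoms per block).
 */
#if 0
#include <stdio.h>

int main(void)
{
    const int ci_block_enum      = 5;
    const int ci_block_denom     = 11;
    const int ci_block_min_atoms = 16;
    int nc  = 3300,ncx = 20; /* hypothetical grid */
    int nth = 8,na_sc = 4;   /* hypothetical thread count and cell size */
    int ci_block;

    ci_block = (nc*ci_block_enum)/(ci_block_denom*ncx*nth);

    if (ci_block*na_sc < ci_block_min_atoms)
    {
        ci_block = (ci_block_min_atoms + na_sc - 1)/na_sc;
    }

    printf("ci_block %d\n",ci_block); /* 9 */

    return 0;
}
#endif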
4144 /* Generates the part of pair-list nbl assigned to our thread */
4145 static void nbnxn_make_pairlist_part(const nbnxn_search_t nbs,
4146 const nbnxn_grid_t *gridi,
4147 const nbnxn_grid_t *gridj,
4148 nbnxn_search_work_t *work,
4149 const nbnxn_atomdata_t *nbat,
4150 const t_blocka *excl,
4154 gmx_bool bFBufferFlag,
4157 int min_ci_balanced,
4159 nbnxn_pairlist_t *nbl)
4166 int ci_b,ci,ci_x,ci_y,ci_xy,cj;
4173 const float *bb_i,*bbcz_i,*bbcz_j;
4175 real bx0,bx1,by0,by1,bz0,bz1;
4177 real d2cx,d2z,d2z_cx,d2z_cy,d2zx,d2zxy,d2xy;
4178 int cxf,cxl,cyf,cyf_x,cyl;
4183 int gridi_flag_shift=0,gridj_flag_shift=0;
4184 unsigned *gridj_flag=NULL;
4185 int ncj_old_i,ncj_old_j;
4187 nbs_cycle_start(&work->cc[enbsCCsearch]);
4189 if (gridj->bSimple != nbl->bSimple)
4191 gmx_incons("Grid incompatible with pair-list");
4195 nbl->na_sc = gridj->na_sc;
4196 nbl->na_ci = gridj->na_c;
4197 nbl->na_cj = nbnxn_kernel_to_cj_size(nb_kernel_type);
4198 na_cj_2log = get_2log(nbl->na_cj);
4204 /* Determine conversion of clusters to flag blocks */
4205 gridi_flag_shift = 0;
4206 while ((nbl->na_ci<<gridi_flag_shift) < NBNXN_BUFFERFLAG_SIZE)
4210 gridj_flag_shift = 0;
4211 while ((nbl->na_cj<<gridj_flag_shift) < NBNXN_BUFFERFLAG_SIZE)
4216 gridj_flag = work->buffer_flags.flag;
4219 copy_mat(nbs->box,box);
4221 rl2 = nbl->rlist*nbl->rlist;
4223 rbb2 = boundingbox_only_distance2(gridi,gridj,nbl->rlist,nbl->bSimple);
4227 fprintf(debug,"nbl bounding box only distance %f\n",sqrt(rbb2));
4230 /* Set the shift range */
4231 for(d=0; d<DIM; d++)
4233 /* Check if we need periodicity shifts.
4234 * Without PBC or with domain decomposition we don't need them.
4236 if (d >= ePBC2npbcdim(nbs->ePBC) || nbs->dd_dim[d])
4243 box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < sqrt(rl2))
4254 if (nbl->bSimple && !gridi->bSimple)
4256 conv_i = gridi->na_sc/gridj->na_sc;
4257 bb_i = gridi->bb_simple;
4258 bbcz_i = gridi->bbcz_simple;
4259 flags_i = gridi->flags_simple;
4265 bbcz_i = gridi->bbcz;
4266 flags_i = gridi->flags;
4268 cell0_i = gridi->cell0*conv_i;
4270 bbcz_j = gridj->bbcz;
4274 /* Blocks of the conversion factor - 1 give a large repeat count
4275 * combined with a small block size. This should result in good
4276 * load balancing for both small and large domains.
4278 ci_block = conv_i - 1;
4282 fprintf(debug,"nbl nc_i %d col.av. %.1f ci_block %d\n",
4283 gridi->nc,gridi->nc/(double)(gridi->ncx*gridi->ncy),ci_block);
4289 /* Initialize ci_b and ci to one before where we want them to start,
4290 * as they will both be incremented in next_ci.
4293 ci = th*ci_block - 1;
4296 while (next_ci(gridi,conv_i,nth,ci_block,&ci_x,&ci_y,&ci_b,&ci))
4298 if (nbl->bSimple && flags_i[ci] == 0)
4303 ncj_old_i = nbl->ncj;
4306 if (gridj != gridi && shp[XX] == 0)
4310 bx1 = bb_i[ci*NNBSBB_B+NNBSBB_C+XX];
4314 bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx;
4316 if (bx1 < gridj->c0[XX])
4318 d2cx = sqr(gridj->c0[XX] - bx1);
4327 ci_xy = ci_x*gridi->ncy + ci_y;
4329 /* Loop over shift vectors in three dimensions */
4330 for (tz=-shp[ZZ]; tz<=shp[ZZ]; tz++)
4332 shz = tz*box[ZZ][ZZ];
4334 bz0 = bbcz_i[ci*NNBSBB_D ] + shz;
4335 bz1 = bbcz_i[ci*NNBSBB_D+1] + shz;
4347 d2z = sqr(bz0 - box[ZZ][ZZ]);
4350 d2z_cx = d2z + d2cx;
4358 bz1/((real)(gridi->cxy_ind[ci_xy+1] - gridi->cxy_ind[ci_xy]));
4363 /* The check with bz1_frac close to or larger than 1 comes later */
4365 for (ty=-shp[YY]; ty<=shp[YY]; ty++)
4367 shy = ty*box[YY][YY] + tz*box[ZZ][YY];
4371 by0 = bb_i[ci*NNBSBB_B +YY] + shy;
4372 by1 = bb_i[ci*NNBSBB_B+NNBSBB_C+YY] + shy;
4376 by0 = gridi->c0[YY] + (ci_y )*gridi->sy + shy;
4377 by1 = gridi->c0[YY] + (ci_y+1)*gridi->sy + shy;
4380 get_cell_range(by0,by1,
4381 gridj->ncy,gridj->c0[YY],gridj->sy,gridj->inv_sy,
4391 if (by1 < gridj->c0[YY])
4393 d2z_cy += sqr(gridj->c0[YY] - by1);
4395 else if (by0 > gridj->c1[YY])
4397 d2z_cy += sqr(by0 - gridj->c1[YY]);
4400 for (tx=-shp[XX]; tx<=shp[XX]; tx++)
4402 shift = XYZ2IS(tx,ty,tz);
4404 #ifdef NBNXN_SHIFT_BACKWARD
4405 if (gridi == gridj && shift > CENTRAL)
4411 shx = tx*box[XX][XX] + ty*box[YY][XX] + tz*box[ZZ][XX];
4415 bx0 = bb_i[ci*NNBSBB_B +XX] + shx;
4416 bx1 = bb_i[ci*NNBSBB_B+NNBSBB_C+XX] + shx;
4420 bx0 = gridi->c0[XX] + (ci_x )*gridi->sx + shx;
4421 bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx + shx;
4424 get_cell_range(bx0,bx1,
4425 gridj->ncx,gridj->c0[XX],gridj->sx,gridj->inv_sx,
4436 new_ci_entry(nbl,cell0_i+ci,shift,flags_i[ci],
4441 new_sci_entry(nbl,cell0_i+ci,shift,flags_i[ci],
4445 #ifndef NBNXN_SHIFT_BACKWARD
4448 if (shift == CENTRAL && gridi == gridj &&
4452 /* Leave the pairs with i > j.
4453 * x is the major index, so skip half of it.
4460 set_icell_bb_simple(bb_i,ci,shx,shy,shz,
4465 set_icell_bb_supersub(bb_i,ci,shx,shy,shz,
4469 nbs->icell_set_x(cell0_i+ci,shx,shy,shz,
4470 gridi->na_c,nbat->xstride,nbat->x,
4473 for(cx=cxf; cx<=cxl; cx++)
4476 if (gridj->c0[XX] + cx*gridj->sx > bx1)
4478 d2zx += sqr(gridj->c0[XX] + cx*gridj->sx - bx1);
4480 else if (gridj->c0[XX] + (cx+1)*gridj->sx < bx0)
4482 d2zx += sqr(gridj->c0[XX] + (cx+1)*gridj->sx - bx0);
4485 #ifndef NBNXN_SHIFT_BACKWARD
4486 if (gridi == gridj &&
4487 cx == 0 && cyf < ci_y)
4489 if (gridi == gridj &&
4490 cx == 0 && shift == CENTRAL && cyf < ci_y)
4493 /* Leave the pairs with i > j.
4494 * Skip half of y when i and j have the same x.
4503 for(cy=cyf_x; cy<=cyl; cy++)
4505 c0 = gridj->cxy_ind[cx*gridj->ncy+cy];
4506 c1 = gridj->cxy_ind[cx*gridj->ncy+cy+1];
4507 #ifdef NBNXN_SHIFT_BACKWARD
4508 if (gridi == gridj &&
4509 shift == CENTRAL && c0 < ci)
4516 if (gridj->c0[YY] + cy*gridj->sy > by1)
4518 d2zxy += sqr(gridj->c0[YY] + cy*gridj->sy - by1);
4520 else if (gridj->c0[YY] + (cy+1)*gridj->sy < by0)
4522 d2zxy += sqr(gridj->c0[YY] + (cy+1)*gridj->sy - by0);
4524 if (c1 > c0 && d2zxy < rl2)
4526 cs = c0 + (int)(bz1_frac*(c1 - c0));
4534 /* Find the lowest cell that can possibly
4539 (bbcz_j[cf*NNBSBB_D+1] >= bz0 ||
4540 d2xy + sqr(bbcz_j[cf*NNBSBB_D+1] - bz0) < rl2))
4545 /* Find the highest cell that can possibly
4550 (bbcz_j[cl*NNBSBB_D] <= bz1 ||
4551 d2xy + sqr(bbcz_j[cl*NNBSBB_D] - bz1) < rl2))
4556 #ifdef NBNXN_REFCODE
4558 /* Simple reference code, for debugging,
4559 * overrides the more complex code above.
4564 for(k=c0; k<c1; k++)
4566 if (box_dist2(bx0,bx1,by0,by1,bz0,bz1,
4567 bb+k*NNBSBB_B) < rl2 &&
4572 if (box_dist2(bx0,bx1,by0,by1,bz0,bz1,
4573 bb+k*NNBSBB_B) < rl2 &&
4584 /* We want each atom/cell pair only once,
4585 * only use cj >= ci.
4587 #ifndef NBNXN_SHIFT_BACKWARD
4590 if (shift == CENTRAL)
4599 /* For f buffer flags with simple lists */
4600 ncj_old_j = nbl->ncj;
4602 switch (nb_kernel_type)
4604 case nbnxnk4x4_PlainC:
4605 check_subcell_list_space_simple(nbl,cl-cf+1);
4607 make_cluster_list_simple(gridj,
4609 (gridi == gridj && shift == CENTRAL),
4614 #ifdef GMX_NBNXN_SIMD_4XN
4615 case nbnxnk4xN_SIMD_4xN:
4616 check_subcell_list_space_simple(nbl,ci_to_cj(na_cj_2log,cl-cf)+2);
4617 make_cluster_list_simd_4xn(gridj,
4619 (gridi == gridj && shift == CENTRAL),
4625 #ifdef GMX_NBNXN_SIMD_2XNN
4626 case nbnxnk4xN_SIMD_2xNN:
4627 check_subcell_list_space_simple(nbl,ci_to_cj(na_cj_2log,cl-cf)+2);
4628 make_cluster_list_simd_2xnn(gridj,
4630 (gridi == gridj && shift == CENTRAL),
4636 case nbnxnk8x8x8_PlainC:
4637 case nbnxnk8x8x8_CUDA:
4638 check_subcell_list_space_supersub(nbl,cl-cf+1);
4639 for(cj=cf; cj<=cl; cj++)
4641 make_cluster_list_supersub(nbs,gridi,gridj,
4643 (gridi == gridj && shift == CENTRAL && ci == cj),
4644 nbat->xstride,nbat->x,
4650 ncpcheck += cl - cf + 1;
4652 if (bFBufferFlag && nbl->ncj > ncj_old_j)
4656 cbf = nbl->cj[ncj_old_j].cj >> gridj_flag_shift;
4657 cbl = nbl->cj[nbl->ncj-1].cj >> gridj_flag_shift;
4658 for(cb=cbf; cb<=cbl; cb++)
4660 gridj_flag[cb] = 1U<<th;
4668 /* Set the exclusions for this ci list */
4671 set_ci_top_excls(nbs,
4673 shift == CENTRAL && gridi == gridj,
4676 &(nbl->ci[nbl->nci]),
4681 set_sci_top_excls(nbs,
4683 shift == CENTRAL && gridi == gridj,
4685 &(nbl->sci[nbl->nsci]),
4689 /* Close this ci list */
4692 close_ci_entry_simple(nbl);
4696 close_ci_entry_supersub(nbl,
4698 progBal,min_ci_balanced,
4705 if (bFBufferFlag && nbl->ncj > ncj_old_i)
4707 work->buffer_flags.flag[(gridi->cell0+ci)>>gridi_flag_shift] = 1U<<th;
4711 work->ndistc = ndistc;
4713 nbs_cycle_stop(&work->cc[enbsCCsearch]);
4717 fprintf(debug,"number of distance checks %d\n",ndistc);
4718 fprintf(debug,"ncpcheck %s %d\n",gridi==gridj ? "local" : "non-local",
4723 print_nblist_statistics_simple(debug,nbl,nbs,rlist);
4727 print_nblist_statistics_supersub(debug,nbl,nbs,rlist);
4733 static void reduce_buffer_flags(const nbnxn_search_t nbs,
4735 const nbnxn_buffer_flags_t *dest)
4738 const unsigned *flag;
4740 for(s=0; s<nsrc; s++)
4742 flag = nbs->work[s].buffer_flags.flag;
4744 for(b=0; b<dest->nflag; b++)
4746 dest->flag[b] |= flag[b];
4751 static void print_reduction_cost(const nbnxn_buffer_flags_t *flags,int nout)
4753 int nelem,nkeep,ncopy,nred,b,c,out;
4759 for(b=0; b<flags->nflag; b++)
4761 if (flags->flag[b] == 1)
4763 /* Only flag 0 is set, no copy or reduction required */
4767 else if (flags->flag[b] > 0)
4770 for(out=0; out<nout; out++)
4772 if (flags->flag[b] & (1U<<out))
4789 fprintf(debug,"nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
4791 nelem/(double)(flags->nflag),
4792 nkeep/(double)(flags->nflag),
4793 ncopy/(double)(flags->nflag),
4794 nred/(double)(flags->nflag));
4797 /* Make a local or non-local pair-list, depending on iloc */
4798 void nbnxn_make_pairlist(const nbnxn_search_t nbs,
4799 nbnxn_atomdata_t *nbat,
4800 const t_blocka *excl,
4802 int min_ci_balanced,
4803 nbnxn_pairlist_set_t *nbl_list,
4808 nbnxn_grid_t *gridi,*gridj;
4810 int nzi,zi,zj0,zj1,zj;
4814 nbnxn_pairlist_t **nbl;
4816 gmx_bool CombineNBLists;
4817 int np_tot,np_noq,np_hlj,nap;
4819 /* Check if we are running hybrid GPU + CPU nbnxn mode */
4820 bGPUCPU = (!nbs->grid[0].bSimple && nbl_list->bSimple);
4822 nnbl = nbl_list->nnbl;
4823 nbl = nbl_list->nbl;
4824 CombineNBLists = nbl_list->bCombined;
4828 fprintf(debug,"ns making %d nblists\n", nnbl);
4831 nbat->bUseBufferFlags = (nbat->nout > 1);
4832 /* We should re-init the flags before making the first list */
4833 if (nbat->bUseBufferFlags && (LOCAL_I(iloc) || bGPUCPU))
4835 init_buffer_flags(&nbat->buffer_flags,nbat->natoms);
4838 if (nbl_list->bSimple)
4840 switch (nb_kernel_type)
4842 #ifdef GMX_NBNXN_SIMD_4XN
4843 case nbnxnk4xN_SIMD_4xN:
4844 nbs->icell_set_x = icell_set_x_simd_4xn;
4847 #ifdef GMX_NBNXN_SIMD_2XNN
4848 case nbnxnk4xN_SIMD_2xNN:
4849 nbs->icell_set_x = icell_set_x_simd_2xnn;
4853 nbs->icell_set_x = icell_set_x_simple;
4859 #ifdef NBNXN_SEARCH_BB_SSE
4860 nbs->icell_set_x = icell_set_x_supersub_sse8;
4862 nbs->icell_set_x = icell_set_x_supersub;
4868 /* Only zone (grid) 0 vs 0 */
4875 nzi = nbs->zones->nizone;
4878 if (!nbl_list->bSimple && min_ci_balanced > 0)
4880 nsubpair_max = get_nsubpair_max(nbs,iloc,rlist,min_ci_balanced);
4887 /* Clear all pair-lists */
4888 for(th=0; th<nnbl; th++)
4890 clear_pairlist(nbl[th]);
4893 for(zi=0; zi<nzi; zi++)
4895 gridi = &nbs->grid[zi];
4897 if (NONLOCAL_I(iloc))
4899 zj0 = nbs->zones->izone[zi].j0;
4900 zj1 = nbs->zones->izone[zi].j1;
4906 for(zj=zj0; zj<zj1; zj++)
4908 gridj = &nbs->grid[zj];
4912 fprintf(debug,"ns search grid %d vs %d\n",zi,zj);
4915 nbs_cycle_start(&nbs->cc[enbsCCsearch]);
4917 if (nbl[0]->bSimple && !gridi->bSimple)
4919 /* Hybrid list, determine blocking later */
4924 ci_block = get_ci_block_size(gridi,nbs->DomDec,nnbl);
4927 #pragma omp parallel for num_threads(nnbl) schedule(static)
4928 for(th=0; th<nnbl; th++)
4930 /* Re-init the thread-local work flag data before making
4931 * the first list (not an elegant conditional).
4933 if (nbat->bUseBufferFlags && ((zi == 0 && zj == 0) ||
4934 (bGPUCPU && zi == 0 && zj == 1)))
4936 init_buffer_flags(&nbs->work[th].buffer_flags,nbat->natoms);
4939 if (CombineNBLists && th > 0)
4941 clear_pairlist(nbl[th]);
4944 /* Divide the i super cells equally over the nblists */
4945 nbnxn_make_pairlist_part(nbs,gridi,gridj,
4946 &nbs->work[th],nbat,excl,
4950 nbat->bUseBufferFlags,
4952 (LOCAL_I(iloc) || nbs->zones->n <= 2),
4957 nbs_cycle_stop(&nbs->cc[enbsCCsearch]);
4962 for(th=0; th<nnbl; th++)
4964 inc_nrnb(nrnb,eNR_NBNXN_DIST2,nbs->work[th].ndistc);
4966 if (nbl_list->bSimple)
4968 np_tot += nbl[th]->ncj;
4969 np_noq += nbl[th]->work->ncj_noq;
4970 np_hlj += nbl[th]->work->ncj_hlj;
4974 /* This count ignores potential subsequent pair pruning */
4975 np_tot += nbl[th]->nci_tot;
4978 nap = nbl[0]->na_ci*nbl[0]->na_cj;
4979 nbl_list->natpair_ljq = (np_tot - np_noq)*nap - np_hlj*nap/2;
4980 nbl_list->natpair_lj = np_noq*nap;
4981 nbl_list->natpair_q = np_hlj*nap/2;
4983 if (CombineNBLists && nnbl > 1)
4985 nbs_cycle_start(&nbs->cc[enbsCCcombine]);
4987 combine_nblists(nnbl-1,nbl+1,nbl[0]);
4989 nbs_cycle_stop(&nbs->cc[enbsCCcombine]);
4994 if (nbat->bUseBufferFlags)
4996 reduce_buffer_flags(nbs,nnbl,&nbat->buffer_flags);
5000 print_supersub_nsp("nsubpair",nbl[0],iloc);
5003 /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
5006 nbs->search_count++;
5008 if (nbs->print_cycles &&
5009 (!nbs->DomDec || (nbs->DomDec && !LOCAL_I(iloc))) &&
5010 nbs->search_count % 100 == 0)
5012 nbs_cycle_print(stderr,nbs);
5015 if (debug && (CombineNBLists && nnbl > 1))
5017 if (nbl[0]->bSimple)
5019 print_nblist_statistics_simple(debug,nbl[0],nbs,rlist);
5023 print_nblist_statistics_supersub(debug,nbl[0],nbs,rlist);
5031 if (nbl[0]->bSimple)
5033 print_nblist_ci_cj(debug,nbl[0]);
5037 print_nblist_sci_cj(debug,nbl[0]);
5041 if (nbat->bUseBufferFlags)
5043 print_reduction_cost(&nbat->buffer_flags,nnbl);