 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
#include "nbnxn_search.h"

#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nbnxn_internal.h"
#include "gromacs/mdlib/nbnxn_simd.h"
#include "gromacs/mdlib/nbnxn_util.h"
#include "gromacs/mdlib/ns.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"
using namespace gmx; // TODO: Remove when this file is moved into gmx namespace

/* The functions below are macros as they are performance sensitive */

/* 4x4 list, pack=4: no complex conversion required */
/* i-cluster to j-cluster conversion */
#define CI_TO_CJ_J4(ci)  (ci)
/* cluster index to coordinate array index conversion */
#define X_IND_CI_J4(ci)  ((ci)*STRIDE_P4)
#define X_IND_CJ_J4(cj)  ((cj)*STRIDE_P4)

/* 4x2 list, pack=4: j-cluster size is half the packing width */
/* i-cluster to j-cluster conversion */
#define CI_TO_CJ_J2(ci)  ((ci)<<1)
/* cluster index to coordinate array index conversion */
#define X_IND_CI_J2(ci)  ((ci)*STRIDE_P4)
#define X_IND_CJ_J2(cj)  (((cj)>>1)*STRIDE_P4 + ((cj) & 1)*(PACK_X4>>1))

/* 4x8 list, pack=8: i-cluster size is half the packing width */
/* i-cluster to j-cluster conversion */
#define CI_TO_CJ_J8(ci)  ((ci)>>1)
/* cluster index to coordinate array index conversion */
#define X_IND_CI_J8(ci)  (((ci)>>1)*STRIDE_P8 + ((ci) & 1)*(PACK_X8>>1))
#define X_IND_CJ_J8(cj)  ((cj)*STRIDE_P8)
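/* A worked example of the mixed-size index math above, assuming PACK_X4 = 4
 * (so one pack holds 4 atoms and STRIDE_P4 = PACK_X4*DIM): for the 4x2 list
 * a j-cluster holds 2 atoms, so j-clusters 2 and 3 both live in pack 1, and
 * X_IND_CJ_J2(3) = 1*STRIDE_P4 + 1*(PACK_X4>>1), i.e. the second half of
 * the x row of pack 1. The 4x8 case is the mirror image: two i-clusters
 * share one 8-atom pack, selected by the low bit of ci.
 */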
/* The j-cluster size is matched to the SIMD width */
#if GMX_SIMD_REAL_WIDTH == 2
#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J2(ci)
#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J2(ci)
#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J2(cj)
#if GMX_SIMD_REAL_WIDTH == 4
#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J4(ci)
#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J4(ci)
#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J4(cj)
#if GMX_SIMD_REAL_WIDTH == 8
#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J8(ci)
#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J8(ci)
#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J8(cj)
/* For the 2xNN kernels the j-cluster size is half the SIMD width */
#define CI_TO_CJ_SIMD_2XNN(ci) CI_TO_CJ_J4(ci)
#define X_IND_CI_SIMD_2XNN(ci) X_IND_CI_J4(ci)
#define X_IND_CJ_SIMD_2XNN(cj) X_IND_CJ_J4(cj)
#if GMX_SIMD_REAL_WIDTH == 16
#define CI_TO_CJ_SIMD_2XNN(ci) CI_TO_CJ_J8(ci)
#define X_IND_CI_SIMD_2XNN(ci) X_IND_CI_J8(ci)
#define X_IND_CJ_SIMD_2XNN(cj) X_IND_CJ_J8(cj)
#error "unsupported GMX_SIMD_REAL_WIDTH"

/* We shift the i-particles backward for PBC.
 * This leads to more conditionals than shifting forward.
 * We do this to get more balanced pair lists.
 */
#define NBNXN_SHIFT_BACKWARD
static void nbs_cycle_clear(nbnxn_cycle_t *cc)
    for (int i = 0; i < enbsCCnr; i++)

static double Mcyc_av(const nbnxn_cycle_t *cc)
    return (double)cc->c*1e-6/cc->count;

static void nbs_cycle_print(FILE *fp, const nbnxn_search_t nbs)
    fprintf(fp, "ns %4d grid %4.1f search %4.1f red.f %5.3f",
            nbs->cc[enbsCCgrid].count,
            Mcyc_av(&nbs->cc[enbsCCgrid]),
            Mcyc_av(&nbs->cc[enbsCCsearch]),
            Mcyc_av(&nbs->cc[enbsCCreducef]));

    if (nbs->nthread_max > 1)
        if (nbs->cc[enbsCCcombine].count > 0)
            fprintf(fp, " comb %5.2f",
                    Mcyc_av(&nbs->cc[enbsCCcombine]));
        fprintf(fp, " s. th");
        for (int t = 0; t < nbs->nthread_max; t++)
            fprintf(fp, " %4.1f",
                    Mcyc_av(&nbs->work[t].cc[enbsCCsearch]));
static gmx_inline int ci_to_cj(int na_cj_2log, int ci)
    switch (na_cj_2log)
        case 2: return ci;       break;
        case 1: return (ci<<1);  break;
        case 3: return (ci>>1);  break;
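/* na_cj_2log is log2 of the j-cluster size: size 4 (2log = 2) maps 1:1 to
 * the 4-atom i-cluster, size 2 (2log = 1) gives two j-clusters per
 * i-cluster, and size 8 (2log = 3) packs two i-clusters into one j-cluster,
 * matching the CI_TO_CJ_J{2,4,8} macros above.
 */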
gmx_bool nbnxn_kernel_pairlist_simple(int nb_kernel_type)
    if (nb_kernel_type == nbnxnkNotSet)
        gmx_fatal(FARGS, "Non-bonded kernel type not set for Verlet-style pair-list.");

    switch (nb_kernel_type)
        case nbnxnk8x8x8_GPU:
        case nbnxnk8x8x8_PlainC:
            return FALSE;

        case nbnxnk4x4_PlainC:
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:
            return TRUE;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");

/* Initializes a single t_nblist data structure for free-energy interactions */
static void nbnxn_init_pairlist_fep(t_nblist *nl)
    nl->type      = GMX_NBLIST_INTERACTION_FREE_ENERGY;
    nl->igeometry = GMX_NBLIST_GEOMETRY_PARTICLE_PARTICLE;
    /* The interaction functions are set in the free energy kernel function */
void nbnxn_init_search(nbnxn_search_t            *nbs_ptr,
                       ivec                      *n_dd_cells,
                       struct gmx_domdec_zones_t *zones,
                       int                        nthread_max)

    nbs->DomDec = (n_dd_cells != NULL);

    clear_ivec(nbs->dd_dim);
    int ngrid = 1;
    for (int d = 0; d < DIM; d++)
        if ((*n_dd_cells)[d] > 1)
            nbs->dd_dim[d] = 1;
            /* Each grid matches a DD zone */
            ngrid *= 2;

    nbnxn_grids_init(nbs, ngrid);

    nbs->cell_nalloc = 0;

    nbs->nthread_max = nthread_max;

    /* Initialize the work data structures for each thread */
    snew(nbs->work, nbs->nthread_max);
    for (int t = 0; t < nbs->nthread_max; t++)
        nbs->work[t].cxy_na           = NULL;
        nbs->work[t].cxy_na_nalloc    = 0;
        nbs->work[t].sort_work        = NULL;
        nbs->work[t].sort_work_nalloc = 0;

        snew(nbs->work[t].nbl_fep, 1);
        nbnxn_init_pairlist_fep(nbs->work[t].nbl_fep);

    /* Initialize detailed nbsearch cycle counting */
    nbs->print_cycles = (getenv("GMX_NBNXN_CYCLE") != 0);
    nbs->search_count = 0;
    nbs_cycle_clear(nbs->cc);
    for (int t = 0; t < nbs->nthread_max; t++)
        nbs_cycle_clear(nbs->work[t].cc);
static void init_buffer_flags(nbnxn_buffer_flags_t *flags,
                              int                   natoms)
    flags->nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE;
    if (flags->nflag > flags->flag_nalloc)
        flags->flag_nalloc = over_alloc_large(flags->nflag);
        srenew(flags->flag, flags->flag_nalloc);
    for (int b = 0; b < flags->nflag; b++)
        bitmask_clear(&(flags->flag[b]));

/* Determines the cell range along one dimension that
 * the bounding box b0 - b1 sees.
 */
static void get_cell_range(real b0, real b1,
                           int nc, real c0, real s, real invs,
                           real d2, real r2, int *cf, int *cl)
    *cf = std::max(static_cast<int>((b0 - c0)*invs), 0);
    while (*cf > 0 && d2 + gmx::square((b0 - c0) - (*cf-1+1)*s) < r2)
        (*cf)--;

    *cl = std::min(static_cast<int>((b1 - c0)*invs), nc-1);
    while (*cl < nc-1 && d2 + gmx::square((*cl+1)*s - (b1 - c0)) < r2)
        (*cl)++;
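/* Example of the range logic above, with hypothetical numbers: the first
 * guess is the cell containing each bound, e.g. b0 = 1.9, b1 = 2.1 with
 * c0 = 0 and cell size s = 1 (invs = 1) gives cells 1 and 2. The while
 * loops then widen the range only while d2 (the squared distance already
 * accumulated in the other dimensions) plus the squared gap to the next
 * cell edge stays below the cut-off r2.
 */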
/* Reference code calculating the distance^2 between two bounding boxes */
static float box_dist2(float bx0, float bx1, float by0,
                       float by1, float bz0, float bz1,
                       const nbnxn_bb_t *bb)
    float d2;
    float dl, dh, dm, dm0;

    d2 = 0;

    dl  = bx0 - bb->upper[BB_X];
    dh  = bb->lower[BB_X] - bx1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = by0 - bb->upper[BB_Y];
    dh  = bb->lower[BB_Y] - by1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bz0 - bb->upper[BB_Z];
    dh  = bb->lower[BB_Z] - bz1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
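/* The per-dimension logic above: dl and dh are the signed gaps between the
 * two boxes along one axis, of which at most one can be positive. Clamping
 * their maximum at zero makes overlapping dimensions contribute nothing,
 * so d2 is the squared distance between the closest points of the boxes.
 */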
/* Plain C code calculating the distance^2 between two bounding boxes */
static float subc_bb_dist2(int si, const nbnxn_bb_t *bb_i_ci,
                           int csj, const nbnxn_bb_t *bb_j_all)
    const nbnxn_bb_t *bb_i, *bb_j;
    float             d2;
    float             dl, dh, dm, dm0;

    bb_i = bb_i_ci  + si;
    bb_j = bb_j_all + csj;

    d2 = 0;

    dl  = bb_i->lower[BB_X] - bb_j->upper[BB_X];
    dh  = bb_j->lower[BB_X] - bb_i->upper[BB_X];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Y] - bb_j->upper[BB_Y];
    dh  = bb_j->lower[BB_Y] - bb_i->upper[BB_Y];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Z] - bb_j->upper[BB_Z];
    dh  = bb_j->lower[BB_Z] - bb_i->upper[BB_Z];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
#ifdef NBNXN_SEARCH_BB_SIMD4

/* 4-wide SIMD code for bb distance for bb format xyz0 */
static float subc_bb_dist2_simd4(int si, const nbnxn_bb_t *bb_i_ci,
                                 int csj, const nbnxn_bb_t *bb_j_all)
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    Simd4Float bb_i_S0, bb_i_S1;
    Simd4Float bb_j_S0, bb_j_S1;
    Simd4Float dl_S;
    Simd4Float dh_S;
    Simd4Float dm_S;
    Simd4Float dm0_S;

    bb_i_S0 = load4(&bb_i_ci[si].lower[0]);
    bb_i_S1 = load4(&bb_i_ci[si].upper[0]);
    bb_j_S0 = load4(&bb_j_all[csj].lower[0]);
    bb_j_S1 = load4(&bb_j_all[csj].upper[0]);

    dl_S    = bb_i_S0 - bb_j_S1;
    dh_S    = bb_j_S0 - bb_i_S1;

    dm_S    = max(dl_S, dh_S);
    dm0_S   = max(dm_S, simd4SetZeroF());

    return dotProduct(dm0_S, dm0_S);
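/* Note on the xyz0 layout assumed above: each bound holds four floats with a
 * padding element. Assuming both boxes carry identical padding values, the
 * fourth component of dl_S/dh_S is not positive and the max with
 * simd4SetZeroF() clamps it to zero, so the dot product equals the
 * three-dimensional squared distance.
 */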
/* Calculate bounding-box distances of bb_i[si,...,si+3] and store them in d2 */
#define SUBC_BB_DIST2_SIMD4_XXXX_INNER(si, bb_i, d2) \
    { \
        int        shi; \
 \
        Simd4Float dx_0, dy_0, dz_0; \
        Simd4Float dx_1, dy_1, dz_1; \
 \
        Simd4Float mx, my, mz; \
        Simd4Float m0x, m0y, m0z; \
 \
        Simd4Float d2x, d2y, d2z; \
        Simd4Float d2s, d2t; \
 \
        shi  = si*NNBSBB_D*DIM; \
 \
        xi_l = load4(bb_i+shi+0*STRIDE_PBB); \
        yi_l = load4(bb_i+shi+1*STRIDE_PBB); \
        zi_l = load4(bb_i+shi+2*STRIDE_PBB); \
        xi_h = load4(bb_i+shi+3*STRIDE_PBB); \
        yi_h = load4(bb_i+shi+4*STRIDE_PBB); \
        zi_h = load4(bb_i+shi+5*STRIDE_PBB); \
 \
        dx_0 = xi_l - xj_h; \
        dy_0 = yi_l - yj_h; \
        dz_0 = zi_l - zj_h; \
 \
        dx_1 = xj_l - xi_h; \
        dy_1 = yj_l - yi_h; \
        dz_1 = zj_l - zi_h; \
 \
        mx   = max(dx_0, dx_1); \
        my   = max(dy_0, dy_1); \
        mz   = max(dz_0, dz_1); \
 \
        m0x  = max(mx, zero); \
        m0y  = max(my, zero); \
        m0z  = max(mz, zero); \
 \
        d2x  = m0x * m0x; \
        d2y  = m0y * m0y; \
        d2z  = m0z * m0z; \
 \
        d2s  = d2x + d2y; \
        d2t  = d2s + d2z; \
 \
        store4(d2+si, d2t); \
    }
/* 4-wide SIMD code for nsi bb distances for bb format xxxxyyyyzzzz */
static void subc_bb_dist2_simd4_xxxx(const float *bb_j,
                                     int nsi, const float *bb_i,
                                     float *d2)
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    Simd4Float xj_l, yj_l, zj_l;
    Simd4Float xj_h, yj_h, zj_h;
    Simd4Float xi_l, yi_l, zi_l;
    Simd4Float xi_h, yi_h, zi_h;

    Simd4Float zero = setZero();

    xj_l = Simd4Float(bb_j[0*STRIDE_PBB]);
    yj_l = Simd4Float(bb_j[1*STRIDE_PBB]);
    zj_l = Simd4Float(bb_j[2*STRIDE_PBB]);
    xj_h = Simd4Float(bb_j[3*STRIDE_PBB]);
    yj_h = Simd4Float(bb_j[4*STRIDE_PBB]);
    zj_h = Simd4Float(bb_j[5*STRIDE_PBB]);

    /* Here we "loop" over si from 0 to nsi in steps of STRIDE_PBB.
     * But as we know the number of iterations is 1 or 2, we unroll manually.
     */
    SUBC_BB_DIST2_SIMD4_XXXX_INNER(0, bb_i, d2);
    if (STRIDE_PBB < nsi)
        SUBC_BB_DIST2_SIMD4_XXXX_INNER(STRIDE_PBB, bb_i, d2);

#endif /* NBNXN_SEARCH_BB_SIMD4 */
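/* Sketch of the xxxxyyyyzzzz (packed bb) layout used above: the bounds of
 * STRIDE_PBB (4 here) sub-cell boxes are transposed into six rows of
 * STRIDE_PBB floats each (lower x, y, z, then upper x, y, z), so a single
 * load4() fetches the same bound of four boxes at once and all four
 * i-vs-j box distances come out of one pass through the macro.
 */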
/* Returns whether any atom pair from two clusters is within distance sqrt(rl2) */
static gmx_inline gmx_bool
clusterpair_in_range(const nbnxn_list_work_t *work,
                     int si,
                     int csj, int stride, const real *x_j,
                     real rl2)
#ifndef NBNXN_SEARCH_BB_SIMD4
    /* Plain C version.
     * All coordinates are stored as xyzxyz...
     */
    const real *x_i = work->x_ci;

    for (int i = 0; i < nbnxn_gpu_cluster_size; i++)
        int i0 = (si*nbnxn_gpu_cluster_size + i)*DIM;
        for (int j = 0; j < nbnxn_gpu_cluster_size; j++)
            int j0 = (csj*nbnxn_gpu_cluster_size + j)*stride;

            real d2 = gmx::square(x_i[i0  ] - x_j[j0  ]) +
                      gmx::square(x_i[i0+1] - x_j[j0+1]) +
                      gmx::square(x_i[i0+2] - x_j[j0+2]);
            if (d2 < rl2)
                return TRUE;

    return FALSE;

#else /* !NBNXN_SEARCH_BB_SIMD4 */
    /* 4-wide SIMD version.
     * A cluster is hard-coded to 8 atoms.
     * The coordinates x_i are stored as xxxxyyyy..., x_j is stored xyzxyz...
     * Using 8-wide AVX is not faster on Intel Sandy Bridge.
     */
    assert(nbnxn_gpu_cluster_size == 8);

    Simd4Real   rc2_S      = Simd4Real(rl2);

    const real *x_i        = work->x_ci_simd;

    int         dim_stride = nbnxn_gpu_cluster_size*DIM;
    Simd4Real   ix_S0      = load4(x_i + si*dim_stride + 0*GMX_SIMD4_WIDTH);
    Simd4Real   iy_S0      = load4(x_i + si*dim_stride + 1*GMX_SIMD4_WIDTH);
    Simd4Real   iz_S0      = load4(x_i + si*dim_stride + 2*GMX_SIMD4_WIDTH);
    Simd4Real   ix_S1      = load4(x_i + si*dim_stride + 3*GMX_SIMD4_WIDTH);
    Simd4Real   iy_S1      = load4(x_i + si*dim_stride + 4*GMX_SIMD4_WIDTH);
    Simd4Real   iz_S1      = load4(x_i + si*dim_stride + 5*GMX_SIMD4_WIDTH);

    /* We loop from the outer to the inner particles to maximize
     * the chance that we find a pair in range quickly and return.
     */
    int j0 = csj*nbnxn_gpu_cluster_size;
    int j1 = j0 + nbnxn_gpu_cluster_size - 1;
    while (j0 < j1)
        Simd4Real jx0_S, jy0_S, jz0_S;
        Simd4Real jx1_S, jy1_S, jz1_S;

        Simd4Real dx_S0, dy_S0, dz_S0;
        Simd4Real dx_S1, dy_S1, dz_S1;
        Simd4Real dx_S2, dy_S2, dz_S2;
        Simd4Real dx_S3, dy_S3, dz_S3;

        Simd4Real rsq_S0, rsq_S1, rsq_S2, rsq_S3;
        Simd4Bool wco_S0, wco_S1, wco_S2, wco_S3;
        Simd4Bool wco_any_S01, wco_any_S23, wco_any_S;

        jx0_S = Simd4Real(x_j[j0*stride+0]);
        jy0_S = Simd4Real(x_j[j0*stride+1]);
        jz0_S = Simd4Real(x_j[j0*stride+2]);

        jx1_S = Simd4Real(x_j[j1*stride+0]);
        jy1_S = Simd4Real(x_j[j1*stride+1]);
        jz1_S = Simd4Real(x_j[j1*stride+2]);

        /* Calculate distance */
        dx_S0 = ix_S0 - jx0_S;
        dy_S0 = iy_S0 - jy0_S;
        dz_S0 = iz_S0 - jz0_S;
        dx_S1 = ix_S1 - jx0_S;
        dy_S1 = iy_S1 - jy0_S;
        dz_S1 = iz_S1 - jz0_S;
        dx_S2 = ix_S0 - jx1_S;
        dy_S2 = iy_S0 - jy1_S;
        dz_S2 = iz_S0 - jz1_S;
        dx_S3 = ix_S1 - jx1_S;
        dy_S3 = iy_S1 - jy1_S;
        dz_S3 = iz_S1 - jz1_S;

        /* rsq = dx*dx+dy*dy+dz*dz */
        rsq_S0 = norm2(dx_S0, dy_S0, dz_S0);
        rsq_S1 = norm2(dx_S1, dy_S1, dz_S1);
        rsq_S2 = norm2(dx_S2, dy_S2, dz_S2);
        rsq_S3 = norm2(dx_S3, dy_S3, dz_S3);

        wco_S0 = (rsq_S0 < rc2_S);
        wco_S1 = (rsq_S1 < rc2_S);
        wco_S2 = (rsq_S2 < rc2_S);
        wco_S3 = (rsq_S3 < rc2_S);

        wco_any_S01 = wco_S0 || wco_S1;
        wco_any_S23 = wco_S2 || wco_S3;
        wco_any_S   = wco_any_S01 || wco_any_S23;

        if (anyTrue(wco_any_S))
            return TRUE;

        j0++;
        j1--;

    return FALSE;

#endif /* !NBNXN_SEARCH_BB_SIMD4 */
/* Returns the j sub-cell for index cj_ind */
static int nbl_cj(const nbnxn_pairlist_t *nbl, int cj_ind)
    return nbl->cj4[cj_ind/nbnxn_gpu_jgroup_size].cj[cj_ind & (nbnxn_gpu_jgroup_size - 1)];

/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
static unsigned int nbl_imask0(const nbnxn_pairlist_t *nbl, int cj_ind)
    return nbl->cj4[cj_ind/nbnxn_gpu_jgroup_size].imei[0].imask;
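/* The indexing above relies on nbnxn_gpu_jgroup_size being a power of two:
 * cj_ind/nbnxn_gpu_jgroup_size selects the cj4 group, and the bitwise AND
 * with (size - 1) is the cheap equivalent of cj_ind % size within it.
 */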
/* Ensures there is enough space for extra exclusion masks */
static void check_excl_space(nbnxn_pairlist_t *nbl, int extra)
    if (nbl->nexcl+extra > nbl->excl_nalloc)
        nbl->excl_nalloc = over_alloc_small(nbl->nexcl+extra);
        nbnxn_realloc_void((void **)&nbl->excl,
                           nbl->nexcl*sizeof(*nbl->excl),
                           nbl->excl_nalloc*sizeof(*nbl->excl),
                           nbl->alloc, nbl->free);
/* Ensures there is enough space for ncell extra j-cells in the list */
static void check_cell_list_space_simple(nbnxn_pairlist_t *nbl,
                                         int               ncell)
    int cj_max;

    cj_max = nbl->ncj + ncell;

    if (cj_max > nbl->cj_nalloc)
        nbl->cj_nalloc = over_alloc_small(cj_max);
        nbnxn_realloc_void((void **)&nbl->cj,
                           nbl->ncj*sizeof(*nbl->cj),
                           nbl->cj_nalloc*sizeof(*nbl->cj),
                           nbl->alloc, nbl->free);

/* Ensures there is enough space for ncell extra j-clusters in the list */
static void check_cell_list_space_supersub(nbnxn_pairlist_t *nbl,
                                           int               ncell)
    int ncj4_max, w;

    /* We can have at most nsupercell*gpu_ncluster_per_cell sj lists */
    /* We can store 4 j-subcell - i-supercell pairs in one struct.
     * Since we round down, we need one extra entry.
     */
    ncj4_max = ((nbl->work->cj_ind + ncell*gpu_ncluster_per_cell + nbnxn_gpu_jgroup_size - 1)/nbnxn_gpu_jgroup_size);

    if (ncj4_max > nbl->cj4_nalloc)
        nbl->cj4_nalloc = over_alloc_small(ncj4_max);
        nbnxn_realloc_void((void **)&nbl->cj4,
                           nbl->work->cj4_init*sizeof(*nbl->cj4),
                           nbl->cj4_nalloc*sizeof(*nbl->cj4),
                           nbl->alloc, nbl->free);

    if (ncj4_max > nbl->work->cj4_init)
        for (int j4 = nbl->work->cj4_init; j4 < ncj4_max; j4++)
            /* No i-subcells and no excl's in the list initially */
            for (w = 0; w < nbnxn_gpu_clusterpair_split; w++)
                nbl->cj4[j4].imei[w].imask    = 0U;
                nbl->cj4[j4].imei[w].excl_ind = 0;

        nbl->work->cj4_init = ncj4_max;
/* Set all excl masks for one GPU warp to no exclusions */
static void set_no_excls(nbnxn_excl_t *excl)
    for (int t = 0; t < nbnxn_gpu_excl_size; t++)
        /* Turn all interaction bits on */
        excl->pair[t] = NBNXN_INTERACTION_MASK_ALL;

/* Initializes a single nbnxn_pairlist_t data structure */
static void nbnxn_init_pairlist(nbnxn_pairlist_t *nbl,
                                gmx_bool          bSimple,
                                nbnxn_alloc_t    *alloc,
                                nbnxn_free_t     *free)
    if (alloc == NULL)
        nbl->alloc = nbnxn_alloc_aligned;
    if (free == NULL)
        nbl->free = nbnxn_free_aligned;

    nbl->bSimple = bSimple;

    /* We need one element extra in sj, so alloc initially with 1 */
    GMX_ASSERT(nbnxn_gpu_ncluster_per_supercluster == gpu_ncluster_per_cell, "The search code assumes that a super-cluster matches a search grid cell");

    GMX_ASSERT(sizeof(nbl->cj4[0].imei[0].imask)*8 >= nbnxn_gpu_jgroup_size*gpu_ncluster_per_cell, "The i super-cluster cluster interaction mask does not contain a sufficient number of bits");
    GMX_ASSERT(sizeof(nbl->excl[0])*8 >= nbnxn_gpu_jgroup_size*gpu_ncluster_per_cell, "The GPU exclusion mask does not contain a sufficient number of bits");

    nbl->excl_nalloc = 0;
    nbl->nexcl       = 0;
    check_excl_space(nbl, 1);
    nbl->nexcl       = 1;
    set_no_excls(&nbl->excl[0]);

    if (bSimple)
        snew_aligned(nbl->work->bb_ci, 1, NBNXN_SEARCH_BB_MEM_ALIGN);
    else
#ifdef NBNXN_BBXXXX
        snew_aligned(nbl->work->pbb_ci, gpu_ncluster_per_cell/STRIDE_PBB*NNBSBB_XXXX, NBNXN_SEARCH_BB_MEM_ALIGN);
#else
        snew_aligned(nbl->work->bb_ci, gpu_ncluster_per_cell, NBNXN_SEARCH_BB_MEM_ALIGN);
#endif

    int gpu_clusterpair_nc = gpu_ncluster_per_cell*nbnxn_gpu_cluster_size*DIM;
    snew(nbl->work->x_ci, gpu_clusterpair_nc);
#if GMX_SIMD
    snew_aligned(nbl->work->x_ci_simd,
                 std::max(NBNXN_CPU_CLUSTER_I_SIZE*DIM*GMX_SIMD_REAL_WIDTH,
                          gpu_clusterpair_nc),
                 GMX_SIMD_REAL_WIDTH);
#endif
    snew_aligned(nbl->work->d2, gpu_ncluster_per_cell, NBNXN_SEARCH_BB_MEM_ALIGN);

    nbl->work->sort            = NULL;
    nbl->work->sort_nalloc     = 0;
    nbl->work->sci_sort        = NULL;
    nbl->work->sci_sort_nalloc = 0;
void nbnxn_init_pairlist_set(nbnxn_pairlist_set_t *nbl_list,
                             gmx_bool bSimple, gmx_bool bCombined,
                             nbnxn_alloc_t *alloc,
                             nbnxn_free_t *free)
    nbl_list->bSimple   = bSimple;
    nbl_list->bCombined = bCombined;

    nbl_list->nnbl = gmx_omp_nthreads_get(emntNonbonded);

    if (!nbl_list->bCombined &&
        nbl_list->nnbl > NBNXN_BUFFERFLAG_MAX_THREADS)
        gmx_fatal(FARGS, "%d OpenMP threads were requested. Since the non-bonded force buffer reduction is prohibitively slow with more than %d threads, we do not allow this. Use %d or less OpenMP threads.",
                  nbl_list->nnbl, NBNXN_BUFFERFLAG_MAX_THREADS, NBNXN_BUFFERFLAG_MAX_THREADS);

    snew(nbl_list->nbl, nbl_list->nnbl);
    snew(nbl_list->nbl_fep, nbl_list->nnbl);
    /* Execute in order to avoid memory interleaving between threads */
#pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
    for (int i = 0; i < nbl_list->nnbl; i++)
        try
            /* Allocate the nblist data structure locally on each thread
             * to optimize memory access for NUMA architectures.
             */
            snew(nbl_list->nbl[i], 1);

            /* Only list 0 is used on the GPU, use normal allocation for i>0 */
            if (i == 0)
                nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, alloc, free);
            else
                nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, NULL, NULL);

            snew(nbl_list->nbl_fep[i], 1);
            nbnxn_init_pairlist_fep(nbl_list->nbl_fep[i]);
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics_simple(FILE *fp, const nbnxn_pairlist_t *nbl,
                                           const nbnxn_search_t nbs, real rl)
    const nbnxn_grid_t *grid;
    int                 cs[SHIFTS];
    int                 npexcl;

    /* This code only produces correct statistics with domain decomposition */
    grid = &nbs->grid[0];

    fprintf(fp, "nbl nci %d ncj %d\n",
            nbl->nci, nbl->ncj);
    fprintf(fp, "nbl na_sc %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_sc, rl, nbl->ncj, nbl->ncj/(double)grid->nc,
            nbl->ncj/(double)grid->nc*grid->na_sc,
            nbl->ncj/(double)grid->nc*grid->na_sc/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nc*grid->na_sc/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    fprintf(fp, "nbl average j cell list length %.1f\n",
            0.25*nbl->ncj/(double)std::max(nbl->nci, 1));

    for (int s = 0; s < SHIFTS; s++)
        cs[s] = 0;
    npexcl = 0;
    for (int i = 0; i < nbl->nci; i++)
        cs[nbl->ci[i].shift & NBNXN_CI_SHIFT] +=
            nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start;

        int j = nbl->ci[i].cj_ind_start;
        while (j < nbl->ci[i].cj_ind_end &&
               nbl->cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
            npexcl++;
            j++;

    fprintf(fp, "nbl cell pairs, total: %d excl: %d %.1f%%\n",
            nbl->ncj, npexcl, 100*npexcl/(double)std::max(nbl->ncj, 1));
    for (int s = 0; s < SHIFTS; s++)
        if (cs[s] > 0)
            fprintf(fp, "nbl shift %2d ncj %3d\n", s, cs[s]);
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics_supersub(FILE *fp, const nbnxn_pairlist_t *nbl,
                                             const nbnxn_search_t nbs, real rl)
    const nbnxn_grid_t *grid;
    int                 b;
    int                 c[gpu_ncluster_per_cell + 1];
    double              sum_nsp, sum_nsp2;
    int                 nsp_max;

    /* This code only produces correct statistics with domain decomposition */
    grid = &nbs->grid[0];

    fprintf(fp, "nbl nsci %d ncj4 %d nsi %d excl4 %d\n",
            nbl->nsci, nbl->ncj4, nbl->nci_tot, nbl->nexcl);
    fprintf(fp, "nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_ci, rl, nbl->nci_tot, nbl->nci_tot/(double)grid->nsubc_tot,
            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c,
            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nsubc_tot*grid->na_c/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    sum_nsp  = 0;
    sum_nsp2 = 0;
    nsp_max  = 0;
    for (int si = 0; si <= gpu_ncluster_per_cell; si++)
        c[si] = 0;
    for (int i = 0; i < nbl->nsci; i++)
        int nsp = 0;
        for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
            for (int j = 0; j < nbnxn_gpu_jgroup_size; j++)
                int b = 0;
                for (int si = 0; si < gpu_ncluster_per_cell; si++)
                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*gpu_ncluster_per_cell + si)))
                        b++;
                nsp += b;
                c[b]++;
        sum_nsp  += nsp;
        sum_nsp2 += nsp*nsp;
        nsp_max   = std::max(nsp_max, nsp);
    sum_nsp  /= nbl->nsci;
    sum_nsp2 /= nbl->nsci;

    fprintf(fp, "nbl #cluster-pairs: av %.1f stddev %.1f max %d\n",
            sum_nsp, std::sqrt(sum_nsp2 - sum_nsp*sum_nsp), nsp_max);

    for (b = 0; b <= gpu_ncluster_per_cell; b++)
        fprintf(fp, "nbl j-list #i-subcell %d %7d %4.1f\n",
                b, c[b],
                100.0*c[b]/(double)(nbl->ncj4*nbnxn_gpu_jgroup_size));
/* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp */
static void low_get_nbl_exclusions(nbnxn_pairlist_t *nbl, int cj4,
                                   int warp, nbnxn_excl_t **excl)
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
        /* No exclusions set, make a new list entry */
        nbl->cj4[cj4].imei[warp].excl_ind = nbl->nexcl;
        nbl->nexcl++;
        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
        set_no_excls(*excl);
    else
        /* We already have some exclusions, new ones can be added to the list */
        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];

/* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp,
 * generates a new element and allocates extra memory, if necessary.
 */
static void get_nbl_exclusions_1(nbnxn_pairlist_t *nbl, int cj4,
                                 int warp, nbnxn_excl_t **excl)
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
        /* We need to make a new list entry, check if we have space */
        check_excl_space(nbl, 1);
    low_get_nbl_exclusions(nbl, cj4, warp, excl);

/* Returns pointers to the exclusion masks for cj4-unit cj4 for both warps,
 * generates a new element and allocates extra memory, if necessary.
 */
static void get_nbl_exclusions_2(nbnxn_pairlist_t *nbl, int cj4,
                                 nbnxn_excl_t **excl_w0,
                                 nbnxn_excl_t **excl_w1)
    /* Check for space we might need */
    check_excl_space(nbl, 2);

    low_get_nbl_exclusions(nbl, cj4, 0, excl_w0);
    low_get_nbl_exclusions(nbl, cj4, 1, excl_w1);
/* Sets the self exclusions i=j and pair exclusions i>j */
static void set_self_and_newton_excls_supersub(nbnxn_pairlist_t *nbl,
                                               int cj4_ind, int sj_offset,
                                               int i_cluster_in_cell)
    nbnxn_excl_t *excl[nbnxn_gpu_clusterpair_split];

    /* Here we only set the self and double pair exclusions */

    assert(nbnxn_gpu_clusterpair_split == 2);

    get_nbl_exclusions_2(nbl, cj4_ind, &excl[0], &excl[1]);

    /* Only minor < major bits set */
    for (int ej = 0; ej < nbl->na_ci; ej++)
        int w = (ej>>2);
        for (int ei = ej; ei < nbl->na_ci; ei++)
            excl[w]->pair[(ej & (nbnxn_gpu_jgroup_size-1))*nbl->na_ci + ei] &=
                ~(1U << (sj_offset*gpu_ncluster_per_cell + i_cluster_in_cell));
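/* Bit layout used above: each excl->pair[] entry is a 32-bit mask over
 * cluster pairs in which bit (sj_offset*gpu_ncluster_per_cell +
 * i_cluster_in_cell) addresses one i-cluster vs j-cluster pair, the same
 * indexing as the imask in nbnxn_cj4_t. Clearing it for all ei >= ej
 * removes the self and double-counted (Newton) atom pairs of the diagonal
 * cluster pair.
 */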
/* Returns a diagonal or off-diagonal interaction mask for plain C lists */
static unsigned int get_imask(gmx_bool rdiag, int ci, int cj)
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);

/* Returns a diagonal or off-diagonal interaction mask for cj-size=2 */
static unsigned int get_imask_simd_j2(gmx_bool rdiag, int ci, int cj)
    return (rdiag && ci*2 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_0 :
            (rdiag && ci*2+1 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_1 :
             NBNXN_INTERACTION_MASK_ALL));

/* Returns a diagonal or off-diagonal interaction mask for cj-size=4 */
static unsigned int get_imask_simd_j4(gmx_bool rdiag, int ci, int cj)
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);

/* Returns a diagonal or off-diagonal interaction mask for cj-size=8 */
static unsigned int get_imask_simd_j8(gmx_bool rdiag, int ci, int cj)
    return (rdiag && ci == cj*2 ? NBNXN_INTERACTION_MASK_DIAG_J8_0 :
            (rdiag && ci == cj*2+1 ? NBNXN_INTERACTION_MASK_DIAG_J8_1 :
             NBNXN_INTERACTION_MASK_ALL));
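/* Why the mixed sizes need two diagonal masks: with cj-size 2, a 4-atom
 * i-cluster overlaps two j-clusters (2*ci and 2*ci+1), so the self-overlap
 * diagonal falls in one half of the mask for the first and the other half
 * for the second. The J8 case is the transpose: an 8-atom j-cluster covers
 * i-clusters 2*cj and 2*cj+1.
 */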
#if GMX_SIMD_REAL_WIDTH == 2
#define get_imask_simd_4xn  get_imask_simd_j2
#endif
#if GMX_SIMD_REAL_WIDTH == 4
#define get_imask_simd_4xn  get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 8
#define get_imask_simd_4xn  get_imask_simd_j8
#define get_imask_simd_2xnn get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 16
#define get_imask_simd_2xnn get_imask_simd_j8
#endif
/* Plain C code for making a pair list of cell ci vs cell cjf-cjl.
 * Checks bounding box distances and possibly atom pair distances.
 */
static void make_cluster_list_simple(const nbnxn_grid_t *gridj,
                                     nbnxn_pairlist_t *nbl,
                                     int ci, int cjf, int cjl,
                                     gmx_bool remove_sub_diag,
                                     const real *x_j,
                                     real rl2, float rbb2,
                                     int *ndistc)
    const nbnxn_bb_t *bb_ci;
    const real       *x_ci;

    gmx_bool          InRange;
    real              d2;
    int               cjf_gl, cjl_gl;

    bb_ci = nbl->work->bb_ci;
    x_ci  = nbl->work->x_ci;
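    /* Strategy of the two scans below: cjf and cjl bound the candidate
     * j-cell range from the coarse grid search. We first advance cjf and
     * then lower cjl, dropping cells whose bounding box (and, between rbb2
     * and the cut-off, whose atom pairs) are out of range, so the stored
     * range is tight on both ends without losing any in-range cell.
     */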
    InRange = FALSE;
    while (!InRange && cjf <= cjl)
        d2 = subc_bb_dist2(0, bb_ci, cjf, gridj->bb);
        *ndistc += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
            InRange = TRUE;
        else if (d2 < rl2)
            cjf_gl = gridj->cell0 + cjf;
            for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
                for (int j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rl2);

            *ndistc += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
        if (!InRange)
            cjf++;
    if (!InRange)
        return;

    InRange = FALSE;
    while (!InRange && cjl > cjf)
        d2 = subc_bb_dist2(0, bb_ci, cjl, gridj->bb);
        *ndistc += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
            InRange = TRUE;
        else if (d2 < rl2)
            cjl_gl = gridj->cell0 + cjl;
            for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
                for (int j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rl2);

            *ndistc += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
        if (!InRange)
            cjl--;

    if (cjf <= cjl)
        for (int cj = cjf; cj <= cjl; cj++)
            /* Store cj and the interaction mask */
            nbl->cj[nbl->ncj].cj   = gridj->cell0 + cj;
            nbl->cj[nbl->ncj].excl = get_imask(remove_sub_diag, ci, cj);
            nbl->ncj++;

        /* Increase the closing index in i super-cell list */
        nbl->ci[nbl->nci].cj_ind_end = nbl->ncj;
#ifdef GMX_NBNXN_SIMD_4XN
#include "gromacs/mdlib/nbnxn_search_simd_4xn.h"
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
#include "gromacs/mdlib/nbnxn_search_simd_2xnn.h"
#endif
/* Plain C or SIMD4 code for making a pair list of super-cell sci vs scj.
 * Checks bounding box distances and possibly atom pair distances.
 */
static void make_cluster_list_supersub(const nbnxn_grid_t *gridi,
                                       const nbnxn_grid_t *gridj,
                                       nbnxn_pairlist_t *nbl,
                                       int sci, int scj,
                                       gmx_bool sci_equals_scj,
                                       int stride, const real *x,
                                       real rl2, float rbb2,
                                       int *ndistc)
    nbnxn_list_work_t *work = nbl->work;

#ifdef NBNXN_BBXXXX
    const float *pbb_ci = work->pbb_ci;
#else
    const nbnxn_bb_t *bb_ci = work->bb_ci;
#endif

    const int na_c = nbnxn_gpu_cluster_size;
    assert(na_c == gridi->na_c);
    assert(na_c == gridj->na_c);

    /* We generate the pairlist mainly based on bounding-box distances
     * and do atom pair distance based pruning on the GPU.
     * Only if a j-group contains a single cluster-pair, we try to prune
     * that pair based on atom distances on the CPU to avoid empty j-groups.
     */
#define PRUNE_LIST_CPU_ONE
#ifdef PRUNE_LIST_CPU_ONE
    int ci_last = -1;
#endif

    float *d2l = work->d2;

    for (int subc = 0; subc < gridj->nsubc[scj]; subc++)
        int          cj4_ind   = nbl->work->cj_ind/nbnxn_gpu_jgroup_size;
        int          cj_offset = nbl->work->cj_ind - cj4_ind*nbnxn_gpu_jgroup_size;
        nbnxn_cj4_t *cj4       = &nbl->cj4[cj4_ind];

        int          cj = scj*gpu_ncluster_per_cell + subc;

        int          cj_gl = gridj->cell0*gpu_ncluster_per_cell + cj;

        /* Initialize this j-subcell i-subcell list */
        cj4->cj[cj_offset] = cj_gl;

        ci1 = gridi->nsubc[sci];
#ifdef NBNXN_BBXXXX
        /* Determine all ci1 bb distances in one call with SIMD4 */
        subc_bb_dist2_simd4_xxxx(gridj->pbb+(cj>>STRIDE_PBB_2LOG)*NNBSBB_XXXX+(cj & (STRIDE_PBB-1)),
                                 ci1, pbb_ci, d2l);
        *ndistc += na_c*2;
#endif

        int          npair = 0;
        unsigned int imask = 0;
        /* We use a fixed upper-bound instead of ci1 to help optimization */
        for (int ci = 0; ci < gpu_ncluster_per_cell; ci++)
            if (ci == ci1)
                break;

#ifndef NBNXN_BBXXXX
            /* Determine the bb distance between ci and cj */
            d2l[ci]  = subc_bb_dist2(ci, bb_ci, cj, gridj->bb);
            *ndistc += 2;
#endif
            float d2 = d2l[ci];

#ifdef PRUNE_LIST_CPU_ALL
            /* Check if the distance is within the distance where
             * we use only the bounding box distance rbb,
             * or within the cut-off and there is at least one atom pair
             * within the cut-off. This check is very costly.
             */
            *ndistc += na_c*na_c;
            if (d2 < rbb2 ||
                (d2 < rl2 &&
                 clusterpair_in_range(work, ci, cj_gl, stride, x, rl2)))
#else
            /* Check if the distance between the two bounding boxes
             * is within the pair-list cut-off.
             */
            if (d2 < rl2)
#endif
                /* Flag this i-subcell to be taken into account */
                imask |= (1U << (cj_offset*gpu_ncluster_per_cell + ci));

#ifdef PRUNE_LIST_CPU_ONE
                ci_last = ci;
#endif

                npair++;

#ifdef PRUNE_LIST_CPU_ONE
        /* If we only found 1 pair, check if any atoms are actually
         * within the cut-off, so we could get rid of it.
         */
        if (npair == 1 && d2l[ci_last] >= rbb2 &&
            !clusterpair_in_range(work, ci_last, cj_gl, stride, x, rl2))
            imask &= ~(1U << (cj_offset*gpu_ncluster_per_cell + ci_last));
            npair--;
#endif

        if (npair > 0)
            /* We have a useful sj entry, close it now */

            /* Set the exclusions for the ci == sj entry.
             * Here we don't bother to check if this entry is actually flagged,
             * as it will nearly always be in the list.
             */
            if (sci_equals_scj)
                set_self_and_newton_excls_supersub(nbl, cj4_ind, cj_offset, subc);

            /* Copy the cluster interaction mask to the list */
            for (int w = 0; w < nbnxn_gpu_clusterpair_split; w++)
                cj4->imei[w].imask |= imask;

            nbl->work->cj_ind++;

            /* Keep the count */
            nbl->nci_tot += npair;

            /* Increase the closing index in i super-cell list */
            nbl->sci[nbl->nsci].cj4_ind_end =
                (nbl->work->cj_ind + nbnxn_gpu_jgroup_size - 1)/nbnxn_gpu_jgroup_size;
/* Set all atom-pair exclusions from the topology stored in excl
 * as masks in the pair-list for simple list i-entry nbl_ci
 */
static void set_ci_top_excls(const nbnxn_search_t nbs,
                             nbnxn_pairlist_t    *nbl,
                             gmx_bool             diagRemoved,
                             int                  na_ci_2log,
                             int                  na_cj_2log,
                             const nbnxn_ci_t    *nbl_ci,
                             const t_blocka      *excl)
    const int *cell = nbs->cell;
    int        ci;
    int        cj_ind_first, cj_ind_last;
    int        cj_first, cj_last;
    int        ndirect;
    int        ai, aj, si, ge, se;
    int        found, cj_ind_0, cj_ind_1, cj_ind_m;
    int        cj_m;
    int        inner_i, inner_e;

    if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
        /* Empty list */
        return;

    ci = nbl_ci->ci;

    cj_ind_first = nbl_ci->cj_ind_start;
    cj_ind_last  = nbl->ncj - 1;

    cj_first = nbl->cj[cj_ind_first].cj;
    cj_last  = nbl->cj[cj_ind_last].cj;

    /* Determine how many contiguous j-cells we have starting
     * from the first i-cell. This number can be used to directly
     * calculate j-cell indices for excluded atoms.
     */
    ndirect = 0;
    if (na_ci_2log == na_cj_2log)
        while (cj_ind_first + ndirect <= cj_ind_last &&
               nbl->cj[cj_ind_first+ndirect].cj == ci + ndirect)
            ndirect++;
#ifdef NBNXN_SEARCH_BB_SIMD4
    else
        while (cj_ind_first + ndirect <= cj_ind_last &&
               nbl->cj[cj_ind_first+ndirect].cj == ci_to_cj(na_cj_2log, ci) + ndirect)
            ndirect++;
#endif

    /* Loop over the atoms in the i super-cell */
    for (int i = 0; i < nbl->na_sc; i++)
        ai = nbs->a[ci*nbl->na_sc+i];
        if (ai >= 0)
            si = (i>>na_ci_2log);

            /* Loop over the topology-based exclusions for this i-atom */
            for (int eind = excl->index[ai]; eind < excl->index[ai+1]; eind++)
                aj = excl->a[eind];

                if (aj == ai)
                    /* The self exclusions are already set, save some time */
                    continue;

                ge = cell[aj];

                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                if (diagRemoved && ge <= ci*nbl->na_sc + i)
                    continue;

                se = (ge >> na_cj_2log);

                /* Could the cluster se be in our list? */
                if (se >= cj_first && se <= cj_last)
                    if (se < cj_first + ndirect)
                        /* We can calculate cj_ind directly from se */
                        found = cj_ind_first + se - cj_first;
                    else
                        /* Search for se using bisection */
                        found    = -1;
                        cj_ind_0 = cj_ind_first + ndirect;
                        cj_ind_1 = cj_ind_last + 1;
                        while (found == -1 && cj_ind_0 < cj_ind_1)
                            cj_ind_m = (cj_ind_0 + cj_ind_1)>>1;

                            cj_m = nbl->cj[cj_ind_m].cj;

                            if (se == cj_m)
                                found = cj_ind_m;
                            else if (se < cj_m)
                                cj_ind_1 = cj_ind_m;
                            else
                                cj_ind_0 = cj_ind_m + 1;

                    if (found >= 0)
                        inner_i = i  - (si << na_ci_2log);
                        inner_e = ge - (se << na_cj_2log);

                        nbl->cj[found].excl &= ~(1U<<((inner_i<<na_cj_2log) + inner_e));
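/* Bisection works in the function above because, within one i-entry, the
 * j-clusters past the ndirect contiguous block are stored in increasing cj
 * order, so nbl->cj[].cj is monotonic over the searched range. The cleared
 * bit encodes the atom pair: inner_i is the i-atom within its cluster and
 * inner_e the excluded j-atom within cluster se.
 */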
/* Add a new i-entry to the FEP list and copy the i-properties */
static gmx_inline void fep_list_new_nri_copy(t_nblist *nlist)
    /* Add a new i-entry */
    nlist->nri++;

    assert(nlist->nri < nlist->maxnri);

    /* Duplicate the last i-entry, except for jindex, which continues */
    nlist->iinr[nlist->nri]   = nlist->iinr[nlist->nri-1];
    nlist->shift[nlist->nri]  = nlist->shift[nlist->nri-1];
    nlist->gid[nlist->nri]    = nlist->gid[nlist->nri-1];
    nlist->jindex[nlist->nri] = nlist->nrj;

/* For load balancing of the free-energy lists over threads, we set
 * the maximum nrj size of an i-entry to 40. This leads to good
 * load balancing in the worst case scenario of a single perturbed
 * particle on 16 threads, while not introducing significant overhead.
 * Note that half of the perturbed pairs will anyhow end up in very small lists,
 * since non-perturbed i-particles will see few perturbed j-particles.
 */
const int max_nrj_fep = 40;
/* Exclude the perturbed pairs from the Verlet list. This is only done to avoid
 * singularities for overlapping particles (0/0), since the charges and
 * LJ parameters have been zeroed in the nbnxn data structure.
 * Simultaneously make a group pair list for the perturbed pairs.
 */
static void make_fep_list(const nbnxn_search_t    nbs,
                          const nbnxn_atomdata_t *nbat,
                          nbnxn_pairlist_t       *nbl,
                          gmx_bool                bDiagRemoved,
                          nbnxn_ci_t             *nbl_ci,
                          const nbnxn_grid_t     *gridi,
                          const nbnxn_grid_t     *gridj,
                          t_nblist               *nlist)
    int      ci, cj_ind_start, cj_ind_end, cja, cjr;
    int      nri_max;
    int      ngid, gid_i = 0, gid_j, gid;
    int      egp_shift, egp_mask;
    int      gid_cj = 0;
    int      ind_i, ind_j, ai, aj;
    int      nri;
    gmx_bool bFEP_i, bFEP_i_all;

    if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
        /* Empty list */
        return;

    ci = nbl_ci->ci;

    cj_ind_start = nbl_ci->cj_ind_start;
    cj_ind_end   = nbl_ci->cj_ind_end;

    /* In the worst case we have alternating energy groups
     * and create #atom-pair lists, which means we need the size
     * of a cluster pair (na_ci*na_cj) times the number of cj's.
     */
    nri_max = nbl->na_ci*nbl->na_cj*(cj_ind_end - cj_ind_start);
    if (nlist->nri + nri_max > nlist->maxnri)
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);

    ngid = nbat->nenergrp;

    if (static_cast<std::size_t>(ngid*gridj->na_cj) > sizeof(gid_cj)*8)
        gmx_fatal(FARGS, "The Verlet scheme with %dx%d kernels and free-energy only supports up to %d energy groups",
                  gridi->na_c, gridj->na_cj, (sizeof(gid_cj)*8)/gridj->na_cj);

    egp_shift = nbat->neg_2log;
    egp_mask  = (1<<nbat->neg_2log) - 1;

    /* Loop over the atoms in the i sub-cell */
    bFEP_i_all = TRUE;
    for (int i = 0; i < nbl->na_ci; i++)
        ind_i = ci*nbl->na_ci + i;
        ai    = nbs->a[ind_i];
        if (ai >= 0)
            nri                  = nlist->nri;
            nlist->jindex[nri+1] = nlist->jindex[nri];
            nlist->iinr[nri]     = ai;
            /* The actual energy group pair index is set later */
            nlist->gid[nri]      = 0;
            nlist->shift[nri]    = nbl_ci->shift & NBNXN_CI_SHIFT;

            bFEP_i = gridi->fep[ci - gridi->cell0] & (1 << i);

            bFEP_i_all = bFEP_i_all && bFEP_i;

            if ((nlist->nrj + cj_ind_end - cj_ind_start)*nbl->na_cj > nlist->maxnrj)
                nlist->maxnrj = over_alloc_small((nlist->nrj + cj_ind_end - cj_ind_start)*nbl->na_cj);
                srenew(nlist->jjnr,     nlist->maxnrj);
                srenew(nlist->excl_fep, nlist->maxnrj);

            if (ngid > 1)
                gid_i = (nbat->energrp[ci] >> (egp_shift*i)) & egp_mask;

            for (int cj_ind = cj_ind_start; cj_ind < cj_ind_end; cj_ind++)
                unsigned int fep_cj;

                cja = nbl->cj[cj_ind].cj;

                if (gridj->na_cj == gridj->na_c)
                    cjr    = cja - gridj->cell0;
                    fep_cj = gridj->fep[cjr];
                    if (ngid > 1)
                        gid_cj = nbat->energrp[cja];
                else if (2*gridj->na_cj == gridj->na_c)
                    cjr = cja - gridj->cell0*2;
                    /* Extract half of the ci fep/energrp mask */
                    fep_cj = (gridj->fep[cjr>>1] >> ((cjr&1)*gridj->na_cj)) & ((1<<gridj->na_cj) - 1);
                    if (ngid > 1)
                        gid_cj = nbat->energrp[cja>>1] >> ((cja&1)*gridj->na_cj*egp_shift) & ((1<<(gridj->na_cj*egp_shift)) - 1);
                else
                    cjr = cja - (gridj->cell0>>1);
                    /* Combine two ci fep masks/energrp */
                    fep_cj = gridj->fep[cjr*2] + (gridj->fep[cjr*2+1] << gridj->na_c);
                    if (ngid > 1)
                        gid_cj = nbat->energrp[cja*2] + (nbat->energrp[cja*2+1] << (gridj->na_c*egp_shift));

                if (bFEP_i || fep_cj != 0)
                    for (int j = 0; j < nbl->na_cj; j++)
                        /* Is this interaction perturbed and not excluded? */
                        ind_j = cja*nbl->na_cj + j;
                        aj    = nbs->a[ind_j];
                        if (aj >= 0 &&
                            (bFEP_i || (fep_cj & (1 << j))) &&
                            (!bDiagRemoved || ind_j >= ind_i))
                            if (ngid > 1)
                                gid_j = (gid_cj >> (j*egp_shift)) & egp_mask;
                                gid   = GID(gid_i, gid_j, ngid);

                                if (nlist->nrj > nlist->jindex[nri] &&
                                    nlist->gid[nri] != gid)
                                    /* Energy group pair changed: new list */
                                    fep_list_new_nri_copy(nlist);
                                    nri = nlist->nri;
                                nlist->gid[nri] = gid;

                            if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                                fep_list_new_nri_copy(nlist);
                                nri = nlist->nri;

                            /* Add it to the FEP list */
                            nlist->jjnr[nlist->nrj]     = aj;
                            nlist->excl_fep[nlist->nrj] = (nbl->cj[cj_ind].excl >> (i*nbl->na_cj + j)) & 1;
                            nlist->nrj++;

                            /* Exclude it from the normal list.
                             * Note that the charge has been set to zero,
                             * but we need to avoid 0/0, as perturbed atoms
                             * can be on top of each other.
                             */
                            nbl->cj[cj_ind].excl &= ~(1U << (i*nbl->na_cj + j));

            if (nlist->nrj > nlist->jindex[nri])
                /* Actually add this new, non-empty, list */
                nlist->nri++;
                nlist->jindex[nlist->nri] = nlist->nrj;

    if (bFEP_i_all)
        /* All interactions are perturbed, we can skip this entry */
        nbl_ci->cj_ind_end = cj_ind_start;
/* Returns the index of j-cluster cj within its cj4 group */
static gmx_inline int cj_mod_cj4(int cj)
    return cj & (nbnxn_gpu_jgroup_size - 1);

/* Convert a j-cluster to a cj4 group */
static gmx_inline int cj_to_cj4(int cj)
    return cj/nbnxn_gpu_jgroup_size;

/* Return the index of a j-atom within a warp */
static gmx_inline int a_mod_wj(int a)
    return a & (nbnxn_gpu_cluster_size/nbnxn_gpu_clusterpair_split - 1);
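/* With the usual sizes (8-atom cluster split over 2 warp halves, cj4 groups
 * of 4 j-clusters) the helpers above reduce to cj & 3, cj/4 and a & 3:
 * exclusion masks are stored per warp, so atom indices are folded into the
 * 4-atom half that each warp handles.
 */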
/* As make_fep_list above, but for super/sub lists. */
static void make_fep_list_supersub(const nbnxn_search_t    nbs,
                                   const nbnxn_atomdata_t *nbat,
                                   nbnxn_pairlist_t       *nbl,
                                   gmx_bool                bDiagRemoved,
                                   const nbnxn_sci_t      *nbl_sci,
                                   real                    shx,
                                   real                    shy,
                                   real                    shz,
                                   real                    rlist_fep2,
                                   const nbnxn_grid_t     *gridi,
                                   const nbnxn_grid_t     *gridj,
                                   t_nblist               *nlist)
    int                sci, cj4_ind_start, cj4_ind_end, cjr;
    int                nri_max;
    int                c_abs;
    int                ind_i, ind_j, ai, aj;
    int                nri;
    gmx_bool           bFEP_i;
    real               xi, yi, zi;
    const nbnxn_cj4_t *cj4;

    if (nbl_sci->cj4_ind_end == nbl_sci->cj4_ind_start)
        /* Empty list */
        return;

    sci = nbl_sci->sci;

    cj4_ind_start = nbl_sci->cj4_ind_start;
    cj4_ind_end   = nbl_sci->cj4_ind_end;

    /* Here we process one super-cell, max #atoms na_sc, versus a list
     * of cj4 entries, each with max nbnxn_gpu_jgroup_size cj's, each
     * of size na_cj atoms.
     * On the GPU we don't support energy groups (yet).
     * So for each of the na_sc i-atoms, we need max one FEP list
     * for each max_nrj_fep j-atoms.
     */
    nri_max = nbl->na_sc*nbl->na_cj*(1 + ((cj4_ind_end - cj4_ind_start)*nbnxn_gpu_jgroup_size)/max_nrj_fep);
    if (nlist->nri + nri_max > nlist->maxnri)
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);

    /* Loop over the atoms in the i super-cluster */
    for (int c = 0; c < gpu_ncluster_per_cell; c++)
        c_abs = sci*gpu_ncluster_per_cell + c;

        for (int i = 0; i < nbl->na_ci; i++)
            ind_i = c_abs*nbl->na_ci + i;
            ai    = nbs->a[ind_i];
            if (ai >= 0)
                nri                  = nlist->nri;
                nlist->jindex[nri+1] = nlist->jindex[nri];
                nlist->iinr[nri]     = ai;
                /* With GPUs, energy groups are not supported */
                nlist->gid[nri]      = 0;
                nlist->shift[nri]    = nbl_sci->shift & NBNXN_CI_SHIFT;

                bFEP_i = (gridi->fep[c_abs - gridi->cell0*gpu_ncluster_per_cell] & (1 << i));

                xi = nbat->x[ind_i*nbat->xstride+XX] + shx;
                yi = nbat->x[ind_i*nbat->xstride+YY] + shy;
                zi = nbat->x[ind_i*nbat->xstride+ZZ] + shz;

                if ((nlist->nrj + cj4_ind_end - cj4_ind_start)*nbnxn_gpu_jgroup_size*nbl->na_cj > nlist->maxnrj)
                    nlist->maxnrj = over_alloc_small((nlist->nrj + cj4_ind_end - cj4_ind_start)*nbnxn_gpu_jgroup_size*nbl->na_cj);
                    srenew(nlist->jjnr,     nlist->maxnrj);
                    srenew(nlist->excl_fep, nlist->maxnrj);

                for (int cj4_ind = cj4_ind_start; cj4_ind < cj4_ind_end; cj4_ind++)
                    cj4 = &nbl->cj4[cj4_ind];

                    for (int gcj = 0; gcj < nbnxn_gpu_jgroup_size; gcj++)
                        unsigned int fep_cj;

                        if ((cj4->imei[0].imask & (1U << (gcj*gpu_ncluster_per_cell + c))) == 0)
                            /* Skip this ci for this cj */
                            continue;

                        cjr = cj4->cj[gcj] - gridj->cell0*gpu_ncluster_per_cell;

                        fep_cj = gridj->fep[cjr];

                        if (bFEP_i || fep_cj != 0)
                            for (int j = 0; j < nbl->na_cj; j++)
                                /* Is this interaction perturbed and not excluded? */
                                ind_j = (gridj->cell0*gpu_ncluster_per_cell + cjr)*nbl->na_cj + j;
                                aj    = nbs->a[ind_j];
                                if (aj >= 0 &&
                                    (bFEP_i || (fep_cj & (1 << j))) &&
                                    (!bDiagRemoved || ind_j >= ind_i))
                                    nbnxn_excl_t *excl;
                                    int           excl_pair;
                                    unsigned int  excl_bit;
                                    real          dx, dy, dz;

                                    get_nbl_exclusions_1(nbl, cj4_ind, j>>2, &excl);

                                    excl_pair = a_mod_wj(j)*nbl->na_ci + i;
                                    excl_bit  = (1U << (gcj*gpu_ncluster_per_cell + c));

                                    dx = nbat->x[ind_j*nbat->xstride+XX] - xi;
                                    dy = nbat->x[ind_j*nbat->xstride+YY] - yi;
                                    dz = nbat->x[ind_j*nbat->xstride+ZZ] - zi;

                                    /* The unpruned GPU list has more than 2/3
                                     * of the atom pairs beyond rlist. Using
                                     * this list will cause a lot of overhead
                                     * in the CPU FEP kernels, especially
                                     * relative to the fast GPU kernels.
                                     * So we prune the FEP list here.
                                     */
                                    if (dx*dx + dy*dy + dz*dz < rlist_fep2)
                                        if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                                            fep_list_new_nri_copy(nlist);
                                            nri = nlist->nri;

                                        /* Add it to the FEP list */
                                        nlist->jjnr[nlist->nrj]     = aj;
                                        nlist->excl_fep[nlist->nrj] = (excl->pair[excl_pair] & excl_bit) ? 1 : 0;
                                        nlist->nrj++;

                                    /* Exclude it from the normal list.
                                     * Note that the charge and LJ parameters have
                                     * been set to zero, but we need to avoid 0/0,
                                     * as perturbed atoms can be on top of each other.
                                     */
                                    excl->pair[excl_pair] &= ~excl_bit;

                        /* Note that we could mask out this pair in imask
                         * if all i- and/or all j-particles are perturbed.
                         * But since the perturbed pairs on the CPU will
                         * take an order of magnitude more time, the GPU
                         * will finish before the CPU and there is no gain.
                         */

                if (nlist->nrj > nlist->jindex[nri])
                    /* Actually add this new, non-empty, list */
                    nlist->nri++;
                    nlist->jindex[nlist->nri] = nlist->nrj;
/* Set all atom-pair exclusions from the topology stored in excl
 * as masks in the pair-list for i-super-cell entry nbl_sci
 */
static void set_sci_top_excls(const nbnxn_search_t nbs,
                              nbnxn_pairlist_t    *nbl,
                              gmx_bool             diagRemoved,
                              int                  na_c_2log,
                              const nbnxn_sci_t   *nbl_sci,
                              const t_blocka      *excl)
    const int    *cell = nbs->cell;
    int           na_c;
    int           sci;
    int           cj_ind_first, cj_ind_last;
    int           cj_first, cj_last;
    int           ndirect;
    int           ai, aj, si, ge, se;
    int           found, cj_ind_0, cj_ind_1, cj_ind_m;
    int           cj_m;
    nbnxn_excl_t *nbl_excl;
    int           inner_i, inner_e, w;

    na_c = nbl->na_ci;

    if (nbl_sci->cj4_ind_end == nbl_sci->cj4_ind_start)
        /* Empty list */
        return;

    sci = nbl_sci->sci;

    cj_ind_first = nbl_sci->cj4_ind_start*nbnxn_gpu_jgroup_size;
    cj_ind_last  = nbl->work->cj_ind - 1;

    cj_first = nbl->cj4[nbl_sci->cj4_ind_start].cj[0];
    cj_last  = nbl_cj(nbl, cj_ind_last);

    /* Determine how many contiguous j-clusters we have starting
     * from the first i-cluster. This number can be used to directly
     * calculate j-cluster indices for excluded atoms.
     */
    ndirect = 0;
    while (cj_ind_first + ndirect <= cj_ind_last &&
           nbl_cj(nbl, cj_ind_first+ndirect) == sci*gpu_ncluster_per_cell + ndirect)
        ndirect++;

    /* Loop over the atoms in the i super-cell */
    for (int i = 0; i < nbl->na_sc; i++)
        ai = nbs->a[sci*nbl->na_sc+i];
        if (ai >= 0)
            si = (i>>na_c_2log);

            /* Loop over the topology-based exclusions for this i-atom */
            for (int eind = excl->index[ai]; eind < excl->index[ai+1]; eind++)
                aj = excl->a[eind];

                if (aj == ai)
                    /* The self exclusions are already set, save some time */
                    continue;

                ge = cell[aj];

                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                if (diagRemoved && ge <= sci*nbl->na_sc + i)
                    continue;

                se = ge >> na_c_2log;

                /* Could the cluster se be in our list? */
                if (se >= cj_first && se <= cj_last)
                    if (se < cj_first + ndirect)
                        /* We can calculate cj_ind directly from se */
                        found = cj_ind_first + se - cj_first;
                    else
                        /* Search for se using bisection */
                        found    = -1;
                        cj_ind_0 = cj_ind_first + ndirect;
                        cj_ind_1 = cj_ind_last + 1;
                        while (found == -1 && cj_ind_0 < cj_ind_1)
                            cj_ind_m = (cj_ind_0 + cj_ind_1)>>1;

                            cj_m = nbl_cj(nbl, cj_ind_m);

                            if (se == cj_m)
                                found = cj_ind_m;
                            else if (se < cj_m)
                                cj_ind_1 = cj_ind_m;
                            else
                                cj_ind_0 = cj_ind_m + 1;

                    if (found >= 0)
                        inner_i = i  - si*na_c;
                        inner_e = ge - se*na_c;

                        if (nbl_imask0(nbl, found) & (1U << (cj_mod_cj4(found)*gpu_ncluster_per_cell + si)))
                            w = (inner_e >> 2);

                            get_nbl_exclusions_1(nbl, cj_to_cj4(found), w, &nbl_excl);

                            nbl_excl->pair[a_mod_wj(inner_e)*nbl->na_ci+inner_i] &=
                                ~(1U << (cj_mod_cj4(found)*gpu_ncluster_per_cell + si));
/* Reallocate the simple ci list for at least n entries */
static void nb_realloc_ci(nbnxn_pairlist_t *nbl, int n)
    nbl->ci_nalloc = over_alloc_small(n);
    nbnxn_realloc_void((void **)&nbl->ci,
                       nbl->nci*sizeof(*nbl->ci),
                       nbl->ci_nalloc*sizeof(*nbl->ci),
                       nbl->alloc, nbl->free);

/* Reallocate the super-cell sci list for at least n entries */
static void nb_realloc_sci(nbnxn_pairlist_t *nbl, int n)
    nbl->sci_nalloc = over_alloc_small(n);
    nbnxn_realloc_void((void **)&nbl->sci,
                       nbl->nsci*sizeof(*nbl->sci),
                       nbl->sci_nalloc*sizeof(*nbl->sci),
                       nbl->alloc, nbl->free);

/* Make a new ci entry at index nbl->nci */
static void new_ci_entry(nbnxn_pairlist_t *nbl, int ci, int shift, int flags)
    if (nbl->nci + 1 > nbl->ci_nalloc)
        nb_realloc_ci(nbl, nbl->nci+1);
    nbl->ci[nbl->nci].ci           = ci;
    nbl->ci[nbl->nci].shift        = shift;
    /* Store the interaction flags along with the shift */
    nbl->ci[nbl->nci].shift       |= flags;
    nbl->ci[nbl->nci].cj_ind_start = nbl->ncj;
    nbl->ci[nbl->nci].cj_ind_end   = nbl->ncj;

/* Make a new sci entry at index nbl->nsci */
static void new_sci_entry(nbnxn_pairlist_t *nbl, int sci, int shift)
    if (nbl->nsci + 1 > nbl->sci_nalloc)
        nb_realloc_sci(nbl, nbl->nsci+1);
    nbl->sci[nbl->nsci].sci           = sci;
    nbl->sci[nbl->nsci].shift         = shift;
    nbl->sci[nbl->nsci].cj4_ind_start = nbl->ncj4;
    nbl->sci[nbl->nsci].cj4_ind_end   = nbl->ncj4;
/* Sort the simple j-list cj on exclusions.
 * Entries with exclusions will all be sorted to the beginning of the list.
 */
static void sort_cj_excl(nbnxn_cj_t *cj, int ncj,
                         nbnxn_list_work_t *work)
    int jnew;

    if (ncj > work->cj_nalloc)
        work->cj_nalloc = over_alloc_large(ncj);
        srenew(work->cj, work->cj_nalloc);

    /* Make a list of the j-cells involving exclusions */
    jnew = 0;
    for (int j = 0; j < ncj; j++)
        if (cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
            work->cj[jnew++] = cj[j];
    /* Check if there are exclusions at all or not just the first entry */
    if (!((jnew == 0) ||
          (jnew == 1 && cj[0].excl != NBNXN_INTERACTION_MASK_ALL)))
        for (int j = 0; j < ncj; j++)
            if (cj[j].excl == NBNXN_INTERACTION_MASK_ALL)
                work->cj[jnew++] = cj[j];
        for (int j = 0; j < ncj; j++)
            cj[j] = work->cj[j];
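/* This is a stable two-pass partition: the first pass collects the entries
 * with exclusions, the second appends the fully-interacting ones, and the
 * buffer is copied back. The exclusion-carrying entries thus form a leading
 * block in each i-entry (which is also what print_nblist_statistics_simple
 * counts as npexcl).
 */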
/* Close this simple list i entry */
static void close_ci_entry_simple(nbnxn_pairlist_t *nbl)
    int jlen;

    /* All content of the new ci entry has already been filled correctly,
     * we only need to increase the count here (for non-empty lists).
     */
    jlen = nbl->ci[nbl->nci].cj_ind_end - nbl->ci[nbl->nci].cj_ind_start;
    if (jlen > 0)
        sort_cj_excl(nbl->cj+nbl->ci[nbl->nci].cj_ind_start, jlen, nbl->work);

        /* The counts below are used for non-bonded pair/flop counts
         * and should therefore match the available kernel setups.
         */
        if (!(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_COUL(0)))
            nbl->work->ncj_noq += jlen;
        else if ((nbl->ci[nbl->nci].shift & NBNXN_CI_HALF_LJ(0)) ||
                 !(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_LJ(0)))
            nbl->work->ncj_hlj += jlen;

        nbl->nci++;
/* Split sci entry for load balancing on the GPU.
 * Splitting ensures we have enough lists to fully utilize the whole GPU.
 * With progBal we generate progressively smaller lists, which improves
 * load balancing. As we only know the current count on our own thread,
 * we will need to estimate the current total amount of i-entries.
 * As the lists get concatenated later, this estimate depends
 * both on nthread and our own thread index.
 */
static void split_sci_entry(nbnxn_pairlist_t *nbl,
                            int nsp_target_av,
                            gmx_bool progBal, int nsp_tot_est,
                            int thread, int nthread)
    int nsp_max;
    int cj4_start, cj4_end, j4len;
    int sci;
    int nsp, nsp_sci, nsp_cj4, nsp_cj4_e, nsp_cj4_p;

    if (progBal)
        int nsp_est;

        /* Estimate the total numbers of ci's of the nblist combined
         * over all threads using the target number of ci's.
         */
        nsp_est = (nsp_tot_est*thread)/nthread + nbl->nci_tot;

        /* The first ci blocks should be larger, to avoid overhead.
         * The last ci blocks should be smaller, to improve load balancing.
         * The factor 3/2 makes the first block 3/2 times the target average
         * and ensures that the total number of blocks ends up equal to
         * that with equally sized blocks of size nsp_target_av.
         */
        nsp_max = nsp_target_av*nsp_tot_est*3/(2*(nsp_est + nsp_tot_est));
    else
        nsp_max = nsp_target_av;

    /* Since nsp_max is a maximum/cut-off (this avoids high outliers,
     * which lead to load imbalance), not an average, we add half the
     * number of pairs in a cj4 block to get the average about right.
     */
    nsp_max += gpu_ncluster_per_cell*nbnxn_gpu_jgroup_size/2;

    cj4_start = nbl->sci[nbl->nsci-1].cj4_ind_start;
    cj4_end   = nbl->sci[nbl->nsci-1].cj4_ind_end;
    j4len     = cj4_end - cj4_start;

    if (j4len > 1 && j4len*gpu_ncluster_per_cell*nbnxn_gpu_jgroup_size > nsp_max)
        /* Remove the last ci entry and process the cj4's again */
        nbl->nsci -= 1;

        sci       = nbl->nsci;
        nsp       = 0;
        nsp_sci   = 0;
        nsp_cj4_e = 0;
        nsp_cj4   = 0;
        for (int cj4 = cj4_start; cj4 < cj4_end; cj4++)
            nsp_cj4_p = nsp_cj4;
            /* Count the number of cluster pairs in this cj4 group */
            nsp_cj4   = 0;
            for (int p = 0; p < gpu_ncluster_per_cell*nbnxn_gpu_jgroup_size; p++)
                nsp_cj4 += (nbl->cj4[cj4].imei[0].imask >> p) & 1;

            /* Check if we should split at this cj4 to get a list of size nsp */
            if (nsp > 0 && nsp + nsp_cj4 > nsp_max)
                /* Split the list at cj4 */
                nbl->sci[sci].cj4_ind_end = cj4;
                /* Create a new sci entry */
                sci++;
                nbl->nsci++;
                if (nbl->nsci+1 > nbl->sci_nalloc)
                    nb_realloc_sci(nbl, nbl->nsci+1);
                nbl->sci[sci].sci           = nbl->sci[nbl->nsci-1].sci;
                nbl->sci[sci].shift         = nbl->sci[nbl->nsci-1].shift;
                nbl->sci[sci].cj4_ind_start = cj4;
                nsp_sci                     = nsp;
                nsp_cj4_e                   = nsp_cj4_p;
                nsp                         = 0;
            nsp += nsp_cj4;

        /* Put the remaining cj4's in the last sci entry */
        nbl->sci[sci].cj4_ind_end = cj4_end;

        /* Possibly balance out the last two sci's
         * by moving the last cj4 of the second last sci.
         */
        if (nsp_sci - nsp_cj4_e >= nsp + nsp_cj4_e)
            nbl->sci[sci-1].cj4_ind_end--;
            nbl->sci[sci].cj4_ind_start--;
/* Close this super/sub list i entry */
static void close_ci_entry_supersub(nbnxn_pairlist_t *nbl,
                                    int nsp_max_av,
                                    gmx_bool progBal, int nsp_tot_est,
                                    int thread, int nthread)
    /* All content of the new ci entry has already been filled correctly,
     * we only need to increase the count here (for non-empty lists).
     */
    int j4len = nbl->sci[nbl->nsci].cj4_ind_end - nbl->sci[nbl->nsci].cj4_ind_start;
    if (j4len > 0)
        /* We can only have complete blocks of 4 j-entries in a list,
         * so round the count up before closing.
         */
        nbl->ncj4         = (nbl->work->cj_ind + nbnxn_gpu_jgroup_size - 1)/nbnxn_gpu_jgroup_size;
        nbl->work->cj_ind = nbl->ncj4*nbnxn_gpu_jgroup_size;

        nbl->nsci++;

        if (nsp_max_av > 0)
            /* Measure the size of the new entry and potentially split it */
            split_sci_entry(nbl, nsp_max_av, progBal, nsp_tot_est,
                            thread, nthread);
/* Syncs the working array before adding another grid pair to the list */
static void sync_work(nbnxn_pairlist_t *nbl)
    if (!nbl->bSimple)
        nbl->work->cj_ind   = nbl->ncj4*nbnxn_gpu_jgroup_size;
        nbl->work->cj4_init = nbl->ncj4;

/* Clears an nbnxn_pairlist_t data structure */
static void clear_pairlist(nbnxn_pairlist_t *nbl)
    nbl->nci     = 0;
    nbl->nsci    = 0;
    nbl->ncj     = 0;
    nbl->ncj4    = 0;
    nbl->nci_tot = 0;
    nbl->nexcl   = 1;

    nbl->work->ncj_noq = 0;
    nbl->work->ncj_hlj = 0;

/* Clears a group scheme pair list */
static void clear_pairlist_fep(t_nblist *nl)
    nl->nri = 0;
    nl->nrj = 0;
    if (nl->jindex == NULL)
        snew(nl->jindex, 1);
    nl->jindex[0] = 0;

/* Sets a simple list i-cell bounding box, including PBC shift */
static gmx_inline void set_icell_bb_simple(const nbnxn_bb_t *bb, int ci,
                                           real shx, real shy, real shz,
                                           nbnxn_bb_t *bb_ci)
    bb_ci->lower[BB_X] = bb[ci].lower[BB_X] + shx;
    bb_ci->lower[BB_Y] = bb[ci].lower[BB_Y] + shy;
    bb_ci->lower[BB_Z] = bb[ci].lower[BB_Z] + shz;
    bb_ci->upper[BB_X] = bb[ci].upper[BB_X] + shx;
    bb_ci->upper[BB_Y] = bb[ci].upper[BB_Y] + shy;
    bb_ci->upper[BB_Z] = bb[ci].upper[BB_Z] + shz;
#ifdef NBNXN_BBXXXX
/* Sets super-cell and sub-cell bounding boxes, including PBC shift */
static void set_icell_bbxxxx_supersub(const float *bb, int ci,
                                      real shx, real shy, real shz,
                                      float *bb_ci)
    int ia = ci*(gpu_ncluster_per_cell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX;
    for (int m = 0; m < (gpu_ncluster_per_cell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX; m += NNBSBB_XXXX)
        for (int i = 0; i < STRIDE_PBB; i++)
            bb_ci[m+0*STRIDE_PBB+i] = bb[ia+m+0*STRIDE_PBB+i] + shx;
            bb_ci[m+1*STRIDE_PBB+i] = bb[ia+m+1*STRIDE_PBB+i] + shy;
            bb_ci[m+2*STRIDE_PBB+i] = bb[ia+m+2*STRIDE_PBB+i] + shz;
            bb_ci[m+3*STRIDE_PBB+i] = bb[ia+m+3*STRIDE_PBB+i] + shx;
            bb_ci[m+4*STRIDE_PBB+i] = bb[ia+m+4*STRIDE_PBB+i] + shy;
            bb_ci[m+5*STRIDE_PBB+i] = bb[ia+m+5*STRIDE_PBB+i] + shz;
2391 /* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
2392 static void set_icell_bb_supersub(const nbnxn_bb_t *bb, int ci,
2393 real shx, real shy, real shz,
2396 for (int i = 0; i < gpu_ncluster_per_cell; i++)
2398 set_icell_bb_simple(bb, ci*gpu_ncluster_per_cell+i,
2404 /* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
2405 static void icell_set_x_simple(int ci,
2406 real shx, real shy, real shz,
2407 int stride, const real *x,
2408 nbnxn_list_work_t *work)
2410 int ia = ci*NBNXN_CPU_CLUSTER_I_SIZE;
2412 for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE; i++)
2414 work->x_ci[i*STRIDE_XYZ+XX] = x[(ia+i)*stride+XX] + shx;
2415 work->x_ci[i*STRIDE_XYZ+YY] = x[(ia+i)*stride+YY] + shy;
2416 work->x_ci[i*STRIDE_XYZ+ZZ] = x[(ia+i)*stride+ZZ] + shz;
2420 /* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
2421 static void icell_set_x_supersub(int ci,
2422 real shx, real shy, real shz,
2423 int stride, const real *x,
2424 nbnxn_list_work_t *work)
2426 #ifndef NBNXN_SEARCH_BB_SIMD4
2428 real * x_ci = work->x_ci;
2430 int ia = ci*gpu_ncluster_per_cell*nbnxn_gpu_cluster_size;
2431 for (int i = 0; i < gpu_ncluster_per_cell*nbnxn_gpu_cluster_size; i++)
2433 x_ci[i*DIM + XX] = x[(ia+i)*stride + XX] + shx;
2434 x_ci[i*DIM + YY] = x[(ia+i)*stride + YY] + shy;
2435 x_ci[i*DIM + ZZ] = x[(ia+i)*stride + ZZ] + shz;
2438 #else /* !NBNXN_SEARCH_BB_SIMD4 */
2440 real * x_ci = work->x_ci_simd;
2442 for (int si = 0; si < gpu_ncluster_per_cell; si++)
2444 for (int i = 0; i < nbnxn_gpu_cluster_size; i += GMX_SIMD4_WIDTH)
2446 int io = si*nbnxn_gpu_cluster_size + i;
2447 int ia = ci*gpu_ncluster_per_cell*nbnxn_gpu_cluster_size + io;
2448 for (int j = 0; j < GMX_SIMD4_WIDTH; j++)
2450 x_ci[io*DIM + j + XX*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + XX] + shx;
2451 x_ci[io*DIM + j + YY*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + YY] + shy;
2452 x_ci[io*DIM + j + ZZ*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + ZZ] + shz;
2457 #endif /* !NBNXN_SEARCH_BB_SIMD4 */
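/* Note on the SIMD4 branch above: coordinates are stored packed per group
 * of GMX_SIMD4_WIDTH atoms as x0..x3, y0..y3, z0..z3 (for width 4), so a
 * single aligned SIMD4 load fetches one coordinate component of four atoms.
 */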
2460 static real minimum_subgrid_size_xy(const nbnxn_grid_t *grid)
2464 return std::min(grid->sx, grid->sy);
2468 return std::min(grid->sx/gpu_ncluster_per_cell_x,
2469 grid->sy/gpu_ncluster_per_cell_y);
2473 static real effective_buffer_1x1_vs_MxN(const nbnxn_grid_t *gridi,
2474 const nbnxn_grid_t *gridj)
2476 const real eff_1x1_buffer_fac_overest = 0.1;
2478 /* Determine an atom-pair list cut-off buffer size for atom pairs,
2479 * to be added to rlist (including buffer) used for MxN.
2480 * This is for converting an MxN list to a 1x1 list. This means we can't
2481 * use the normal buffer estimate, as we have an MxN list in which
2482 * some atom pairs beyond rlist are missing. We want to capture
2483 * the beneficial effect of buffering by extra pairs just outside rlist,
2484 * while removing the useless pairs that are further away from rlist.
2485 * (Also the buffer could have been set manually not using the estimate.)
2486 * This buffer size is an overestimate.
2487 * We add 10% of the smallest grid sub-cell dimensions.
2488 * Note that the z-size differs per cell and we don't use this,
2489 * so we overestimate.
2490 * With PME, the 10% value gives a buffer that is somewhat larger
2491 * than the effective buffer with a tolerance of 0.005 kJ/mol/ps.
2492 * Smaller tolerances or using RF lead to a smaller effective buffer,
2493 * so 10% gives a safe overestimate.
2495 return eff_1x1_buffer_fac_overest*(minimum_subgrid_size_xy(gridi) +
2496 minimum_subgrid_size_xy(gridj));
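/* Worked example (illustrative, assumed numbers): if the smallest sub-cell
 * x/y size is 0.5 nm on both grids, the extra 1x1 buffer added to rlist is
 * 0.1*(0.5 + 0.5) = 0.1 nm.
 */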
2499 /* Clusters at the cut-off only increase rlist by 60% of their size */
2500 static real nbnxn_rlist_inc_outside_fac = 0.6;
2502 /* Due to the cluster size, the effective pair-list is longer than
2503 * that of a simple atom pair-list. This function gives the extra distance.
2505 real nbnxn_get_rlist_effective_inc(int cluster_size_j, real atom_density)
2508 real vol_inc_i, vol_inc_j;
2510 /* We should get this from the setup, but currently it's the same for
2511 * all setups, including GPUs.
2513 cluster_size_i = NBNXN_CPU_CLUSTER_I_SIZE;
2515 vol_inc_i = (cluster_size_i - 1)/atom_density;
2516 vol_inc_j = (cluster_size_j - 1)/atom_density;
2518 return nbnxn_rlist_inc_outside_fac*std::cbrt(vol_inc_i + vol_inc_j);
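/* Worked example (illustrative, water-like numbers): with cluster_size_j = 4
 * and atom_density = 100 nm^-3, vol_inc_i = vol_inc_j = 3/100 nm^3, so the
 * effective increase is 0.6*cbrt(0.06) = 0.6*0.391 = 0.235 nm on top of the
 * atom-pair cut-off.
 */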
2521 /* Estimates the interaction volume^2 for non-local interactions */
2522 static real nonlocal_vol2(const struct gmx_domdec_zones_t *zones, rvec ls, real r)
2530 /* Here we simply add up the non-home interaction volume^2 contributions
2531 * of the 1, 2 or 3 zones shifted along one dimension. As these volumes
2532 * are not additive, this is an overestimate, but it would only be
2533 * significant in the limit of small cells, where we anyhow need to split
2534 * the lists into as small parts as possible.
2537 for (int z = 0; z < zones->n; z++)
2539 if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
2544 for (int d = 0; d < DIM; d++)
2546 if (zones->shift[z][d] == 0)
2550 za *= zones->size[z].x1[d] - zones->size[z].x0[d];
2554 /* 4 octants of a sphere */
2555 vold_est = 0.25*M_PI*r*r*r*r;
2556 /* 4 quarter pie slices on the edges */
2557 vold_est += 4*cl*M_PI/6.0*r*r*r;
2558 /* One rectangular volume on a face */
2559 vold_est += ca*0.5*r*r;
2561 vol2_est_tot += vold_est*za;
2565 return vol2_est_tot;
2568 /* Estimates the average size of a full j-list for super/sub setup */
2569 static void get_nsubpair_target(const nbnxn_search_t nbs,
2572 int min_ci_balanced,
2573 int *nsubpair_target,
2574 int *nsubpair_tot_est)
2576 /* The target value of 36 seems to be the optimum for Kepler.
2577 * Maxwell is less sensitive to the exact value.
2579 const int nsubpair_target_min = 36;
2580 const nbnxn_grid_t *grid;
2582 real xy_diag2, r_eff_sup, vol_est, nsp_est, nsp_est_nl;
2584 grid = &nbs->grid[0];
2586 if (min_ci_balanced <= 0 || grid->nc >= min_ci_balanced || grid->nc == 0)
2588 /* We don't need to balance the list sizes */
2589 *nsubpair_target = 0;
2590 *nsubpair_tot_est = 0;
2595 ls[XX] = (grid->c1[XX] - grid->c0[XX])/(grid->ncx*gpu_ncluster_per_cell_x);
2596 ls[YY] = (grid->c1[YY] - grid->c0[YY])/(grid->ncy*gpu_ncluster_per_cell_y);
2597 ls[ZZ] = grid->na_c/(grid->atom_density*ls[XX]*ls[YY]);
2599 /* The average squared length of the diagonal of a sub cell */
2600 xy_diag2 = ls[XX]*ls[XX] + ls[YY]*ls[YY] + ls[ZZ]*ls[ZZ];
2602 /* The formulas below are a heuristic estimate of the average nsj per si */
2603 r_eff_sup = rlist + nbnxn_rlist_inc_outside_fac*gmx::square((grid->na_c - 1.0)/grid->na_c)*std::sqrt(xy_diag2/3);
2605 if (!nbs->DomDec || nbs->zones->n == 1)
2612 gmx::square(grid->atom_density/grid->na_c)*
2613 nonlocal_vol2(nbs->zones, ls, r_eff_sup);
2618 /* Sub-cell interacts with itself */
2619 vol_est = ls[XX]*ls[YY]*ls[ZZ];
2620 /* 6/2 rectangular volume on the faces */
2621 vol_est += (ls[XX]*ls[YY] + ls[XX]*ls[ZZ] + ls[YY]*ls[ZZ])*r_eff_sup;
2622 /* 12/2 quarter pie slices on the edges */
2623 vol_est += 2*(ls[XX] + ls[YY] + ls[ZZ])*0.25*M_PI*gmx::square(r_eff_sup);
2624 /* 4 octants of a sphere */
2625 vol_est += 0.5*4.0/3.0*M_PI*gmx::power3(r_eff_sup);
2627 /* Estimate the number of cluster pairs as the local number of
2628 * clusters times the volume they interact with times the density.
2630 nsp_est = grid->nsubc_tot*vol_est*grid->atom_density/grid->na_c;
2632 /* Subtract the non-local pair count */
2633 nsp_est -= nsp_est_nl;
2635 /* For small cut-offs nsp_est will be an underestimate.
2636 * With DD nsp_est_nl is an overestimate so nsp_est can get negative.
2637 * So to avoid too small or negative nsp_est we set a minimum of
2638 * all cells interacting with all 3^3 direct neighbors (3^3-1)/2+1=14.
2639 * This might be a slight overestimate for small non-periodic groups of
2640 * atoms as will occur for a local domain with DD, but for small
2641 * groups of atoms we'll anyhow be limited by nsubpair_target_min,
2642 * so this overestimation will not matter.
2644 nsp_est = std::max(nsp_est, grid->nsubc_tot*static_cast<real>(14));
2648 fprintf(debug, "nsp_est local %5.1f non-local %5.1f\n",
2649 nsp_est, nsp_est_nl);
2654 nsp_est = nsp_est_nl;
2657 /* Thus the (average) maximum j-list size should be as follows.
2658 * Since there is overhead, we shouldn't make the lists too small
2659 * (and we can't chop up j-groups) so we use a minimum target size of 36.
2661 *nsubpair_target = std::max(nsubpair_target_min,
2662 static_cast<int>(nsp_est/min_ci_balanced + 0.5));
2663 *nsubpair_tot_est = static_cast<int>(nsp_est);
2667 fprintf(debug, "nbl nsp estimate %.1f, nsubpair_target %d\n",
2668 nsp_est, *nsubpair_target);
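/* Illustrative check of the interaction volume estimate above (assumed
 * numbers): for a cubic sub-cell with ls = 0.5 nm and r_eff_sup = 1.0 nm,
 * vol_est = 0.125 + 3*0.25*1.0 + 2*1.5*0.25*M_PI*1.0 + 0.5*(4.0/3.0)*M_PI*1.0
 *         = 0.125 + 0.75 + 2.36 + 2.09 = approx. 5.3 nm^3,
 * i.e. the edge and sphere terms dominate for typical cut-offs.
 */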
2672 /* Debug list print function */
2673 static void print_nblist_ci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
2675 for (int i = 0; i < nbl->nci; i++)
2677 fprintf(fp, "ci %4d shift %2d ncj %3d\n",
2678 nbl->ci[i].ci, nbl->ci[i].shift,
2679 nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start);
2681 for (int j = nbl->ci[i].cj_ind_start; j < nbl->ci[i].cj_ind_end; j++)
2683 fprintf(fp, " cj %5d imask %x\n",
2690 /* Debug list print function */
2691 static void print_nblist_sci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
2693 for (int i = 0; i < nbl->nsci; i++)
2695 fprintf(fp, "ci %4d shift %2d ncj4 %2d\n",
2696 nbl->sci[i].sci, nbl->sci[i].shift,
2697 nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start);
2700 for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
2702 for (int j = 0; j < nbnxn_gpu_jgroup_size; j++)
2704 fprintf(fp, " sj %5d imask %x\n",
2706 nbl->cj4[j4].imei[0].imask);
2707 for (int si = 0; si < gpu_ncluster_per_cell; si++)
2709 if (nbl->cj4[j4].imei[0].imask & (1U << (j*gpu_ncluster_per_cell + si)))
2716 fprintf(fp, "ci %4d shift %2d ncj4 %2d ncp %3d\n",
2717 nbl->sci[i].sci, nbl->sci[i].shift,
2718 nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start,
2723 /* Combine pair lists *nbl generated on multiple threads into nblc */
2724 static void combine_nblists(int nnbl, nbnxn_pairlist_t **nbl,
2725 nbnxn_pairlist_t *nblc)
2727 int nsci, ncj4, nexcl;
2731 gmx_incons("combine_nblists does not support simple lists");
2736 nexcl = nblc->nexcl;
2737 for (int i = 0; i < nnbl; i++)
2739 nsci += nbl[i]->nsci;
2740 ncj4 += nbl[i]->ncj4;
2741 nexcl += nbl[i]->nexcl;
2744 if (nsci > nblc->sci_nalloc)
2746 nb_realloc_sci(nblc, nsci);
2748 if (ncj4 > nblc->cj4_nalloc)
2750 nblc->cj4_nalloc = over_alloc_small(ncj4);
2751 nbnxn_realloc_void((void **)&nblc->cj4,
2752 nblc->ncj4*sizeof(*nblc->cj4),
2753 nblc->cj4_nalloc*sizeof(*nblc->cj4),
2754 nblc->alloc, nblc->free);
2756 if (nexcl > nblc->excl_nalloc)
2758 nblc->excl_nalloc = over_alloc_small(nexcl);
2759 nbnxn_realloc_void((void **)&nblc->excl,
2760 nblc->nexcl*sizeof(*nblc->excl),
2761 nblc->excl_nalloc*sizeof(*nblc->excl),
2762 nblc->alloc, nblc->free);
2765 /* Each thread should copy its own data to the combined arrays,
2766 * as otherwise data will go back and forth between different caches.
2768 #if GMX_OPENMP && !(defined __clang_analyzer__)
2769 // cppcheck-suppress unreadVariable
2770 int nthreads = gmx_omp_nthreads_get(emntPairsearch);
2773 #pragma omp parallel for num_threads(nthreads) schedule(static)
2774 for (int n = 0; n < nnbl; n++)
2781 const nbnxn_pairlist_t *nbli;
2783 /* Determine the offset in the combined data for our thread */
2784 sci_offset = nblc->nsci;
2785 cj4_offset = nblc->ncj4;
2786 excl_offset = nblc->nexcl;
2788 for (int i = 0; i < n; i++)
2790 sci_offset += nbl[i]->nsci;
2791 cj4_offset += nbl[i]->ncj4;
2792 excl_offset += nbl[i]->nexcl;
2797 for (int i = 0; i < nbli->nsci; i++)
2799 nblc->sci[sci_offset+i] = nbli->sci[i];
2800 nblc->sci[sci_offset+i].cj4_ind_start += cj4_offset;
2801 nblc->sci[sci_offset+i].cj4_ind_end += cj4_offset;
2804 for (int j4 = 0; j4 < nbli->ncj4; j4++)
2806 nblc->cj4[cj4_offset+j4] = nbli->cj4[j4];
2807 nblc->cj4[cj4_offset+j4].imei[0].excl_ind += excl_offset;
2808 nblc->cj4[cj4_offset+j4].imei[1].excl_ind += excl_offset;
2811 for (int j4 = 0; j4 < nbli->nexcl; j4++)
2813 nblc->excl[excl_offset+j4] = nbli->excl[j4];
2816 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2819 for (int n = 0; n < nnbl; n++)
2821 nblc->nsci += nbl[n]->nsci;
2822 nblc->ncj4 += nbl[n]->ncj4;
2823 nblc->nci_tot += nbl[n]->nci_tot;
2824 nblc->nexcl += nbl[n]->nexcl;
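/* Illustrative note: in the parallel copy above, thread n writes at offsets
 * equal to the combined list's existing counts plus the sums over lists
 * 0..n-1, i.e. an exclusive prefix sum, so all threads can copy concurrently
 * without overlap. A minimal stand-alone sketch of that offset computation,
 * assuming plain per-list counts:
 */
#if 0
#include <vector>

static std::vector<int> exclusive_prefix_offsets(const std::vector<int> &counts,
                                                 int                     base)
{
    std::vector<int> offsets(counts.size());
    int              sum = base;
    for (size_t i = 0; i < counts.size(); i++)
    {
        offsets[i] = sum; /* where list i starts in the combined array */
        sum       += counts[i];
    }
    return offsets;
}
#endif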
2828 static void balance_fep_lists(const nbnxn_search_t nbs,
2829 nbnxn_pairlist_set_t *nbl_lists)
2832 int nri_tot, nrj_tot, nrj_target;
2836 nnbl = nbl_lists->nnbl;
2840 /* Nothing to balance */
2844 /* Count the total i-lists and pairs */
2847 for (int th = 0; th < nnbl; th++)
2849 nri_tot += nbl_lists->nbl_fep[th]->nri;
2850 nrj_tot += nbl_lists->nbl_fep[th]->nrj;
2853 nrj_target = (nrj_tot + nnbl - 1)/nnbl;
2855 assert(gmx_omp_nthreads_get(emntNonbonded) == nnbl);
2857 #pragma omp parallel for schedule(static) num_threads(nnbl)
2858 for (int th = 0; th < nnbl; th++)
2864 nbl = nbs->work[th].nbl_fep;
2866 /* Note that here we allocate for the total size, instead of
2867 * a per-thread estimate (which is hard to obtain).
2869 if (nri_tot > nbl->maxnri)
2871 nbl->maxnri = over_alloc_large(nri_tot);
2872 reallocate_nblist(nbl);
2874 if (nri_tot > nbl->maxnri || nrj_tot > nbl->maxnrj)
2876 nbl->maxnrj = over_alloc_small(nrj_tot);
2877 srenew(nbl->jjnr, nbl->maxnrj);
2878 srenew(nbl->excl_fep, nbl->maxnrj);
2881 clear_pairlist_fep(nbl);
2883 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2886 /* Loop over the source lists and assign and copy i-entries */
2888 nbld = nbs->work[th_dest].nbl_fep;
2889 for (int th = 0; th < nnbl; th++)
2893 nbls = nbl_lists->nbl_fep[th];
2895 for (int i = 0; i < nbls->nri; i++)
2899 /* The number of pairs in this i-entry */
2900 nrj = nbls->jindex[i+1] - nbls->jindex[i];
2902 /* Decide if list th_dest is too large and we should proceed
2903 * to the next destination list.
2905 if (th_dest+1 < nnbl && nbld->nrj > 0 &&
2906 nbld->nrj + nrj - nrj_target > nrj_target - nbld->nrj)
2909 nbld = nbs->work[th_dest].nbl_fep;
2912 nbld->iinr[nbld->nri] = nbls->iinr[i];
2913 nbld->gid[nbld->nri] = nbls->gid[i];
2914 nbld->shift[nbld->nri] = nbls->shift[i];
2916 for (int j = nbls->jindex[i]; j < nbls->jindex[i+1]; j++)
2918 nbld->jjnr[nbld->nrj] = nbls->jjnr[j];
2919 nbld->excl_fep[nbld->nrj] = nbls->excl_fep[j];
2923 nbld->jindex[nbld->nri] = nbld->nrj;
2927 /* Swap the list pointers */
2928 for (int th = 0; th < nnbl; th++)
2932 nbl_tmp = nbl_lists->nbl_fep[th];
2933 nbl_lists->nbl_fep[th] = nbs->work[th].nbl_fep;
2934 nbs->work[th].nbl_fep = nbl_tmp;
2938 fprintf(debug, "nbl_fep[%d] nri %4d nrj %4d\n",
2940 nbl_lists->nbl_fep[th]->nri,
2941 nbl_lists->nbl_fep[th]->nrj);
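/* Illustrative sketch (hypothetical helper, not the actual code): the
 * criterion above advances to the next destination list when adding the
 * i-entry would overshoot nrj_target by more than the current shortfall,
 * i.e. it keeps each list as close to the target as a greedy pass allows.
 */
#if 0
static bool advance_to_next_dest(int nrj_dest, int nrj_entry, int nrj_target)
{
    /* overshoot after adding vs. shortfall before adding */
    return nrj_dest > 0 &&
           nrj_dest + nrj_entry - nrj_target > nrj_target - nrj_dest;
}
#endif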
2946 /* Returns the next ci to be processed by our thread */
2947 static gmx_bool next_ci(const nbnxn_grid_t *grid,
2949 int nth, int ci_block,
2950 int *ci_x, int *ci_y,
2956 if (*ci_b == ci_block)
2958 /* Jump to the next block assigned to this task */
2959 *ci += (nth - 1)*ci_block;
2963 if (*ci >= grid->nc*conv)
2968 while (*ci >= grid->cxy_ind[*ci_x*grid->ncy + *ci_y + 1]*conv)
2971 if (*ci_y == grid->ncy)
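/* Illustrative sketch (hypothetical helper): flattened, the block-cyclic
 * traversal above makes thread th visit cells
 * (th + k*nth)*ci_block + b for b = 0..ci_block-1 and k = 0, 1, 2, ...
 */
#if 0
static int nth_cell_of_thread(int th, int nth, int ci_block, int n)
{
    /* the n-th cell (0-based) handled by thread th */
    int block  = n/ci_block;
    int within = n - block*ci_block;
    return (th + block*nth)*ci_block + within;
}
#endif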
2981 /* Returns the distance^2 for which we put cell pairs in the list
2982 * without checking atom pair distances. This is usually < rlist^2.
2984 static float boundingbox_only_distance2(const nbnxn_grid_t *gridi,
2985 const nbnxn_grid_t *gridj,
2989 /* If the distance between two sub-cell bounding boxes is less
2990 * than this distance, do not check the distance between
2991 * all particle pairs in the sub-cell, since then it is likely
2992 * that the box pair has atom pairs within the cut-off.
2993 * We use the nblist cut-off minus 0.5 times the average x/y diagonal
2994 * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
2995 * Using more than 0.5 gains at most 0.5%.
2996 * If forces are calculated more than twice, the performance gain
2997 * in the force calculation outweighs the cost of checking.
2998 * Note that with subcell lists, the atom-pair distance check
2999 * is only performed when only 1 out of 8 sub-cells is within range;
3000 * this is because the GPU is much faster than the CPU.
3005 bbx = 0.5*(gridi->sx + gridj->sx);
3006 bby = 0.5*(gridi->sy + gridj->sy);
3009 bbx /= gpu_ncluster_per_cell_x;
3010 bby /= gpu_ncluster_per_cell_y;
3013 rbb2 = std::max(0.0, rlist - 0.5*std::sqrt(bbx*bbx + bby*bby));
3019 return (float)((1+GMX_FLOAT_EPS)*rbb2);
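/* Worked example (illustrative, assumed numbers): for rlist = 1.0 nm and
 * sub-cell sizes bbx = bby = 0.25 nm, the bounding-box-only distance is
 * rbb2 = (1.0 - 0.5*sqrt(0.125))^2 = 0.823^2 = approx. 0.678 nm^2,
 * noticeably below rlist^2 = 1.0 nm^2.
 */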
3023 static int get_ci_block_size(const nbnxn_grid_t *gridi,
3024 gmx_bool bDomDec, int nth)
3026 const int ci_block_enum = 5;
3027 const int ci_block_denom = 11;
3028 const int ci_block_min_atoms = 16;
3031 /* Here we decide how to distribute the blocks over the threads.
3032 * We use prime numbers to try to avoid that the grid size becomes
3033 * a multiple of the number of threads, which would lead to some
3034 * threads getting "inner" pairs and others getting boundary pairs,
3035 * which in turn will lead to load imbalance between threads.
3036 * Set the block size as 5/11/ntask times the average number of cells
3037 * in a y,z slab. This should ensure a quite uniform distribution
3038 * of the grid parts of the different threads along all three grid
3039 * zone boundaries with 3D domain decomposition. At the same time
3040 * the blocks will not become too small.
3042 ci_block = (gridi->nc*ci_block_enum)/(ci_block_denom*gridi->ncx*nth);
3044 /* Ensure the blocks are not too small: avoids cache invalidation */
3045 if (ci_block*gridi->na_sc < ci_block_min_atoms)
3047 ci_block = (ci_block_min_atoms + gridi->na_sc - 1)/gridi->na_sc;
3050 /* Without domain decomposition,
3051 * or with fewer than 3 blocks per task, divide into nth blocks.
3053 if (!bDomDec || nth*3*ci_block > gridi->nc)
3055 ci_block = (gridi->nc + nth - 1)/nth;
3058 if (ci_block > 1 && (nth - 1)*ci_block >= gridi->nc)
3060 /* Some threads have no work. Although reducing the block size
3061 * does not decrease the block count on the first few threads,
3062 * with GPUs better mixing of "upper" cells that have more empty
3063 * clusters results in a somewhat lower max load over all threads.
3064 * Without GPUs the regime of so few atoms per thread is less
3065 * performance relevant, but with 8-wide SIMD the same reasoning
3066 * applies, since the pair list uses 4 i-atom "sub-clusters".
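/* Worked example (illustrative, assumed numbers): with gridi->nc = 4000
 * cells, gridi->ncx = 20 columns and nth = 8 threads, the heuristic gives
 * ci_block = 4000*5/(11*20*8) = 11 cells per block, so each thread works on
 * many small interleaved blocks instead of one contiguous slab.
 */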
3074 /* Generates the part of pair-list nbl assigned to our thread */
3075 static void nbnxn_make_pairlist_part(const nbnxn_search_t nbs,
3076 const nbnxn_grid_t *gridi,
3077 const nbnxn_grid_t *gridj,
3078 nbnxn_search_work_t *work,
3079 const nbnxn_atomdata_t *nbat,
3080 const t_blocka *excl,
3084 gmx_bool bFBufferFlag,
3087 int nsubpair_tot_est,
3089 nbnxn_pairlist_t *nbl,
3094 real rl2, rl_fep2 = 0;
3096 int ci_b, ci, ci_x, ci_y, ci_xy, cj;
3100 int conv_i, cell0_i;
3101 const nbnxn_bb_t *bb_i = NULL;
3103 const float *pbb_i = NULL;
3105 const float *bbcz_i, *bbcz_j;
3107 real bx0, bx1, by0, by1, bz0, bz1;
3109 real d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
3110 int cxf, cxl, cyf, cyf_x, cyl;
3111 int c0, c1, cs, cf, cl;
3114 int gridi_flag_shift = 0, gridj_flag_shift = 0;
3115 gmx_bitmask_t *gridj_flag = NULL;
3116 int ncj_old_i, ncj_old_j;
3118 nbs_cycle_start(&work->cc[enbsCCsearch]);
3120 if (gridj->bSimple != nbl->bSimple)
3122 gmx_incons("Grid incompatible with pair-list");
3126 nbl->na_sc = gridj->na_sc;
3127 nbl->na_ci = gridj->na_c;
3128 nbl->na_cj = nbnxn_kernel_to_cluster_j_size(nb_kernel_type);
3129 na_cj_2log = get_2log(nbl->na_cj);
3135 /* Determine conversion of clusters to flag blocks */
3136 gridi_flag_shift = 0;
3137 while ((nbl->na_ci<<gridi_flag_shift) < NBNXN_BUFFERFLAG_SIZE)
3141 gridj_flag_shift = 0;
3142 while ((nbl->na_cj<<gridj_flag_shift) < NBNXN_BUFFERFLAG_SIZE)
3147 gridj_flag = work->buffer_flags.flag;
3150 copy_mat(nbs->box, box);
3152 rl2 = nbl->rlist*nbl->rlist;
3154 if (nbs->bFEP && !nbl->bSimple)
3156 /* Determine an atom-pair list cut-off distance for FEP atom pairs.
3157 * We should not simply use rlist, since then we would not have
3158 * the small, effective buffering of the NxN lists.
3159 * The buffer is an overestimate, but the resulting cost for pairs
3160 * beyond rlist is negligible compared to the FEP pairs within rlist.
3162 rl_fep2 = nbl->rlist + effective_buffer_1x1_vs_MxN(gridi, gridj);
3166 fprintf(debug, "nbl_fep atom-pair rlist %f\n", rl_fep2);
3168 rl_fep2 = rl_fep2*rl_fep2;
3171 rbb2 = boundingbox_only_distance2(gridi, gridj, nbl->rlist, nbl->bSimple);
3175 fprintf(debug, "nbl bounding box only distance %f\n", std::sqrt(rbb2));
3178 /* Set the shift range */
3179 for (int d = 0; d < DIM; d++)
3181 /* Check if we need periodicity shifts.
3182 * Without PBC or with domain decomposition we don't need them.
3184 if (d >= ePBC2npbcdim(nbs->ePBC) || nbs->dd_dim[d])
3191 box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < std::sqrt(rl2))
3202 if (nbl->bSimple && !gridi->bSimple)
3204 conv_i = gridi->na_sc/gridj->na_sc;
3205 bb_i = gridi->bb_simple;
3206 bbcz_i = gridi->bbcz_simple;
3207 flags_i = gridi->flags_simple;
3222 /* We use the normal bounding box format for both grid types */
3225 bbcz_i = gridi->bbcz;
3226 flags_i = gridi->flags;
3228 cell0_i = gridi->cell0*conv_i;
3230 bbcz_j = gridj->bbcz;
3234 /* Blocks of the conversion factor - 1 give a large repeat count
3235 * combined with a small block size. This should result in good
3236 * load balancing for both small and large domains.
3238 ci_block = conv_i - 1;
3242 fprintf(debug, "nbl nc_i %d col.av. %.1f ci_block %d\n",
3243 gridi->nc, gridi->nc/(double)(gridi->ncx*gridi->ncy), ci_block);
3249 /* Initialize ci_b and ci to 1 before where we want them to start,
3250 * as they will both be incremented in next_ci.
3253 ci = th*ci_block - 1;
3256 while (next_ci(gridi, conv_i, nth, ci_block, &ci_x, &ci_y, &ci_b, &ci))
3258 if (nbl->bSimple && flags_i[ci] == 0)
3263 ncj_old_i = nbl->ncj;
3266 if (gridj != gridi && shp[XX] == 0)
3270 bx1 = bb_i[ci].upper[BB_X];
3274 bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx;
3276 if (bx1 < gridj->c0[XX])
3278 d2cx = gmx::square(gridj->c0[XX] - bx1);
3287 ci_xy = ci_x*gridi->ncy + ci_y;
3289 /* Loop over shift vectors in three dimensions */
3290 for (int tz = -shp[ZZ]; tz <= shp[ZZ]; tz++)
3292 shz = tz*box[ZZ][ZZ];
3294 bz0 = bbcz_i[ci*NNBSBB_D ] + shz;
3295 bz1 = bbcz_i[ci*NNBSBB_D+1] + shz;
3303 d2z = gmx::square(bz1);
3307 d2z = gmx::square(bz0 - box[ZZ][ZZ]);
3310 d2z_cx = d2z + d2cx;
3317 bz1_frac = bz1/(gridi->cxy_ind[ci_xy+1] - gridi->cxy_ind[ci_xy]);
3322 /* The check with bz1_frac close to or larger than 1 comes later */
3324 for (int ty = -shp[YY]; ty <= shp[YY]; ty++)
3326 shy = ty*box[YY][YY] + tz*box[ZZ][YY];
3330 by0 = bb_i[ci].lower[BB_Y] + shy;
3331 by1 = bb_i[ci].upper[BB_Y] + shy;
3335 by0 = gridi->c0[YY] + (ci_y )*gridi->sy + shy;
3336 by1 = gridi->c0[YY] + (ci_y+1)*gridi->sy + shy;
3339 get_cell_range(by0, by1,
3340 gridj->ncy, gridj->c0[YY], gridj->sy, gridj->inv_sy,
3350 if (by1 < gridj->c0[YY])
3352 d2z_cy += gmx::square(gridj->c0[YY] - by1);
3354 else if (by0 > gridj->c1[YY])
3356 d2z_cy += gmx::square(by0 - gridj->c1[YY]);
3359 for (int tx = -shp[XX]; tx <= shp[XX]; tx++)
3361 shift = XYZ2IS(tx, ty, tz);
3363 #ifdef NBNXN_SHIFT_BACKWARD
3364 if (gridi == gridj && shift > CENTRAL)
3370 shx = tx*box[XX][XX] + ty*box[YY][XX] + tz*box[ZZ][XX];
3374 bx0 = bb_i[ci].lower[BB_X] + shx;
3375 bx1 = bb_i[ci].upper[BB_X] + shx;
3379 bx0 = gridi->c0[XX] + (ci_x )*gridi->sx + shx;
3380 bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx + shx;
3383 get_cell_range(bx0, bx1,
3384 gridj->ncx, gridj->c0[XX], gridj->sx, gridj->inv_sx,
3395 new_ci_entry(nbl, cell0_i+ci, shift, flags_i[ci]);
3399 new_sci_entry(nbl, cell0_i+ci, shift);
3402 #ifndef NBNXN_SHIFT_BACKWARD
3405 if (shift == CENTRAL && gridi == gridj &&
3409 /* Leave the pairs with i > j.
3410 * x is the major index, so skip half of it.
3417 set_icell_bb_simple(bb_i, ci, shx, shy, shz,
3423 set_icell_bbxxxx_supersub(pbb_i, ci, shx, shy, shz,
3426 set_icell_bb_supersub(bb_i, ci, shx, shy, shz,
3431 nbs->icell_set_x(cell0_i+ci, shx, shy, shz,
3432 nbat->xstride, nbat->x,
3435 for (int cx = cxf; cx <= cxl; cx++)
3438 if (gridj->c0[XX] + cx*gridj->sx > bx1)
3440 d2zx += gmx::square(gridj->c0[XX] + cx*gridj->sx - bx1);
3442 else if (gridj->c0[XX] + (cx+1)*gridj->sx < bx0)
3444 d2zx += gmx::square(gridj->c0[XX] + (cx+1)*gridj->sx - bx0);
3447 #ifndef NBNXN_SHIFT_BACKWARD
3448 if (gridi == gridj &&
3449 cx == 0 && cyf < ci_y)
3451 if (gridi == gridj &&
3452 cx == 0 && shift == CENTRAL && cyf < ci_y)
3455 /* Leave the pairs with i > j.
3456 * Skip half of y when i and j have the same x.
3465 for (int cy = cyf_x; cy <= cyl; cy++)
3467 c0 = gridj->cxy_ind[cx*gridj->ncy+cy];
3468 c1 = gridj->cxy_ind[cx*gridj->ncy+cy+1];
3469 #ifdef NBNXN_SHIFT_BACKWARD
3470 if (gridi == gridj &&
3471 shift == CENTRAL && c0 < ci)
3478 if (gridj->c0[YY] + cy*gridj->sy > by1)
3480 d2zxy += gmx::square(gridj->c0[YY] + cy*gridj->sy - by1);
3482 else if (gridj->c0[YY] + (cy+1)*gridj->sy < by0)
3484 d2zxy += gmx::square(gridj->c0[YY] + (cy+1)*gridj->sy - by0);
3486 if (c1 > c0 && d2zxy < rl2)
3488 cs = c0 + static_cast<int>(bz1_frac*(c1 - c0));
3496 /* Find the lowest cell that can possibly be within range. */
3501 (bbcz_j[cf*NNBSBB_D+1] >= bz0 ||
3502 d2xy + gmx::square(bbcz_j[cf*NNBSBB_D+1] - bz0) < rl2))
3507 /* Find the highest cell that can possibly be within range. */
3512 (bbcz_j[cl*NNBSBB_D] <= bz1 ||
3513 d2xy + gmx::square(bbcz_j[cl*NNBSBB_D] - bz1) < rl2))
3518 #ifdef NBNXN_REFCODE
3520 /* Simple reference code, for debugging,
3521 * overrides the more complex code above.
3525 for (int k = c0; k < c1; k++)
3527 if (box_dist2(bx0, bx1, by0, by1, bz0, bz1, bb+k) < rl2 &&
3532 if (box_dist2(bx0, bx1, by0, by1, bz0, bz1, bb+k) < rl2 &&
3543 /* We want each atom/cell pair only once,
3544 * so only use cj >= ci.
3546 #ifndef NBNXN_SHIFT_BACKWARD
3547 cf = std::max(cf, ci);
3549 if (shift == CENTRAL)
3551 cf = std::max(cf, ci);
3558 /* For f buffer flags with simple lists */
3559 ncj_old_j = nbl->ncj;
3561 switch (nb_kernel_type)
3563 case nbnxnk4x4_PlainC:
3564 check_cell_list_space_simple(nbl, cl-cf+1);
3566 make_cluster_list_simple(gridj,
3568 (gridi == gridj && shift == CENTRAL),
3573 #ifdef GMX_NBNXN_SIMD_4XN
3574 case nbnxnk4xN_SIMD_4xN:
3575 check_cell_list_space_simple(nbl, ci_to_cj(na_cj_2log, cl-cf)+2);
3576 make_cluster_list_simd_4xn(gridj,
3578 (gridi == gridj && shift == CENTRAL),
3584 #ifdef GMX_NBNXN_SIMD_2XNN
3585 case nbnxnk4xN_SIMD_2xNN:
3586 check_cell_list_space_simple(nbl, ci_to_cj(na_cj_2log, cl-cf)+2);
3587 make_cluster_list_simd_2xnn(gridj,
3589 (gridi == gridj && shift == CENTRAL),
3595 case nbnxnk8x8x8_PlainC:
3596 case nbnxnk8x8x8_GPU:
3597 check_cell_list_space_supersub(nbl, cl-cf+1);
3598 for (cj = cf; cj <= cl; cj++)
3600 make_cluster_list_supersub(gridi, gridj,
3602 (gridi == gridj && shift == CENTRAL && ci == cj),
3603 nbat->xstride, nbat->x,
3609 ncpcheck += cl - cf + 1;
3611 if (bFBufferFlag && nbl->ncj > ncj_old_j)
3613 int cbf = nbl->cj[ncj_old_j].cj >> gridj_flag_shift;
3614 int cbl = nbl->cj[nbl->ncj-1].cj >> gridj_flag_shift;
3615 for (int cb = cbf; cb <= cbl; cb++)
3617 bitmask_init_bit(&gridj_flag[cb], th);
3625 /* Set the exclusions for this ci list */
3628 set_ci_top_excls(nbs,
3630 shift == CENTRAL && gridi == gridj,
3633 &(nbl->ci[nbl->nci]),
3638 make_fep_list(nbs, nbat, nbl,
3639 shift == CENTRAL && gridi == gridj,
3640 &(nbl->ci[nbl->nci]),
3641 gridi, gridj, nbl_fep);
3646 set_sci_top_excls(nbs,
3648 shift == CENTRAL && gridi == gridj,
3650 &(nbl->sci[nbl->nsci]),
3655 make_fep_list_supersub(nbs, nbat, nbl,
3656 shift == CENTRAL && gridi == gridj,
3657 &(nbl->sci[nbl->nsci]),
3660 gridi, gridj, nbl_fep);
3664 /* Close this ci list */
3667 close_ci_entry_simple(nbl);
3671 close_ci_entry_supersub(nbl,
3673 progBal, nsubpair_tot_est,
3680 if (bFBufferFlag && nbl->ncj > ncj_old_i)
3682 bitmask_init_bit(&(work->buffer_flags.flag[(gridi->cell0+ci)>>gridi_flag_shift]), th);
3686 work->ndistc = ndistc;
3688 nbs_cycle_stop(&work->cc[enbsCCsearch]);
3692 fprintf(debug, "number of distance checks %d\n", ndistc);
3693 fprintf(debug, "ncpcheck %s %d\n", gridi == gridj ? "local" : "non-local",
3698 print_nblist_statistics_simple(debug, nbl, nbs, rlist);
3702 print_nblist_statistics_supersub(debug, nbl, nbs, rlist);
3707 fprintf(debug, "nbl FEP list pairs: %d\n", nbl_fep->nrj);
3712 static void reduce_buffer_flags(const nbnxn_search_t nbs,
3714 const nbnxn_buffer_flags_t *dest)
3716 for (int s = 0; s < nsrc; s++)
3718 gmx_bitmask_t * flag = nbs->work[s].buffer_flags.flag;
3720 for (int b = 0; b < dest->nflag; b++)
3722 bitmask_union(&(dest->flag[b]), flag[b]);
3727 static void print_reduction_cost(const nbnxn_buffer_flags_t *flags, int nout)
3729 int nelem, nkeep, ncopy, nred, out;
3730 gmx_bitmask_t mask_0;
3736 bitmask_init_bit(&mask_0, 0);
3737 for (int b = 0; b < flags->nflag; b++)
3739 if (bitmask_is_equal(flags->flag[b], mask_0))
3741 /* Only flag 0 is set, no copy or reduction required */
3745 else if (!bitmask_is_zero(flags->flag[b]))
3748 for (out = 0; out < nout; out++)
3750 if (bitmask_is_set(flags->flag[b], out))
3767 fprintf(debug, "nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
3769 nelem/(double)(flags->nflag),
3770 nkeep/(double)(flags->nflag),
3771 ncopy/(double)(flags->nflag),
3772 nred/(double)(flags->nflag));
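/* Illustrative reading of the counts above (assumed nout = 4): a flag block
 * written only by list 0 counts as one kept element; a block written only by
 * list 3 counts as one element plus one copy; a block written by lists 0 and
 * 2 counts as two elements plus one reduction.
 */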
3775 /* Perform a counting (linear-time) sort to move the smaller lists to the end.
3776 * This avoids load imbalance on the GPU, as large lists will be
3777 * scheduled and executed first and the smaller lists later.
3778 * Load balancing between multi-processors only happens at the end
3779 * and there smaller lists lead to more effective load balancing.
3780 * The sorting is done on the cj4 count, not on the actual pair counts.
3781 * Not only does this make the sort faster, but it also results in
3782 * better load balancing than using a list sorted on exact load.
3783 * This function swaps the pointer in the pair list to avoid a copy operation.
3785 static void sort_sci(nbnxn_pairlist_t *nbl)
3787 nbnxn_list_work_t *work;
3789 nbnxn_sci_t *sci_sort;
3791 if (nbl->ncj4 <= nbl->nsci)
3793 /* nsci = 0 or all sci have size 1, sorting won't change the order */
3799 /* We will distinguish differences up to double the average */
3800 m = (2*nbl->ncj4)/nbl->nsci;
3802 if (m + 1 > work->sort_nalloc)
3804 work->sort_nalloc = over_alloc_large(m + 1);
3805 srenew(work->sort, work->sort_nalloc);
3808 if (work->sci_sort_nalloc != nbl->sci_nalloc)
3810 work->sci_sort_nalloc = nbl->sci_nalloc;
3811 nbnxn_realloc_void((void **)&work->sci_sort,
3813 work->sci_sort_nalloc*sizeof(*work->sci_sort),
3814 nbl->alloc, nbl->free);
3817 /* Count the entries of each size */
3818 for (int i = 0; i <= m; i++)
3822 for (int s = 0; s < nbl->nsci; s++)
3824 int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
3827 /* Calculate the offset for each count */
3830 for (int i = m - 1; i >= 0; i--)
3833 work->sort[i] = work->sort[i + 1] + s0;
3837 /* Sort entries directly into place */
3838 sci_sort = work->sci_sort;
3839 for (int s = 0; s < nbl->nsci; s++)
3841 int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
3842 sci_sort[work->sort[i]++] = nbl->sci[s];
3845 /* Swap the sci pointers so we use the new, sorted list */
3846 work->sci_sort = nbl->sci;
3847 nbl->sci = sci_sort;
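/* Stand-alone sketch of the counting sort above (illustrative only, assuming
 * plain vectors): entries are bucketed on their size, capped at m, and
 * written out largest-first through exclusive suffix offsets.
 */
#if 0
#include <algorithm>
#include <vector>

static std::vector<int> sort_indices_descending(const std::vector<int> &size,
                                                int                     m)
{
    std::vector<int> count(m + 1, 0);
    for (int s : size)
    {
        count[std::min(s, m)]++;
    }
    /* Exclusive suffix sum: offset[i] counts all entries in buckets i+1..m,
     * so larger buckets are written first.
     */
    std::vector<int> offset(m + 1, 0);
    for (int i = m - 1; i >= 0; i--)
    {
        offset[i] = offset[i + 1] + count[i + 1];
    }
    std::vector<int> perm(size.size());
    for (size_t s = 0; s < size.size(); s++)
    {
        perm[offset[std::min(size[s], m)]++] = static_cast<int>(s);
    }
    return perm; /* entry indices, largest sizes first */
}
#endif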
3850 /* Make a local or non-local pair-list, depending on iloc */
3851 void nbnxn_make_pairlist(const nbnxn_search_t nbs,
3852 nbnxn_atomdata_t *nbat,
3853 const t_blocka *excl,
3855 int min_ci_balanced,
3856 nbnxn_pairlist_set_t *nbl_list,
3861 nbnxn_grid_t *gridi, *gridj;
3864 int nsubpair_target, nsubpair_tot_est;
3866 nbnxn_pairlist_t **nbl;
3868 gmx_bool CombineNBLists;
3870 int np_tot, np_noq, np_hlj, nap;
3872 /* Check if we are running hybrid GPU + CPU nbnxn mode */
3873 bGPUCPU = (!nbs->grid[0].bSimple && nbl_list->bSimple);
3875 nnbl = nbl_list->nnbl;
3876 nbl = nbl_list->nbl;
3877 CombineNBLists = nbl_list->bCombined;
3881 fprintf(debug, "ns making %d nblists\n", nnbl);
3884 nbat->bUseBufferFlags = (nbat->nout > 1);
3885 /* We should re-init the flags before making the first list */
3886 if (nbat->bUseBufferFlags && (LOCAL_I(iloc) || bGPUCPU))
3888 init_buffer_flags(&nbat->buffer_flags, nbat->natoms);
3891 if (nbl_list->bSimple)
3894 switch (nb_kernel_type)
3896 #ifdef GMX_NBNXN_SIMD_4XN
3897 case nbnxnk4xN_SIMD_4xN:
3898 nbs->icell_set_x = icell_set_x_simd_4xn;
3901 #ifdef GMX_NBNXN_SIMD_2XNN
3902 case nbnxnk4xN_SIMD_2xNN:
3903 nbs->icell_set_x = icell_set_x_simd_2xnn;
3907 nbs->icell_set_x = icell_set_x_simple;
3911 /* MSVC 2013 complains about switch statements without case */
3912 nbs->icell_set_x = icell_set_x_simple;
3917 nbs->icell_set_x = icell_set_x_supersub;
3922 /* Only zone (grid) 0 vs 0 */
3929 nzi = nbs->zones->nizone;
3932 if (!nbl_list->bSimple && min_ci_balanced > 0)
3934 get_nsubpair_target(nbs, iloc, rlist, min_ci_balanced,
3935 &nsubpair_target, &nsubpair_tot_est);
3939 nsubpair_target = 0;
3940 nsubpair_tot_est = 0;
3943 /* Clear all pair-lists */
3944 for (int th = 0; th < nnbl; th++)
3946 clear_pairlist(nbl[th]);
3950 clear_pairlist_fep(nbl_list->nbl_fep[th]);
3954 for (int zi = 0; zi < nzi; zi++)
3956 gridi = &nbs->grid[zi];
3958 if (NONLOCAL_I(iloc))
3960 zj0 = nbs->zones->izone[zi].j0;
3961 zj1 = nbs->zones->izone[zi].j1;
3967 for (int zj = zj0; zj < zj1; zj++)
3969 gridj = &nbs->grid[zj];
3973 fprintf(debug, "ns search grid %d vs %d\n", zi, zj);
3976 nbs_cycle_start(&nbs->cc[enbsCCsearch]);
3978 if (nbl[0]->bSimple && !gridi->bSimple)
3980 /* Hybrid list, determine blocking later */
3985 ci_block = get_ci_block_size(gridi, nbs->DomDec, nnbl);
3988 /* With GPU: generate progressively smaller lists for
3989 * load balancing for local only or non-local with 2 zones.
3991 progBal = (LOCAL_I(iloc) || nbs->zones->n <= 2);
3993 #pragma omp parallel for num_threads(nnbl) schedule(static)
3994 for (int th = 0; th < nnbl; th++)
3998 /* Re-init the thread-local work flag data before making
3999 * the first list (not an elegant conditional).
4001 if (nbat->bUseBufferFlags && ((zi == 0 && zj == 0) ||
4002 (bGPUCPU && zi == 0 && zj == 1)))
4004 init_buffer_flags(&nbs->work[th].buffer_flags, nbat->natoms);
4007 if (CombineNBLists && th > 0)
4009 clear_pairlist(nbl[th]);
4012 /* Divide the i super cells equally over the nblists */
4013 nbnxn_make_pairlist_part(nbs, gridi, gridj,
4014 &nbs->work[th], nbat, excl,
4018 nbat->bUseBufferFlags,
4020 progBal, nsubpair_tot_est,
4023 nbl_list->nbl_fep[th]);
4025 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
4027 nbs_cycle_stop(&nbs->cc[enbsCCsearch]);
4032 for (int th = 0; th < nnbl; th++)
4034 inc_nrnb(nrnb, eNR_NBNXN_DIST2, nbs->work[th].ndistc);
4036 if (nbl_list->bSimple)
4038 np_tot += nbl[th]->ncj;
4039 np_noq += nbl[th]->work->ncj_noq;
4040 np_hlj += nbl[th]->work->ncj_hlj;
4044 /* This count ignores potential subsequent pair pruning */
4045 np_tot += nbl[th]->nci_tot;
4048 nap = nbl[0]->na_ci*nbl[0]->na_cj;
4049 nbl_list->natpair_ljq = (np_tot - np_noq)*nap - np_hlj*nap/2;
4050 nbl_list->natpair_lj = np_noq*nap;
4051 nbl_list->natpair_q = np_hlj*nap/2;
4053 if (CombineNBLists && nnbl > 1)
4055 nbs_cycle_start(&nbs->cc[enbsCCcombine]);
4057 combine_nblists(nnbl-1, nbl+1, nbl[0]);
4059 nbs_cycle_stop(&nbs->cc[enbsCCcombine]);
4064 if (!nbl_list->bSimple)
4066 /* Sort the entries on size, large ones first */
4067 if (CombineNBLists || nnbl == 1)
4073 #pragma omp parallel for num_threads(nnbl) schedule(static)
4074 for (int th = 0; th < nnbl; th++)
4080 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
4085 if (nbat->bUseBufferFlags)
4087 reduce_buffer_flags(nbs, nnbl, &nbat->buffer_flags);
4092 /* Balance the free-energy lists over all the threads */
4093 balance_fep_lists(nbs, nbl_list);
4096 /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
4099 nbs->search_count++;
4101 if (nbs->print_cycles &&
4102 (!nbs->DomDec || (nbs->DomDec && !LOCAL_I(iloc))) &&
4103 nbs->search_count % 100 == 0)
4105 nbs_cycle_print(stderr, nbs);
4108 if (debug && (CombineNBLists && nnbl > 1))
4110 if (nbl[0]->bSimple)
4112 print_nblist_statistics_simple(debug, nbl[0], nbs, rlist);
4116 print_nblist_statistics_supersub(debug, nbl[0], nbs, rlist);
4124 if (nbl[0]->bSimple)
4126 print_nblist_ci_cj(debug, nbl[0]);
4130 print_nblist_sci_cj(debug, nbl[0]);
4134 if (nbat->bUseBufferFlags)
4136 print_reduction_cost(&nbat->buffer_flags, nnbl);