/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "nbnxn_search.h"

#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nbnxn_internal.h"
#include "gromacs/mdlib/nbnxn_simd.h"
#include "gromacs/mdlib/nbnxn_util.h"
#include "gromacs/mdlib/ns.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"
#include "gromacs/topology/block.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"

using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
/* We shift the i-particles backward for PBC.
 * This leads to more conditionals than shifting forward.
 * We do this to get more balanced pair lists.
 */
constexpr bool c_pbcShiftBackward = true;
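
/* Minimal illustration of the convention above. This helper is hypothetical
 * and not used below; the search code applies the same logic inline, and in
 * addition skips the shifted images on one side of the central box. Within
 * the central, unshifted box only pairs with cj >= ci are needed, assuming
 * equal i- and j-cluster sizes.
 */
gmx_unused static int firstJClusterToCheck(int shift, int ci, int rangeStart)
{
    return (shift == CENTRAL) ? std::max(rangeStart, ci) : rangeStart;
}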
static void nbs_cycle_clear(nbnxn_cycle_t *cc)
    for (int i = 0; i < enbsCCnr; i++)

static double Mcyc_av(const nbnxn_cycle_t *cc)
    return static_cast<double>(cc->c)*1e-6/cc->count;

static void nbs_cycle_print(FILE *fp, const nbnxn_search_t nbs)
    fprintf(fp, "ns %4d grid %4.1f search %4.1f red.f %5.3f",
            nbs->cc[enbsCCgrid].count,
            Mcyc_av(&nbs->cc[enbsCCgrid]),
            Mcyc_av(&nbs->cc[enbsCCsearch]),
            Mcyc_av(&nbs->cc[enbsCCreducef]));

    if (nbs->nthread_max > 1)
        if (nbs->cc[enbsCCcombine].count > 0)
            fprintf(fp, " comb %5.2f",
                    Mcyc_av(&nbs->cc[enbsCCcombine]));
        fprintf(fp, " s. th");
        for (int t = 0; t < nbs->nthread_max; t++)
            fprintf(fp, " %4.1f",
                    Mcyc_av(&nbs->work[t].cc[enbsCCsearch]));
/* Layout for the nonbonded NxN pair lists */
enum class NbnxnLayout
    NoSimd4x4, // i-cluster size 4, j-cluster size 4
    Simd4xN,   // i-cluster size 4, j-cluster size SIMD width
    Simd2xNN,  // i-cluster size 4, j-cluster size half SIMD width
    Gpu8x8x8   // i-cluster size 8, j-cluster size 8 + super-clustering
/* Returns the j-cluster size */
template <NbnxnLayout layout>
static constexpr int jClusterSize()
#if GMX_SIMD
    static_assert(layout == NbnxnLayout::NoSimd4x4 || layout == NbnxnLayout::Simd4xN || layout == NbnxnLayout::Simd2xNN, "Currently jClusterSize only supports CPU layouts");

    return layout == NbnxnLayout::Simd4xN ? GMX_SIMD_REAL_WIDTH : (layout == NbnxnLayout::Simd2xNN ? GMX_SIMD_REAL_WIDTH/2 : NBNXN_CPU_CLUSTER_I_SIZE);
#else
    static_assert(layout == NbnxnLayout::NoSimd4x4, "Currently without SIMD, jClusterSize only supports NoSimd4x4");

    return NBNXN_CPU_CLUSTER_I_SIZE;
#endif
/* Returns the j-cluster index given the i-cluster index */
template <int jClusterSize>
static inline int cjFromCi(int ci)
    static_assert(jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2 || jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE || jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE*2, "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2)
    else if (jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE)

/* Returns the j-cluster index given the i-cluster index */
template <NbnxnLayout layout>
static inline int cjFromCi(int ci)
    constexpr int clusterSize = jClusterSize<layout>();

    return cjFromCi<clusterSize>(ci);
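
/* Worked example of the ci -> cj mapping above (the mapping is ci << 1,
 * ci and ci >> 1 for the three allowed size ratios): assuming
 * NBNXN_CPU_CLUSTER_I_SIZE == 4, i-cluster ci = 3 covers atoms 12-15,
 * so it starts at j-cluster 6 for j-size 2, at j-cluster 3 for j-size 4
 * and at j-cluster 1 for j-size 8.
 */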
/* Returns the nbnxn coordinate data index given the i-cluster index */
template <NbnxnLayout layout>
static inline int xIndexFromCi(int ci)
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2 || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE*2, "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize <= NBNXN_CPU_CLUSTER_I_SIZE)
        /* Coordinates are stored packed in groups of 4 */

        /* Coordinates packed in 8, i-cluster size is half the packing width */
        return (ci >> 1)*STRIDE_P8 + (ci & 1)*(c_packX8 >> 1);

/* Returns the nbnxn coordinate data index given the j-cluster index */
template <NbnxnLayout layout>
static inline int xIndexFromCj(int cj)
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2 || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE*2, "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2)
        /* Coordinates are stored packed in groups of 4 */
        return (cj >> 1)*STRIDE_P4 + (cj & 1)*(c_packX4 >> 1);
    else if (clusterSize == NBNXN_CPU_CLUSTER_I_SIZE)
        /* Coordinates are stored packed in groups of 4 */

        /* Coordinates are stored packed in groups of 8 */
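
/* Worked example of the packed indexing above, assuming c_packX4 == 4 and
 * STRIDE_P4 == DIM*c_packX4 == 12 (xxxxyyyyzzzz storage): with j-cluster
 * size 2, cluster cj = 5 covers atoms 10-11, which sit in pack 2 at offset 2,
 * so its x-coordinates start at (5 >> 1)*12 + (5 & 1)*(4 >> 1) = 26.
 */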
gmx_bool nbnxn_kernel_pairlist_simple(int nb_kernel_type)
    if (nb_kernel_type == nbnxnkNotSet)
        gmx_fatal(FARGS, "Non-bonded kernel type not set for Verlet-style pair-list.");

    switch (nb_kernel_type)
        case nbnxnk8x8x8_GPU:
        case nbnxnk8x8x8_PlainC:

        case nbnxnk4x4_PlainC:
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:

            gmx_incons("Invalid nonbonded kernel type passed!");
/* Initializes a single t_nblist data structure for free-energy interactions */
static void nbnxn_init_pairlist_fep(t_nblist *nl)
    nl->type      = GMX_NBLIST_INTERACTION_FREE_ENERGY;
    nl->igeometry = GMX_NBLIST_GEOMETRY_PARTICLE_PARTICLE;
    /* The interaction functions are set in the free energy kernel function */

    nl->jindex   = nullptr;
    nl->excl_fep = nullptr;
void nbnxn_init_search(nbnxn_search_t            *nbs_ptr,
                       struct gmx_domdec_zones_t *zones,

    nbs->DomDec = (n_dd_cells != nullptr);

    clear_ivec(nbs->dd_dim);

    for (int d = 0; d < DIM; d++)
        if ((*n_dd_cells)[d] > 1)

    /* Each grid matches a DD zone */

    nbnxn_grids_init(nbs, ngrid);

    nbs->cell_nalloc = 0;

    nbs->nthread_max = nthread_max;

    /* Initialize the work data structures for each thread */
    snew(nbs->work, nbs->nthread_max);
    for (int t = 0; t < nbs->nthread_max; t++)
        nbs->work[t].cxy_na           = nullptr;
        nbs->work[t].cxy_na_nalloc    = 0;
        nbs->work[t].sort_work        = nullptr;
        nbs->work[t].sort_work_nalloc = 0;

        snew(nbs->work[t].nbl_fep, 1);
        nbnxn_init_pairlist_fep(nbs->work[t].nbl_fep);

    /* Initialize detailed nbsearch cycle counting */
    nbs->print_cycles = (getenv("GMX_NBNXN_CYCLE") != nullptr);
    nbs->search_count = 0;
    nbs_cycle_clear(nbs->cc);
    for (int t = 0; t < nbs->nthread_max; t++)
        nbs_cycle_clear(nbs->work[t].cc);
static void init_buffer_flags(nbnxn_buffer_flags_t *flags,
    flags->nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE;
    if (flags->nflag > flags->flag_nalloc)
        flags->flag_nalloc = over_alloc_large(flags->nflag);
        srenew(flags->flag, flags->flag_nalloc);
    for (int b = 0; b < flags->nflag; b++)
        bitmask_clear(&(flags->flag[b]));
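
/* Example of the round-up above: assuming NBNXN_BUFFERFLAG_SIZE == 16,
 * natoms = 1000 gives nflag = (1000 + 15)/16 = 63 flag blocks, with the
 * last block only partially covered by real atoms.
 */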
/* Determines the cell range along one dimension that
 * the bounding box b0 - b1 sees.
 */
static void get_cell_range(real b0, real b1,
                           int nc, real c0, real s, real invs,
                           real d2, real r2, int *cf, int *cl)
    *cf = std::max(static_cast<int>((b0 - c0)*invs), 0);

    while (*cf > 0 && d2 + gmx::square((b0 - c0) - (*cf)*s) < r2)

    *cl = std::min(static_cast<int>((b1 - c0)*invs), nc-1);
    while (*cl < nc-1 && d2 + gmx::square((*cl+1)*s - (b1 - c0)) < r2)
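
/* Worked example with hypothetical numbers: c0 = 0, s = 1 (invs = 1),
 * nc = 10, b0 = 2.3, b1 = 3.1, d2 = 0, r2 = 1. The initial range is
 * cells 2-3; the first loop extends it down to cell 1 (gap 0.3^2 < 1)
 * and the second up to cell 4 (gap 0.9^2 < 1), giving *cf = 1, *cl = 4.
 */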
/* Reference code calculating the distance^2 between two bounding boxes */
static float box_dist2(float bx0, float bx1, float by0,
                       float by1, float bz0, float bz1,
                       const nbnxn_bb_t *bb)
    float dl, dh, dm, dm0;

    dl  = bx0 - bb->upper[BB_X];
    dh  = bb->lower[BB_X] - bx1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);

    dl  = by0 - bb->upper[BB_Y];
    dh  = bb->lower[BB_Y] - by1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);

    dl  = bz0 - bb->upper[BB_Z];
    dh  = bb->lower[BB_Z] - bz1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);

/* Plain C code calculating the distance^2 between two bounding boxes */
static float subc_bb_dist2(int si, const nbnxn_bb_t *bb_i_ci,
                           int csj, const nbnxn_bb_t *bb_j_all)
    const nbnxn_bb_t *bb_i, *bb_j;
    float             dl, dh, dm, dm0;

    bb_j = bb_j_all + csj;

    dl  = bb_i->lower[BB_X] - bb_j->upper[BB_X];
    dh  = bb_j->lower[BB_X] - bb_i->upper[BB_X];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);

    dl  = bb_i->lower[BB_Y] - bb_j->upper[BB_Y];
    dh  = bb_j->lower[BB_Y] - bb_i->upper[BB_Y];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);

    dl  = bb_i->lower[BB_Z] - bb_j->upper[BB_Z];
    dh  = bb_j->lower[BB_Z] - bb_i->upper[BB_Z];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
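
/* In all variants here, the squared box-box distance is
 * d2 = m0x^2 + m0y^2 + m0z^2 with, per dimension,
 * m0 = max(0, lo_i - hi_j, lo_j - hi_i); m0 is zero when the boxes
 * overlap along that dimension (the SIMD4 version below computes the
 * same quantity as dotProduct(dm0, dm0)).
 */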
#if NBNXN_SEARCH_BB_SIMD4

/* 4-wide SIMD code for bb distance for bb format xyz0 */
static float subc_bb_dist2_simd4(int si, const nbnxn_bb_t *bb_i_ci,
                                 int csj, const nbnxn_bb_t *bb_j_all)
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)

    Simd4Float bb_i_S0, bb_i_S1;
    Simd4Float bb_j_S0, bb_j_S1;

    bb_i_S0 = load4(&bb_i_ci[si].lower[0]);
    bb_i_S1 = load4(&bb_i_ci[si].upper[0]);
    bb_j_S0 = load4(&bb_j_all[csj].lower[0]);
    bb_j_S1 = load4(&bb_j_all[csj].upper[0]);

    dl_S  = bb_i_S0 - bb_j_S1;
    dh_S  = bb_j_S0 - bb_i_S1;

    dm_S  = max(dl_S, dh_S);
    dm0_S = max(dm_S, simd4SetZeroF());

    return dotProduct(dm0_S, dm0_S);
/* Calculate bounding-box distances of bb_i[si,...,si+3] and store them in d2 */
#define SUBC_BB_DIST2_SIMD4_XXXX_INNER(si, bb_i, d2) \
        Simd4Float dx_0, dy_0, dz_0;                 \
        Simd4Float dx_1, dy_1, dz_1;                 \
        Simd4Float mx, my, mz;                       \
        Simd4Float m0x, m0y, m0z;                    \
        Simd4Float d2x, d2y, d2z;                    \
        Simd4Float d2s, d2t;                         \
        shi = si*NNBSBB_D*DIM;                       \
        xi_l = load4(bb_i+shi+0*STRIDE_PBB);         \
        yi_l = load4(bb_i+shi+1*STRIDE_PBB);         \
        zi_l = load4(bb_i+shi+2*STRIDE_PBB);         \
        xi_h = load4(bb_i+shi+3*STRIDE_PBB);         \
        yi_h = load4(bb_i+shi+4*STRIDE_PBB);         \
        zi_h = load4(bb_i+shi+5*STRIDE_PBB);         \
        dx_0 = xi_l - xj_h;                          \
        dy_0 = yi_l - yj_h;                          \
        dz_0 = zi_l - zj_h;                          \
        dx_1 = xj_l - xi_h;                          \
        dy_1 = yj_l - yi_h;                          \
        dz_1 = zj_l - zi_h;                          \
        mx   = max(dx_0, dx_1);                      \
        my   = max(dy_0, dy_1);                      \
        mz   = max(dz_0, dz_1);                      \
        m0x  = max(mx, zero);                        \
        m0y  = max(my, zero);                        \
        m0z  = max(mz, zero);                        \
        store4(d2+si, d2t);                          \
/* 4-wide SIMD code for nsi bb distances for bb format xxxxyyyyzzzz */
static void subc_bb_dist2_simd4_xxxx(const float *bb_j,
                                     int nsi, const float *bb_i,
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)

    Simd4Float xj_l, yj_l, zj_l;
    Simd4Float xj_h, yj_h, zj_h;
    Simd4Float xi_l, yi_l, zi_l;
    Simd4Float xi_h, yi_h, zi_h;

    xj_l = Simd4Float(bb_j[0*STRIDE_PBB]);
    yj_l = Simd4Float(bb_j[1*STRIDE_PBB]);
    zj_l = Simd4Float(bb_j[2*STRIDE_PBB]);
    xj_h = Simd4Float(bb_j[3*STRIDE_PBB]);
    yj_h = Simd4Float(bb_j[4*STRIDE_PBB]);
    zj_h = Simd4Float(bb_j[5*STRIDE_PBB]);

    /* Here we "loop" over si from 0 to nsi with step STRIDE_PBB.
     * As we know the number of iterations is 1 or 2, we unroll manually.
     */
    SUBC_BB_DIST2_SIMD4_XXXX_INNER(0, bb_i, d2);
    if (STRIDE_PBB < nsi)
        SUBC_BB_DIST2_SIMD4_XXXX_INNER(STRIDE_PBB, bb_i, d2);

#endif /* NBNXN_SEARCH_BB_SIMD4 */
/* Returns whether any atom pair from two clusters is within distance sqrt(rlist2) */
static inline gmx_bool
clusterpair_in_range(const nbnxn_list_work_t *work,
                     int csj, int stride, const real *x_j,
#if !GMX_SIMD4_HAVE_REAL
    /* All coordinates are stored as xyzxyz... */
    const real *x_i = work->x_ci;

    for (int i = 0; i < c_nbnxnGpuClusterSize; i++)
        int i0 = (si*c_nbnxnGpuClusterSize + i)*DIM;
        for (int j = 0; j < c_nbnxnGpuClusterSize; j++)
            int j0 = (csj*c_nbnxnGpuClusterSize + j)*stride;

            real d2 = gmx::square(x_i[i0  ] - x_j[j0  ]) +
                      gmx::square(x_i[i0+1] - x_j[j0+1]) +
                      gmx::square(x_i[i0+2] - x_j[j0+2]);
#else /* !GMX_SIMD4_HAVE_REAL */

    /* 4-wide SIMD version.
     * The coordinates x_i are stored as xxxxyyyy..., x_j is stored xyzxyz...
     * Using 8-wide AVX(2) is not faster on Intel Sandy Bridge and Haswell.
     */
    static_assert(c_nbnxnGpuClusterSize == 8 || c_nbnxnGpuClusterSize == 4,
                  "A cluster is hard-coded to 4/8 atoms.");

    Simd4Real   rc2_S = Simd4Real(rlist2);

    const real *x_i = work->x_ci_simd;

    int         dim_stride = c_nbnxnGpuClusterSize*DIM;
    Simd4Real   ix_S0      = load4(x_i + si*dim_stride + 0*GMX_SIMD4_WIDTH);
    Simd4Real   iy_S0      = load4(x_i + si*dim_stride + 1*GMX_SIMD4_WIDTH);
    Simd4Real   iz_S0      = load4(x_i + si*dim_stride + 2*GMX_SIMD4_WIDTH);

    Simd4Real   ix_S1, iy_S1, iz_S1;
    if (c_nbnxnGpuClusterSize == 8)
        ix_S1 = load4(x_i + si*dim_stride + 3*GMX_SIMD4_WIDTH);
        iy_S1 = load4(x_i + si*dim_stride + 4*GMX_SIMD4_WIDTH);
        iz_S1 = load4(x_i + si*dim_stride + 5*GMX_SIMD4_WIDTH);

    /* We loop from the outer to the inner particles to maximize
     * the chance that we find a pair in range quickly and return.
     */
    int j0 = csj*c_nbnxnGpuClusterSize;
    int j1 = j0 + c_nbnxnGpuClusterSize - 1;

        Simd4Real jx0_S, jy0_S, jz0_S;
        Simd4Real jx1_S, jy1_S, jz1_S;

        Simd4Real dx_S0, dy_S0, dz_S0;
        Simd4Real dx_S1, dy_S1, dz_S1;
        Simd4Real dx_S2, dy_S2, dz_S2;
        Simd4Real dx_S3, dy_S3, dz_S3;

        Simd4Bool wco_any_S01, wco_any_S23, wco_any_S;

        jx0_S = Simd4Real(x_j[j0*stride+0]);
        jy0_S = Simd4Real(x_j[j0*stride+1]);
        jz0_S = Simd4Real(x_j[j0*stride+2]);

        jx1_S = Simd4Real(x_j[j1*stride+0]);
        jy1_S = Simd4Real(x_j[j1*stride+1]);
        jz1_S = Simd4Real(x_j[j1*stride+2]);

        /* Calculate distance */
        dx_S0 = ix_S0 - jx0_S;
        dy_S0 = iy_S0 - jy0_S;
        dz_S0 = iz_S0 - jz0_S;
        dx_S2 = ix_S0 - jx1_S;
        dy_S2 = iy_S0 - jy1_S;
        dz_S2 = iz_S0 - jz1_S;
        if (c_nbnxnGpuClusterSize == 8)
            dx_S1 = ix_S1 - jx0_S;
            dy_S1 = iy_S1 - jy0_S;
            dz_S1 = iz_S1 - jz0_S;
            dx_S3 = ix_S1 - jx1_S;
            dy_S3 = iy_S1 - jy1_S;
            dz_S3 = iz_S1 - jz1_S;

        /* rsq = dx*dx+dy*dy+dz*dz */
        rsq_S0 = norm2(dx_S0, dy_S0, dz_S0);
        rsq_S2 = norm2(dx_S2, dy_S2, dz_S2);
        if (c_nbnxnGpuClusterSize == 8)
            rsq_S1 = norm2(dx_S1, dy_S1, dz_S1);
            rsq_S3 = norm2(dx_S3, dy_S3, dz_S3);

        wco_S0 = (rsq_S0 < rc2_S);
        wco_S2 = (rsq_S2 < rc2_S);
        if (c_nbnxnGpuClusterSize == 8)
            wco_S1 = (rsq_S1 < rc2_S);
            wco_S3 = (rsq_S3 < rc2_S);

        if (c_nbnxnGpuClusterSize == 8)
            wco_any_S01 = wco_S0 || wco_S1;
            wco_any_S23 = wco_S2 || wco_S3;
            wco_any_S   = wco_any_S01 || wco_any_S23;

            wco_any_S = wco_S0 || wco_S2;

        if (anyTrue(wco_any_S))

#endif /* !GMX_SIMD4_HAVE_REAL */
/* Returns the j-cluster index for index cjIndex in a cj list */
static inline int nblCj(const nbnxn_cj_t *cjList, int cjIndex)
    return cjList[cjIndex].cj;

/* Returns the j-cluster index for index cjIndex in a cj4 list */
static inline int nblCj(const nbnxn_cj4_t *cj4List, int cjIndex)
    return cj4List[cjIndex/c_nbnxnGpuJgroupSize].cj[cjIndex & (c_nbnxnGpuJgroupSize - 1)];
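
/* Since c_nbnxnGpuJgroupSize is a power of two, the bitwise AND above equals
 * cjIndex % c_nbnxnGpuJgroupSize; e.g. with a j-group size of 4,
 * cjIndex = 11 reads cj4List[2].cj[3].
 */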
/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
static unsigned int nbl_imask0(const nbnxn_pairlist_t *nbl, int cj_ind)
    return nbl->cj4[cj_ind/c_nbnxnGpuJgroupSize].imei[0].imask;

/* Ensures there is enough space for extra exclusion masks */
static void check_excl_space(nbnxn_pairlist_t *nbl, int extra)
    if (nbl->nexcl+extra > nbl->excl_nalloc)
        nbl->excl_nalloc = over_alloc_small(nbl->nexcl+extra);
        nbnxn_realloc_void((void **)&nbl->excl,
                           nbl->nexcl*sizeof(*nbl->excl),
                           nbl->excl_nalloc*sizeof(*nbl->excl),
                           nbl->alloc, nbl->free);
/* Ensures there is enough space for maxNumExtraClusters extra j-clusters in the list */
static void check_cell_list_space_simple(nbnxn_pairlist_t *nbl,
                                         int               maxNumExtraClusters)
    cj_max = nbl->ncj + maxNumExtraClusters;

    if (cj_max > nbl->cj_nalloc)
        nbl->cj_nalloc = over_alloc_small(cj_max);
        nbnxn_realloc_void((void **)&nbl->cj,
                           nbl->ncj*sizeof(*nbl->cj),
                           nbl->cj_nalloc*sizeof(*nbl->cj),
                           nbl->alloc, nbl->free);

        nbnxn_realloc_void((void **)&nbl->cjOuter,
                           nbl->ncj*sizeof(*nbl->cjOuter),
                           nbl->cj_nalloc*sizeof(*nbl->cjOuter),
                           nbl->alloc, nbl->free);
/* Ensures there is enough space for ncell extra j-clusters in the list */
static void check_cell_list_space_supersub(nbnxn_pairlist_t *nbl,

    /* We can have at most nsupercell*c_gpuNumClusterPerCell sj lists */
    /* We can store 4 j-subcell - i-supercell pairs in one struct.
     * Since integer division rounds down, we need one extra entry.
     */
    ncj4_max = ((nbl->work->cj_ind + ncell*c_gpuNumClusterPerCell + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize);

    if (ncj4_max > nbl->cj4_nalloc)
        nbl->cj4_nalloc = over_alloc_small(ncj4_max);
        nbnxn_realloc_void((void **)&nbl->cj4,
                           nbl->work->cj4_init*sizeof(*nbl->cj4),
                           nbl->cj4_nalloc*sizeof(*nbl->cj4),
                           nbl->alloc, nbl->free);

    if (ncj4_max > nbl->work->cj4_init)
        for (int j4 = nbl->work->cj4_init; j4 < ncj4_max; j4++)
            /* No i-subcells and no excl's in the list initially */
            for (w = 0; w < c_nbnxnGpuClusterpairSplit; w++)
                nbl->cj4[j4].imei[w].imask    = 0U;
                nbl->cj4[j4].imei[w].excl_ind = 0;

        nbl->work->cj4_init = ncj4_max;
/* Sets all excl masks for one GPU warp to no exclusions */
static void set_no_excls(nbnxn_excl_t *excl)
    for (int t = 0; t < c_nbnxnGpuExclSize; t++)
        /* Turn all interaction bits on */
        excl->pair[t] = NBNXN_INTERACTION_MASK_ALL;
/* Initializes a single nbnxn_pairlist_t data structure */
static void nbnxn_init_pairlist(nbnxn_pairlist_t *nbl,
                                nbnxn_alloc_t    *alloc,
    if (alloc == nullptr)
        nbl->alloc = nbnxn_alloc_aligned;

        nbl->free = nbnxn_free_aligned;

    nbl->bSimple = bSimple;

    /* We need one element extra in sj, so alloc initially with 1 */

    GMX_ASSERT(c_nbnxnGpuNumClusterPerSupercluster == c_gpuNumClusterPerCell, "The search code assumes that a super-cluster matches a search grid cell");

    GMX_ASSERT(sizeof(nbl->cj4[0].imei[0].imask)*8 >= c_nbnxnGpuJgroupSize*c_gpuNumClusterPerCell, "The i super-cluster cluster interaction mask does not contain a sufficient number of bits");
    GMX_ASSERT(sizeof(nbl->excl[0])*8 >= c_nbnxnGpuJgroupSize*c_gpuNumClusterPerCell, "The GPU exclusion mask does not contain a sufficient number of bits");

    nbl->excl_nalloc = 0;

    check_excl_space(nbl, 1);

    set_no_excls(&nbl->excl[0]);

        snew_aligned(nbl->work->bb_ci, 1, NBNXN_SEARCH_BB_MEM_ALIGN);

        snew_aligned(nbl->work->pbb_ci, c_gpuNumClusterPerCell/STRIDE_PBB*NNBSBB_XXXX, NBNXN_SEARCH_BB_MEM_ALIGN);

        snew_aligned(nbl->work->bb_ci, c_gpuNumClusterPerCell, NBNXN_SEARCH_BB_MEM_ALIGN);

    int gpu_clusterpair_nc = c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize*DIM;
    snew(nbl->work->x_ci, gpu_clusterpair_nc);

    snew_aligned(nbl->work->x_ci_simd,
                 std::max(NBNXN_CPU_CLUSTER_I_SIZE*DIM*GMX_SIMD_REAL_WIDTH,
                 GMX_SIMD_REAL_WIDTH);

    snew_aligned(nbl->work->d2, c_gpuNumClusterPerCell, NBNXN_SEARCH_BB_MEM_ALIGN);

    nbl->work->sort            = nullptr;
    nbl->work->sort_nalloc     = 0;
    nbl->work->sci_sort        = nullptr;
    nbl->work->sci_sort_nalloc = 0;
void nbnxn_init_pairlist_set(nbnxn_pairlist_set_t *nbl_list,
                             gmx_bool bSimple, gmx_bool bCombined,
                             nbnxn_alloc_t *alloc,
    nbl_list->bSimple   = bSimple;
    nbl_list->bCombined = bCombined;

    nbl_list->nnbl = gmx_omp_nthreads_get(emntNonbonded);

    if (!nbl_list->bCombined &&
        nbl_list->nnbl > NBNXN_BUFFERFLAG_MAX_THREADS)
        gmx_fatal(FARGS, "%d OpenMP threads were requested. Since the non-bonded force buffer reduction is prohibitively slow with more than %d threads, we do not allow this. Use %d or less OpenMP threads.",
                  nbl_list->nnbl, NBNXN_BUFFERFLAG_MAX_THREADS, NBNXN_BUFFERFLAG_MAX_THREADS);

    snew(nbl_list->nbl, nbl_list->nnbl);
    if (bSimple && nbl_list->nnbl > 1)
        snew(nbl_list->nbl_work, nbl_list->nnbl);
    snew(nbl_list->nbl_fep, nbl_list->nnbl);
    /* Execute in order to avoid memory interleaving between threads */
#pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
    for (int i = 0; i < nbl_list->nnbl; i++)
        /* Allocate the nblist data structure locally on each thread
         * to optimize memory access for NUMA architectures.
         */
        snew(nbl_list->nbl[i], 1);

        /* Only list 0 is used on the GPU, use normal allocation for i>0 */
        if (!bSimple && i == 0)
            nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, alloc, free);

            nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, nullptr, nullptr);
        if (bSimple && nbl_list->nnbl > 1)
            snew(nbl_list->nbl_work[i], 1);
            nbnxn_init_pairlist(nbl_list->nbl_work[i], nbl_list->bSimple, nullptr, nullptr);

        snew(nbl_list->nbl_fep[i], 1);
        nbnxn_init_pairlist_fep(nbl_list->nbl_fep[i]);
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics_simple(FILE *fp, const nbnxn_pairlist_t *nbl,
                                           const nbnxn_search_t nbs, real rl)
    const nbnxn_grid_t *grid;

    grid = &nbs->grid[0];

    fprintf(fp, "nbl nci %d ncj %d\n",
            nbl->nci, nbl->ncjInUse);
    fprintf(fp, "nbl na_sc %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_sc, rl, nbl->ncjInUse, nbl->ncjInUse/(double)grid->nc,
            nbl->ncjInUse/(double)grid->nc*grid->na_sc,
            nbl->ncjInUse/(double)grid->nc*grid->na_sc/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nc*grid->na_sc/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    fprintf(fp, "nbl average j cell list length %.1f\n",
            0.25*nbl->ncjInUse/(double)std::max(nbl->nci, 1));

    for (int s = 0; s < SHIFTS; s++)

    for (int i = 0; i < nbl->nci; i++)
        cs[nbl->ci[i].shift & NBNXN_CI_SHIFT] +=
            nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start;

        int j = nbl->ci[i].cj_ind_start;
        while (j < nbl->ci[i].cj_ind_end &&
               nbl->cj[j].excl != NBNXN_INTERACTION_MASK_ALL)

    fprintf(fp, "nbl cell pairs, total: %d excl: %d %.1f%%\n",
            nbl->ncj, npexcl, 100*npexcl/(double)std::max(nbl->ncj, 1));
    for (int s = 0; s < SHIFTS; s++)
        fprintf(fp, "nbl shift %2d ncj %3d\n", s, cs[s]);
/* Print statistics of pair lists, used for debug output */
static void print_nblist_statistics_supersub(FILE *fp, const nbnxn_pairlist_t *nbl,
                                             const nbnxn_search_t nbs, real rl)
    const nbnxn_grid_t *grid;
    int                 c[c_gpuNumClusterPerCell + 1];
    double              sum_nsp, sum_nsp2;

    /* This code only produces correct statistics with domain decomposition */
    grid = &nbs->grid[0];

    fprintf(fp, "nbl nsci %d ncj4 %d nsi %d excl4 %d\n",
            nbl->nsci, nbl->ncj4, nbl->nci_tot, nbl->nexcl);
    fprintf(fp, "nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_ci, rl, nbl->nci_tot, nbl->nci_tot/(double)grid->nsubc_tot,
            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c,
            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nsubc_tot*grid->na_c/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    for (int si = 0; si <= c_gpuNumClusterPerCell; si++)

    for (int i = 0; i < nbl->nsci; i++)
        for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
            for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
                for (int si = 0; si < c_gpuNumClusterPerCell; si++)
                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))

        sum_nsp2 += nsp*nsp;
        nsp_max   = std::max(nsp_max, nsp);

    sum_nsp  /= nbl->nsci;
    sum_nsp2 /= nbl->nsci;

    fprintf(fp, "nbl #cluster-pairs: av %.1f stddev %.1f max %d\n",
            sum_nsp, std::sqrt(sum_nsp2 - sum_nsp*sum_nsp), nsp_max);

    for (b = 0; b <= c_gpuNumClusterPerCell; b++)
        fprintf(fp, "nbl j-list #i-subcell %d %7d %4.1f\n",
                100.0*c[b]/(double)(nbl->ncj4*c_nbnxnGpuJgroupSize));
/* Sets *excl to the exclusion mask for cj4-unit cj4 and warp warp */
static void low_get_nbl_exclusions(nbnxn_pairlist_t *nbl, int cj4,
                                   int warp, nbnxn_excl_t **excl)
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
        /* No exclusions set, make a new list entry */
        nbl->cj4[cj4].imei[warp].excl_ind = nbl->nexcl;

        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
        set_no_excls(*excl);

        /* We already have some exclusions, new ones can be added to the list */
        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];

/* Sets *excl to the exclusion mask for cj4-unit cj4 and warp warp;
 * generates a new element and allocates extra memory, if necessary.
 */
static void get_nbl_exclusions_1(nbnxn_pairlist_t *nbl, int cj4,
                                 int warp, nbnxn_excl_t **excl)
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
        /* We need to make a new list entry, check if we have space */
        check_excl_space(nbl, 1);

    low_get_nbl_exclusions(nbl, cj4, warp, excl);

/* Sets the exclusion masks for cj4-unit cj4 for both warps;
 * generates new elements and allocates extra memory, if necessary.
 */
static void get_nbl_exclusions_2(nbnxn_pairlist_t *nbl, int cj4,
                                 nbnxn_excl_t **excl_w0,
                                 nbnxn_excl_t **excl_w1)
    /* Check for space we might need */
    check_excl_space(nbl, 2);

    low_get_nbl_exclusions(nbl, cj4, 0, excl_w0);
    low_get_nbl_exclusions(nbl, cj4, 1, excl_w1);
/* Sets the self exclusions i=j and pair exclusions i>j */
static void set_self_and_newton_excls_supersub(nbnxn_pairlist_t *nbl,
                                               int cj4_ind, int sj_offset,
                                               int i_cluster_in_cell)
    nbnxn_excl_t *excl[c_nbnxnGpuClusterpairSplit];

    /* Here we only set the self and double pair exclusions */

    static_assert(c_nbnxnGpuClusterpairSplit == 2, "");

    get_nbl_exclusions_2(nbl, cj4_ind, &excl[0], &excl[1]);

    /* Only minor < major bits set */
    for (int ej = 0; ej < nbl->na_ci; ej++)
        for (int ei = ej; ei < nbl->na_ci; ei++)
            excl[w]->pair[(ej & (c_nbnxnGpuJgroupSize-1))*nbl->na_ci + ei] &=
                ~(1U << (sj_offset*c_gpuNumClusterPerCell + i_cluster_in_cell));
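
/* Worked example of the mask bit layout used above, assuming
 * c_gpuNumClusterPerCell == 8: the interaction bit for j-cluster offset
 * sj_offset and i-cluster i_cluster_in_cell within the cell is
 * sj_offset*8 + i_cluster_in_cell, so sj_offset = 2 and
 * i_cluster_in_cell = 3 clears bit 19.
 */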
/* Returns a diagonal or off-diagonal interaction mask for plain C lists */
static unsigned int get_imask(gmx_bool rdiag, int ci, int cj)
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);

/* Returns a diagonal or off-diagonal interaction mask for cj-size=2 */
gmx_unused static unsigned int get_imask_simd_j2(gmx_bool rdiag, int ci, int cj)
    return (rdiag && ci*2 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_0 :
            (rdiag && ci*2+1 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_1 :
             NBNXN_INTERACTION_MASK_ALL));

/* Returns a diagonal or off-diagonal interaction mask for cj-size=4 */
gmx_unused static unsigned int get_imask_simd_j4(gmx_bool rdiag, int ci, int cj)
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);

/* Returns a diagonal or off-diagonal interaction mask for cj-size=8 */
gmx_unused static unsigned int get_imask_simd_j8(gmx_bool rdiag, int ci, int cj)
    return (rdiag && ci == cj*2 ? NBNXN_INTERACTION_MASK_DIAG_J8_0 :
            (rdiag && ci == cj*2+1 ? NBNXN_INTERACTION_MASK_DIAG_J8_1 :
             NBNXN_INTERACTION_MASK_ALL));

#if GMX_SIMD_REAL_WIDTH == 2
#define get_imask_simd_4xn  get_imask_simd_j2
#endif
#if GMX_SIMD_REAL_WIDTH == 4
#define get_imask_simd_4xn  get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 8
#define get_imask_simd_4xn  get_imask_simd_j8
#define get_imask_simd_2xnn get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 16
#define get_imask_simd_2xnn get_imask_simd_j8
#endif
/* Plain C code for checking and adding cluster-pairs to the list.
 *
 * \param[in]     gridj              The j-grid
 * \param[in,out] nbl                The pair-list to store the cluster pairs in
 * \param[in]     icluster           The index of the i-cluster
 * \param[in]     jclusterFirst      The first cluster in the j-range
 * \param[in]     jclusterLast       The last cluster in the j-range
 * \param[in]     excludeSubDiagonal Exclude atom pairs with i-index > j-index
 * \param[in]     x_j                Coordinates for the j-atoms, in xyz format
 * \param[in]     rlist2             The squared list cut-off
 * \param[in]     rbb2               The squared cut-off for putting cluster-pairs in the list based on bounding box distance only
 * \param[in,out] numDistanceChecks  The number of distance checks performed
 */
makeClusterListSimple(const nbnxn_grid_t       *gridj,
                      nbnxn_pairlist_t         *nbl,
                      bool                      excludeSubDiagonal,
                      const real * gmx_restrict x_j,
                      int * gmx_restrict        numDistanceChecks)
    const nbnxn_bb_t * gmx_restrict bb_ci = nbl->work->bb_ci;
    const real * gmx_restrict       x_ci  = nbl->work->x_ci;

    while (!InRange && jclusterFirst <= jclusterLast)
        real d2 = subc_bb_dist2(0, bb_ci, jclusterFirst, gridj->bb);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        else if (d2 < rlist2)
            int cjf_gl = gridj->cell0 + jclusterFirst;
            for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
                for (int j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rlist2);

            *numDistanceChecks += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;

    while (!InRange && jclusterLast > jclusterFirst)
        real d2 = subc_bb_dist2(0, bb_ci, jclusterLast, gridj->bb);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        else if (d2 < rlist2)
            int cjl_gl = gridj->cell0 + jclusterLast;
            for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
                for (int j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rlist2);

            *numDistanceChecks += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;

    if (jclusterFirst <= jclusterLast)
        for (int jcluster = jclusterFirst; jcluster <= jclusterLast; jcluster++)
            /* Store cj and the interaction mask */
            nbl->cj[nbl->ncj].cj   = gridj->cell0 + jcluster;
            nbl->cj[nbl->ncj].excl = get_imask(excludeSubDiagonal, icluster, jcluster);

        /* Increase the closing index in the i list */
        nbl->ci[nbl->nci].cj_ind_end = nbl->ncj;
#ifdef GMX_NBNXN_SIMD_4XN
#include "gromacs/mdlib/nbnxn_search_simd_4xn.h"
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
#include "gromacs/mdlib/nbnxn_search_simd_2xnn.h"
#endif
/* Plain C or SIMD4 code for making a pair list of super-cell sci vs scj.
 * Checks bounding box distances and possibly atom pair distances.
 */
static void make_cluster_list_supersub(const nbnxn_grid_t *gridi,
                                       const nbnxn_grid_t *gridj,
                                       nbnxn_pairlist_t   *nbl,
                                       gmx_bool            sci_equals_scj,
                                       int stride, const real *x,
                                       real rlist2, float rbb2,
                                       int *numDistanceChecks)
    nbnxn_list_work_t *work = nbl->work;

    const float *pbb_ci = work->pbb_ci;

    const nbnxn_bb_t *bb_ci = work->bb_ci;

    assert(c_nbnxnGpuClusterSize == gridi->na_c);
    assert(c_nbnxnGpuClusterSize == gridj->na_c);

    /* We generate the pairlist mainly based on bounding-box distances
     * and do atom pair distance based pruning on the GPU.
     * Only when a j-group contains a single cluster-pair do we try to prune
     * that pair based on atom distances on the CPU, to avoid empty j-groups.
     */
#define PRUNE_LIST_CPU_ONE 1
#define PRUNE_LIST_CPU_ALL 0
#if PRUNE_LIST_CPU_ONE
    int ci_last = -1;
#endif

    float *d2l = work->d2;

    for (int subc = 0; subc < gridj->nsubc[scj]; subc++)
        int          cj4_ind   = nbl->work->cj_ind/c_nbnxnGpuJgroupSize;
        int          cj_offset = nbl->work->cj_ind - cj4_ind*c_nbnxnGpuJgroupSize;
        nbnxn_cj4_t *cj4       = &nbl->cj4[cj4_ind];

        int cj = scj*c_gpuNumClusterPerCell + subc;

        int cj_gl = gridj->cell0*c_gpuNumClusterPerCell + cj;

        /* Initialize this j-subcell i-subcell list */
        cj4->cj[cj_offset] = cj_gl;

        ci1 = gridi->nsubc[sci];

        /* Determine all ci1 bb distances in one call with SIMD4 */
        subc_bb_dist2_simd4_xxxx(gridj->pbb+(cj>>STRIDE_PBB_2LOG)*NNBSBB_XXXX+(cj & (STRIDE_PBB-1)),
        *numDistanceChecks += c_nbnxnGpuClusterSize*2;

        unsigned int imask = 0;
        /* We use a fixed upper-bound instead of ci1 to help optimization */
        for (int ci = 0; ci < c_gpuNumClusterPerCell; ci++)
            /* Determine the bb distance between ci and cj */
            d2l[ci] = subc_bb_dist2(ci, bb_ci, cj, gridj->bb);
            *numDistanceChecks += 2;
#if PRUNE_LIST_CPU_ALL
            /* Check if the distance is within the distance where
             * we use only the bounding box distance rbb,
             * or within the cut-off and there is at least one atom pair
             * within the cut-off. This check is very costly.
             */
            *numDistanceChecks += c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize;
                 clusterpair_in_range(work, ci, cj_gl, stride, x, rlist2)))
#else
            /* Check if the distance between the two bounding boxes
             * is within the pair-list cut-off.
             */
#endif
                /* Flag this i-subcell to be taken into account */
                imask |= (1U << (cj_offset*c_gpuNumClusterPerCell + ci));

#if PRUNE_LIST_CPU_ONE
                ci_last = ci;
#endif

#if PRUNE_LIST_CPU_ONE
        /* If we only found 1 pair, check if any atoms are actually
         * within the cut-off, so we could get rid of it.
         */
        if (npair == 1 && d2l[ci_last] >= rbb2 &&
            !clusterpair_in_range(work, ci_last, cj_gl, stride, x, rlist2))
            imask &= ~(1U << (cj_offset*c_gpuNumClusterPerCell + ci_last));
#endif

        /* We have a useful sj entry, close it now */

        /* Set the exclusions for the ci==sj entry.
         * Here we don't bother to check if this entry is actually flagged,
         * as it will nearly always be in the list.
         */
            set_self_and_newton_excls_supersub(nbl, cj4_ind, cj_offset, subc);

        /* Copy the cluster interaction mask to the list */
        for (int w = 0; w < c_nbnxnGpuClusterpairSplit; w++)
            cj4->imei[w].imask |= imask;

        nbl->work->cj_ind++;

        /* Keep the count */
        nbl->nci_tot += npair;

    /* Increase the closing index in the i super-cell list */
    nbl->sci[nbl->nsci].cj4_ind_end =
        (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize;
/* Returns how many contiguous j-clusters we have starting at cjIndexStart in the j-list */
template <typename CjListType>
static int numContiguousJClusters(const int         cjIndexStart,
                                  const int         cjIndexEnd,
                                  const CjListType &cjList)
    const int firstJCluster = nblCj(cjList, cjIndexStart);

    int       numContiguous = 0;

    while (cjIndexStart + numContiguous < cjIndexEnd &&
           nblCj(cjList, cjIndexStart + numContiguous) == firstJCluster + numContiguous)

    return numContiguous;
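
/* Example: for a j-list containing clusters {10, 11, 12, 14} with
 * cjIndexStart = 0 and cjIndexEnd = 4, this returns 3, since 10, 11, 12
 * are contiguous but 14 breaks the sequence.
 */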
/* Helper struct for efficient searching for excluded atoms in a j-list */
struct JListRanges
    template <typename CjListType>
    JListRanges(int               cjIndexStart,
                int               cjIndexEnd,
                const CjListType &cjList);

    int cjIndexStart; // The start index in the j-list
    int cjIndexEnd;   // The end index in the j-list
    int cjFirst;      // The j-cluster with index cjIndexStart
    int cjLast;       // The j-cluster with index cjIndexEnd-1
    int numDirect;    // Up to cjIndexStart+numDirect the j-clusters are cjFirst + the index offset

template <typename CjListType>
JListRanges::JListRanges(int               cjIndexStart,
                         int               cjIndexEnd,
                         const CjListType &cjList) :
    cjIndexStart(cjIndexStart),
    cjIndexEnd(cjIndexEnd)
    GMX_ASSERT(cjIndexEnd > cjIndexStart, "JListRanges should only be called with non-empty lists");

    cjFirst = nblCj(cjList, cjIndexStart);
    cjLast  = nblCj(cjList, cjIndexEnd - 1);

    /* Determine how many contiguous j-clusters we have starting
     * from the first j-cluster in the list. This number can be used to
     * directly calculate j-cluster indices for excluded atoms.
     */
    numDirect = numContiguousJClusters(cjIndexStart, cjIndexEnd, cjList);
/* Return the index of \p jCluster in the given range or -1 when not present
 *
 * Note: This code is executed very often and therefore performance is
 *       important. It should be inlined and fully optimized.
 */
template <typename CjListType>
static inline int findJClusterInJList(int                jCluster,
                                      const JListRanges &ranges,
                                      const CjListType  &cjList)
    if (jCluster < ranges.cjFirst + ranges.numDirect)
        /* We can calculate the index directly using the offset */
        index = ranges.cjIndexStart + jCluster - ranges.cjFirst;

        /* Search for jCluster using bisection */
        int rangeStart = ranges.cjIndexStart + ranges.numDirect;
        int rangeEnd   = ranges.cjIndexEnd;

        while (index == -1 && rangeStart < rangeEnd)
            rangeMiddle = (rangeStart + rangeEnd) >> 1;

            const int clusterMiddle = nblCj(cjList, rangeMiddle);

            if (jCluster == clusterMiddle)
                index = rangeMiddle;
            else if (jCluster < clusterMiddle)
                rangeEnd = rangeMiddle;

                rangeStart = rangeMiddle + 1;
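
/* Example: for ranges over the j-list {20, 21, 22, 30, 34} (numDirect = 3),
 * jCluster = 21 is found directly at cjIndexStart + 1, while jCluster = 34
 * lies outside the contiguous part and is located by the bisection loop
 * over indices 3 and 4.
 */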
/* Set all atom-pair exclusions for a simple type list i-entry
 *
 * Set all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for simple list entry iEntry.
 */
setExclusionsForSimpleIentry(const nbnxn_search_t  nbs,
                             nbnxn_pairlist_t     *nbl,
                             gmx_bool              diagRemoved,
                             const nbnxn_ci_t     &iEntry,
                             const t_blocka       &exclusions)
    if (iEntry.cj_ind_end == iEntry.cj_ind_start)
        /* Empty list: no exclusions */

    const JListRanges ranges(iEntry.cj_ind_start, iEntry.cj_ind_end, nbl->cj);

    const int         iCluster = iEntry.ci;

    const int        *cell = nbs->cell;

    /* Loop over the atoms in the i-cluster */
    for (int i = 0; i < nbl->na_sc; i++)
        const int iIndex = iCluster*nbl->na_sc + i;
        const int iAtom  = nbs->a[iIndex];

        /* Loop over the topology-based exclusions for this i-atom */
        for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1]; exclIndex++)
            const int jAtom = exclusions.a[exclIndex];

            /* The self exclusions are already set, save some time */

            /* Get the index of the j-atom in the nbnxn atom data */
            const int jIndex = cell[jAtom];

            /* Without shifts we only calculate interactions j>i
             * for one-way pair-lists.
             */
            if (diagRemoved && jIndex <= iIndex)

            const int jCluster = (jIndex >> na_cj_2log);

            /* Could this j-cluster be in our list? */
            if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
                    findJClusterInJList(jCluster, ranges, nbl->cj);

                /* We found an exclusion, clear the corresponding interaction bit */
                const int innerJ = jIndex - (jCluster << na_cj_2log);

                nbl->cj[index].excl &= ~(1U << ((i << na_cj_2log) + innerJ));
/* Add a new i-entry to the FEP list and copy the i-properties */
static inline void fep_list_new_nri_copy(t_nblist *nlist)
    /* Add a new i-entry */

    assert(nlist->nri < nlist->maxnri);

    /* Duplicate the last i-entry, except for jindex, which continues */
    nlist->iinr[nlist->nri]   = nlist->iinr[nlist->nri-1];
    nlist->shift[nlist->nri]  = nlist->shift[nlist->nri-1];
    nlist->gid[nlist->nri]    = nlist->gid[nlist->nri-1];
    nlist->jindex[nlist->nri] = nlist->nrj;
/* For load balancing of the free-energy lists over threads, we set
 * the maximum nrj size of an i-entry to 40. This leads to good
 * load balancing in the worst-case scenario of a single perturbed
 * particle on 16 threads, while not introducing significant overhead.
 * Note that half of the perturbed pairs will anyhow end up in very small
 * lists, since non-perturbed i-particles will see few perturbed j-particles.
 */
const int max_nrj_fep = 40;
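
/* Worked example of the balancing above: a single perturbed particle with,
 * say, 600 perturbed pairs is split into 600/40 = 15 i-entries, which can
 * be distributed over 16 threads instead of forming one large list.
 */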
/* Exclude the perturbed pairs from the Verlet list. This is only done to avoid
 * singularities for overlapping particles (0/0), since the charges and
 * LJ parameters have been zeroed in the nbnxn data structure.
 * Simultaneously make a group pair list for the perturbed pairs.
 */
static void make_fep_list(const nbnxn_search_t    nbs,
                          const nbnxn_atomdata_t *nbat,
                          nbnxn_pairlist_t       *nbl,
                          gmx_bool                bDiagRemoved,
                          const nbnxn_grid_t     *gridi,
                          const nbnxn_grid_t     *gridj,
    int      ci, cj_ind_start, cj_ind_end, cja, cjr;
    int      ngid, gid_i = 0, gid_j, gid;
    int      egp_shift, egp_mask;
    int      ind_i, ind_j, ai, aj;
    gmx_bool bFEP_i, bFEP_i_all;

    if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)

    cj_ind_start = nbl_ci->cj_ind_start;
    cj_ind_end   = nbl_ci->cj_ind_end;

    /* In the worst case we have alternating energy groups
     * and create #atom-pair lists, which means we need the size
     * of a cluster pair (na_ci*na_cj) times the number of cj's.
     */
    nri_max = nbl->na_ci*nbl->na_cj*(cj_ind_end - cj_ind_start);
    if (nlist->nri + nri_max > nlist->maxnri)
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);

    ngid = nbat->nenergrp;

    if (static_cast<std::size_t>(ngid*gridj->na_cj) > sizeof(gid_cj)*8)
        gmx_fatal(FARGS, "The Verlet scheme with %dx%d kernels and free-energy only supports up to %d energy groups",
                  gridi->na_c, gridj->na_cj, (sizeof(gid_cj)*8)/gridj->na_cj);

    egp_shift = nbat->neg_2log;
    egp_mask  = (1<<nbat->neg_2log) - 1;

    /* Loop over the atoms in the i sub-cell */
    for (int i = 0; i < nbl->na_ci; i++)
        ind_i = ci*nbl->na_ci + i;

        nlist->jindex[nri+1] = nlist->jindex[nri];
        nlist->iinr[nri]     = ai;
        /* The actual energy group pair index is set later */
        nlist->gid[nri]      = 0;
        nlist->shift[nri]    = nbl_ci->shift & NBNXN_CI_SHIFT;

        bFEP_i = gridi->fep[ci - gridi->cell0] & (1 << i);

        bFEP_i_all = bFEP_i_all && bFEP_i;

        if (nlist->nrj + (cj_ind_end - cj_ind_start)*nbl->na_cj > nlist->maxnrj)
            nlist->maxnrj = over_alloc_small(nlist->nrj + (cj_ind_end - cj_ind_start)*nbl->na_cj);
            srenew(nlist->jjnr,     nlist->maxnrj);
            srenew(nlist->excl_fep, nlist->maxnrj);

            gid_i = (nbat->energrp[ci] >> (egp_shift*i)) & egp_mask;

        for (int cj_ind = cj_ind_start; cj_ind < cj_ind_end; cj_ind++)
            unsigned int fep_cj;

            cja = nbl->cj[cj_ind].cj;

            if (gridj->na_cj == gridj->na_c)
                cjr    = cja - gridj->cell0;
                fep_cj = gridj->fep[cjr];

                    gid_cj = nbat->energrp[cja];

            else if (2*gridj->na_cj == gridj->na_c)
                cjr = cja - gridj->cell0*2;
                /* Extract half of the ci fep/energrp mask */
                fep_cj = (gridj->fep[cjr>>1] >> ((cjr&1)*gridj->na_cj)) & ((1<<gridj->na_cj) - 1);

                    gid_cj = nbat->energrp[cja>>1] >> ((cja&1)*gridj->na_cj*egp_shift) & ((1<<(gridj->na_cj*egp_shift)) - 1);

                cjr = cja - (gridj->cell0>>1);
                /* Combine two ci fep masks/energrp */
                fep_cj = gridj->fep[cjr*2] + (gridj->fep[cjr*2+1] << gridj->na_c);

                    gid_cj = nbat->energrp[cja*2] + (nbat->energrp[cja*2+1] << (gridj->na_c*egp_shift));

            if (bFEP_i || fep_cj != 0)
                for (int j = 0; j < nbl->na_cj; j++)
                    /* Is this interaction perturbed and not excluded? */
                    ind_j = cja*nbl->na_cj + j;

                        (bFEP_i || (fep_cj & (1 << j))) &&
                        (!bDiagRemoved || ind_j >= ind_i))

                        gid_j = (gid_cj >> (j*egp_shift)) & egp_mask;
                        gid   = GID(gid_i, gid_j, ngid);

                        if (nlist->nrj > nlist->jindex[nri] &&
                            nlist->gid[nri] != gid)
                            /* Energy group pair changed: new list */
                            fep_list_new_nri_copy(nlist);

                        nlist->gid[nri] = gid;

                        if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                            fep_list_new_nri_copy(nlist);

                        /* Add it to the FEP list */
                        nlist->jjnr[nlist->nrj]     = aj;
                        nlist->excl_fep[nlist->nrj] = (nbl->cj[cj_ind].excl >> (i*nbl->na_cj + j)) & 1;

                        /* Exclude it from the normal list.
                         * Note that the charge has been set to zero,
                         * but we need to avoid 0/0, as perturbed atoms
                         * can be on top of each other.
                         */
                        nbl->cj[cj_ind].excl &= ~(1U << (i*nbl->na_cj + j));

        if (nlist->nrj > nlist->jindex[nri])
            /* Actually add this new, non-empty, list */
            nlist->jindex[nlist->nri] = nlist->nrj;

        /* All interactions are perturbed, we can skip this entry */
        nbl_ci->cj_ind_end = cj_ind_start;
        nbl->ncjInUse     -= cj_ind_end - cj_ind_start;
/* Return the index of j-cluster cj within its cj4 group */
static inline int cj_mod_cj4(int cj)
    return cj & (c_nbnxnGpuJgroupSize - 1);

/* Convert a j-cluster index to a cj4 group index */
static inline int cj_to_cj4(int cj)
    return cj/c_nbnxnGpuJgroupSize;

/* Return the index of a j-atom within a warp */
static inline int a_mod_wj(int a)
    return a & (c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit - 1);
/* As make_fep_list above, but for super/sub lists. */
static void make_fep_list_supersub(const nbnxn_search_t    nbs,
                                   const nbnxn_atomdata_t *nbat,
                                   nbnxn_pairlist_t       *nbl,
                                   gmx_bool                bDiagRemoved,
                                   const nbnxn_sci_t      *nbl_sci,
                                   const nbnxn_grid_t     *gridi,
                                   const nbnxn_grid_t     *gridj,
    int                sci, cj4_ind_start, cj4_ind_end, cjr;
    int                ind_i, ind_j, ai, aj;
    const nbnxn_cj4_t *cj4;

    if (nbl_sci->cj4_ind_end == nbl_sci->cj4_ind_start)

    cj4_ind_start = nbl_sci->cj4_ind_start;
    cj4_ind_end   = nbl_sci->cj4_ind_end;
    /* Here we process one super-cell, with a maximum of na_sc atoms, versus
     * a list of cj4 entries, each with a maximum of c_nbnxnGpuJgroupSize
     * cj's, each of size na_cj atoms.
     * On the GPU we don't support energy groups (yet).
     * So for each of the na_sc i-atoms, we need max one FEP list
     * for each max_nrj_fep j-atoms.
     */
    nri_max = nbl->na_sc*nbl->na_cj*(1 + ((cj4_ind_end - cj4_ind_start)*c_nbnxnGpuJgroupSize)/max_nrj_fep);
    if (nlist->nri + nri_max > nlist->maxnri)
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);

    /* Loop over the atoms in the i super-cluster */
    for (int c = 0; c < c_gpuNumClusterPerCell; c++)
        c_abs = sci*c_gpuNumClusterPerCell + c;

        for (int i = 0; i < nbl->na_ci; i++)
            ind_i = c_abs*nbl->na_ci + i;

            nlist->jindex[nri+1] = nlist->jindex[nri];
            nlist->iinr[nri]     = ai;
            /* With GPUs, energy groups are not supported */
            nlist->gid[nri]      = 0;
            nlist->shift[nri]    = nbl_sci->shift & NBNXN_CI_SHIFT;

            bFEP_i = (gridi->fep[c_abs - gridi->cell0*c_gpuNumClusterPerCell] & (1 << i));

            xi = nbat->x[ind_i*nbat->xstride+XX] + shx;
            yi = nbat->x[ind_i*nbat->xstride+YY] + shy;
            zi = nbat->x[ind_i*nbat->xstride+ZZ] + shz;

            if (nlist->nrj + (cj4_ind_end - cj4_ind_start)*c_nbnxnGpuJgroupSize*nbl->na_cj > nlist->maxnrj)
                nlist->maxnrj = over_alloc_small(nlist->nrj + (cj4_ind_end - cj4_ind_start)*c_nbnxnGpuJgroupSize*nbl->na_cj);
                srenew(nlist->jjnr,     nlist->maxnrj);
                srenew(nlist->excl_fep, nlist->maxnrj);
            for (int cj4_ind = cj4_ind_start; cj4_ind < cj4_ind_end; cj4_ind++)
                cj4 = &nbl->cj4[cj4_ind];

                for (int gcj = 0; gcj < c_nbnxnGpuJgroupSize; gcj++)
                    unsigned int fep_cj;

                    if ((cj4->imei[0].imask & (1U << (gcj*c_gpuNumClusterPerCell + c))) == 0)
                        /* Skip this ci for this cj */

                    cjr = cj4->cj[gcj] - gridj->cell0*c_gpuNumClusterPerCell;

                    fep_cj = gridj->fep[cjr];

                    if (bFEP_i || fep_cj != 0)
                        for (int j = 0; j < nbl->na_cj; j++)
                            /* Is this interaction perturbed and not excluded? */
                            ind_j = (gridj->cell0*c_gpuNumClusterPerCell + cjr)*nbl->na_cj + j;

                                (bFEP_i || (fep_cj & (1 << j))) &&
                                (!bDiagRemoved || ind_j >= ind_i))

                                unsigned int excl_bit;

                                const int jHalf = j/(c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit);
                                get_nbl_exclusions_1(nbl, cj4_ind, jHalf, &excl);

                                excl_pair = a_mod_wj(j)*nbl->na_ci + i;
                                excl_bit  = (1U << (gcj*c_gpuNumClusterPerCell + c));

                                dx = nbat->x[ind_j*nbat->xstride+XX] - xi;
                                dy = nbat->x[ind_j*nbat->xstride+YY] - yi;
                                dz = nbat->x[ind_j*nbat->xstride+ZZ] - zi;

                                /* The unpruned GPU list has more than 2/3
                                 * of the atom pairs beyond rlist. Using
                                 * this list will cause a lot of overhead
                                 * in the CPU FEP kernels, especially
                                 * relative to the fast GPU kernels.
                                 * So we prune the FEP list here.
                                 */
                                if (dx*dx + dy*dy + dz*dz < rlist_fep2)
                                    if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                                        fep_list_new_nri_copy(nlist);

                                    /* Add it to the FEP list */
                                    nlist->jjnr[nlist->nrj]     = aj;
                                    nlist->excl_fep[nlist->nrj] = (excl->pair[excl_pair] & excl_bit) ? 1 : 0;

                                /* Exclude it from the normal list.
                                 * Note that the charge and LJ parameters have
                                 * been set to zero, but we need to avoid 0/0,
                                 * as perturbed atoms can be on top of each other.
                                 */
                                excl->pair[excl_pair] &= ~excl_bit;

                    /* Note that we could mask out this pair in imask
                     * if all i- and/or all j-particles are perturbed.
                     * But since the perturbed pairs on the CPU will
                     * take an order of magnitude more time, the GPU
                     * will finish before the CPU and there is no gain.
                     */

            if (nlist->nrj > nlist->jindex[nri])
                /* Actually add this new, non-empty, list */
                nlist->jindex[nlist->nri] = nlist->nrj;
/* Set all atom-pair exclusions for a GPU type list i-entry
 *
 * Sets all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for i-super-cluster list entry iEntry.
 */
setExclusionsForGpuIentry(const nbnxn_search_t  nbs,
                          nbnxn_pairlist_t     *nbl,
                          gmx_bool              diagRemoved,
                          const nbnxn_sci_t    &iEntry,
                          const t_blocka       &exclusions)
    if (iEntry.cj4_ind_end == iEntry.cj4_ind_start)

    /* Set the search ranges using start and end j-cluster indices.
     * Note that here we can not use cj4_ind_end, since the last cj4
     * can be only partially filled, so we use cj_ind.
     */
    const JListRanges ranges(iEntry.cj4_ind_start*c_nbnxnGpuJgroupSize,

    GMX_ASSERT(nbl->na_ci == c_nbnxnGpuClusterSize, "na_ci should match the GPU cluster size");
    constexpr int     c_clusterSize      = c_nbnxnGpuClusterSize;
    constexpr int     c_superClusterSize = c_nbnxnGpuNumClusterPerSupercluster*c_nbnxnGpuClusterSize;

    const int         iSuperCluster = iEntry.sci;

    const int        *cell = nbs->cell;

    /* Loop over the atoms in the i super-cluster */
    for (int i = 0; i < c_superClusterSize; i++)
        const int iIndex = iSuperCluster*c_superClusterSize + i;
        const int iAtom  = nbs->a[iIndex];

        const int iCluster = i/c_clusterSize;

        /* Loop over the topology-based exclusions for this i-atom */
        for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1]; exclIndex++)
            const int jAtom = exclusions.a[exclIndex];

            /* The self exclusions are already set, save some time */

            /* Get the index of the j-atom in the nbnxn atom data */
            const int jIndex = cell[jAtom];

            /* Without shifts we only calculate interactions j>i
             * for one-way pair-lists.
             */
            /* NOTE: We would like to use iIndex on the right hand side,
             * but that makes this routine 25% slower with gcc6/7.
             * Even using c_superClusterSize makes it slower.
             * Either of these changes triggers peeling of the exclIndex
             * loop, which apparently leads to far less efficient code.
             */
            if (diagRemoved && jIndex <= iSuperCluster*nbl->na_sc + i)

            const int jCluster = jIndex/c_clusterSize;
            /* Could this j-cluster be in our list? */
            if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
                    findJClusterInJList(jCluster, ranges, nbl->cj4);

                /* We found an exclusion, clear the corresponding interaction bit */
                const unsigned int pairMask = (1U << (cj_mod_cj4(index)*c_gpuNumClusterPerCell + iCluster));
                /* Check if the i-cluster interacts with the j-cluster */
                if (nbl_imask0(nbl, index) & pairMask)
                    const int innerI = (i & (c_clusterSize - 1));
                    const int innerJ = (jIndex & (c_clusterSize - 1));

                    /* Determine which j-half (CUDA warp) we are in */
                    const int jHalf = innerJ/(c_clusterSize/c_nbnxnGpuClusterpairSplit);

                    nbnxn_excl_t *interactionMask;
                    get_nbl_exclusions_1(nbl, cj_to_cj4(index), jHalf, &interactionMask);

                    interactionMask->pair[a_mod_wj(innerJ)*c_clusterSize + innerI] &= ~pairMask;
/* Reallocate the simple ci list for at least n entries */
static void nb_realloc_ci(nbnxn_pairlist_t *nbl, int n)
    nbl->ci_nalloc = over_alloc_small(n);
    nbnxn_realloc_void((void **)&nbl->ci,
                       nbl->nci*sizeof(*nbl->ci),
                       nbl->ci_nalloc*sizeof(*nbl->ci),
                       nbl->alloc, nbl->free);

    nbnxn_realloc_void((void **)&nbl->ciOuter,
                       nbl->nci*sizeof(*nbl->ciOuter),
                       nbl->ci_nalloc*sizeof(*nbl->ciOuter),
                       nbl->alloc, nbl->free);

/* Reallocate the super-cell sci list for at least n entries */
static void nb_realloc_sci(nbnxn_pairlist_t *nbl, int n)
    nbl->sci_nalloc = over_alloc_small(n);
    nbnxn_realloc_void((void **)&nbl->sci,
                       nbl->nsci*sizeof(*nbl->sci),
                       nbl->sci_nalloc*sizeof(*nbl->sci),
                       nbl->alloc, nbl->free);

/* Make a new ci entry at index nbl->nci */
static void new_ci_entry(nbnxn_pairlist_t *nbl, int ci, int shift, int flags)
    if (nbl->nci + 1 > nbl->ci_nalloc)
        nb_realloc_ci(nbl, nbl->nci+1);
    nbl->ci[nbl->nci].ci    = ci;
    nbl->ci[nbl->nci].shift = shift;
    /* Store the interaction flags along with the shift */
    nbl->ci[nbl->nci].shift       |= flags;
    nbl->ci[nbl->nci].cj_ind_start = nbl->ncj;
    nbl->ci[nbl->nci].cj_ind_end   = nbl->ncj;

/* Make a new sci entry at index nbl->nsci */
static void new_sci_entry(nbnxn_pairlist_t *nbl, int sci, int shift)
    if (nbl->nsci + 1 > nbl->sci_nalloc)
        nb_realloc_sci(nbl, nbl->nsci+1);
    nbl->sci[nbl->nsci].sci           = sci;
    nbl->sci[nbl->nsci].shift         = shift;
    nbl->sci[nbl->nsci].cj4_ind_start = nbl->ncj4;
    nbl->sci[nbl->nsci].cj4_ind_end   = nbl->ncj4;
/* Sort the simple j-list cj on exclusions.
 * Entries with exclusions will all be sorted to the beginning of the list.
 */
static void sort_cj_excl(nbnxn_cj_t *cj, int ncj,
                         nbnxn_list_work_t *work)
    if (ncj > work->cj_nalloc)
        work->cj_nalloc = over_alloc_large(ncj);
        srenew(work->cj, work->cj_nalloc);

    /* Make a list of the j-cells involving exclusions */
    for (int j = 0; j < ncj; j++)
        if (cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
            work->cj[jnew++] = cj[j];

    /* Avoid the work when there are no exclusions at all, or when only
     * the first entry has them (the list is then already sorted).
     */
    if (!((jnew == 0) ||
          (jnew == 1 && cj[0].excl != NBNXN_INTERACTION_MASK_ALL)))
        for (int j = 0; j < ncj; j++)
            if (cj[j].excl == NBNXN_INTERACTION_MASK_ALL)
                work->cj[jnew++] = cj[j];
        for (int j = 0; j < ncj; j++)
            cj[j] = work->cj[j];
2250 /* Close this simple list i entry */
2251 static void close_ci_entry_simple(nbnxn_pairlist_t *nbl)
2255 /* All content of the new ci entry has already been filled correctly;
2256 * we only need to increase the count here (for non-empty lists).
2258 jlen = nbl->ci[nbl->nci].cj_ind_end - nbl->ci[nbl->nci].cj_ind_start;
2261 sort_cj_excl(nbl->cj+nbl->ci[nbl->nci].cj_ind_start, jlen, nbl->work);
2263 /* The counts below are used for non-bonded pair/flop counts
2264 * and should therefore match the available kernel setups.
2266 if (!(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_COUL(0)))
2268 nbl->work->ncj_noq += jlen;
2270 else if ((nbl->ci[nbl->nci].shift & NBNXN_CI_HALF_LJ(0)) ||
2271 !(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_LJ(0)))
2273 nbl->work->ncj_hlj += jlen;
2280 /* Split sci entry for load balancing on the GPU.
2281 * Splitting ensures we have enough lists to fully utilize the whole GPU.
2282 * With progBal we generate progressively smaller lists, which improves
2283 * load balancing. As we only know the current count on our own thread,
2284 * we will need to estimate the current total amount of i-entries.
2285 * As the lists get concatenated later, this estimate depends
2286 * both on nthread and our own thread index.
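* As a rough sketch of the estimate computed below: thread 2 of 4 that
* has generated n pairs locally so far estimates the global progress
* as nsp_tot_est/2 + n.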
2288 static void split_sci_entry(nbnxn_pairlist_t *nbl,
2290 gmx_bool progBal, float nsp_tot_est,
2291 int thread, int nthread)
2294 int cj4_start, cj4_end, j4len;
2296 int nsp, nsp_sci, nsp_cj4, nsp_cj4_e, nsp_cj4_p;
2302 /* Estimate the total number of ci's of the nblist combined
2303 * over all threads using the target number of ci's.
2305 nsp_est = (nsp_tot_est*thread)/nthread + nbl->nci_tot;
2307 /* The first ci blocks should be larger, to avoid overhead.
2308 * The last ci blocks should be smaller, to improve load balancing.
2309 * The factor 3/2 makes the first block 3/2 times the target average
2310 * and ensures that the total number of blocks ends up equal to
2311 * that of equally sized blocks of size nsp_target_av.
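* For example, the resulting factor 1.5*nsp_tot_est/(nsp_est + nsp_tot_est)
* is 1.5 at nsp_est == 0, 1.0 at nsp_est == nsp_tot_est/2 and 0.75 at
* nsp_est == nsp_tot_est, so early blocks are large and later blocks
* progressively smaller.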
2313 nsp_max = static_cast<int>(nsp_target_av*(nsp_tot_est*1.5/(nsp_est + nsp_tot_est)));
2317 nsp_max = nsp_target_av;
2320 cj4_start = nbl->sci[nbl->nsci-1].cj4_ind_start;
2321 cj4_end = nbl->sci[nbl->nsci-1].cj4_ind_end;
2322 j4len = cj4_end - cj4_start;
2324 if (j4len > 1 && j4len*c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize > nsp_max)
2326 /* Remove the last ci entry and process the cj4's again */
2334 for (int cj4 = cj4_start; cj4 < cj4_end; cj4++)
2336 nsp_cj4_p = nsp_cj4;
2337 /* Count the number of cluster pairs in this cj4 group */
2339 for (int p = 0; p < c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize; p++)
2341 nsp_cj4 += (nbl->cj4[cj4].imei[0].imask >> p) & 1;
2344 /* If adding the current cj4 with nsp_cj4 pairs gets us further
2345 * away from our target nsp_max, split the list before this cj4.
2347 if (nsp > 0 && nsp_max - nsp < nsp + nsp_cj4 - nsp_max)
2349 /* Split the list at cj4 */
2350 nbl->sci[sci].cj4_ind_end = cj4;
2351 /* Create a new sci entry */
2354 if (nbl->nsci+1 > nbl->sci_nalloc)
2356 nb_realloc_sci(nbl, nbl->nsci+1);
2358 nbl->sci[sci].sci = nbl->sci[nbl->nsci-1].sci;
2359 nbl->sci[sci].shift = nbl->sci[nbl->nsci-1].shift;
2360 nbl->sci[sci].cj4_ind_start = cj4;
2362 nsp_cj4_e = nsp_cj4_p;
2368 /* Put the remaining cj4's in the last sci entry */
2369 nbl->sci[sci].cj4_ind_end = cj4_end;
2371 /* Possibly balance out the last two sci's
2372 * by moving the last cj4 of the second last sci.
2374 if (nsp_sci - nsp_cj4_e >= nsp + nsp_cj4_e)
2376 nbl->sci[sci-1].cj4_ind_end--;
2377 nbl->sci[sci].cj4_ind_start--;
2384 /* Close this super/sub list i entry */
2385 static void close_ci_entry_supersub(nbnxn_pairlist_t *nbl,
2387 gmx_bool progBal, float nsp_tot_est,
2388 int thread, int nthread)
2390 /* All content of the new ci entry has already been filled correctly;
2391 * we only need to increase the count here (for non-empty lists).
2393 int j4len = nbl->sci[nbl->nsci].cj4_ind_end - nbl->sci[nbl->nsci].cj4_ind_start;
2396 /* We can only have complete blocks of 4 j-entries in a list,
2397 * so round the count up before closing.
2399 nbl->ncj4 = (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize;
2400 nbl->work->cj_ind = nbl->ncj4*c_nbnxnGpuJgroupSize;
2406 /* Measure the size of the new entry and potentially split it */
2407 split_sci_entry(nbl, nsp_max_av, progBal, nsp_tot_est,
2413 /* Syncs the working array before adding another grid pair to the list */
2414 static void sync_work(nbnxn_pairlist_t *nbl)
2418 nbl->work->cj_ind = nbl->ncj4*c_nbnxnGpuJgroupSize;
2419 nbl->work->cj4_init = nbl->ncj4;
2423 /* Clears an nbnxn_pairlist_t data structure */
2424 static void clear_pairlist(nbnxn_pairlist_t *nbl)
2435 nbl->work->ncj_noq = 0;
2436 nbl->work->ncj_hlj = 0;
2439 /* Clears a group scheme pair list */
2440 static void clear_pairlist_fep(t_nblist *nl)
2444 if (nl->jindex == nullptr)
2446 snew(nl->jindex, 1);
2451 /* Sets a simple list i-cell bounding box, including PBC shift */
2452 static inline void set_icell_bb_simple(const nbnxn_bb_t *bb, int ci,
2453 real shx, real shy, real shz,
2456 bb_ci->lower[BB_X] = bb[ci].lower[BB_X] + shx;
2457 bb_ci->lower[BB_Y] = bb[ci].lower[BB_Y] + shy;
2458 bb_ci->lower[BB_Z] = bb[ci].lower[BB_Z] + shz;
2459 bb_ci->upper[BB_X] = bb[ci].upper[BB_X] + shx;
2460 bb_ci->upper[BB_Y] = bb[ci].upper[BB_Y] + shy;
2461 bb_ci->upper[BB_Z] = bb[ci].upper[BB_Z] + shz;
2465 /* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
2466 static void set_icell_bbxxxx_supersub(const float *bb, int ci,
2467 real shx, real shy, real shz,
2470 int ia = ci*(c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX;
2471 for (int m = 0; m < (c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX; m += NNBSBB_XXXX)
2473 for (int i = 0; i < STRIDE_PBB; i++)
2475 bb_ci[m+0*STRIDE_PBB+i] = bb[ia+m+0*STRIDE_PBB+i] + shx;
2476 bb_ci[m+1*STRIDE_PBB+i] = bb[ia+m+1*STRIDE_PBB+i] + shy;
2477 bb_ci[m+2*STRIDE_PBB+i] = bb[ia+m+2*STRIDE_PBB+i] + shz;
2478 bb_ci[m+3*STRIDE_PBB+i] = bb[ia+m+3*STRIDE_PBB+i] + shx;
2479 bb_ci[m+4*STRIDE_PBB+i] = bb[ia+m+4*STRIDE_PBB+i] + shy;
2480 bb_ci[m+5*STRIDE_PBB+i] = bb[ia+m+5*STRIDE_PBB+i] + shz;
2486 /* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
2487 gmx_unused static void set_icell_bb_supersub(const nbnxn_bb_t *bb, int ci,
2488 real shx, real shy, real shz,
2491 for (int i = 0; i < c_gpuNumClusterPerCell; i++)
2493 set_icell_bb_simple(bb, ci*c_gpuNumClusterPerCell+i,
2499 /* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
2500 static void icell_set_x_simple(int ci,
2501 real shx, real shy, real shz,
2502 int stride, const real *x,
2503 nbnxn_list_work_t *work)
2505 int ia = ci*NBNXN_CPU_CLUSTER_I_SIZE;
2507 for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE; i++)
2509 work->x_ci[i*STRIDE_XYZ+XX] = x[(ia+i)*stride+XX] + shx;
2510 work->x_ci[i*STRIDE_XYZ+YY] = x[(ia+i)*stride+YY] + shy;
2511 work->x_ci[i*STRIDE_XYZ+ZZ] = x[(ia+i)*stride+ZZ] + shz;
2515 /* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
2516 static void icell_set_x_supersub(int ci,
2517 real shx, real shy, real shz,
2518 int stride, const real *x,
2519 nbnxn_list_work_t *work)
2521 #if !GMX_SIMD4_HAVE_REAL
2523 real * x_ci = work->x_ci;
2525 int ia = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize;
2526 for (int i = 0; i < c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize; i++)
2528 x_ci[i*DIM + XX] = x[(ia+i)*stride + XX] + shx;
2529 x_ci[i*DIM + YY] = x[(ia+i)*stride + YY] + shy;
2530 x_ci[i*DIM + ZZ] = x[(ia+i)*stride + ZZ] + shz;
2533 #else /* !GMX_SIMD4_HAVE_REAL */
2535 real * x_ci = work->x_ci_simd;
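/* Pack the coordinates in xxxx/yyyy/zzzz order: for each group of
* GMX_SIMD4_WIDTH atoms store all x, then all y, then all z values
* contiguously, which matches the 4-wide SIMD loads used by the
* cluster-pair distance check.
*/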
2537 for (int si = 0; si < c_gpuNumClusterPerCell; si++)
2539 for (int i = 0; i < c_nbnxnGpuClusterSize; i += GMX_SIMD4_WIDTH)
2541 int io = si*c_nbnxnGpuClusterSize + i;
2542 int ia = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize + io;
2543 for (int j = 0; j < GMX_SIMD4_WIDTH; j++)
2545 x_ci[io*DIM + j + XX*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + XX] + shx;
2546 x_ci[io*DIM + j + YY*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + YY] + shy;
2547 x_ci[io*DIM + j + ZZ*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + ZZ] + shz;
2552 #endif /* !GMX_SIMD4_HAVE_REAL */
2555 static real minimum_subgrid_size_xy(const nbnxn_grid_t *grid)
2559 return std::min(grid->sx, grid->sy);
2563 return std::min(grid->sx/c_gpuNumClusterPerCellX,
2564 grid->sy/c_gpuNumClusterPerCellY);
2568 static real effective_buffer_1x1_vs_MxN(const nbnxn_grid_t *gridi,
2569 const nbnxn_grid_t *gridj)
2571 const real eff_1x1_buffer_fac_overest = 0.1;
2573 /* Determine an atom-pair list cut-off buffer size for atom pairs,
2574 * to be added to rlist (including buffer) used for MxN.
2575 * This is for converting an MxN list to a 1x1 list. This means we can't
2576 * use the normal buffer estimate, as we have an MxN list in which
2577 * some atom pairs beyond rlist are missing. We want to capture
2578 * the beneficial effect of buffering by extra pairs just outside rlist,
2579 * while removing the useless pairs that are further away from rlist.
2580 * (Also, the buffer could have been set manually rather than using the estimate.)
2581 * This buffer size is an overestimate.
2582 * We add 10% of the smallest grid sub-cell dimensions.
2583 * Note that the z-size differs per cell and we don't use this,
2584 * so we overestimate.
2585 * With PME, the 10% value gives a buffer that is somewhat larger
2586 * than the effective buffer with a tolerance of 0.005 kJ/mol/ps.
2587 * Smaller tolerances or using RF lead to a smaller effective buffer,
2588 * so 10% gives a safe overestimate.
2590 return eff_1x1_buffer_fac_overest*(minimum_subgrid_size_xy(gridi) +
2591 minimum_subgrid_size_xy(gridj));
2594 /* Clusters at the cut-off only increase rlist by 60% of their size */
2595 static real nbnxn_rlist_inc_outside_fac = 0.6;
2597 /* Due to the cluster size, the effective pair-list cut-off is longer than
2598 * that of a simple atom pair-list. This function gives the extra distance.
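* As an illustration only: at an atom density of 100 nm^-3 with i- and
* j-cluster sizes of 4, vol_inc_i = vol_inc_j = 3/100 nm^3, giving an
* increase of 0.6*cbrt(0.06), approximately 0.23 nm.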
2600 real nbnxn_get_rlist_effective_inc(int cluster_size_j, real atom_density)
2603 real vol_inc_i, vol_inc_j;
2605 /* We should get this from the setup, but currently it's the same for
2606 * all setups, including GPUs.
2608 cluster_size_i = NBNXN_CPU_CLUSTER_I_SIZE;
2610 vol_inc_i = (cluster_size_i - 1)/atom_density;
2611 vol_inc_j = (cluster_size_j - 1)/atom_density;
2613 return nbnxn_rlist_inc_outside_fac*std::cbrt(vol_inc_i + vol_inc_j);
2616 /* Estimates the interaction volume^2 for non-local interactions */
2617 static real nonlocal_vol2(const struct gmx_domdec_zones_t *zones, rvec ls, real r)
2625 /* Here we simply add up the interaction volume^2 contributions of the
2626 * 1, 2 or 3 non-home 1D-decomposition zones. As these volumes are not additive,
2627 * this is an overestimate, but it would only be significant in the limit
2628 * of small cells, where we anyhow need to split the lists into
2629 * as small parts as possible.
2632 for (int z = 0; z < zones->n; z++)
2634 if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
2639 for (int d = 0; d < DIM; d++)
2641 if (zones->shift[z][d] == 0)
2645 za *= zones->size[z].x1[d] - zones->size[z].x0[d];
2649 /* 4 octants of a sphere */
2650 vold_est = 0.25*M_PI*r*r*r*r;
2651 /* 4 quarter pie slices on the edges */
2652 vold_est += 4*cl*M_PI/6.0*r*r*r;
2653 /* One rectangular volume on a face */
2654 vold_est += ca*0.5*r*r;
2656 vol2_est_tot += vold_est*za;
2660 return vol2_est_tot;
2663 /* Estimates the average size of a full j-list for super/sub setup */
2664 static void get_nsubpair_target(const nbnxn_search_t nbs,
2667 int min_ci_balanced,
2668 int *nsubpair_target,
2669 float *nsubpair_tot_est)
2671 /* The target value of 36 seems to be the optimum for Kepler.
2672 * Maxwell is less sensitive to the exact value.
2674 const int nsubpair_target_min = 36;
2675 const nbnxn_grid_t *grid;
2677 real r_eff_sup, vol_est, nsp_est, nsp_est_nl;
2679 grid = &nbs->grid[0];
2681 /* We don't need to balance list sizes if:
2682 * - We didn't request balancing.
2683 * - The number of grid cells >= the number of lists requested,
2684 * since we will always generate at least #cells lists.
2685 * - We don't have any cells, since then there won't be any lists.
2687 if (min_ci_balanced <= 0 || grid->nc >= min_ci_balanced || grid->nc == 0)
2689 /* nsubpair_target==0 signals no balancing */
2690 *nsubpair_target = 0;
2691 *nsubpair_tot_est = 0;
2696 ls[XX] = (grid->c1[XX] - grid->c0[XX])/(grid->ncx*c_gpuNumClusterPerCellX);
2697 ls[YY] = (grid->c1[YY] - grid->c0[YY])/(grid->ncy*c_gpuNumClusterPerCellY);
2698 ls[ZZ] = grid->na_c/(grid->atom_density*ls[XX]*ls[YY]);
2700 /* The average length of the diagonal of a sub-cell */
2701 real diagonal = std::sqrt(ls[XX]*ls[XX] + ls[YY]*ls[YY] + ls[ZZ]*ls[ZZ]);
2703 /* The formulas below are a heuristic estimate of the average nsj per si */
2704 r_eff_sup = rlist + nbnxn_rlist_inc_outside_fac*gmx::square((grid->na_c - 1.0)/grid->na_c)*0.5*diagonal;
2706 if (!nbs->DomDec || nbs->zones->n == 1)
2713 gmx::square(grid->atom_density/grid->na_c)*
2714 nonlocal_vol2(nbs->zones, ls, r_eff_sup);
2719 /* Sub-cell interacts with itself */
2720 vol_est = ls[XX]*ls[YY]*ls[ZZ];
2721 /* 6/2 rectangular volume on the faces */
2722 vol_est += (ls[XX]*ls[YY] + ls[XX]*ls[ZZ] + ls[YY]*ls[ZZ])*r_eff_sup;
2723 /* 12/2 quarter pie slices on the edges */
2724 vol_est += 2*(ls[XX] + ls[YY] + ls[ZZ])*0.25*M_PI*gmx::square(r_eff_sup);
2725 /* 4 octants of a sphere */
2726 vol_est += 0.5*4.0/3.0*M_PI*gmx::power3(r_eff_sup);
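/* Note that these face, edge and corner terms are half of those of
* the full volume swept out when dilating the sub-cell by r_eff_sup,
* since each sub-cell pair should be counted only once.
*/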
2728 /* Estimate the number of cluster pairs as the local number of
2729 * clusters times the volume they interact with times the density.
2731 nsp_est = grid->nsubc_tot*vol_est*grid->atom_density/grid->na_c;
2733 /* Subtract the non-local pair count */
2734 nsp_est -= nsp_est_nl;
2736 /* For small cut-offs nsp_est will be an underestimate.
2737 * With DD nsp_est_nl is an overestimate so nsp_est can get negative.
2738 * So to avoid too small or negative nsp_est we set a minimum of
2739 * all cells interacting with all 3^3 direct neighbors (3^3-1)/2+1=14.
2740 * This might be a slight overestimate for small non-periodic groups of
2741 * atoms as will occur for a local domain with DD, but for small
2742 * groups of atoms we'll anyhow be limited by nsubpair_target_min,
2743 * so this overestimation will not matter.
2745 nsp_est = std::max(nsp_est, grid->nsubc_tot*static_cast<real>(14));
2749 fprintf(debug, "nsp_est local %5.1f non-local %5.1f\n",
2750 nsp_est, nsp_est_nl);
2755 nsp_est = nsp_est_nl;
2758 /* Thus the (average) maximum j-list size should be as follows.
2759 * Since there is overhead, we shouldn't make the lists too small
2760 * (and we can't chop up j-groups) so we use a minimum target size of 36.
2762 *nsubpair_target = std::max(nsubpair_target_min,
2763 static_cast<int>(nsp_est/min_ci_balanced + 0.5));
2764 *nsubpair_tot_est = static_cast<int>(nsp_est);
2768 fprintf(debug, "nbl nsp estimate %.1f, nsubpair_target %d\n",
2769 nsp_est, *nsubpair_target);
2773 /* Debug list print function */
2774 static void print_nblist_ci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
2776 for (int i = 0; i < nbl->nci; i++)
2778 fprintf(fp, "ci %4d shift %2d ncj %3d\n",
2779 nbl->ci[i].ci, nbl->ci[i].shift,
2780 nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start);
2782 for (int j = nbl->ci[i].cj_ind_start; j < nbl->ci[i].cj_ind_end; j++)
2784 fprintf(fp, " cj %5d imask %x\n",
2791 /* Debug list print function */
2792 static void print_nblist_sci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
2794 for (int i = 0; i < nbl->nsci; i++)
2796 fprintf(fp, "ci %4d shift %2d ncj4 %2d\n",
2797 nbl->sci[i].sci, nbl->sci[i].shift,
2798 nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start);
2801 for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
2803 for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
2805 fprintf(fp, " sj %5d imask %x\n",
2807 nbl->cj4[j4].imei[0].imask);
2808 for (int si = 0; si < c_gpuNumClusterPerCell; si++)
2810 if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))
2817 fprintf(fp, "ci %4d shift %2d ncj4 %2d ncp %3d\n",
2818 nbl->sci[i].sci, nbl->sci[i].shift,
2819 nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start,
2824 /* Combine the pair lists *nbl generated on multiple threads into nblc */
2825 static void combine_nblists(int nnbl, nbnxn_pairlist_t **nbl,
2826 nbnxn_pairlist_t *nblc)
2828 int nsci, ncj4, nexcl;
2832 gmx_incons("combine_nblists does not support simple lists");
2837 nexcl = nblc->nexcl;
2838 for (int i = 0; i < nnbl; i++)
2840 nsci += nbl[i]->nsci;
2841 ncj4 += nbl[i]->ncj4;
2842 nexcl += nbl[i]->nexcl;
2845 if (nsci > nblc->sci_nalloc)
2847 nb_realloc_sci(nblc, nsci);
2849 if (ncj4 > nblc->cj4_nalloc)
2851 nblc->cj4_nalloc = over_alloc_small(ncj4);
2852 nbnxn_realloc_void((void **)&nblc->cj4,
2853 nblc->ncj4*sizeof(*nblc->cj4),
2854 nblc->cj4_nalloc*sizeof(*nblc->cj4),
2855 nblc->alloc, nblc->free);
2857 if (nexcl > nblc->excl_nalloc)
2859 nblc->excl_nalloc = over_alloc_small(nexcl);
2860 nbnxn_realloc_void((void **)&nblc->excl,
2861 nblc->nexcl*sizeof(*nblc->excl),
2862 nblc->excl_nalloc*sizeof(*nblc->excl),
2863 nblc->alloc, nblc->free);
2866 /* Each thread should copy its own data to the combined arrays,
2867 * as otherwise data will go back and forth between different caches.
2869 #if GMX_OPENMP && !(defined __clang_analyzer__)
2870 // cppcheck-suppress unreadVariable
2871 int nthreads = gmx_omp_nthreads_get(emntPairsearch);
2874 #pragma omp parallel for num_threads(nthreads) schedule(static)
2875 for (int n = 0; n < nnbl; n++)
2882 const nbnxn_pairlist_t *nbli;
2884 /* Determine the offset in the combined data for our thread */
2885 sci_offset = nblc->nsci;
2886 cj4_offset = nblc->ncj4;
2887 excl_offset = nblc->nexcl;
2889 for (int i = 0; i < n; i++)
2891 sci_offset += nbl[i]->nsci;
2892 cj4_offset += nbl[i]->ncj4;
2893 excl_offset += nbl[i]->nexcl;
2898 for (int i = 0; i < nbli->nsci; i++)
2900 nblc->sci[sci_offset+i] = nbli->sci[i];
2901 nblc->sci[sci_offset+i].cj4_ind_start += cj4_offset;
2902 nblc->sci[sci_offset+i].cj4_ind_end += cj4_offset;
2905 for (int j4 = 0; j4 < nbli->ncj4; j4++)
2907 nblc->cj4[cj4_offset+j4] = nbli->cj4[j4];
2908 nblc->cj4[cj4_offset+j4].imei[0].excl_ind += excl_offset;
2909 nblc->cj4[cj4_offset+j4].imei[1].excl_ind += excl_offset;
2912 for (int j4 = 0; j4 < nbli->nexcl; j4++)
2914 nblc->excl[excl_offset+j4] = nbli->excl[j4];
2917 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2920 for (int n = 0; n < nnbl; n++)
2922 nblc->nsci += nbl[n]->nsci;
2923 nblc->ncj4 += nbl[n]->ncj4;
2924 nblc->nci_tot += nbl[n]->nci_tot;
2925 nblc->nexcl += nbl[n]->nexcl;
2929 static void balance_fep_lists(const nbnxn_search_t nbs,
2930 nbnxn_pairlist_set_t *nbl_lists)
2933 int nri_tot, nrj_tot, nrj_target;
2937 nnbl = nbl_lists->nnbl;
2941 /* Nothing to balance */
2945 /* Count the total i-lists and pairs */
2948 for (int th = 0; th < nnbl; th++)
2950 nri_tot += nbl_lists->nbl_fep[th]->nri;
2951 nrj_tot += nbl_lists->nbl_fep[th]->nrj;
2954 nrj_target = (nrj_tot + nnbl - 1)/nnbl;
2956 assert(gmx_omp_nthreads_get(emntNonbonded) == nnbl);
2958 #pragma omp parallel for schedule(static) num_threads(nnbl)
2959 for (int th = 0; th < nnbl; th++)
2965 nbl = nbs->work[th].nbl_fep;
2967 /* Note that here we allocate for the total size, instead of
2968 * a per-thread estimate (which is hard to obtain).
2970 if (nri_tot > nbl->maxnri)
2972 nbl->maxnri = over_alloc_large(nri_tot);
2973 reallocate_nblist(nbl);
2975 if (nri_tot > nbl->maxnri || nrj_tot > nbl->maxnrj)
2977 nbl->maxnrj = over_alloc_small(nrj_tot);
2978 srenew(nbl->jjnr, nbl->maxnrj);
2979 srenew(nbl->excl_fep, nbl->maxnrj);
2982 clear_pairlist_fep(nbl);
2984 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2987 /* Loop over the source lists and assign and copy i-entries */
2989 nbld = nbs->work[th_dest].nbl_fep;
2990 for (int th = 0; th < nnbl; th++)
2994 nbls = nbl_lists->nbl_fep[th];
2996 for (int i = 0; i < nbls->nri; i++)
3000 /* The number of pairs in this i-entry */
3001 nrj = nbls->jindex[i+1] - nbls->jindex[i];
3003 /* Decide if list th_dest is too large and we should proceed
3004 * to the next destination list: i.e. whether adding these nrj pairs would overshoot the target by more than we currently undershoot it.
3006 if (th_dest+1 < nnbl && nbld->nrj > 0 &&
3007 nbld->nrj + nrj - nrj_target > nrj_target - nbld->nrj)
3010 nbld = nbs->work[th_dest].nbl_fep;
3013 nbld->iinr[nbld->nri] = nbls->iinr[i];
3014 nbld->gid[nbld->nri] = nbls->gid[i];
3015 nbld->shift[nbld->nri] = nbls->shift[i];
3017 for (int j = nbls->jindex[i]; j < nbls->jindex[i+1]; j++)
3019 nbld->jjnr[nbld->nrj] = nbls->jjnr[j];
3020 nbld->excl_fep[nbld->nrj] = nbls->excl_fep[j];
3024 nbld->jindex[nbld->nri] = nbld->nrj;
3028 /* Swap the list pointers */
3029 for (int th = 0; th < nnbl; th++)
3033 nbl_tmp = nbl_lists->nbl_fep[th];
3034 nbl_lists->nbl_fep[th] = nbs->work[th].nbl_fep;
3035 nbs->work[th].nbl_fep = nbl_tmp;
3039 fprintf(debug, "nbl_fep[%d] nri %4d nrj %4d\n",
3041 nbl_lists->nbl_fep[th]->nri,
3042 nbl_lists->nbl_fep[th]->nrj);
3047 /* Returns the next ci to be processed by our thread */
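/* The cells are distributed block-cyclically over the threads: with,
* for example, nth == 2 and ci_block == 3, thread 0 handles cells
* 0-2, 6-8, ... and thread 1 handles cells 3-5, 9-11, ...
*/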
3048 static gmx_bool next_ci(const nbnxn_grid_t *grid,
3049 int nth, int ci_block,
3050 int *ci_x, int *ci_y,
3056 if (*ci_b == ci_block)
3058 /* Jump to the next block assigned to this task */
3059 *ci += (nth - 1)*ci_block;
3063 if (*ci >= grid->nc)
3068 while (*ci >= grid->cxy_ind[*ci_x*grid->ncy + *ci_y + 1])
3071 if (*ci_y == grid->ncy)
3081 /* Returns the distance^2 for which we put cell pairs in the list
3082 * without checking atom pair distances. This is usually < rlist^2.
3084 static float boundingbox_only_distance2(const nbnxn_grid_t *gridi,
3085 const nbnxn_grid_t *gridj,
3089 /* If the distance between two sub-cell bounding boxes is less
3090 * than this distance, do not check the distance between
3091 * all particle pairs in the sub-cell, since then it is likely
3092 * that the box pair has atom pairs within the cut-off.
3093 * We use the nblist cut-off minus 0.5 times the average x/y diagonal
3094 * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
3095 * Using more than 0.5 gains at most 0.5%.
3096 * If forces are calculated more than twice, the performance gain
3097 * in the force calculation outweighs the cost of checking.
3098 * Note that with subcell lists, the atom-pair distance check
3099 * is only performed when only 1 out of 8 sub-cells is within range;
3100 * this is because the GPU is much faster than the CPU.
3105 bbx = 0.5*(gridi->sx + gridj->sx);
3106 bby = 0.5*(gridi->sy + gridj->sy);
3109 bbx /= c_gpuNumClusterPerCellX;
3110 bby /= c_gpuNumClusterPerCellY;
3113 rbb2 = std::max(0.0, rlist - 0.5*std::sqrt(bbx*bbx + bby*bby));
3119 return (float)((1+GMX_FLOAT_EPS)*rbb2);
3123 static int get_ci_block_size(const nbnxn_grid_t *gridi,
3124 gmx_bool bDomDec, int nth)
3126 const int ci_block_enum = 5;
3127 const int ci_block_denom = 11;
3128 const int ci_block_min_atoms = 16;
3131 /* Here we decide how to distribute the blocks over the threads.
3132 * We use prime numbers to try to avoid that the grid size becomes
3133 * a multiple of the number of threads, which would lead to some
3134 * threads getting "inner" pairs and others getting boundary pairs,
3135 * which in turn would lead to load imbalance between threads.
3136 * Set the block size as 5/11/ntask times the average number of cells
3137 * in a y,z slab. This should ensure a quite uniform distribution
3138 * of the grid parts over the different threads along all three grid
3139 * zone boundaries with 3D domain decomposition. At the same time
3140 * the blocks will not become too small.
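* As an example, a grid of 2200 cells with ncx == 10 columns and
* 4 threads gives ci_block = 2200*5/(11*10*4) = 25 cells per block.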
3142 ci_block = (gridi->nc*ci_block_enum)/(ci_block_denom*gridi->ncx*nth);
3144 /* Ensure the blocks are not too small: avoids cache invalidation */
3145 if (ci_block*gridi->na_sc < ci_block_min_atoms)
3147 ci_block = (ci_block_min_atoms + gridi->na_sc - 1)/gridi->na_sc;
3150 /* Without domain decomposition
3151 * or with fewer than 3 blocks per task, divide into nth blocks.
3153 if (!bDomDec || nth*3*ci_block > gridi->nc)
3155 ci_block = (gridi->nc + nth - 1)/nth;
3158 if (ci_block > 1 && (nth - 1)*ci_block >= gridi->nc)
3160 /* Some threads have no work. Although reducing the block size
3161 * does not decrease the block count on the first few threads,
3162 * with GPUs better mixing of "upper" cells that have more empty
3163 * clusters results in a somewhat lower max load over all threads.
3164 * Without GPUs the regime of so few atoms per thread is less
3165 * performance relevant, but with 8-wide SIMD the same reasoning
3166 * applies, since the pair list uses 4 i-atom "sub-clusters".
3174 /* Returns the number of bits to right-shift a cluster index to obtain
3175 * the corresponding force buffer flag index.
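* For example, if NBNXN_BUFFERFLAG_SIZE is 16 atoms, clusters of 4 atoms
* give a shift of 2, so 4 consecutive clusters map to one flag block.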
3177 static int getBufferFlagShift(int numAtomsPerCluster)
3179 int bufferFlagShift = 0;
3180 while ((numAtomsPerCluster << bufferFlagShift) < NBNXN_BUFFERFLAG_SIZE)
3185 return bufferFlagShift;
3188 /* Generates the part of pair-list nbl assigned to our thread */
3189 static void nbnxn_make_pairlist_part(const nbnxn_search_t nbs,
3190 const nbnxn_grid_t *gridi,
3191 const nbnxn_grid_t *gridj,
3192 nbnxn_search_work_t *work,
3193 const nbnxn_atomdata_t *nbat,
3194 const t_blocka &exclusions,
3198 gmx_bool bFBufferFlag,
3201 float nsubpair_tot_est,
3203 nbnxn_pairlist_t *nbl,
3208 real rlist2, rl_fep2 = 0;
3210 int ci_b, ci, ci_x, ci_y, ci_xy, cj;
3215 const nbnxn_bb_t *bb_i = nullptr;
3217 const float *pbb_i = nullptr;
3219 const float *bbcz_i, *bbcz_j;
3221 real bx0, bx1, by0, by1, bz0, bz1;
3223 real d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
3224 int cxf, cxl, cyf, cyf_x, cyl;
3225 int numDistanceChecks;
3226 int gridi_flag_shift = 0, gridj_flag_shift = 0;
3227 gmx_bitmask_t *gridj_flag = nullptr;
3228 int ncj_old_i, ncj_old_j;
3230 nbs_cycle_start(&work->cc[enbsCCsearch]);
3232 if (gridj->bSimple != nbl->bSimple)
3234 gmx_incons("Grid incompatible with pair-list");
3238 nbl->na_sc = gridj->na_sc;
3239 nbl->na_ci = gridj->na_c;
3240 nbl->na_cj = nbnxn_kernel_to_cluster_j_size(nb_kernel_type);
3241 na_cj_2log = get_2log(nbl->na_cj);
3247 /* Determine conversion of clusters to flag blocks */
3248 gridi_flag_shift = getBufferFlagShift(nbl->na_ci);
3249 gridj_flag_shift = getBufferFlagShift(nbl->na_cj);
3251 gridj_flag = work->buffer_flags.flag;
3254 copy_mat(nbs->box, box);
3256 rlist2 = nbl->rlist*nbl->rlist;
3258 if (nbs->bFEP && !nbl->bSimple)
3260 /* Determine an atom-pair list cut-off distance for FEP atom pairs.
3261 * We should not simply use rlist, since then we would not have
3262 * the small, effective buffering of the NxN lists.
3263 * The buffer is an overestimate, but the resulting cost for pairs
3264 * beyond rlist is negligible compared to the FEP pairs within rlist.
3266 rl_fep2 = nbl->rlist + effective_buffer_1x1_vs_MxN(gridi, gridj);
3270 fprintf(debug, "nbl_fep atom-pair rlist %f\n", rl_fep2);
3272 rl_fep2 = rl_fep2*rl_fep2;
3275 rbb2 = boundingbox_only_distance2(gridi, gridj, nbl->rlist, nbl->bSimple);
3279 fprintf(debug, "nbl bounding box only distance %f\n", std::sqrt(rbb2));
3282 /* Set the shift range */
3283 for (int d = 0; d < DIM; d++)
3285 /* Check if we need periodicity shifts.
3286 * Without PBC or with domain decomposition we don't need them.
3288 if (d >= ePBC2npbcdim(nbs->ePBC) || nbs->dd_dim[d])
3295 box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < std::sqrt(rlist2))
3316 /* We use the normal bounding box format for both grid types */
3319 bbcz_i = gridi->bbcz;
3320 flags_i = gridi->flags;
3321 cell0_i = gridi->cell0;
3323 bbcz_j = gridj->bbcz;
3327 fprintf(debug, "nbl nc_i %d col.av. %.1f ci_block %d\n",
3328 gridi->nc, gridi->nc/(double)(gridi->ncx*gridi->ncy), ci_block);
3331 numDistanceChecks = 0;
3333 /* Initialize ci_b and ci to 1 before where we want them to start,
3334 * as they will both be incremented in next_ci.
3337 ci = th*ci_block - 1;
3340 while (next_ci(gridi, nth, ci_block, &ci_x, &ci_y, &ci_b, &ci))
3342 if (nbl->bSimple && flags_i[ci] == 0)
3347 ncj_old_i = nbl->ncj;
3350 if (gridj != gridi && shp[XX] == 0)
3354 bx1 = bb_i[ci].upper[BB_X];
3358 bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx;
3360 if (bx1 < gridj->c0[XX])
3362 d2cx = gmx::square(gridj->c0[XX] - bx1);
3371 ci_xy = ci_x*gridi->ncy + ci_y;
3373 /* Loop over shift vectors in three dimensions */
3374 for (int tz = -shp[ZZ]; tz <= shp[ZZ]; tz++)
3376 shz = tz*box[ZZ][ZZ];
3378 bz0 = bbcz_i[ci*NNBSBB_D ] + shz;
3379 bz1 = bbcz_i[ci*NNBSBB_D+1] + shz;
3387 d2z = gmx::square(bz1);
3391 d2z = gmx::square(bz0 - box[ZZ][ZZ]);
3394 d2z_cx = d2z + d2cx;
3396 if (d2z_cx >= rlist2)
3401 bz1_frac = bz1/(gridi->cxy_ind[ci_xy+1] - gridi->cxy_ind[ci_xy]);
3406 /* The check with bz1_frac close to or larger than 1 comes later */
3408 for (int ty = -shp[YY]; ty <= shp[YY]; ty++)
3410 shy = ty*box[YY][YY] + tz*box[ZZ][YY];
3414 by0 = bb_i[ci].lower[BB_Y] + shy;
3415 by1 = bb_i[ci].upper[BB_Y] + shy;
3419 by0 = gridi->c0[YY] + (ci_y )*gridi->sy + shy;
3420 by1 = gridi->c0[YY] + (ci_y+1)*gridi->sy + shy;
3423 get_cell_range(by0, by1,
3424 gridj->ncy, gridj->c0[YY], gridj->sy, gridj->inv_sy,
3434 if (by1 < gridj->c0[YY])
3436 d2z_cy += gmx::square(gridj->c0[YY] - by1);
3438 else if (by0 > gridj->c1[YY])
3440 d2z_cy += gmx::square(by0 - gridj->c1[YY]);
3443 for (int tx = -shp[XX]; tx <= shp[XX]; tx++)
3445 shift = XYZ2IS(tx, ty, tz);
3447 if (c_pbcShiftBackward && gridi == gridj && shift > CENTRAL)
3452 shx = tx*box[XX][XX] + ty*box[YY][XX] + tz*box[ZZ][XX];
3456 bx0 = bb_i[ci].lower[BB_X] + shx;
3457 bx1 = bb_i[ci].upper[BB_X] + shx;
3461 bx0 = gridi->c0[XX] + (ci_x )*gridi->sx + shx;
3462 bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx + shx;
3465 get_cell_range(bx0, bx1,
3466 gridj->ncx, gridj->c0[XX], gridj->sx, gridj->inv_sx,
3477 new_ci_entry(nbl, cell0_i+ci, shift, flags_i[ci]);
3481 new_sci_entry(nbl, cell0_i+ci, shift);
3484 if ((!c_pbcShiftBackward || (shift == CENTRAL &&
3488 /* Leave the pairs with i > j.
3489 * x is the major index, so skip half of it.
3496 set_icell_bb_simple(bb_i, ci, shx, shy, shz,
3502 set_icell_bbxxxx_supersub(pbb_i, ci, shx, shy, shz,
3505 set_icell_bb_supersub(bb_i, ci, shx, shy, shz,
3510 nbs->icell_set_x(cell0_i+ci, shx, shy, shz,
3511 nbat->xstride, nbat->x,
3514 for (int cx = cxf; cx <= cxl; cx++)
3517 if (gridj->c0[XX] + cx*gridj->sx > bx1)
3519 d2zx += gmx::square(gridj->c0[XX] + cx*gridj->sx - bx1);
3521 else if (gridj->c0[XX] + (cx+1)*gridj->sx < bx0)
3523 d2zx += gmx::square(gridj->c0[XX] + (cx+1)*gridj->sx - bx0);
3526 if (gridi == gridj &&
3528 (!c_pbcShiftBackward || shift == CENTRAL) &&
3531 /* Leave the pairs with i > j.
3532 * Skip half of y when i and j have the same x.
3541 for (int cy = cyf_x; cy <= cyl; cy++)
3543 const int columnStart = gridj->cxy_ind[cx*gridj->ncy + cy];
3544 const int columnEnd = gridj->cxy_ind[cx*gridj->ncy + cy + 1];
3547 if (gridj->c0[YY] + cy*gridj->sy > by1)
3549 d2zxy += gmx::square(gridj->c0[YY] + cy*gridj->sy - by1);
3551 else if (gridj->c0[YY] + (cy+1)*gridj->sy < by0)
3553 d2zxy += gmx::square(gridj->c0[YY] + (cy+1)*gridj->sy - by0);
3555 if (columnStart < columnEnd && d2zxy < rlist2)
3557 /* To improve efficiency in the common case
3558 * of a homogeneous particle distribution,
3559 * we estimate the index of the middle cell
3560 * in range (midCell). We search down and up
3561 * starting from this index.
3563 * Note that the bbcz_j array contains bounds
3564 * for i-clusters, thus for clusters of 4 atoms.
3565 * For the common case where the j-cluster size
3566 * is 8, we could step with a stride of 2,
3567 * but we do not do this because it would
3568 * complicate this code even more.
3570 int midCell = columnStart + static_cast<int>(bz1_frac*(columnEnd - columnStart));
3571 if (midCell >= columnEnd)
3573 midCell = columnEnd - 1;
3578 /* Find the lowest cell that can possibly
3580 * Check if we hit the bottom of the grid,
3581 * if the j-cell is below the i-cell and if so,
3582 * if it is within range.
3584 int downTestCell = midCell;
3585 while (downTestCell >= columnStart &&
3586 (bbcz_j[downTestCell*NNBSBB_D + 1] >= bz0 ||
3587 d2xy + gmx::square(bbcz_j[downTestCell*NNBSBB_D + 1] - bz0) < rlist2))
3591 int firstCell = downTestCell + 1;
3593 /* Find the highest cell that can possibly
3595 * Check if we hit the top of the grid,
3596 * if the j-cell is above the i-cell and if so,
3597 * if it is within range.
3599 int upTestCell = midCell + 1;
3600 while (upTestCell < columnEnd &&
3601 (bbcz_j[upTestCell*NNBSBB_D] <= bz1 ||
3602 d2xy + gmx::square(bbcz_j[upTestCell*NNBSBB_D] - bz1) < rlist2))
3606 int lastCell = upTestCell - 1;
3608 #define NBNXN_REFCODE 0
3611 /* Simple reference code, for debugging,
3612 * overrides the more complex code above.
3614 firstCell = columnEnd;
3616 for (int k = columnStart; k < columnEnd; k++)
3618 if (d2xy + gmx::square(bbcz_j[k*NNBSBB_D + 1] - bz0) < rlist2 &&
3623 if (d2xy + gmx::square(bbcz_j[k*NNBSBB_D] - bz1) < rlist2 &&
3634 /* We want each atom/cell pair only once,
3635 * so only use cj >= ci.
3637 if (!c_pbcShiftBackward || shift == CENTRAL)
3639 firstCell = std::max(firstCell, ci);
3643 if (firstCell <= lastCell)
3645 GMX_ASSERT(firstCell >= columnStart && lastCell < columnEnd, "The range should reside within the current grid column");
3647 /* For f buffer flags with simple lists */
3648 ncj_old_j = nbl->ncj;
3652 /* We have a maximum of 2 j-clusters
3653 * per i-cluster sized cell.
3655 check_cell_list_space_simple(nbl, 2*(lastCell - firstCell + 1));
3659 check_cell_list_space_supersub(nbl, lastCell - firstCell + 1);
3662 switch (nb_kernel_type)
3664 case nbnxnk4x4_PlainC:
3665 makeClusterListSimple(gridj,
3666 nbl, ci, firstCell, lastCell,
3667 (gridi == gridj && shift == CENTRAL),
3670 &numDistanceChecks);
3672 #ifdef GMX_NBNXN_SIMD_4XN
3673 case nbnxnk4xN_SIMD_4xN:
3674 makeClusterListSimd4xn(gridj,
3675 nbl, ci, firstCell, lastCell,
3676 (gridi == gridj && shift == CENTRAL),
3679 &numDistanceChecks);
3682 #ifdef GMX_NBNXN_SIMD_2XNN
3683 case nbnxnk4xN_SIMD_2xNN:
3684 makeClusterListSimd2xnn(gridj,
3685 nbl, ci, firstCell, lastCell,
3686 (gridi == gridj && shift == CENTRAL),
3689 &numDistanceChecks);
3692 case nbnxnk8x8x8_PlainC:
3693 case nbnxnk8x8x8_GPU:
3694 for (cj = firstCell; cj <= lastCell; cj++)
3696 make_cluster_list_supersub(gridi, gridj,
3698 (gridi == gridj && shift == CENTRAL && ci == cj),
3699 nbat->xstride, nbat->x,
3701 &numDistanceChecks);
3706 if (bFBufferFlag && nbl->ncj > ncj_old_j)
3708 int cbf = nbl->cj[ncj_old_j].cj >> gridj_flag_shift;
3709 int cbl = nbl->cj[nbl->ncj-1].cj >> gridj_flag_shift;
3710 for (int cb = cbf; cb <= cbl; cb++)
3712 bitmask_init_bit(&gridj_flag[cb], th);
3716 nbl->ncjInUse += nbl->ncj - ncj_old_j;
3722 /* Set the exclusions for this ci list */
3725 setExclusionsForSimpleIentry(nbs,
3727 shift == CENTRAL && gridi == gridj,
3734 make_fep_list(nbs, nbat, nbl,
3735 shift == CENTRAL && gridi == gridj,
3736 &(nbl->ci[nbl->nci]),
3737 gridi, gridj, nbl_fep);
3742 setExclusionsForGpuIentry(nbs,
3744 shift == CENTRAL && gridi == gridj,
3745 nbl->sci[nbl->nsci],
3750 make_fep_list_supersub(nbs, nbat, nbl,
3751 shift == CENTRAL && gridi == gridj,
3752 &(nbl->sci[nbl->nsci]),
3755 gridi, gridj, nbl_fep);
3759 /* Close this ci list */
3762 close_ci_entry_simple(nbl);
3766 close_ci_entry_supersub(nbl,
3768 progBal, nsubpair_tot_est,
3775 if (bFBufferFlag && nbl->ncj > ncj_old_i)
3777 bitmask_init_bit(&(work->buffer_flags.flag[(gridi->cell0+ci)>>gridi_flag_shift]), th);
3781 work->ndistc = numDistanceChecks;
3783 nbs_cycle_stop(&work->cc[enbsCCsearch]);
3785 GMX_ASSERT(nbl->ncjInUse == nbl->ncj || nbs->bFEP, "Without free-energy all cj pair-list entries should be in use. Note that subsequent code does not make use of the equality, this check is only here to catch bugs");
3789 fprintf(debug, "number of distance checks %d\n", numDistanceChecks);
3793 print_nblist_statistics_simple(debug, nbl, nbs, rlist);
3797 print_nblist_statistics_supersub(debug, nbl, nbs, rlist);
3802 fprintf(debug, "nbl FEP list pairs: %d\n", nbl_fep->nrj);
3807 static void reduce_buffer_flags(const nbnxn_search_t nbs,
3809 const nbnxn_buffer_flags_t *dest)
3811 for (int s = 0; s < nsrc; s++)
3813 gmx_bitmask_t * flag = nbs->work[s].buffer_flags.flag;
3815 for (int b = 0; b < dest->nflag; b++)
3817 bitmask_union(&(dest->flag[b]), flag[b]);
3822 static void print_reduction_cost(const nbnxn_buffer_flags_t *flags, int nout)
3824 int nelem, nkeep, ncopy, nred, out;
3825 gmx_bitmask_t mask_0;
3831 bitmask_init_bit(&mask_0, 0);
3832 for (int b = 0; b < flags->nflag; b++)
3834 if (bitmask_is_equal(flags->flag[b], mask_0))
3836 /* Only flag 0 is set, no copy or reduction required */
3840 else if (!bitmask_is_zero(flags->flag[b]))
3843 for (out = 0; out < nout; out++)
3845 if (bitmask_is_set(flags->flag[b], out))
3862 fprintf(debug, "nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
3864 nelem/(double)(flags->nflag),
3865 nkeep/(double)(flags->nflag),
3866 ncopy/(double)(flags->nflag),
3867 nred/(double)(flags->nflag));
3870 /* Copies the list entries from src to dest when cjStart <= *cjGlobal < cjEnd.
3871 * *cjGlobal is updated with the cj count in src.
3872 * When setFlags==true, flag bit t is set in flag for all i and j clusters.
3874 template<bool setFlags>
3875 static void copySelectedListRange(const nbnxn_ci_t * gmx_restrict srcCi,
3876 const nbnxn_pairlist_t * gmx_restrict src,
3877 nbnxn_pairlist_t * gmx_restrict dest,
3878 gmx_bitmask_t *flag,
3879 int iFlagShift, int jFlagShift, int t)
3881 int ncj = srcCi->cj_ind_end - srcCi->cj_ind_start;
3883 if (dest->nci + 1 >= dest->ci_nalloc)
3885 nb_realloc_ci(dest, dest->nci + 1);
3887 check_cell_list_space_simple(dest, ncj);
3889 dest->ci[dest->nci] = *srcCi;
3890 dest->ci[dest->nci].cj_ind_start = dest->ncj;
3891 dest->ci[dest->nci].cj_ind_end = dest->ncj + ncj;
3895 bitmask_init_bit(&flag[srcCi->ci >> iFlagShift], t);
3898 for (int j = srcCi->cj_ind_start; j < srcCi->cj_ind_end; j++)
3900 dest->cj[dest->ncj++] = src->cj[j];
3904 /* NOTE: This is relatively expensive, since this
3905 * operation is done for all elements in the list,
3906 * whereas at list generation this is done only
3907 * once for each flag entry.
3909 bitmask_init_bit(&flag[src->cj[j].cj >> jFlagShift], t);
3916 /* This routine re-balances the pairlists such that all are nearly equally
3917 * sized. Only whole i-entries are moved between lists. These are moved
3918 * between the ends of the lists, such that the buffer reduction cost should
3919 * not change significantly.
3920 * Note that all original reduction flags are currently kept. This can lead
3921 * to reduction of parts of the force buffer that could be avoided. But since
3922 * the original lists are quite balanced, this will only give minor overhead.
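* Each destination list t receives the i-entries whose running global
* cj index falls in [t*ncjTarget, (t+1)*ncjTarget), with ncjTarget the
* total cj count divided, rounded up, over the number of lists.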
3924 static void rebalanceSimpleLists(int numLists,
3925 nbnxn_pairlist_t * const * const srcSet,
3926 nbnxn_pairlist_t **destSet,
3927 nbnxn_search_work_t *searchWork)
3930 for (int s = 0; s < numLists; s++)
3932 ncjTotal += srcSet[s]->ncjInUse;
3934 int ncjTarget = (ncjTotal + numLists - 1)/numLists;
3936 #pragma omp parallel num_threads(numLists)
3938 int t = gmx_omp_get_thread_num();
3940 int cjStart = ncjTarget* t;
3941 int cjEnd = ncjTarget*(t + 1);
3943 /* The destination pair-list for task/thread t */
3944 nbnxn_pairlist_t *dest = destSet[t];
3946 clear_pairlist(dest);
3947 dest->bSimple = srcSet[0]->bSimple;
3948 dest->na_ci = srcSet[0]->na_ci;
3949 dest->na_cj = srcSet[0]->na_cj;
3951 /* Note that the flags in the work struct (still) contain flags
3952 * for all entries that are present in srcSet->nbl[t].
3954 gmx_bitmask_t *flag = searchWork[t].buffer_flags.flag;
3956 int iFlagShift = getBufferFlagShift(dest->na_ci);
3957 int jFlagShift = getBufferFlagShift(dest->na_cj);
3960 for (int s = 0; s < numLists && cjGlobal < cjEnd; s++)
3962 const nbnxn_pairlist_t *src = srcSet[s];
3964 if (cjGlobal + src->ncjInUse > cjStart)
3966 for (int i = 0; i < src->nci && cjGlobal < cjEnd; i++)
3968 const nbnxn_ci_t *srcCi = &src->ci[i];
3969 int ncj = srcCi->cj_ind_end - srcCi->cj_ind_start;
3970 if (cjGlobal >= cjStart)
3972 /* If the source list is not our own, we need to set
3973 * extra flags (the template bool parameter).
3977 copySelectedListRange
3980 flag, iFlagShift, jFlagShift, t);
3984 copySelectedListRange
3987 dest, flag, iFlagShift, jFlagShift, t);
3995 cjGlobal += src->ncjInUse;
3999 dest->ncjInUse = dest->ncj;
4003 int ncjTotalNew = 0;
4004 for (int s = 0; s < numLists; s++)
4006 ncjTotalNew += destSet[s]->ncjInUse;
4008 GMX_RELEASE_ASSERT(ncjTotalNew == ncjTotal, "The total size of the lists before and after rebalancing should match");
4012 /* Returns whether the pairlists are so imbalanced that it is worth rebalancing. */
4013 static bool checkRebalanceSimpleLists(const nbnxn_pairlist_set_t *listSet)
4015 int numLists = listSet->nnbl;
4018 for (int s = 0; s < numLists; s++)
4020 ncjMax = std::max(ncjMax, listSet->nbl[s]->ncjInUse);
4021 ncjTotal += listSet->nbl[s]->ncjInUse;
4025 fprintf(debug, "Pair-list ncjMax %d ncjTotal %d\n", ncjMax, ncjTotal);
4027 /* The rebalancing adds 3% extra time to the search. Heuristically we
4028 * determined that under common conditions the non-bonded kernel balance
4029 * improvement will outweigh this when the imbalance is more than 3%.
4030 * But this will, obviously, depend on search vs kernel time and nstlist.
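* For example, 4 lists with 110, 100, 100 and 90 cj entries give
* 4*110 = 440 > 400*1.03 = 412, so they get rebalanced.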
4032 const real rebalanceTolerance = 1.03;
4034 return numLists*ncjMax > ncjTotal*rebalanceTolerance;
4037 /* Perform a count (linear) sort to sort the smaller lists to the end.
4038 * This avoids load imbalance on the GPU, as large lists will be
4039 * scheduled and executed first and the smaller lists later.
4040 * Load balancing between multi-processors only happens at the end
4041 * and there smaller lists lead to more effective load balancing.
4042 * The sorting is done on the cj4 count, not on the actual pair counts.
4043 * Not only does this make the sort faster, but it also results in
4044 * better load balancing than using a list sorted on exact load.
4045 * This function swaps the pointer in the pair list to avoid a copy operation.
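* For example, sci entries with cj4 counts {1, 3, 2, 3} are emitted in
* the order {3, 3, 2, 1}, with the original order preserved within each
* size bucket.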
4047 static void sort_sci(nbnxn_pairlist_t *nbl)
4049 nbnxn_list_work_t *work;
4051 nbnxn_sci_t *sci_sort;
4053 if (nbl->ncj4 <= nbl->nsci)
4055 /* nsci = 0 or all sci have size 1, sorting won't change the order */
4061 /* We will distinguish differences up to double the average */
4062 m = (2*nbl->ncj4)/nbl->nsci;
4064 if (m + 1 > work->sort_nalloc)
4066 work->sort_nalloc = over_alloc_large(m + 1);
4067 srenew(work->sort, work->sort_nalloc);
4070 if (work->sci_sort_nalloc != nbl->sci_nalloc)
4072 work->sci_sort_nalloc = nbl->sci_nalloc;
4073 nbnxn_realloc_void((void **)&work->sci_sort,
4075 work->sci_sort_nalloc*sizeof(*work->sci_sort),
4076 nbl->alloc, nbl->free);
4079 /* Count the entries of each size */
4080 for (int i = 0; i <= m; i++)
4084 for (int s = 0; s < nbl->nsci; s++)
4086 int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
4089 /* Calculate the offset for each count */
4092 for (int i = m - 1; i >= 0; i--)
4095 work->sort[i] = work->sort[i + 1] + s0;
4099 /* Sort entries directly into place */
4100 sci_sort = work->sci_sort;
4101 for (int s = 0; s < nbl->nsci; s++)
4103 int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
4104 sci_sort[work->sort[i]++] = nbl->sci[s];
4107 /* Swap the sci pointers so we use the new, sorted list */
4108 work->sci_sort = nbl->sci;
4109 nbl->sci = sci_sort;
4112 /* Make a local or non-local pair-list, depending on iloc */
4113 void nbnxn_make_pairlist(const nbnxn_search_t nbs,
4114 nbnxn_atomdata_t *nbat,
4115 const t_blocka *excl,
4117 int min_ci_balanced,
4118 nbnxn_pairlist_set_t *nbl_list,
4123 nbnxn_grid_t *gridi, *gridj;
4125 int nsubpair_target;
4126 float nsubpair_tot_est;
4128 nbnxn_pairlist_t **nbl;
4130 gmx_bool CombineNBLists;
4132 int np_tot, np_noq, np_hlj, nap;
4134 nnbl = nbl_list->nnbl;
4135 nbl = nbl_list->nbl;
4136 CombineNBLists = nbl_list->bCombined;
4140 fprintf(debug, "ns making %d nblists\n", nnbl);
4143 nbat->bUseBufferFlags = (nbat->nout > 1);
4144 /* We should re-init the flags before making the first list */
4145 if (nbat->bUseBufferFlags && LOCAL_I(iloc))
4147 init_buffer_flags(&nbat->buffer_flags, nbat->natoms);
4150 if (nbl_list->bSimple)
4153 switch (nb_kernel_type)
4155 #ifdef GMX_NBNXN_SIMD_4XN
4156 case nbnxnk4xN_SIMD_4xN:
4157 nbs->icell_set_x = icell_set_x_simd_4xn;
4160 #ifdef GMX_NBNXN_SIMD_2XNN
4161 case nbnxnk4xN_SIMD_2xNN:
4162 nbs->icell_set_x = icell_set_x_simd_2xnn;
4166 nbs->icell_set_x = icell_set_x_simple;
4170 /* MSVC 2013 complains about switch statements without case */
4171 nbs->icell_set_x = icell_set_x_simple;
4176 nbs->icell_set_x = icell_set_x_supersub;
4181 /* Only zone (grid) 0 vs 0 */
4188 nzi = nbs->zones->nizone;
4191 if (!nbl_list->bSimple && min_ci_balanced > 0)
4193 get_nsubpair_target(nbs, iloc, rlist, min_ci_balanced,
4194 &nsubpair_target, &nsubpair_tot_est);
4198 nsubpair_target = 0;
4199 nsubpair_tot_est = 0;
4202 /* Clear all pair-lists */
4203 for (int th = 0; th < nnbl; th++)
4205 clear_pairlist(nbl[th]);
4209 clear_pairlist_fep(nbl_list->nbl_fep[th]);
4213 for (int zi = 0; zi < nzi; zi++)
4215 gridi = &nbs->grid[zi];
4217 if (NONLOCAL_I(iloc))
4219 zj0 = nbs->zones->izone[zi].j0;
4220 zj1 = nbs->zones->izone[zi].j1;
4226 for (int zj = zj0; zj < zj1; zj++)
4228 gridj = &nbs->grid[zj];
4232 fprintf(debug, "ns search grid %d vs %d\n", zi, zj);
4235 nbs_cycle_start(&nbs->cc[enbsCCsearch]);
4237 ci_block = get_ci_block_size(gridi, nbs->DomDec, nnbl);
4239 /* With GPUs: generate progressively smaller lists for better
4240 * load balancing, for local lists only or non-local with 2 zones.
4242 progBal = (LOCAL_I(iloc) || nbs->zones->n <= 2);
4244 #pragma omp parallel for num_threads(nnbl) schedule(static)
4245 for (int th = 0; th < nnbl; th++)
4249 /* Re-init the thread-local work flag data before making
4250 * the first list (not an elegant conditional).
4252 if (nbat->bUseBufferFlags && ((zi == 0 && zj == 0)))
4254 init_buffer_flags(&nbs->work[th].buffer_flags, nbat->natoms);
4257 if (CombineNBLists && th > 0)
4259 clear_pairlist(nbl[th]);
4262 /* Divide the i super cells equally over the nblists */
4263 nbnxn_make_pairlist_part(nbs, gridi, gridj,
4264 &nbs->work[th], nbat, *excl,
4268 nbat->bUseBufferFlags,
4270 progBal, nsubpair_tot_est,
4273 nbl_list->nbl_fep[th]);
4275 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
4277 nbs_cycle_stop(&nbs->cc[enbsCCsearch]);
4282 for (int th = 0; th < nnbl; th++)
4284 inc_nrnb(nrnb, eNR_NBNXN_DIST2, nbs->work[th].ndistc);
4286 if (nbl_list->bSimple)
4288 np_tot += nbl[th]->ncj;
4289 np_noq += nbl[th]->work->ncj_noq;
4290 np_hlj += nbl[th]->work->ncj_hlj;
4294 /* This count ignores potential subsequent pair pruning */
4295 np_tot += nbl[th]->nci_tot;
4298 nap = nbl[0]->na_ci*nbl[0]->na_cj;
4299 nbl_list->natpair_ljq = (np_tot - np_noq)*nap - np_hlj*nap/2;
4300 nbl_list->natpair_lj = np_noq*nap;
4301 nbl_list->natpair_q = np_hlj*nap/2;
4303 if (CombineNBLists && nnbl > 1)
4305 nbs_cycle_start(&nbs->cc[enbsCCcombine]);
4307 combine_nblists(nnbl-1, nbl+1, nbl[0]);
4309 nbs_cycle_stop(&nbs->cc[enbsCCcombine]);
4314 if (nbl_list->bSimple)
4316 if (nnbl > 1 && checkRebalanceSimpleLists(nbl_list))
4318 rebalanceSimpleLists(nbl_list->nnbl, nbl_list->nbl, nbl_list->nbl_work, nbs->work);
4320 /* Swap the pointers of the two sets of pair lists */
4321 nbnxn_pairlist_t **tmp = nbl_list->nbl;
4322 nbl_list->nbl = nbl_list->nbl_work;
4323 nbl_list->nbl_work = tmp;
4328 /* Sort the entries on size, large ones first */
4329 if (CombineNBLists || nnbl == 1)
4335 #pragma omp parallel for num_threads(nnbl) schedule(static)
4336 for (int th = 0; th < nnbl; th++)
4342 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
4347 if (nbat->bUseBufferFlags)
4349 reduce_buffer_flags(nbs, nbl_list->nnbl, &nbat->buffer_flags);
4354 /* Balance the free-energy lists over all the threads */
4355 balance_fep_lists(nbs, nbl_list);
4358 /* This is a fresh list, so not pruned, stored using ci and nci.
4359 * ciOuter and nciOuter are invalid at this point.
4361 GMX_ASSERT(nbl_list->nbl[0]->nciOuter == -1, "nciOuter should have been set to -1 to signal that it is invalid");
4363 /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
4366 nbs->search_count++;
4368 if (nbs->print_cycles &&
4369 (!nbs->DomDec || !LOCAL_I(iloc)) &&
4370 nbs->search_count % 100 == 0)
4372 nbs_cycle_print(stderr, nbs);
4375 /* If we have more than one list, they were either rebalanced (CPU)
4376 * or combined (GPU), so we should dump the final result to debug.
4378 if (debug && nbl_list->nnbl > 1)
4380 if (nbl_list->bSimple)
4382 for (int t = 0; t < nbl_list->nnbl; t++)
4384 print_nblist_statistics_simple(debug, nbl_list->nbl[t], nbs, rlist);
4389 print_nblist_statistics_supersub(debug, nbl_list->nbl[0], nbs, rlist);
4397 if (nbl_list->bSimple)
4399 for (int t = 0; t < nbl_list->nnbl; t++)
4401 print_nblist_ci_cj(debug, nbl_list->nbl[t]);
4406 print_nblist_sci_cj(debug, nbl_list->nbl[0]);
4410 if (nbat->bUseBufferFlags)
4412 print_reduction_cost(&nbat->buffer_flags, nbl_list->nnbl);
4417 void nbnxnPrepareListForDynamicPruning(nbnxn_pairlist_set_t *listSet)
4419 /* TODO: Restructure the lists so we have actual outer and inner
4420 * list objects so we can set a single pointer instead of
4421 * swapping several pointers.
4424 for (int i = 0; i < listSet->nnbl; i++)
4426 /* The search produced a list in ci/cj.
4427 * Swap the list pointers so that the outer list is in ciOuter,cjOuter
4428 * and we can prune that to get an inner list in ci/cj.
4430 nbnxn_pairlist_t *list = listSet->nbl[i];
4431 list->nciOuter = list->nci;
4433 nbnxn_ci_t *ciTmp = list->ciOuter;
4434 list->ciOuter = list->ci;
4437 nbnxn_cj_t *cjTmp = list->cjOuter;
4438 list->cjOuter = list->cj;
4441 /* Signal that this inner list is currently invalid */