/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "nbnxn_atomdata.h"

#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <cmath>

#include "thread_mpi/atomic.h"

#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_internal.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/nbnxn_util.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/simd/simd.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"

using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
/* Default nbnxn allocation routine, allocates NBNXN_MEM_ALIGN byte aligned */
void nbnxn_alloc_aligned(void **ptr, size_t nbytes)
{
    *ptr = save_malloc_aligned("ptr", __FILE__, __LINE__, nbytes, 1, NBNXN_MEM_ALIGN);
}

/* Free function for memory allocated with nbnxn_alloc_aligned */
void nbnxn_free_aligned(void *ptr)
{
    sfree_aligned(ptr);
}
/* Reallocation wrapper function for nbnxn data structures */
void nbnxn_realloc_void(void **ptr,
                        int nbytes_copy, int nbytes_new,
                        nbnxn_alloc_t *ma,
                        nbnxn_free_t  *mf)
{
    void *ptr_new;

    ma(&ptr_new, nbytes_new);

    if (nbytes_new > 0 && ptr_new == nullptr)
    {
        gmx_fatal(FARGS, "Allocation of %d bytes failed", nbytes_new);
    }

    if (nbytes_copy > 0)
    {
        if (nbytes_new < nbytes_copy)
        {
            gmx_incons("In nbnxn_realloc_void: new size less than copy size");
        }
        memcpy(ptr_new, *ptr, nbytes_copy);
    }
    mf(*ptr);
    *ptr = ptr_new;
}
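/* Illustrative usage sketch (hypothetical buffer and sizes, not part of
 * this file): growing a caller-owned array while preserving its contents.
 * The alloc/free pair passed in must match the pair used for the original
 * allocation.
 *
 *   real *buf = nullptr;
 *   nbnxn_alloc_aligned((void **)&buf, 100*sizeof(*buf));
 *   // ... fill buf ...
 *   nbnxn_realloc_void((void **)&buf,
 *                      100*sizeof(*buf), 200*sizeof(*buf),
 *                      nbnxn_alloc_aligned, nbnxn_free_aligned);
 */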
/* Reallocate the nbnxn_atomdata_t for a size of n atoms */
void nbnxn_atomdata_realloc(nbnxn_atomdata_t *nbat, int n)
{
    int t;

    nbnxn_realloc_void((void **)&nbat->type,
                       nbat->natoms*sizeof(*nbat->type),
                       n*sizeof(*nbat->type),
                       nbat->alloc, nbat->free);
    nbnxn_realloc_void((void **)&nbat->lj_comb,
                       nbat->natoms*2*sizeof(*nbat->lj_comb),
                       n*2*sizeof(*nbat->lj_comb),
                       nbat->alloc, nbat->free);
    if (nbat->XFormat != nbatXYZQ)
    {
        nbnxn_realloc_void((void **)&nbat->q,
                           nbat->natoms*sizeof(*nbat->q),
                           n*sizeof(*nbat->q),
                           nbat->alloc, nbat->free);
    }
    if (nbat->nenergrp > 1)
    {
        nbnxn_realloc_void((void **)&nbat->energrp,
                           nbat->natoms/nbat->na_c*sizeof(*nbat->energrp),
                           n/nbat->na_c*sizeof(*nbat->energrp),
                           nbat->alloc, nbat->free);
    }
    nbnxn_realloc_void((void **)&nbat->x,
                       nbat->natoms*nbat->xstride*sizeof(*nbat->x),
                       n*nbat->xstride*sizeof(*nbat->x),
                       nbat->alloc, nbat->free);
    for (t = 0; t < nbat->nout; t++)
    {
        /* Allocate one element extra for possible signaling with GPUs */
        nbnxn_realloc_void((void **)&nbat->out[t].f,
                           nbat->natoms*nbat->fstride*sizeof(*nbat->out[t].f),
                           n*nbat->fstride*sizeof(*nbat->out[t].f),
                           nbat->alloc, nbat->free);
    }
    nbat->nalloc = n;
}
/* Initializes an nbnxn_atomdata_output_t data structure */
static void nbnxn_atomdata_output_init(nbnxn_atomdata_output_t *out,
                                       int nb_kernel_type,
                                       int nenergrp, int stride,
                                       nbnxn_alloc_t *ma)
{
    out->f = nullptr;
    ma((void **)&out->fshift, SHIFTS*DIM*sizeof(*out->fshift));
    out->nV = nenergrp*nenergrp;
    ma((void **)&out->Vvdw, out->nV*sizeof(*out->Vvdw));
    ma((void **)&out->Vc,   out->nV*sizeof(*out->Vc  ));

    if (nb_kernel_type == nbnxnk4xN_SIMD_4xN ||
        nb_kernel_type == nbnxnk4xN_SIMD_2xNN)
    {
        int cj_size = nbnxn_kernel_to_cluster_j_size(nb_kernel_type);
        out->nVS = nenergrp*nenergrp*stride*(cj_size>>1)*cj_size;
        ma((void **)&out->VSvdw, out->nVS*sizeof(*out->VSvdw));
        ma((void **)&out->VSc,   out->nVS*sizeof(*out->VSc  ));
    }
    else
    {
        out->nVS = 0;
    }
}
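/* Sizing sketch for the SIMD energy-group buffers above (illustrative,
 * numbers assumed): for the 4xN kernel with cj_size = 4, two energy
 * groups (nenergrp = 2) and stride = 1<<neg_2log = 2, this gives
 *   nVS = 2*2 * 2 * (4>>1) * 4 = 64
 * temporary accumulation slots, which are reduced down to the
 * nV = nenergrp^2 = 4 final Vvdw/Vc entries after the kernel call.
 */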
static void copy_int_to_nbat_int(const int *a, int na, int na_round,
                                 const int *in, int fill, int *innb)
{
    int i, j = 0;

    for (i = 0; i < na; i++)
    {
        innb[j++] = in[a[i]];
    }
    /* Complete the partially filled last cell with fill */
    for (; i < na_round; i++)
    {
        innb[j++] = fill;
    }
}
static void clear_nbat_real(int na, int nbatFormat, real *xnb, int a0)
{
    int j, c;

    switch (nbatFormat)
    {
        case nbatXYZ:
            for (int a = 0; a < na; a++)
            {
                for (int d = 0; d < DIM; d++)
                {
                    xnb[(a0+a)*STRIDE_XYZ+d] = 0;
                }
            }
            break;
        case nbatXYZQ:
            for (int a = 0; a < na; a++)
            {
                for (int d = 0; d < DIM; d++)
                {
                    xnb[(a0+a)*STRIDE_XYZQ+d] = 0;
                }
            }
            break;
        case nbatX4:
            j = atom_to_x_index<c_packX4>(a0);
            c = a0 & (c_packX4 - 1);
            for (int a = 0; a < na; a++)
            {
                xnb[j+XX*c_packX4] = 0;
                xnb[j+YY*c_packX4] = 0;
                xnb[j+ZZ*c_packX4] = 0;
                j++;
                c++;
                if (c == c_packX4)
                {
                    j += (DIM-1)*c_packX4;
                    c  = 0;
                }
            }
            break;
        case nbatX8:
            j = atom_to_x_index<c_packX8>(a0);
            c = a0 & (c_packX8 - 1);
            for (int a = 0; a < na; a++)
            {
                xnb[j+XX*c_packX8] = 0;
                xnb[j+YY*c_packX8] = 0;
                xnb[j+ZZ*c_packX8] = 0;
                j++;
                c++;
                if (c == c_packX8)
                {
                    j += (DIM-1)*c_packX8;
                    c  = 0;
                }
            }
            break;
    }
}
void copy_rvec_to_nbat_real(const int *a, int na, int na_round,
                            const rvec *x, int nbatFormat,
                            real *xnb, int a0)
{
    /* We complete partially filled cells, which can only be the last one
     * in each column, with coordinates farAway. The actual coordinate value
     * does not influence the results, since these filler particles do not
     * interact. Clusters with normal atoms + fillers have a bounding box
     * based only on the coordinates of the atoms. Clusters with only fillers
     * have as the bounding box the coordinates of the first filler. Such
     * clusters are not considered as i-entries, but they are considered as
     * j-entries. So for performance it is better to have their bounding
     * boxes far away, such that filler-only clusters don't end up in the
     * pair list.
     */
    const real farAway = -1000000;
    int i, j, c;

    switch (nbatFormat)
    {
        case nbatXYZ:
            j = a0*STRIDE_XYZ;
            for (i = 0; i < na; i++)
            {
                xnb[j++] = x[a[i]][XX];
                xnb[j++] = x[a[i]][YY];
                xnb[j++] = x[a[i]][ZZ];
            }
            /* Complete the partially filled last cell with farAway elements */
            for (; i < na_round; i++)
            {
                xnb[j++] = farAway;
                xnb[j++] = farAway;
                xnb[j++] = farAway;
            }
            break;
        case nbatXYZQ:
            j = a0*STRIDE_XYZQ;
            for (i = 0; i < na; i++)
            {
                xnb[j++] = x[a[i]][XX];
                xnb[j++] = x[a[i]][YY];
                xnb[j++] = x[a[i]][ZZ];
                j++;
            }
            /* Complete the partially filled last cell with farAway elements */
            for (; i < na_round; i++)
            {
                xnb[j++] = farAway;
                xnb[j++] = farAway;
                xnb[j++] = farAway;
                j++;
            }
            break;
        case nbatX4:
            j = atom_to_x_index<c_packX4>(a0);
            c = a0 & (c_packX4-1);
            for (i = 0; i < na; i++)
            {
                xnb[j+XX*c_packX4] = x[a[i]][XX];
                xnb[j+YY*c_packX4] = x[a[i]][YY];
                xnb[j+ZZ*c_packX4] = x[a[i]][ZZ];
                j++;
                c++;
                if (c == c_packX4)
                {
                    j += (DIM-1)*c_packX4;
                    c  = 0;
                }
            }
            /* Complete the partially filled last cell with farAway elements */
            for (; i < na_round; i++)
            {
                xnb[j+XX*c_packX4] = farAway;
                xnb[j+YY*c_packX4] = farAway;
                xnb[j+ZZ*c_packX4] = farAway;
                j++;
                c++;
                if (c == c_packX4)
                {
                    j += (DIM-1)*c_packX4;
                    c  = 0;
                }
            }
            break;
        case nbatX8:
            j = atom_to_x_index<c_packX8>(a0);
            c = a0 & (c_packX8 - 1);
            for (i = 0; i < na; i++)
            {
                xnb[j+XX*c_packX8] = x[a[i]][XX];
                xnb[j+YY*c_packX8] = x[a[i]][YY];
                xnb[j+ZZ*c_packX8] = x[a[i]][ZZ];
                j++;
                c++;
                if (c == c_packX8)
                {
                    j += (DIM-1)*c_packX8;
                    c  = 0;
                }
            }
            /* Complete the partially filled last cell with farAway elements */
            for (; i < na_round; i++)
            {
                xnb[j+XX*c_packX8] = farAway;
                xnb[j+YY*c_packX8] = farAway;
                xnb[j+ZZ*c_packX8] = farAway;
                j++;
                c++;
                if (c == c_packX8)
                {
                    j += (DIM-1)*c_packX8;
                    c  = 0;
                }
            }
            break;
        default:
            gmx_incons("Unsupported nbnxn_atomdata_t format");
    }
}
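/* Layout sketch (illustrative): with c_packX4 = 4 the nbatX4 format
 * stores coordinates pack-transposed,
 *   x0 x1 x2 x3  y0 y1 y2 y3  z0 z1 z2 z3  x4 x5 x6 x7  ...
 * so one aligned SIMD load fetches the same component of 4 atoms.
 * atom_to_x_index<c_packX4>(a) returns the position of atom a's x
 * component, e.g. a = 5 -> DIM*4 + 1 = 13, i.e. the second slot of the
 * second pack.
 */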
/* Stores the LJ parameter data in a format convenient for different kernels */
static void set_lj_parameter_data(nbnxn_atomdata_t *nbat, gmx_bool bSIMD)
{
    real c6, c12;

    int  nt = nbat->ntype;

    if (bSIMD)
    {
#if GMX_SIMD
        /* nbfp_aligned stores two parameters using the stride most suitable
         * for the present SIMD architecture, as specified by the constant
         * c_simdBestPairAlignment from the SIMD header.
         * There's a slight inefficiency in allocating and initializing nbfp_aligned
         * when it might not be used, but introducing the conditional code is not
         * really worth it.
         */
        nbat->alloc((void **)&nbat->nbfp_aligned,
                    nt*nt*c_simdBestPairAlignment*sizeof(*nbat->nbfp_aligned));
        for (int i = 0; i < nt; i++)
        {
            for (int j = 0; j < nt; j++)
            {
                nbat->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+0] = nbat->nbfp[(i*nt+j)*2+0];
                nbat->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+1] = nbat->nbfp[(i*nt+j)*2+1];
                nbat->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+2] = 0;
                nbat->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+3] = 0;
            }
        }
#endif
    }
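    /* Layout sketch for nbfp_aligned (illustrative, assuming
     * c_simdBestPairAlignment == 4): each type pair i,j occupies one
     * aligned slot { 6*C6, 12*C12, 0, 0 }, so both parameters of a pair
     * sit in a single aligned chunk and can be fetched together, at the
     * cost of twice the memory of the packed nbfp array.
     */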
    /* We use combination rule data for SIMD combination rule kernels
     * and with LJ-PME kernels. We then only need parameters per atom type,
     * not per pair of atom types.
     */
    switch (nbat->comb_rule)
    {
        case ljcrGEOM:
            nbat->comb_rule = ljcrGEOM;

            for (int i = 0; i < nt; i++)
            {
                /* Store the sqrt of the diagonal from the nbfp matrix */
                nbat->nbfp_comb[i*2  ] = std::sqrt(nbat->nbfp[(i*nt+i)*2  ]);
                nbat->nbfp_comb[i*2+1] = std::sqrt(nbat->nbfp[(i*nt+i)*2+1]);
            }
            break;
        case ljcrLB:
            for (int i = 0; i < nt; i++)
            {
                /* Get 6*C6 and 12*C12 from the diagonal of the nbfp matrix */
                c6  = nbat->nbfp[(i*nt+i)*2  ];
                c12 = nbat->nbfp[(i*nt+i)*2+1];
                if (c6 > 0 && c12 > 0)
                {
                    /* We store 0.5*2^1/6*sigma and sqrt(4*3*eps),
                     * so we get 6*C6 and 12*C12 after combining.
                     */
                    nbat->nbfp_comb[i*2  ] = 0.5*gmx::sixthroot(c12/c6);
                    nbat->nbfp_comb[i*2+1] = std::sqrt(c6*c6/c12);
                }
                else
                {
                    nbat->nbfp_comb[i*2  ] = 0;
                    nbat->nbfp_comb[i*2+1] = 0;
                }
            }
            break;
        case ljcrNONE:
            /* We always store the full matrix (see code above) */
            break;
        default:
            gmx_incons("Unknown combination rule");
    }
}
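/* Worked example of the LB storage trick above (illustrative): with
 * C6 = 4*eps*sigma^6 and C12 = 4*eps*sigma^12, the prefactored diagonal
 * entries are c6 = 6*C6 and c12 = 12*C12, so
 *   0.5*(c12/c6)^(1/6) = 0.5*2^(1/6)*sigma   and   sqrt(c6*c6/c12) = sqrt(12*eps).
 * Combining types i and j in the kernel: summing the first values and
 * raising to the 6th power gives 2*sigma_ij^6, multiplying the second
 * values gives 12*eps_ij; their product is 24*eps_ij*sigma_ij^6 = 6*C6_ij,
 * and one more factor of 2*sigma_ij^6 gives 48*eps_ij*sigma_ij^12 = 12*C12_ij.
 */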
/* Set up the diagonal and exclusion mask data for the simple SIMD kernels */
#if GMX_SIMD
static void
nbnxn_atomdata_init_simple_exclusion_masks(nbnxn_atomdata_t *nbat)
{
    const int simd_width = GMX_SIMD_REAL_WIDTH;
    int       simd_excl_size;
    /* Set the diagonal cluster pair exclusion mask setup data.
     * In the kernel we check 0 < j - i to generate the masks.
     * Here we store j - i for generating the mask for the first i,
     * we subtract 0.5 to avoid rounding issues.
     * In the kernel we can subtract 1 to generate the subsequent mask.
     */
    int       simd_4xn_diag_size;

    simd_4xn_diag_size = std::max(NBNXN_CPU_CLUSTER_I_SIZE, simd_width);
    snew_aligned(nbat->simd_4xn_diagonal_j_minus_i, simd_4xn_diag_size, NBNXN_MEM_ALIGN);
    for (int j = 0; j < simd_4xn_diag_size; j++)
    {
        nbat->simd_4xn_diagonal_j_minus_i[j] = j - 0.5;
    }

    snew_aligned(nbat->simd_2xnn_diagonal_j_minus_i, simd_width, NBNXN_MEM_ALIGN);
    for (int j = 0; j < simd_width/2; j++)
    {
        /* The j-cluster size is half the SIMD width */
        nbat->simd_2xnn_diagonal_j_minus_i[j]              = j - 0.5;
        /* The next half of the SIMD width is for i + 1 */
        nbat->simd_2xnn_diagonal_j_minus_i[simd_width/2+j] = j - 1 - 0.5;
    }
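    /* Example contents (illustrative, assuming GMX_SIMD_REAL_WIDTH == 8):
     * the 4xN table holds {-0.5, 0.5, 1.5, ..., 6.5}; the 2xNN table holds
     * the two half-width rows {-0.5, 0.5, 1.5, 2.5, -1.5, -0.5, 0.5, 1.5}
     * for i and i + 1. The kernel subtracts 1 per i-step, and entries that
     * remain > 0 select the j-atoms above the cluster diagonal.
     */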
    /* We use up to 32 bits for exclusion masking.
     * The same masks are used for the 4xN and 2x(N+N) kernels.
     * The masks are read either into integer SIMD registers or into
     * real SIMD registers (together with a cast).
     * In single precision this means the real and integer SIMD registers
     * are of equal size.
     */
    simd_excl_size = NBNXN_CPU_CLUSTER_I_SIZE*simd_width;
#if GMX_DOUBLE && !GMX_SIMD_HAVE_INT32_LOGICAL
    snew_aligned(nbat->simd_exclusion_filter64, simd_excl_size, NBNXN_MEM_ALIGN);
#else
    snew_aligned(nbat->simd_exclusion_filter,   simd_excl_size, NBNXN_MEM_ALIGN);
#endif

    for (int j = 0; j < simd_excl_size; j++)
    {
        /* Set the consecutive bits for masking pair exclusions */
#if GMX_DOUBLE && !GMX_SIMD_HAVE_INT32_LOGICAL
        nbat->simd_exclusion_filter64[j] = (1U << j);
#else
        nbat->simd_exclusion_filter[j]   = (1U << j);
#endif
    }
#if !GMX_SIMD_HAVE_LOGICAL && !GMX_SIMD_HAVE_INT32_LOGICAL
    // If the SIMD implementation has no bitwise logical operation support
    // whatsoever we cannot use the normal masking. Instead,
    // we generate a vector of all 2^4 possible ways an i atom
    // interacts with its 4 j atoms. Each array entry contains
    // GMX_SIMD_REAL_WIDTH values that are read with a single aligned SIMD load.
    // Since there is no logical value representation in this case, we use
    // any nonzero value to indicate 'true', while zero means 'false'.
    // This can then be converted to a SIMD boolean internally in the SIMD
    // module by comparing to zero.
    // Each array entry encodes how this i atom will interact with the 4 j atoms.
    // Matching code exists in set_ci_top_excls() to generate indices into this array.
    // Those indices are used in the kernels.

    simd_excl_size = NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
    const real simdFalse = 0.0;
    const real simdTrue  = 1.0;
    real      *simd_interaction_array;

    snew_aligned(simd_interaction_array, simd_excl_size * GMX_SIMD_REAL_WIDTH, NBNXN_MEM_ALIGN);
    for (int j = 0; j < simd_excl_size; j++)
    {
        int index = j * GMX_SIMD_REAL_WIDTH;
        for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
        {
            simd_interaction_array[index + i] = (j & (1 << i)) ? simdTrue : simdFalse;
        }
    }
    nbat->simd_interaction_array = simd_interaction_array;
#endif
}
#endif
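/* Worked example for the interaction array above (illustrative): the
 * entry j = 5 = 0b0101 encodes "i interacts with j-atoms 0 and 2 only",
 * so with GMX_SIMD_REAL_WIDTH == 4 it stores {1.0, 0.0, 1.0, 0.0};
 * comparing the loaded vector against zero reconstructs the SIMD boolean
 * mask in the kernel.
 */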
/* Initializes an nbnxn_atomdata_t data structure */
void nbnxn_atomdata_init(FILE *fp,
                         nbnxn_atomdata_t *nbat,
                         int nb_kernel_type,
                         int enbnxninitcombrule,
                         int ntype, const real *nbfp,
                         int n_energygroups,
                         int nout,
                         nbnxn_alloc_t *alloc,
                         nbnxn_free_t  *free)
{
    int      nth;
    real     c6, c12, tol;
    char    *ptr;
    gmx_bool simple, bCombGeom, bCombLB, bSIMD;

    nbat->alloc = (alloc == nullptr ? nbnxn_alloc_aligned : alloc);
    nbat->free  = (free  == nullptr ? nbnxn_free_aligned  : free);

    if (debug)
    {
        fprintf(debug, "There are %d atom types in the system, adding one for nbnxn_atomdata_t\n", ntype);
    }
    nbat->ntype = ntype + 1;
    nbat->alloc((void **)&nbat->nbfp,
                nbat->ntype*nbat->ntype*2*sizeof(*nbat->nbfp));
    nbat->alloc((void **)&nbat->nbfp_comb, nbat->ntype*2*sizeof(*nbat->nbfp_comb));

    /* A tolerance of 1e-5 seems reasonable for (possibly hand-typed)
     * force-field floating point parameters.
     */
    tol = 1e-5;
    ptr = getenv("GMX_LJCOMB_TOL");
    if (ptr != nullptr)
    {
        double dbl;

        sscanf(ptr, "%lf", &dbl);
        tol = dbl;
    }
    bCombGeom = TRUE;
    bCombLB   = TRUE;

    /* Temporarily fill nbat->nbfp_comb with sigma and epsilon
     * to check for the LB rule.
     */
    for (int i = 0; i < ntype; i++)
    {
        c6  = nbfp[(i*ntype+i)*2  ]/6.0;
        c12 = nbfp[(i*ntype+i)*2+1]/12.0;
        if (c6 > 0 && c12 > 0)
        {
            nbat->nbfp_comb[i*2  ] = gmx::sixthroot(c12/c6);
            nbat->nbfp_comb[i*2+1] = 0.25*c6*c6/c12;
        }
        else if (c6 == 0 && c12 == 0)
        {
            nbat->nbfp_comb[i*2  ] = 0;
            nbat->nbfp_comb[i*2+1] = 0;
        }
        else
        {
            /* Cannot use the LB rule with only dispersion or repulsion */
            bCombLB = FALSE;
        }
    }
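    /* Worked inversion (for the sigma/epsilon extraction above): with
     * C6 = 4*eps*sigma^6 and C12 = 4*eps*sigma^12,
     *   C12/C6   = sigma^6  ->  sigma = (C12/C6)^(1/6)
     *   C6^2/C12 = 4*eps    ->  eps   = 0.25*C6^2/C12
     * which is exactly what is stored in nbfp_comb here (note that the
     * 6 and 12 prefactors were divided out first).
     */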
    for (int i = 0; i < nbat->ntype; i++)
    {
        for (int j = 0; j < nbat->ntype; j++)
        {
            if (i < ntype && j < ntype)
            {
                /* fr->nbfp has been updated, so that array too now stores c6/c12 including
                 * the 6.0/12.0 prefactors to save 2 flops in the most common case (force-only).
                 */
                c6  = nbfp[(i*ntype+j)*2  ];
                c12 = nbfp[(i*ntype+j)*2+1];
                nbat->nbfp[(i*nbat->ntype+j)*2  ] = c6;
                nbat->nbfp[(i*nbat->ntype+j)*2+1] = c12;

                /* Compare 6*C6 and 12*C12 for the geometric combination rule */
                bCombGeom = bCombGeom &&
                    gmx_within_tol(c6*c6,   nbfp[(i*ntype+i)*2  ]*nbfp[(j*ntype+j)*2  ], tol) &&
                    gmx_within_tol(c12*c12, nbfp[(i*ntype+i)*2+1]*nbfp[(j*ntype+j)*2+1], tol);

                /* Compare C6 and C12 for the Lorentz-Berthelot combination rule */
                c6     /= 6.0;
                c12    /= 12.0;
                bCombLB = bCombLB &&
                    ((c6 == 0 && c12 == 0 &&
                      (nbat->nbfp_comb[i*2+1] == 0 || nbat->nbfp_comb[j*2+1] == 0)) ||
                     (c6 > 0 && c12 > 0 &&
                      gmx_within_tol(gmx::sixthroot(c12/c6),
                                     0.5*(nbat->nbfp_comb[i*2]+nbat->nbfp_comb[j*2]), tol) &&
                      gmx_within_tol(0.25*c6*c6/c12, std::sqrt(nbat->nbfp_comb[i*2+1]*nbat->nbfp_comb[j*2+1]), tol)));
            }
            else
            {
                /* Add zero parameters for the additional dummy atom type */
                nbat->nbfp[(i*nbat->ntype+j)*2  ] = 0;
                nbat->nbfp[(i*nbat->ntype+j)*2+1] = 0;
            }
        }
    }
    if (debug)
    {
        fprintf(debug, "Combination rules: geometric %d Lorentz-Berthelot %d\n",
                bCombGeom, bCombLB);
    }

    simple = nbnxn_kernel_pairlist_simple(nb_kernel_type);
    switch (enbnxninitcombrule)
    {
        case enbnxninitcombruleDETECT:
            /* We prefer the geometric combination rule,
             * as that gives a slightly faster kernel than the LB rule.
             */
            if (bCombGeom)
            {
                nbat->comb_rule = ljcrGEOM;
            }
            else if (bCombLB)
            {
                nbat->comb_rule = ljcrLB;
            }
            else
            {
                nbat->comb_rule = ljcrNONE;

                nbat->free(nbat->nbfp_comb);
            }

            if (fp)
            {
                if (nbat->comb_rule == ljcrNONE)
                {
                    fprintf(fp, "Using full Lennard-Jones parameter combination matrix\n\n");
                }
                else
                {
                    fprintf(fp, "Using %s Lennard-Jones combination rule\n\n",
                            nbat->comb_rule == ljcrGEOM ? "geometric" : "Lorentz-Berthelot");
                }
            }
            break;
        case enbnxninitcombruleGEOM:
            nbat->comb_rule = ljcrGEOM;
            break;
        case enbnxninitcombruleLB:
            nbat->comb_rule = ljcrLB;
            break;
        case enbnxninitcombruleNONE:
            nbat->comb_rule = ljcrNONE;

            nbat->free(nbat->nbfp_comb);
            break;
        default:
            gmx_incons("Unknown enbnxninitcombrule");
    }
    bSIMD = (nb_kernel_type == nbnxnk4xN_SIMD_4xN ||
             nb_kernel_type == nbnxnk4xN_SIMD_2xNN);

    set_lj_parameter_data(nbat, bSIMD);

    nbat->natoms  = 0;
    nbat->type    = nullptr;
    nbat->lj_comb = nullptr;
    if (simple)
    {
        int pack_x;

        if (bSIMD)
        {
            pack_x = std::max(NBNXN_CPU_CLUSTER_I_SIZE,
                              nbnxn_kernel_to_cluster_j_size(nb_kernel_type));
            switch (pack_x)
            {
                case 4:
                    nbat->XFormat = nbatX4;
                    break;
                case 8:
                    nbat->XFormat = nbatX8;
                    break;
                default:
                    gmx_incons("Unsupported packing width");
            }
        }
        else
        {
            nbat->XFormat = nbatXYZ;
        }

        nbat->FFormat = nbat->XFormat;
    }
    else
    {
        nbat->XFormat = nbatXYZQ;
        nbat->FFormat = nbatXYZ;
    }
    nbat->q        = nullptr;
    nbat->nenergrp = n_energygroups;
    if (!simple)
    {
        /* Energy groups are not supported yet for super-sub lists */
        if (n_energygroups > 1 && fp != nullptr)
        {
            fprintf(fp, "\nNOTE: With GPUs, reporting energy group contributions is not supported\n\n");
        }
        nbat->nenergrp = 1;
    }
    /* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
    if (nbat->nenergrp > 64)
    {
        gmx_fatal(FARGS, "With NxN kernels not more than 64 energy groups are supported\n");
    }
    nbat->neg_2log = 1;
    while (nbat->nenergrp > (1<<nbat->neg_2log))
    {
        nbat->neg_2log++;
    }
    nbat->energrp = nullptr;
    nbat->alloc((void **)&nbat->shift_vec, SHIFTS*sizeof(*nbat->shift_vec));
    nbat->xstride = (nbat->XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
    nbat->fstride = (nbat->FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
#if GMX_SIMD
    if (simple)
    {
        nbnxn_atomdata_init_simple_exclusion_masks(nbat);
    }
#endif
    /* Initialize the output data structures */
    nbat->nout = nout;
    snew(nbat->out, nbat->nout);
    nbat->nalloc = 0;
    for (int i = 0; i < nbat->nout; i++)
    {
        nbnxn_atomdata_output_init(&nbat->out[i],
                                   nb_kernel_type,
                                   nbat->nenergrp, 1<<nbat->neg_2log,
                                   nbat->alloc);
    }
    nbat->buffer_flags.flag        = nullptr;
    nbat->buffer_flags.flag_nalloc = 0;

    nth = gmx_omp_nthreads_get(emntNonbonded);

    ptr = getenv("GMX_USE_TREEREDUCE");
    if (ptr != nullptr)
    {
        nbat->bUseTreeReduce = strtol(ptr, nullptr, 10);
    }
#ifdef __MIC__
    else if (nth > 8) /* on the CPU we currently don't benefit even at 32 */
    {
        nbat->bUseTreeReduce = 1;
    }
#endif
    else
    {
        nbat->bUseTreeReduce = 0;
    }
    if (nbat->bUseTreeReduce)
    {
        if (fp)
        {
            fprintf(fp, "Using tree force reduction\n\n");
        }
        snew(nbat->syncStep, nth);
    }
}
template<int packSize>
static void copy_lj_to_nbat_lj_comb(const real *ljparam_type,
                                    const int *type, int na,
                                    real *ljparam_at)
{
    /* The LJ params follow the combination rule:
     * copy the params for the type array to the atom array.
     */
    for (int is = 0; is < na; is += packSize)
    {
        for (int k = 0; k < packSize; k++)
        {
            int i = is + k;

            ljparam_at[is*2            + k] = ljparam_type[type[i]*2  ];
            ljparam_at[is*2 + packSize + k] = ljparam_type[type[i]*2 + 1];
        }
    }
}
/* Sets the atom type in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t    *nbat,
                                         int                  ngrid,
                                         const nbnxn_search_t nbs,
                                         const int           *type)
{
    for (int g = 0; g < ngrid; g++)
    {
        const nbnxn_grid_t * grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (int i = 0; i < grid->ncx*grid->ncy; i++)
        {
            int ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            int ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_int_to_nbat_int(nbs->a+ash, grid->cxy_na[i], ncz*grid->na_sc,
                                 type, nbat->ntype-1, nbat->type+ash);
        }
    }
}
/* Sets the LJ combination rule parameters in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_ljcombparams(nbnxn_atomdata_t    *nbat,
                                            int                  ngrid,
                                            const nbnxn_search_t nbs)
{
    if (nbat->comb_rule != ljcrNONE)
    {
        for (int g = 0; g < ngrid; g++)
        {
            const nbnxn_grid_t * grid = &nbs->grid[g];

            /* Loop over all columns and copy and fill */
            for (int i = 0; i < grid->ncx*grid->ncy; i++)
            {
                int ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
                int ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

                if (nbat->XFormat == nbatX4)
                {
                    copy_lj_to_nbat_lj_comb<c_packX4>(nbat->nbfp_comb,
                                                      nbat->type + ash,
                                                      ncz*grid->na_sc,
                                                      nbat->lj_comb + ash*2);
                }
                else if (nbat->XFormat == nbatX8)
                {
                    copy_lj_to_nbat_lj_comb<c_packX8>(nbat->nbfp_comb,
                                                      nbat->type + ash,
                                                      ncz*grid->na_sc,
                                                      nbat->lj_comb + ash*2);
                }
                else if (nbat->XFormat == nbatXYZQ)
                {
                    copy_lj_to_nbat_lj_comb<1>(nbat->nbfp_comb,
                                               nbat->type + ash,
                                               ncz*grid->na_sc,
                                               nbat->lj_comb + ash*2);
                }
            }
        }
    }
}
/* Sets the charges in nbnxn_atomdata_t *nbat */
static void nbnxn_atomdata_set_charges(nbnxn_atomdata_t    *nbat,
                                       int                  ngrid,
                                       const nbnxn_search_t nbs,
                                       const real          *charge)
{
    int   i;
    real *q;

    for (int g = 0; g < ngrid; g++)
    {
        const nbnxn_grid_t * grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (int cxy = 0; cxy < grid->ncx*grid->ncy; cxy++)
        {
            int ash      = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
            int na       = grid->cxy_na[cxy];
            int na_round = (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;

            if (nbat->XFormat == nbatXYZQ)
            {
                q = nbat->x + ash*STRIDE_XYZQ + ZZ + 1;
                for (i = 0; i < na; i++)
                {
                    *q = charge[nbs->a[ash+i]];
                    q += STRIDE_XYZQ;
                }
                /* Complete the partially filled last cell with zeros */
                for (; i < na_round; i++)
                {
                    *q = 0;
                    q += STRIDE_XYZQ;
                }
            }
            else
            {
                q = nbat->q + ash;
                for (i = 0; i < na; i++)
                {
                    *q = charge[nbs->a[ash+i]];
                    q++;
                }
                /* Complete the partially filled last cell with zeros */
                for (; i < na_round; i++)
                {
                    *q = 0;
                    q++;
                }
            }
        }
    }
}
/* Set the charges of perturbed atoms in nbnxn_atomdata_t to 0.
 * This is to automatically remove the RF/PME self term in the nbnxn kernels.
 * Part of the zero interactions are still calculated in the normal kernels.
 * All perturbed interactions are calculated in the free energy kernel,
 * using the original charge and LJ data, not nbnxn_atomdata_t.
 */
static void nbnxn_atomdata_mask_fep(nbnxn_atomdata_t    *nbat,
                                    int                  ngrid,
                                    const nbnxn_search_t nbs)
{
    real *q;
    int   stride_q;

    if (nbat->XFormat == nbatXYZQ)
    {
        q        = nbat->x + ZZ + 1;
        stride_q = STRIDE_XYZQ;
    }
    else
    {
        q        = nbat->q;
        stride_q = 1;
    }

    for (int g = 0; g < ngrid; g++)
    {
        const nbnxn_grid_t * grid = &nbs->grid[g];
        int                  nsubc;

        if (grid->bSimple)
        {
            nsubc = 1;
        }
        else
        {
            nsubc = c_gpuNumClusterPerCell;
        }

        int c_offset = grid->cell0*grid->na_sc;

        /* Loop over all clusters and mask the perturbed particles */
        for (int c = 0; c < grid->nc*nsubc; c++)
        {
            /* Does this cluster contain perturbed particles? */
            if (grid->fep[c] != 0)
            {
                for (int i = 0; i < grid->na_c; i++)
                {
                    /* Is this a perturbed particle? */
                    if (grid->fep[c] & (1 << i))
                    {
                        int ind = c_offset + c*grid->na_c + i;
                        /* Set atom type and charge to non-interacting */
                        nbat->type[ind] = nbat->ntype - 1;
                        q[ind*stride_q] = 0;
                    }
                }
            }
        }
    }
}
/* Copies the energy group indices to a reordered and packed array */
static void copy_egp_to_nbat_egps(const int *a, int na, int na_round,
                                  int na_c, int bit_shift,
                                  const int *in, int *innb)
{
    int i, j = 0;

    for (i = 0; i < na; i += na_c)
    {
        /* Store na_c energy group numbers into one int */
        int comb = 0;
        for (int sa = 0; sa < na_c; sa++)
        {
            int at = a[i+sa];
            if (at >= 0)
            {
                comb |= (GET_CGINFO_GID(in[at]) << (sa*bit_shift));
            }
        }
        innb[j++] = comb;
    }
    /* Complete the partially filled last cell with fill */
    for (; i < na_round; i += na_c)
    {
        innb[j++] = 0;
    }
}
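/* Worked packing example (illustrative): with na_c = 4 and bit_shift = 2
 * (up to 4 energy groups), atoms with groups {2, 0, 3, 1} pack into
 *   comb = 2 | 0<<2 | 3<<4 | 1<<6 = 114 = 0b01110010
 * and the kernels unpack group sa with (comb >> (sa*2)) & 3.
 */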
/* Set the energy group indices for atoms in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_energygroups(nbnxn_atomdata_t    *nbat,
                                            int                  ngrid,
                                            const nbnxn_search_t nbs,
                                            const int           *atinfo)
{
    if (nbat->nenergrp == 1)
    {
        return;
    }

    for (int g = 0; g < ngrid; g++)
    {
        const nbnxn_grid_t * grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (int i = 0; i < grid->ncx*grid->ncy; i++)
        {
            int ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            int ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_egp_to_nbat_egps(nbs->a+ash, grid->cxy_na[i], ncz*grid->na_sc,
                                  nbat->na_c, nbat->neg_2log,
                                  atinfo, nbat->energrp+(ash>>grid->na_c_2log));
        }
    }
}
/* Sets all required atom parameter data in nbnxn_atomdata_t */
void nbnxn_atomdata_set(nbnxn_atomdata_t    *nbat,
                        int                  locality,
                        const nbnxn_search_t nbs,
                        const t_mdatoms     *mdatoms,
                        const int           *atinfo)
{
    int ngrid;

    if (locality == eatLocal)
    {
        ngrid = 1;
    }
    else
    {
        ngrid = nbs->ngrid;
    }

    nbnxn_atomdata_set_atomtypes(nbat, ngrid, nbs, mdatoms->typeA);

    nbnxn_atomdata_set_charges(nbat, ngrid, nbs, mdatoms->chargeA);

    if (nbs->bFEP)
    {
        nbnxn_atomdata_mask_fep(nbat, ngrid, nbs);
    }

    /* This must be done after masking types for FEP */
    nbnxn_atomdata_set_ljcombparams(nbat, ngrid, nbs);

    nbnxn_atomdata_set_energygroups(nbat, ngrid, nbs, atinfo);
}
/* Copies the shift vector array to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_shiftvec(gmx_bool          bDynamicBox,
                                  rvec             *shift_vec,
                                  nbnxn_atomdata_t *nbat)
{
    int i;

    nbat->bDynamicBox = bDynamicBox;
    for (i = 0; i < SHIFTS; i++)
    {
        copy_rvec(shift_vec[i], nbat->shift_vec[i]);
    }
}
/* Copies (and reorders) the coordinates to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_x_to_nbat_x(const nbnxn_search_t nbs,
                                     int                  locality,
                                     gmx_bool             FillLocal,
                                     rvec                *x,
                                     nbnxn_atomdata_t    *nbat)
{
    int g0 = 0, g1 = 0;
    int nth, th;

    switch (locality)
    {
        case eatAll:
            g0 = 0;
            g1 = nbs->ngrid;
            break;
        case eatLocal:
            g0 = 0;
            g1 = 1;
            break;
        case eatNonlocal:
            g0 = 1;
            g1 = nbs->ngrid;
            break;
    }

    if (FillLocal)
    {
        nbat->natoms_local = nbs->grid[0].nc*nbs->grid[0].na_sc;
    }

    nth = gmx_omp_nthreads_get(emntPairsearch);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (th = 0; th < nth; th++)
    {
        try
        {
            for (int g = g0; g < g1; g++)
            {
                const nbnxn_grid_t *grid;
                int                 cxy0, cxy1;

                grid = &nbs->grid[g];

                cxy0 = (grid->ncx*grid->ncy* th   +nth-1)/nth;
                cxy1 = (grid->ncx*grid->ncy*(th+1)+nth-1)/nth;

                for (int cxy = cxy0; cxy < cxy1; cxy++)
                {
                    int na, ash, na_fill;

                    na  = grid->cxy_na[cxy];
                    ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;

                    if (g == 0 && FillLocal)
                    {
                        na_fill =
                            (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;
                    }
                    else
                    {
                        /* We fill only the real particle locations.
                         * We assume the filling entries at the end have been
                         * properly set before during pair-list generation.
                         */
                        na_fill = na;
                    }
                    copy_rvec_to_nbat_real(nbs->a+ash, na, na_fill, x,
                                           nbat->XFormat, nbat->x, ash);
                }
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
}
static void
nbnxn_atomdata_clear_reals(real * gmx_restrict dest,
                           int i0, int i1)
{
    for (int i = i0; i < i1; i++)
    {
        dest[i] = 0;
    }
}

static void
nbnxn_atomdata_reduce_reals(real * gmx_restrict dest,
                            gmx_bool bDestSet,
                            real ** gmx_restrict src,
                            int nsrc,
                            int i0, int i1)
{
    if (bDestSet)
    {
        /* The destination buffer contains data, add to it */
        for (int i = i0; i < i1; i++)
        {
            for (int s = 0; s < nsrc; s++)
            {
                dest[i] += src[s][i];
            }
        }
    }
    else
    {
        /* The destination buffer is uninitialized, set it first */
        for (int i = i0; i < i1; i++)
        {
            dest[i] = src[0][i];
            for (int s = 1; s < nsrc; s++)
            {
                dest[i] += src[s][i];
            }
        }
    }
}
static void
nbnxn_atomdata_reduce_reals_simd(real gmx_unused * gmx_restrict dest,
                                 gmx_bool gmx_unused bDestSet,
                                 real gmx_unused ** gmx_restrict src,
                                 int gmx_unused nsrc,
                                 int gmx_unused i0, int gmx_unused i1)
{
#if GMX_SIMD
    /* The SIMD width here is actually independent of that in the kernels,
     * but we use the same width for simplicity (usually optimal anyhow).
     */
    SimdReal dest_SSE, src_SSE;

    if (bDestSet)
    {
        for (int i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
        {
            dest_SSE = load(dest+i);
            for (int s = 0; s < nsrc; s++)
            {
                src_SSE  = load(src[s]+i);
                dest_SSE = dest_SSE + src_SSE;
            }
            store(dest+i, dest_SSE);
        }
    }
    else
    {
        for (int i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
        {
            dest_SSE = load(src[0]+i);
            for (int s = 1; s < nsrc; s++)
            {
                src_SSE  = load(src[s]+i);
                dest_SSE = dest_SSE + src_SSE;
            }
            store(dest+i, dest_SSE);
        }
    }
#endif
}
/* Add part of the force array(s) from nbnxn_atomdata_t to f */
static void
nbnxn_atomdata_add_nbat_f_to_f_part(const nbnxn_search_t nbs,
                                    const nbnxn_atomdata_t *nbat,
                                    nbnxn_atomdata_output_t *out,
                                    int nfa,
                                    int a0, int a1,
                                    rvec *f)
{
    const int *cell = nbs->cell;

    /* Loop over all atoms in the range and accumulate the forces */
    switch (nbat->FFormat)
    {
        case nbatXYZ:
        case nbatXYZQ:
            if (nfa == 1)
            {
                const real *fnb = out[0].f;

                for (int a = a0; a < a1; a++)
                {
                    int i = cell[a]*nbat->fstride;

                    f[a][XX] += fnb[i];
                    f[a][YY] += fnb[i+1];
                    f[a][ZZ] += fnb[i+2];
                }
            }
            else
            {
                for (int a = a0; a < a1; a++)
                {
                    int i = cell[a]*nbat->fstride;

                    for (int fa = 0; fa < nfa; fa++)
                    {
                        f[a][XX] += out[fa].f[i];
                        f[a][YY] += out[fa].f[i+1];
                        f[a][ZZ] += out[fa].f[i+2];
                    }
                }
            }
            break;
        case nbatX4:
            if (nfa == 1)
            {
                const real *fnb = out[0].f;

                for (int a = a0; a < a1; a++)
                {
                    int i = atom_to_x_index<c_packX4>(cell[a]);

                    f[a][XX] += fnb[i+XX*c_packX4];
                    f[a][YY] += fnb[i+YY*c_packX4];
                    f[a][ZZ] += fnb[i+ZZ*c_packX4];
                }
            }
            else
            {
                for (int a = a0; a < a1; a++)
                {
                    int i = atom_to_x_index<c_packX4>(cell[a]);

                    for (int fa = 0; fa < nfa; fa++)
                    {
                        f[a][XX] += out[fa].f[i+XX*c_packX4];
                        f[a][YY] += out[fa].f[i+YY*c_packX4];
                        f[a][ZZ] += out[fa].f[i+ZZ*c_packX4];
                    }
                }
            }
            break;
        case nbatX8:
            if (nfa == 1)
            {
                const real *fnb = out[0].f;

                for (int a = a0; a < a1; a++)
                {
                    int i = atom_to_x_index<c_packX8>(cell[a]);

                    f[a][XX] += fnb[i+XX*c_packX8];
                    f[a][YY] += fnb[i+YY*c_packX8];
                    f[a][ZZ] += fnb[i+ZZ*c_packX8];
                }
            }
            else
            {
                for (int a = a0; a < a1; a++)
                {
                    int i = atom_to_x_index<c_packX8>(cell[a]);

                    for (int fa = 0; fa < nfa; fa++)
                    {
                        f[a][XX] += out[fa].f[i+XX*c_packX8];
                        f[a][YY] += out[fa].f[i+YY*c_packX8];
                        f[a][ZZ] += out[fa].f[i+ZZ*c_packX8];
                    }
                }
            }
            break;
        default:
            gmx_incons("Unsupported nbnxn_atomdata_t format");
    }
}
static gmx_inline unsigned char reverse_bits(unsigned char b)
{
    /* http://graphics.stanford.edu/~seander/bithacks.html#ReverseByteWith64BitsDiv */
    return (b * 0x0202020202ULL & 0x010884422010ULL) % 1023;
}
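/* Worked example (illustrative): b = 0b00000001 -> 0b10000000. The
 * multiply spreads five copies of the byte across the 64-bit word, the
 * AND keeps one source bit per copy at positions 10 apart, and the
 * modulo by 1023 = 2^10 - 1 folds those bits down into a single byte in
 * reversed order.
 */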
static void nbnxn_atomdata_add_nbat_f_to_f_treereduce(const nbnxn_atomdata_t *nbat,
                                                      int                     nth)
{
    const nbnxn_buffer_flags_t *flags = &nbat->buffer_flags;

    int next_pow2 = 1<<(gmx::log2I(nth-1)+1);

    assert(nbat->nout == nth); /* tree-reduce currently only works for nout==nth */

    memset(nbat->syncStep, 0, sizeof(*(nbat->syncStep))*nth);

#pragma omp parallel num_threads(nth)
    {
        try
        {
            int b0, b1, b;
            int i0, i1;
            int group_size, th;

            th = gmx_omp_get_thread_num();

            for (group_size = 2; group_size < 2*next_pow2; group_size *= 2)
            {
                int index[2], group_pos, partner_pos, wu;
                int partner_th = th ^ (group_size/2);

                if (group_size > 2)
                {
#ifdef TMPI_ATOMICS
                    /* wait on partner thread - replaces full barrier */
                    int sync_th, sync_group_size;

                    tMPI_Atomic_memory_barrier();                         /* guarantee data is saved before marking work as done */
                    tMPI_Atomic_set(&(nbat->syncStep[th]), group_size/2); /* mark previous step as completed */

                    /* find thread to sync with. Equal to partner_th unless nth is not a power of two. */
                    for (sync_th = partner_th, sync_group_size = group_size; sync_th >= nth && sync_group_size > 2; sync_group_size /= 2)
                    {
                        sync_th &= ~(sync_group_size/4);
                    }
                    if (sync_th < nth) /* otherwise nothing to sync, index[1] will be >= nout */
                    {
                        /* wait on the thread which computed input data in previous step */
                        while (tMPI_Atomic_get((volatile tMPI_Atomic_t*)&(nbat->syncStep[sync_th])) < group_size/2)
                        {
                            gmx_pause();
                        }
                        /* guarantee that no later load happens before wait loop is finished */
                        tMPI_Atomic_memory_barrier();
                    }
#else           /* TMPI_ATOMICS */
#pragma omp barrier
#endif
                }

                /* Calculate buffers to sum (result goes into first buffer) */
                group_pos = th % group_size;
                index[0]  = th - group_pos;
                index[1]  = index[0] + group_size/2;

                /* If no second buffer, nothing to do */
                if (index[1] >= nbat->nout && group_size > 2)
                {
                    continue;
                }

#if NBNXN_BUFFERFLAG_MAX_THREADS > 256
#error reverse_bits assumes max 256 threads
#endif
                /* Position is permuted so that one of the 2 vectors being added was
                 * computed on the same thread in the previous step. This improves
                 * locality and enables syncing with just a single thread between
                 * steps (= the levels in the btree). The permutation which allows
                 * this corresponds to reversing the bits of the group position.
                 */
                group_pos = reverse_bits(group_pos)/(256/group_size);

                partner_pos = group_pos ^ 1;

                /* loop over two work-units (own and partner) */
                for (wu = 0; wu < 2; wu++)
                {
                    if (wu == 1)
                    {
                        if (partner_th < nth)
                        {
                            break; /* the partner exists, so we don't have to do its work */
                        }
                        else
                        {
                            group_pos = partner_pos;
                        }
                    }

                    /* Calculate the cell-block range for our thread */
                    b0 = (flags->nflag* group_pos   )/group_size;
                    b1 = (flags->nflag*(group_pos+1))/group_size;

                    for (b = b0; b < b1; b++)
                    {
                        i0 =  b   *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
                        i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;

                        if (bitmask_is_set(flags->flag[b], index[1]) || group_size > 2)
                        {
#if GMX_SIMD
                            nbnxn_atomdata_reduce_reals_simd
#else
                            nbnxn_atomdata_reduce_reals
#endif
                                (nbat->out[index[0]].f,
                                bitmask_is_set(flags->flag[b], index[0]) || group_size > 2,
                                &(nbat->out[index[1]].f), 1, i0, i1);
                        }
                        else if (!bitmask_is_set(flags->flag[b], index[0]))
                        {
                            nbnxn_atomdata_clear_reals(nbat->out[index[0]].f,
                                                       i0, i1);
                        }
                    }
                }
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
}
static void nbnxn_atomdata_add_nbat_f_to_f_stdreduce(const nbnxn_atomdata_t *nbat,
                                                     int                     nth)
{
#pragma omp parallel for num_threads(nth) schedule(static)
    for (int th = 0; th < nth; th++)
    {
        try
        {
            const nbnxn_buffer_flags_t *flags;
            int                         nfptr;
            real                       *fptr[NBNXN_BUFFERFLAG_MAX_THREADS];

            flags = &nbat->buffer_flags;

            /* Calculate the cell-block range for our thread */
            int b0 = (flags->nflag* th   )/nth;
            int b1 = (flags->nflag*(th+1))/nth;

            for (int b = b0; b < b1; b++)
            {
                int i0 =  b   *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
                int i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;

                nfptr = 0;
                for (int out = 1; out < nbat->nout; out++)
                {
                    if (bitmask_is_set(flags->flag[b], out))
                    {
                        fptr[nfptr++] = nbat->out[out].f;
                    }
                }
                if (nfptr > 0)
                {
#if GMX_SIMD
                    nbnxn_atomdata_reduce_reals_simd
#else
                    nbnxn_atomdata_reduce_reals
#endif
                        (nbat->out[0].f,
                        bitmask_is_set(flags->flag[b], 0),
                        fptr, nfptr,
                        i0, i1);
                }
                else if (!bitmask_is_set(flags->flag[b], 0))
                {
                    nbnxn_atomdata_clear_reals(nbat->out[0].f,
                                               i0, i1);
                }
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
}
/* Add the force array(s) from nbnxn_atomdata_t to f */
void nbnxn_atomdata_add_nbat_f_to_f(const nbnxn_search_t    nbs,
                                    int                     locality,
                                    const nbnxn_atomdata_t *nbat,
                                    rvec                   *f)
{
    int a0 = 0, na = 0;

    nbs_cycle_start(&nbs->cc[enbsCCreducef]);

    switch (locality)
    {
        case eatAll:
            a0 = 0;
            na = nbs->natoms_nonlocal;
            break;
        case eatLocal:
            a0 = 0;
            na = nbs->natoms_local;
            break;
        case eatNonlocal:
            a0 = nbs->natoms_local;
            na = nbs->natoms_nonlocal - nbs->natoms_local;
            break;
    }

    int nth = gmx_omp_nthreads_get(emntNonbonded);

    if (nbat->nout > 1)
    {
        if (locality != eatAll)
        {
            gmx_incons("add_f_to_f called with nout>1 and locality!=eatAll");
        }

        /* Reduce the force thread output buffers into buffer 0, before adding
         * them to the, differently ordered, "real" force buffer.
         */
        if (nbat->bUseTreeReduce)
        {
            nbnxn_atomdata_add_nbat_f_to_f_treereduce(nbat, nth);
        }
        else
        {
            nbnxn_atomdata_add_nbat_f_to_f_stdreduce(nbat, nth);
        }
    }
#pragma omp parallel for num_threads(nth) schedule(static)
    for (int th = 0; th < nth; th++)
    {
        try
        {
            nbnxn_atomdata_add_nbat_f_to_f_part(nbs, nbat,
                                                nbat->out,
                                                1,
                                                a0+((th+0)*na)/nth,
                                                a0+((th+1)*na)/nth,
                                                f);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    nbs_cycle_stop(&nbs->cc[enbsCCreducef]);
}
/* Adds the shift forces from nbnxn_atomdata_t to fshift */
void nbnxn_atomdata_add_nbat_fshift_to_fshift(const nbnxn_atomdata_t *nbat,
                                              rvec                   *fshift)
{
    const nbnxn_atomdata_output_t * out = nbat->out;

    for (int s = 0; s < SHIFTS; s++)
    {
        rvec sum;

        clear_rvec(sum);
        for (int th = 0; th < nbat->nout; th++)
        {
            sum[XX] += out[th].fshift[s*DIM+XX];
            sum[YY] += out[th].fshift[s*DIM+YY];
            sum[ZZ] += out[th].fshift[s*DIM+ZZ];
        }
        rvec_inc(fshift[s], sum);
    }
}