2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
46 #include "gromacs/math/vec.h"
47 #include "nbnxn_consts.h"
48 #include "nbnxn_internal.h"
49 #include "nbnxn_atomdata.h"
50 #include "nbnxn_search.h"
51 #include "gmx_omp_nthreads.h"
52 #include "thread_mpi/atomic.h"
54 #include "gromacs/pbcutil/ishift.h"
55 #include "gromacs/utility/gmxomp.h"
56 #include "gromacs/utility/smalloc.h"
58 /* Default nbnxn allocation routine, allocates memory aligned to NBNXN_MEM_ALIGN bytes */
59 void nbnxn_alloc_aligned(void **ptr, size_t nbytes)
61 *ptr = save_malloc_aligned("ptr", __FILE__, __LINE__, nbytes, 1, NBNXN_MEM_ALIGN);
64 /* Free function for memory allocated with nbnxn_alloc_aligned */
65 void nbnxn_free_aligned(void *ptr)
70 /* Reallocation wrapper function for nbnxn data structures */
71 void nbnxn_realloc_void(void **ptr,
72 int nbytes_copy, int nbytes_new,
78 ma(&ptr_new, nbytes_new);
80 if (nbytes_new > 0 && ptr_new == NULL)
82 gmx_fatal(FARGS, "Allocation of %d bytes failed", nbytes_new);
87 if (nbytes_new < nbytes_copy)
89 gmx_incons("In nbnxn_realloc_void: new size less than copy size");
91 memcpy(ptr_new, *ptr, nbytes_copy);
100 /* Reallocate the nbnxn_atomdata_t for a size of n atoms */
101 void nbnxn_atomdata_realloc(nbnxn_atomdata_t *nbat, int n)
105 nbnxn_realloc_void((void **)&nbat->type,
106 nbat->natoms*sizeof(*nbat->type),
107 n*sizeof(*nbat->type),
108 nbat->alloc, nbat->free);
109 nbnxn_realloc_void((void **)&nbat->lj_comb,
110 nbat->natoms*2*sizeof(*nbat->lj_comb),
111 n*2*sizeof(*nbat->lj_comb),
112 nbat->alloc, nbat->free);
113 if (nbat->XFormat != nbatXYZQ)
115 nbnxn_realloc_void((void **)&nbat->q,
116 nbat->natoms*sizeof(*nbat->q),
118 nbat->alloc, nbat->free);
120 if (nbat->nenergrp > 1)
122 nbnxn_realloc_void((void **)&nbat->energrp,
123 nbat->natoms/nbat->na_c*sizeof(*nbat->energrp),
124 n/nbat->na_c*sizeof(*nbat->energrp),
125 nbat->alloc, nbat->free);
127 nbnxn_realloc_void((void **)&nbat->x,
128 nbat->natoms*nbat->xstride*sizeof(*nbat->x),
129 n*nbat->xstride*sizeof(*nbat->x),
130 nbat->alloc, nbat->free);
131 for (t = 0; t < nbat->nout; t++)
133 /* Allocate one extra element for possible signaling with CUDA */
134 nbnxn_realloc_void((void **)&nbat->out[t].f,
135 nbat->natoms*nbat->fstride*sizeof(*nbat->out[t].f),
136 n*nbat->fstride*sizeof(*nbat->out[t].f),
137 nbat->alloc, nbat->free);
142 /* Initializes an nbnxn_atomdata_output_t data structure */
143 static void nbnxn_atomdata_output_init(nbnxn_atomdata_output_t *out,
145 int nenergrp, int stride,
151 ma((void **)&out->fshift, SHIFTS*DIM*sizeof(*out->fshift));
152 out->nV = nenergrp*nenergrp;
153 ma((void **)&out->Vvdw, out->nV*sizeof(*out->Vvdw));
154 ma((void **)&out->Vc, out->nV*sizeof(*out->Vc ));
156 if (nb_kernel_type == nbnxnk4xN_SIMD_4xN ||
157 nb_kernel_type == nbnxnk4xN_SIMD_2xNN)
159 cj_size = nbnxn_kernel_to_cj_size(nb_kernel_type);
160 out->nVS = nenergrp*nenergrp*stride*(cj_size>>1)*cj_size;
161 ma((void **)&out->VSvdw, out->nVS*sizeof(*out->VSvdw));
162 ma((void **)&out->VSc, out->nVS*sizeof(*out->VSc ));
170 static void copy_int_to_nbat_int(const int *a, int na, int na_round,
171 const int *in, int fill, int *innb)
176 for (i = 0; i < na; i++)
178 innb[j++] = in[a[i]];
180 /* Complete the partially filled last cell with fill */
181 for (; i < na_round; i++)
187 static void clear_nbat_real(int na, int nbatFormat, real *xnb, int a0)
194 for (a = 0; a < na; a++)
196 for (d = 0; d < DIM; d++)
198 xnb[(a0+a)*STRIDE_XYZ+d] = 0;
203 for (a = 0; a < na; a++)
205 for (d = 0; d < DIM; d++)
207 xnb[(a0+a)*STRIDE_XYZQ+d] = 0;
213 c = a0 & (PACK_X4-1);
214 for (a = 0; a < na; a++)
216 xnb[j+XX*PACK_X4] = 0;
217 xnb[j+YY*PACK_X4] = 0;
218 xnb[j+ZZ*PACK_X4] = 0;
223 j += (DIM-1)*PACK_X4;
230 c = a0 & (PACK_X8-1);
231 for (a = 0; a < na; a++)
233 xnb[j+XX*PACK_X8] = 0;
234 xnb[j+YY*PACK_X8] = 0;
235 xnb[j+ZZ*PACK_X8] = 0;
240 j += (DIM-1)*PACK_X8;
248 void copy_rvec_to_nbat_real(const int *a, int na, int na_round,
249 rvec *x, int nbatFormat, real *xnb, int a0,
250 int cx, int cy, int cz)
254 /* We might need to place filler particles to fill up the cell to na_round.
255 * The coefficients (LJ and q) for such particles are zero.
256 * But we might still get NaN as 0*NaN when distances are too small.
257 * We hope that -107 nm is far enough away from zero
258 * to avoid accidental short distances to particles shifted down for pbc.
260 #define NBAT_FAR_AWAY 107
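/* For illustration: with the fill pattern used below, filler atom i of the
 * column at grid indices (cx, cy, cz) ends up at
 * (-107*(1 + cx), -107*(1 + cy), -107*(1 + cz + i)), so fillers from
 * different columns, and different fillers within one column, never
 * coincide with each other or with real, possibly pbc-shifted, atoms.
 */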
266 for (i = 0; i < na; i++)
268 xnb[j++] = x[a[i]][XX];
269 xnb[j++] = x[a[i]][YY];
270 xnb[j++] = x[a[i]][ZZ];
272 /* Complete the partially filled last cell with copies of the last element.
273 * This simplifies the bounding box calculation and avoids
274 * numerical issues with atoms that are coincidentally close.
276 for (; i < na_round; i++)
278 xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
279 xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
280 xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);
285 for (i = 0; i < na; i++)
287 xnb[j++] = x[a[i]][XX];
288 xnb[j++] = x[a[i]][YY];
289 xnb[j++] = x[a[i]][ZZ];
292 /* Complete the partially filled last cell with particles far apart */
293 for (; i < na_round; i++)
295 xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
296 xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
297 xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);
303 c = a0 & (PACK_X4-1);
304 for (i = 0; i < na; i++)
306 xnb[j+XX*PACK_X4] = x[a[i]][XX];
307 xnb[j+YY*PACK_X4] = x[a[i]][YY];
308 xnb[j+ZZ*PACK_X4] = x[a[i]][ZZ];
313 j += (DIM-1)*PACK_X4;
317 /* Complete the partially filled last cell with particles far apart */
318 for (; i < na_round; i++)
320 xnb[j+XX*PACK_X4] = -NBAT_FAR_AWAY*(1 + cx);
321 xnb[j+YY*PACK_X4] = -NBAT_FAR_AWAY*(1 + cy);
322 xnb[j+ZZ*PACK_X4] = -NBAT_FAR_AWAY*(1 + cz + i);
327 j += (DIM-1)*PACK_X4;
334 c = a0 & (PACK_X8 - 1);
335 for (i = 0; i < na; i++)
337 xnb[j+XX*PACK_X8] = x[a[i]][XX];
338 xnb[j+YY*PACK_X8] = x[a[i]][YY];
339 xnb[j+ZZ*PACK_X8] = x[a[i]][ZZ];
344 j += (DIM-1)*PACK_X8;
348 /* Complete the partially filled last cell with particles far apart */
349 for (; i < na_round; i++)
351 xnb[j+XX*PACK_X8] = -NBAT_FAR_AWAY*(1 + cx);
352 xnb[j+YY*PACK_X8] = -NBAT_FAR_AWAY*(1 + cy);
353 xnb[j+ZZ*PACK_X8] = -NBAT_FAR_AWAY*(1 + cz + i);
358 j += (DIM-1)*PACK_X8;
364 gmx_incons("Unsupported nbnxn_atomdata_t format");
368 /* Stores the LJ parameter data in a format convenient for different kernels */
369 static void set_lj_parameter_data(nbnxn_atomdata_t *nbat, gmx_bool bSIMD)
378 /* nbfp_s4 stores two parameters using a stride of 4,
379 * because this would suit x86 SIMD single-precision
380 * quad-load intrinsics. There's a slight inefficiency in
381 * allocating and initializing nbfp_s4 when it might not
382 * be used, but introducing the conditional code is not
383 * really worth it. */
384 nbat->alloc((void **)&nbat->nbfp_s4, nt*nt*4*sizeof(*nbat->nbfp_s4));
385 for (i = 0; i < nt; i++)
387 for (j = 0; j < nt; j++)
389 nbat->nbfp_s4[(i*nt+j)*4+0] = nbat->nbfp[(i*nt+j)*2+0];
390 nbat->nbfp_s4[(i*nt+j)*4+1] = nbat->nbfp[(i*nt+j)*2+1];
391 nbat->nbfp_s4[(i*nt+j)*4+2] = 0;
392 nbat->nbfp_s4[(i*nt+j)*4+3] = 0;
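/* Resulting layout, for illustration: the type pair (i,j) occupies
 * 4 consecutive reals { c6, c12, 0, 0 }, so a single aligned 4-wide
 * load fetches both LJ parameters for a j atom type; the two trailing
 * zeros are padding only.
 */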
397 /* We use combination rule data for SIMD combination rule kernels
398 * and with LJ-PME kernels. We then only need parameters per atom type,
399 * not per pair of atom types.
401 switch (nbat->comb_rule)
404 nbat->comb_rule = ljcrGEOM;
406 for (i = 0; i < nt; i++)
408 /* Store the sqrt of the diagonal from the nbfp matrix */
409 nbat->nbfp_comb[i*2 ] = sqrt(nbat->nbfp[(i*nt+i)*2 ]);
410 nbat->nbfp_comb[i*2+1] = sqrt(nbat->nbfp[(i*nt+i)*2+1]);
414 for (i = 0; i < nt; i++)
416 /* Get 6*C6 and 12*C12 from the diagonal of the nbfp matrix */
417 c6 = nbat->nbfp[(i*nt+i)*2 ];
418 c12 = nbat->nbfp[(i*nt+i)*2+1];
419 if (c6 > 0 && c12 > 0)
421 * We store 0.5*2^(1/6)*sigma and sqrt(4*3*eps),
422 * so we get 6*C6 and 12*C12 after combining.
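/* A sketch of the algebra, assuming the usual definitions
 * c6 = 6*C6 = 24*eps*sigma^6 and c12 = 12*C12 = 48*eps*sigma^12:
 *   0.5*(c12/c6)^(1/6) = 0.5*2^(1/6)*sigma   and
 *   sqrt(c6*c6/c12)    = sqrt(12*eps) = sqrt(4*3*eps).
 * Combining two types i,j with the LB rule then gives
 *   (s_i + s_j)^6  * e_i*e_j = 2*sigma_ij^6  * 12*eps_ij =  6*C6_ij
 *   (s_i + s_j)^12 * e_i*e_j = 4*sigma_ij^12 * 12*eps_ij = 12*C12_ij
 * with s = 0.5*2^(1/6)*sigma, e = sqrt(12*eps),
 * sigma_ij = 0.5*(sigma_i + sigma_j) and eps_ij = sqrt(eps_i*eps_j).
 */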
424 nbat->nbfp_comb[i*2 ] = 0.5*pow(c12/c6, 1.0/6.0);
425 nbat->nbfp_comb[i*2+1] = sqrt(c6*c6/c12);
429 nbat->nbfp_comb[i*2 ] = 0;
430 nbat->nbfp_comb[i*2+1] = 0;
435 /* We always store the full matrix (see code above) */
438 gmx_incons("Unknown combination rule");
443 #ifdef GMX_NBNXN_SIMD
445 nbnxn_atomdata_init_simple_exclusion_masks(nbnxn_atomdata_t *nbat)
448 const int simd_width = GMX_SIMD_REAL_WIDTH;
450 /* Set the diagonal cluster pair exclusion mask setup data.
451 * In the kernel we check 0 < j - i to generate the masks.
452 * Here we store j - i for generating the mask for the first i,
453 * we subtract 0.5 to avoid rounding issues.
454 * In the kernel we can subtract 1 to generate the subsequent mask.
456 int simd_4xn_diag_size;
457 const real simdFalse = -1, simdTrue = 1;
458 real *simd_interaction_array;
460 simd_4xn_diag_size = max(NBNXN_CPU_CLUSTER_I_SIZE, simd_width);
461 snew_aligned(nbat->simd_4xn_diagonal_j_minus_i, simd_4xn_diag_size, NBNXN_MEM_ALIGN);
462 for (j = 0; j < simd_4xn_diag_size; j++)
464 nbat->simd_4xn_diagonal_j_minus_i[j] = j - 0.5;
467 snew_aligned(nbat->simd_2xnn_diagonal_j_minus_i, simd_width, NBNXN_MEM_ALIGN);
468 for (j = 0; j < simd_width/2; j++)
470 /* The j-cluster size is half the SIMD width */
471 nbat->simd_2xnn_diagonal_j_minus_i[j] = j - 0.5;
472 /* The next half of the SIMD width is for i + 1 */
473 nbat->simd_2xnn_diagonal_j_minus_i[simd_width/2+j] = j - 1 - 0.5;
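/* For example, assuming GMX_SIMD_REAL_WIDTH == 8: the 4xN array holds
 * { -0.5, 0.5, 1.5, ..., 6.5 } and the 2xNN array holds
 * { -0.5, 0.5, 1.5, 2.5, -1.5, -0.5, 0.5, 1.5 }, where the second half
 * repeats the offsets shifted by one for the i+1 row. Testing 0 < value
 * then selects exactly the j > i pairs for the first i, and subtracting
 * 1 advances the mask to the next i.
 */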
476 /* We use up to 32 bits for exclusion masking.
477 * The same masks are used for the 4xN and 2x(N+N) kernels.
478 * The masks are read either into epi32 SIMD registers or into
479 * real SIMD registers (together with a cast).
480 * In single precision this means the real and epi32 SIMD registers are the same size.
482 * In double precision the epi32 registers can be smaller than
483 * the real registers, so depending on the architecture, we might
484 * need to use two, identical, 32-bit masks per real.
486 simd_excl_size = NBNXN_CPU_CLUSTER_I_SIZE*simd_width;
487 snew_aligned(nbat->simd_exclusion_filter1, simd_excl_size, NBNXN_MEM_ALIGN);
488 snew_aligned(nbat->simd_exclusion_filter2, simd_excl_size*2, NBNXN_MEM_ALIGN);
490 for (j = 0; j < simd_excl_size; j++)
492 /* Set the consecutive bits for masking pair exclusions */
493 nbat->simd_exclusion_filter1[j] = (1U << j);
494 nbat->simd_exclusion_filter2[j*2 + 0] = (1U << j);
495 nbat->simd_exclusion_filter2[j*2 + 1] = (1U << j);
498 #if (defined GMX_SIMD_IBM_QPX)
499 /* The QPX kernels shouldn't do the bit masking that is done on
500 * x86, because the SIMD units lack bit-wise operations. Instead,
501 * we generate a vector of all 2^4 possible ways an i atom
502 * interacts with its 4 j atoms. Each array entry contains
503 * simd_width signed ints that are read in a single SIMD
504 * load. These ints must contain values that will be interpreted
505 * as true and false when loaded in the SIMD floating-point
506 * registers, i.e. any positive or any negative value,
507 * respectively. Each array entry encodes how this i atom will
508 * interact with the 4 j atoms. Matching code exists in
509 * set_ci_top_excls() to generate indices into this array. Those
510 * indices are used in the kernels. */
512 simd_excl_size = NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
513 const int qpx_simd_width = GMX_SIMD_REAL_WIDTH;
514 snew_aligned(simd_interaction_array, simd_excl_size * qpx_simd_width, NBNXN_MEM_ALIGN);
515 for (j = 0; j < simd_excl_size; j++)
517 int index = j * qpx_simd_width;
518 for (i = 0; i < qpx_simd_width; i++)
520 simd_interaction_array[index + i] = (j & (1 << i)) ? simdTrue : simdFalse;
523 nbat->simd_interaction_array = simd_interaction_array;
528 /* Initializes an nbnxn_atomdata_t data structure */
529 void nbnxn_atomdata_init(FILE *fp,
530 nbnxn_atomdata_t *nbat,
532 int enbnxninitcombrule,
533 int ntype, const real *nbfp,
536 nbnxn_alloc_t *alloc,
542 gmx_bool simple, bCombGeom, bCombLB, bSIMD;
546 nbat->alloc = nbnxn_alloc_aligned;
554 nbat->free = nbnxn_free_aligned;
563 fprintf(debug, "There are %d atom types in the system, adding one for nbnxn_atomdata_t\n", ntype);
565 nbat->ntype = ntype + 1;
566 nbat->alloc((void **)&nbat->nbfp,
567 nbat->ntype*nbat->ntype*2*sizeof(*nbat->nbfp));
568 nbat->alloc((void **)&nbat->nbfp_comb, nbat->ntype*2*sizeof(*nbat->nbfp_comb));
570 /* A tolerance of 1e-5 seems reasonable for (possibly hand-typed)
571 * force-field floating point parameters.
574 ptr = getenv("GMX_LJCOMB_TOL");
579 sscanf(ptr, "%lf", &dbl);
585 /* Temporarily fill nbat->nbfp_comb with sigma and epsilon
586 * to check for the LB rule.
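/* With C6 = 4*eps*sigma^6 and C12 = 4*eps*sigma^12 these invert to
 * sigma = (C12/C6)^(1/6) and eps = C6^2/(4*C12), which is what the loop
 * below stores after dividing out the 6.0/12.0 prefactors.
 */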
588 for (i = 0; i < ntype; i++)
590 c6 = nbfp[(i*ntype+i)*2 ]/6.0;
591 c12 = nbfp[(i*ntype+i)*2+1]/12.0;
592 if (c6 > 0 && c12 > 0)
594 nbat->nbfp_comb[i*2 ] = pow(c12/c6, 1.0/6.0);
595 nbat->nbfp_comb[i*2+1] = 0.25*c6*c6/c12;
597 else if (c6 == 0 && c12 == 0)
599 nbat->nbfp_comb[i*2 ] = 0;
600 nbat->nbfp_comb[i*2+1] = 0;
604 /* Cannot use the LB rule with only dispersion or only repulsion */
609 for (i = 0; i < nbat->ntype; i++)
611 for (j = 0; j < nbat->ntype; j++)
613 if (i < ntype && j < ntype)
615 /* fr->nbfp has been updated, so that array too now stores c6/c12 including
616 * the 6.0/12.0 prefactors to save 2 flops in the most common case (force-only).
618 c6 = nbfp[(i*ntype+j)*2 ];
619 c12 = nbfp[(i*ntype+j)*2+1];
620 nbat->nbfp[(i*nbat->ntype+j)*2 ] = c6;
621 nbat->nbfp[(i*nbat->ntype+j)*2+1] = c12;
623 /* Compare 6*C6 and 12*C12 for the geometric combination rule */
624 bCombGeom = bCombGeom &&
625 gmx_within_tol(c6*c6, nbfp[(i*ntype+i)*2 ]*nbfp[(j*ntype+j)*2 ], tol) &&
626 gmx_within_tol(c12*c12, nbfp[(i*ntype+i)*2+1]*nbfp[(j*ntype+j)*2+1], tol);
628 /* Compare C6 and C12 for Lorentz-Berthelot combination rule */
632 ((c6 == 0 && c12 == 0 &&
633 (nbat->nbfp_comb[i*2+1] == 0 || nbat->nbfp_comb[j*2+1] == 0)) ||
634 (c6 > 0 && c12 > 0 &&
635 gmx_within_tol(pow(c12/c6, 1.0/6.0), 0.5*(nbat->nbfp_comb[i*2]+nbat->nbfp_comb[j*2]), tol) &&
636 gmx_within_tol(0.25*c6*c6/c12, sqrt(nbat->nbfp_comb[i*2+1]*nbat->nbfp_comb[j*2+1]), tol)));
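/* i.e. the Lorentz-Berthelot rules sigma_ij = 0.5*(sigma_i + sigma_j) and
 * eps_ij = sqrt(eps_i*eps_j), with the per-type sigma and eps taken from
 * the values temporarily stored in nbfp_comb above.
 */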
640 /* Add zero parameters for the additional dummy atom type */
641 nbat->nbfp[(i*nbat->ntype+j)*2 ] = 0;
642 nbat->nbfp[(i*nbat->ntype+j)*2+1] = 0;
648 fprintf(debug, "Combination rules: geometric %d Lorentz-Berthelot %d\n",
652 simple = nbnxn_kernel_pairlist_simple(nb_kernel_type);
654 switch (enbnxninitcombrule)
656 case enbnxninitcombruleDETECT:
657 /* We prefer the geometric combination rule,
658 * as that gives a slightly faster kernel than the LB rule.
662 nbat->comb_rule = ljcrGEOM;
666 nbat->comb_rule = ljcrLB;
670 nbat->comb_rule = ljcrNONE;
672 nbat->free(nbat->nbfp_comb);
677 if (nbat->comb_rule == ljcrNONE)
679 fprintf(fp, "Using full Lennard-Jones parameter combination matrix\n\n");
683 fprintf(fp, "Using %s Lennard-Jones combination rule\n\n",
684 nbat->comb_rule == ljcrGEOM ? "geometric" : "Lorentz-Berthelot");
688 case enbnxninitcombruleGEOM:
689 nbat->comb_rule = ljcrGEOM;
691 case enbnxninitcombruleLB:
692 nbat->comb_rule = ljcrLB;
694 case enbnxninitcombruleNONE:
695 nbat->comb_rule = ljcrNONE;
697 nbat->free(nbat->nbfp_comb);
700 gmx_incons("Unknown enbnxninitcombrule");
703 bSIMD = (nb_kernel_type == nbnxnk4xN_SIMD_4xN ||
704 nb_kernel_type == nbnxnk4xN_SIMD_2xNN);
706 set_lj_parameter_data(nbat, bSIMD);
710 nbat->lj_comb = NULL;
717 pack_x = max(NBNXN_CPU_CLUSTER_I_SIZE,
718 nbnxn_kernel_to_cj_size(nb_kernel_type));
722 nbat->XFormat = nbatX4;
725 nbat->XFormat = nbatX8;
728 gmx_incons("Unsupported packing width");
733 nbat->XFormat = nbatXYZ;
736 nbat->FFormat = nbat->XFormat;
740 nbat->XFormat = nbatXYZQ;
741 nbat->FFormat = nbatXYZ;
744 nbat->nenergrp = n_energygroups;
747 /* Energy groups are not yet supported for super-sub lists */
748 if (n_energygroups > 1 && fp != NULL)
750 fprintf(fp, "\nNOTE: With GPUs, reporting energy group contributions is not supported\n\n");
754 /* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
755 if (nbat->nenergrp > 64)
757 gmx_fatal(FARGS, "With NxN kernels not more than 64 energy groups are supported\n");
760 while (nbat->nenergrp > (1<<nbat->neg_2log))
764 nbat->energrp = NULL;
765 nbat->alloc((void **)&nbat->shift_vec, SHIFTS*sizeof(*nbat->shift_vec));
766 nbat->xstride = (nbat->XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
767 nbat->fstride = (nbat->FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
770 #ifdef GMX_NBNXN_SIMD
773 nbnxn_atomdata_init_simple_exclusion_masks(nbat);
777 /* Initialize the output data structures */
779 snew(nbat->out, nbat->nout);
781 for (i = 0; i < nbat->nout; i++)
783 nbnxn_atomdata_output_init(&nbat->out[i],
785 nbat->nenergrp, 1<<nbat->neg_2log,
788 nbat->buffer_flags.flag = NULL;
789 nbat->buffer_flags.flag_nalloc = 0;
791 nth = gmx_omp_nthreads_get(emntNonbonded);
793 ptr = getenv("GMX_USE_TREEREDUCE");
796 nbat->bUseTreeReduce = strtol(ptr, 0, 10);
799 else if (nth > 8) /* on the CPU we currently don't benefit even at 32 */
801 nbat->bUseTreeReduce = 1;
806 nbat->bUseTreeReduce = 0;
808 if (nbat->bUseTreeReduce)
812 fprintf(fp, "Using tree force reduction\n\n");
814 snew(nbat->syncStep, nth);
818 static void copy_lj_to_nbat_lj_comb_x4(const real *ljparam_type,
819 const int *type, int na,
824 /* The LJ params follow the combination rule:
825 * copy the params from the type array to the atom array.
827 for (is = 0; is < na; is += PACK_X4)
829 for (k = 0; k < PACK_X4; k++)
832 ljparam_at[is*2 +k] = ljparam_type[type[i]*2 ];
833 ljparam_at[is*2+PACK_X4+k] = ljparam_type[type[i]*2+1];
838 static void copy_lj_to_nbat_lj_comb_x8(const real *ljparam_type,
839 const int *type, int na,
844 /* The LJ params follow the combination rule:
845 * copy the params from the type array to the atom array.
847 for (is = 0; is < na; is += PACK_X8)
849 for (k = 0; k < PACK_X8; k++)
852 ljparam_at[is*2 +k] = ljparam_type[type[i]*2 ];
853 ljparam_at[is*2+PACK_X8+k] = ljparam_type[type[i]*2+1];
858 /* Sets the atom type in nbnxn_atomdata_t */
859 static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t *nbat,
861 const nbnxn_search_t nbs,
865 const nbnxn_grid_t *grid;
867 for (g = 0; g < ngrid; g++)
869 grid = &nbs->grid[g];
871 /* Loop over all columns and copy and fill */
872 for (i = 0; i < grid->ncx*grid->ncy; i++)
874 ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
875 ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;
877 copy_int_to_nbat_int(nbs->a+ash, grid->cxy_na[i], ncz*grid->na_sc,
878 type, nbat->ntype-1, nbat->type+ash);
883 /* Sets the LJ combination rule parameters in nbnxn_atomdata_t */
884 static void nbnxn_atomdata_set_ljcombparams(nbnxn_atomdata_t *nbat,
886 const nbnxn_search_t nbs)
889 const nbnxn_grid_t *grid;
891 if (nbat->comb_rule != ljcrNONE)
893 for (g = 0; g < ngrid; g++)
895 grid = &nbs->grid[g];
897 /* Loop over all columns and copy and fill */
898 for (i = 0; i < grid->ncx*grid->ncy; i++)
900 ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
901 ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;
903 if (nbat->XFormat == nbatX4)
905 copy_lj_to_nbat_lj_comb_x4(nbat->nbfp_comb,
906 nbat->type+ash, ncz*grid->na_sc,
907 nbat->lj_comb+ash*2);
909 else if (nbat->XFormat == nbatX8)
911 copy_lj_to_nbat_lj_comb_x8(nbat->nbfp_comb,
912 nbat->type+ash, ncz*grid->na_sc,
913 nbat->lj_comb+ash*2);
920 /* Sets the charges in nbnxn_atomdata_t *nbat */
921 static void nbnxn_atomdata_set_charges(nbnxn_atomdata_t *nbat,
923 const nbnxn_search_t nbs,
926 int g, cxy, ncz, ash, na, na_round, i, j;
928 const nbnxn_grid_t *grid;
930 for (g = 0; g < ngrid; g++)
932 grid = &nbs->grid[g];
934 /* Loop over all columns and copy and fill */
935 for (cxy = 0; cxy < grid->ncx*grid->ncy; cxy++)
937 ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
938 na = grid->cxy_na[cxy];
939 na_round = (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;
941 if (nbat->XFormat == nbatXYZQ)
943 q = nbat->x + ash*STRIDE_XYZQ + ZZ + 1;
944 for (i = 0; i < na; i++)
946 *q = charge[nbs->a[ash+i]];
949 /* Complete the partially filled last cell with zeros */
950 for (; i < na_round; i++)
959 for (i = 0; i < na; i++)
961 *q = charge[nbs->a[ash+i]];
964 /* Complete the partially filled last cell with zeros */
965 for (; i < na_round; i++)
975 /* Set the charges of perturbed atoms in nbnxn_atomdata_t to 0.
976 * This is to automatically remove the RF/PME self term in the nbnxn kernels.
977 * Some of these zero interactions are still calculated in the normal kernels.
978 * All perturbed interactions are calculated in the free energy kernel,
979 * using the original charge and LJ data, not nbnxn_atomdata_t.
981 static void nbnxn_atomdata_mask_fep(nbnxn_atomdata_t *nbat,
983 const nbnxn_search_t nbs)
986 int stride_q, g, nsubc, c_offset, c, subc, i, ind;
987 const nbnxn_grid_t *grid;
989 if (nbat->XFormat == nbatXYZQ)
991 q = nbat->x + ZZ + 1;
992 stride_q = STRIDE_XYZQ;
1000 for (g = 0; g < ngrid; g++)
1002 grid = &nbs->grid[g];
1009 nsubc = GPU_NSUBCELL;
1012 c_offset = grid->cell0*grid->na_sc;
1014 /* Loop over all columns and copy and fill */
1015 for (c = 0; c < grid->nc*nsubc; c++)
1017 /* Does this cluster contain perturbed particles? */
1018 if (grid->fep[c] != 0)
1020 for (i = 0; i < grid->na_c; i++)
1022 /* Is this a perturbed particle? */
1023 if (grid->fep[c] & (1 << i))
1025 ind = c_offset + c*grid->na_c + i;
1026 /* Set atom type and charge to non-interacting */
1027 nbat->type[ind] = nbat->ntype - 1;
1028 q[ind*stride_q] = 0;
1036 /* Copies the energy group indices to a reordered and packed array */
1037 static void copy_egp_to_nbat_egps(const int *a, int na, int na_round,
1038 int na_c, int bit_shift,
1039 const int *in, int *innb)
1045 for (i = 0; i < na; i += na_c)
1047 /* Store na_c energy group numbers into one int */
1049 for (sa = 0; sa < na_c; sa++)
1054 comb |= (GET_CGINFO_GID(in[at]) << (sa*bit_shift));
1059 /* Complete the partially filled last cell with fill */
1060 for (; i < na_round; i += na_c)
1066 /* Set the energy group indices for atoms in nbnxn_atomdata_t */
1067 static void nbnxn_atomdata_set_energygroups(nbnxn_atomdata_t *nbat,
1069 const nbnxn_search_t nbs,
1073 const nbnxn_grid_t *grid;
1075 if (nbat->nenergrp == 1)
1080 for (g = 0; g < ngrid; g++)
1082 grid = &nbs->grid[g];
1084 /* Loop over all columns and copy and fill */
1085 for (i = 0; i < grid->ncx*grid->ncy; i++)
1087 ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
1088 ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;
1090 copy_egp_to_nbat_egps(nbs->a+ash, grid->cxy_na[i], ncz*grid->na_sc,
1091 nbat->na_c, nbat->neg_2log,
1092 atinfo, nbat->energrp+(ash>>grid->na_c_2log));
1097 /* Sets all required atom parameter data in nbnxn_atomdata_t */
1098 void nbnxn_atomdata_set(nbnxn_atomdata_t *nbat,
1100 const nbnxn_search_t nbs,
1101 const t_mdatoms *mdatoms,
1106 if (locality == eatLocal)
1115 nbnxn_atomdata_set_atomtypes(nbat, ngrid, nbs, mdatoms->typeA);
1117 nbnxn_atomdata_set_charges(nbat, ngrid, nbs, mdatoms->chargeA);
1121 nbnxn_atomdata_mask_fep(nbat, ngrid, nbs);
1124 /* This must be done after masking types for FEP */
1125 nbnxn_atomdata_set_ljcombparams(nbat, ngrid, nbs);
1127 nbnxn_atomdata_set_energygroups(nbat, ngrid, nbs, atinfo);
1130 /* Copies the shift vector array to nbnxn_atomdata_t */
1131 void nbnxn_atomdata_copy_shiftvec(gmx_bool bDynamicBox,
1133 nbnxn_atomdata_t *nbat)
1137 nbat->bDynamicBox = bDynamicBox;
1138 for (i = 0; i < SHIFTS; i++)
1140 copy_rvec(shift_vec[i], nbat->shift_vec[i]);
1144 /* Copies (and reorders) the coordinates to nbnxn_atomdata_t */
1145 void nbnxn_atomdata_copy_x_to_nbat_x(const nbnxn_search_t nbs,
1149 nbnxn_atomdata_t *nbat)
1172 nbat->natoms_local = nbs->grid[0].nc*nbs->grid[0].na_sc;
1175 nth = gmx_omp_nthreads_get(emntPairsearch);
1177 #pragma omp parallel for num_threads(nth) schedule(static)
1178 for (th = 0; th < nth; th++)
1182 for (g = g0; g < g1; g++)
1184 const nbnxn_grid_t *grid;
1185 int cxy0, cxy1, cxy;
1187 grid = &nbs->grid[g];
1189 cxy0 = (grid->ncx*grid->ncy* th +nth-1)/nth;
1190 cxy1 = (grid->ncx*grid->ncy*(th+1)+nth-1)/nth;
1192 for (cxy = cxy0; cxy < cxy1; cxy++)
1194 int na, ash, na_fill;
1196 na = grid->cxy_na[cxy];
1197 ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
1199 if (g == 0 && FillLocal)
1202 (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;
1206 /* We fill only the real particle locations.
1207 * We assume the filling entries at the end have been
1208 * set properly earlier, during neighbor searching (ns).
1212 copy_rvec_to_nbat_real(nbs->a+ash, na, na_fill, x,
1213 nbat->XFormat, nbat->x, ash,
1221 nbnxn_atomdata_clear_reals(real * gmx_restrict dest,
1226 for (i = i0; i < i1; i++)
1233 nbnxn_atomdata_reduce_reals(real * gmx_restrict dest,
1235 real ** gmx_restrict src,
1243 /* The destination buffer contains data, add to it */
1244 for (i = i0; i < i1; i++)
1246 for (s = 0; s < nsrc; s++)
1248 dest[i] += src[s][i];
1254 /* The destination buffer is uninitialized, set it first */
1255 for (i = i0; i < i1; i++)
1257 dest[i] = src[0][i];
1258 for (s = 1; s < nsrc; s++)
1260 dest[i] += src[s][i];
1267 nbnxn_atomdata_reduce_reals_simd(real gmx_unused * gmx_restrict dest,
1268 gmx_bool gmx_unused bDestSet,
1269 real gmx_unused ** gmx_restrict src,
1270 int gmx_unused nsrc,
1271 int gmx_unused i0, int gmx_unused i1)
1273 #ifdef GMX_NBNXN_SIMD
1274 /* The SIMD width here is actually independent of that in the kernels,
1275 * but we use the same width for simplicity (usually optimal anyhow).
1278 gmx_simd_real_t dest_SSE, src_SSE;
1282 for (i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
1284 dest_SSE = gmx_simd_load_r(dest+i);
1285 for (s = 0; s < nsrc; s++)
1287 src_SSE = gmx_simd_load_r(src[s]+i);
1288 dest_SSE = gmx_simd_add_r(dest_SSE, src_SSE);
1290 gmx_simd_store_r(dest+i, dest_SSE);
1295 for (i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
1297 dest_SSE = gmx_simd_load_r(src[0]+i);
1298 for (s = 1; s < nsrc; s++)
1300 src_SSE = gmx_simd_load_r(src[s]+i);
1301 dest_SSE = gmx_simd_add_r(dest_SSE, src_SSE);
1303 gmx_simd_store_r(dest+i, dest_SSE);
1309 /* Add part of the force array(s) from nbnxn_atomdata_t to f */
1311 nbnxn_atomdata_add_nbat_f_to_f_part(const nbnxn_search_t nbs,
1312 const nbnxn_atomdata_t *nbat,
1313 nbnxn_atomdata_output_t *out,
1324 /* Loop over all columns and copy and fill */
1325 switch (nbat->FFormat)
1333 for (a = a0; a < a1; a++)
1335 i = cell[a]*nbat->fstride;
1338 f[a][YY] += fnb[i+1];
1339 f[a][ZZ] += fnb[i+2];
1344 for (a = a0; a < a1; a++)
1346 i = cell[a]*nbat->fstride;
1348 for (fa = 0; fa < nfa; fa++)
1350 f[a][XX] += out[fa].f[i];
1351 f[a][YY] += out[fa].f[i+1];
1352 f[a][ZZ] += out[fa].f[i+2];
1362 for (a = a0; a < a1; a++)
1364 i = X4_IND_A(cell[a]);
1366 f[a][XX] += fnb[i+XX*PACK_X4];
1367 f[a][YY] += fnb[i+YY*PACK_X4];
1368 f[a][ZZ] += fnb[i+ZZ*PACK_X4];
1373 for (a = a0; a < a1; a++)
1375 i = X4_IND_A(cell[a]);
1377 for (fa = 0; fa < nfa; fa++)
1379 f[a][XX] += out[fa].f[i+XX*PACK_X4];
1380 f[a][YY] += out[fa].f[i+YY*PACK_X4];
1381 f[a][ZZ] += out[fa].f[i+ZZ*PACK_X4];
1391 for (a = a0; a < a1; a++)
1393 i = X8_IND_A(cell[a]);
1395 f[a][XX] += fnb[i+XX*PACK_X8];
1396 f[a][YY] += fnb[i+YY*PACK_X8];
1397 f[a][ZZ] += fnb[i+ZZ*PACK_X8];
1402 for (a = a0; a < a1; a++)
1404 i = X8_IND_A(cell[a]);
1406 for (fa = 0; fa < nfa; fa++)
1408 f[a][XX] += out[fa].f[i+XX*PACK_X8];
1409 f[a][YY] += out[fa].f[i+YY*PACK_X8];
1410 f[a][ZZ] += out[fa].f[i+ZZ*PACK_X8];
1416 gmx_incons("Unsupported nbnxn_atomdata_t format");
1420 static gmx_inline unsigned char reverse_bits(unsigned char b)
1422 /* http://graphics.stanford.edu/~seander/bithacks.html#ReverseByteWith64BitsDiv */
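/* For example, reverse_bits(0x01) == 0x80 and reverse_bits(0x03) == 0xc0:
 * the multiplication spreads shifted copies of the byte, the mask selects
 * one bit from each copy, and the modulo by 1023 folds them together into
 * the bit-reversed byte.
 */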
1423 return (b * 0x0202020202ULL & 0x010884422010ULL) % 1023;
1426 static void nbnxn_atomdata_add_nbat_f_to_f_treereduce(const nbnxn_atomdata_t *nbat,
1429 const nbnxn_buffer_flags_t *flags = &nbat->buffer_flags;
1431 int next_pow2 = 1<<(gmx_log2i(nth-1)+1);
1433 assert(nbat->nout == nth); /* tree-reduce currently only works for nout==nth */
1435 memset(nbat->syncStep, 0, sizeof(*(nbat->syncStep))*nth);
1437 #pragma omp parallel num_threads(nth)
1443 th = gmx_omp_get_thread_num();
1445 for (group_size = 2; group_size < 2*next_pow2; group_size *= 2)
1447 int index[2], group_pos, partner_pos, wu;
1448 int partner_th = th ^ (group_size/2);
1453 /* wait on partner thread - replaces full barrier */
1454 int sync_th, sync_group_size;
1456 tMPI_Atomic_memory_barrier(); /* guarantee data is saved before marking work as done */
1457 tMPI_Atomic_set(&(nbat->syncStep[th]), group_size/2); /* mark previous step as completed */
1459 /* find thread to sync with. Equal to partner_th unless nth is not a power of two. */
1460 for (sync_th = partner_th, sync_group_size = group_size; sync_th >= nth && sync_group_size > 2; sync_group_size /= 2)
1462 sync_th &= ~(sync_group_size/4);
1464 if (sync_th < nth) /* otherwise there is nothing to sync; index[1] will be >= nout */
1466 /* wait on the thread which computed input data in previous step */
1467 while (tMPI_Atomic_get((volatile tMPI_Atomic_t*)&(nbat->syncStep[sync_th])) < group_size/2)
1471 /* guarantee that no later load happens before the wait loop is finished */
1472 tMPI_Atomic_memory_barrier();
1474 #else /* TMPI_ATOMICS */
1479 /* Calculate buffers to sum (result goes into first buffer) */
1480 group_pos = th % group_size;
1481 index[0] = th - group_pos;
1482 index[1] = index[0] + group_size/2;
1484 /* If no second buffer, nothing to do */
1485 if (index[1] >= nbat->nout && group_size > 2)
1490 #if NBNXN_BUFFERFLAG_MAX_THREADS > 256
1491 #error reverse_bits assumes max 256 threads
1493 /* Position is permuted so that one of the 2 vectors being added was computed on the same thread in the previous step.
1494 This improves locality and makes it possible to sync with just a single thread between steps (= the levels of the binary tree).
1495 The permutation which allows this corresponds to reversing the bits of the group position.
1497 group_pos = reverse_bits(group_pos)/(256/group_size);
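/* E.g. for group_size == 8, reverse_bits(group_pos)/(256/8) is the 3-bit
 * reversal of group_pos, mapping positions 0..7 to 0,4,2,6,1,5,3,7.
 */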
1499 partner_pos = group_pos ^ 1;
1501 /* loop over two work-units (own and partner) */
1502 for (wu = 0; wu < 2; wu++)
1506 if (partner_th < nth)
1508 break; /* the partner exists, so we don't have to do its work */
1512 group_pos = partner_pos;
1516 /* Calculate the cell-block range for our thread */
1517 b0 = (flags->nflag* group_pos )/group_size;
1518 b1 = (flags->nflag*(group_pos+1))/group_size;
1520 for (b = b0; b < b1; b++)
1522 i0 = b *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
1523 i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
1525 if ((flags->flag[b] & (1ULL<<index[1])) || group_size > 2)
1527 #ifdef GMX_NBNXN_SIMD
1528 nbnxn_atomdata_reduce_reals_simd
1530 nbnxn_atomdata_reduce_reals
1532 (nbat->out[index[0]].f,
1533 (flags->flag[b] & (1ULL<<index[0])) || group_size > 2,
1534 &(nbat->out[index[1]].f), 1, i0, i1);
1537 else if (!(flags->flag[b] & (1ULL<<index[0])))
1539 nbnxn_atomdata_clear_reals(nbat->out[index[0]].f,
1549 static void nbnxn_atomdata_add_nbat_f_to_f_stdreduce(const nbnxn_atomdata_t *nbat,
1553 #pragma omp parallel for num_threads(nth) schedule(static)
1554 for (th = 0; th < nth; th++)
1556 const nbnxn_buffer_flags_t *flags;
1560 real *fptr[NBNXN_BUFFERFLAG_MAX_THREADS];
1563 flags = &nbat->buffer_flags;
1565 /* Calculate the cell-block range for our thread */
1566 b0 = (flags->nflag* th )/nth;
1567 b1 = (flags->nflag*(th+1))/nth;
1569 for (b = b0; b < b1; b++)
1571 i0 = b *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
1572 i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
1575 for (out = 1; out < nbat->nout; out++)
1577 if (flags->flag[b] & (1U<<out))
1579 fptr[nfptr++] = nbat->out[out].f;
1584 #ifdef GMX_NBNXN_SIMD
1585 nbnxn_atomdata_reduce_reals_simd
1587 nbnxn_atomdata_reduce_reals
1590 flags->flag[b] & (1U<<0),
1594 else if (!(flags->flag[b] & (1U<<0)))
1596 nbnxn_atomdata_clear_reals(nbat->out[0].f,
1603 /* Add the force array(s) from nbnxn_atomdata_t to f */
1604 void nbnxn_atomdata_add_nbat_f_to_f(const nbnxn_search_t nbs,
1606 const nbnxn_atomdata_t *nbat,
1612 nbs_cycle_start(&nbs->cc[enbsCCreducef]);
1618 na = nbs->natoms_nonlocal;
1622 na = nbs->natoms_local;
1625 a0 = nbs->natoms_local;
1626 na = nbs->natoms_nonlocal - nbs->natoms_local;
1630 nth = gmx_omp_nthreads_get(emntNonbonded);
1634 if (locality != eatAll)
1636 gmx_incons("add_f_to_f called with nout>1 and locality!=eatAll");
1639 /* Reduce the force thread output buffers into buffer 0, before adding
1640 * them to the differently ordered "real" force buffer.
1642 if (nbat->bUseTreeReduce)
1644 nbnxn_atomdata_add_nbat_f_to_f_treereduce(nbat, nth);
1648 nbnxn_atomdata_add_nbat_f_to_f_stdreduce(nbat, nth);
1651 #pragma omp parallel for num_threads(nth) schedule(static)
1652 for (th = 0; th < nth; th++)
1654 nbnxn_atomdata_add_nbat_f_to_f_part(nbs, nbat,
1662 nbs_cycle_stop(&nbs->cc[enbsCCreducef]);
1665 /* Adds the shift forces from nbnxn_atomdata_t to fshift */
1666 void nbnxn_atomdata_add_nbat_fshift_to_fshift(const nbnxn_atomdata_t *nbat,
1669 const nbnxn_atomdata_output_t *out;
1676 for (s = 0; s < SHIFTS; s++)
1679 for (th = 0; th < nbat->nout; th++)
1681 sum[XX] += out[th].fshift[s*DIM+XX];
1682 sum[YY] += out[th].fshift[s*DIM+YY];
1683 sum[ZZ] += out[th].fshift[s*DIM+ZZ];
1685 rvec_inc(fshift[s], sum);