/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "nbnxn_atomdata.h"

#include "thread_mpi/atomic.h"

#include "gromacs/legacyheaders/gmx_omp_nthreads.h"
#include "gromacs/legacyheaders/macros.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_internal.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"
/* Default nbnxn allocation routine, allocates NBNXN_MEM_ALIGN byte aligned */
void nbnxn_alloc_aligned(void **ptr, size_t nbytes)
    *ptr = save_malloc_aligned("ptr", __FILE__, __LINE__, nbytes, 1, NBNXN_MEM_ALIGN);

/* Free function for memory allocated with nbnxn_alloc_aligned */
void nbnxn_free_aligned(void *ptr)
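/* Note: nbnxn_atomdata_init() below assigns these two routines to
 * nbat->alloc and nbat->free, so the aligned nbnxn_atomdata_t buffers
 * are obtained and released through this pair.
 */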
/* Reallocation wrapper function for nbnxn data structures */
void nbnxn_realloc_void(void **ptr,
                        int nbytes_copy, int nbytes_new,

    ma(&ptr_new, nbytes_new);

    if (nbytes_new > 0 && ptr_new == NULL)
        gmx_fatal(FARGS, "Allocation of %d bytes failed", nbytes_new);

        if (nbytes_new < nbytes_copy)
            gmx_incons("In nbnxn_realloc_void: new size less than copy size");
        memcpy(ptr_new, *ptr, nbytes_copy);
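/* The wrapper above provides realloc-style semantics for the nbnxn allocator
 * pair: a buffer of nbytes_new bytes is obtained through ma, the call aborts
 * if a non-zero-size allocation failed, and the first nbytes_copy bytes of
 * the old data are copied into the new buffer before *ptr is switched over
 * to it.
 */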
/* Reallocate the nbnxn_atomdata_t for a size of n atoms */
void nbnxn_atomdata_realloc(nbnxn_atomdata_t *nbat, int n)

    nbnxn_realloc_void((void **)&nbat->type,
                       nbat->natoms*sizeof(*nbat->type),
                       n*sizeof(*nbat->type),
                       nbat->alloc, nbat->free);
    nbnxn_realloc_void((void **)&nbat->lj_comb,
                       nbat->natoms*2*sizeof(*nbat->lj_comb),
                       n*2*sizeof(*nbat->lj_comb),
                       nbat->alloc, nbat->free);
    if (nbat->XFormat != nbatXYZQ)
        nbnxn_realloc_void((void **)&nbat->q,
                           nbat->natoms*sizeof(*nbat->q),
                           nbat->alloc, nbat->free);
    if (nbat->nenergrp > 1)
        nbnxn_realloc_void((void **)&nbat->energrp,
                           nbat->natoms/nbat->na_c*sizeof(*nbat->energrp),
                           n/nbat->na_c*sizeof(*nbat->energrp),
                           nbat->alloc, nbat->free);
    nbnxn_realloc_void((void **)&nbat->x,
                       nbat->natoms*nbat->xstride*sizeof(*nbat->x),
                       n*nbat->xstride*sizeof(*nbat->x),
                       nbat->alloc, nbat->free);
    for (t = 0; t < nbat->nout; t++)
        /* Allocate one element extra for possible signaling with CUDA */
        nbnxn_realloc_void((void **)&nbat->out[t].f,
                           nbat->natoms*nbat->fstride*sizeof(*nbat->out[t].f),
                           n*nbat->fstride*sizeof(*nbat->out[t].f),
                           nbat->alloc, nbat->free);
/* Initializes an nbnxn_atomdata_output_t data structure */
static void nbnxn_atomdata_output_init(nbnxn_atomdata_output_t *out,
                                       int nenergrp, int stride,

    ma((void **)&out->fshift, SHIFTS*DIM*sizeof(*out->fshift));
    out->nV = nenergrp*nenergrp;
    ma((void **)&out->Vvdw, out->nV*sizeof(*out->Vvdw));
    ma((void **)&out->Vc,   out->nV*sizeof(*out->Vc));

    if (nb_kernel_type == nbnxnk4xN_SIMD_4xN ||
        nb_kernel_type == nbnxnk4xN_SIMD_2xNN)
        cj_size  = nbnxn_kernel_to_cj_size(nb_kernel_type);
        out->nVS = nenergrp*nenergrp*stride*(cj_size>>1)*cj_size;
        ma((void **)&out->VSvdw, out->nVS*sizeof(*out->VSvdw));
        ma((void **)&out->VSc,   out->nVS*sizeof(*out->VSc));
static void copy_int_to_nbat_int(const int *a, int na, int na_round,
                                 const int *in, int fill, int *innb)

    for (i = 0; i < na; i++)
        innb[j++] = in[a[i]];

    /* Complete the partially filled last cell with fill */
    for (; i < na_round; i++)
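/* Note on the packed nbatX4/nbatX8 layouts used below: coordinates are
 * stored in groups of PACK_X4 (or PACK_X8) atoms, with all x values of a
 * group stored first, then all y values, then all z values, which is why
 * the components are addressed as xnb[j+XX*PACK_X4], xnb[j+YY*PACK_X4]
 * and xnb[j+ZZ*PACK_X4].
 */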
static void clear_nbat_real(int na, int nbatFormat, real *xnb, int a0)

            for (a = 0; a < na; a++)
                for (d = 0; d < DIM; d++)
                    xnb[(a0+a)*STRIDE_XYZ+d] = 0;

            for (a = 0; a < na; a++)
                for (d = 0; d < DIM; d++)
                    xnb[(a0+a)*STRIDE_XYZQ+d] = 0;

            c = a0 & (PACK_X4-1);
            for (a = 0; a < na; a++)
                xnb[j+XX*PACK_X4] = 0;
                xnb[j+YY*PACK_X4] = 0;
                xnb[j+ZZ*PACK_X4] = 0;

                    j += (DIM-1)*PACK_X4;

            c = a0 & (PACK_X8-1);
            for (a = 0; a < na; a++)
                xnb[j+XX*PACK_X8] = 0;
                xnb[j+YY*PACK_X8] = 0;
                xnb[j+ZZ*PACK_X8] = 0;

                    j += (DIM-1)*PACK_X8;
void copy_rvec_to_nbat_real(const int *a, int na, int na_round,
                            rvec *x, int nbatFormat, real *xnb, int a0,
                            int cx, int cy, int cz)

    /* We might need to place filler particles to fill up the cell to na_round.
     * The coefficients (LJ and q) for such particles are zero.
     * But we might still get NaN as 0*NaN when distances are too small.
     * We hope that -107 nm is far enough away from zero
     * to avoid accidental short distances to particles shifted down for pbc.
     */
#define NBAT_FAR_AWAY 107
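/* Example: filler atom i in the grid column with cell indices
 * (cx, cy, cz) = (0, 0, 0) gets the coordinates (-107, -107, -107*(1+i)) nm,
 * so fillers never coincide with each other or with real atoms.
 */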
            for (i = 0; i < na; i++)
                xnb[j++] = x[a[i]][XX];
                xnb[j++] = x[a[i]][YY];
                xnb[j++] = x[a[i]][ZZ];

            /* Complete the partially filled last cell with filler particles
             * placed far apart. This simplifies the bounding box calculation
             * and avoids numerical issues with atoms that are coincidentally close.
             */
            for (; i < na_round; i++)
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);

            for (i = 0; i < na; i++)
                xnb[j++] = x[a[i]][XX];
                xnb[j++] = x[a[i]][YY];
                xnb[j++] = x[a[i]][ZZ];

            /* Complete the partially filled last cell with particles far apart */
            for (; i < na_round; i++)
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);

            c = a0 & (PACK_X4-1);
            for (i = 0; i < na; i++)
                xnb[j+XX*PACK_X4] = x[a[i]][XX];
                xnb[j+YY*PACK_X4] = x[a[i]][YY];
                xnb[j+ZZ*PACK_X4] = x[a[i]][ZZ];

                    j += (DIM-1)*PACK_X4;

            /* Complete the partially filled last cell with particles far apart */
            for (; i < na_round; i++)
                xnb[j+XX*PACK_X4] = -NBAT_FAR_AWAY*(1 + cx);
                xnb[j+YY*PACK_X4] = -NBAT_FAR_AWAY*(1 + cy);
                xnb[j+ZZ*PACK_X4] = -NBAT_FAR_AWAY*(1 + cz + i);

                    j += (DIM-1)*PACK_X4;

            c = a0 & (PACK_X8 - 1);
            for (i = 0; i < na; i++)
                xnb[j+XX*PACK_X8] = x[a[i]][XX];
                xnb[j+YY*PACK_X8] = x[a[i]][YY];
                xnb[j+ZZ*PACK_X8] = x[a[i]][ZZ];

                    j += (DIM-1)*PACK_X8;

            /* Complete the partially filled last cell with particles far apart */
            for (; i < na_round; i++)
                xnb[j+XX*PACK_X8] = -NBAT_FAR_AWAY*(1 + cx);
                xnb[j+YY*PACK_X8] = -NBAT_FAR_AWAY*(1 + cy);
                xnb[j+ZZ*PACK_X8] = -NBAT_FAR_AWAY*(1 + cz + i);

                    j += (DIM-1)*PACK_X8;

            gmx_incons("Unsupported nbnxn_atomdata_t format");
/* Stores the LJ parameter data in a format convenient for different kernels */
static void set_lj_parameter_data(nbnxn_atomdata_t *nbat, gmx_bool bSIMD)

        /* nbfp_s4 stores two parameters using a stride of 4,
         * because this would suit x86 SIMD single-precision
         * quad-load intrinsics. There's a slight inefficiency in
         * allocating and initializing nbfp_s4 when it might not
         * be used, but introducing the conditional code is not
         * really worth it. */
        nbat->alloc((void **)&nbat->nbfp_s4, nt*nt*4*sizeof(*nbat->nbfp_s4));
        for (i = 0; i < nt; i++)
            for (j = 0; j < nt; j++)
                nbat->nbfp_s4[(i*nt+j)*4+0] = nbat->nbfp[(i*nt+j)*2+0];
                nbat->nbfp_s4[(i*nt+j)*4+1] = nbat->nbfp[(i*nt+j)*2+1];
                nbat->nbfp_s4[(i*nt+j)*4+2] = 0;
                nbat->nbfp_s4[(i*nt+j)*4+3] = 0;
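        /* The resulting nbfp_s4 entry for each type pair (i,j) is the
         * quadruplet { c6, c12, 0, 0 }, so one aligned 4-wide load fetches
         * both LJ parameters plus two padding zeros.
         */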
    /* We use combination rule data for SIMD combination rule kernels
     * and with LJ-PME kernels. We then only need parameters per atom type,
     * not per pair of atom types.
     */
    switch (nbat->comb_rule)
            nbat->comb_rule = ljcrGEOM;

            for (i = 0; i < nt; i++)
                /* Store the sqrt of the diagonal from the nbfp matrix */
                nbat->nbfp_comb[i*2  ] = sqrt(nbat->nbfp[(i*nt+i)*2  ]);
                nbat->nbfp_comb[i*2+1] = sqrt(nbat->nbfp[(i*nt+i)*2+1]);

            for (i = 0; i < nt; i++)
                /* Get 6*C6 and 12*C12 from the diagonal of the nbfp matrix */
                c6  = nbat->nbfp[(i*nt+i)*2  ];
                c12 = nbat->nbfp[(i*nt+i)*2+1];
                if (c6 > 0 && c12 > 0)
                    /* We store 0.5*2^(1/6)*sigma and sqrt(4*3*eps),
                     * so we get 6*C6 and 12*C12 after combining.
                     */
                    nbat->nbfp_comb[i*2  ] = 0.5*pow(c12/c6, 1.0/6.0);
                    nbat->nbfp_comb[i*2+1] = sqrt(c6*c6/c12);

                    nbat->nbfp_comb[i*2  ] = 0;
                    nbat->nbfp_comb[i*2+1] = 0;
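                    /* Consistency check of the factors above, assuming the LB
                     * kernels combine these per-type values as a sum for sigma
                     * and a product for epsilon: with s = 0.5*2^(1/6)*sigma and
                     * e = sqrt(12*eps) we get, for i == j, (s+s)^6 = 2*sigma^6
                     * and e*e = 12*eps, so (e*e)*(s+s)^6 = 24*eps*sigma^6 = 6*C6
                     * and (e*e)*((s+s)^6)^2 = 48*eps*sigma^12 = 12*C12, matching
                     * the 6*C6/12*C12 convention used in nbfp.
                     */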
            /* We always store the full matrix (see code above) */

            gmx_incons("Unknown combination rule");
#ifdef GMX_NBNXN_SIMD
nbnxn_atomdata_init_simple_exclusion_masks(nbnxn_atomdata_t *nbat)

    const int simd_width = GMX_SIMD_REAL_WIDTH;

    /* Set the diagonal cluster pair exclusion mask setup data.
     * In the kernel we check 0 < j - i to generate the masks.
     * Here we store j - i for generating the mask for the first i,
     * we subtract 0.5 to avoid rounding issues.
     * In the kernel we can subtract 1 to generate the subsequent mask.
     */
    int        simd_4xn_diag_size;
    const real simdFalse = -1, simdTrue = 1;
    real      *simd_interaction_array;

    simd_4xn_diag_size = max(NBNXN_CPU_CLUSTER_I_SIZE, simd_width);
    snew_aligned(nbat->simd_4xn_diagonal_j_minus_i, simd_4xn_diag_size, NBNXN_MEM_ALIGN);
    for (j = 0; j < simd_4xn_diag_size; j++)
        nbat->simd_4xn_diagonal_j_minus_i[j] = j - 0.5;

    snew_aligned(nbat->simd_2xnn_diagonal_j_minus_i, simd_width, NBNXN_MEM_ALIGN);
    for (j = 0; j < simd_width/2; j++)
        /* The j-cluster size is half the SIMD width */
        nbat->simd_2xnn_diagonal_j_minus_i[j]              = j - 0.5;
        /* The next half of the SIMD width is for i + 1 */
        nbat->simd_2xnn_diagonal_j_minus_i[simd_width/2+j] = j - 1 - 0.5;

    /* We use up to 32 bits for exclusion masking.
     * The same masks are used for the 4xN and 2x(N+N) kernels.
     * The masks are read either into epi32 SIMD registers or into
     * real SIMD registers (together with a cast).
     * In single precision this means the real and epi32 SIMD registers
     * are of equal size.
     * In double precision the epi32 registers can be smaller than
     * the real registers, so depending on the architecture, we might
     * need to use two, identical, 32-bit masks per real.
     */
    simd_excl_size = NBNXN_CPU_CLUSTER_I_SIZE*simd_width;
    snew_aligned(nbat->simd_exclusion_filter1, simd_excl_size, NBNXN_MEM_ALIGN);
    snew_aligned(nbat->simd_exclusion_filter2, simd_excl_size*2, NBNXN_MEM_ALIGN);

    for (j = 0; j < simd_excl_size; j++)
        /* Set the consecutive bits for masking pair exclusions */
        nbat->simd_exclusion_filter1[j]       = (1U << j);
        nbat->simd_exclusion_filter2[j*2 + 0] = (1U << j);
        nbat->simd_exclusion_filter2[j*2 + 1] = (1U << j);

#if (defined GMX_SIMD_IBM_QPX)
    /* The QPX kernels shouldn't do the bit masking that is done on
     * x86, because the SIMD units lack bit-wise operations. Instead,
     * we generate a vector of all 2^4 possible ways an i atom
     * interacts with its 4 j atoms. Each array entry contains
     * simd_width signed ints that are read in a single SIMD
     * load. These ints must contain values that will be interpreted
     * as true and false when loaded in the SIMD floating-point
     * registers, i.e. any positive or any negative value,
     * respectively. Each array entry encodes how this i atom will
     * interact with the 4 j atoms. Matching code exists in
     * set_ci_top_excls() to generate indices into this array. Those
     * indices are used in the kernels. */

    simd_excl_size = NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
    const int qpx_simd_width = GMX_SIMD_REAL_WIDTH;
    snew_aligned(simd_interaction_array, simd_excl_size * qpx_simd_width, NBNXN_MEM_ALIGN);
    for (j = 0; j < simd_excl_size; j++)
        int index = j * qpx_simd_width;
        for (i = 0; i < qpx_simd_width; i++)
            simd_interaction_array[index + i] = (j & (1 << i)) ? simdTrue : simdFalse;

    nbat->simd_interaction_array = simd_interaction_array;
/* Initializes an nbnxn_atomdata_t data structure */
void nbnxn_atomdata_init(FILE *fp,
                         nbnxn_atomdata_t *nbat,
                         int enbnxninitcombrule,
                         int ntype, const real *nbfp,
                         nbnxn_alloc_t *alloc,

    gmx_bool simple, bCombGeom, bCombLB, bSIMD;

        nbat->alloc = nbnxn_alloc_aligned;

        nbat->free = nbnxn_free_aligned;

        fprintf(debug, "There are %d atom types in the system, adding one for nbnxn_atomdata_t\n", ntype);

    nbat->ntype = ntype + 1;
    nbat->alloc((void **)&nbat->nbfp,
                nbat->ntype*nbat->ntype*2*sizeof(*nbat->nbfp));
    nbat->alloc((void **)&nbat->nbfp_comb, nbat->ntype*2*sizeof(*nbat->nbfp_comb));

    /* A tolerance of 1e-5 seems reasonable for (possibly hand-typed)
     * force-field floating point parameters.
     */
    ptr = getenv("GMX_LJCOMB_TOL");
        sscanf(ptr, "%lf", &dbl);

    /* Temporarily fill nbat->nbfp_comb with sigma and epsilon
     * to check for the LB rule.
     */
    for (i = 0; i < ntype; i++)
        c6  = nbfp[(i*ntype+i)*2  ]/6.0;
        c12 = nbfp[(i*ntype+i)*2+1]/12.0;
        if (c6 > 0 && c12 > 0)
            nbat->nbfp_comb[i*2  ] = pow(c12/c6, 1.0/6.0);
            nbat->nbfp_comb[i*2+1] = 0.25*c6*c6/c12;
        else if (c6 == 0 && c12 == 0)
            nbat->nbfp_comb[i*2  ] = 0;
            nbat->nbfp_comb[i*2+1] = 0;

            /* Cannot use the LB rule with only dispersion or only repulsion */

    for (i = 0; i < nbat->ntype; i++)
        for (j = 0; j < nbat->ntype; j++)
            if (i < ntype && j < ntype)
                /* fr->nbfp has been updated, so that array too now stores c6/c12 including
                 * the 6.0/12.0 prefactors to save 2 flops in the most common case (force-only).
                 */
                c6  = nbfp[(i*ntype+j)*2  ];
                c12 = nbfp[(i*ntype+j)*2+1];
                nbat->nbfp[(i*nbat->ntype+j)*2  ] = c6;
                nbat->nbfp[(i*nbat->ntype+j)*2+1] = c12;

                /* Compare 6*C6 and 12*C12 for the geometric combination rule */
                bCombGeom = bCombGeom &&
                    gmx_within_tol(c6*c6, nbfp[(i*ntype+i)*2  ]*nbfp[(j*ntype+j)*2  ], tol) &&
                    gmx_within_tol(c12*c12, nbfp[(i*ntype+i)*2+1]*nbfp[(j*ntype+j)*2+1], tol);

                /* Compare C6 and C12 for the Lorentz-Berthelot combination rule */
                    ((c6 == 0 && c12 == 0 &&
                      (nbat->nbfp_comb[i*2+1] == 0 || nbat->nbfp_comb[j*2+1] == 0)) ||
                     (c6 > 0 && c12 > 0 &&
                      gmx_within_tol(pow(c12/c6, 1.0/6.0), 0.5*(nbat->nbfp_comb[i*2]+nbat->nbfp_comb[j*2]), tol) &&
                      gmx_within_tol(0.25*c6*c6/c12, sqrt(nbat->nbfp_comb[i*2+1]*nbat->nbfp_comb[j*2+1]), tol)));
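                /* The geometric test above checks c6_ij^2 == c6_ii*c6_jj
                 * (and likewise for c12), i.e. the pair parameter equals the
                 * geometric mean of the diagonal entries. The LB test compares
                 * a sigma-like quantity, (c12/c6)^(1/6), against the arithmetic
                 * mean of the per-type sigmas and an epsilon-like quantity,
                 * 0.25*c6^2/c12, against the geometric mean of the per-type
                 * epsilons stored temporarily in nbfp_comb.
                 */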
                /* Add zero parameters for the additional dummy atom type */
                nbat->nbfp[(i*nbat->ntype+j)*2  ] = 0;
                nbat->nbfp[(i*nbat->ntype+j)*2+1] = 0;

        fprintf(debug, "Combination rules: geometric %d Lorentz-Berthelot %d\n",

    simple = nbnxn_kernel_pairlist_simple(nb_kernel_type);

    switch (enbnxninitcombrule)
        case enbnxninitcombruleDETECT:
            /* We prefer the geometric combination rule,
             * as that gives a slightly faster kernel than the LB rule.
             */
                nbat->comb_rule = ljcrGEOM;

                nbat->comb_rule = ljcrLB;

                nbat->comb_rule = ljcrNONE;

                nbat->free(nbat->nbfp_comb);

            if (nbat->comb_rule == ljcrNONE)
                fprintf(fp, "Using full Lennard-Jones parameter combination matrix\n\n");

                fprintf(fp, "Using %s Lennard-Jones combination rule\n\n",
                        nbat->comb_rule == ljcrGEOM ? "geometric" : "Lorentz-Berthelot");

        case enbnxninitcombruleGEOM:
            nbat->comb_rule = ljcrGEOM;
        case enbnxninitcombruleLB:
            nbat->comb_rule = ljcrLB;
        case enbnxninitcombruleNONE:
            nbat->comb_rule = ljcrNONE;

            nbat->free(nbat->nbfp_comb);

            gmx_incons("Unknown enbnxninitcombrule");

    bSIMD = (nb_kernel_type == nbnxnk4xN_SIMD_4xN ||
             nb_kernel_type == nbnxnk4xN_SIMD_2xNN);

    set_lj_parameter_data(nbat, bSIMD);

        nbat->lj_comb = NULL;

        pack_x = max(NBNXN_CPU_CLUSTER_I_SIZE,
                     nbnxn_kernel_to_cj_size(nb_kernel_type));
            nbat->XFormat = nbatX4;

            nbat->XFormat = nbatX8;

            gmx_incons("Unsupported packing width");

        nbat->XFormat = nbatXYZ;

    nbat->FFormat = nbat->XFormat;

        nbat->XFormat = nbatXYZQ;
        nbat->FFormat = nbatXYZ;

    nbat->nenergrp = n_energygroups;

        /* Energy groups not supported yet for super-sub lists */
        if (n_energygroups > 1 && fp != NULL)
            fprintf(fp, "\nNOTE: With GPUs, reporting energy group contributions is not supported\n\n");

    /* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
    if (nbat->nenergrp > 64)
        gmx_fatal(FARGS, "With NxN kernels not more than 64 energy groups are supported\n");

    while (nbat->nenergrp > (1<<nbat->neg_2log))
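    /* neg_2log is the number of bits needed to store one energy-group index
     * (after this loop 1<<neg_2log >= nenergrp); it is used as the bit shift
     * when the group indices of the na_c atoms of a cluster are packed into
     * a single int in copy_egp_to_nbat_egps() below.
     */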
    nbat->energrp = NULL;
    nbat->alloc((void **)&nbat->shift_vec, SHIFTS*sizeof(*nbat->shift_vec));
    nbat->xstride = (nbat->XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
    nbat->fstride = (nbat->FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);

#ifdef GMX_NBNXN_SIMD
        nbnxn_atomdata_init_simple_exclusion_masks(nbat);

    /* Initialize the output data structures */
    snew(nbat->out, nbat->nout);
    for (i = 0; i < nbat->nout; i++)
        nbnxn_atomdata_output_init(&nbat->out[i],
                                   nbat->nenergrp, 1<<nbat->neg_2log,

    nbat->buffer_flags.flag        = NULL;
    nbat->buffer_flags.flag_nalloc = 0;

    nth = gmx_omp_nthreads_get(emntNonbonded);

    ptr = getenv("GMX_USE_TREEREDUCE");
        nbat->bUseTreeReduce = strtol(ptr, 0, 10);
    else if (nth > 8) /* on the CPU we currently don't benefit even at 32 */
        nbat->bUseTreeReduce = 1;

        nbat->bUseTreeReduce = 0;

    if (nbat->bUseTreeReduce)
            fprintf(fp, "Using tree force reduction\n\n");

        snew(nbat->syncStep, nth);
static void copy_lj_to_nbat_lj_comb_x4(const real *ljparam_type,
                                       const int *type, int na,

    /* The LJ params follow the combination rule:
     * copy the params from the type array to the atom array.
     */
    for (is = 0; is < na; is += PACK_X4)
        for (k = 0; k < PACK_X4; k++)
            ljparam_at[is*2        +k] = ljparam_type[type[i]*2  ];
            ljparam_at[is*2+PACK_X4+k] = ljparam_type[type[i]*2+1];

static void copy_lj_to_nbat_lj_comb_x8(const real *ljparam_type,
                                       const int *type, int na,

    /* The LJ params follow the combination rule:
     * copy the params from the type array to the atom array.
     */
    for (is = 0; is < na; is += PACK_X8)
        for (k = 0; k < PACK_X8; k++)
            ljparam_at[is*2        +k] = ljparam_type[type[i]*2  ];
            ljparam_at[is*2+PACK_X8+k] = ljparam_type[type[i]*2+1];
/* Sets the atom type in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t *nbat,
                                         const nbnxn_search_t nbs,

    const nbnxn_grid_t *grid;

    for (g = 0; g < ngrid; g++)
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (i = 0; i < grid->ncx*grid->ncy; i++)
            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_int_to_nbat_int(nbs->a+ash, grid->cxy_na[i], ncz*grid->na_sc,
                                 type, nbat->ntype-1, nbat->type+ash);

/* Sets the LJ combination rule parameters in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_ljcombparams(nbnxn_atomdata_t *nbat,
                                            const nbnxn_search_t nbs)

    const nbnxn_grid_t *grid;

    if (nbat->comb_rule != ljcrNONE)
        for (g = 0; g < ngrid; g++)
            grid = &nbs->grid[g];

            /* Loop over all columns and copy and fill */
            for (i = 0; i < grid->ncx*grid->ncy; i++)
                ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
                ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

                if (nbat->XFormat == nbatX4)
                    copy_lj_to_nbat_lj_comb_x4(nbat->nbfp_comb,
                                               nbat->type+ash, ncz*grid->na_sc,
                                               nbat->lj_comb+ash*2);
                else if (nbat->XFormat == nbatX8)
                    copy_lj_to_nbat_lj_comb_x8(nbat->nbfp_comb,
                                               nbat->type+ash, ncz*grid->na_sc,
                                               nbat->lj_comb+ash*2);
/* Sets the charges in nbnxn_atomdata_t *nbat */
static void nbnxn_atomdata_set_charges(nbnxn_atomdata_t *nbat,
                                       const nbnxn_search_t nbs,

    int                 g, cxy, ncz, ash, na, na_round, i, j;
    const nbnxn_grid_t *grid;

    for (g = 0; g < ngrid; g++)
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (cxy = 0; cxy < grid->ncx*grid->ncy; cxy++)
            ash      = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
            na       = grid->cxy_na[cxy];
            na_round = (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;

            if (nbat->XFormat == nbatXYZQ)
                q = nbat->x + ash*STRIDE_XYZQ + ZZ + 1;
                for (i = 0; i < na; i++)
                    *q = charge[nbs->a[ash+i]];

                /* Complete the partially filled last cell with zeros */
                for (; i < na_round; i++)

                for (i = 0; i < na; i++)
                    *q = charge[nbs->a[ash+i]];

                /* Complete the partially filled last cell with zeros */
                for (; i < na_round; i++)
/* Set the charges of perturbed atoms in nbnxn_atomdata_t to 0.
 * This is to automatically remove the RF/PME self term in the nbnxn kernels.
 * Some of the zero interactions are still calculated in the normal kernels.
 * All perturbed interactions are calculated in the free energy kernel,
 * using the original charge and LJ data, not nbnxn_atomdata_t.
 */
static void nbnxn_atomdata_mask_fep(nbnxn_atomdata_t *nbat,
                                    const nbnxn_search_t nbs)

    int                 stride_q, g, nsubc, c_offset, c, subc, i, ind;
    const nbnxn_grid_t *grid;

    if (nbat->XFormat == nbatXYZQ)
        q        = nbat->x + ZZ + 1;
        stride_q = STRIDE_XYZQ;

    for (g = 0; g < ngrid; g++)
        grid = &nbs->grid[g];

            nsubc = GPU_NSUBCELL;

        c_offset = grid->cell0*grid->na_sc;

        /* Loop over all columns and copy and fill */
        for (c = 0; c < grid->nc*nsubc; c++)
            /* Does this cluster contain perturbed particles? */
            if (grid->fep[c] != 0)
                for (i = 0; i < grid->na_c; i++)
                    /* Is this a perturbed particle? */
                    if (grid->fep[c] & (1 << i))
                        ind = c_offset + c*grid->na_c + i;
                        /* Set atom type and charge to non-interacting */
                        nbat->type[ind] = nbat->ntype - 1;
                        q[ind*stride_q] = 0;
/* Copies the energy group indices to a reordered and packed array */
static void copy_egp_to_nbat_egps(const int *a, int na, int na_round,
                                  int na_c, int bit_shift,
                                  const int *in, int *innb)

    for (i = 0; i < na; i += na_c)
        /* Store na_c energy group numbers into one int */
        for (sa = 0; sa < na_c; sa++)
                comb |= (GET_CGINFO_GID(in[at]) << (sa*bit_shift));
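        /* Packing example: with na_c = 4 and bit_shift = 2 (up to 4 energy
         * groups), the group indices g0..g3 of the four atoms in a cluster
         * end up in one int as comb = g0 | g1<<2 | g2<<4 | g3<<6.
         */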
    /* Complete the partially filled last cell with fill */
    for (; i < na_round; i += na_c)
/* Set the energy group indices for atoms in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_energygroups(nbnxn_atomdata_t *nbat,
                                            const nbnxn_search_t nbs,

    const nbnxn_grid_t *grid;

    if (nbat->nenergrp == 1)

    for (g = 0; g < ngrid; g++)
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (i = 0; i < grid->ncx*grid->ncy; i++)
            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_egp_to_nbat_egps(nbs->a+ash, grid->cxy_na[i], ncz*grid->na_sc,
                                  nbat->na_c, nbat->neg_2log,
                                  atinfo, nbat->energrp+(ash>>grid->na_c_2log));

/* Sets all required atom parameter data in nbnxn_atomdata_t */
void nbnxn_atomdata_set(nbnxn_atomdata_t *nbat,
                        const nbnxn_search_t nbs,
                        const t_mdatoms *mdatoms,

    if (locality == eatLocal)

    nbnxn_atomdata_set_atomtypes(nbat, ngrid, nbs, mdatoms->typeA);

    nbnxn_atomdata_set_charges(nbat, ngrid, nbs, mdatoms->chargeA);

        nbnxn_atomdata_mask_fep(nbat, ngrid, nbs);

    /* This must be done after masking types for FEP */
    nbnxn_atomdata_set_ljcombparams(nbat, ngrid, nbs);

    nbnxn_atomdata_set_energygroups(nbat, ngrid, nbs, atinfo);
/* Copies the shift vector array to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_shiftvec(gmx_bool bDynamicBox,
                                  nbnxn_atomdata_t *nbat)

    nbat->bDynamicBox = bDynamicBox;
    for (i = 0; i < SHIFTS; i++)
        copy_rvec(shift_vec[i], nbat->shift_vec[i]);

/* Copies (and reorders) the coordinates to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_x_to_nbat_x(const nbnxn_search_t nbs,
                                     nbnxn_atomdata_t *nbat)

            nbat->natoms_local = nbs->grid[0].nc*nbs->grid[0].na_sc;

    nth = gmx_omp_nthreads_get(emntPairsearch);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (th = 0; th < nth; th++)
        for (g = g0; g < g1; g++)
            const nbnxn_grid_t *grid;
            int                 cxy0, cxy1, cxy;

            grid = &nbs->grid[g];

            cxy0 = (grid->ncx*grid->ncy* th   +nth-1)/nth;
            cxy1 = (grid->ncx*grid->ncy*(th+1)+nth-1)/nth;

            for (cxy = cxy0; cxy < cxy1; cxy++)
                int na, ash, na_fill;

                na  = grid->cxy_na[cxy];
                ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;

                if (g == 0 && FillLocal)
                        (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;

                    /* We fill only the real particle locations.
                     * We assume the filler entries at the end have already
                     * been set properly during neighbor searching.
                     */
                copy_rvec_to_nbat_real(nbs->a+ash, na, na_fill, x,
                                       nbat->XFormat, nbat->x, ash,
nbnxn_atomdata_clear_reals(real * gmx_restrict dest,

    for (i = i0; i < i1; i++)

nbnxn_atomdata_reduce_reals(real * gmx_restrict dest,
                            real ** gmx_restrict src,

        /* The destination buffer contains data, add to it */
        for (i = i0; i < i1; i++)
            for (s = 0; s < nsrc; s++)
                dest[i] += src[s][i];

        /* The destination buffer is uninitialized, set it first */
        for (i = i0; i < i1; i++)
            dest[i] = src[0][i];
            for (s = 1; s < nsrc; s++)
                dest[i] += src[s][i];
nbnxn_atomdata_reduce_reals_simd(real gmx_unused * gmx_restrict dest,
                                 gmx_bool gmx_unused bDestSet,
                                 real gmx_unused ** gmx_restrict src,
                                 int gmx_unused nsrc,
                                 int gmx_unused i0, int gmx_unused i1)

#ifdef GMX_NBNXN_SIMD
    /* The SIMD width here is actually independent of that in the kernels,
     * but we use the same width for simplicity (usually optimal anyhow).
     */
    gmx_simd_real_t dest_SSE, src_SSE;

        for (i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
            dest_SSE = gmx_simd_load_r(dest+i);
            for (s = 0; s < nsrc; s++)
                src_SSE  = gmx_simd_load_r(src[s]+i);
                dest_SSE = gmx_simd_add_r(dest_SSE, src_SSE);
            gmx_simd_store_r(dest+i, dest_SSE);

        for (i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
            dest_SSE = gmx_simd_load_r(src[0]+i);
            for (s = 1; s < nsrc; s++)
                src_SSE  = gmx_simd_load_r(src[s]+i);
                dest_SSE = gmx_simd_add_r(dest_SSE, src_SSE);
            gmx_simd_store_r(dest+i, dest_SSE);
/* Add part of the force array(s) from nbnxn_atomdata_t to f */
nbnxn_atomdata_add_nbat_f_to_f_part(const nbnxn_search_t nbs,
                                    const nbnxn_atomdata_t *nbat,
                                    nbnxn_atomdata_output_t *out,

    /* Loop over all columns and copy and fill */
    switch (nbat->FFormat)
            for (a = a0; a < a1; a++)
                i = cell[a]*nbat->fstride;

                f[a][YY] += fnb[i+1];
                f[a][ZZ] += fnb[i+2];

            for (a = a0; a < a1; a++)
                i = cell[a]*nbat->fstride;

                for (fa = 0; fa < nfa; fa++)
                    f[a][XX] += out[fa].f[i];
                    f[a][YY] += out[fa].f[i+1];
                    f[a][ZZ] += out[fa].f[i+2];

            for (a = a0; a < a1; a++)
                i = X4_IND_A(cell[a]);

                f[a][XX] += fnb[i+XX*PACK_X4];
                f[a][YY] += fnb[i+YY*PACK_X4];
                f[a][ZZ] += fnb[i+ZZ*PACK_X4];

            for (a = a0; a < a1; a++)
                i = X4_IND_A(cell[a]);

                for (fa = 0; fa < nfa; fa++)
                    f[a][XX] += out[fa].f[i+XX*PACK_X4];
                    f[a][YY] += out[fa].f[i+YY*PACK_X4];
                    f[a][ZZ] += out[fa].f[i+ZZ*PACK_X4];

            for (a = a0; a < a1; a++)
                i = X8_IND_A(cell[a]);

                f[a][XX] += fnb[i+XX*PACK_X8];
                f[a][YY] += fnb[i+YY*PACK_X8];
                f[a][ZZ] += fnb[i+ZZ*PACK_X8];

            for (a = a0; a < a1; a++)
                i = X8_IND_A(cell[a]);

                for (fa = 0; fa < nfa; fa++)
                    f[a][XX] += out[fa].f[i+XX*PACK_X8];
                    f[a][YY] += out[fa].f[i+YY*PACK_X8];
                    f[a][ZZ] += out[fa].f[i+ZZ*PACK_X8];

            gmx_incons("Unsupported nbnxn_atomdata_t format");

static gmx_inline unsigned char reverse_bits(unsigned char b)
    /* http://graphics.stanford.edu/~seander/bithacks.html#ReverseByteWith64BitsDiv */
    return (b * 0x0202020202ULL & 0x010884422010ULL) % 1023;
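/* reverse_bits() mirrors the bit order of an 8-bit value using the bithacks
 * 64-bit multiply/mask/modulus trick, e.g. 0b00000110 -> 0b01100000.
 * It is used below to permute the position of a thread within its reduction
 * group, so that one of the two buffers summed in each step was produced by
 * the same thread in the previous step.
 */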
static void nbnxn_atomdata_add_nbat_f_to_f_treereduce(const nbnxn_atomdata_t *nbat,

    const nbnxn_buffer_flags_t *flags = &nbat->buffer_flags;

    int next_pow2 = 1<<(gmx_log2i(nth-1)+1);

    assert(nbat->nout == nth); /* tree-reduce currently only works for nout==nth */

    memset(nbat->syncStep, 0, sizeof(*(nbat->syncStep))*nth);

#pragma omp parallel num_threads(nth)

        th = gmx_omp_get_thread_num();

        for (group_size = 2; group_size < 2*next_pow2; group_size *= 2)
            int index[2], group_pos, partner_pos, wu;
            int partner_th = th ^ (group_size/2);

                /* wait on partner thread - replaces full barrier */
                int sync_th, sync_group_size;

                tMPI_Atomic_memory_barrier();                         /* guarantee data is saved before marking work as done */
                tMPI_Atomic_set(&(nbat->syncStep[th]), group_size/2); /* mark previous step as completed */

                /* Find the thread to sync with. Equal to partner_th unless nth is not a power of two. */
                for (sync_th = partner_th, sync_group_size = group_size; sync_th >= nth && sync_group_size > 2; sync_group_size /= 2)
                    sync_th &= ~(sync_group_size/4);

                if (sync_th < nth) /* otherwise there is nothing to sync: index[1] will be >= nout */
                    /* wait on the thread which computed the input data in the previous step */
                    while (tMPI_Atomic_get((volatile tMPI_Atomic_t *)&(nbat->syncStep[sync_th])) < group_size/2)

                    /* guarantee that no later load happens before the wait loop is finished */
                    tMPI_Atomic_memory_barrier();

#else /* TMPI_ATOMICS */

            /* Calculate the buffers to sum (the result goes into the first buffer) */
            group_pos = th % group_size;
            index[0]  = th - group_pos;
            index[1]  = index[0] + group_size/2;

            /* If there is no second buffer, there is nothing to do */
            if (index[1] >= nbat->nout && group_size > 2)

#if NBNXN_BUFFERFLAG_MAX_THREADS > 256
#error reverse_bits assumes max 256 threads

            /* The position is permuted so that one of the two vectors being added
             * was computed on the same thread in the previous step.
             * This improves locality and makes it possible to sync with just a
             * single thread between steps (= the levels in the btree).
             * The permutation which allows this corresponds to reversing the bits
             * of the group position.
             */
            group_pos = reverse_bits(group_pos)/(256/group_size);

            partner_pos = group_pos ^ 1;

            /* loop over two work-units (our own and our partner's) */
            for (wu = 0; wu < 2; wu++)
                    if (partner_th < nth)
                        break; /* the partner exists, so we don't have to do its work */

                    group_pos = partner_pos;

                /* Calculate the cell-block range for our thread */
                b0 = (flags->nflag* group_pos   )/group_size;
                b1 = (flags->nflag*(group_pos+1))/group_size;

                for (b = b0; b < b1; b++)
                    i0 =  b   *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
                    i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;

                    if ((flags->flag[b] & (1ULL<<index[1])) || group_size > 2)
#ifdef GMX_NBNXN_SIMD
                        nbnxn_atomdata_reduce_reals_simd
                        nbnxn_atomdata_reduce_reals
                            (nbat->out[index[0]].f,
                             (flags->flag[b] & (1ULL<<index[0])) || group_size > 2,
                             &(nbat->out[index[1]].f), 1, i0, i1);

                    else if (!(flags->flag[b] & (1ULL<<index[0])))
                        nbnxn_atomdata_clear_reals(nbat->out[index[0]].f,
static void nbnxn_atomdata_add_nbat_f_to_f_stdreduce(const nbnxn_atomdata_t *nbat,

#pragma omp parallel for num_threads(nth) schedule(static)
    for (th = 0; th < nth; th++)
        const nbnxn_buffer_flags_t *flags;

        real *fptr[NBNXN_BUFFERFLAG_MAX_THREADS];

        flags = &nbat->buffer_flags;

        /* Calculate the cell-block range for our thread */
        b0 = (flags->nflag* th   )/nth;
        b1 = (flags->nflag*(th+1))/nth;

        for (b = b0; b < b1; b++)
            i0 =  b   *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
            i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;

            for (out = 1; out < nbat->nout; out++)
                if (flags->flag[b] & (1U<<out))
                    fptr[nfptr++] = nbat->out[out].f;

#ifdef GMX_NBNXN_SIMD
                nbnxn_atomdata_reduce_reals_simd
                nbnxn_atomdata_reduce_reals
                    flags->flag[b] & (1U<<0),

            else if (!(flags->flag[b] & (1U<<0)))
                nbnxn_atomdata_clear_reals(nbat->out[0].f,
/* Add the force array(s) from nbnxn_atomdata_t to f */
void nbnxn_atomdata_add_nbat_f_to_f(const nbnxn_search_t nbs,
                                    const nbnxn_atomdata_t *nbat,

    nbs_cycle_start(&nbs->cc[enbsCCreducef]);

            na = nbs->natoms_nonlocal;

            na = nbs->natoms_local;

            a0 = nbs->natoms_local;
            na = nbs->natoms_nonlocal - nbs->natoms_local;

    nth = gmx_omp_nthreads_get(emntNonbonded);

        if (locality != eatAll)
            gmx_incons("add_f_to_f called with nout>1 and locality!=eatAll");

        /* Reduce the force thread output buffers into buffer 0, before adding
         * them to the, differently ordered, "real" force buffer.
         */
        if (nbat->bUseTreeReduce)
            nbnxn_atomdata_add_nbat_f_to_f_treereduce(nbat, nth);

            nbnxn_atomdata_add_nbat_f_to_f_stdreduce(nbat, nth);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (th = 0; th < nth; th++)
        nbnxn_atomdata_add_nbat_f_to_f_part(nbs, nbat,

    nbs_cycle_stop(&nbs->cc[enbsCCreducef]);

/* Adds the shift forces from nbnxn_atomdata_t to fshift */
void nbnxn_atomdata_add_nbat_fshift_to_fshift(const nbnxn_atomdata_t *nbat,

    const nbnxn_atomdata_output_t *out;

    for (s = 0; s < SHIFTS; s++)
        for (th = 0; th < nbat->nout; th++)
            sum[XX] += out[th].fshift[s*DIM+XX];
            sum[YY] += out[th].fshift[s*DIM+YY];
            sum[ZZ] += out[th].fshift[s*DIM+ZZ];

        rvec_inc(fshift[s], sum);