/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 * This source code is part of
 *
 * GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 */
#include <math.h>
#include <stdlib.h>
#include <string.h>

#include "nbnxn_consts.h"
#include "nbnxn_internal.h"
#include "nbnxn_search.h"
#include "gmx_omp_nthreads.h"
/* Default nbnxn allocation routine, allocates NBNXN_MEM_ALIGN byte aligned */
void nbnxn_alloc_aligned(void **ptr, size_t nbytes)
{
    *ptr = save_malloc_aligned("ptr", __FILE__, __LINE__, nbytes, 1, NBNXN_MEM_ALIGN);
}

/* Free function for memory allocated with nbnxn_alloc_aligned */
void nbnxn_free_aligned(void *ptr)
{
    sfree_aligned(ptr);
}
/* Reallocation wrapper function for nbnxn data structures */
void nbnxn_realloc_void(void **ptr,
                        int nbytes_copy, int nbytes_new,
                        nbnxn_alloc_t *ma,
                        nbnxn_free_t  *mf)
{
    void *ptr_new;

    ma(&ptr_new, nbytes_new);

    if (nbytes_new > 0 && ptr_new == NULL)
    {
        gmx_fatal(FARGS, "Allocation of %d bytes failed", nbytes_new);
    }

    if (nbytes_copy > 0)
    {
        if (nbytes_new < nbytes_copy)
        {
            gmx_incons("In nbnxn_realloc_void: new size less than copy size");
        }
        memcpy(ptr_new, *ptr, nbytes_copy);
    }
    if (*ptr != NULL)
    {
        mf(*ptr);
    }
    *ptr = ptr_new;
}
/* Reallocate the nbnxn_atomdata_t for a size of n atoms */
void nbnxn_atomdata_realloc(nbnxn_atomdata_t *nbat, int n)
{
    int t;

    nbnxn_realloc_void((void **)&nbat->type,
                       nbat->natoms*sizeof(*nbat->type),
                       n*sizeof(*nbat->type),
                       nbat->alloc, nbat->free);
    nbnxn_realloc_void((void **)&nbat->lj_comb,
                       nbat->natoms*2*sizeof(*nbat->lj_comb),
                       n*2*sizeof(*nbat->lj_comb),
                       nbat->alloc, nbat->free);
    if (nbat->XFormat != nbatXYZQ)
    {
        nbnxn_realloc_void((void **)&nbat->q,
                           nbat->natoms*sizeof(*nbat->q),
                           n*sizeof(*nbat->q),
                           nbat->alloc, nbat->free);
    }
    if (nbat->nenergrp > 1)
    {
        nbnxn_realloc_void((void **)&nbat->energrp,
                           nbat->natoms/nbat->na_c*sizeof(*nbat->energrp),
                           n/nbat->na_c*sizeof(*nbat->energrp),
                           nbat->alloc, nbat->free);
    }
    nbnxn_realloc_void((void **)&nbat->x,
                       nbat->natoms*nbat->xstride*sizeof(*nbat->x),
                       n*nbat->xstride*sizeof(*nbat->x),
                       nbat->alloc, nbat->free);
    for (t = 0; t < nbat->nout; t++)
    {
        /* Allocate one element extra for possible signaling with CUDA */
        nbnxn_realloc_void((void **)&nbat->out[t].f,
                           nbat->natoms*nbat->fstride*sizeof(*nbat->out[t].f),
                           n*nbat->fstride*sizeof(*nbat->out[t].f),
                           nbat->alloc, nbat->free);
    }
    nbat->nalloc = n;
}
/* Initializes an nbnxn_atomdata_output_t data structure */
static void nbnxn_atomdata_output_init(nbnxn_atomdata_output_t *out,
                                       int nb_kernel_type,
                                       int nenergrp, int stride,
                                       nbnxn_alloc_t *ma)
{
    int cj_size;

    out->f = NULL;
    ma((void **)&out->fshift, SHIFTS*DIM*sizeof(*out->fshift));
    out->nV = nenergrp*nenergrp;
    ma((void **)&out->Vvdw, out->nV*sizeof(*out->Vvdw));
    ma((void **)&out->Vc,   out->nV*sizeof(*out->Vc));

    if (nb_kernel_type == nbnxnk4xN_SIMD_4xN ||
        nb_kernel_type == nbnxnk4xN_SIMD_2xNN)
    {
        cj_size  = nbnxn_kernel_to_cj_size(nb_kernel_type);
        out->nVS = nenergrp*nenergrp*stride*(cj_size>>1)*cj_size;
        ma((void **)&out->VSvdw, out->nVS*sizeof(*out->VSvdw));
        ma((void **)&out->VSc,   out->nVS*sizeof(*out->VSc));
    }
    else
    {
        out->nVS = 0;
    }
}
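/* Worked example (not in the original source): with nenergrp = 4 energy
 * groups, stride = 1<<neg_2log = 4 and a j-cluster size cj_size = 4, the
 * SIMD kernels get nVS = 4*4*4*(4>>1)*4 = 512 temporary group-pair energy
 * accumulators per output buffer. This rapid growth in scratch space is
 * why nbnxn_atomdata_init below caps the number of energy groups at 64.
 */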
/* Copies integer atom data (e.g. atom types) to a reordered, padded array */
static void copy_int_to_nbat_int(const int *a, int na, int na_round,
                                 const int *in, int fill, int *innb)
{
    int i, j;

    j = 0;
    for (i = 0; i < na; i++)
    {
        innb[j++] = in[a[i]];
    }
    /* Complete the partially filled last cell with fill */
    for (; i < na_round; i++)
    {
        innb[j++] = fill;
    }
}
/* Zeroes the coordinates of na atoms starting at index a0, for the
 * given nbnxn_atomdata_t coordinate layout nbatFormat.
 */
static void clear_nbat_real(int na, int nbatFormat, real *xnb, int a0)
{
    int a, d, j, c;

    switch (nbatFormat)
    {
        case nbatXYZ:
            for (a = 0; a < na; a++)
            {
                for (d = 0; d < DIM; d++)
                {
                    xnb[(a0+a)*STRIDE_XYZ+d] = 0;
                }
            }
            break;
        case nbatXYZQ:
            for (a = 0; a < na; a++)
            {
                for (d = 0; d < DIM; d++)
                {
                    xnb[(a0+a)*STRIDE_XYZQ+d] = 0;
                }
            }
            break;
        case nbatX4:
            j = X4_IND_A(a0);
            c = a0 & (PACK_X4-1);
            for (a = 0; a < na; a++)
            {
                xnb[j+XX*PACK_X4] = 0;
                xnb[j+YY*PACK_X4] = 0;
                xnb[j+ZZ*PACK_X4] = 0;
                j++;
                c++;
                if (c == PACK_X4)
                {
                    j += (DIM-1)*PACK_X4;
                    c  = 0;
                }
            }
            break;
        case nbatX8:
            j = X8_IND_A(a0);
            c = a0 & (PACK_X8-1);
            for (a = 0; a < na; a++)
            {
                xnb[j+XX*PACK_X8] = 0;
                xnb[j+YY*PACK_X8] = 0;
                xnb[j+ZZ*PACK_X8] = 0;
                j++;
                c++;
                if (c == PACK_X8)
                {
                    j += (DIM-1)*PACK_X8;
                    c  = 0;
                }
            }
            break;
    }
}
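/* Illustrative sketch (not part of the original file): the X4 packed
 * layout groups PACK_X4 = 4 atoms as x0 x1 x2 x3 y0 y1 y2 y3 z0 z1 z2 z3.
 * This hypothetical helper shows how one would read atom a's coordinates
 * back out of that layout, mirroring the indexing used above.
 */
#if 0
static void x4_get_xyz(const real *xnb, int a, real xyz[DIM])
{
    int i = X4_IND_A(a); /* offset of atom a's x slot within its pack */

    xyz[XX] = xnb[i+XX*PACK_X4];
    xyz[YY] = xnb[i+YY*PACK_X4];
    xyz[ZZ] = xnb[i+ZZ*PACK_X4];
}
#endif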
/* Copies (and reorders) na coordinates into the nbatFormat layout,
 * padding the cell out to na_round entries with filler particles.
 */
void copy_rvec_to_nbat_real(const int *a, int na, int na_round,
                            rvec *x, int nbatFormat, real *xnb, int a0,
                            int cx, int cy, int cz)
{
    int i, j, c;

/* We might need to place filler particles to fill up the cell to na_round.
 * The coefficients (LJ and q) for such particles are zero.
 * But we might still get NaN as 0*NaN when distances are too small.
 * We hope that -107 nm is far enough away from zero
 * to avoid accidental short distances to particles shifted down for pbc.
 */
#define NBAT_FAR_AWAY 107
    switch (nbatFormat)
    {
        case nbatXYZ:
            j = a0*STRIDE_XYZ;
            for (i = 0; i < na; i++)
            {
                xnb[j++] = x[a[i]][XX];
                xnb[j++] = x[a[i]][YY];
                xnb[j++] = x[a[i]][ZZ];
            }
            /* Complete the partially filled last cell with particles
             * placed far apart. This simplifies the bounding box
             * calculation and avoids numerical issues with filler
             * particles that would otherwise be coincidentally close.
             */
            for (; i < na_round; i++)
            {
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);
            }
            break;
        case nbatXYZQ:
            j = a0*STRIDE_XYZQ;
            for (i = 0; i < na; i++)
            {
                xnb[j++] = x[a[i]][XX];
                xnb[j++] = x[a[i]][YY];
                xnb[j++] = x[a[i]][ZZ];
                j++;
            }
            /* Complete the partially filled last cell with particles far apart */
            for (; i < na_round; i++)
            {
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);
                j++;
            }
            break;
        case nbatX4:
            j = X4_IND_A(a0);
            c = a0 & (PACK_X4-1);
            for (i = 0; i < na; i++)
            {
                xnb[j+XX*PACK_X4] = x[a[i]][XX];
                xnb[j+YY*PACK_X4] = x[a[i]][YY];
                xnb[j+ZZ*PACK_X4] = x[a[i]][ZZ];
                j++;
                c++;
                if (c == PACK_X4)
                {
                    j += (DIM-1)*PACK_X4;
                    c  = 0;
                }
            }
            /* Complete the partially filled last cell with particles far apart */
            for (; i < na_round; i++)
            {
                xnb[j+XX*PACK_X4] = -NBAT_FAR_AWAY*(1 + cx);
                xnb[j+YY*PACK_X4] = -NBAT_FAR_AWAY*(1 + cy);
                xnb[j+ZZ*PACK_X4] = -NBAT_FAR_AWAY*(1 + cz + i);
                j++;
                c++;
                if (c == PACK_X4)
                {
                    j += (DIM-1)*PACK_X4;
                    c  = 0;
                }
            }
            break;
        case nbatX8:
            j = X8_IND_A(a0);
            c = a0 & (PACK_X8 - 1);
            for (i = 0; i < na; i++)
            {
                xnb[j+XX*PACK_X8] = x[a[i]][XX];
                xnb[j+YY*PACK_X8] = x[a[i]][YY];
                xnb[j+ZZ*PACK_X8] = x[a[i]][ZZ];
                j++;
                c++;
                if (c == PACK_X8)
                {
                    j += (DIM-1)*PACK_X8;
                    c  = 0;
                }
            }
            /* Complete the partially filled last cell with particles far apart */
            for (; i < na_round; i++)
            {
                xnb[j+XX*PACK_X8] = -NBAT_FAR_AWAY*(1 + cx);
                xnb[j+YY*PACK_X8] = -NBAT_FAR_AWAY*(1 + cy);
                xnb[j+ZZ*PACK_X8] = -NBAT_FAR_AWAY*(1 + cz + i);
                j++;
                c++;
                if (c == PACK_X8)
                {
                    j += (DIM-1)*PACK_X8;
                    c  = 0;
                }
            }
            break;
        default:
            gmx_incons("Unsupported nbnxn_atomdata_t format");
    }
}
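/* Worked example (not in the original source): for the grid column at
 * cx = 2, cy = 0, cz = 5, the first filler particle (i = na) is placed at
 * (-321, -107, -107*(6+na)) nm. The (1 + cz + i) term gives every filler
 * a distinct z coordinate, so fillers stay far from all real particles
 * and from each other, avoiding the 0*NaN problem described at
 * NBAT_FAR_AWAY above.
 */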
/* Determines the combination rule (or none) to be used, stores it,
 * and sets the LJ parameters required with the rule.
 */
static void set_combination_rule_data(nbnxn_atomdata_t *nbat)
{
    int  nt, i, j;
    real c6, c12;

    nt = nbat->ntype;

    switch (nbat->comb_rule)
    {
        case ljcrGEOM:
            nbat->comb_rule = ljcrGEOM;

            for (i = 0; i < nt; i++)
            {
                /* Copy the diagonal from the nbfp matrix */
                nbat->nbfp_comb[i*2  ] = sqrt(nbat->nbfp[(i*nt+i)*2  ]);
                nbat->nbfp_comb[i*2+1] = sqrt(nbat->nbfp[(i*nt+i)*2+1]);
            }
            break;
        case ljcrLB:
            for (i = 0; i < nt; i++)
            {
                /* Get 6*C6 and 12*C12 from the diagonal of the nbfp matrix */
                c6  = nbat->nbfp[(i*nt+i)*2  ];
                c12 = nbat->nbfp[(i*nt+i)*2+1];
                if (c6 > 0 && c12 > 0)
                {
                    /* We store 0.5*2^1/6*sigma and sqrt(4*3*eps),
                     * so we get 6*C6 and 12*C12 after combining.
                     */
                    nbat->nbfp_comb[i*2  ] = 0.5*pow(c12/c6, 1.0/6.0);
                    nbat->nbfp_comb[i*2+1] = sqrt(c6*c6/c12);
                }
                else
                {
                    nbat->nbfp_comb[i*2  ] = 0;
                    nbat->nbfp_comb[i*2+1] = 0;
                }
            }
            break;
        case ljcrNONE:
            /* nbfp_s4 stores two parameters using a stride of 4,
             * because this would suit x86 SIMD single-precision
             * quad-load intrinsics. There's a slight inefficiency in
             * allocating and initializing nbfp_s4 when it might not
             * be used, but introducing the conditional code is not
             * really worth it. */
            nbat->alloc((void **)&nbat->nbfp_s4, nt*nt*4*sizeof(*nbat->nbfp_s4));
            for (i = 0; i < nt; i++)
            {
                for (j = 0; j < nt; j++)
                {
                    nbat->nbfp_s4[(i*nt+j)*4+0] = nbat->nbfp[(i*nt+j)*2+0];
                    nbat->nbfp_s4[(i*nt+j)*4+1] = nbat->nbfp[(i*nt+j)*2+1];
                    nbat->nbfp_s4[(i*nt+j)*4+2] = 0;
                    nbat->nbfp_s4[(i*nt+j)*4+3] = 0;
                }
            }
            break;
        default:
            gmx_incons("Unknown combination rule");
    }
}
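/* Derivation sketch (not in the original source) for the ljcrLB storage
 * above. With c6 = 6*C6 = 24*eps*sigma^6 and c12 = 12*C12 = 48*eps*sigma^12:
 *   pow(c12/c6, 1/6) = 2^(1/6)*sigma, so 0.5*2^(1/6)*sigma is stored,
 *   sqrt(c6*c6/c12)  = sqrt(12*eps)  = sqrt(4*3*eps).
 * Combining per Lorentz-Berthelot in the kernel then gives
 *   sigma_ij' = 2^(1/6)*(sigma_i+sigma_j)/2,  eps_ij' = 12*sqrt(eps_i*eps_j),
 * so eps_ij'*sigma_ij'^6 = 24*eps_ij*sigma_ij^6 = 6*C6_ij and
 * eps_ij'*sigma_ij'^12 = 48*eps_ij*sigma_ij^12 = 12*C12_ij, exactly the
 * prefactored parameters the force-only kernels expect.
 */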
#ifdef GMX_NBNXN_SIMD
static void
nbnxn_atomdata_init_simple_exclusion_masks(nbnxn_atomdata_t *nbat)
{
    int       i, j;
    int       simd_excl_size;
    const int simd_width = GMX_SIMD_WIDTH_HERE;

    /* Set the diagonal cluster pair exclusion mask setup data.
     * In the kernel we check 0 < j - i to generate the masks.
     * Here we store j - i for generating the mask for the first i,
     * we subtract 0.5 to avoid rounding issues.
     * In the kernel we can subtract 1 to generate the subsequent masks.
     */
    int        simd_4xn_diag_size;
    const real simdFalse = -1, simdTrue = 1;
    real      *simd_interaction_array;

    simd_4xn_diag_size = max(NBNXN_CPU_CLUSTER_I_SIZE, simd_width);
    snew_aligned(nbat->simd_4xn_diagonal_j_minus_i, simd_4xn_diag_size, NBNXN_MEM_ALIGN);
    for (j = 0; j < simd_4xn_diag_size; j++)
    {
        nbat->simd_4xn_diagonal_j_minus_i[j] = j - 0.5;
    }

    snew_aligned(nbat->simd_2xnn_diagonal_j_minus_i, simd_width, NBNXN_MEM_ALIGN);
    for (j = 0; j < simd_width/2; j++)
    {
        /* The j-cluster size is half the SIMD width */
        nbat->simd_2xnn_diagonal_j_minus_i[j]              = j - 0.5;
        /* The next half of the SIMD width is for i + 1 */
        nbat->simd_2xnn_diagonal_j_minus_i[simd_width/2+j] = j - 1 - 0.5;
    }

    /* We use up to 32 bits for exclusion masking.
     * The same masks are used for the 4xN and 2x(N+N) kernels.
     * The masks are read either into epi32 SIMD registers or into
     * real SIMD registers (together with a cast).
     * In single precision this means the real and epi32 SIMD registers
     * are of equal size.
     * In double precision the epi32 registers can be smaller than
     * the real registers, so depending on the architecture, we might
     * need to use two, identical, 32-bit masks per real.
     */
    simd_excl_size = NBNXN_CPU_CLUSTER_I_SIZE*simd_width;
    snew_aligned(nbat->simd_exclusion_filter1, simd_excl_size, NBNXN_MEM_ALIGN);
    snew_aligned(nbat->simd_exclusion_filter2, simd_excl_size*2, NBNXN_MEM_ALIGN);

    for (j = 0; j < simd_excl_size; j++)
    {
        /* Set the consecutive bits for masking pair exclusions */
        nbat->simd_exclusion_filter1[j]       = (1U << j);
        nbat->simd_exclusion_filter2[j*2 + 0] = (1U << j);
        nbat->simd_exclusion_filter2[j*2 + 1] = (1U << j);
    }

#if (defined GMX_CPU_ACCELERATION_IBM_QPX)
    /* The QPX kernels shouldn't do the bit masking that is done on
     * x86, because the SIMD units lack bit-wise operations. Instead,
     * we generate a vector of all 2^4 possible ways an i atom
     * interacts with its 4 j atoms. Each array entry contains
     * simd_width signed ints that are read in a single SIMD
     * load. These ints must contain values that will be interpreted
     * as true and false when loaded in the SIMD floating-point
     * registers, i.e. any positive or any negative value,
     * respectively. Each array entry encodes how this i atom will
     * interact with the 4 j atoms. Matching code exists in
     * set_ci_top_excls() to generate indices into this array. Those
     * indices are used in the kernels. */

    simd_excl_size = NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
    const int qpx_simd_width = GMX_SIMD_WIDTH_HERE;
    snew_aligned(simd_interaction_array, simd_excl_size * qpx_simd_width, NBNXN_MEM_ALIGN);
    for (j = 0; j < simd_excl_size; j++)
    {
        int index = j * qpx_simd_width;
        for (i = 0; i < qpx_simd_width; i++)
        {
            simd_interaction_array[index + i] = (j & (1 << i)) ? simdTrue : simdFalse;
        }
    }
    nbat->simd_interaction_array = simd_interaction_array;
#endif
}
#endif
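/* Worked example (not in the original source): with a 4-wide SIMD register
 * the stored diagonal values are {-0.5, 0.5, 1.5, 2.5} (j - 0.5 for
 * j = 0..3). For i = 0 the kernel test 0 < j - i yields {F, T, T, T},
 * masking out the self pair; subtracting 1 before the next i row shifts
 * the pattern to {F, F, T, T} for i = 1, and so on down the diagonal of
 * the cluster pair.
 */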
/* Initializes an nbnxn_atomdata_t data structure */
void nbnxn_atomdata_init(FILE *fp,
                         nbnxn_atomdata_t *nbat,
                         int nb_kernel_type,
                         int ntype, const real *nbfp,
                         int n_energygroups,
                         int nout,
                         nbnxn_alloc_t *alloc,
                         nbnxn_free_t  *free)
{
    int      i, j;
    real     c6, c12, tol;
    char    *ptr;
    gmx_bool simple, bCombGeom, bCombLB;

    if (alloc == NULL)
    {
        nbat->alloc = nbnxn_alloc_aligned;
    }
    else
    {
        nbat->alloc = alloc;
    }
    if (free == NULL)
    {
        nbat->free = nbnxn_free_aligned;
    }
    else
    {
        nbat->free = free;
    }

    if (debug)
    {
        fprintf(debug, "There are %d atom types in the system, adding one for nbnxn_atomdata_t\n", ntype);
    }
    nbat->ntype = ntype + 1;
    nbat->alloc((void **)&nbat->nbfp,
                nbat->ntype*nbat->ntype*2*sizeof(*nbat->nbfp));
    nbat->alloc((void **)&nbat->nbfp_comb, nbat->ntype*2*sizeof(*nbat->nbfp_comb));

    /* A tolerance of 1e-5 seems reasonable for (possibly hand-typed)
     * force-field floating point parameters.
     */
    tol = 1e-5;
    ptr = getenv("GMX_LJCOMB_TOL");
    if (ptr != NULL)
    {
        double dbl;

        sscanf(ptr, "%lf", &dbl);
        tol = dbl;
    }
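    /* Usage sketch (not in the original source): the detection tolerance
     * can be adjusted from the shell before starting mdrun, e.g.
     *   GMX_LJCOMB_TOL=1e-7 mdrun ...
     * which makes the geometric/Lorentz-Berthelot checks below stricter.
     */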
    bCombGeom = TRUE;
    bCombLB   = TRUE;

    /* Temporarily fill nbat->nbfp_comb with sigma and epsilon
     * to check for the LB rule.
     */
    for (i = 0; i < ntype; i++)
    {
        c6  = nbfp[(i*ntype+i)*2  ]/6.0;
        c12 = nbfp[(i*ntype+i)*2+1]/12.0;
        if (c6 > 0 && c12 > 0)
        {
            nbat->nbfp_comb[i*2  ] = pow(c12/c6, 1.0/6.0);
            nbat->nbfp_comb[i*2+1] = 0.25*c6*c6/c12;
        }
        else if (c6 == 0 && c12 == 0)
        {
            nbat->nbfp_comb[i*2  ] = 0;
            nbat->nbfp_comb[i*2+1] = 0;
        }
        else
        {
            /* Cannot use LB rule with only dispersion or repulsion */
            bCombLB = FALSE;
        }
    }

    for (i = 0; i < nbat->ntype; i++)
    {
        for (j = 0; j < nbat->ntype; j++)
        {
            if (i < ntype && j < ntype)
            {
                /* fr->nbfp has been updated, so that array too now stores c6/c12 including
                 * the 6.0/12.0 prefactors to save 2 flops in the most common case (force-only).
                 */
                c6  = nbfp[(i*ntype+j)*2  ];
                c12 = nbfp[(i*ntype+j)*2+1];
                nbat->nbfp[(i*nbat->ntype+j)*2  ] = c6;
                nbat->nbfp[(i*nbat->ntype+j)*2+1] = c12;

                /* Compare 6*C6 and 12*C12 for the geometric combination rule */
                bCombGeom = bCombGeom &&
                    gmx_within_tol(c6*c6, nbfp[(i*ntype+i)*2  ]*nbfp[(j*ntype+j)*2  ], tol) &&
                    gmx_within_tol(c12*c12, nbfp[(i*ntype+i)*2+1]*nbfp[(j*ntype+j)*2+1], tol);

                /* Compare C6 and C12 for the Lorentz-Berthelot combination rule */
                c6  /= 6.0;
                c12 /= 12.0;
                bCombLB = bCombLB &&
                    ((c6 == 0 && c12 == 0 &&
                      (nbat->nbfp_comb[i*2+1] == 0 || nbat->nbfp_comb[j*2+1] == 0)) ||
                     (c6 > 0 && c12 > 0 &&
                      gmx_within_tol(pow(c12/c6, 1.0/6.0), 0.5*(nbat->nbfp_comb[i*2]+nbat->nbfp_comb[j*2]), tol) &&
                      gmx_within_tol(0.25*c6*c6/c12, sqrt(nbat->nbfp_comb[i*2+1]*nbat->nbfp_comb[j*2+1]), tol)));
            }
            else
            {
                /* Add zero parameters for the additional dummy atom type */
                nbat->nbfp[(i*nbat->ntype+j)*2  ] = 0;
                nbat->nbfp[(i*nbat->ntype+j)*2+1] = 0;
            }
        }
    }
    if (debug)
    {
        fprintf(debug, "Combination rules: geometric %d Lorentz-Berthelot %d\n",
                bCombGeom, bCombLB);
    }
    simple = nbnxn_kernel_pairlist_simple(nb_kernel_type);

    if (simple)
    {
        /* We prefer the geometric combination rule,
         * as that gives a slightly faster kernel than the LB rule.
         */
        if (bCombGeom)
        {
            nbat->comb_rule = ljcrGEOM;
        }
        else if (bCombLB)
        {
            nbat->comb_rule = ljcrLB;
        }
        else
        {
            nbat->comb_rule = ljcrNONE;

            nbat->free(nbat->nbfp_comb);
        }

        if (fp)
        {
            if (nbat->comb_rule == ljcrNONE)
            {
                fprintf(fp, "Using full Lennard-Jones parameter combination matrix\n\n");
            }
            else
            {
                fprintf(fp, "Using %s Lennard-Jones combination rule\n\n",
                        nbat->comb_rule == ljcrGEOM ? "geometric" : "Lorentz-Berthelot");
            }
        }

        set_combination_rule_data(nbat);
    }
    else
    {
        nbat->comb_rule = ljcrNONE;

        nbat->free(nbat->nbfp_comb);
    }
    nbat->lj_comb = NULL;
    if (simple)
    {
        int pack_x;

        switch (nb_kernel_type)
        {
            case nbnxnk4xN_SIMD_4xN:
            case nbnxnk4xN_SIMD_2xNN:
                pack_x = max(NBNXN_CPU_CLUSTER_I_SIZE,
                             nbnxn_kernel_to_cj_size(nb_kernel_type));
                switch (pack_x)
                {
                    case 4:
                        nbat->XFormat = nbatX4;
                        break;
                    case 8:
                        nbat->XFormat = nbatX8;
                        break;
                    default:
                        gmx_incons("Unsupported packing width");
                }
                break;
            default:
                nbat->XFormat = nbatXYZ;
        }

        nbat->FFormat = nbat->XFormat;
    }
    else
    {
        nbat->XFormat = nbatXYZQ;
        nbat->FFormat = nbatXYZ;
    }
    nbat->nenergrp = n_energygroups;
    if (!simple)
    {
        /* Energy groups not supported yet for super-sub lists */
        if (n_energygroups > 1 && fp != NULL)
        {
            fprintf(fp, "\nNOTE: With GPUs, reporting energy group contributions is not supported\n\n");
        }
        nbat->nenergrp = 1;
    }
    /* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
    if (nbat->nenergrp > 64)
    {
        gmx_fatal(FARGS, "With NxN kernels not more than 64 energy groups are supported\n");
    }
    nbat->neg_2log = 1;
    while (nbat->nenergrp > (1<<nbat->neg_2log))
    {
        nbat->neg_2log++;
    }
    nbat->energrp = NULL;
    nbat->alloc((void **)&nbat->shift_vec, SHIFTS*sizeof(*nbat->shift_vec));
    nbat->xstride = (nbat->XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
    nbat->fstride = (nbat->FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
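    /* Worked example (not in the original source): with 5 energy groups,
     * neg_2log becomes 3 (the smallest exponent with 1<<3 >= 5), so each
     * group id occupies 3 bits and copy_egp_to_nbat_egps below can pack a
     * 4-atom cluster into one int as id0 | id1<<3 | id2<<6 | id3<<9; a
     * kernel then recovers atom sa's id as (comb >> (sa*3)) & 7.
     */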
#ifdef GMX_NBNXN_SIMD
    if (simple)
    {
        nbnxn_atomdata_init_simple_exclusion_masks(nbat);
    }
#endif

    /* Initialize the output data structures */
    nbat->nout = nout;
    snew(nbat->out, nbat->nout);
    nbat->nalloc = 0;
    for (i = 0; i < nbat->nout; i++)
    {
        nbnxn_atomdata_output_init(&nbat->out[i],
                                   nb_kernel_type,
                                   nbat->nenergrp, 1<<nbat->neg_2log,
                                   nbat->alloc);
    }
    nbat->buffer_flags.flag        = NULL;
    nbat->buffer_flags.flag_nalloc = 0;
}
static void copy_lj_to_nbat_lj_comb_x4(const real *ljparam_type,
                                       const int *type, int na,
                                       real *ljparam_at)
{
    int is, k, i;

    /* The LJ params follow the combination rule:
     * copy the params for the type array to the atom array.
     */
    for (is = 0; is < na; is += PACK_X4)
    {
        for (k = 0; k < PACK_X4; k++)
        {
            i = is + k;
            ljparam_at[is*2        +k] = ljparam_type[type[i]*2  ];
            ljparam_at[is*2+PACK_X4+k] = ljparam_type[type[i]*2+1];
        }
    }
}

static void copy_lj_to_nbat_lj_comb_x8(const real *ljparam_type,
                                       const int *type, int na,
                                       real *ljparam_at)
{
    int is, k, i;

    /* The LJ params follow the combination rule:
     * copy the params for the type array to the atom array.
     */
    for (is = 0; is < na; is += PACK_X8)
    {
        for (k = 0; k < PACK_X8; k++)
        {
            i = is + k;
            ljparam_at[is*2        +k] = ljparam_type[type[i]*2  ];
            ljparam_at[is*2+PACK_X8+k] = ljparam_type[type[i]*2+1];
        }
    }
}
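/* Layout sketch (not in the original source): after copy_lj_to_nbat_lj_comb_x4,
 * lj_comb holds, per group of PACK_X4 = 4 atoms, first the four atoms' first
 * combination-rule parameters and then their four second parameters:
 *   p0_a p1_a p2_a p3_a  p0_b p1_b p2_b p3_b
 * mirroring the X4 coordinate packing described earlier, so the kernels can
 * fetch either parameter for a whole cluster with one aligned vector load.
 */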
/* Sets the atom type and LJ data in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t    *nbat,
                                         int                  ngrid,
                                         const nbnxn_search_t nbs,
                                         const int           *type)
{
    int                 g, i, ncz, ash;
    const nbnxn_grid_t *grid;

    for (g = 0; g < ngrid; g++)
    {
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (i = 0; i < grid->ncx*grid->ncy; i++)
        {
            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_int_to_nbat_int(nbs->a+ash, grid->cxy_na[i], ncz*grid->na_sc,
                                 type, nbat->ntype-1, nbat->type+ash);

            if (nbat->comb_rule != ljcrNONE)
            {
                if (nbat->XFormat == nbatX4)
                {
                    copy_lj_to_nbat_lj_comb_x4(nbat->nbfp_comb,
                                               nbat->type+ash, ncz*grid->na_sc,
                                               nbat->lj_comb+ash*2);
                }
                else if (nbat->XFormat == nbatX8)
                {
                    copy_lj_to_nbat_lj_comb_x8(nbat->nbfp_comb,
                                               nbat->type+ash, ncz*grid->na_sc,
                                               nbat->lj_comb+ash*2);
                }
            }
        }
    }
}
/* Sets the charges in nbnxn_atomdata_t *nbat */
static void nbnxn_atomdata_set_charges(nbnxn_atomdata_t    *nbat,
                                       int                  ngrid,
                                       const nbnxn_search_t nbs,
                                       const real          *charge)
{
    int                 g, cxy, ncz, ash, na, na_round, i, j;
    real               *q;
    const nbnxn_grid_t *grid;

    for (g = 0; g < ngrid; g++)
    {
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (cxy = 0; cxy < grid->ncx*grid->ncy; cxy++)
        {
            ash      = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
            na       = grid->cxy_na[cxy];
            na_round = (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;

            if (nbat->XFormat == nbatXYZQ)
            {
                q = nbat->x + ash*STRIDE_XYZQ + ZZ + 1;
                for (i = 0; i < na; i++)
                {
                    *q = charge[nbs->a[ash+i]];
                    q += STRIDE_XYZQ;
                }
                /* Complete the partially filled last cell with zeros */
                for (; i < na_round; i++)
                {
                    *q = 0;
                    q += STRIDE_XYZQ;
                }
            }
            else
            {
                q = nbat->q + ash;
                for (i = 0; i < na; i++)
                {
                    *q = charge[nbs->a[ash+i]];
                    q++;
                }
                /* Complete the partially filled last cell with zeros */
                for (; i < na_round; i++)
                {
                    *q = 0;
                    q++;
                }
            }
        }
    }
}
/* Copies the energy group indices to a reordered and packed array */
static void copy_egp_to_nbat_egps(const int *a, int na, int na_round,
                                  int na_c, int bit_shift,
                                  const int *in, int *innb)
{
    int i, j, sa, at;
    int comb;

    j = 0;
    for (i = 0; i < na; i += na_c)
    {
        /* Store na_c energy group numbers into one int */
        comb = 0;
        for (sa = 0; sa < na_c; sa++)
        {
            at = a[i+sa];
            if (at >= 0)
            {
                comb |= (GET_CGINFO_GID(in[at]) << (sa*bit_shift));
            }
        }
        innb[j++] = comb;
    }
    /* Complete the partially filled last cell with zeros */
    for (; i < na_round; i += na_c)
    {
        innb[j++] = 0;
    }
}
/* Set the energy group indices for atoms in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_energygroups(nbnxn_atomdata_t    *nbat,
                                            int                  ngrid,
                                            const nbnxn_search_t nbs,
                                            const int           *atinfo)
{
    int                 g, i, ncz, ash;
    const nbnxn_grid_t *grid;

    for (g = 0; g < ngrid; g++)
    {
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (i = 0; i < grid->ncx*grid->ncy; i++)
        {
            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_egp_to_nbat_egps(nbs->a+ash, grid->cxy_na[i], ncz*grid->na_sc,
                                  nbat->na_c, nbat->neg_2log,
                                  atinfo, nbat->energrp+(ash>>grid->na_c_2log));
        }
    }
}
/* Sets all required atom parameter data in nbnxn_atomdata_t */
void nbnxn_atomdata_set(nbnxn_atomdata_t    *nbat,
                        int                  locality,
                        const nbnxn_search_t nbs,
                        const t_mdatoms     *mdatoms,
                        const int           *atinfo)
{
    int ngrid;

    if (locality == eatLocal)
    {
        ngrid = 1;
    }
    else
    {
        ngrid = nbs->ngrid;
    }

    nbnxn_atomdata_set_atomtypes(nbat, ngrid, nbs, mdatoms->typeA);

    nbnxn_atomdata_set_charges(nbat, ngrid, nbs, mdatoms->chargeA);

    if (nbat->nenergrp > 1)
    {
        nbnxn_atomdata_set_energygroups(nbat, ngrid, nbs, atinfo);
    }
}
/* Copies the shift vector array to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_shiftvec(gmx_bool          bDynamicBox,
                                  rvec             *shift_vec,
                                  nbnxn_atomdata_t *nbat)
{
    int i;

    nbat->bDynamicBox = bDynamicBox;
    for (i = 0; i < SHIFTS; i++)
    {
        copy_rvec(shift_vec[i], nbat->shift_vec[i]);
    }
}
/* Copies (and reorders) the coordinates to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_x_to_nbat_x(const nbnxn_search_t nbs,
                                     int                  locality,
                                     gmx_bool             FillLocal,
                                     rvec                *x,
                                     nbnxn_atomdata_t    *nbat)
{
    int g0 = 0, g1 = 0;
    int nth, th;

    switch (locality)
    {
        case eatAll:
            g0 = 0;
            g1 = nbs->ngrid;
            break;
        case eatLocal:
            g0 = 0;
            g1 = 1;
            break;
        case eatNonlocal:
            g0 = 1;
            g1 = nbs->ngrid;
            break;
    }

    if (FillLocal)
    {
        nbat->natoms_local = nbs->grid[0].nc*nbs->grid[0].na_sc;
    }

    nth = gmx_omp_nthreads_get(emntPairsearch);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (th = 0; th < nth; th++)
    {
        int g;

        for (g = g0; g < g1; g++)
        {
            const nbnxn_grid_t *grid;
            int                 cxy0, cxy1, cxy;

            grid = &nbs->grid[g];

            cxy0 = (grid->ncx*grid->ncy* th   +nth-1)/nth;
            cxy1 = (grid->ncx*grid->ncy*(th+1)+nth-1)/nth;

            for (cxy = cxy0; cxy < cxy1; cxy++)
            {
                int na, ash, na_fill;

                na  = grid->cxy_na[cxy];
                ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;

                if (g == 0 && FillLocal)
                {
                    na_fill =
                        (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;
                }
                else
                {
                    /* We fill only the real particle locations.
                     * We assume the filler entries at the end have
                     * already been set properly during pair search.
                     */
                    na_fill = na;
                }
                copy_rvec_to_nbat_real(nbs->a+ash, na, na_fill, x,
                                       nbat->XFormat, nbat->x, ash,
                                       0, 0, 0);
            }
        }
    }
}
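/* Worked example (not in the original source) of the column split above:
 * with ncx*ncy = 10 grid columns and nth = 4 threads, the ceiling-division
 * formula (ncols*th + nth - 1)/nth yields the ranges [0,3), [3,5), [5,8)
 * and [8,10), i.e. a near-even static partition of columns over threads.
 */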
static void
nbnxn_atomdata_clear_reals(real * gmx_restrict dest,
                           int i0, int i1)
{
    int i;

    for (i = i0; i < i1; i++)
    {
        dest[i] = 0;
    }
}

static void
nbnxn_atomdata_reduce_reals(real * gmx_restrict dest,
                            gmx_bool bDestSet,
                            real ** gmx_restrict src,
                            int nsrc,
                            int i0, int i1)
{
    int i, s;

    if (bDestSet)
    {
        /* The destination buffer contains data, add to it */
        for (i = i0; i < i1; i++)
        {
            for (s = 0; s < nsrc; s++)
            {
                dest[i] += src[s][i];
            }
        }
    }
    else
    {
        /* The destination buffer is uninitialized, set it first */
        for (i = i0; i < i1; i++)
        {
            dest[i] = src[0][i];
            for (s = 1; s < nsrc; s++)
            {
                dest[i] += src[s][i];
            }
        }
    }
}
static void
nbnxn_atomdata_reduce_reals_simd(real gmx_unused * gmx_restrict dest,
                                 gmx_bool gmx_unused bDestSet,
                                 real gmx_unused ** gmx_restrict src,
                                 int gmx_unused nsrc,
                                 int gmx_unused i0, int gmx_unused i1)
{
#ifdef GMX_NBNXN_SIMD
    /* The SIMD width here is actually independent of that in the kernels,
     * but we use the same width for simplicity (usually optimal anyhow).
     */
    int       i, s;
    gmx_mm_pr dest_SSE, src_SSE;

    if (bDestSet)
    {
        for (i = i0; i < i1; i += GMX_SIMD_WIDTH_HERE)
        {
            dest_SSE = gmx_load_pr(dest+i);
            for (s = 0; s < nsrc; s++)
            {
                src_SSE  = gmx_load_pr(src[s]+i);
                dest_SSE = gmx_add_pr(dest_SSE, src_SSE);
            }
            gmx_store_pr(dest+i, dest_SSE);
        }
    }
    else
    {
        for (i = i0; i < i1; i += GMX_SIMD_WIDTH_HERE)
        {
            dest_SSE = gmx_load_pr(src[0]+i);
            for (s = 1; s < nsrc; s++)
            {
                src_SSE  = gmx_load_pr(src[s]+i);
                dest_SSE = gmx_add_pr(dest_SSE, src_SSE);
            }
            gmx_store_pr(dest+i, dest_SSE);
        }
    }
#endif
}
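/* Note (an assumption, not stated in the original source): the SIMD loops
 * above require i1 - i0 to be a multiple of GMX_SIMD_WIDTH_HERE; the caller
 * satisfies this by always reducing whole buffer-flag blocks of
 * NBNXN_BUFFERFLAG_SIZE*fstride reals at a time.
 */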
/* Add part of the force array(s) from nbnxn_atomdata_t to f */
static void
nbnxn_atomdata_add_nbat_f_to_f_part(const nbnxn_search_t nbs,
                                    const nbnxn_atomdata_t *nbat,
                                    nbnxn_atomdata_output_t *out,
                                    int nfa,
                                    int a0, int a1,
                                    rvec *f)
{
    int         a, i, fa;
    const int  *cell;
    const real *fnb;

    cell = nbs->cell;

    /* Loop over all atoms in the range and accumulate their forces */
    switch (nbat->FFormat)
    {
        case nbatXYZ:
        case nbatXYZQ:
            if (nfa == 1)
            {
                fnb = out[0].f;

                for (a = a0; a < a1; a++)
                {
                    i = cell[a]*nbat->fstride;

                    f[a][XX] += fnb[i];
                    f[a][YY] += fnb[i+1];
                    f[a][ZZ] += fnb[i+2];
                }
            }
            else
            {
                for (a = a0; a < a1; a++)
                {
                    i = cell[a]*nbat->fstride;

                    for (fa = 0; fa < nfa; fa++)
                    {
                        f[a][XX] += out[fa].f[i];
                        f[a][YY] += out[fa].f[i+1];
                        f[a][ZZ] += out[fa].f[i+2];
                    }
                }
            }
            break;
        case nbatX4:
            if (nfa == 1)
            {
                fnb = out[0].f;

                for (a = a0; a < a1; a++)
                {
                    i = X4_IND_A(cell[a]);

                    f[a][XX] += fnb[i+XX*PACK_X4];
                    f[a][YY] += fnb[i+YY*PACK_X4];
                    f[a][ZZ] += fnb[i+ZZ*PACK_X4];
                }
            }
            else
            {
                for (a = a0; a < a1; a++)
                {
                    i = X4_IND_A(cell[a]);

                    for (fa = 0; fa < nfa; fa++)
                    {
                        f[a][XX] += out[fa].f[i+XX*PACK_X4];
                        f[a][YY] += out[fa].f[i+YY*PACK_X4];
                        f[a][ZZ] += out[fa].f[i+ZZ*PACK_X4];
                    }
                }
            }
            break;
        case nbatX8:
            if (nfa == 1)
            {
                fnb = out[0].f;

                for (a = a0; a < a1; a++)
                {
                    i = X8_IND_A(cell[a]);

                    f[a][XX] += fnb[i+XX*PACK_X8];
                    f[a][YY] += fnb[i+YY*PACK_X8];
                    f[a][ZZ] += fnb[i+ZZ*PACK_X8];
                }
            }
            else
            {
                for (a = a0; a < a1; a++)
                {
                    i = X8_IND_A(cell[a]);

                    for (fa = 0; fa < nfa; fa++)
                    {
                        f[a][XX] += out[fa].f[i+XX*PACK_X8];
                        f[a][YY] += out[fa].f[i+YY*PACK_X8];
                        f[a][ZZ] += out[fa].f[i+ZZ*PACK_X8];
                    }
                }
            }
            break;
        default:
            gmx_incons("Unsupported nbnxn_atomdata_t format");
    }
}
/* Add the force array(s) from nbnxn_atomdata_t to f */
void nbnxn_atomdata_add_nbat_f_to_f(const nbnxn_search_t nbs,
                                    int locality,
                                    const nbnxn_atomdata_t *nbat,
                                    rvec *f)
{
    int a0 = 0, na = 0;
    int nth, th;

    nbs_cycle_start(&nbs->cc[enbsCCreducef]);

    switch (locality)
    {
        case eatAll:
            a0 = 0;
            na = nbs->natoms_nonlocal;
            break;
        case eatLocal:
            a0 = 0;
            na = nbs->natoms_local;
            break;
        case eatNonlocal:
            a0 = nbs->natoms_local;
            na = nbs->natoms_nonlocal - nbs->natoms_local;
            break;
    }

    nth = gmx_omp_nthreads_get(emntNonbonded);

    if (nbat->nout > 1)
    {
        if (locality != eatAll)
        {
            gmx_incons("add_f_to_f called with nout>1 and locality!=eatAll");
        }

        /* Reduce the force thread output buffers into buffer 0, before adding
         * them to the, differently ordered, "real" force buffer.
         */
#pragma omp parallel for num_threads(nth) schedule(static)
        for (th = 0; th < nth; th++)
        {
            const nbnxn_buffer_flags_t *flags;
            int   b0, b1, b;
            int   i0, i1;
            int   nfptr;
            real *fptr[NBNXN_BUFFERFLAG_MAX_THREADS];
            int   out;

            flags = &nbat->buffer_flags;

            /* Calculate the cell-block range for our thread */
            b0 = (flags->nflag* th   )/nth;
            b1 = (flags->nflag*(th+1))/nth;

            for (b = b0; b < b1; b++)
            {
                i0 =  b   *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
                i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;

                nfptr = 0;
                for (out = 1; out < nbat->nout; out++)
                {
                    if (flags->flag[b] & (1U<<out))
                    {
                        fptr[nfptr++] = nbat->out[out].f;
                    }
                }
                if (nfptr > 0)
                {
#ifdef GMX_NBNXN_SIMD
                    nbnxn_atomdata_reduce_reals_simd
#else
                    nbnxn_atomdata_reduce_reals
#endif
                        (nbat->out[0].f,
                         flags->flag[b] & (1U<<0),
                         fptr, nfptr,
                         i0, i1);
                }
                else if (!(flags->flag[b] & (1U<<0)))
                {
                    nbnxn_atomdata_clear_reals(nbat->out[0].f,
                                               i0, i1);
                }
            }
        }
    }

#pragma omp parallel for num_threads(nth) schedule(static)
    for (th = 0; th < nth; th++)
    {
        nbnxn_atomdata_add_nbat_f_to_f_part(nbs, nbat,
                                            nbat->out,
                                            1,
                                            a0+((th+0)*na)/nth,
                                            a0+((th+1)*na)/nth,
                                            f);
    }

    nbs_cycle_stop(&nbs->cc[enbsCCreducef]);
}
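/* Worked example (not in the original source) of the buffer-flag reduction
 * above: if flags->flag[b] == 0b0101, thread buffers 0 and 2 touched cell
 * block b, so fptr collects out[2].f and the reduction adds it into
 * out[0].f with bDestSet = TRUE. A block with flag 0b0100 is instead
 * copied into out[0].f (bDestSet = FALSE), and a block with flag 0 is
 * merely cleared.
 */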
/* Adds the shift forces from nbnxn_atomdata_t to fshift */
void nbnxn_atomdata_add_nbat_fshift_to_fshift(const nbnxn_atomdata_t *nbat,
                                              rvec                   *fshift)
{
    const nbnxn_atomdata_output_t *out;
    int  th;
    int  s;
    rvec sum;

    out = nbat->out;

    for (s = 0; s < SHIFTS; s++)
    {
        clear_rvec(sum);
        for (th = 0; th < nbat->nout; th++)
        {
            sum[XX] += out[th].fshift[s*DIM+XX];
            sum[YY] += out[th].fshift[s*DIM+YY];
            sum[ZZ] += out[th].fshift[s*DIM+ZZ];
        }
        rvec_inc(fshift[s], sum);
    }
}