/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
48 #include "nbnxn_consts.h"
49 #include "nbnxn_internal.h"
50 #include "nbnxn_search.h"
51 #include "nbnxn_atomdata.h"
52 #include "gmx_omp_nthreads.h"
/* Default nbnxn allocation routine, allocates NBNXN_MEM_ALIGN byte aligned */
void nbnxn_alloc_aligned(void **ptr, size_t nbytes)
{
    *ptr = save_malloc_aligned("ptr", __FILE__, __LINE__, nbytes, 1, NBNXN_MEM_ALIGN);
}
/* Free function for memory allocated with nbnxn_alloc_aligned */
void nbnxn_free_aligned(void *ptr)
{
    sfree_aligned(ptr);
}
/* Reallocation wrapper function for nbnxn data structures */
void nbnxn_realloc_void(void **ptr,
                        int nbytes_copy, int nbytes_new,
                        nbnxn_alloc_t *ma,
                        nbnxn_free_t  *mf)
{
    void *ptr_new;

    ma(&ptr_new, nbytes_new);

    if (nbytes_new > 0 && ptr_new == NULL)
    {
        gmx_fatal(FARGS, "Allocation of %d bytes failed", nbytes_new);
    }

    if (nbytes_copy > 0)
    {
        if (nbytes_new < nbytes_copy)
        {
            gmx_incons("In nbnxn_realloc_void: new size less than copy size");
        }
        memcpy(ptr_new, *ptr, nbytes_copy);
    }
    if (*ptr != NULL)
    {
        mf(*ptr);
    }
    *ptr = ptr_new;
}
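/* Illustrative sketch, not part of the original source: growing a
 * per-atom int array with the wrapper above ("n_old" and "n_new" are
 * hypothetical element counts; n_old must be 0 on the first call,
 * when the array is still NULL):
 *
 *     nbnxn_realloc_void((void **)&type,
 *                        n_old*sizeof(*type),
 *                        n_new*sizeof(*type),
 *                        nbnxn_alloc_aligned, nbnxn_free_aligned);
 *
 * The first n_old elements are copied over, the old buffer is freed
 * and type points to the new, aligned allocation afterwards.
 */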
/* Reallocate the nbnxn_atomdata_t for a size of n atoms */
void nbnxn_atomdata_realloc(nbnxn_atomdata_t *nbat, int n)
{
    int t;

    nbnxn_realloc_void((void **)&nbat->type,
                       nbat->natoms*sizeof(*nbat->type),
                       n*sizeof(*nbat->type),
                       nbat->alloc, nbat->free);
    nbnxn_realloc_void((void **)&nbat->lj_comb,
                       nbat->natoms*2*sizeof(*nbat->lj_comb),
                       n*2*sizeof(*nbat->lj_comb),
                       nbat->alloc, nbat->free);
    if (nbat->XFormat != nbatXYZQ)
    {
        nbnxn_realloc_void((void **)&nbat->q,
                           nbat->natoms*sizeof(*nbat->q),
                           n*sizeof(*nbat->q),
                           nbat->alloc, nbat->free);
    }
    if (nbat->nenergrp > 1)
    {
        nbnxn_realloc_void((void **)&nbat->energrp,
                           nbat->natoms/nbat->na_c*sizeof(*nbat->energrp),
                           n/nbat->na_c*sizeof(*nbat->energrp),
                           nbat->alloc, nbat->free);
    }
    nbnxn_realloc_void((void **)&nbat->x,
                       nbat->natoms*nbat->xstride*sizeof(*nbat->x),
                       n*nbat->xstride*sizeof(*nbat->x),
                       nbat->alloc, nbat->free);
    for (t = 0; t < nbat->nout; t++)
    {
        /* Allocate one element extra for possible signaling with CUDA */
        nbnxn_realloc_void((void **)&nbat->out[t].f,
                           nbat->natoms*nbat->fstride*sizeof(*nbat->out[t].f),
                           n*nbat->fstride*sizeof(*nbat->out[t].f),
                           nbat->alloc, nbat->free);
    }
    nbat->nalloc = n;
}
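/* Note (assumption, not from the original source): callers typically pass
 * an over-allocated size, e.g. via over_alloc_large(), so that repeated
 * calls grow the buffers with amortized cost; "natoms_new" is hypothetical:
 *
 *     if (natoms_new > nbat->nalloc)
 *     {
 *         nbnxn_atomdata_realloc(nbat, over_alloc_large(natoms_new));
 *     }
 */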
/* Initializes an nbnxn_atomdata_output_t data structure */
static void nbnxn_atomdata_output_init(nbnxn_atomdata_output_t *out,
                                       int nb_kernel_type,
                                       int nenergrp, int stride,
                                       nbnxn_alloc_t *ma)
{
    int cj_size;

    out->f = NULL;
    ma((void **)&out->fshift, SHIFTS*DIM*sizeof(*out->fshift));
    out->nV = nenergrp*nenergrp;
    ma((void **)&out->Vvdw, out->nV*sizeof(*out->Vvdw));
    ma((void **)&out->Vc,   out->nV*sizeof(*out->Vc  ));

    if (nb_kernel_type == nbnxnk4xN_SIMD_4xN ||
        nb_kernel_type == nbnxnk4xN_SIMD_2xNN)
    {
        cj_size  = nbnxn_kernel_to_cj_size(nb_kernel_type);
        out->nVS = nenergrp*nenergrp*stride*(cj_size>>1)*cj_size;
        ma((void **)&out->VSvdw, out->nVS*sizeof(*out->VSvdw));
        ma((void **)&out->VSc,   out->nVS*sizeof(*out->VSc  ));
    }
    else
    {
        out->nVS = 0;
    }
}
static void copy_int_to_nbat_int(const int *a, int na, int na_round,
                                 const int *in, int fill, int *innb)
{
    int i, j;

    j = 0;
    for (i = 0; i < na; i++)
    {
        innb[j++] = in[a[i]];
    }
    /* Complete the partially filled last cell with fill */
    for (; i < na_round; i++)
    {
        innb[j++] = fill;
    }
}
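/* Example of the fill behavior above: with na = 3, na_round = 8 and
 * fill = nbat->ntype-1 (the dummy type added in nbnxn_atomdata_init),
 * innb becomes { in[a[0]], in[a[1]], in[a[2]], fill, fill, fill, fill, fill },
 * so filler atoms get an atom type whose LJ parameters are all zero.
 */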
static void clear_nbat_real(int na, int nbatFormat, real *xnb, int a0)
{
    int a, d, j, c;

    switch (nbatFormat)
    {
        case nbatXYZ:
            for (a = 0; a < na; a++)
            {
                for (d = 0; d < DIM; d++)
                {
                    xnb[(a0+a)*STRIDE_XYZ+d] = 0;
                }
            }
            break;
        case nbatXYZQ:
            for (a = 0; a < na; a++)
            {
                for (d = 0; d < DIM; d++)
                {
                    xnb[(a0+a)*STRIDE_XYZQ+d] = 0;
                }
            }
            break;
        case nbatX4:
            j = X4_IND_A(a0);
            c = a0 & (PACK_X4-1);
            for (a = 0; a < na; a++)
            {
                xnb[j+XX*PACK_X4] = 0;
                xnb[j+YY*PACK_X4] = 0;
                xnb[j+ZZ*PACK_X4] = 0;
                j++;
                c++;
                if (c == PACK_X4)
                {
                    j += (DIM-1)*PACK_X4;
                    c  = 0;
                }
            }
            break;
        case nbatX8:
            j = X8_IND_A(a0);
            c = a0 & (PACK_X8-1);
            for (a = 0; a < na; a++)
            {
                xnb[j+XX*PACK_X8] = 0;
                xnb[j+YY*PACK_X8] = 0;
                xnb[j+ZZ*PACK_X8] = 0;
                j++;
                c++;
                if (c == PACK_X8)
                {
                    j += (DIM-1)*PACK_X8;
                    c  = 0;
                }
            }
            break;
    }
}
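/* Layout sketch for the packed formats cleared above: with nbatX4 the
 * coordinates are stored in blocks of PACK_X4 = 4 atoms,
 *
 *     x0 x1 x2 x3  y0 y1 y2 y3  z0 z1 z2 z3  x4 x5 ...
 *
 * so component XX/YY/ZZ of an atom sits at its in-block index plus
 * XX/YY/ZZ*PACK_X4, and X4_IND_A(a0) maps a global atom index to the
 * start of its block; nbatX8 is identical with blocks of 8.
 */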
void copy_rvec_to_nbat_real(const int *a, int na, int na_round,
                            rvec *x, int nbatFormat, real *xnb, int a0,
                            int cx, int cy, int cz)
{
    int i, j, c;

/* We might need to place filler particles to fill up the cell to na_round.
 * The coefficients (LJ and q) for such particles are zero.
 * But we might still get NaN as 0*NaN when distances are too small.
 * We hope that -107 nm is far enough away from zero
 * to avoid accidental short distances to particles shifted down for pbc.
 */
#define NBAT_FAR_AWAY 107

    switch (nbatFormat)
    {
        case nbatXYZ:
            j = a0*STRIDE_XYZ;
            for (i = 0; i < na; i++)
            {
                xnb[j++] = x[a[i]][XX];
                xnb[j++] = x[a[i]][YY];
                xnb[j++] = x[a[i]][ZZ];
            }
            /* Complete the partially filled last cell with particles far apart.
             * This avoids numerical issues with filler atoms that would
             * otherwise be coincidentally close.
             */
            for (; i < na_round; i++)
            {
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);
            }
            break;
        case nbatXYZQ:
            j = a0*STRIDE_XYZQ;
            for (i = 0; i < na; i++)
            {
                xnb[j++] = x[a[i]][XX];
                xnb[j++] = x[a[i]][YY];
                xnb[j++] = x[a[i]][ZZ];
                j++;
            }
            /* Complete the partially filled last cell with particles far apart */
            for (; i < na_round; i++)
            {
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
                xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);
                j++;
            }
            break;
        case nbatX4:
            j = X4_IND_A(a0);
            c = a0 & (PACK_X4-1);
            for (i = 0; i < na; i++)
            {
                xnb[j+XX*PACK_X4] = x[a[i]][XX];
                xnb[j+YY*PACK_X4] = x[a[i]][YY];
                xnb[j+ZZ*PACK_X4] = x[a[i]][ZZ];
                j++;
                c++;
                if (c == PACK_X4)
                {
                    j += (DIM-1)*PACK_X4;
                    c  = 0;
                }
            }
            /* Complete the partially filled last cell with particles far apart */
            for (; i < na_round; i++)
            {
                xnb[j+XX*PACK_X4] = -NBAT_FAR_AWAY*(1 + cx);
                xnb[j+YY*PACK_X4] = -NBAT_FAR_AWAY*(1 + cy);
                xnb[j+ZZ*PACK_X4] = -NBAT_FAR_AWAY*(1 + cz + i);
                j++;
                c++;
                if (c == PACK_X4)
                {
                    j += (DIM-1)*PACK_X4;
                    c  = 0;
                }
            }
            break;
        case nbatX8:
            j = X8_IND_A(a0);
            c = a0 & (PACK_X8 - 1);
            for (i = 0; i < na; i++)
            {
                xnb[j+XX*PACK_X8] = x[a[i]][XX];
                xnb[j+YY*PACK_X8] = x[a[i]][YY];
                xnb[j+ZZ*PACK_X8] = x[a[i]][ZZ];
                j++;
                c++;
                if (c == PACK_X8)
                {
                    j += (DIM-1)*PACK_X8;
                    c  = 0;
                }
            }
            /* Complete the partially filled last cell with particles far apart */
            for (; i < na_round; i++)
            {
                xnb[j+XX*PACK_X8] = -NBAT_FAR_AWAY*(1 + cx);
                xnb[j+YY*PACK_X8] = -NBAT_FAR_AWAY*(1 + cy);
                xnb[j+ZZ*PACK_X8] = -NBAT_FAR_AWAY*(1 + cz + i);
                j++;
                c++;
                if (c == PACK_X8)
                {
                    j += (DIM-1)*PACK_X8;
                    c  = 0;
                }
            }
            break;
        default:
            gmx_incons("Unsupported nbnxn_atomdata_t format");
    }
}
/* Determines the combination rule (or none) to be used, stores it,
 * and sets the LJ parameters required with the rule.
 */
static void set_combination_rule_data(nbnxn_atomdata_t *nbat)
{
    int  nt, i, j;
    real c6, c12;

    nt = nbat->ntype;

    switch (nbat->comb_rule)
    {
        case ljcrGEOM:
            nbat->comb_rule = ljcrGEOM;

            for (i = 0; i < nt; i++)
            {
                /* Copy the diagonal from the nbfp matrix */
                nbat->nbfp_comb[i*2  ] = sqrt(nbat->nbfp[(i*nt+i)*2  ]);
                nbat->nbfp_comb[i*2+1] = sqrt(nbat->nbfp[(i*nt+i)*2+1]);
            }
            break;
        case ljcrLB:
            for (i = 0; i < nt; i++)
            {
                /* Get 6*C6 and 12*C12 from the diagonal of the nbfp matrix */
                c6  = nbat->nbfp[(i*nt+i)*2  ];
                c12 = nbat->nbfp[(i*nt+i)*2+1];
                if (c6 > 0 && c12 > 0)
                {
                    /* We store 0.5*2^1/6*sigma and sqrt(4*3*eps),
                     * so we get 6*C6 and 12*C12 after combining.
                     */
                    nbat->nbfp_comb[i*2  ] = 0.5*pow(c12/c6, 1.0/6.0);
                    nbat->nbfp_comb[i*2+1] = sqrt(c6*c6/c12);
                }
                else
                {
                    nbat->nbfp_comb[i*2  ] = 0;
                    nbat->nbfp_comb[i*2+1] = 0;
                }
            }
            break;
        case ljcrNONE:
            /* nbfp_s4 stores two parameters using a stride of 4,
             * because this would suit x86 SIMD single-precision
             * quad-load intrinsics. There's a slight inefficiency in
             * allocating and initializing nbfp_s4 when it might not
             * be used, but introducing the conditional code is not
             * really worth it. */
            nbat->alloc((void **)&nbat->nbfp_s4, nt*nt*4*sizeof(*nbat->nbfp_s4));
            for (i = 0; i < nt; i++)
            {
                for (j = 0; j < nt; j++)
                {
                    nbat->nbfp_s4[(i*nt+j)*4+0] = nbat->nbfp[(i*nt+j)*2+0];
                    nbat->nbfp_s4[(i*nt+j)*4+1] = nbat->nbfp[(i*nt+j)*2+1];
                    nbat->nbfp_s4[(i*nt+j)*4+2] = 0;
                    nbat->nbfp_s4[(i*nt+j)*4+3] = 0;
                }
            }
            break;
        default:
            gmx_incons("Unknown combination rule");
            break;
    }
}
/* Initializes an nbnxn_atomdata_t data structure */
void nbnxn_atomdata_init(FILE *fp,
                         nbnxn_atomdata_t *nbat,
                         int nb_kernel_type,
                         int ntype, const real *nbfp,
                         int n_energygroups,
                         int nout,
                         nbnxn_alloc_t *alloc,
                         nbnxn_free_t  *free)
{
    int      i, j;
    real     c6, c12, tol;
    char    *ptr;
    gmx_bool simple, bCombGeom, bCombLB;

    if (alloc == NULL)
    {
        nbat->alloc = nbnxn_alloc_aligned;
    }
    else
    {
        nbat->alloc = alloc;
    }
    if (free == NULL)
    {
        nbat->free = nbnxn_free_aligned;
    }
    else
    {
        nbat->free = free;
    }

    if (debug)
    {
        fprintf(debug, "There are %d atom types in the system, adding one for nbnxn_atomdata_t\n", ntype);
    }
    nbat->ntype = ntype + 1;
    nbat->alloc((void **)&nbat->nbfp,
                nbat->ntype*nbat->ntype*2*sizeof(*nbat->nbfp));
    nbat->alloc((void **)&nbat->nbfp_comb, nbat->ntype*2*sizeof(*nbat->nbfp_comb));
    /* A tolerance of 1e-5 seems reasonable for (possibly hand-typed)
     * force-field floating point parameters.
     */
    tol = 1e-5;
    ptr = getenv("GMX_LJCOMB_TOL");
    if (ptr != NULL)
    {
        double dbl;

        sscanf(ptr, "%lf", &dbl);
        tol = dbl;
    }
    bCombGeom = TRUE;
    bCombLB   = TRUE;

    /* Temporarily fill nbat->nbfp_comb with sigma and epsilon
     * to check for the LB rule.
     */
    for (i = 0; i < ntype; i++)
    {
        c6  = nbfp[(i*ntype+i)*2  ]/6.0;
        c12 = nbfp[(i*ntype+i)*2+1]/12.0;
        if (c6 > 0 && c12 > 0)
        {
            nbat->nbfp_comb[i*2  ] = pow(c12/c6, 1.0/6.0);
            nbat->nbfp_comb[i*2+1] = 0.25*c6*c6/c12;
        }
        else if (c6 == 0 && c12 == 0)
        {
            nbat->nbfp_comb[i*2  ] = 0;
            nbat->nbfp_comb[i*2+1] = 0;
        }
        else
        {
            /* Can not use LB rule with only dispersion or repulsion */
            bCombLB = FALSE;
        }
    }
    for (i = 0; i < nbat->ntype; i++)
    {
        for (j = 0; j < nbat->ntype; j++)
        {
            if (i < ntype && j < ntype)
            {
                /* fr->nbfp has been updated, so that array too now stores c6/c12 including
                 * the 6.0/12.0 prefactors to save 2 flops in the most common case (force-only).
                 */
                c6  = nbfp[(i*ntype+j)*2  ];
                c12 = nbfp[(i*ntype+j)*2+1];
                nbat->nbfp[(i*nbat->ntype+j)*2  ] = c6;
                nbat->nbfp[(i*nbat->ntype+j)*2+1] = c12;

                /* Compare 6*C6 and 12*C12 for the geometric combination rule */
                bCombGeom = bCombGeom &&
                    gmx_within_tol(c6*c6, nbfp[(i*ntype+i)*2  ]*nbfp[(j*ntype+j)*2  ], tol) &&
                    gmx_within_tol(c12*c12, nbfp[(i*ntype+i)*2+1]*nbfp[(j*ntype+j)*2+1], tol);

                /* Compare C6 and C12 for the Lorentz-Berthelot combination rule */
                c6  /= 6.0;
                c12 /= 12.0;
                bCombLB = bCombLB &&
                    ((c6 == 0 && c12 == 0 &&
                      (nbat->nbfp_comb[i*2+1] == 0 || nbat->nbfp_comb[j*2+1] == 0)) ||
                     (c6 > 0 && c12 > 0 &&
                      gmx_within_tol(pow(c12/c6, 1.0/6.0), 0.5*(nbat->nbfp_comb[i*2]+nbat->nbfp_comb[j*2]), tol) &&
                      gmx_within_tol(0.25*c6*c6/c12, sqrt(nbat->nbfp_comb[i*2+1]*nbat->nbfp_comb[j*2+1]), tol)));
            }
            else
            {
                /* Add zero parameters for the additional dummy atom type */
                nbat->nbfp[(i*nbat->ntype+j)*2  ] = 0;
                nbat->nbfp[(i*nbat->ntype+j)*2+1] = 0;
            }
        }
    }
    if (debug)
    {
        fprintf(debug, "Combination rules: geometric %d Lorentz-Berthelot %d\n",
                bCombGeom, bCombLB);
    }

    simple = nbnxn_kernel_pairlist_simple(nb_kernel_type);
    if (simple)
    {
        /* We prefer the geometric combination rule,
         * as that gives a slightly faster kernel than the LB rule.
         */
        if (bCombGeom)
        {
            nbat->comb_rule = ljcrGEOM;
        }
        else if (bCombLB)
        {
            nbat->comb_rule = ljcrLB;
        }
        else
        {
            nbat->comb_rule = ljcrNONE;

            nbat->free(nbat->nbfp_comb);
        }

        if (fp)
        {
            if (nbat->comb_rule == ljcrNONE)
            {
                fprintf(fp, "Using full Lennard-Jones parameter combination matrix\n\n");
            }
            else
            {
                fprintf(fp, "Using %s Lennard-Jones combination rule\n\n",
                        nbat->comb_rule == ljcrGEOM ? "geometric" : "Lorentz-Berthelot");
            }
        }

        set_combination_rule_data(nbat);
    }
    else
    {
        nbat->comb_rule = ljcrNONE;

        nbat->free(nbat->nbfp_comb);
    }

    nbat->natoms  = 0;
    nbat->type    = NULL;
    nbat->lj_comb = NULL;
    if (simple)
    {
        int pack_x;

        switch (nb_kernel_type)
        {
            case nbnxnk4xN_SIMD_4xN:
            case nbnxnk4xN_SIMD_2xNN:
                pack_x = max(NBNXN_CPU_CLUSTER_I_SIZE,
                             nbnxn_kernel_to_cj_size(nb_kernel_type));
                switch (pack_x)
                {
                    case 4:
                        nbat->XFormat = nbatX4;
                        break;
                    case 8:
                        nbat->XFormat = nbatX8;
                        break;
                    default:
                        gmx_incons("Unsupported packing width");
                }
                break;
            default:
                nbat->XFormat = nbatXYZ;
                break;
        }

        nbat->FFormat = nbat->XFormat;
    }
    else
    {
        nbat->XFormat = nbatXYZQ;
        nbat->FFormat = nbatXYZ;
    }
    nbat->q        = NULL;
    nbat->nenergrp = n_energygroups;
    if (!simple)
    {
        /* Energy groups not supported yet for super-sub lists */
        if (n_energygroups > 1 && fp != NULL)
        {
            fprintf(fp, "\nNOTE: With GPUs, reporting energy group contributions is not supported\n\n");
        }
        nbat->nenergrp = 1;
    }
    /* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
    if (nbat->nenergrp > 64)
    {
        gmx_fatal(FARGS, "With NxN kernels not more than 64 energy groups are supported\n");
    }
    nbat->neg_2log = 1;
    while (nbat->nenergrp > (1<<nbat->neg_2log))
    {
        nbat->neg_2log++;
    }
    nbat->energrp = NULL;
    nbat->alloc((void **)&nbat->shift_vec, SHIFTS*sizeof(*nbat->shift_vec));
    nbat->xstride = (nbat->XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
    nbat->fstride = (nbat->FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
    nbat->x       = NULL;
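    /* Worked example of the neg_2log packing set up above: with nenergrp = 5
     * the loop yields neg_2log = 3, so each energy-group index occupies 3 bits
     * and na_c of them are packed into one int by copy_egp_to_nbat_egps() below.
     */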
#ifdef GMX_NBNXN_SIMD
    if (simple)
    {
        /* Set the diagonal cluster pair exclusion mask setup data.
         * In the kernel we check 0 < j - i to generate the masks.
         * Here we store j - i for generating the mask for the first i,
         * we subtract 0.5 to avoid rounding issues.
         * In the kernel we can subtract 1 to generate the subsequent mask.
         */
        const int simd_width = GMX_NBNXN_SIMD_BITWIDTH/(sizeof(real)*8);
        int       simd_4xn_diag_size, real_excl, simd_excl_size, j, s;

        simd_4xn_diag_size = max(NBNXN_CPU_CLUSTER_I_SIZE, simd_width);
        snew_aligned(nbat->simd_4xn_diag, simd_4xn_diag_size, NBNXN_MEM_ALIGN);
        for (j = 0; j < simd_4xn_diag_size; j++)
        {
            nbat->simd_4xn_diag[j] = j - 0.5;
        }

        snew_aligned(nbat->simd_2xnn_diag, simd_width, NBNXN_MEM_ALIGN);
        for (j = 0; j < simd_width/2; j++)
        {
            /* The j-cluster size is half the SIMD width */
            nbat->simd_2xnn_diag[j]              = j - 0.5;
            /* The next half of the SIMD width is for i + 1 */
            nbat->simd_2xnn_diag[simd_width/2+j] = j - 1 - 0.5;
        }

        /* We always use 32-bit integer exclusion masks. When we use
         * double precision, we fit two integers in a double SIMD register.
         */
        real_excl = sizeof(real)/sizeof(*nbat->simd_excl_mask);
        /* Set bits for use with both 4xN and 2x(N+N) kernels */
        simd_excl_size = NBNXN_CPU_CLUSTER_I_SIZE*simd_width;
        snew_aligned(nbat->simd_excl_mask, simd_excl_size*real_excl, NBNXN_MEM_ALIGN);
        for (j = 0; j < simd_excl_size; j++)
        {
            /* Set the consecutive bits for masking pair exclusions.
             * In double precision a single bit per entry would be enough,
             * but setting the same bit in both integers avoids endianness issues.
             */
            for (s = 0; s < real_excl; s++)
            {
                nbat->simd_excl_mask[j*real_excl + s] = (1U << j);
            }
        }
    }
#endif
    /* Initialize the output data structures */
    nbat->nout = nout;
    snew(nbat->out, nbat->nout);
    nbat->nalloc = 0;
    for (i = 0; i < nbat->nout; i++)
    {
        nbnxn_atomdata_output_init(&nbat->out[i],
                                   nb_kernel_type,
                                   nbat->nenergrp, 1<<nbat->neg_2log,
                                   nbat->alloc);
    }
    nbat->buffer_flags.flag        = NULL;
    nbat->buffer_flags.flag_nalloc = 0;
}
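/* Illustrative call sequence around this initializer (a sketch under the
 * assumption of default allocators; the surrounding grid setup and
 * reallocation steps are omitted):
 *
 *     nbnxn_atomdata_init(fp, nbat, nb_kernel_type,
 *                         ntype, nbfp, n_energygroups, nout,
 *                         NULL, NULL);
 *     ...put the atoms on the search grid...
 *     nbnxn_atomdata_set(nbat, eatLocal, nbs, mdatoms, atinfo);
 *
 * Passing NULL for alloc/free selects nbnxn_alloc_aligned/nbnxn_free_aligned.
 */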
static void copy_lj_to_nbat_lj_comb_x4(const real *ljparam_type,
                                       const int *type, int na,
                                       real *ljparam_at)
{
    int is, k, i;

    /* The LJ params follow the combination rule:
     * copy the params for the type array to the atom array.
     */
    for (is = 0; is < na; is += PACK_X4)
    {
        for (k = 0; k < PACK_X4; k++)
        {
            i = is + k;
            ljparam_at[is*2        +k] = ljparam_type[type[i]*2  ];
            ljparam_at[is*2+PACK_X4+k] = ljparam_type[type[i]*2+1];
        }
    }
}
static void copy_lj_to_nbat_lj_comb_x8(const real *ljparam_type,
                                       const int *type, int na,
                                       real *ljparam_at)
{
    int is, k, i;

    /* The LJ params follow the combination rule:
     * copy the params for the type array to the atom array.
     */
    for (is = 0; is < na; is += PACK_X8)
    {
        for (k = 0; k < PACK_X8; k++)
        {
            i = is + k;
            ljparam_at[is*2        +k] = ljparam_type[type[i]*2  ];
            ljparam_at[is*2+PACK_X8+k] = ljparam_type[type[i]*2+1];
        }
    }
}
/* Sets the atom type and LJ data in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t *nbat,
                                         int ngrid,
                                         const nbnxn_search_t nbs,
                                         const int *type)
{
    int                 g, i, ncz, ash;
    const nbnxn_grid_t *grid;

    for (g = 0; g < ngrid; g++)
    {
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (i = 0; i < grid->ncx*grid->ncy; i++)
        {
            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_int_to_nbat_int(nbs->a+ash, grid->cxy_na[i], ncz*grid->na_sc,
                                 type, nbat->ntype-1, nbat->type+ash);

            if (nbat->comb_rule != ljcrNONE)
            {
                if (nbat->XFormat == nbatX4)
                {
                    copy_lj_to_nbat_lj_comb_x4(nbat->nbfp_comb,
                                               nbat->type+ash, ncz*grid->na_sc,
                                               nbat->lj_comb+ash*2);
                }
                else if (nbat->XFormat == nbatX8)
                {
                    copy_lj_to_nbat_lj_comb_x8(nbat->nbfp_comb,
                                               nbat->type+ash, ncz*grid->na_sc,
                                               nbat->lj_comb+ash*2);
                }
            }
        }
    }
}
/* Sets the charges in nbnxn_atomdata_t *nbat */
static void nbnxn_atomdata_set_charges(nbnxn_atomdata_t *nbat,
                                       int ngrid,
                                       const nbnxn_search_t nbs,
                                       const real *charge)
{
    int                 g, cxy, ncz, ash, na, na_round, i, j;
    real               *q;
    const nbnxn_grid_t *grid;

    for (g = 0; g < ngrid; g++)
    {
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (cxy = 0; cxy < grid->ncx*grid->ncy; cxy++)
        {
            ash      = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
            na       = grid->cxy_na[cxy];
            na_round = (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;

            if (nbat->XFormat == nbatXYZQ)
            {
                q = nbat->x + ash*STRIDE_XYZQ + ZZ + 1;
                for (i = 0; i < na; i++)
                {
                    *q = charge[nbs->a[ash+i]];
                    q += STRIDE_XYZQ;
                }
                /* Complete the partially filled last cell with zeros */
                for (; i < na_round; i++)
                {
                    *q = 0;
                    q += STRIDE_XYZQ;
                }
            }
            else
            {
                q = nbat->q + ash;
                for (i = 0; i < na; i++)
                {
                    *q = charge[nbs->a[ash+i]];
                    q++;
                }
                /* Complete the partially filled last cell with zeros */
                for (; i < na_round; i++)
                {
                    *q = 0;
                    q++;
                }
            }
        }
    }
}
/* Copies the energy group indices to a reordered and packed array */
static void copy_egp_to_nbat_egps(const int *a, int na, int na_round,
                                  int na_c, int bit_shift,
                                  const int *in, int *innb)
{
    int i, j, sa, at;
    int comb;

    j = 0;
    for (i = 0; i < na; i += na_c)
    {
        /* Store na_c energy group numbers into one int */
        comb = 0;
        for (sa = 0; sa < na_c; sa++)
        {
            at = a[i+sa];
            if (at >= 0)
            {
                comb |= (GET_CGINFO_GID(in[at]) << (sa*bit_shift));
            }
        }
        innb[j++] = comb;
    }
    /* Complete the partially filled last cell with fill */
    for (; i < na_round; i += na_c)
    {
        innb[j++] = 0;
    }
}
/* Set the energy group indices for atoms in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_energygroups(nbnxn_atomdata_t *nbat,
                                            int ngrid,
                                            const nbnxn_search_t nbs,
                                            const int *atinfo)
{
    int                 g, i, ncz, ash;
    const nbnxn_grid_t *grid;

    for (g = 0; g < ngrid; g++)
    {
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (i = 0; i < grid->ncx*grid->ncy; i++)
        {
            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_egp_to_nbat_egps(nbs->a+ash, grid->cxy_na[i], ncz*grid->na_sc,
                                  nbat->na_c, nbat->neg_2log,
                                  atinfo, nbat->energrp+(ash>>grid->na_c_2log));
        }
    }
}
/* Sets all required atom parameter data in nbnxn_atomdata_t */
void nbnxn_atomdata_set(nbnxn_atomdata_t *nbat,
                        int locality,
                        const nbnxn_search_t nbs,
                        const t_mdatoms *mdatoms,
                        const int *atinfo)
{
    int ngrid;

    if (locality == eatLocal)
    {
        ngrid = 1;
    }
    else
    {
        ngrid = nbs->ngrid;
    }

    nbnxn_atomdata_set_atomtypes(nbat, ngrid, nbs, mdatoms->typeA);

    nbnxn_atomdata_set_charges(nbat, ngrid, nbs, mdatoms->chargeA);

    if (nbat->nenergrp > 1)
    {
        nbnxn_atomdata_set_energygroups(nbat, ngrid, nbs, atinfo);
    }
}
/* Copies the shift vector array to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_shiftvec(gmx_bool bDynamicBox,
                                  rvec *shift_vec,
                                  nbnxn_atomdata_t *nbat)
{
    int i;

    nbat->bDynamicBox = bDynamicBox;
    for (i = 0; i < SHIFTS; i++)
    {
        copy_rvec(shift_vec[i], nbat->shift_vec[i]);
    }
}
/* Copies (and reorders) the coordinates to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_x_to_nbat_x(const nbnxn_search_t nbs,
                                     int locality,
                                     gmx_bool FillLocal,
                                     rvec *x,
                                     nbnxn_atomdata_t *nbat)
{
    int g0 = 0, g1 = 0;
    int nth, th;

    switch (locality)
    {
        case eatAll:
            g0 = 0;
            g1 = nbs->ngrid;
            break;
        case eatLocal:
            g0 = 0;
            g1 = 1;
            break;
        case eatNonlocal:
            g0 = 1;
            g1 = nbs->ngrid;
            break;
    }

    if (FillLocal)
    {
        nbat->natoms_local = nbs->grid[0].nc*nbs->grid[0].na_sc;
    }

    nth = gmx_omp_nthreads_get(emntPairsearch);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (th = 0; th < nth; th++)
    {
        int g;

        for (g = g0; g < g1; g++)
        {
            const nbnxn_grid_t *grid;
            int                 cxy0, cxy1, cxy;

            grid = &nbs->grid[g];

            cxy0 = (grid->ncx*grid->ncy* th   +nth-1)/nth;
            cxy1 = (grid->ncx*grid->ncy*(th+1)+nth-1)/nth;

            for (cxy = cxy0; cxy < cxy1; cxy++)
            {
                int na, ash, na_fill;

                na  = grid->cxy_na[cxy];
                ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;

                if (g == 0 && FillLocal)
                {
                    na_fill =
                        (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;
                }
                else
                {
                    /* We fill only the real particle locations.
                     * We assume the filling entries at the end have been
                     * properly set before during ns.
                     */
                    na_fill = na;
                }
                copy_rvec_to_nbat_real(nbs->a+ash, na, na_fill, x,
                                       nbat->XFormat, nbat->x, ash,
                                       0, 0, 0);
            }
        }
    }
}
static void
nbnxn_atomdata_clear_reals(real * gmx_restrict dest,
                           int i0, int i1)
{
    int i;

    for (i = i0; i < i1; i++)
    {
        dest[i] = 0;
    }
}
static void
nbnxn_atomdata_reduce_reals(real * gmx_restrict dest,
                            gmx_bool bDestSet,
                            real ** gmx_restrict src,
                            int nsrc,
                            int i0, int i1)
{
    int i, s;

    if (bDestSet)
    {
        /* The destination buffer contains data, add to it */
        for (i = i0; i < i1; i++)
        {
            for (s = 0; s < nsrc; s++)
            {
                dest[i] += src[s][i];
            }
        }
    }
    else
    {
        /* The destination buffer is uninitialized, set it first */
        for (i = i0; i < i1; i++)
        {
            dest[i] = src[0][i];
            for (s = 1; s < nsrc; s++)
            {
                dest[i] += src[s][i];
            }
        }
    }
}
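/* Example of the bDestSet flag above, with nsrc = 2:
 *     bDestSet == TRUE:   dest[i] += src[0][i] + src[1][i]
 *     bDestSet == FALSE:  dest[i]  = src[0][i] + src[1][i]
 * i.e. the flag only decides whether the previous contents of dest survive.
 */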
static void
nbnxn_atomdata_reduce_reals_simd(real * gmx_restrict dest,
                                 gmx_bool bDestSet,
                                 real ** gmx_restrict src,
                                 int nsrc,
                                 int i0, int i1)
{
#ifdef GMX_NBNXN_SIMD
/* The SIMD width here is actually independent of that in the kernels,
 * but we use the same width for simplicity (usually optimal anyhow).
 */
#ifdef GMX_NBNXN_HALF_WIDTH_SIMD
#define GMX_USE_HALF_WIDTH_SIMD_HERE
#endif
#include "gmx_simd_macros.h"

    int       i, s;
    gmx_mm_pr dest_SSE, src_SSE;

    if (bDestSet)
    {
        for (i = i0; i < i1; i += GMX_SIMD_WIDTH_HERE)
        {
            dest_SSE = gmx_load_pr(dest+i);
            for (s = 0; s < nsrc; s++)
            {
                src_SSE  = gmx_load_pr(src[s]+i);
                dest_SSE = gmx_add_pr(dest_SSE, src_SSE);
            }
            gmx_store_pr(dest+i, dest_SSE);
        }
    }
    else
    {
        for (i = i0; i < i1; i += GMX_SIMD_WIDTH_HERE)
        {
            dest_SSE = gmx_load_pr(src[0]+i);
            for (s = 1; s < nsrc; s++)
            {
                src_SSE  = gmx_load_pr(src[s]+i);
                dest_SSE = gmx_add_pr(dest_SSE, src_SSE);
            }
            gmx_store_pr(dest+i, dest_SSE);
        }
    }
#endif
}
/* Add part of the force array(s) from nbnxn_atomdata_t to f */
static void
nbnxn_atomdata_add_nbat_f_to_f_part(const nbnxn_search_t nbs,
                                    const nbnxn_atomdata_t *nbat,
                                    nbnxn_atomdata_output_t *out,
                                    int nfa,
                                    int a0, int a1,
                                    rvec *f)
{
    int         a, i, fa;
    const int  *cell;
    const real *fnb;

    cell = nbs->cell;

    /* Loop over all columns and copy and fill */
    switch (nbat->FFormat)
    {
        case nbatXYZ:
        case nbatXYZQ:
            if (nfa == 1)
            {
                fnb = out[0].f;

                for (a = a0; a < a1; a++)
                {
                    i = cell[a]*nbat->fstride;

                    f[a][XX] += fnb[i];
                    f[a][YY] += fnb[i+1];
                    f[a][ZZ] += fnb[i+2];
                }
            }
            else
            {
                for (a = a0; a < a1; a++)
                {
                    i = cell[a]*nbat->fstride;

                    for (fa = 0; fa < nfa; fa++)
                    {
                        f[a][XX] += out[fa].f[i];
                        f[a][YY] += out[fa].f[i+1];
                        f[a][ZZ] += out[fa].f[i+2];
                    }
                }
            }
            break;
        case nbatX4:
            if (nfa == 1)
            {
                fnb = out[0].f;

                for (a = a0; a < a1; a++)
                {
                    i = X4_IND_A(cell[a]);

                    f[a][XX] += fnb[i+XX*PACK_X4];
                    f[a][YY] += fnb[i+YY*PACK_X4];
                    f[a][ZZ] += fnb[i+ZZ*PACK_X4];
                }
            }
            else
            {
                for (a = a0; a < a1; a++)
                {
                    i = X4_IND_A(cell[a]);

                    for (fa = 0; fa < nfa; fa++)
                    {
                        f[a][XX] += out[fa].f[i+XX*PACK_X4];
                        f[a][YY] += out[fa].f[i+YY*PACK_X4];
                        f[a][ZZ] += out[fa].f[i+ZZ*PACK_X4];
                    }
                }
            }
            break;
        case nbatX8:
            if (nfa == 1)
            {
                fnb = out[0].f;

                for (a = a0; a < a1; a++)
                {
                    i = X8_IND_A(cell[a]);

                    f[a][XX] += fnb[i+XX*PACK_X8];
                    f[a][YY] += fnb[i+YY*PACK_X8];
                    f[a][ZZ] += fnb[i+ZZ*PACK_X8];
                }
            }
            else
            {
                for (a = a0; a < a1; a++)
                {
                    i = X8_IND_A(cell[a]);

                    for (fa = 0; fa < nfa; fa++)
                    {
                        f[a][XX] += out[fa].f[i+XX*PACK_X8];
                        f[a][YY] += out[fa].f[i+YY*PACK_X8];
                        f[a][ZZ] += out[fa].f[i+ZZ*PACK_X8];
                    }
                }
            }
            break;
        default:
            gmx_incons("Unsupported nbnxn_atomdata_t format");
    }
}
/* Add the force array(s) from nbnxn_atomdata_t to f */
void nbnxn_atomdata_add_nbat_f_to_f(const nbnxn_search_t nbs,
                                    int locality,
                                    const nbnxn_atomdata_t *nbat,
                                    rvec *f)
{
    int a0 = 0, na = 0;
    int nth, th;

    nbs_cycle_start(&nbs->cc[enbsCCreducef]);

    switch (locality)
    {
        case eatAll:
            a0 = 0;
            na = nbs->natoms_nonlocal;
            break;
        case eatLocal:
            a0 = 0;
            na = nbs->natoms_local;
            break;
        case eatNonlocal:
            a0 = nbs->natoms_local;
            na = nbs->natoms_nonlocal - nbs->natoms_local;
            break;
    }

    nth = gmx_omp_nthreads_get(emntNonbonded);

    if (nbat->nout > 1)
    {
        if (locality != eatAll)
        {
            gmx_incons("add_f_to_f called with nout>1 and locality!=eatAll");
        }

        /* Reduce the force thread output buffers into buffer 0, before adding
         * them to the, differently ordered, "real" force buffer.
         */
#pragma omp parallel for num_threads(nth) schedule(static)
        for (th = 0; th < nth; th++)
        {
            const nbnxn_buffer_flags_t *flags;
            int   b0, b1, b;
            int   i0, i1;
            int   nfptr;
            real *fptr[NBNXN_BUFFERFLAG_MAX_THREADS];
            int   out;

            flags = &nbat->buffer_flags;

            /* Calculate the cell-block range for our thread */
            b0 = (flags->nflag* th   )/nth;
            b1 = (flags->nflag*(th+1))/nth;

            for (b = b0; b < b1; b++)
            {
                i0 =  b   *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
                i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;

                nfptr = 0;
                for (out = 1; out < nbat->nout; out++)
                {
                    if (flags->flag[b] & (1U<<out))
                    {
                        fptr[nfptr++] = nbat->out[out].f;
                    }
                }
                if (nfptr > 0)
                {
#ifdef GMX_NBNXN_SIMD
                    nbnxn_atomdata_reduce_reals_simd
#else
                    nbnxn_atomdata_reduce_reals
#endif
                        (nbat->out[0].f,
                         flags->flag[b] & (1U<<0),
                         fptr, nfptr,
                         i0, i1);
                }
                else if (!(flags->flag[b] & (1U<<0)))
                {
                    nbnxn_atomdata_clear_reals(nbat->out[0].f,
                                               i0, i1);
                }
            }
        }
    }

#pragma omp parallel for num_threads(nth) schedule(static)
    for (th = 0; th < nth; th++)
    {
        nbnxn_atomdata_add_nbat_f_to_f_part(nbs, nbat,
                                            nbat->out,
                                            1,
                                            a0+((th+0)*na)/nth,
                                            a0+((th+1)*na)/nth,
                                            f);
    }

    nbs_cycle_stop(&nbs->cc[enbsCCreducef]);
}
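/* Sketch of the buffer-flag logic above: bit "out" of flags->flag[b] is set
 * when force-output buffer "out" touched cell block b. With three output
 * buffers and flag[b] = 0x5 (buffers 0 and 2), fptr collects only
 * nbat->out[2].f, which is added into nbat->out[0].f with bDestSet = TRUE;
 * a block with flag[b] == 0 was touched by no buffer and is simply cleared.
 */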
/* Adds the shift forces from nbnxn_atomdata_t to fshift */
void nbnxn_atomdata_add_nbat_fshift_to_fshift(const nbnxn_atomdata_t *nbat,
                                              rvec *fshift)
{
    const nbnxn_atomdata_output_t *out;
    int  th;
    int  s;
    rvec sum;

    out = nbat->out;

    for (s = 0; s < SHIFTS; s++)
    {
        clear_rvec(sum);
        for (th = 0; th < nbat->nout; th++)
        {
            sum[XX] += out[th].fshift[s*DIM+XX];
            sum[YY] += out[th].fshift[s*DIM+YY];
            sum[ZZ] += out[th].fshift[s*DIM+ZZ];
        }
        rvec_inc(fshift[s], sum);
    }
}