/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "smalloc.h"
#include "macros.h"
#include "vec.h"
#include "gmx_fatal.h"
#include "nbnxn_consts.h"
#include "nbnxn_internal.h"
#include "nbnxn_search.h"
#include "nbnxn_atomdata.h"
#include "gmx_omp_nthreads.h"
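
/* Summary (added): this file implements nbnxn_atomdata_t, the SIMD- and
 * GPU-friendly atom-data layout of the nbnxn non-bonded module. It converts
 * per-atom data (types, LJ parameters, charges, energy groups, coordinates)
 * from the standard GROMACS ordering into the grid-cell ordering produced
 * by the nbnxn search, and reduces the per-thread force output buffers
 * back into the ordinary force array.
 */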

/* Default nbnxn allocation routine, allocates 32-byte-aligned memory,
 * which works for plain C and for aligned SSE and AVX loads/stores.
 */
void nbnxn_alloc_aligned(void **ptr,size_t nbytes)
{
    *ptr = save_malloc_aligned("ptr",__FILE__,__LINE__,nbytes,1,32);
}
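
/* Note (added): memory obtained through nbnxn_alloc_aligned must be
 * released with nbnxn_free_aligned below, never with plain free().
 */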

/* Free function for memory allocated with nbnxn_alloc_aligned */
void nbnxn_free_aligned(void *ptr)
{
    sfree_aligned(ptr);
}

/* Reallocation wrapper function for nbnxn data structures */
void nbnxn_realloc_void(void **ptr,
                        int nbytes_copy,int nbytes_new,
                        nbnxn_alloc_t *ma,
                        nbnxn_free_t  *mf)
{
    void *ptr_new;

    ma(&ptr_new,nbytes_new);

    if (nbytes_new > 0 && ptr_new == NULL)
    {
        gmx_fatal(FARGS,"Allocation of %d bytes failed",nbytes_new);
    }

    if (nbytes_copy > 0)
    {
        if (nbytes_new < nbytes_copy)
        {
            gmx_incons("In nbnxn_realloc_void: new size less than copy size");
        }
        memcpy(ptr_new,*ptr,nbytes_copy);
    }
    if (*ptr != NULL)
    {
        mf(*ptr);
    }
    *ptr = ptr_new;
}
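
/* Usage note (added): unlike realloc(), this wrapper always allocates a
 * fresh buffer through ma(), copies the first nbytes_copy bytes and then
 * frees the old buffer, so contents beyond nbytes_copy are uninitialized.
 */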

/* Reallocate the nbnxn_atomdata_t for a size of n atoms */
void nbnxn_atomdata_realloc(nbnxn_atomdata_t *nbat,int n)
{
    int t;

    nbnxn_realloc_void((void **)&nbat->type,
                       nbat->natoms*sizeof(*nbat->type),
                       n*sizeof(*nbat->type),
                       nbat->alloc,nbat->free);
    nbnxn_realloc_void((void **)&nbat->lj_comb,
                       nbat->natoms*2*sizeof(*nbat->lj_comb),
                       n*2*sizeof(*nbat->lj_comb),
                       nbat->alloc,nbat->free);
    if (nbat->XFormat != nbatXYZQ)
    {
        nbnxn_realloc_void((void **)&nbat->q,
                           nbat->natoms*sizeof(*nbat->q),
                           n*sizeof(*nbat->q),
                           nbat->alloc,nbat->free);
    }
    if (nbat->nenergrp > 1)
    {
        nbnxn_realloc_void((void **)&nbat->energrp,
                           nbat->natoms/nbat->na_c*sizeof(*nbat->energrp),
                           n/nbat->na_c*sizeof(*nbat->energrp),
                           nbat->alloc,nbat->free);
    }
    nbnxn_realloc_void((void **)&nbat->x,
                       nbat->natoms*nbat->xstride*sizeof(*nbat->x),
                       n*nbat->xstride*sizeof(*nbat->x),
                       nbat->alloc,nbat->free);
    for(t=0; t<nbat->nout; t++)
    {
        /* Allocate one element extra for possible signaling with CUDA */
        nbnxn_realloc_void((void **)&nbat->out[t].f,
                           nbat->natoms*nbat->fstride*sizeof(*nbat->out[t].f),
                           n*nbat->fstride*sizeof(*nbat->out[t].f),
                           nbat->alloc,nbat->free);
    }
    nbat->nalloc = n;
}

/* Initializes an nbnxn_atomdata_output_t data structure */
static void nbnxn_atomdata_output_init(nbnxn_atomdata_output_t *out,
                                       int nb_kernel_type,
                                       int nenergrp,int stride,
                                       nbnxn_alloc_t *ma)
{
    int cj_size;

    out->f = NULL;
    ma((void **)&out->fshift,SHIFTS*DIM*sizeof(*out->fshift));
    out->nV = nenergrp*nenergrp;
    ma((void **)&out->Vvdw,out->nV*sizeof(*out->Vvdw));
    ma((void **)&out->Vc  ,out->nV*sizeof(*out->Vc  ));

    if (nb_kernel_type == nbk4xN_X86_SIMD128 ||
        nb_kernel_type == nbk4xN_X86_SIMD256)
    {
        cj_size  = nbnxn_kernel_to_cj_size(nb_kernel_type);
        out->nVS = nenergrp*nenergrp*stride*(cj_size>>1)*cj_size;
        ma((void **)&out->VSvdw,out->nVS*sizeof(*out->VSvdw));
        ma((void **)&out->VSc  ,out->nVS*sizeof(*out->VSc  ));
    }
    else
    {
        out->nVS = 0;
    }
}

static void copy_int_to_nbat_int(const int *a,int na,int na_round,
                                 const int *in,int fill,int *innb)
{
    int i,j;

    j = 0;
    for(i=0; i<na; i++)
    {
        innb[j++] = in[a[i]];
    }
    /* Complete the partially filled last cell with fill */
    for(; i<na_round; i++)
    {
        innb[j++] = fill;
    }
}

static void clear_nbat_real(int na,int nbatFormat,real *xnb,int a0)
{
    int a,d,j,c;

    switch (nbatFormat)
    {
    case nbatXYZ:
        for(a=0; a<na; a++)
        {
            for(d=0; d<DIM; d++)
            {
                xnb[(a0+a)*STRIDE_XYZ+d] = 0;
            }
        }
        break;
    case nbatXYZQ:
        for(a=0; a<na; a++)
        {
            for(d=0; d<DIM; d++)
            {
                xnb[(a0+a)*STRIDE_XYZQ+d] = 0;
            }
        }
        break;
    case nbatX4:
        j = X4_IND_A(a0);
        c = a0 & (PACK_X4-1);
        for(a=0; a<na; a++)
        {
            xnb[j+XX*PACK_X4] = 0;
            xnb[j+YY*PACK_X4] = 0;
            xnb[j+ZZ*PACK_X4] = 0;
            j++;
            c++;
            if (c == PACK_X4)
            {
                j += (DIM-1)*PACK_X4;
                c  = 0;
            }
        }
        break;
    case nbatX8:
        j = X8_IND_A(a0);
        c = a0 & (PACK_X8-1);
        for(a=0; a<na; a++)
        {
            xnb[j+XX*PACK_X8] = 0;
            xnb[j+YY*PACK_X8] = 0;
            xnb[j+ZZ*PACK_X8] = 0;
            j++;
            c++;
            if (c == PACK_X8)
            {
                j += (DIM-1)*PACK_X8;
                c  = 0;
            }
        }
        break;
    }
}
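
/* Layout sketch (added): the packed formats store each dimension in groups
 * of PACK_X4 (or PACK_X8) atoms, e.g. for nbatX4:
 *   x0 x1 x2 x3  y0 y1 y2 y3  z0 z1 z2 z3  x4 x5 x6 x7 ...
 * so that xnb[j+XX*PACK_X4], xnb[j+YY*PACK_X4] and xnb[j+ZZ*PACK_X4]
 * address the x, y and z components of one atom within a pack.
 */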

void copy_rvec_to_nbat_real(const int *a,int na,int na_round,
                            rvec *x,int nbatFormat,real *xnb,int a0,
                            int cx,int cy,int cz)
{
    int i,j,c;

/* We might need to place filler particles to fill up the cell to na_round.
 * The coefficients (LJ and q) for such particles are zero.
 * But we might still get NaN as 0*NaN when distances are too small.
 * We hope that -107 nm is far enough away from zero
 * to avoid accidental short distances to particles shifted down for pbc.
 */
#define NBAT_FAR_AWAY 107
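
/* Example (added): filler atom i in the column at (cx,cy,cz) = (0,0,0)
 * is placed at (-107,-107,-107*(1+i)) nm, so fillers within one column are
 * spread far apart in z and different columns get different x,y offsets.
 */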

    switch (nbatFormat)
    {
    case nbatXYZ:
        j = a0*STRIDE_XYZ;
        for(i=0; i<na; i++)
        {
            xnb[j++] = x[a[i]][XX];
            xnb[j++] = x[a[i]][YY];
            xnb[j++] = x[a[i]][ZZ];
        }
        /* Complete the partially filled last cell with particles far apart.
         * This simplifies the bounding box calculation and avoids
         * numerical issues with atoms that are coincidentally close.
         */
        for(; i<na_round; i++)
        {
            xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
            xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
            xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);
        }
        break;
    case nbatXYZQ:
        j = a0*STRIDE_XYZQ;
        for(i=0; i<na; i++)
        {
            xnb[j++] = x[a[i]][XX];
            xnb[j++] = x[a[i]][YY];
            xnb[j++] = x[a[i]][ZZ];
            j++;
        }
        /* Complete the partially filled last cell with particles far apart */
        for(; i<na_round; i++)
        {
            xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
            xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
            xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);
            j++;
        }
        break;
    case nbatX4:
        j = X4_IND_A(a0);
        c = a0 & (PACK_X4-1);
        for(i=0; i<na; i++)
        {
            xnb[j+XX*PACK_X4] = x[a[i]][XX];
            xnb[j+YY*PACK_X4] = x[a[i]][YY];
            xnb[j+ZZ*PACK_X4] = x[a[i]][ZZ];
            j++;
            c++;
            if (c == PACK_X4)
            {
                j += (DIM-1)*PACK_X4;
                c  = 0;
            }
        }
        /* Complete the partially filled last cell with particles far apart */
        for(; i<na_round; i++)
        {
            xnb[j+XX*PACK_X4] = -NBAT_FAR_AWAY*(1 + cx);
            xnb[j+YY*PACK_X4] = -NBAT_FAR_AWAY*(1 + cy);
            xnb[j+ZZ*PACK_X4] = -NBAT_FAR_AWAY*(1 + cz + i);
            j++;
            c++;
            if (c == PACK_X4)
            {
                j += (DIM-1)*PACK_X4;
                c  = 0;
            }
        }
        break;
    case nbatX8:
        j = X8_IND_A(a0);
        c = a0 & (PACK_X8-1);
        for(i=0; i<na; i++)
        {
            xnb[j+XX*PACK_X8] = x[a[i]][XX];
            xnb[j+YY*PACK_X8] = x[a[i]][YY];
            xnb[j+ZZ*PACK_X8] = x[a[i]][ZZ];
            j++;
            c++;
            if (c == PACK_X8)
            {
                j += (DIM-1)*PACK_X8;
                c  = 0;
            }
        }
        /* Complete the partially filled last cell with particles far apart */
        for(; i<na_round; i++)
        {
            xnb[j+XX*PACK_X8] = -NBAT_FAR_AWAY*(1 + cx);
            xnb[j+YY*PACK_X8] = -NBAT_FAR_AWAY*(1 + cy);
            xnb[j+ZZ*PACK_X8] = -NBAT_FAR_AWAY*(1 + cz + i);
            j++;
            c++;
            if (c == PACK_X8)
            {
                j += (DIM-1)*PACK_X8;
                c  = 0;
            }
        }
        break;
    default:
        gmx_incons("Unsupported stride");
    }
}

/* Determines the combination rule (or none) to be used, stores it,
 * and sets the LJ parameters required by that rule.
 */
static void set_combination_rule_data(nbnxn_atomdata_t *nbat)
{
    int  nt,i,j;
    real c6,c12;

    nt = nbat->ntype;

    switch (nbat->comb_rule)
    {
    case ljcrGEOM:
        nbat->comb_rule = ljcrGEOM;

        for(i=0; i<nt; i++)
        {
            /* Copy the diagonal from the nbfp matrix */
            nbat->nbfp_comb[i*2  ] = sqrt(nbat->nbfp[(i*nt+i)*2  ]);
            nbat->nbfp_comb[i*2+1] = sqrt(nbat->nbfp[(i*nt+i)*2+1]);
        }
        break;
    case ljcrLB:
        for(i=0; i<nt; i++)
        {
            /* Get 6*C6 and 12*C12 from the diagonal of the nbfp matrix */
            c6  = nbat->nbfp[(i*nt+i)*2  ];
            c12 = nbat->nbfp[(i*nt+i)*2+1];
            if (c6 > 0 && c12 > 0)
            {
                /* We store 0.5*2^1/6*sigma and sqrt(4*3*eps),
                 * so we get 6*C6 and 12*C12 after combining.
                 */
                nbat->nbfp_comb[i*2  ] = 0.5*pow(c12/c6,1.0/6.0);
                nbat->nbfp_comb[i*2+1] = sqrt(c6*c6/c12);
            }
            else
            {
                nbat->nbfp_comb[i*2  ] = 0;
                nbat->nbfp_comb[i*2+1] = 0;
            }
        }
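
        /* Derivation sketch (added): nbfp stores c6 = 6*C6 = 24*eps*sigma^6
         * and c12 = 12*C12 = 48*eps*sigma^12, so c12/c6 = 2*sigma^6 and
         * c6*c6/c12 = 12*eps.  With s_i = 0.5*2^(1/6)*sigma_i and
         * e_i = sqrt(12*eps_i) stored per type, combining gives
         * e_i*e_j*(s_i+s_j)^6  = 12*eps_ij*2*sigma_ij^6  = 6*C6_ij  and
         * e_i*e_j*(s_i+s_j)^12 = 12*eps_ij*4*sigma_ij^12 = 12*C12_ij.
         */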
        break;
    case ljcrNONE:
        /* In nbfp_s4 we use a stride of 4 for storing two parameters */
        nbat->alloc((void **)&nbat->nbfp_s4,nt*nt*4*sizeof(*nbat->nbfp_s4));
        for(i=0; i<nt; i++)
        {
            for(j=0; j<nt; j++)
            {
                nbat->nbfp_s4[(i*nt+j)*4+0] = nbat->nbfp[(i*nt+j)*2+0];
                nbat->nbfp_s4[(i*nt+j)*4+1] = nbat->nbfp[(i*nt+j)*2+1];
                nbat->nbfp_s4[(i*nt+j)*4+2] = 0;
                nbat->nbfp_s4[(i*nt+j)*4+3] = 0;
            }
        }
        break;
    default:
        gmx_incons("Unknown combination rule");
    }
}

/* Initializes an nbnxn_atomdata_t data structure */
void nbnxn_atomdata_init(FILE *fp,
                         nbnxn_atomdata_t *nbat,
                         int nb_kernel_type,
                         int ntype,const real *nbfp,
                         int n_energygroups,
                         int nout,
                         nbnxn_alloc_t *alloc,
                         nbnxn_free_t  *free)
{
    int  i,j;
    real c6,c12,tol;
    char *ptr;
    gmx_bool simple,bCombGeom,bCombLB;

    if (alloc == NULL)
    {
        nbat->alloc = nbnxn_alloc_aligned;
    }
    else
    {
        nbat->alloc = alloc;
    }
    if (free == NULL)
    {
        nbat->free = nbnxn_free_aligned;
    }
    else
    {
        nbat->free = free;
    }

    if (debug)
    {
        fprintf(debug,"There are %d atom types in the system, adding one for nbnxn_atomdata_t\n",ntype);
    }
    nbat->ntype = ntype + 1;
    nbat->alloc((void **)&nbat->nbfp,
                nbat->ntype*nbat->ntype*2*sizeof(*nbat->nbfp));
    nbat->alloc((void **)&nbat->nbfp_comb,nbat->ntype*2*sizeof(*nbat->nbfp_comb));

    /* A tolerance of 1e-5 seems reasonable for (possibly hand-typed)
     * force-field floating point parameters.
     */
    tol = 1e-5;
    ptr = getenv("GMX_LJCOMB_TOL");
    if (ptr != NULL)
    {
        double dbl;

        sscanf(ptr,"%lf",&dbl);
        tol = dbl;
    }

    bCombGeom = TRUE;
    bCombLB   = TRUE;

    /* Temporarily fill nbat->nbfp_comb with sigma and epsilon
     * to check for the LB rule.
     */
    for(i=0; i<ntype; i++)
    {
        c6  = nbfp[(i*ntype+i)*2  ]/6.0;
        c12 = nbfp[(i*ntype+i)*2+1]/12.0;
        if (c6 > 0 && c12 > 0)
        {
            nbat->nbfp_comb[i*2  ] = pow(c12/c6,1.0/6.0);
            nbat->nbfp_comb[i*2+1] = 0.25*c6*c6/c12;
        }
        else if (c6 == 0 && c12 == 0)
        {
            nbat->nbfp_comb[i*2  ] = 0;
            nbat->nbfp_comb[i*2+1] = 0;
        }
        else
        {
            /* Can not use LB rule with only dispersion or repulsion */
            bCombLB = FALSE;
        }
    }

    for(i=0; i<nbat->ntype; i++)
    {
        for(j=0; j<nbat->ntype; j++)
        {
            if (i < ntype && j < ntype)
            {
                /* fr->nbfp has been updated, so that array too now stores c6/c12 including
                 * the 6.0/12.0 prefactors to save 2 flops in the most common case (force-only).
                 */
                c6  = nbfp[(i*ntype+j)*2  ];
                c12 = nbfp[(i*ntype+j)*2+1];
                nbat->nbfp[(i*nbat->ntype+j)*2  ] = c6;
                nbat->nbfp[(i*nbat->ntype+j)*2+1] = c12;

                /* Compare 6*C6 and 12*C12 for the geometric combination rule */
                bCombGeom = bCombGeom &&
                    gmx_within_tol(c6*c6  ,nbfp[(i*ntype+i)*2  ]*nbfp[(j*ntype+j)*2  ],tol) &&
                    gmx_within_tol(c12*c12,nbfp[(i*ntype+i)*2+1]*nbfp[(j*ntype+j)*2+1],tol);

                /* Compare C6 and C12 for the Lorentz-Berthelot combination rule */
                c6  /= 6.0;
                c12 /= 12.0;
                bCombLB = bCombLB &&
                    ((c6 == 0 && c12 == 0 &&
                      (nbat->nbfp_comb[i*2+1] == 0 || nbat->nbfp_comb[j*2+1] == 0)) ||
                     (c6 > 0 && c12 > 0 &&
                      gmx_within_tol(pow(c12/c6,1.0/6.0),0.5*(nbat->nbfp_comb[i*2]+nbat->nbfp_comb[j*2]),tol) &&
                      gmx_within_tol(0.25*c6*c6/c12,sqrt(nbat->nbfp_comb[i*2+1]*nbat->nbfp_comb[j*2+1]),tol)));
            }
            else
            {
                /* Add zero parameters for the additional dummy atom type */
                nbat->nbfp[(i*nbat->ntype+j)*2  ] = 0;
                nbat->nbfp[(i*nbat->ntype+j)*2+1] = 0;
            }
        }
    }
    if (debug)
    {
        fprintf(debug,"Combination rules: geometric %d Lorentz-Berthelot %d\n",
                bCombGeom,bCombLB);
    }

    simple = nbnxn_kernel_pairlist_simple(nb_kernel_type);

    if (simple)
    {
        /* We prefer the geometric combination rule,
         * as that gives a slightly faster kernel than the LB rule.
         */
        if (bCombGeom)
        {
            nbat->comb_rule = ljcrGEOM;
        }
        else if (bCombLB)
        {
            nbat->comb_rule = ljcrLB;
        }
        else
        {
            nbat->comb_rule = ljcrNONE;

            nbat->free(nbat->nbfp_comb);
        }

        if (fp)
        {
            if (nbat->comb_rule == ljcrNONE)
            {
                fprintf(fp,"Using full Lennard-Jones parameter combination matrix\n\n");
            }
            else
            {
                fprintf(fp,"Using %s Lennard-Jones combination rule\n\n",
                        nbat->comb_rule==ljcrGEOM ? "geometric" : "Lorentz-Berthelot");
            }
        }

        set_combination_rule_data(nbat);
    }
    else
    {
        nbat->comb_rule = ljcrNONE;

        nbat->free(nbat->nbfp_comb);
    }

    nbat->natoms  = 0;
    nbat->type    = NULL;
    nbat->lj_comb = NULL;
    if (simple)
    {
        switch (nb_kernel_type)
        {
        case nbk4xN_X86_SIMD128:
            nbat->XFormat = nbatX4;
            break;
        case nbk4xN_X86_SIMD256:
#ifndef GMX_DOUBLE
            nbat->XFormat = nbatX8;
#else
            nbat->XFormat = nbatX4;
#endif
            break;
        default:
            nbat->XFormat = nbatXYZ;
            break;
        }

        nbat->FFormat = nbat->XFormat;
    }
    else
    {
        nbat->XFormat = nbatXYZQ;
        nbat->FFormat = nbatXYZ;
    }
    nbat->q        = NULL;
    nbat->nenergrp = n_energygroups;
    if (!simple)
    {
        /* Energy groups not supported yet for super-sub lists */
        if (n_energygroups > 1 && fp != NULL)
        {
            fprintf(fp,"\nNOTE: With GPUs, reporting energy group contributions is not supported\n\n");
        }
        nbat->nenergrp = 1;
    }
    /* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
    if (nbat->nenergrp > 64)
    {
        gmx_fatal(FARGS,"With NxN kernels not more than 64 energy groups are supported\n");
    }
    nbat->neg_2log = 1;
    while (nbat->nenergrp > (1<<nbat->neg_2log))
    {
        nbat->neg_2log++;
    }
    nbat->energrp = NULL;
    nbat->alloc((void **)&nbat->shift_vec,SHIFTS*sizeof(*nbat->shift_vec));
    nbat->xstride = (nbat->XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
    nbat->fstride = (nbat->FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
    nbat->x       = NULL;
    nbat->nout    = nout;
    snew(nbat->out,nbat->nout);
    nbat->nalloc  = 0;
    for(i=0; i<nbat->nout; i++)
    {
        nbnxn_atomdata_output_init(&nbat->out[i],
                                   nb_kernel_type,
                                   nbat->nenergrp,1<<nbat->neg_2log,
                                   nbat->alloc);
    }
}

static void copy_lj_to_nbat_lj_comb_x4(const real *ljparam_type,
                                       const int *type,int na,
                                       real *ljparam_at)
{
    int is,k,i;

    /* The LJ params follow the combination rule:
     * copy the parameters for the types to the per-atom array.
     */
    for(is=0; is<na; is+=PACK_X4)
    {
        for(k=0; k<PACK_X4; k++)
        {
            i = is + k;
            ljparam_at[is*2        +k] = ljparam_type[type[i]*2  ];
            ljparam_at[is*2+PACK_X4+k] = ljparam_type[type[i]*2+1];
        }
    }
}
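
/* Layout sketch (added): per pack of PACK_X4 atoms the two LJ parameters
 * are grouped by parameter, mirroring the coordinate packing:
 *   p0_0 p0_1 p0_2 p0_3  p1_0 p1_1 p1_2 p1_3  p0_4 ...
 * so a SIMD kernel can load PACK_X4 first or second parameters with one
 * aligned load.
 */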

static void copy_lj_to_nbat_lj_comb_x8(const real *ljparam_type,
                                       const int *type,int na,
                                       real *ljparam_at)
{
    int is,k,i;

    /* The LJ params follow the combination rule:
     * copy the parameters for the types to the per-atom array.
     */
    for(is=0; is<na; is+=PACK_X8)
    {
        for(k=0; k<PACK_X8; k++)
        {
            i = is + k;
            ljparam_at[is*2        +k] = ljparam_type[type[i]*2  ];
            ljparam_at[is*2+PACK_X8+k] = ljparam_type[type[i]*2+1];
        }
    }
}

/* Sets the atom type and LJ data in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t *nbat,
                                         int ngrid,
                                         const nbnxn_search_t nbs,
                                         const int *type)
{
    int g,i,ncz,ash;
    const nbnxn_grid_t *grid;

    for(g=0; g<ngrid; g++)
    {
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for(i=0; i<grid->ncx*grid->ncy; i++)
        {
            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_int_to_nbat_int(nbs->a+ash,grid->cxy_na[i],ncz*grid->na_sc,
                                 type,nbat->ntype-1,nbat->type+ash);

            if (nbat->comb_rule != ljcrNONE)
            {
                if (nbat->XFormat == nbatX4)
                {
                    copy_lj_to_nbat_lj_comb_x4(nbat->nbfp_comb,
                                               nbat->type+ash,ncz*grid->na_sc,
                                               nbat->lj_comb+ash*2);
                }
                else if (nbat->XFormat == nbatX8)
                {
                    copy_lj_to_nbat_lj_comb_x8(nbat->nbfp_comb,
                                               nbat->type+ash,ncz*grid->na_sc,
                                               nbat->lj_comb+ash*2);
                }
            }
        }
    }
}

/* Sets the charges in nbnxn_atomdata_t *nbat */
static void nbnxn_atomdata_set_charges(nbnxn_atomdata_t *nbat,
                                       int ngrid,
                                       const nbnxn_search_t nbs,
                                       const real *charge)
{
    int  g,cxy,ncz,ash,na,na_round,i;
    real *q;
    const nbnxn_grid_t *grid;

    for(g=0; g<ngrid; g++)
    {
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for(cxy=0; cxy<grid->ncx*grid->ncy; cxy++)
        {
            ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
            na  = grid->cxy_na[cxy];
            na_round = (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;

            if (nbat->XFormat == nbatXYZQ)
            {
                q = nbat->x + ash*STRIDE_XYZQ + ZZ + 1;
                for(i=0; i<na; i++)
                {
                    *q = charge[nbs->a[ash+i]];
                    q += STRIDE_XYZQ;
                }
                /* Complete the partially filled last cell with zeros */
                for(; i<na_round; i++)
                {
                    *q = 0;
                    q += STRIDE_XYZQ;
                }
            }
            else
            {
                q = nbat->q + ash;
                for(i=0; i<na; i++)
                {
                    *q = charge[nbs->a[ash+i]];
                    q++;
                }
                /* Complete the partially filled last cell with zeros */
                for(; i<na_round; i++)
                {
                    *q = 0;
                    q++;
                }
            }
        }
    }
}
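
/* Note (added): in the nbatXYZQ format the charge is interleaved as the
 * fourth component of each coordinate quadruple (x,y,z,q), hence the start
 * offset ZZ+1 and the STRIDE_XYZQ increment above.
 */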

/* Copies the energy group indices to a reordered and packed array */
static void copy_egp_to_nbat_egps(const int *a,int na,int na_round,
                                  int na_c,int bit_shift,
                                  const int *in,int *innb)
{
    int i,j,sa,at;
    int comb;

    j = 0;
    for(i=0; i<na; i+=na_c)
    {
        /* Store na_c energy group numbers into one int */
        comb = 0;
        for(sa=0; sa<na_c; sa++)
        {
            at = a[i+sa];
            if (at >= 0)
            {
                comb |= (GET_CGINFO_GID(in[at]) << (sa*bit_shift));
            }
        }
        innb[j++] = comb;
    }
    /* Complete the partially filled last cell with fill */
    for(; i<na_round; i+=na_c)
    {
        innb[j++] = 0;
    }
}
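
/* Example (added): with na_c = 4 atoms per cell and bit_shift = 2 (room
 * for 1<<2 = 4 energy groups), the group indices g0..g3 of one cell are
 * packed as comb = g0 | g1<<2 | g2<<4 | g3<<6 into a single int.
 */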

/* Set the energy group indices for atoms in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_energygroups(nbnxn_atomdata_t *nbat,
                                            int ngrid,
                                            const nbnxn_search_t nbs,
                                            const int *atinfo)
{
    int g,i,ncz,ash;
    const nbnxn_grid_t *grid;

    for(g=0; g<ngrid; g++)
    {
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for(i=0; i<grid->ncx*grid->ncy; i++)
        {
            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_egp_to_nbat_egps(nbs->a+ash,grid->cxy_na[i],ncz*grid->na_sc,
                                  nbat->na_c,nbat->neg_2log,
                                  atinfo,nbat->energrp+(ash>>grid->na_c_2log));
        }
    }
}

/* Sets all required atom parameter data in nbnxn_atomdata_t */
void nbnxn_atomdata_set(nbnxn_atomdata_t *nbat,
                        int locality,
                        const nbnxn_search_t nbs,
                        const t_mdatoms *mdatoms,
                        const int *atinfo)
{
    int ngrid;

    if (locality == eatLocal)
    {
        ngrid = 1;
    }
    else
    {
        ngrid = nbs->ngrid;
    }

    nbnxn_atomdata_set_atomtypes(nbat,ngrid,nbs,mdatoms->typeA);

    nbnxn_atomdata_set_charges(nbat,ngrid,nbs,mdatoms->chargeA);

    if (nbat->nenergrp > 1)
    {
        nbnxn_atomdata_set_energygroups(nbat,ngrid,nbs,atinfo);
    }
}

/* Copies the shift vector array to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_shiftvec(gmx_bool bDynamicBox,
                                  rvec *shift_vec,
                                  nbnxn_atomdata_t *nbat)
{
    int i;

    nbat->bDynamicBox = bDynamicBox;
    for(i=0; i<SHIFTS; i++)
    {
        copy_rvec(shift_vec[i],nbat->shift_vec[i]);
    }
}

/* Copies (and reorders) the coordinates to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_x_to_nbat_x(const nbnxn_search_t nbs,
                                     int locality,
                                     gmx_bool FillLocal,
                                     rvec *x,
                                     nbnxn_atomdata_t *nbat)
{
    int g0=0,g1=0;
    int nth,th;

    switch (locality)
    {
    case eatAll:
        g0 = 0;
        g1 = nbs->ngrid;
        break;
    case eatLocal:
        g0 = 0;
        g1 = 1;
        break;
    case eatNonlocal:
        g0 = 1;
        g1 = nbs->ngrid;
        break;
    }

    if (FillLocal)
    {
        nbat->natoms_local = nbs->grid[0].nc*nbs->grid[0].na_sc;
    }

    nth = gmx_omp_nthreads_get(emntPairsearch);

#pragma omp parallel for num_threads(nth) schedule(static)
    for(th=0; th<nth; th++)
    {
        int g,cxy0,cxy1,cxy;
        int na,ash,na_fill;
        const nbnxn_grid_t *grid;

        for(g=g0; g<g1; g++)
        {
            grid = &nbs->grid[g];

            cxy0 = (grid->ncx*grid->ncy* th   +nth-1)/nth;
            cxy1 = (grid->ncx*grid->ncy*(th+1)+nth-1)/nth;

            for(cxy=cxy0; cxy<cxy1; cxy++)
            {
                na  = grid->cxy_na[cxy];
                ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;

                if (g == 0 && FillLocal)
                {
                    na_fill =
                        (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;
                }
                else
                {
                    /* We fill only the real particle locations.
                     * We assume the filling entries at the end have been
                     * properly set before during ns.
                     */
                    na_fill = na;
                }
                copy_rvec_to_nbat_real(nbs->a+ash,na,na_fill,x,
                                       nbat->XFormat,nbat->x,ash,
                                       0,0,0);
            }
        }
    }
}
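
/* Note (added): the grid columns are divided over the nth OpenMP threads in
 * contiguous chunks; since cxy1 of thread th equals cxy0 of thread th+1,
 * the rounding in the chunk formula cannot skip or duplicate a column.
 */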

static void
nbnxn_atomdata_reduce_reals(real * gmx_restrict dest,
                            real ** gmx_restrict src,
                            int nsrc,
                            int i0,int i1)
{
    int i,s;

    for(i=i0; i<i1; i++)
    {
        for(s=0; s<nsrc; s++)
        {
            dest[i] += src[s][i];
        }
    }
}
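
/* This plain-C loop is the fallback used when NBNXN_SEARCH_SSE is not
 * defined; otherwise the call sites select the SIMD version below.
 */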

static void
nbnxn_atomdata_reduce_reals_x86_simd(real * gmx_restrict dest,
                                     real ** gmx_restrict src,
                                     int nsrc,
                                     int i0,int i1)
{
#ifdef NBNXN_SEARCH_SSE
/* We can use AVX256 here, but not when AVX128 kernels are selected.
 * As this reduction is not faster with AVX256 anyway, we use 128-bit SIMD.
 */
#define GMX_MM128_HERE
#include "gmx_x86_simd_macros.h"

    int       i,s;
    gmx_mm_pr dest_SSE,src_SSE;

    if ((i0 & (GMX_X86_SIMD_WIDTH_HERE-1)) ||
        (i1 & (GMX_X86_SIMD_WIDTH_HERE-1)))
    {
        gmx_incons("bounds not a multiple of GMX_X86_SIMD_WIDTH_HERE in nbnxn_atomdata_reduce_reals_x86_simd");
    }

    for(i=i0; i<i1; i+=GMX_X86_SIMD_WIDTH_HERE)
    {
        dest_SSE = gmx_load_pr(dest+i);
        for(s=0; s<nsrc; s++)
        {
            src_SSE  = gmx_load_pr(src[s]+i);
            dest_SSE = gmx_add_pr(dest_SSE,src_SSE);
        }
        gmx_store_pr(dest+i,dest_SSE);
    }

#undef GMX_MM128_HERE
#undef GMX_MM256_HERE
#endif
}

/* Add part of the force array(s) from nbnxn_atomdata_t to f */
static void
nbnxn_atomdata_add_nbat_f_to_f_part(const nbnxn_search_t nbs,
                                    const nbnxn_atomdata_t *nbat,
                                    nbnxn_atomdata_output_t *out,
                                    int nfa,
                                    int a0,int a1,
                                    rvec *f)
{
    int  a,i,fa;
    const int  *cell;
    const real *fnb;

    cell = nbs->cell;

    /* Loop over all columns and copy and fill */
    switch (nbat->FFormat)
    {
    case nbatXYZ:
    case nbatXYZQ:
        if (nfa == 1)
        {
            fnb = out[0].f;
            for(a=a0; a<a1; a++)
            {
                i = cell[a]*nbat->fstride;

                f[a][XX] += fnb[i];
                f[a][YY] += fnb[i+1];
                f[a][ZZ] += fnb[i+2];
            }
        }
        else
        {
            for(a=a0; a<a1; a++)
            {
                i = cell[a]*nbat->fstride;

                for(fa=0; fa<nfa; fa++)
                {
                    f[a][XX] += out[fa].f[i];
                    f[a][YY] += out[fa].f[i+1];
                    f[a][ZZ] += out[fa].f[i+2];
                }
            }
        }
        break;
    case nbatX4:
        if (nfa == 1)
        {
            fnb = out[0].f;
            for(a=a0; a<a1; a++)
            {
                i = X4_IND_A(cell[a]);

                f[a][XX] += fnb[i+XX*PACK_X4];
                f[a][YY] += fnb[i+YY*PACK_X4];
                f[a][ZZ] += fnb[i+ZZ*PACK_X4];
            }
        }
        else
        {
            for(a=a0; a<a1; a++)
            {
                i = X4_IND_A(cell[a]);

                for(fa=0; fa<nfa; fa++)
                {
                    f[a][XX] += out[fa].f[i+XX*PACK_X4];
                    f[a][YY] += out[fa].f[i+YY*PACK_X4];
                    f[a][ZZ] += out[fa].f[i+ZZ*PACK_X4];
                }
            }
        }
        break;
    case nbatX8:
        if (nfa == 1)
        {
            fnb = out[0].f;
            for(a=a0; a<a1; a++)
            {
                i = X8_IND_A(cell[a]);

                f[a][XX] += fnb[i+XX*PACK_X8];
                f[a][YY] += fnb[i+YY*PACK_X8];
                f[a][ZZ] += fnb[i+ZZ*PACK_X8];
            }
        }
        else
        {
            for(a=a0; a<a1; a++)
            {
                i = X8_IND_A(cell[a]);

                for(fa=0; fa<nfa; fa++)
                {
                    f[a][XX] += out[fa].f[i+XX*PACK_X8];
                    f[a][YY] += out[fa].f[i+YY*PACK_X8];
                    f[a][ZZ] += out[fa].f[i+ZZ*PACK_X8];
                }
            }
        }
        break;
    }
}

/* Add the force array(s) from nbnxn_atomdata_t to f */
void nbnxn_atomdata_add_nbat_f_to_f(const nbnxn_search_t nbs,
                                    int locality,
                                    const nbnxn_atomdata_t *nbat,
                                    rvec *f)
{
    int a0=0,na=0;
    int nth,th;
    gmx_bool bStreamingReduce;

    nbs_cycle_start(&nbs->cc[enbsCCreducef]);

    switch (locality)
    {
    case eatAll:
        a0 = 0;
        na = nbs->natoms_nonlocal;
        break;
    case eatLocal:
        a0 = 0;
        na = nbs->natoms_local;
        break;
    case eatNonlocal:
        a0 = nbs->natoms_local;
        na = nbs->natoms_nonlocal - nbs->natoms_local;
        break;
    }

    nth = gmx_omp_nthreads_get(emntNonbonded);

    /* Using the two-step streaming reduction is probably always faster */
    bStreamingReduce = (nbat->nout > 1);
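
    /* Note (added): with multiple output buffers the reduction is two-step:
     * first all thread buffers are summed into out[0] in the cache-friendly
     * nbat ordering below, then only out[0] is scattered into the
     * differently ordered force array f.
     */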

    if (bStreamingReduce)
    {
        /* Reduce the force thread output buffers into buffer 0, before adding
         * them to the, differently ordered, "real" force buffer.
         */
#pragma omp parallel for num_threads(nth) schedule(static)
        for(th=0; th<nth; th++)
        {
            int g0,g1,g;

            /* For which grids should we reduce the force output? */
            g0 = ((locality==eatLocal || locality==eatAll) ? 0 : 1);
            g1 = (locality==eatLocal ? 1 : nbs->ngrid);

            for(g=g0; g<g1; g++)
            {
                const nbnxn_grid_t *grid;
                int  b0,b1,b;
                int  c0,c1;
                int  i0,i1;
                int  nfptr;
                real *fptr[NBNXN_CELLBLOCK_MAX_THREADS];
                int  out;

                grid = &nbs->grid[g];

                /* Calculate the cell-block range for our thread */
                b0 = (grid->cellblock_flags.ncb* th   )/nth;
                b1 = (grid->cellblock_flags.ncb*(th+1))/nth;

                if (grid->cellblock_flags.bUse)
                {
                    /* Only reduce the cell-blocks with flagged contributions */
                    for(b=b0; b<b1; b++)
                    {
                        c0 = b*NBNXN_CELLBLOCK_SIZE;
                        c1 = min(c0 + NBNXN_CELLBLOCK_SIZE,grid->nc);
                        i0 = (grid->cell0 + c0)*grid->na_c*nbat->fstride;
                        i1 = (grid->cell0 + c1)*grid->na_c*nbat->fstride;

                        nfptr = 0;
                        for(out=1; out<nbat->nout; out++)
                        {
                            if (grid->cellblock_flags.flag[b] & (1U<<out))
                            {
                                fptr[nfptr++] = nbat->out[out].f;
                            }
                        }
                        if (nfptr > 0)
                        {
#ifdef NBNXN_SEARCH_SSE
                            nbnxn_atomdata_reduce_reals_x86_simd
#else
                            nbnxn_atomdata_reduce_reals
#endif
                                (nbat->out[0].f,fptr,nfptr,i0,i1);
                        }
                    }
                }
                else
                {
                    /* Reduce this thread's whole cell-block range at once */
                    c0 = b0*NBNXN_CELLBLOCK_SIZE;
                    c1 = min(b1*NBNXN_CELLBLOCK_SIZE,grid->nc);
                    i0 = (grid->cell0 + c0)*grid->na_c*nbat->fstride;
                    i1 = (grid->cell0 + c1)*grid->na_c*nbat->fstride;

                    nfptr = 0;
                    for(out=1; out<nbat->nout; out++)
                    {
                        fptr[nfptr++] = nbat->out[out].f;
                    }
#ifdef NBNXN_SEARCH_SSE
                    nbnxn_atomdata_reduce_reals_x86_simd
#else
                    nbnxn_atomdata_reduce_reals
#endif
                        (nbat->out[0].f,fptr,nfptr,i0,i1);
                }
            }
        }
    }

#pragma omp parallel for num_threads(nth) schedule(static)
    for(th=0; th<nth; th++)
    {
        nbnxn_atomdata_add_nbat_f_to_f_part(nbs,nbat,
                                            nbat->out,
                                            bStreamingReduce ? 1 : nbat->nout,
                                            a0+((th+0)*na)/nth,
                                            a0+((th+1)*na)/nth,
                                            f);
    }

    nbs_cycle_stop(&nbs->cc[enbsCCreducef]);
}

/* Adds the shift forces from nbnxn_atomdata_t to fshift */
void nbnxn_atomdata_add_nbat_fshift_to_fshift(const nbnxn_atomdata_t *nbat,
                                              rvec *fshift)
{
    const nbnxn_atomdata_output_t *out;
    int  th;
    int  s;
    rvec sum;

    out = nbat->out;

    for(s=0; s<SHIFTS; s++)
    {
        clear_rvec(sum);
        for(th=0; th<nbat->nout; th++)
        {
            sum[XX] += out[th].fshift[s*DIM+XX];
            sum[YY] += out[th].fshift[s*DIM+YY];
            sum[ZZ] += out[th].fshift[s*DIM+ZZ];
        }
        rvec_inc(fshift[s],sum);
    }
}