/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 * This source code is part of
 *
 * GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 */
#include <math.h>    /* sqrt, pow */
#include <string.h>  /* memcpy */

#include "smalloc.h" /* save_malloc_aligned, sfree_aligned, snew */
#include "macros.h"  /* max */
#include "vec.h"     /* copy_rvec, clear_rvec, rvec_inc */
#include "nbnxn_consts.h"
#include "nbnxn_internal.h"
#include "nbnxn_search.h"
#include "nbnxn_atomdata.h"
#include "gmx_omp_nthreads.h"
/* Default nbnxn allocation routine, allocates NBNXN_MEM_ALIGN byte aligned */
void nbnxn_alloc_aligned(void **ptr,size_t nbytes)
{
    *ptr = save_malloc_aligned("ptr",__FILE__,__LINE__,nbytes,1,NBNXN_MEM_ALIGN);
}

/* Free function for memory allocated with nbnxn_alloc_aligned */
void nbnxn_free_aligned(void *ptr)
{
    sfree_aligned(ptr);
}
/* Reallocation wrapper function for nbnxn data structures */
void nbnxn_realloc_void(void **ptr,
                        int nbytes_copy,int nbytes_new,
                        nbnxn_alloc_t *ma,
                        nbnxn_free_t  *mf)
{
    void *ptr_new;

    ma(&ptr_new,nbytes_new);

    if (nbytes_new > 0 && ptr_new == NULL)
    {
        gmx_fatal(FARGS, "Allocation of %d bytes failed", nbytes_new);
    }

    if (nbytes_copy > 0)
    {
        if (nbytes_new < nbytes_copy)
        {
            gmx_incons("In nbnxn_realloc_void: new size less than copy size");
        }
        memcpy(ptr_new,*ptr,nbytes_copy);
    }
    if (*ptr != NULL)
    {
        mf(*ptr);
    }
    *ptr = ptr_new;
}
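/* Usage sketch (editorial illustration with hypothetical names, not part
 * of the original sources): growing a plain int array with the wrapper
 * and the default aligned routines:
 *
 *   int *idx   = NULL;
 *   int  n_old = 0, n_new = 128;
 *
 *   nbnxn_realloc_void((void **)&idx,
 *                      n_old*sizeof(*idx),n_new*sizeof(*idx),
 *                      nbnxn_alloc_aligned,nbnxn_free_aligned);
 *
 * The first n_old elements are copied into the new buffer and the old
 * buffer is released with the matching free routine.
 */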
/* Reallocate the nbnxn_atomdata_t for a size of n atoms */
void nbnxn_atomdata_realloc(nbnxn_atomdata_t *nbat,int n)
{
    int t;

    nbnxn_realloc_void((void **)&nbat->type,
                       nbat->natoms*sizeof(*nbat->type),
                       n*sizeof(*nbat->type),
                       nbat->alloc,nbat->free);
    nbnxn_realloc_void((void **)&nbat->lj_comb,
                       nbat->natoms*2*sizeof(*nbat->lj_comb),
                       n*2*sizeof(*nbat->lj_comb),
                       nbat->alloc,nbat->free);
    if (nbat->XFormat != nbatXYZQ)
    {
        nbnxn_realloc_void((void **)&nbat->q,
                           nbat->natoms*sizeof(*nbat->q),
                           n*sizeof(*nbat->q),
                           nbat->alloc,nbat->free);
    }
    if (nbat->nenergrp > 1)
    {
        nbnxn_realloc_void((void **)&nbat->energrp,
                           nbat->natoms/nbat->na_c*sizeof(*nbat->energrp),
                           n/nbat->na_c*sizeof(*nbat->energrp),
                           nbat->alloc,nbat->free);
    }
    nbnxn_realloc_void((void **)&nbat->x,
                       nbat->natoms*nbat->xstride*sizeof(*nbat->x),
                       n*nbat->xstride*sizeof(*nbat->x),
                       nbat->alloc,nbat->free);
    for(t=0; t<nbat->nout; t++)
    {
        /* Allocate one element extra for possible signaling with CUDA */
        nbnxn_realloc_void((void **)&nbat->out[t].f,
                           nbat->natoms*nbat->fstride*sizeof(*nbat->out[t].f),
                           n*nbat->fstride*sizeof(*nbat->out[t].f),
                           nbat->alloc,nbat->free);
    }
    nbat->nalloc = n;
}
/* Initializes an nbnxn_atomdata_output_t data structure */
static void nbnxn_atomdata_output_init(nbnxn_atomdata_output_t *out,
                                       int nb_kernel_type,
                                       int nenergrp,int stride,
                                       nbnxn_alloc_t *ma)
{
    int cj_size;

    out->f = NULL;
    ma((void **)&out->fshift,SHIFTS*DIM*sizeof(*out->fshift));
    out->nV = nenergrp*nenergrp;
    ma((void **)&out->Vvdw,out->nV*sizeof(*out->Vvdw));
    ma((void **)&out->Vc  ,out->nV*sizeof(*out->Vc  ));

    if (nb_kernel_type == nbnxnk4xN_SIMD_4xN ||
        nb_kernel_type == nbnxnk4xN_SIMD_2xNN)
    {
        cj_size  = nbnxn_kernel_to_cj_size(nb_kernel_type);
        out->nVS = nenergrp*nenergrp*stride*(cj_size>>1)*cj_size;
        ma((void **)&out->VSvdw,out->nVS*sizeof(*out->VSvdw));
        ma((void **)&out->VSc  ,out->nVS*sizeof(*out->VSc  ));
    }
    else
    {
        out->nVS = 0;
    }
}
/* Copies int values from in, reordered through the index array a, to innb,
 * padding the rounded-up remainder of the last cell with fill.
 */
static void copy_int_to_nbat_int(const int *a,int na,int na_round,
                                 const int *in,int fill,int *innb)
{
    int i,j;

    j = 0;
    for(i=0; i<na; i++)
    {
        innb[j++] = in[a[i]];
    }
    /* Complete the partially filled last cell with fill */
    for(; i<na_round; i++)
    {
        innb[j++] = fill;
    }
}
/* Zeros the coordinates of na atoms starting at a0, for all nbat formats */
static void clear_nbat_real(int na,int nbatFormat,real *xnb,int a0)
{
    int a,d,j,c;

    switch (nbatFormat)
    {
    case nbatXYZ:
        for(a=0; a<na; a++)
        {
            for(d=0; d<DIM; d++)
            {
                xnb[(a0+a)*STRIDE_XYZ+d] = 0;
            }
        }
        break;
    case nbatXYZQ:
        for(a=0; a<na; a++)
        {
            for(d=0; d<DIM; d++)
            {
                xnb[(a0+a)*STRIDE_XYZQ+d] = 0;
            }
        }
        break;
    case nbatX4:
        j = X4_IND_A(a0);
        c = a0 & (PACK_X4-1);
        for(a=0; a<na; a++)
        {
            xnb[j+XX*PACK_X4] = 0;
            xnb[j+YY*PACK_X4] = 0;
            xnb[j+ZZ*PACK_X4] = 0;
            j++;
            c++;
            if (c == PACK_X4)
            {
                /* Jump over the y and z coordinates of this packet */
                j += (DIM-1)*PACK_X4;
                c  = 0;
            }
        }
        break;
    case nbatX8:
        j = X8_IND_A(a0);
        c = a0 & (PACK_X8-1);
        for(a=0; a<na; a++)
        {
            xnb[j+XX*PACK_X8] = 0;
            xnb[j+YY*PACK_X8] = 0;
            xnb[j+ZZ*PACK_X8] = 0;
            j++;
            c++;
            if (c == PACK_X8)
            {
                j += (DIM-1)*PACK_X8;
                c  = 0;
            }
        }
        break;
    }
}
/* Copies (and reorders) rvec coordinates to the nbat format,
 * filling the cell up to na_round entries.
 */
void copy_rvec_to_nbat_real(const int *a,int na,int na_round,
                            rvec *x,int nbatFormat,real *xnb,int a0,
                            int cx,int cy,int cz)
{
    int i,j,c;

/* We might need to place filler particles to fill up the cell to na_round.
 * The coefficients (LJ and q) for such particles are zero.
 * But we might still get NaN as 0*NaN when distances are too small.
 * We hope that -107 nm is far enough away from zero
 * to avoid accidental short distances to particles shifted down for pbc.
 */
#define NBAT_FAR_AWAY 107
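/* Example (editorial illustration): the filler atom with index i in a
 * grid-column cell at cx=2, cy=0, cz=1 is placed at
 * (-321, -107, -107*(2+i)) nm. Columns with different cx or cy differ in
 * x or y, and different fillers within one column differ in z, so no two
 * filler particles ever coincide.
 */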
    switch (nbatFormat)
    {
    case nbatXYZ:
        j = a0*STRIDE_XYZ;
        for(i=0; i<na; i++)
        {
            xnb[j++] = x[a[i]][XX];
            xnb[j++] = x[a[i]][YY];
            xnb[j++] = x[a[i]][ZZ];
        }
        /* Complete the partially filled last cell with particles placed
         * far apart; this avoids spuriously short distances and keeps
         * the bounding-box calculation well behaved.
         */
        for(; i<na_round; i++)
        {
            xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
            xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
            xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);
        }
        break;
    case nbatXYZQ:
        j = a0*STRIDE_XYZQ;
        for(i=0; i<na; i++)
        {
            xnb[j++] = x[a[i]][XX];
            xnb[j++] = x[a[i]][YY];
            xnb[j++] = x[a[i]][ZZ];
            j++;    /* Skip the charge slot, it is set separately */
        }
        /* Complete the partially filled last cell with particles far apart */
        for(; i<na_round; i++)
        {
            xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
            xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
            xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);
            j++;
        }
        break;
    case nbatX4:
        j = X4_IND_A(a0);
        c = a0 & (PACK_X4-1);
        for(i=0; i<na; i++)
        {
            xnb[j+XX*PACK_X4] = x[a[i]][XX];
            xnb[j+YY*PACK_X4] = x[a[i]][YY];
            xnb[j+ZZ*PACK_X4] = x[a[i]][ZZ];
            j++;
            c++;
            if (c == PACK_X4)
            {
                j += (DIM-1)*PACK_X4;
                c  = 0;
            }
        }
        /* Complete the partially filled last cell with particles far apart */
        for(; i<na_round; i++)
        {
            xnb[j+XX*PACK_X4] = -NBAT_FAR_AWAY*(1 + cx);
            xnb[j+YY*PACK_X4] = -NBAT_FAR_AWAY*(1 + cy);
            xnb[j+ZZ*PACK_X4] = -NBAT_FAR_AWAY*(1 + cz + i);
            j++;
            c++;
            if (c == PACK_X4)
            {
                j += (DIM-1)*PACK_X4;
                c  = 0;
            }
        }
        break;
    case nbatX8:
        j = X8_IND_A(a0);
        c = a0 & (PACK_X8-1);
        for(i=0; i<na; i++)
        {
            xnb[j+XX*PACK_X8] = x[a[i]][XX];
            xnb[j+YY*PACK_X8] = x[a[i]][YY];
            xnb[j+ZZ*PACK_X8] = x[a[i]][ZZ];
            j++;
            c++;
            if (c == PACK_X8)
            {
                j += (DIM-1)*PACK_X8;
                c  = 0;
            }
        }
        /* Complete the partially filled last cell with particles far apart */
        for(; i<na_round; i++)
        {
            xnb[j+XX*PACK_X8] = -NBAT_FAR_AWAY*(1 + cx);
            xnb[j+YY*PACK_X8] = -NBAT_FAR_AWAY*(1 + cy);
            xnb[j+ZZ*PACK_X8] = -NBAT_FAR_AWAY*(1 + cz + i);
            j++;
            c++;
            if (c == PACK_X8)
            {
                j += (DIM-1)*PACK_X8;
                c  = 0;
            }
        }
        break;
    default:
        gmx_incons("Unsupported stride");
    }
}
/* Determines the combination rule (or none) to be used, stores it,
 * and sets the LJ parameters required by that rule.
 */
static void set_combination_rule_data(nbnxn_atomdata_t *nbat)
{
    int  nt,i,j;
    real c6,c12;

    nt = nbat->ntype;

    switch (nbat->comb_rule)
    {
    case ljcrGEOM:
        nbat->comb_rule = ljcrGEOM;

        for(i=0; i<nt; i++)
        {
            /* Copy the diagonal from the nbfp matrix */
            nbat->nbfp_comb[i*2  ] = sqrt(nbat->nbfp[(i*nt+i)*2  ]);
            nbat->nbfp_comb[i*2+1] = sqrt(nbat->nbfp[(i*nt+i)*2+1]);
        }
        break;
    case ljcrLB:
        for(i=0; i<nt; i++)
        {
            /* Get 6*C6 and 12*C12 from the diagonal of the nbfp matrix */
            c6  = nbat->nbfp[(i*nt+i)*2  ];
            c12 = nbat->nbfp[(i*nt+i)*2+1];
            if (c6 > 0 && c12 > 0)
            {
                /* We store 0.5*2^1/6*sigma and sqrt(4*3*eps),
                 * so we get 6*C6 and 12*C12 after combining.
                 */
                nbat->nbfp_comb[i*2  ] = 0.5*pow(c12/c6,1.0/6.0);
                nbat->nbfp_comb[i*2+1] = sqrt(c6*c6/c12);
            }
            else
            {
                nbat->nbfp_comb[i*2  ] = 0;
                nbat->nbfp_comb[i*2+1] = 0;
            }
        }
        break;
    case ljcrNONE:
        /* In nbfp_s4 we use a stride of 4 for storing two parameters */
        nbat->alloc((void **)&nbat->nbfp_s4,nt*nt*4*sizeof(*nbat->nbfp_s4));
        for(i=0; i<nt; i++)
        {
            for(j=0; j<nt; j++)
            {
                nbat->nbfp_s4[(i*nt+j)*4+0] = nbat->nbfp[(i*nt+j)*2+0];
                nbat->nbfp_s4[(i*nt+j)*4+1] = nbat->nbfp[(i*nt+j)*2+1];
                nbat->nbfp_s4[(i*nt+j)*4+2] = 0;
                nbat->nbfp_s4[(i*nt+j)*4+3] = 0;
            }
        }
        break;
    default:
        gmx_incons("Unknown combination rule");
        break;
    }
}
/* Initializes an nbnxn_atomdata_t data structure */
void nbnxn_atomdata_init(FILE *fp,
                         nbnxn_atomdata_t *nbat,
                         int nb_kernel_type,
                         int ntype,const real *nbfp,
                         int n_energygroups,
                         int nout,
                         nbnxn_alloc_t *alloc,
                         nbnxn_free_t  *free)
{
    int      i,j;
    real     c6,c12,tol;
    char     *ptr;
    gmx_bool simple,bCombGeom,bCombLB;

    /* Use the default aligned routines when no allocator is provided */
    nbat->alloc = (alloc == NULL ? nbnxn_alloc_aligned : alloc);
    nbat->free  = (free  == NULL ? nbnxn_free_aligned  : free);

    if (debug)
    {
        fprintf(debug,"There are %d atom types in the system, adding one for nbnxn_atomdata_t\n",ntype);
    }
    nbat->ntype = ntype + 1;
    nbat->alloc((void **)&nbat->nbfp,
                nbat->ntype*nbat->ntype*2*sizeof(*nbat->nbfp));
    nbat->alloc((void **)&nbat->nbfp_comb,nbat->ntype*2*sizeof(*nbat->nbfp_comb));
    /* A tolerance of 1e-5 seems reasonable for (possibly hand-typed)
     * force-field floating point parameters.
     */
    tol = 1e-5;
    ptr = getenv("GMX_LJCOMB_TOL");
    if (ptr != NULL)
    {
        double dbl;

        sscanf(ptr,"%lf",&dbl);
        tol = dbl;
    }

    bCombGeom = TRUE;
    bCombLB   = TRUE;

    /* Temporarily fill nbat->nbfp_comb with sigma and epsilon
     * to check for the LB rule.
     */
    for(i=0; i<ntype; i++)
    {
        c6  = nbfp[(i*ntype+i)*2  ]/6.0;
        c12 = nbfp[(i*ntype+i)*2+1]/12.0;
        if (c6 > 0 && c12 > 0)
        {
            nbat->nbfp_comb[i*2  ] = pow(c12/c6,1.0/6.0);
            nbat->nbfp_comb[i*2+1] = 0.25*c6*c6/c12;
        }
        else if (c6 == 0 && c12 == 0)
        {
            nbat->nbfp_comb[i*2  ] = 0;
            nbat->nbfp_comb[i*2+1] = 0;
        }
        else
        {
            /* Can not use LB rule with only dispersion or repulsion */
            bCombLB = FALSE;
        }
    }
    for(i=0; i<nbat->ntype; i++)
    {
        for(j=0; j<nbat->ntype; j++)
        {
            if (i < ntype && j < ntype)
            {
                /* fr->nbfp has been updated, so that array too now stores c6/c12 including
                 * the 6.0/12.0 prefactors to save 2 flops in the most common case (force-only).
                 */
                c6  = nbfp[(i*ntype+j)*2  ];
                c12 = nbfp[(i*ntype+j)*2+1];
                nbat->nbfp[(i*nbat->ntype+j)*2  ] = c6;
                nbat->nbfp[(i*nbat->ntype+j)*2+1] = c12;

                /* Compare 6*C6 and 12*C12 for the geometric combination rule */
                bCombGeom = bCombGeom &&
                    gmx_within_tol(c6*c6  ,nbfp[(i*ntype+i)*2  ]*nbfp[(j*ntype+j)*2  ],tol) &&
                    gmx_within_tol(c12*c12,nbfp[(i*ntype+i)*2+1]*nbfp[(j*ntype+j)*2+1],tol);

                /* Compare C6 and C12 for the Lorentz-Berthelot combination rule */
                c6  /= 6.0;
                c12 /= 12.0;
                bCombLB = bCombLB &&
                    ((c6 == 0 && c12 == 0 &&
                      (nbat->nbfp_comb[i*2+1] == 0 || nbat->nbfp_comb[j*2+1] == 0)) ||
                     (c6 > 0 && c12 > 0 &&
                      gmx_within_tol(pow(c12/c6,1.0/6.0),
                                     0.5*(nbat->nbfp_comb[i*2]+nbat->nbfp_comb[j*2]),tol) &&
                      gmx_within_tol(0.25*c6*c6/c12,
                                     sqrt(nbat->nbfp_comb[i*2+1]*nbat->nbfp_comb[j*2+1]),tol)));
            }
            else
            {
                /* Add zero parameters for the additional dummy atom type */
                nbat->nbfp[(i*nbat->ntype+j)*2  ] = 0;
                nbat->nbfp[(i*nbat->ntype+j)*2+1] = 0;
            }
        }
    }
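/* Editorial note on the 6/12 prefactors mentioned above: with plain
 * parameters the Lennard-Jones potential and radial force are
 *   V(r) = C12*r^-12 - C6*r^-6
 *   F(r) = (12*C12*r^-12 - 6*C6*r^-6)/r
 * Storing 6*C6 and 12*C12 lets the force-only kernels skip the two
 * multiplications by 6 and 12 in the inner loop; energy evaluation
 * divides the prefactors back out.
 */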
    if (debug)
    {
        fprintf(debug,"Combination rules: geometric %d Lorentz-Berthelot %d\n",
                bCombGeom,bCombLB);
    }

    simple = nbnxn_kernel_pairlist_simple(nb_kernel_type);

    if (simple)
    {
        /* We prefer the geometric combination rule,
         * as that gives a slightly faster kernel than the LB rule.
         */
        if (bCombGeom)
        {
            nbat->comb_rule = ljcrGEOM;
        }
        else if (bCombLB)
        {
            nbat->comb_rule = ljcrLB;
        }
        else
        {
            nbat->comb_rule = ljcrNONE;
            nbat->free(nbat->nbfp_comb);
        }

        if (fp != NULL)
        {
            if (nbat->comb_rule == ljcrNONE)
            {
                fprintf(fp,"Using full Lennard-Jones parameter combination matrix\n\n");
            }
            else
            {
                fprintf(fp,"Using %s Lennard-Jones combination rule\n\n",
                        nbat->comb_rule==ljcrGEOM ? "geometric" : "Lorentz-Berthelot");
            }
        }

        set_combination_rule_data(nbat);
    }
    else
    {
        nbat->comb_rule = ljcrNONE;

        nbat->free(nbat->nbfp_comb);
    }

    nbat->lj_comb = NULL;
    if (simple)
    {
        int pack_x;

        switch (nb_kernel_type)
        {
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:
            pack_x = max(NBNXN_CPU_CLUSTER_I_SIZE,
                         nbnxn_kernel_to_cj_size(nb_kernel_type));
            switch (pack_x)
            {
            case 4:
                nbat->XFormat = nbatX4;
                break;
            case 8:
                nbat->XFormat = nbatX8;
                break;
            default:
                gmx_incons("Unsupported packing width");
            }
            break;
        default:
            nbat->XFormat = nbatXYZ;
        }

        nbat->FFormat = nbat->XFormat;
    }
    else
    {
        nbat->XFormat = nbatXYZQ;
        nbat->FFormat = nbatXYZ;
    }
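    /* Layout illustration (editorial, for four atoms): nbatXYZQ interleaves
     * the charge with the coordinates,
     *   x0 y0 z0 q0  x1 y1 z1 q1  x2 y2 z2 q2  x3 y3 z3 q3
     * while nbatX4 packs coordinates per PACK_X4 = 4 atoms,
     *   x0 x1 x2 x3  y0 y1 y2 y3  z0 z1 z2 z3
     * with the charges kept in the separate nbat->q array.
     */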
    nbat->nenergrp = n_energygroups;
    if (!simple)
    {
        /* Energy groups not supported yet for super-sub lists */
        if (n_energygroups > 1 && fp != NULL)
        {
            fprintf(fp,"\nNOTE: With GPUs, reporting energy group contributions is not supported\n\n");
        }
        nbat->nenergrp = 1;
    }
    /* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
    if (nbat->nenergrp > 64)
    {
        gmx_fatal(FARGS,"With NxN kernels not more than 64 energy groups are supported\n");
    }
    nbat->neg_2log = 1;
    while (nbat->nenergrp > (1<<nbat->neg_2log))
    {
        nbat->neg_2log++;
    }
    nbat->energrp = NULL;
    nbat->alloc((void **)&nbat->shift_vec,SHIFTS*sizeof(*nbat->shift_vec));
    nbat->xstride = (nbat->XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
    nbat->fstride = (nbat->FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
    nbat->x       = NULL;
#ifdef GMX_NBNXN_SIMD
    {
        /* Set the diagonal cluster pair exclusion mask setup data.
         * In the kernel we check 0 < j - i to generate the masks.
         * Here we store j - i for generating the mask for the first i;
         * we subtract 0.5 to avoid rounding issues.
         * In the kernel we can subtract 1 to generate the masks for
         * the subsequent i values.
         */
        const int simd_width = GMX_NBNXN_SIMD_BITWIDTH/(sizeof(real)*8);
        int       simd_4xn_diag_size,j;

        simd_4xn_diag_size = max(NBNXN_CPU_CLUSTER_I_SIZE,simd_width);
        snew_aligned(nbat->simd_4xn_diag,simd_4xn_diag_size,NBNXN_MEM_ALIGN);
        for(j=0; j<simd_4xn_diag_size; j++)
        {
            nbat->simd_4xn_diag[j] = j - 0.5;
        }

        snew_aligned(nbat->simd_2xnn_diag,simd_width,NBNXN_MEM_ALIGN);
        for(j=0; j<simd_width/2; j++)
        {
            /* The j-cluster size is half the SIMD width */
            nbat->simd_2xnn_diag[j]              = j - 0.5;
            /* The next half of the SIMD width is for i + 1 */
            nbat->simd_2xnn_diag[simd_width/2+j] = j - 1 - 0.5;
        }
    }
#endif
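/* Illustration (editorial): for a 4-wide kernel the stored 4xN values are
 * {-0.5, 0.5, 1.5, 2.5}. The check "0 < stored" gives the mask
 * {F,T,T,T} for i = 0, i.e. only pairs with j > i; subtracting 1 per
 * i step gives {F,F,T,T} for i = 1, and so on, which excludes the
 * diagonal and the lower triangle of the cluster pair.
 */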
    nbat->natoms = 0;
    nbat->nalloc = 0;

    /* Initialize the output data structures */
    nbat->nout = nout;
    snew(nbat->out,nbat->nout);
    for(i=0; i<nbat->nout; i++)
    {
        nbnxn_atomdata_output_init(&nbat->out[i],
                                   nb_kernel_type,
                                   nbat->nenergrp,1<<nbat->neg_2log,
                                   nbat->alloc);
    }
    nbat->buffer_flags.flag        = NULL;
    nbat->buffer_flags.flag_nalloc = 0;
}
static void copy_lj_to_nbat_lj_comb_x4(const real *ljparam_type,
                                       const int *type,int na,
                                       real *ljparam_at)
{
    int is,k,i;

    /* The LJ params follow the combination rule:
     * copy the params for the type array to the atom array.
     */
    for(is=0; is<na; is+=PACK_X4)
    {
        for(k=0; k<PACK_X4; k++)
        {
            i = is + k;
            ljparam_at[is*2        +k] = ljparam_type[type[i]*2  ];
            ljparam_at[is*2+PACK_X4+k] = ljparam_type[type[i]*2+1];
        }
    }
}
static void copy_lj_to_nbat_lj_comb_x8(const real *ljparam_type,
                                       const int *type,int na,
                                       real *ljparam_at)
{
    int is,k,i;

    /* The LJ params follow the combination rule:
     * copy the params for the type array to the atom array.
     */
    for(is=0; is<na; is+=PACK_X8)
    {
        for(k=0; k<PACK_X8; k++)
        {
            i = is + k;
            ljparam_at[is*2        +k] = ljparam_type[type[i]*2  ];
            ljparam_at[is*2+PACK_X8+k] = ljparam_type[type[i]*2+1];
        }
    }
}
/* Sets the atom type and LJ data in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t *nbat,
                                         int ngrid,
                                         const nbnxn_search_t nbs,
                                         const int *type)
{
    int g,i,ncz,ash;
    const nbnxn_grid_t *grid;

    for(g=0; g<ngrid; g++)
    {
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for(i=0; i<grid->ncx*grid->ncy; i++)
        {
            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_int_to_nbat_int(nbs->a+ash,grid->cxy_na[i],ncz*grid->na_sc,
                                 type,nbat->ntype-1,nbat->type+ash);

            if (nbat->comb_rule != ljcrNONE)
            {
                if (nbat->XFormat == nbatX4)
                {
                    copy_lj_to_nbat_lj_comb_x4(nbat->nbfp_comb,
                                               nbat->type+ash,ncz*grid->na_sc,
                                               nbat->lj_comb+ash*2);
                }
                else if (nbat->XFormat == nbatX8)
                {
                    copy_lj_to_nbat_lj_comb_x8(nbat->nbfp_comb,
                                               nbat->type+ash,ncz*grid->na_sc,
                                               nbat->lj_comb+ash*2);
                }
            }
        }
    }
}
/* Sets the charges in nbnxn_atomdata_t *nbat */
static void nbnxn_atomdata_set_charges(nbnxn_atomdata_t *nbat,
                                       int ngrid,
                                       const nbnxn_search_t nbs,
                                       const real *charge)
{
    int  g,cxy,ash,na,na_round,i;
    real *q;
    const nbnxn_grid_t *grid;

    for(g=0; g<ngrid; g++)
    {
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for(cxy=0; cxy<grid->ncx*grid->ncy; cxy++)
        {
            ash      = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
            na       = grid->cxy_na[cxy];
            na_round = (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;

            if (nbat->XFormat == nbatXYZQ)
            {
                /* The charges are interleaved: point at the first q slot */
                q = nbat->x + ash*STRIDE_XYZQ + ZZ + 1;
                for(i=0; i<na; i++)
                {
                    *q = charge[nbs->a[ash+i]];
                    q += STRIDE_XYZQ;
                }
                /* Complete the partially filled last cell with zeros */
                for(; i<na_round; i++)
                {
                    *q  = 0;
                    q  += STRIDE_XYZQ;
                }
            }
            else
            {
                q = nbat->q + ash;
                for(i=0; i<na; i++)
                {
                    *q = charge[nbs->a[ash+i]];
                    q++;
                }
                /* Complete the partially filled last cell with zeros */
                for(; i<na_round; i++)
                {
                    *q = 0;
                    q++;
                }
            }
        }
    }
}
/* Copies the energy group indices to a reordered and packed array */
static void copy_egp_to_nbat_egps(const int *a,int na,int na_round,
                                  int na_c,int bit_shift,
                                  const int *in,int *innb)
{
    int i,j,sa,at;
    int comb;

    j = 0;
    for(i=0; i<na; i+=na_c)
    {
        /* Store na_c energy group numbers into one int */
        comb = 0;
        for(sa=0; sa<na_c; sa++)
        {
            at = a[i+sa];
            if (at >= 0)
            {
                comb |= (GET_CGINFO_GID(in[at]) << (sa*bit_shift));
            }
        }
        innb[j++] = comb;
    }
    /* Complete the partially filled last cell with fill */
    for(; i<na_round; i+=na_c)
    {
        innb[j++] = 0;
    }
}
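/* Example (editorial): with na_c = 4 and bit_shift = 2, the group ids
 * {1,0,3,2} of one cell pack into 0b10110001: id 1 in bits 0-1, id 0 in
 * bits 2-3, id 3 in bits 4-5 and id 2 in bits 6-7. A group id can be
 * unpacked again as (comb >> (sa*bit_shift)) & ((1 << bit_shift) - 1).
 */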
/* Sets the energy group indices for atoms in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_energygroups(nbnxn_atomdata_t *nbat,
                                            int ngrid,
                                            const nbnxn_search_t nbs,
                                            const int *atinfo)
{
    int g,i,ncz,ash;
    const nbnxn_grid_t *grid;

    for(g=0; g<ngrid; g++)
    {
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for(i=0; i<grid->ncx*grid->ncy; i++)
        {
            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_egp_to_nbat_egps(nbs->a+ash,grid->cxy_na[i],ncz*grid->na_sc,
                                  nbat->na_c,nbat->neg_2log,
                                  atinfo,nbat->energrp+(ash>>grid->na_c_2log));
        }
    }
}
/* Sets all required atom parameter data in nbnxn_atomdata_t */
void nbnxn_atomdata_set(nbnxn_atomdata_t *nbat,
                        int locality,
                        const nbnxn_search_t nbs,
                        const t_mdatoms *mdatoms,
                        const int *atinfo)
{
    int ngrid;

    if (locality == eatLocal)
    {
        ngrid = 1;
    }
    else
    {
        ngrid = nbs->ngrid;
    }

    nbnxn_atomdata_set_atomtypes(nbat,ngrid,nbs,mdatoms->typeA);

    nbnxn_atomdata_set_charges(nbat,ngrid,nbs,mdatoms->chargeA);

    if (nbat->nenergrp > 1)
    {
        nbnxn_atomdata_set_energygroups(nbat,ngrid,nbs,atinfo);
    }
}
/* Copies the shift vector array to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_shiftvec(gmx_bool bDynamicBox,
                                  rvec *shift_vec,
                                  nbnxn_atomdata_t *nbat)
{
    int i;

    nbat->bDynamicBox = bDynamicBox;
    for(i=0; i<SHIFTS; i++)
    {
        copy_rvec(shift_vec[i],nbat->shift_vec[i]);
    }
}
/* Copies (and reorders) the coordinates to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_x_to_nbat_x(const nbnxn_search_t nbs,
                                     int locality,
                                     gmx_bool FillLocal,
                                     rvec *x,
                                     nbnxn_atomdata_t *nbat)
{
    int g0=0,g1=0;
    int nth,th;

    switch (locality)
    {
    case eatAll:      g0 = 0; g1 = nbs->ngrid; break;
    case eatLocal:    g0 = 0; g1 = 1;          break;
    case eatNonlocal: g0 = 1; g1 = nbs->ngrid; break;
    }

    if (FillLocal)
    {
        nbat->natoms_local = nbs->grid[0].nc*nbs->grid[0].na_sc;
    }

    nth = gmx_omp_nthreads_get(emntPairsearch);

#pragma omp parallel for num_threads(nth) schedule(static)
    for(th=0; th<nth; th++)
    {
        int g;

        for(g=g0; g<g1; g++)
        {
            const nbnxn_grid_t *grid;
            int cxy0,cxy1,cxy;

            grid = &nbs->grid[g];

            /* Divide the grid columns evenly over the threads */
            cxy0 = (grid->ncx*grid->ncy* th   +nth-1)/nth;
            cxy1 = (grid->ncx*grid->ncy*(th+1)+nth-1)/nth;

            for(cxy=cxy0; cxy<cxy1; cxy++)
            {
                int na,ash,na_fill;

                na  = grid->cxy_na[cxy];
                ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;

                if (g == 0 && FillLocal)
                {
                    na_fill =
                        (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;
                }
                else
                {
                    /* We fill only the real particle locations.
                     * We assume the filler entries at the end have
                     * already been set properly during neighbor search.
                     */
                    na_fill = na;
                }
                copy_rvec_to_nbat_real(nbs->a+ash,na,na_fill,x,
                                       nbat->XFormat,nbat->x,ash,
                                       0,0,0);
            }
        }
    }
}
static void
nbnxn_atomdata_clear_reals(real * gmx_restrict dest,
                           int i0,int i1)
{
    int i;

    for(i=i0; i<i1; i++)
    {
        dest[i] = 0;
    }
}

static void
nbnxn_atomdata_reduce_reals(real * gmx_restrict dest,
                            gmx_bool bDestSet,
                            real ** gmx_restrict src,
                            int nsrc,
                            int i0,int i1)
{
    int i,s;

    if (bDestSet)
    {
        /* The destination buffer contains data, add to it */
        for(i=i0; i<i1; i++)
        {
            for(s=0; s<nsrc; s++)
            {
                dest[i] += src[s][i];
            }
        }
    }
    else
    {
        /* The destination buffer is uninitialized, set it first */
        for(i=i0; i<i1; i++)
        {
            dest[i] = src[0][i];
            for(s=1; s<nsrc; s++)
            {
                dest[i] += src[s][i];
            }
        }
    }
}
static void
nbnxn_atomdata_reduce_reals_simd(real * gmx_restrict dest,
                                 gmx_bool bDestSet,
                                 real ** gmx_restrict src,
                                 int nsrc,
                                 int i0,int i1)
{
#ifdef GMX_NBNXN_SIMD
/* The SIMD width here is actually independent of that in the kernels,
 * but we use the same width for simplicity (usually optimal anyhow).
 */
#if GMX_NBNXN_SIMD_BITWIDTH == 128
#define GMX_MM128_HERE
#endif
#if GMX_NBNXN_SIMD_BITWIDTH == 256
#define GMX_MM256_HERE
#endif
#include "gmx_simd_macros.h"

    int       i,s;
    gmx_mm_pr dest_SSE,src_SSE;

    if (bDestSet)
    {
        for(i=i0; i<i1; i+=GMX_SIMD_WIDTH_HERE)
        {
            dest_SSE = gmx_load_pr(dest+i);
            for(s=0; s<nsrc; s++)
            {
                src_SSE  = gmx_load_pr(src[s]+i);
                dest_SSE = gmx_add_pr(dest_SSE,src_SSE);
            }
            gmx_store_pr(dest+i,dest_SSE);
        }
    }
    else
    {
        for(i=i0; i<i1; i+=GMX_SIMD_WIDTH_HERE)
        {
            dest_SSE = gmx_load_pr(src[0]+i);
            for(s=1; s<nsrc; s++)
            {
                src_SSE  = gmx_load_pr(src[s]+i);
                dest_SSE = gmx_add_pr(dest_SSE,src_SSE);
            }
            gmx_store_pr(dest+i,dest_SSE);
        }
    }

#undef GMX_MM128_HERE
#undef GMX_MM256_HERE
#endif
}
/* Add part of the force array(s) from nbnxn_atomdata_t to f */
static void
nbnxn_atomdata_add_nbat_f_to_f_part(const nbnxn_search_t nbs,
                                    const nbnxn_atomdata_t *nbat,
                                    nbnxn_atomdata_output_t *out,
                                    int nfa,
                                    int a0,int a1,
                                    rvec *f)
{
    int         a,i,fa;
    const int  *cell;
    const real *fnb;

    cell = nbs->cell;

    /* Loop over the atoms in the range and add the forces */
    switch (nbat->FFormat)
    {
    case nbatXYZ:
    case nbatXYZQ:
        if (nfa == 1)
        {
            fnb = out[0].f;

            for(a=a0; a<a1; a++)
            {
                i = cell[a]*nbat->fstride;

                f[a][XX] += fnb[i];
                f[a][YY] += fnb[i+1];
                f[a][ZZ] += fnb[i+2];
            }
        }
        else
        {
            for(a=a0; a<a1; a++)
            {
                i = cell[a]*nbat->fstride;

                for(fa=0; fa<nfa; fa++)
                {
                    f[a][XX] += out[fa].f[i];
                    f[a][YY] += out[fa].f[i+1];
                    f[a][ZZ] += out[fa].f[i+2];
                }
            }
        }
        break;
    case nbatX4:
        if (nfa == 1)
        {
            fnb = out[0].f;

            for(a=a0; a<a1; a++)
            {
                i = X4_IND_A(cell[a]);

                f[a][XX] += fnb[i+XX*PACK_X4];
                f[a][YY] += fnb[i+YY*PACK_X4];
                f[a][ZZ] += fnb[i+ZZ*PACK_X4];
            }
        }
        else
        {
            for(a=a0; a<a1; a++)
            {
                i = X4_IND_A(cell[a]);

                for(fa=0; fa<nfa; fa++)
                {
                    f[a][XX] += out[fa].f[i+XX*PACK_X4];
                    f[a][YY] += out[fa].f[i+YY*PACK_X4];
                    f[a][ZZ] += out[fa].f[i+ZZ*PACK_X4];
                }
            }
        }
        break;
    case nbatX8:
        if (nfa == 1)
        {
            fnb = out[0].f;

            for(a=a0; a<a1; a++)
            {
                i = X8_IND_A(cell[a]);

                f[a][XX] += fnb[i+XX*PACK_X8];
                f[a][YY] += fnb[i+YY*PACK_X8];
                f[a][ZZ] += fnb[i+ZZ*PACK_X8];
            }
        }
        else
        {
            for(a=a0; a<a1; a++)
            {
                i = X8_IND_A(cell[a]);

                for(fa=0; fa<nfa; fa++)
                {
                    f[a][XX] += out[fa].f[i+XX*PACK_X8];
                    f[a][YY] += out[fa].f[i+YY*PACK_X8];
                    f[a][ZZ] += out[fa].f[i+ZZ*PACK_X8];
                }
            }
        }
        break;
    default:
        gmx_incons("Unsupported nbnxn_atomdata_t format");
    }
}
/* Add the force array(s) from nbnxn_atomdata_t to f */
void nbnxn_atomdata_add_nbat_f_to_f(const nbnxn_search_t nbs,
                                    int locality,
                                    const nbnxn_atomdata_t *nbat,
                                    rvec *f)
{
    int a0=0,na=0;
    int nth,th;

    nbs_cycle_start(&nbs->cc[enbsCCreducef]);

    switch (locality)
    {
    case eatAll:
        a0 = 0;
        na = nbs->natoms_nonlocal;
        break;
    case eatLocal:
        a0 = 0;
        na = nbs->natoms_local;
        break;
    case eatNonlocal:
        a0 = nbs->natoms_local;
        na = nbs->natoms_nonlocal - nbs->natoms_local;
        break;
    }

    nth = gmx_omp_nthreads_get(emntNonbonded);

    if (nbat->nout > 1)
    {
        if (locality != eatAll)
        {
            gmx_incons("add_f_to_f called with nout>1 and locality!=eatAll");
        }

        /* Reduce the force thread output buffers into buffer 0, before adding
         * them to the, differently ordered, "real" force buffer.
         */
#pragma omp parallel for num_threads(nth) schedule(static)
        for(th=0; th<nth; th++)
        {
            const nbnxn_buffer_flags_t *flags;
            int   b0,b1,b;
            int   i0,i1;
            int   nfptr;
            real *fptr[NBNXN_BUFFERFLAG_MAX_THREADS];
            int   out;

            flags = &nbat->buffer_flags;

            /* Calculate the cell-block range for our thread */
            b0 = (flags->nflag* th   )/nth;
            b1 = (flags->nflag*(th+1))/nth;

            for(b=b0; b<b1; b++)
            {
                i0 =  b   *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
                i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;

                nfptr = 0;
                for(out=1; out<nbat->nout; out++)
                {
                    if (flags->flag[b] & (1U<<out))
                    {
                        fptr[nfptr++] = nbat->out[out].f;
                    }
                }
                if (nfptr > 0)
                {
#ifdef GMX_NBNXN_SIMD
                    nbnxn_atomdata_reduce_reals_simd
#else
                    nbnxn_atomdata_reduce_reals
#endif
                        (nbat->out[0].f,
                         flags->flag[b] & (1U<<0),
                         fptr,nfptr,
                         i0,i1);
                }
                else if (!(flags->flag[b] & (1U<<0)))
                {
                    nbnxn_atomdata_clear_reals(nbat->out[0].f,
                                               i0,i1);
                }
            }
        }
    }
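    /* Editorial note on the buffer flags used above: the force buffer is
     * divided into blocks of NBNXN_BUFFERFLAG_SIZE atoms, and bit "out" of
     * flags->flag[b] records whether thread-output buffer "out" touched
     * block b. Example: flag[b] == 0b0101 means only buffers 0 and 2 wrote
     * to block b, so the reduction adds buffer 2 into buffer 0 and skips
     * the rest; a block with no bits set at all is simply cleared.
     */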
#pragma omp parallel for num_threads(nth) schedule(static)
    for(th=0; th<nth; th++)
    {
        nbnxn_atomdata_add_nbat_f_to_f_part(nbs,nbat,
                                            nbat->out,
                                            1,
                                            a0+((th+0)*na)/nth,
                                            a0+((th+1)*na)/nth,
                                            f);
    }

    nbs_cycle_stop(&nbs->cc[enbsCCreducef]);
}
/* Adds the shift forces from nbnxn_atomdata_t to fshift */
void nbnxn_atomdata_add_nbat_fshift_to_fshift(const nbnxn_atomdata_t *nbat,
                                              rvec *fshift)
{
    const nbnxn_atomdata_output_t *out;
    int  th;
    int  s;
    rvec sum;

    out = nbat->out;

    for(s=0; s<SHIFTS; s++)
    {
        clear_rvec(sum);
        for(th=0; th<nbat->nout; th++)
        {
            sum[XX] += out[th].fshift[s*DIM+XX];
            sum[YY] += out[th].fshift[s*DIM+YY];
            sum[ZZ] += out[th].fshift[s*DIM+ZZ];
        }
        rvec_inc(fshift[s],sum);
    }
}