/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 * This source code is part of
 *
 * GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 */
#include "nbnxn_consts.h"
#include "nbnxn_internal.h"
#include "nbnxn_search.h"
#include "nbnxn_atomdata.h"
#include "gmx_omp_nthreads.h"
/* Default nbnxn allocation routine, allocates NBNXN_MEM_ALIGN byte aligned */
void nbnxn_alloc_aligned(void **ptr, size_t nbytes)
    *ptr = save_malloc_aligned("ptr", __FILE__, __LINE__, nbytes, 1, NBNXN_MEM_ALIGN);

/* Free function for memory allocated with nbnxn_alloc_aligned */
void nbnxn_free_aligned(void *ptr)

/* Reallocation wrapper function for nbnxn data structures */
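/* Editorial note (added comment, not part of the original source): this wrapper
 * allocates nbytes_new with the supplied allocation routine, copies the first
 * nbytes_copy bytes of the old buffer into the new one, and is assumed to
 * release the old buffer with the matching free routine (that part is not
 * shown in this excerpt). nbytes_copy must not exceed nbytes_new.
 */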
void nbnxn_realloc_void(void **ptr,
                        int nbytes_copy, int nbytes_new,
    ma(&ptr_new, nbytes_new);

    if (nbytes_new > 0 && ptr_new == NULL)
        gmx_fatal(FARGS, "Allocation of %d bytes failed", nbytes_new);

    if (nbytes_new < nbytes_copy)
        gmx_incons("In nbnxn_realloc_void: new size less than copy size");
    memcpy(ptr_new, *ptr, nbytes_copy);
/* Reallocate the nbnxn_atomdata_t for a size of n atoms */
void nbnxn_atomdata_realloc(nbnxn_atomdata_t *nbat, int n)
    nbnxn_realloc_void((void **)&nbat->type,
                       nbat->natoms*sizeof(*nbat->type),
                       n*sizeof(*nbat->type),
                       nbat->alloc, nbat->free);
    nbnxn_realloc_void((void **)&nbat->lj_comb,
                       nbat->natoms*2*sizeof(*nbat->lj_comb),
                       n*2*sizeof(*nbat->lj_comb),
                       nbat->alloc, nbat->free);
    if (nbat->XFormat != nbatXYZQ)
        nbnxn_realloc_void((void **)&nbat->q,
                           nbat->natoms*sizeof(*nbat->q),
                           n*sizeof(*nbat->q),
                           nbat->alloc, nbat->free);
    if (nbat->nenergrp > 1)
        nbnxn_realloc_void((void **)&nbat->energrp,
                           nbat->natoms/nbat->na_c*sizeof(*nbat->energrp),
                           n/nbat->na_c*sizeof(*nbat->energrp),
                           nbat->alloc, nbat->free);
    nbnxn_realloc_void((void **)&nbat->x,
                       nbat->natoms*nbat->xstride*sizeof(*nbat->x),
                       n*nbat->xstride*sizeof(*nbat->x),
                       nbat->alloc, nbat->free);
    for (t = 0; t < nbat->nout; t++)
        /* Allocate one element extra for possible signaling with CUDA */
        nbnxn_realloc_void((void **)&nbat->out[t].f,
                           nbat->natoms*nbat->fstride*sizeof(*nbat->out[t].f),
                           n*nbat->fstride*sizeof(*nbat->out[t].f),
                           nbat->alloc, nbat->free);
/* Initializes an nbnxn_atomdata_output_t data structure */
static void nbnxn_atomdata_output_init(nbnxn_atomdata_output_t *out,
                                       int nenergrp, int stride,
    ma((void **)&out->fshift, SHIFTS*DIM*sizeof(*out->fshift));
    out->nV = nenergrp*nenergrp;
    ma((void **)&out->Vvdw, out->nV*sizeof(*out->Vvdw));
    ma((void **)&out->Vc, out->nV*sizeof(*out->Vc ));

    if (nb_kernel_type == nbnxnk4xN_SIMD_4xN ||
        nb_kernel_type == nbnxnk4xN_SIMD_2xNN)
        cj_size  = nbnxn_kernel_to_cj_size(nb_kernel_type);
        out->nVS = nenergrp*nenergrp*stride*(cj_size>>1)*cj_size;
        ma((void **)&out->VSvdw, out->nVS*sizeof(*out->VSvdw));
        ma((void **)&out->VSc, out->nVS*sizeof(*out->VSc ));
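        /* Editorial note (added comment, not part of the original source): as
         * an arithmetic example, with nenergrp = 2, stride = 2 (i.e.
         * 1<<neg_2log) and cj_size = 4, the line above gives
         * nVS = 2*2*2*2*4 = 64 reals in each of the VSvdw and VSc
         * group-pair energy buffers.
         */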
static void copy_int_to_nbat_int(const int *a, int na, int na_round,
                                 const int *in, int fill, int *innb)
    for (i = 0; i < na; i++)
        innb[j++] = in[a[i]];
    /* Complete the partially filled last cell with fill */
    for (; i < na_round; i++)

static void clear_nbat_real(int na, int nbatFormat, real *xnb, int a0)
    for (a = 0; a < na; a++)
        for (d = 0; d < DIM; d++)
            xnb[(a0+a)*STRIDE_XYZ+d] = 0;

    for (a = 0; a < na; a++)
        for (d = 0; d < DIM; d++)
            xnb[(a0+a)*STRIDE_XYZQ+d] = 0;

    c = a0 & (PACK_X4-1);
    for (a = 0; a < na; a++)
        xnb[j+XX*PACK_X4] = 0;
        xnb[j+YY*PACK_X4] = 0;
        xnb[j+ZZ*PACK_X4] = 0;
        j += (DIM-1)*PACK_X4;

    c = a0 & (PACK_X8-1);
    for (a = 0; a < na; a++)
        xnb[j+XX*PACK_X8] = 0;
        xnb[j+YY*PACK_X8] = 0;
        xnb[j+ZZ*PACK_X8] = 0;
        j += (DIM-1)*PACK_X8;
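/* Editorial note (added comment, not part of the original source): in the
 * packed nbatX4/nbatX8 formats, coordinates of PACK_X4 (or PACK_X8)
 * consecutive atoms are stored component-wise, e.g. for PACK_X4 = 4:
 *   x0 x1 x2 x3  y0 y1 y2 y3  z0 z1 z2 z3
 * which is why the indexing above uses XX/YY/ZZ*PACK_X4 offsets and j is
 * advanced by (DIM-1)*PACK_X4 once a pack of 4 atoms is complete.
 */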
void copy_rvec_to_nbat_real(const int *a, int na, int na_round,
                            rvec *x, int nbatFormat, real *xnb, int a0,
                            int cx, int cy, int cz)
    /* We might need to place filler particles to fill up the cell to na_round.
     * The coefficients (LJ and q) for such particles are zero.
     * But we might still get NaN as 0*NaN when distances are too small.
     * We hope that -107 nm is far enough away from zero
     * to avoid accidental short distances to particles shifted down for pbc.
     */
#define NBAT_FAR_AWAY 107
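/* Editorial example (added comment, not part of the original source): with
 * this value, filler atom i in a column with cell indices (cx, cy, cz) =
 * (0, 0, 0) is placed at (-107, -107, -107*(1+i)) nm, so fillers end up far
 * from all real atoms and from each other.
 */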
    for (i = 0; i < na; i++)
        xnb[j++] = x[a[i]][XX];
        xnb[j++] = x[a[i]][YY];
        xnb[j++] = x[a[i]][ZZ];
    /* Complete the partially filled last cell with copies of the last element.
     * This simplifies the bounding box calculation and avoids
     * numerical issues with atoms that are coincidentally close.
     */
    for (; i < na_round; i++)
        xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
        xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
        xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);

    for (i = 0; i < na; i++)
        xnb[j++] = x[a[i]][XX];
        xnb[j++] = x[a[i]][YY];
        xnb[j++] = x[a[i]][ZZ];
    /* Complete the partially filled last cell with particles far apart */
    for (; i < na_round; i++)
        xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
        xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
        xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);

    c = a0 & (PACK_X4-1);
    for (i = 0; i < na; i++)
        xnb[j+XX*PACK_X4] = x[a[i]][XX];
        xnb[j+YY*PACK_X4] = x[a[i]][YY];
        xnb[j+ZZ*PACK_X4] = x[a[i]][ZZ];
        j += (DIM-1)*PACK_X4;
    /* Complete the partially filled last cell with particles far apart */
    for (; i < na_round; i++)
        xnb[j+XX*PACK_X4] = -NBAT_FAR_AWAY*(1 + cx);
        xnb[j+YY*PACK_X4] = -NBAT_FAR_AWAY*(1 + cy);
        xnb[j+ZZ*PACK_X4] = -NBAT_FAR_AWAY*(1 + cz + i);
        j += (DIM-1)*PACK_X4;

    c = a0 & (PACK_X8 - 1);
    for (i = 0; i < na; i++)
        xnb[j+XX*PACK_X8] = x[a[i]][XX];
        xnb[j+YY*PACK_X8] = x[a[i]][YY];
        xnb[j+ZZ*PACK_X8] = x[a[i]][ZZ];
        j += (DIM-1)*PACK_X8;
    /* Complete the partially filled last cell with particles far apart */
    for (; i < na_round; i++)
        xnb[j+XX*PACK_X8] = -NBAT_FAR_AWAY*(1 + cx);
        xnb[j+YY*PACK_X8] = -NBAT_FAR_AWAY*(1 + cy);
        xnb[j+ZZ*PACK_X8] = -NBAT_FAR_AWAY*(1 + cz + i);
        j += (DIM-1)*PACK_X8;

    gmx_incons("Unsupported nbnxn_atomdata_t format");
/* Determines the combination rule (or none) to be used, stores it,
 * and sets the LJ parameters required with the rule.
 */
static void set_combination_rule_data(nbnxn_atomdata_t *nbat)
    switch (nbat->comb_rule)
        nbat->comb_rule = ljcrGEOM;

        for (i = 0; i < nt; i++)
            /* Copy the diagonal from the nbfp matrix */
            nbat->nbfp_comb[i*2 ] = sqrt(nbat->nbfp[(i*nt+i)*2 ]);
            nbat->nbfp_comb[i*2+1] = sqrt(nbat->nbfp[(i*nt+i)*2+1]);

        for (i = 0; i < nt; i++)
            /* Get 6*C6 and 12*C12 from the diagonal of the nbfp matrix */
            c6  = nbat->nbfp[(i*nt+i)*2 ];
            c12 = nbat->nbfp[(i*nt+i)*2+1];
            if (c6 > 0 && c12 > 0)
                /* We store 0.5*2^1/6*sigma and sqrt(4*3*eps),
                 * so we get 6*C6 and 12*C12 after combining.
                 */
                nbat->nbfp_comb[i*2 ] = 0.5*pow(c12/c6, 1.0/6.0);
                nbat->nbfp_comb[i*2+1] = sqrt(c6*c6/c12);
                nbat->nbfp_comb[i*2 ] = 0;
                nbat->nbfp_comb[i*2+1] = 0;

        /* nbfp_s4 stores two parameters using a stride of 4,
         * because this would suit x86 SIMD single-precision
         * quad-load intrinsics. There's a slight inefficiency in
         * allocating and initializing nbfp_s4 when it might not
         * be used, but introducing the conditional code is not
         * really worth it. */
        nbat->alloc((void **)&nbat->nbfp_s4, nt*nt*4*sizeof(*nbat->nbfp_s4));
        for (i = 0; i < nt; i++)
            for (j = 0; j < nt; j++)
                nbat->nbfp_s4[(i*nt+j)*4+0] = nbat->nbfp[(i*nt+j)*2+0];
                nbat->nbfp_s4[(i*nt+j)*4+1] = nbat->nbfp[(i*nt+j)*2+1];
                nbat->nbfp_s4[(i*nt+j)*4+2] = 0;
                nbat->nbfp_s4[(i*nt+j)*4+3] = 0;

        gmx_incons("Unknown combination rule");
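/* Editorial sketch (added, not part of the original GROMACS source): shows how
 * pair parameters can be rebuilt from the per-type combination data stored
 * above. The function and variable names here are hypothetical; only
 * nbnxn_atomdata_t, nbfp_comb and ljcrGEOM come from this file.
 */
#if 0
static void demo_combine_lj_params(const nbnxn_atomdata_t *nbat, int ti, int tj,
                                   real *c6_scaled, real *c12_scaled)
{
    if (nbat->comb_rule == ljcrGEOM)
    {
        /* nbfp_comb holds sqrt(6*C6) and sqrt(12*C12) per type,
         * so the geometric rule is a simple product.
         */
        *c6_scaled  = nbat->nbfp_comb[ti*2 ]*nbat->nbfp_comb[tj*2 ];
        *c12_scaled = nbat->nbfp_comb[ti*2+1]*nbat->nbfp_comb[tj*2+1];
    }
    else
    {
        /* Lorentz-Berthelot: nbfp_comb holds 0.5*2^(1/6)*sigma and sqrt(12*eps)
         * per type. Adding the sigma terms gives 2^(1/6)*sigma_ij, multiplying
         * the eps terms gives 12*eps_ij.
         */
        real sig  = nbat->nbfp_comb[ti*2 ] + nbat->nbfp_comb[tj*2 ];
        real eps  = nbat->nbfp_comb[ti*2+1]*nbat->nbfp_comb[tj*2+1];
        real sig2 = sig*sig;
        real sig6 = sig2*sig2*sig2;    /* = 2*sigma_ij^6                */

        *c6_scaled  = eps*sig6;        /* = 24*eps*sigma^6  = 6*C6_ij   */
        *c12_scaled = eps*sig6*sig6;   /* = 48*eps*sigma^12 = 12*C12_ij */
    }
}
#endif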
/* Initializes an nbnxn_atomdata_t data structure */
void nbnxn_atomdata_init(FILE *fp,
                         nbnxn_atomdata_t *nbat,
                         int ntype, const real *nbfp,
                         nbnxn_alloc_t *alloc,
    gmx_bool simple, bCombGeom, bCombLB;

    nbat->alloc = nbnxn_alloc_aligned;
    nbat->free  = nbnxn_free_aligned;

    fprintf(debug, "There are %d atom types in the system, adding one for nbnxn_atomdata_t\n", ntype);
    nbat->ntype = ntype + 1;
    nbat->alloc((void **)&nbat->nbfp,
                nbat->ntype*nbat->ntype*2*sizeof(*nbat->nbfp));
    nbat->alloc((void **)&nbat->nbfp_comb, nbat->ntype*2*sizeof(*nbat->nbfp_comb));

    /* A tolerance of 1e-5 seems reasonable for (possibly hand-typed)
     * force-field floating point parameters.
     */
    ptr = getenv("GMX_LJCOMB_TOL");
        sscanf(ptr, "%lf", &dbl);

    /* Temporarily fill nbat->nbfp_comb with sigma and epsilon
     * to check for the LB rule.
     */
    for (i = 0; i < ntype; i++)
        c6  = nbfp[(i*ntype+i)*2 ]/6.0;
        c12 = nbfp[(i*ntype+i)*2+1]/12.0;
        if (c6 > 0 && c12 > 0)
            nbat->nbfp_comb[i*2 ] = pow(c12/c6, 1.0/6.0);
            nbat->nbfp_comb[i*2+1] = 0.25*c6*c6/c12;
        else if (c6 == 0 && c12 == 0)
            nbat->nbfp_comb[i*2 ] = 0;
            nbat->nbfp_comb[i*2+1] = 0;
            /* Cannot use the LB rule with only dispersion or repulsion */

    for (i = 0; i < nbat->ntype; i++)
        for (j = 0; j < nbat->ntype; j++)
            if (i < ntype && j < ntype)
                /* fr->nbfp has been updated, so that array too now stores c6/c12 including
                 * the 6.0/12.0 prefactors to save 2 flops in the most common case (force-only).
                 */
                c6  = nbfp[(i*ntype+j)*2 ];
                c12 = nbfp[(i*ntype+j)*2+1];
                nbat->nbfp[(i*nbat->ntype+j)*2 ] = c6;
                nbat->nbfp[(i*nbat->ntype+j)*2+1] = c12;

                /* Compare 6*C6 and 12*C12 for the geometric combination rule */
                bCombGeom = bCombGeom &&
                    gmx_within_tol(c6*c6, nbfp[(i*ntype+i)*2 ]*nbfp[(j*ntype+j)*2 ], tol) &&
                    gmx_within_tol(c12*c12, nbfp[(i*ntype+i)*2+1]*nbfp[(j*ntype+j)*2+1], tol);

                /* Compare C6 and C12 for the Lorentz-Berthelot combination rule */
                bCombLB = bCombLB &&
                    ((c6 == 0 && c12 == 0 &&
                      (nbat->nbfp_comb[i*2+1] == 0 || nbat->nbfp_comb[j*2+1] == 0)) ||
                     (c6 > 0 && c12 > 0 &&
                      gmx_within_tol(pow(c12/c6, 1.0/6.0), 0.5*(nbat->nbfp_comb[i*2]+nbat->nbfp_comb[j*2]), tol) &&
                      gmx_within_tol(0.25*c6*c6/c12, sqrt(nbat->nbfp_comb[i*2+1]*nbat->nbfp_comb[j*2+1]), tol)));
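                /* Editorial note (added comment, not part of the original
                 * source): the test above accepts the Lorentz-Berthelot rule
                 * when sigma_ij = (c12/c6)^(1/6) matches the arithmetic mean
                 * 0.5*(sigma_i + sigma_j) and eps_ij = 0.25*c6^2/c12 matches
                 * the geometric mean sqrt(eps_i*eps_j), both within tol.
                 */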
            /* Add zero parameters for the additional dummy atom type */
            nbat->nbfp[(i*nbat->ntype+j)*2 ] = 0;
            nbat->nbfp[(i*nbat->ntype+j)*2+1] = 0;

    fprintf(debug, "Combination rules: geometric %d Lorentz-Berthelot %d\n",

    simple = nbnxn_kernel_pairlist_simple(nb_kernel_type);

    /* We prefer the geometric combination rule,
     * as that gives a slightly faster kernel than the LB rule.
     */
    nbat->comb_rule = ljcrGEOM;
    nbat->comb_rule = ljcrLB;

    nbat->comb_rule = ljcrNONE;
    nbat->free(nbat->nbfp_comb);

    if (nbat->comb_rule == ljcrNONE)
        fprintf(fp, "Using full Lennard-Jones parameter combination matrix\n\n");
        fprintf(fp, "Using %s Lennard-Jones combination rule\n\n",
                nbat->comb_rule == ljcrGEOM ? "geometric" : "Lorentz-Berthelot");

    set_combination_rule_data(nbat);

    nbat->comb_rule = ljcrNONE;
    nbat->free(nbat->nbfp_comb);
    nbat->lj_comb = NULL;
    switch (nb_kernel_type)
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:
            pack_x = max(NBNXN_CPU_CLUSTER_I_SIZE,
                         nbnxn_kernel_to_cj_size(nb_kernel_type));
            nbat->XFormat = nbatX4;
            nbat->XFormat = nbatX8;
            gmx_incons("Unsupported packing width");

            nbat->XFormat = nbatXYZ;

    nbat->FFormat = nbat->XFormat;

    nbat->XFormat = nbatXYZQ;
    nbat->FFormat = nbatXYZ;

    nbat->nenergrp = n_energygroups;
    /* Energy groups not supported yet for super-sub lists */
    if (n_energygroups > 1 && fp != NULL)
        fprintf(fp, "\nNOTE: With GPUs, reporting energy group contributions is not supported\n\n");
    /* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
    if (nbat->nenergrp > 64)
        gmx_fatal(FARGS, "With NxN kernels not more than 64 energy groups are supported\n");
    while (nbat->nenergrp > (1<<nbat->neg_2log))
    nbat->energrp = NULL;
    nbat->alloc((void **)&nbat->shift_vec, SHIFTS*sizeof(*nbat->shift_vec));
    nbat->xstride = (nbat->XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
    nbat->fstride = (nbat->FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
#ifdef GMX_NBNXN_SIMD
    /* Set the diagonal cluster pair exclusion mask setup data.
     * In the kernel we check 0 < j - i to generate the masks.
     * Here we store j - i for generating the mask for the first i,
     * we subtract 0.5 to avoid rounding issues.
     * In the kernel we can subtract 1 to generate the subsequent mask.
     */
    const int simd_width = GMX_NBNXN_SIMD_BITWIDTH/(sizeof(real)*8);
    int simd_4xn_diag_size, real_excl, simd_excl_size, j, s;

    simd_4xn_diag_size = max(NBNXN_CPU_CLUSTER_I_SIZE, simd_width);
    snew_aligned(nbat->simd_4xn_diag, simd_4xn_diag_size, NBNXN_MEM_ALIGN);
    for (j = 0; j < simd_4xn_diag_size; j++)
        nbat->simd_4xn_diag[j] = j - 0.5;
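    /* Editorial example (added comment, not part of the original source): for
     * a 4-wide SIMD register this stores -0.5, 0.5, 1.5, 2.5, i.e. j - 0.5 for
     * j = 0..3. Comparing these values against 0 masks out j <= i for the
     * first i-atom, and subtracting 1 in the kernel shifts the mask to the
     * next i-atom.
     */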
    snew_aligned(nbat->simd_2xnn_diag, simd_width, NBNXN_MEM_ALIGN);
    for (j = 0; j < simd_width/2; j++)
        /* The j-cluster size is half the SIMD width */
        nbat->simd_2xnn_diag[j] = j - 0.5;
        /* The next half of the SIMD width is for i + 1 */
        nbat->simd_2xnn_diag[simd_width/2+j] = j - 1 - 0.5;

    /* We always use 32-bit integer exclusion masks. When we use
     * double precision, we fit two integers in a double SIMD register.
     */
    real_excl = sizeof(real)/sizeof(*nbat->simd_excl_mask);
    /* Set bits for use with both 4xN and 2x(N+N) kernels */
    simd_excl_size = NBNXN_CPU_CLUSTER_I_SIZE*simd_width*real_excl;
    snew_aligned(nbat->simd_excl_mask, simd_excl_size*real_excl, NBNXN_MEM_ALIGN);
    for (j = 0; j < simd_excl_size; j++)
        /* Set the consecutive bits for masking pair exclusions.
         * For double a single-bit mask would be enough.
         * But using two bits avoids endianness issues.
         */
        for (s = 0; s < real_excl; s++)
            /* Set the consecutive bits for masking pair exclusions */
            nbat->simd_excl_mask[j*real_excl + s] = (1U << j);
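            /* Editorial note (added comment, not part of the original source):
             * entry j thus has only bit j set in each of its real_excl integer
             * slots, e.g. j = 3 stores 0x8; the kernels can load such words as
             * SIMD masks to zero out excluded pairs.
             */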
    /* Initialize the output data structures */
    snew(nbat->out, nbat->nout);
    for (i = 0; i < nbat->nout; i++)
        nbnxn_atomdata_output_init(&nbat->out[i],
                                   nbat->nenergrp, 1<<nbat->neg_2log,

    nbat->buffer_flags.flag = NULL;
    nbat->buffer_flags.flag_nalloc = 0;
static void copy_lj_to_nbat_lj_comb_x4(const real *ljparam_type,
                                       const int *type, int na,
    /* The LJ params follow the combination rule:
     * copy the params for the type array to the atom array.
     */
    for (is = 0; is < na; is += PACK_X4)
        for (k = 0; k < PACK_X4; k++)
            ljparam_at[is*2 +k] = ljparam_type[type[i]*2 ];
            ljparam_at[is*2+PACK_X4+k] = ljparam_type[type[i]*2+1];

static void copy_lj_to_nbat_lj_comb_x8(const real *ljparam_type,
                                       const int *type, int na,
    /* The LJ params follow the combination rule:
     * copy the params for the type array to the atom array.
     */
    for (is = 0; is < na; is += PACK_X8)
        for (k = 0; k < PACK_X8; k++)
            ljparam_at[is*2 +k] = ljparam_type[type[i]*2 ];
            ljparam_at[is*2+PACK_X8+k] = ljparam_type[type[i]*2+1];
/* Sets the atom type and LJ data in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t *nbat,
                                         const nbnxn_search_t nbs,
    const nbnxn_grid_t *grid;

    for (g = 0; g < ngrid; g++)
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (i = 0; i < grid->ncx*grid->ncy; i++)
            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_int_to_nbat_int(nbs->a+ash, grid->cxy_na[i], ncz*grid->na_sc,
                                 type, nbat->ntype-1, nbat->type+ash);

            if (nbat->comb_rule != ljcrNONE)
                if (nbat->XFormat == nbatX4)
                    copy_lj_to_nbat_lj_comb_x4(nbat->nbfp_comb,
                                               nbat->type+ash, ncz*grid->na_sc,
                                               nbat->lj_comb+ash*2);
                else if (nbat->XFormat == nbatX8)
                    copy_lj_to_nbat_lj_comb_x8(nbat->nbfp_comb,
                                               nbat->type+ash, ncz*grid->na_sc,
                                               nbat->lj_comb+ash*2);
/* Sets the charges in nbnxn_atomdata_t *nbat */
static void nbnxn_atomdata_set_charges(nbnxn_atomdata_t *nbat,
                                       const nbnxn_search_t nbs,
    int g, cxy, ncz, ash, na, na_round, i, j;
    const nbnxn_grid_t *grid;

    for (g = 0; g < ngrid; g++)
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (cxy = 0; cxy < grid->ncx*grid->ncy; cxy++)
            ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
            na = grid->cxy_na[cxy];
            na_round = (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;

            if (nbat->XFormat == nbatXYZQ)
                q = nbat->x + ash*STRIDE_XYZQ + ZZ + 1;
                for (i = 0; i < na; i++)
                    *q = charge[nbs->a[ash+i]];
                /* Complete the partially filled last cell with zeros */
                for (; i < na_round; i++)

                for (i = 0; i < na; i++)
                    *q = charge[nbs->a[ash+i]];
                /* Complete the partially filled last cell with zeros */
                for (; i < na_round; i++)
/* Copies the energy group indices to a reordered and packed array */
static void copy_egp_to_nbat_egps(const int *a, int na, int na_round,
                                  int na_c, int bit_shift,
                                  const int *in, int *innb)
    for (i = 0; i < na; i += na_c)
        /* Store na_c energy group numbers into one int */
        for (sa = 0; sa < na_c; sa++)
                comb |= (GET_CGINFO_GID(in[at]) << (sa*bit_shift));
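        /* Editorial example (added comment, not part of the original source):
         * with na_c = 4 and bit_shift = 2 (enough for up to 4 energy groups),
         * the group indices of 4 consecutive atoms are packed into one int as
         * gid0 | gid1<<2 | gid2<<4 | gid3<<6.
         */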
        /* Complete the partially filled last cell with fill */
        for (; i < na_round; i += na_c)

/* Set the energy group indices for atoms in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_energygroups(nbnxn_atomdata_t *nbat,
                                            const nbnxn_search_t nbs,
    const nbnxn_grid_t *grid;

    for (g = 0; g < ngrid; g++)
        grid = &nbs->grid[g];

        /* Loop over all columns and copy and fill */
        for (i = 0; i < grid->ncx*grid->ncy; i++)
            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;

            copy_egp_to_nbat_egps(nbs->a+ash, grid->cxy_na[i], ncz*grid->na_sc,
                                  nbat->na_c, nbat->neg_2log,
                                  atinfo, nbat->energrp+(ash>>grid->na_c_2log));

/* Sets all required atom parameter data in nbnxn_atomdata_t */
void nbnxn_atomdata_set(nbnxn_atomdata_t *nbat,
                        const nbnxn_search_t nbs,
                        const t_mdatoms *mdatoms,
    if (locality == eatLocal)

    nbnxn_atomdata_set_atomtypes(nbat, ngrid, nbs, mdatoms->typeA);

    nbnxn_atomdata_set_charges(nbat, ngrid, nbs, mdatoms->chargeA);

    if (nbat->nenergrp > 1)
        nbnxn_atomdata_set_energygroups(nbat, ngrid, nbs, atinfo);
/* Copies the shift vector array to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_shiftvec(gmx_bool bDynamicBox,
                                  nbnxn_atomdata_t *nbat)
    nbat->bDynamicBox = bDynamicBox;
    for (i = 0; i < SHIFTS; i++)
        copy_rvec(shift_vec[i], nbat->shift_vec[i]);

/* Copies (and reorders) the coordinates to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_x_to_nbat_x(const nbnxn_search_t nbs,
                                     nbnxn_atomdata_t *nbat)
    nbat->natoms_local = nbs->grid[0].nc*nbs->grid[0].na_sc;

    nth = gmx_omp_nthreads_get(emntPairsearch);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (th = 0; th < nth; th++)
        for (g = g0; g < g1; g++)
            const nbnxn_grid_t *grid;

            grid = &nbs->grid[g];

            cxy0 = (grid->ncx*grid->ncy* th +nth-1)/nth;
            cxy1 = (grid->ncx*grid->ncy*(th+1)+nth-1)/nth;

            for (cxy = cxy0; cxy < cxy1; cxy++)
                int na, ash, na_fill;

                na = grid->cxy_na[cxy];
                ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;

                if (g == 0 && FillLocal)
                        (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;
                    /* We fill only the real particle locations.
                     * We assume the filling entries at the end have already
                     * been properly set during the neighbour search.
                     */
                copy_rvec_to_nbat_real(nbs->a+ash, na, na_fill, x,
                                       nbat->XFormat, nbat->x, ash,
nbnxn_atomdata_clear_reals(real * gmx_restrict dest,
    for (i = i0; i < i1; i++)

nbnxn_atomdata_reduce_reals(real * gmx_restrict dest,
                            real ** gmx_restrict src,
        /* The destination buffer contains data, add to it */
        for (i = i0; i < i1; i++)
            for (s = 0; s < nsrc; s++)
                dest[i] += src[s][i];

        /* The destination buffer is uninitialized, set it first */
        for (i = i0; i < i1; i++)
            dest[i] = src[0][i];
            for (s = 1; s < nsrc; s++)
                dest[i] += src[s][i];
nbnxn_atomdata_reduce_reals_simd(real * gmx_restrict dest,
                                 real ** gmx_restrict src,
#ifdef GMX_NBNXN_SIMD
/* The SIMD width here is actually independent of that in the kernels,
 * but we use the same width for simplicity (usually optimal anyhow).
 */
#ifdef GMX_NBNXN_HALF_WIDTH_SIMD
#define GMX_USE_HALF_WIDTH_SIMD_HERE
#endif
#include "gmx_simd_macros.h"

    gmx_mm_pr dest_SSE, src_SSE;

        for (i = i0; i < i1; i += GMX_SIMD_WIDTH_HERE)
            dest_SSE = gmx_load_pr(dest+i);
            for (s = 0; s < nsrc; s++)
                src_SSE  = gmx_load_pr(src[s]+i);
                dest_SSE = gmx_add_pr(dest_SSE, src_SSE);
            gmx_store_pr(dest+i, dest_SSE);

        for (i = i0; i < i1; i += GMX_SIMD_WIDTH_HERE)
            dest_SSE = gmx_load_pr(src[0]+i);
            for (s = 1; s < nsrc; s++)
                src_SSE  = gmx_load_pr(src[s]+i);
                dest_SSE = gmx_add_pr(dest_SSE, src_SSE);
            gmx_store_pr(dest+i, dest_SSE);
/* Add part of the force array(s) from nbnxn_atomdata_t to f */
nbnxn_atomdata_add_nbat_f_to_f_part(const nbnxn_search_t nbs,
                                    const nbnxn_atomdata_t *nbat,
                                    nbnxn_atomdata_output_t *out,
    /* Loop over all columns and copy and fill */
    switch (nbat->FFormat)
            for (a = a0; a < a1; a++)
                i = cell[a]*nbat->fstride;

                f[a][XX] += fnb[i];
                f[a][YY] += fnb[i+1];
                f[a][ZZ] += fnb[i+2];

            for (a = a0; a < a1; a++)
                i = cell[a]*nbat->fstride;

                for (fa = 0; fa < nfa; fa++)
                    f[a][XX] += out[fa].f[i];
                    f[a][YY] += out[fa].f[i+1];
                    f[a][ZZ] += out[fa].f[i+2];

            for (a = a0; a < a1; a++)
                i = X4_IND_A(cell[a]);

                f[a][XX] += fnb[i+XX*PACK_X4];
                f[a][YY] += fnb[i+YY*PACK_X4];
                f[a][ZZ] += fnb[i+ZZ*PACK_X4];

            for (a = a0; a < a1; a++)
                i = X4_IND_A(cell[a]);

                for (fa = 0; fa < nfa; fa++)
                    f[a][XX] += out[fa].f[i+XX*PACK_X4];
                    f[a][YY] += out[fa].f[i+YY*PACK_X4];
                    f[a][ZZ] += out[fa].f[i+ZZ*PACK_X4];

            for (a = a0; a < a1; a++)
                i = X8_IND_A(cell[a]);

                f[a][XX] += fnb[i+XX*PACK_X8];
                f[a][YY] += fnb[i+YY*PACK_X8];
                f[a][ZZ] += fnb[i+ZZ*PACK_X8];

            for (a = a0; a < a1; a++)
                i = X8_IND_A(cell[a]);

                for (fa = 0; fa < nfa; fa++)
                    f[a][XX] += out[fa].f[i+XX*PACK_X8];
                    f[a][YY] += out[fa].f[i+YY*PACK_X8];
                    f[a][ZZ] += out[fa].f[i+ZZ*PACK_X8];

            gmx_incons("Unsupported nbnxn_atomdata_t format");
/* Add the force array(s) from nbnxn_atomdata_t to f */
void nbnxn_atomdata_add_nbat_f_to_f(const nbnxn_search_t nbs,
                                    const nbnxn_atomdata_t *nbat,
    nbs_cycle_start(&nbs->cc[enbsCCreducef]);

            na = nbs->natoms_nonlocal;

            na = nbs->natoms_local;

            a0 = nbs->natoms_local;
            na = nbs->natoms_nonlocal - nbs->natoms_local;

    nth = gmx_omp_nthreads_get(emntNonbonded);

        if (locality != eatAll)
            gmx_incons("add_f_to_f called with nout>1 and locality!=eatAll");

        /* Reduce the force thread output buffers into buffer 0, before adding
         * them to the, differently ordered, "real" force buffer.
         */
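        /* Editorial note (added comment, not part of the original source):
         * each cell block b has a bitmask in nbat->buffer_flags; bit "out" is
         * set when thread output buffer "out" wrote forces for that block.
         * Below, all flagged buffers other than 0 are summed into out[0].f for
         * the block's index range, and when even bit 0 is clear the block in
         * out[0].f is cleared so the final per-atom addition reads zeros.
         */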
#pragma omp parallel for num_threads(nth) schedule(static)
        for (th = 0; th < nth; th++)
            const nbnxn_buffer_flags_t *flags;
            real *fptr[NBNXN_BUFFERFLAG_MAX_THREADS];

            flags = &nbat->buffer_flags;

            /* Calculate the cell-block range for our thread */
            b0 = (flags->nflag* th )/nth;
            b1 = (flags->nflag*(th+1))/nth;

            for (b = b0; b < b1; b++)
                i0 = b *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
                i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;

                for (out = 1; out < nbat->nout; out++)
                    if (flags->flag[b] & (1U<<out))
                        fptr[nfptr++] = nbat->out[out].f;

#ifdef GMX_NBNXN_SIMD
                    nbnxn_atomdata_reduce_reals_simd
                    nbnxn_atomdata_reduce_reals
                                               flags->flag[b] & (1U<<0),
                else if (!(flags->flag[b] & (1U<<0)))
                    nbnxn_atomdata_clear_reals(nbat->out[0].f,

#pragma omp parallel for num_threads(nth) schedule(static)
    for (th = 0; th < nth; th++)
        nbnxn_atomdata_add_nbat_f_to_f_part(nbs, nbat,

    nbs_cycle_stop(&nbs->cc[enbsCCreducef]);
/* Adds the shift forces from nbnxn_atomdata_t to fshift */
void nbnxn_atomdata_add_nbat_fshift_to_fshift(const nbnxn_atomdata_t *nbat,
    const nbnxn_atomdata_output_t *out;

    for (s = 0; s < SHIFTS; s++)
        for (th = 0; th < nbat->nout; th++)
            sum[XX] += out[th].fshift[s*DIM+XX];
            sum[YY] += out[th].fshift[s*DIM+YY];
            sum[ZZ] += out[th].fshift[s*DIM+ZZ];
        rvec_inc(fshift[s], sum);