2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
/*
 * NOTE(review): this view is a lossy extraction of a GROMACS plain-C
 * non-bonded ("nbnxn") kernel template header.  Many original lines are
 * missing: several #if blocks below lack their visible #else/#endif
 * partners, so the conditional structure shown here is incomplete.
 * The leading integer on each line is an extraction artifact (the
 * original file's line number), not part of the code.  Only comments
 * were added in this pass; all original text is byte-identical.
 */
/* i- and j-cluster unroll factors for the reference (plain C) kernel;
 * both are set to the CPU i-cluster size here. */
36 #define UNROLLI NBNXN_CPU_CLUSTER_I_SIZE
37 #define UNROLLJ NBNXN_CPU_CLUSTER_I_SIZE
39 /* We could use nbat->xstride and nbat->fstride, but macros might be faster */
42 /* Local i-atom buffer strides */
47 /* All functionality defines are set here, except for:
48 * CALC_ENERGIES, ENERGY_GROUPS which are defined before.
49 * CHECK_EXCLS, which is set just before including the inner loop contents.
52 /* We always calculate shift forces, because it's cheap anyhow */
53 #define CALC_SHIFTFORCES
/* Kernel-name construction via token pasting.  Which NBK_FUNC_NAME2
 * definition is active depends on the electrostatics flavor selected by
 * surrounding #if directives that are NOT visible in this extraction:
 * reaction-field (_ElecRF), single-cutoff tabulated (_ElecQSTab), or
 * twin-cutoff tabulated (_ElecQSTabTwinCut). */
56 #define NBK_FUNC_NAME2(ljt, feg) nbnxn_kernel ## _ElecRF ## ljt ## feg ## _ref
59 #ifndef VDW_CUTOFF_CHECK
/* No separate VdW cutoff check: single-cutoff tabulated Coulomb. */
60 #define NBK_FUNC_NAME2(ljt, feg) nbnxn_kernel ## _ElecQSTab ## ljt ## feg ## _ref
/* Twin-cutoff variant; the matching #else/#endif lines are missing
 * from this view. */
62 #define NBK_FUNC_NAME2(ljt, feg) nbnxn_kernel ## _ElecQSTabTwinCut ## ljt ## feg ## _ref
/* Select the VdW-flavor component of the kernel name.  Exactly one of
 * the LJ_* functionality macros is expected to be defined by the file
 * that includes this template. */
66 #if defined LJ_CUT && !defined LJ_EWALD
67 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJ, feg)
68 #elif defined LJ_FORCE_SWITCH
69 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJFsw, feg)
70 #elif defined LJ_POT_SWITCH
71 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJPsw, feg)
72 #elif defined LJ_EWALD
73 #ifdef LJ_EWALD_COMB_GEOM
74 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJEwCombGeom, feg)
/* Lorentz-Berthelot combination-rule branch; the intervening #else line
 * (original line 75) is not visible here. */
76 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJEwCombLB, feg)
79 #error "No VdW type defined"
/*
 * NOTE(review): fragment of the kernel outer-loop function.  The line
 * that forms the function name (via NBK_FUNC_NAME, original line 93),
 * most braces, and many statements are missing from this extraction,
 * so comments below describe only what the visible lines establish and
 * hedge everything else.  Original text is byte-identical.
 *
 * Visible parameters: the pair list (nbl), the atom data (nbat) and the
 * interaction constants (ic).  Output force/energy parameters are
 * presumably among the elided parameter lines — TODO confirm against
 * the full source.
 */
94 (const nbnxn_pairlist_t *nbl,
95 const nbnxn_atomdata_t *nbat,
96 const interaction_const_t *ic,
99 #ifdef CALC_SHIFTFORCES
/* Local declarations: current i-cluster entry, j-cluster list, shift
 * vectors, loop bookkeeping, and per-i-cluster coordinate/force buffers
 * sized by the unroll factor and local strides. */
110 const nbnxn_ci_t *nbln;
111 const nbnxn_cj_t *l_cj;
114 const real *shiftvec;
118 #ifdef VDW_CUTOFF_CHECK
126 gmx_bool do_LJ, half_LJ, do_coul, do_self;
127 int cjind0, cjind1, cjind;
130 real xi[UNROLLI*XI_STRIDE];
131 real fi[UNROLLI*FI_STRIDE];
135 #ifndef ENERGY_GROUPS
/* Per-i-atom energy-group row offsets (energy-group builds only). */
140 int egp_sh_i[UNROLLI];
/* Switch-function polynomial coefficients (potential and force). */
144 real swV3, swV4, swV5;
145 real swF2, swF3, swF4;
/* LJ-PME (Ewald for dispersion) precomputed constants. */
148 real lje_coeff2, lje_coeff6_6, lje_vc;
/* Tabulated Coulomb: either a combined F/D/V0 table or separate
 * force and potential tables — both declared, selection is in elided
 * conditionals. */
164 const real *tab_coul_FDV0;
166 const real *tab_coul_F;
167 const real *tab_coul_V;
/* --- Parameter extraction from ic/nbat into locals --- */
/* VdW switch coefficients; force coefficients are the derivative
 * factors (3, 4, 5) times the potential coefficients. */
178 swV3 = ic->vdw_switch.c3;
179 swV4 = ic->vdw_switch.c4;
180 swV5 = ic->vdw_switch.c5;
181 swF2 = 3*ic->vdw_switch.c3;
182 swF3 = 4*ic->vdw_switch.c4;
183 swF4 = 5*ic->vdw_switch.c5;
/* LJ-PME: coeff^2, coeff^6/6 and the potential-shift constant. */
187 lje_coeff2 = ic->ewaldcoeff_lj*ic->ewaldcoeff_lj;
188 lje_coeff6_6 = lje_coeff2*lje_coeff2*lje_coeff2/6.0;
189 lje_vc = ic->sh_lj_ewald;
191 ljc = nbat->nbfp_comb;
/* Coulomb table scale; halfsp = half the table spacing. */
202 tabscale = ic->tabq_scale;
204 halfsp = 0.5/ic->tabq_scale;
208 tab_coul_FDV0 = ic->tabq_coul_FDV0;
210 tab_coul_F = ic->tabq_coul_F;
211 tab_coul_V = ic->tabq_coul_V;
/* Bit mask extracting one energy-group index from the packed energrp
 * word (neg_2log bits per i-atom). */
216 egp_mask = (1<<nbat->neg_2log) - 1;
/* Squared cutoffs; rvdw2 only when a separate VdW cutoff is checked. */
220 rcut2 = ic->rcoulomb*ic->rcoulomb;
221 #ifdef VDW_CUTOFF_CHECK
222 rvdw2 = ic->rvdw*ic->rvdw;
225 ntype2 = nbat->ntype*2;
230 shiftvec = shift_vec[0];
/* --- Outer loop over all i-cluster entries in the pair list --- */
236 for (n = 0; n < nbl->nci; n++)
/* Periodic-shift index and j-cluster index range for this entry. */
242 ish = (nbln->shift & NBNXN_CI_SHIFT);
243 /* x, f and fshift are assumed to be stored with stride 3 */
245 cjind0 = nbln->cj_ind_start;
246 cjind1 = nbln->cj_ind_end;
247 /* Currently only works super-cells equal to sub-cells */
/* ci_sh is the i-cluster index usable for self-pair detection; -1
 * disables the comparison for shifted (non-central) images. */
249 ci_sh = (ish == CENTRAL ? ci : -1);
251 /* We have 5 LJ/C combinations, but use only three inner loops,
252 * as the other combinations are unlikely and/or not much faster:
253 * inner half-LJ + C for half-LJ + C / no-LJ + C
254 * inner LJ + C for full-LJ + C
255 * inner LJ for full-LJ + no-C / half-LJ + no-C
/* Interaction-type flags are packed into the shift field of the
 * pair-list entry. */
257 do_LJ = (nbln->shift & NBNXN_CI_DO_LJ(0));
258 do_coul = (nbln->shift & NBNXN_CI_DO_COUL(0));
259 half_LJ = ((nbln->shift & NBNXN_CI_HALF_LJ(0)) || !do_LJ) && do_coul;
267 #ifndef ENERGY_GROUPS
/* Precompute each i-atom's energy-group row offset (group index times
 * the number of groups). */
271 for (i = 0; i < UNROLLI; i++)
273 egp_sh_i[i] = ((nbat->energrp[ci]>>(i*nbat->neg_2log)) & egp_mask)*nbat->nenergrp;
/* Load shifted i-cluster coordinates, clear the i-force accumulator,
 * and pre-scale charges by facel (presumably the electrostatics
 * prefactor — confirm in full source). */
278 for (i = 0; i < UNROLLI; i++)
280 for (d = 0; d < DIM; d++)
282 xi[i*XI_STRIDE+d] = x[(ci*UNROLLI+i)*X_STRIDE+d] + shiftvec[ishf+d];
283 fi[i*FI_STRIDE+d] = 0;
286 qi[i] = facel*q[ci*UNROLLI+i];
/* Self-energy correction constant: 0.5*c_rf for reaction field, or
 * half the table potential at r=0 for tabulated Coulomb (element [2]
 * of the first FDV0 quadruplet holds V0). */
295 Vc_sub_self = 0.5*c_rf;
299 Vc_sub_self = 0.5*tab_coul_V[0];
301 Vc_sub_self = 0.5*tab_coul_FDV0[2];
/* Self-pair entry: subtract Coulomb self-interaction and, for LJ-PME,
 * add the LJ Ewald self term (diagonal nbfp entry scaled by
 * coeff^6/6). */
305 if (l_cj[nbln->cj_ind_start].cj == ci_sh)
307 for (i = 0; i < UNROLLI; i++)
311 egp_ind = egp_sh_i[i] + ((nbat->energrp[ci]>>(i*nbat->neg_2log)) & egp_mask);
315 /* Coulomb self interaction */
316 Vc[egp_ind] -= qi[i]*q[ci*UNROLLI+i]*Vc_sub_self;
319 /* LJ Ewald self interaction */
320 Vvdw[egp_ind] += 0.5*nbat->nbfp[nbat->type[ci*UNROLLI+i]*(nbat->ntype + 1)*2]/6*lje_coeff6_6;
325 #endif /* CALC_ENERGIES */
/* j-clusters with a non-trivial exclusion mask (excl != 0xffff) run
 * the inner loop with CHECK_EXCLS; the remainder (plain loop below)
 * skip the exclusion test.  Which of the repeated #include lines is
 * compiled depends on elided conditionals selecting the LJ/Coulomb
 * inner-loop flavor. */
328 while (cjind < cjind1 && nbl->cj[cjind].excl != 0xffff)
335 #include "nbnxn_kernel_ref_inner.h"
342 #include "nbnxn_kernel_ref_inner.h"
347 #include "nbnxn_kernel_ref_inner.h"
353 for (; (cjind < cjind1); cjind++)
359 #include "nbnxn_kernel_ref_inner.h"
366 #include "nbnxn_kernel_ref_inner.h"
371 #include "nbnxn_kernel_ref_inner.h"
374 ninner += cjind1 - cjind0;
376 /* Add accumulated i-forces to the force array */
377 for (i = 0; i < UNROLLI; i++)
379 for (d = 0; d < DIM; d++)
381 f[(ci*UNROLLI+i)*F_STRIDE+d] += fi[i*FI_STRIDE+d];
384 #ifdef CALC_SHIFTFORCES
387 /* Add i forces to shifted force list */
388 for (i = 0; i < UNROLLI; i++)
390 for (d = 0; d < DIM; d++)
392 fshift[ishf+d] += fi[i*FI_STRIDE+d];
399 #ifndef ENERGY_GROUPS
/* Debug/statistics output; likely guarded by an elided conditional. */
407 printf("atom pairs %d\n", npair);
/* Clean up the functionality macro set at the top of this template. */
411 #undef CALC_SHIFTFORCES