 * This file is part of the GROMACS molecular simulation package.
 * Copyright (c) 2012,2013,2014,2015,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
36 #define UNROLLI NBNXN_CPU_CLUSTER_I_SIZE
37 #define UNROLLJ NBNXN_CPU_CLUSTER_I_SIZE
39 /* We could use nbat->xstride and nbat->fstride, but macros might be faster */
42 /* Local i-atom buffer strides */
47 /* All functionality defines are set here, except for:
48 * CALC_ENERGIES, ENERGY_GROUPS which are defined before.
49 * CHECK_EXCLS, which is set just before including the inner loop contents.
52 /* We always calculate shift forces, because it's cheap anyhow */
53 #define CALC_SHIFTFORCES
56 #define NBK_FUNC_NAME2(ljt, feg) nbnxn_kernel ## _ElecRF ## ljt ## feg ## _ref
59 #ifndef VDW_CUTOFF_CHECK
60 #define NBK_FUNC_NAME2(ljt, feg) nbnxn_kernel ## _ElecQSTab ## ljt ## feg ## _ref
62 #define NBK_FUNC_NAME2(ljt, feg) nbnxn_kernel ## _ElecQSTabTwinCut ## ljt ## feg ## _ref
66 #if defined LJ_CUT && !defined LJ_EWALD
67 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJ, feg)
68 #elif defined LJ_FORCE_SWITCH
69 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJFsw, feg)
70 #elif defined LJ_POT_SWITCH
71 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJPsw, feg)
72 #elif defined LJ_EWALD
73 #ifdef LJ_EWALD_COMB_GEOM
74 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJEwCombGeom, feg)
76 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJEwCombLB, feg)
79 #error "No VdW type defined"
94 (const nbnxn_pairlist_t *nbl,
95 const nbnxn_atomdata_t *nbat,
96 const interaction_const_t *ic,
99 real gmx_unused *fshift
107 const nbnxn_ci_t *nbln;
108 const nbnxn_cj_t *l_cj;
111 const real *shiftvec;
115 #ifdef VDW_CUTOFF_CHECK
122 gmx_bool do_LJ, half_LJ, do_coul;
123 int cjind0, cjind1, cjind;
125 real xi[UNROLLI*XI_STRIDE];
126 real fi[UNROLLI*FI_STRIDE];
130 #ifndef ENERGY_GROUPS
135 int egp_sh_i[UNROLLI];
139 real swV3, swV4, swV5;
140 real swF2, swF3, swF4;
143 real lje_coeff2, lje_coeff6_6;
161 const real *tab_coul_FDV0;
163 const real *tab_coul_F;
164 const real gmx_unused *tab_coul_V;
173 swV3 = ic->vdw_switch.c3;
174 swV4 = ic->vdw_switch.c4;
175 swV5 = ic->vdw_switch.c5;
176 swF2 = 3*ic->vdw_switch.c3;
177 swF3 = 4*ic->vdw_switch.c4;
178 swF4 = 5*ic->vdw_switch.c5;
182 lje_coeff2 = ic->ewaldcoeff_lj*ic->ewaldcoeff_lj;
183 lje_coeff6_6 = lje_coeff2*lje_coeff2*lje_coeff2/6.0;
185 lje_vc = ic->sh_lj_ewald;
188 ljc = nbat->nbfp_comb;
200 halfsp = 0.5/ic->tabq_scale;
204 tab_coul_FDV0 = ic->tabq_coul_FDV0;
206 tab_coul_F = ic->tabq_coul_F;
207 tab_coul_V = ic->tabq_coul_V;
212 egp_mask = (1<<nbat->neg_2log) - 1;
216 rcut2 = ic->rcoulomb*ic->rcoulomb;
217 #ifdef VDW_CUTOFF_CHECK
218 rvdw2 = ic->rvdw*ic->rvdw;
221 ntype2 = nbat->ntype*2;
226 shiftvec = shift_vec[0];
231 for (n = 0; n < nbl->nci; n++)
237 ish = (nbln->shift & NBNXN_CI_SHIFT);
238 /* x, f and fshift are assumed to be stored with stride 3 */
240 cjind0 = nbln->cj_ind_start;
241 cjind1 = nbln->cj_ind_end;
242 /* Currently only works super-cells equal to sub-cells */
244 ci_sh = (ish == CENTRAL ? ci : -1);
246 /* We have 5 LJ/C combinations, but use only three inner loops,
247 * as the other combinations are unlikely and/or not much faster:
248 * inner half-LJ + C for half-LJ + C / no-LJ + C
249 * inner LJ + C for full-LJ + C
250 * inner LJ for full-LJ + no-C / half-LJ + no-C
252 do_LJ = (nbln->shift & NBNXN_CI_DO_LJ(0));
253 do_coul = (nbln->shift & NBNXN_CI_DO_COUL(0));
254 half_LJ = ((nbln->shift & NBNXN_CI_HALF_LJ(0)) || !do_LJ) && do_coul;
258 gmx_bool do_self = TRUE;
260 gmx_bool do_self = do_coul;
263 #ifndef ENERGY_GROUPS
267 for (i = 0; i < UNROLLI; i++)
269 egp_sh_i[i] = ((nbat->energrp[ci]>>(i*nbat->neg_2log)) & egp_mask)*nbat->nenergrp;
274 for (i = 0; i < UNROLLI; i++)
276 for (d = 0; d < DIM; d++)
278 xi[i*XI_STRIDE+d] = x[(ci*UNROLLI+i)*X_STRIDE+d] + shiftvec[ishf+d];
279 fi[i*FI_STRIDE+d] = 0;
282 qi[i] = facel*q[ci*UNROLLI+i];
291 Vc_sub_self = 0.5*c_rf;
295 Vc_sub_self = 0.5*tab_coul_V[0];
297 Vc_sub_self = 0.5*tab_coul_FDV0[2];
301 if (l_cj[nbln->cj_ind_start].cj == ci_sh)
303 for (i = 0; i < UNROLLI; i++)
307 egp_ind = egp_sh_i[i] + ((nbat->energrp[ci]>>(i*nbat->neg_2log)) & egp_mask);
311 /* Coulomb self interaction */
312 Vc[egp_ind] -= qi[i]*q[ci*UNROLLI+i]*Vc_sub_self;
315 /* LJ Ewald self interaction */
316 Vvdw[egp_ind] += 0.5*nbat->nbfp[nbat->type[ci*UNROLLI+i]*(nbat->ntype + 1)*2]/6*lje_coeff6_6;
321 #endif /* CALC_ENERGIES */
324 while (cjind < cjind1 && nbl->cj[cjind].excl != 0xffff)
331 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref_inner.h"
338 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref_inner.h"
343 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref_inner.h"
349 for (; (cjind < cjind1); cjind++)
355 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref_inner.h"
362 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref_inner.h"
367 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref_inner.h"
371 /* Add accumulated i-forces to the force array */
372 for (i = 0; i < UNROLLI; i++)
374 for (d = 0; d < DIM; d++)
376 f[(ci*UNROLLI+i)*F_STRIDE+d] += fi[i*FI_STRIDE+d];
379 #ifdef CALC_SHIFTFORCES
380 if (fshift != nullptr)
382 /* Add i forces to shifted force list */
383 for (i = 0; i < UNROLLI; i++)
385 for (d = 0; d < DIM; d++)
387 fshift[ishf+d] += fi[i*FI_STRIDE+d];
394 #ifndef ENERGY_GROUPS
402 printf("atom pairs %d\n", npair);
406 #undef CALC_SHIFTFORCES