2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2017,2018,2019, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
/* NOTE(review): this chunk is an incomplete extraction of the GROMACS
 * reference non-bonded kernel outer-loop template; several preprocessor
 * lines (notably the #else/#endif branches selecting the electrostatics
 * flavor) are not visible here — confirm against the full file.
 */
39 static_assert(UNROLLI == c_nbnxnCpuIClusterSize, "UNROLLI should match the i-cluster size");
41 /* We could use nbat->xstride and nbat->fstride, but macros might be faster */
44 /* Local i-atom buffer strides */
49 /* All functionality defines are set here, except for:
50 * CALC_ENERGIES, ENERGY_GROUPS which are defined before.
51 * CHECK_EXCLS, which is set just before including the inner loop contents.
54 /* We always calculate shift forces, because it's cheap anyhow */
55 #define CALC_SHIFTFORCES
/* Build the kernel function name by token-pasting the electrostatics
 * flavor (ElecRF / ElecQSTab / ElecQSTabTwinCut), the LJ flavor (ljt)
 * and the force/energy variant (feg).
 * NOTE(review): the #ifdef guards choosing among the three
 * NBK_FUNC_NAME2 definitions below are partially missing from this view;
 * the twin-cutoff tabulated variant is presumably selected when
 * VDW_CUTOFF_CHECK is defined — verify in the full source.
 */
58 #define NBK_FUNC_NAME2(ljt, feg) nbnxn_kernel ## _ElecRF ## ljt ## feg ## _ref
61 #ifndef VDW_CUTOFF_CHECK
62 #define NBK_FUNC_NAME2(ljt, feg) nbnxn_kernel ## _ElecQSTab ## ljt ## feg ## _ref
64 #define NBK_FUNC_NAME2(ljt, feg) nbnxn_kernel ## _ElecQSTabTwinCut ## ljt ## feg ## _ref
/* Append the Van der Waals flavor: plain cutoff, force switch,
 * potential switch, or LJ-PME (Ewald) with geometric or
 * Lorentz-Berthelot combination rules.
 */
68 #if defined LJ_CUT && !defined LJ_EWALD
69 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJ, feg)
70 #elif defined LJ_FORCE_SWITCH
71 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJFsw, feg)
72 #elif defined LJ_POT_SWITCH
73 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJPsw, feg)
74 #elif defined LJ_EWALD
75 #ifdef LJ_EWALD_COMB_GEOM
76 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJEwCombGeom, feg)
78 #define NBK_FUNC_NAME(feg) NBK_FUNC_NAME2(_VdwLJEwCombLB, feg)
81 #error "No VdW type defined"
/* Kernel function definition. One shared body is compiled as one of three
 * variants — _F (forces only), _VF (forces + energies), _VgrpF (forces +
 * per-energy-group energies) — selected by CALC_ENERGIES / ENERGY_GROUPS
 * defined before this template is included.
 * NOTE(review): the #if/#elif lines choosing among the three variant names,
 * and parts of the parameter list and body, are missing from this extracted
 * chunk — do not infer control flow across the gaps.
 */
86 NBK_FUNC_NAME(_F) // NOLINT(misc-definitions-in-headers)
89 NBK_FUNC_NAME(_VF) // NOLINT(misc-definitions-in-headers)
91 NBK_FUNC_NAME(_VgrpF) // NOLINT(misc-definitions-in-headers)
96 (const NbnxnPairlistCpu *nbl,
97 const nbnxn_atomdata_t *nbat,
98 const interaction_const_t *ic,
/* fshift may be unused in variants that skip shift-force accumulation */
101 real gmx_unused *fshift
109 const nbnxn_cj_t *l_cj;
111 #ifdef VDW_CUTOFF_CHECK
118 gmx_bool do_LJ, half_LJ, do_coul;
119 int cjind0, cjind1, cjind;
/* Per-i-cluster scratch: shifted coordinates and force accumulators */
121 real xi[UNROLLI*XI_STRIDE];
122 real fi[UNROLLI*FI_STRIDE];
126 #ifndef ENERGY_GROUPS
131 int egp_sh_i[UNROLLI];
/* LJ potential-switch polynomial coefficients (V and F parts) */
135 real swV3, swV4, swV5;
136 real swF2, swF3, swF4;
139 real lje_coeff2, lje_coeff6_6;
/* Tabulated Coulomb: either combined F+D+V layout or separate F/V tables */
156 const real *tab_coul_FDV0;
158 const real *tab_coul_F;
159 const real gmx_unused *tab_coul_V;
168 swV3 = ic->vdw_switch.c3;
169 swV4 = ic->vdw_switch.c4;
170 swV5 = ic->vdw_switch.c5;
/* Force coefficients are the derivative of the switch potential terms */
171 swF2 = 3*ic->vdw_switch.c3;
172 swF3 = 4*ic->vdw_switch.c4;
173 swF4 = 5*ic->vdw_switch.c5;
176 const nbnxn_atomdata_t::Params &nbatParams = nbat->params();
179 lje_coeff2 = ic->ewaldcoeff_lj*ic->ewaldcoeff_lj;
180 lje_coeff6_6 = lje_coeff2*lje_coeff2*lje_coeff2/6.0;
182 lje_vc = ic->sh_lj_ewald;
185 const real *ljc = nbatParams.nbfp_comb.data();
197 halfsp = 0.5/ic->tabq_scale;
201 tab_coul_FDV0 = ic->tabq_coul_FDV0;
203 tab_coul_F = ic->tabq_coul_F;
204 tab_coul_V = ic->tabq_coul_V;
/* Bit mask extracting one energy-group index from the packed energrp word */
209 egp_mask = (1 << nbatParams.neg_2log) - 1;
213 rcut2 = ic->rcoulomb*ic->rcoulomb;
214 #ifdef VDW_CUTOFF_CHECK
215 rvdw2 = ic->rvdw*ic->rvdw;
218 ntype2 = nbatParams.numTypes*2;
219 const real *nbfp = nbatParams.nbfp.data();
220 const real *q = nbatParams.q.data();
221 const int *type = nbatParams.type.data();
223 const real *shiftvec = shift_vec[0];
224 const real *x = nbat->x().data();
226 l_cj = nbl->cj.data();
/* Outer loop over the i-cluster entries of the pairlist */
228 for (const nbnxn_ci_t &ciEntry : nbl->ci)
232 ish = (ciEntry.shift & NBNXN_CI_SHIFT);
233 /* x, f and fshift are assumed to be stored with stride 3 */
235 cjind0 = ciEntry.cj_ind_start;
236 cjind1 = ciEntry.cj_ind_end;
237 /* Currently only works super-cells equal to sub-cells */
/* ci_sh is the i-cluster index usable for self-pair detection; only valid
 * (== ci) when the shift is the central image */
239 ci_sh = (ish == CENTRAL ? ci : -1);
241 /* We have 5 LJ/C combinations, but use only three inner loops,
242 * as the other combinations are unlikely and/or not much faster:
243 * inner half-LJ + C for half-LJ + C / no-LJ + C
244 * inner LJ + C for full-LJ + C
245 * inner LJ for full-LJ + no-C / half-LJ + no-C
247 do_LJ = ((ciEntry.shift & NBNXN_CI_DO_LJ(0)) != 0);
248 do_coul = ((ciEntry.shift & NBNXN_CI_DO_COUL(0)) != 0);
249 half_LJ = (((ciEntry.shift & NBNXN_CI_HALF_LJ(0)) != 0) || !do_LJ) && do_coul;
253 gmx_bool do_self = TRUE;
255 gmx_bool do_self = do_coul;
258 #ifndef ENERGY_GROUPS
/* Precompute the row offsets of this i-cluster's energy groups into the
 * (nenergrp x nenergrp) energy matrices */
262 for (i = 0; i < UNROLLI; i++)
264 egp_sh_i[i] = ((nbatParams.energrp[ci] >> (i*nbatParams.neg_2log)) & egp_mask)*nbatParams.nenergrp;
/* Load shifted i-atom coordinates and clear the force accumulators */
269 for (i = 0; i < UNROLLI; i++)
271 for (d = 0; d < DIM; d++)
273 xi[i*XI_STRIDE+d] = x[(ci*UNROLLI+i)*X_STRIDE+d] + shiftvec[ishf+d];
274 fi[i*FI_STRIDE+d] = 0;
/* Pre-multiply charges by the electrostatics prefactor */
277 qi[i] = facel*q[ci*UNROLLI+i];
/* Self-energy correction term: depends on the Coulomb flavor (RF constant
 * or the r=0 entry of the tabulated potential) */
286 Vc_sub_self = 0.5*c_rf;
290 Vc_sub_self = 0.5*tab_coul_V[0];
292 Vc_sub_self = 0.5*tab_coul_FDV0[2];
/* Only the self-pair (first j-cluster equals the i-cluster in the central
 * image) contributes self-interaction corrections */
296 if (l_cj[ciEntry.cj_ind_start].cj == ci_sh)
298 for (i = 0; i < UNROLLI; i++)
302 egp_ind = egp_sh_i[i] + ((nbatParams.energrp[ci] >> (i*nbatParams.neg_2log)) & egp_mask);
306 /* Coulomb self interaction */
307 Vc[egp_ind] -= qi[i]*q[ci*UNROLLI+i]*Vc_sub_self;
310 /* LJ Ewald self interaction */
311 Vvdw[egp_ind] += 0.5*nbatParams.nbfp[nbatParams.type[ci*UNROLLI+i]*(nbatParams.numTypes + 1)*2]/6*lje_coeff6_6;
316 #endif /* CALC_ENERGIES */
/* First pass: j-clusters whose exclusion mask is not all-ones; the inner
 * loop is presumably included with CHECK_EXCLS defined here — the #define
 * lines are not visible in this chunk, confirm in the full source */
319 while (cjind < cjind1 && nbl->cj[cjind].excl != 0xffff)
326 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref_inner.h"
333 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref_inner.h"
338 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref_inner.h"
/* Second pass: remaining j-clusters with no exclusions to check */
344 for (; (cjind < cjind1); cjind++)
350 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref_inner.h"
357 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref_inner.h"
362 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref_inner.h"
366 /* Add accumulated i-forces to the force array */
367 for (i = 0; i < UNROLLI; i++)
369 for (d = 0; d < DIM; d++)
371 f[(ci*UNROLLI+i)*F_STRIDE+d] += fi[i*FI_STRIDE+d];
374 #ifdef CALC_SHIFTFORCES
375 if (fshift != nullptr)
377 /* Add i forces to shifted force list */
378 for (i = 0; i < UNROLLI; i++)
380 for (d = 0; d < DIM; d++)
382 fshift[ishf+d] += fi[i*FI_STRIDE+d];
389 #ifndef ENERGY_GROUPS
/* Debug output of the interacting pair count */
397 printf("atom pairs %d\n", npair);
/* Clean up flags defined by this template so it can be re-included */
401 #undef CALC_SHIFTFORCES