/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "kernel_gpu_ref.h"

#include <cmath>

#include <algorithm>

#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/utility/fatalerror.h"
static constexpr int c_clSize = c_nbnxnGpuClusterSize;
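/* Reference (plain C++) implementation of the GPU nonbonded kernel: it walks
 * the GPU pair-list layout (super-clusters of i-clusters against groups of
 * four j-clusters) and is intended for correctness checks of the GPU kernels
 * rather than for performance.
 */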
void nbnxn_kernel_gpu_ref(const NbnxnPairlistGpu*    nbl,
                          const nbnxn_atomdata_t*    nbat,
                          const interaction_const_t* iconst,
                          rvec*                      shift_vec,
                          const gmx::StepWorkload&   stepWork,
                          int                        clearF,
                          gmx::ArrayRef<real>        f,
                          real*                      fshift,
                          real*                      Vc,
                          real*                      Vvdw)
{
    gmx_bool            bEwald;
    const real*         Ftab = nullptr;
    real                rcut2, rvdw2, rlist2;
    int                 ntype;
    real                facel;
    int                 ish3;
    int                 sci;
    int                 cj4_ind0, cj4_ind1, cj4_ind;
    int                 ci, cj;
    int                 ic, jc, ia, ja, is, ifs, js, jfs, im, jm;
    int                 n0;
    int                 ggid;
    real                shX, shY, shZ;
    real                fscal, tx, ty, tz;
    real                rinvsq;
    real                iq;
    real                qq, vcoul = 0, krsq, vctot;
    int                 nti;
    int                 tj;
    real                rt, r, eps;
    real                rinvsix;
    real                Vvdwtot;
    real                Vvdw_rep, Vvdw_disp;
    real                ix, iy, iz, fix, fiy, fiz;
    real                dx, dy, dz, rsq, rinv;
    real                int_bit;
    real                fexcl;
    real                c6, c12;
    const nbnxn_excl_t* excl[2];
    int                 npair_tot, npair;
    int                 nhwu, nhwu_pruned;
    if (nbl->na_ci != c_clSize)
    {
        gmx_fatal(FARGS,
                  "The neighborlist cluster size in the GPU reference kernel is %d, expected it to "
                  "be %d",
                  nbl->na_ci,
                  c_clSize);
    }
    if (clearF == enbvClearFYes)
    {
        clear_f(nbat, 0, f.data());
    }
    bEwald = EEL_FULL(iconst->eeltype);
    if (bEwald)
    {
        Ftab = iconst->coulombEwaldTables->tableF.data();
    }
    rcut2 = iconst->rcoulomb * iconst->rcoulomb;
    rvdw2 = iconst->rvdw * iconst->rvdw;

    rlist2 = nbl->rlist * nbl->rlist;
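    /* All cutoffs are pre-squared so the inner loops can compare against rsq
     * directly, avoiding a square root per pair.
     */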
    const int*  type     = nbat->params().type.data();
    facel                = iconst->epsfac;
    const real* shiftvec = shift_vec[0];
    const real* vdwparam = nbat->params().nbfp.data();
    ntype                = nbat->params().numTypes;

    const real* x = nbat->x().data();

    npair_tot   = 0;
    nhwu        = 0;
    nhwu_pruned = 0;
    for (const nbnxn_sci_t& nbln : nbl->sci)
    {
        ish3     = 3 * nbln.shift;
        shX      = shiftvec[ish3];
        shY      = shiftvec[ish3 + 1];
        shZ      = shiftvec[ish3 + 2];
        cj4_ind0 = nbln.cj4_ind_start;
        cj4_ind1 = nbln.cj4_ind_end;
        sci      = nbln.sci;
        vctot    = 0;
        Vvdwtot  = 0;
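        /* Each sci entry describes one i-super-cluster:
         * c_nbnxnGpuNumClusterPerSupercluster i-clusters of c_clSize atoms,
         * paired with a range of four-wide j-cluster groups (cj4).
         */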
        if (nbln.shift == CENTRAL
            && nbl->cj4[cj4_ind0].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
        {
            /* we have the diagonal:
             * add the charge self interaction energy term
             */
            for (im = 0; im < c_nbnxnGpuNumClusterPerSupercluster; im++)
            {
                ci = sci * c_nbnxnGpuNumClusterPerSupercluster + im;
                for (ic = 0; ic < c_clSize; ic++)
                {
                    ia = ci * c_clSize + ic;
                    iq = x[ia * nbat->xstride + 3];
                    vctot += iq * iq;
                }
            }
            if (!bEwald)
            {
                vctot *= -facel * 0.5 * iconst->c_rf;
            }
            else
            {
                /* last factor 1/sqrt(pi) */
                vctot *= -facel * iconst->ewaldcoeff_q * M_1_SQRTPI;
            }
        }
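        /* Loop over the four-wide j-cluster groups (cj4) of this
         * super-cluster; each group carries two interaction-mask/exclusion
         * entries, one per cluster-pair split (two warps in CUDA).
         */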
        for (cj4_ind = cj4_ind0; (cj4_ind < cj4_ind1); cj4_ind++)
        {
            excl[0] = &nbl->excl[nbl->cj4[cj4_ind].imei[0].excl_ind];
            excl[1] = &nbl->excl[nbl->cj4[cj4_ind].imei[1].excl_ind];

            for (jm = 0; jm < c_nbnxnGpuJgroupSize; jm++)
            {
                cj = nbl->cj4[cj4_ind].cj[jm];

                for (im = 0; im < c_nbnxnGpuNumClusterPerSupercluster; im++)
                {
                    /* We're only using the first imask,
                     * but here imei[1].imask is identical.
                     */
                    if ((nbl->cj4[cj4_ind].imei[0].imask >> (jm * c_nbnxnGpuNumClusterPerSupercluster + im))
                        & 1)
                    {
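                        /* imask is a 32-bit interaction mask: bit
                         * (jm * c_nbnxnGpuNumClusterPerSupercluster + im) is set
                         * when i-cluster im can interact with j-cluster jm;
                         * list pruning clears bits of distant cluster pairs.
                         */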
                        gmx_bool within_rlist;

                        ci = sci * c_nbnxnGpuNumClusterPerSupercluster + im;

                        within_rlist = FALSE;
                        npair        = 0;
                        for (ic = 0; ic < c_clSize; ic++)
                        {
                            ia = ci * c_clSize + ic;

                            is  = ia * nbat->xstride;
                            ifs = ia * nbat->fstride;
                            ix  = shX + x[is + 0];
                            iy  = shY + x[is + 1];
                            iz  = shZ + x[is + 2];
                            iq  = facel * x[is + 3];
                            nti = ntype * 2 * type[ia];

                            fix = fiy = fiz = 0;
                            for (jc = 0; jc < c_clSize; jc++)
                            {
                                ja = cj * c_clSize + jc;

                                if (nbln.shift == CENTRAL && ci == cj && ja <= ia)
                                {
                                    continue;
                                }

                                constexpr int clusterPerSplit =
                                        c_nbnxnGpuClusterSize / c_nbnxnGpuClusterpairSplit;
                                int_bit = static_cast<real>(
                                        (excl[jc / clusterPerSplit]->pair[(jc & (clusterPerSplit - 1)) * c_clSize + ic]
                                         >> (jm * c_nbnxnGpuNumClusterPerSupercluster + im))
                                        & 1);
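                                /* int_bit is 1.0 for a regular pair and 0.0 for
                                 * an excluded one; multiplying by it removes the
                                 * plain Coulomb/LJ interaction of excluded pairs
                                 * while the RF/Ewald correction terms are kept.
                                 */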
                                js  = ja * nbat->xstride;
                                jfs = ja * nbat->fstride;
                                dx  = ix - x[js + 0];
                                dy  = iy - x[js + 1];
                                dz  = iz - x[js + 2];
                                rsq = dx * dx + dy * dy + dz * dz;

                                if (rsq < rlist2)
                                {
                                    within_rlist = TRUE;
                                }
                                if (rsq >= rcut2)
                                {
                                    continue;
                                }
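                                /* The last atom type is reserved for filler
                                 * particles added for padding; those have zero
                                 * LJ parameters and are not counted as pairs.
                                 */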
                                if (type[ia] != ntype - 1 && type[ja] != ntype - 1)
                                {
                                    npair++;
                                }
                                // Ensure the distance does not become so small that r^-12 overflows
                                rsq = std::max(rsq, c_nbnxnMinDistanceSquared);

                                rinv   = gmx::invsqrt(rsq);
                                rinvsq = rinv * rinv;
                                qq = iq * x[js + 3];
                                if (!bEwald)
                                {
                                    krsq  = iconst->k_rf * rsq;
                                    fscal = qq * (int_bit * rinv - 2 * krsq) * rinvsq;
                                    if (stepWork.computeEnergy)
                                    {
                                        vcoul = qq * (int_bit * rinv + krsq - iconst->c_rf);
                                    }
                                }
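                                /* Ewald electrostatics: the short-range force is
                                 * the bare Coulomb force minus a tabulated Ewald
                                 * correction, interpolated from Ftab below.
                                 */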
                                else
                                {
                                    r   = rsq * rinv;
                                    rt  = r * iconst->coulombEwaldTables->scale;
                                    n0  = static_cast<int>(rt);
                                    eps = rt - static_cast<real>(n0);
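                                    /* Linear interpolation in the force table:
                                     * n0 is the grid point below r (grid spacing
                                     * 1/scale), eps the fractional offset.
                                     */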
                                    fexcl = (1 - eps) * Ftab[n0] + eps * Ftab[n0 + 1];

                                    fscal = qq * (int_bit * rinvsq - fexcl) * rinv;
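                                    /* The energy evaluates erfc(beta*r)/r as
                                     * (1 - erf(beta*r))/r; the potential shift
                                     * sh_ewald is applied to non-excluded pairs
                                     * only (int_bit = 1).
                                     */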
                                    if (stepWork.computeEnergy)
                                    {
                                        vcoul = qq
                                                * ((int_bit - std::erf(iconst->ewaldcoeff_q * r)) * rinv
                                                   - int_bit * iconst->sh_ewald);
                                    }
                                }
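                                /* Plain-cutoff Lennard-Jones 12-6. Note that the
                                 * nbfp table stores 6*C6 and 12*C12, which is why
                                 * the energy terms below are divided by 6 and 12.
                                 */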
                                if (rsq < rvdw2)
                                {
                                    tj = nti + 2 * type[ja];

                                    /* Vanilla Lennard-Jones cutoff */
                                    c6  = vdwparam[tj];
                                    c12 = vdwparam[tj + 1];

                                    rinvsix   = int_bit * rinvsq * rinvsq * rinvsq;
                                    Vvdw_disp = c6 * rinvsix;
                                    Vvdw_rep  = c12 * rinvsix * rinvsix;
                                    fscal += (Vvdw_rep - Vvdw_disp) * rinvsq;
                                    if (stepWork.computeEnergy)
                                    {
                                        Vvdwtot += (Vvdw_rep + int_bit * c12 * iconst->repulsion_shift.cpot) / 12
                                                   - (Vvdw_disp + int_bit * c6 * iconst->dispersion_shift.cpot) / 6;
                                    }
                                }
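                            /* Accumulate this i-atom's force also on the shift
                             * vector of its periodic image; the shift forces are
                             * needed to compute the virial.
                             */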
                            fshift[ish3]     += fix;
                            fshift[ish3 + 1] += fiy;
                            fshift[ish3 + 2] += fiz;
                            /* Count in half work-units.
                             * In CUDA one work-unit is 2 warps.
                             */
                            if ((ic + 1) % (c_clSize / c_nbnxnGpuClusterpairSplit) == 0)
                            {
                                npair_tot += npair;
                                nhwu++;
                                if (within_rlist)
                                {
                                    nhwu_pruned++;
                                }
                                within_rlist = FALSE;
                                npair        = 0;
                            }
                        }
                    }
                }
            }
        }
        if (stepWork.computeEnergy)
        {
            ggid = 0;
            Vc[ggid]   += vctot;
            Vvdw[ggid] += Vvdwtot;
        }
    }
371 "number of half %dx%d atom pairs: %d after pruning: %d fraction %4.2f\n",
376 nhwu_pruned / static_cast<double>(nhwu));
377 fprintf(debug, "generic kernel pair interactions: %d\n", nhwu * nbl->na_ci / 2 * nbl->na_ci);
379 "generic kernel post-prune pair interactions: %d\n",
380 nhwu_pruned * nbl->na_ci / 2 * nbl->na_ci);
381 fprintf(debug, "generic kernel non-zero pair interactions: %d\n", npair_tot);
383 "ratio non-zero/post-prune pair interactions: %4.2f\n",
384 npair_tot / static_cast<double>(nhwu_pruned * gmx::exactDiv(nbl->na_ci, 2) * nbl->na_ci));