Update some nbnxm kernel constants to constexpr
[alexxy/gromacs.git] / src/gromacs/nbnxm/kernels_reference/kernel_gpu_ref.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "kernel_gpu_ref.h"

#include <cmath>

#include <algorithm>

#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/utility/fatalerror.h"

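//! Cluster size of the GPU pair-list layout; the reference kernel loops over i- and j-clusters of this size.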
static constexpr int c_clSize = c_nbnxnGpuClusterSize;

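/* Plain C reference implementation of the GPU pair-interaction kernel.
 * It walks the GPU-layout pair list and computes cut-off Lennard-Jones and
 * Coulomb (reaction-field or tabulated Ewald) forces, plus shift forces and,
 * when requested in stepWork, energies.
 */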
void nbnxn_kernel_gpu_ref(const NbnxnPairlistGpu*    nbl,
                          const nbnxn_atomdata_t*    nbat,
                          const interaction_const_t* iconst,
                          rvec*                      shift_vec,
                          const gmx::StepWorkload&   stepWork,
                          int                        clearF,
                          gmx::ArrayRef<real>        f,
                          real*                      fshift,
                          real*                      Vc,
                          real*                      Vvdw)
{
    gmx_bool            bEwald;
    const real*         Ftab = nullptr;
    real                rcut2, rvdw2, rlist2;
    int                 ntype;
    real                facel;
    int                 ish3;
    int                 sci;
    int                 cj4_ind0, cj4_ind1, cj4_ind;
    int                 ci, cj;
    int                 ic, jc, ia, ja, is, ifs, js, jfs, im, jm;
    int                 n0;
    int                 ggid;
    real                shX, shY, shZ;
    real                fscal, tx, ty, tz;
    real                rinvsq;
    real                iq;
    real                qq, vcoul = 0, krsq, vctot;
    int                 nti;
    int                 tj;
    real                rt, r, eps;
    real                rinvsix;
    real                Vvdwtot;
    real                Vvdw_rep, Vvdw_disp;
    real                ix, iy, iz, fix, fiy, fiz;
    real                jx, jy, jz;
    real                dx, dy, dz, rsq, rinv;
    real                int_bit;
    real                fexcl;
    real                c6, c12;
    const nbnxn_excl_t* excl[2];

    int npair_tot, npair;
    int nhwu, nhwu_pruned;

    if (nbl->na_ci != c_clSize)
    {
        gmx_fatal(FARGS,
                  "The neighborlist cluster size in the GPU reference kernel is %d, expected it to "
                  "be %d",
                  nbl->na_ci, c_clSize);
    }

    if (clearF == enbvClearFYes)
    {
        for (real& elem : f)
        {
            elem = 0;
        }
    }

    bEwald = EEL_FULL(iconst->eeltype);
    if (bEwald)
    {
        Ftab = iconst->coulombEwaldTables->tableF.data();
    }

    rcut2 = iconst->rcoulomb * iconst->rcoulomb;
    rvdw2 = iconst->rvdw * iconst->rvdw;

    rlist2 = nbl->rlist * nbl->rlist;

    const int* type      = nbat->params().type.data();
    facel                = iconst->epsfac;
    const real* shiftvec = shift_vec[0];
    const real* vdwparam = nbat->params().nbfp.data();
    ntype                = nbat->params().numTypes;

    const real* x = nbat->x().data();

    npair_tot   = 0;
    nhwu        = 0;
    nhwu_pruned = 0;

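    /* Loop over the i-super-cluster entries of the GPU-layout pair list. Each sci entry
     * covers c_nbnxnGpuNumClusterPerSupercluster i-clusters and references a range of cj4
     * entries, each of which holds c_nbnxnGpuJgroupSize j-clusters plus interaction and
     * exclusion masks.
     */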
    for (const nbnxn_sci_t& nbln : nbl->sci)
    {
        ish3     = 3 * nbln.shift;
        shX      = shiftvec[ish3];
        shY      = shiftvec[ish3 + 1];
        shZ      = shiftvec[ish3 + 2];
        cj4_ind0 = nbln.cj4_ind_start;
        cj4_ind1 = nbln.cj4_ind_end;
        sci      = nbln.sci;
        vctot    = 0;
        Vvdwtot  = 0;

        if (nbln.shift == CENTRAL && nbl->cj4[cj4_ind0].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
        {
            /* We have the diagonal:
             * add the charge self-interaction energy term
             */
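            /* The sum of q_i^2 accumulated below is scaled to
             * -0.5*facel*c_rf*sum(q_i^2) for reaction-field, or to
             * -facel*ewaldcoeff_q/sqrt(pi)*sum(q_i^2) for Ewald.
             */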
            for (im = 0; im < c_nbnxnGpuNumClusterPerSupercluster; im++)
            {
                ci = sci * c_nbnxnGpuNumClusterPerSupercluster + im;
                for (ic = 0; ic < c_clSize; ic++)
                {
                    ia = ci * c_clSize + ic;
                    iq = x[ia * nbat->xstride + 3];
                    vctot += iq * iq;
                }
            }
            if (!bEwald)
            {
                vctot *= -facel * 0.5 * iconst->c_rf;
            }
            else
            {
                /* last factor 1/sqrt(pi) */
                vctot *= -facel * iconst->ewaldcoeff_q * M_1_SQRTPI;
            }
        }

        for (cj4_ind = cj4_ind0; (cj4_ind < cj4_ind1); cj4_ind++)
        {
            excl[0] = &nbl->excl[nbl->cj4[cj4_ind].imei[0].excl_ind];
            excl[1] = &nbl->excl[nbl->cj4[cj4_ind].imei[1].excl_ind];

            for (jm = 0; jm < c_nbnxnGpuJgroupSize; jm++)
            {
                cj = nbl->cj4[cj4_ind].cj[jm];

                for (im = 0; im < c_nbnxnGpuNumClusterPerSupercluster; im++)
                {
                    /* We're only using the first imask,
                     * but here imei[1].imask is identical.
                     */
                    if ((nbl->cj4[cj4_ind].imei[0].imask >> (jm * c_nbnxnGpuNumClusterPerSupercluster + im))
                        & 1)
                    {
                        gmx_bool within_rlist;

                        ci = sci * c_nbnxnGpuNumClusterPerSupercluster + im;

                        within_rlist = FALSE;
                        npair        = 0;
                        for (ic = 0; ic < c_clSize; ic++)
                        {
                            ia = ci * c_clSize + ic;

                            is  = ia * nbat->xstride;
                            ifs = ia * nbat->fstride;
                            ix  = shX + x[is + 0];
                            iy  = shY + x[is + 1];
                            iz  = shZ + x[is + 2];
                            iq  = facel * x[is + 3];
                            nti = ntype * 2 * type[ia];

                            fix = 0;
                            fiy = 0;
                            fiz = 0;

                            for (jc = 0; jc < c_clSize; jc++)
                            {
                                ja = cj * c_clSize + jc;

                                if (nbln.shift == CENTRAL && ci == cj && ja <= ia)
                                {
                                    continue;
                                }

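                                /* Look up the exclusion bit of this i,j atom pair. The exclusion
                                 * masks are stored per half of the j-cluster (c_nbnxnGpuClusterpairSplit
                                 * halves), with one bit per i-cluster/j-cluster pair of this cj4 group;
                                 * int_bit is 0 for excluded pairs and 1 otherwise.
                                 */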
                                constexpr int clusterPerSplit =
                                        c_nbnxnGpuClusterSize / c_nbnxnGpuClusterpairSplit;
                                int_bit = static_cast<real>(
                                        (excl[jc / clusterPerSplit]->pair[(jc & (clusterPerSplit - 1)) * c_clSize + ic]
                                         >> (jm * c_nbnxnGpuNumClusterPerSupercluster + im))
                                        & 1);

                                js  = ja * nbat->xstride;
                                jfs = ja * nbat->fstride;
                                jx  = x[js + 0];
                                jy  = x[js + 1];
                                jz  = x[js + 2];
                                dx  = ix - jx;
                                dy  = iy - jy;
                                dz  = iz - jz;
                                rsq = dx * dx + dy * dy + dz * dz;
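                                /* within_rlist records whether this half work-unit has any pair
                                 * inside the list cut-off; it only feeds the pruning statistics
                                 * printed with debug output. Pairs beyond the interaction cut-off
                                 * are skipped.
                                 */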
                                if (rsq < rlist2)
                                {
                                    within_rlist = TRUE;
                                }
                                if (rsq >= rcut2)
                                {
                                    continue;
                                }

                                if (type[ia] != ntype - 1 && type[ja] != ntype - 1)
                                {
                                    npair++;
                                }

                                // Ensure the distance does not become so small that r^-12 overflows
                                rsq = std::max(rsq, c_nbnxnMinDistanceSquared);

                                rinv   = gmx::invsqrt(rsq);
                                rinvsq = rinv * rinv;

                                qq = iq * x[js + 3];
                                if (!bEwald)
                                {
                                    /* Reaction-field */
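                                    /* V_RF = qq*(1/r + k_rf*r^2 - c_rf), F/r = qq*(1/r^3 - 2*k_rf);
                                     * int_bit zeroes the 1/r term for excluded pairs while keeping
                                     * the reaction-field contribution.
                                     */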
                                    krsq  = iconst->k_rf * rsq;
                                    fscal = qq * (int_bit * rinv - 2 * krsq) * rinvsq;
                                    if (stepWork.computeEnergy)
                                    {
                                        vcoul = qq * (int_bit * rinv + krsq - iconst->c_rf);
                                    }
                                }
                                else
                                {
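                                    /* Tabulated Ewald: the scalar force is linearly interpolated
                                     * from the Ewald force table, with index n0 = floor(r*scale)
                                     * and fraction eps within that table interval.
                                     */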
                                    r   = rsq * rinv;
                                    rt  = r * iconst->coulombEwaldTables->scale;
                                    n0  = static_cast<int>(rt);
                                    eps = rt - static_cast<real>(n0);

                                    fexcl = (1 - eps) * Ftab[n0] + eps * Ftab[n0 + 1];

                                    fscal = qq * (int_bit * rinvsq - fexcl) * rinv;

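                                    /* Energy: qq*(erfc(beta*r)/r - sh_ewald) for included pairs,
                                     * where beta = ewaldcoeff_q; excluded pairs (int_bit = 0) only
                                     * get the reciprocal-space correction -qq*erf(beta*r)/r.
                                     */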
                                    if (stepWork.computeEnergy)
                                    {
                                        vcoul = qq
                                                * ((int_bit - std::erf(iconst->ewaldcoeff_q * r)) * rinv
                                                   - int_bit * iconst->sh_ewald);
                                    }
                                }

                                if (rsq < rvdw2)
                                {
                                    tj = nti + 2 * type[ja];

                                    /* Vanilla Lennard-Jones cutoff */
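                                    /* nbfp stores the LJ parameters premultiplied as 6*C6 and 12*C12,
                                     * hence the 1/6 and 1/12 factors in the energies below and no
                                     * extra factors in the force.
                                     */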
                                    c6  = vdwparam[tj];
                                    c12 = vdwparam[tj + 1];

                                    rinvsix   = int_bit * rinvsq * rinvsq * rinvsq;
                                    Vvdw_disp = c6 * rinvsix;
                                    Vvdw_rep  = c12 * rinvsix * rinvsix;
                                    fscal += (Vvdw_rep - Vvdw_disp) * rinvsq;

                                    if (stepWork.computeEnergy)
                                    {
                                        vctot += vcoul;

                                        Vvdwtot +=
                                                (Vvdw_rep + int_bit * c12 * iconst->repulsion_shift.cpot) / 12
                                                - (Vvdw_disp
                                                   + int_bit * c6 * iconst->dispersion_shift.cpot)
                                                          / 6;
                                    }
                                }

                                tx  = fscal * dx;
                                ty  = fscal * dy;
                                tz  = fscal * dz;
                                fix = fix + tx;
                                fiy = fiy + ty;
                                fiz = fiz + tz;
                                f[jfs + 0] -= tx;
                                f[jfs + 1] -= ty;
                                f[jfs + 2] -= tz;
                            }

                            f[ifs + 0] += fix;
                            f[ifs + 1] += fiy;
                            f[ifs + 2] += fiz;
                            fshift[ish3]     = fshift[ish3] + fix;
                            fshift[ish3 + 1] = fshift[ish3 + 1] + fiy;
                            fshift[ish3 + 2] = fshift[ish3 + 2] + fiz;

                            /* Count in half work-units.
                             * In CUDA one work-unit is 2 warps.
                             */
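                            /* npair, nhwu and nhwu_pruned only feed the debug statistics
                             * printed at the end of the kernel.
                             */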
                            if ((ic + 1) % (c_clSize / c_nbnxnGpuClusterpairSplit) == 0)
                            {
                                npair_tot += npair;

                                nhwu++;
                                if (within_rlist)
                                {
                                    nhwu_pruned++;
                                }

                                within_rlist = FALSE;
                                npair        = 0;
                            }
                        }
                    }
                }
            }
        }

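        /* The reference kernel does not support energy groups; all energies go into group 0. */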
        if (stepWork.computeEnergy)
        {
            ggid       = 0;
            Vc[ggid]   = Vc[ggid] + vctot;
            Vvdw[ggid] = Vvdw[ggid] + Vvdwtot;
        }
    }

    if (debug)
    {
        fprintf(debug, "number of half %dx%d atom pairs: %d after pruning: %d fraction %4.2f\n",
                nbl->na_ci, nbl->na_ci, nhwu, nhwu_pruned, nhwu_pruned / static_cast<double>(nhwu));
        fprintf(debug, "generic kernel pair interactions:            %d\n",
                nhwu * nbl->na_ci / 2 * nbl->na_ci);
        fprintf(debug, "generic kernel post-prune pair interactions: %d\n",
                nhwu_pruned * nbl->na_ci / 2 * nbl->na_ci);
        fprintf(debug, "generic kernel non-zero pair interactions:   %d\n", npair_tot);
        fprintf(debug, "ratio non-zero/post-prune pair interactions: %4.2f\n",
                npair_tot / static_cast<double>(nhwu_pruned * gmx::exactDiv(nbl->na_ci, 2) * nbl->na_ci));
    }
}