2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
38 #include "gromacs/gmxlib/nrnb.h"
39 #include "gromacs/math/vectypes.h"
40 #include "gromacs/mdlib/force_flags.h"
41 #include "gromacs/mdlib/gmx_omp_nthreads.h"
42 #include "gromacs/mdtypes/enerdata.h"
43 #include "gromacs/mdtypes/interaction_const.h"
44 #include "gromacs/mdtypes/md_enums.h"
45 #include "gromacs/nbnxm/gpu_data_mgmt.h"
46 #include "gromacs/nbnxm/nbnxm.h"
47 #include "gromacs/nbnxm/nbnxm_simd.h"
48 #include "gromacs/nbnxm/kernels_reference/kernel_gpu_ref.h"
49 #include "gromacs/simd/simd.h"
50 #include "gromacs/utility/gmxassert.h"
51 #include "gromacs/utility/real.h"
53 #include "kernel_common.h"
54 #define INCLUDE_KERNELFUNCTION_TABLES
55 #include "gromacs/nbnxm/kernels_reference/kernel_ref.h"
56 #ifdef GMX_NBNXN_SIMD_2XNN
57 #include "gromacs/nbnxm/kernels_simd_2xmm/kernels.h"
59 #ifdef GMX_NBNXN_SIMD_4XN
60 #include "gromacs/nbnxm/kernels_simd_4xm/kernels.h"
62 #undef INCLUDE_FUNCTION_TABLES
64 /*! \brief Clears the energy group output buffers
66 * \param[in,out] out nbnxn kernel output struct
68 static void clearGroupEnergies(nbnxn_atomdata_output_t *out)
70 std::fill(out->Vvdw.begin(), out->Vvdw.end(), 0.0_real);
71 std::fill(out->Vc.begin(), out->Vc.end(), 0.0_real);
72 std::fill(out->VSvdw.begin(), out->VSvdw.end(), 0.0_real);
73 std::fill(out->VSc.begin(), out->VSc.end(), 0.0_real);
76 /*! \brief Reduce the group-pair energy buffers produced by a SIMD kernel
77 * to single terms in the output buffers.
79 * The SIMD kernels produce a large number of energy buffer in SIMD registers
80 * to avoid scattered reads and writes.
82 * \tparam unrollj The unroll size for j-particles in the SIMD kernel
83 * \param[in] numGroups The number of energy groups
84 * \param[in] numGroups_2log Log2 of numGroups, rounded up
85 * \param[in,out] out Struct with energy buffers
/* Reduces the SIMD-packed group-pair energy buffers (VSvdw/VSc) written by
 * the SIMD kernels into the scalar per-group-pair buffers (Vvdw/Vc). */
87 template <int unrollj> static void
88 reduceGroupEnergySimdBuffers(int numGroups,
/* NOTE(review): the `int numGroups_2log` parameter line is not visible in
 * this view (the body and the doxygen above both reference it) — confirm
 * against the full source. */
90 nbnxn_atomdata_output_t *out)
/* Each SIMD register holds data for unrollj j-particles; pairs of j-groups
 * are stored interleaved, hence the half unroll factor. */
92 const int unrollj_half = unrollj/2;
93 /* Energies are stored in SIMD registers with size 2^numGroups_2log */
94 const int numGroupsStorage = (1 << numGroups_2log);
/* Raw pointers: sources are the SIMD-packed buffers, destinations the
 * scalar group-pair buffers of the same output struct. */
96 const real * gmx_restrict vVdwSimd = out->VSvdw.data();
97 const real * gmx_restrict vCoulombSimd = out->VSc.data();
98 real * gmx_restrict vVdw = out->Vvdw.data();
99 real * gmx_restrict vCoulomb = out->Vc.data();
101 /* The size of the SIMD energy group buffer array is:
102 * numGroups*numGroups*numGroupsStorage*unrollj_half*simd_width
/* Accumulate every packed partial sum into its (i, j) group-pair slot */
104 for (int i = 0; i < numGroups; i++)
106 for (int j1 = 0; j1 < numGroups; j1++)
108 for (int j0 = 0; j0 < numGroups; j0++)
/* c = flat offset of the (i, j1, j0) entry in the packed buffer */
110 int c = ((i*numGroups + j1)*numGroupsStorage + j0)*unrollj_half*unrollj;
111 for (int s = 0; s < unrollj_half; s++)
/* Each iteration folds one interleaved (j0, j1) pair of packed partial
 * energies into the scalar buffers. NOTE(review): the per-iteration
 * advance of c is elided in this view — presumably `c += unrollj;`
 * given the buffer-size comment above; confirm in the full source. */
113 vVdw [i*numGroups + j0] += vVdwSimd [c + 0];
114 vVdw [i*numGroups + j1] += vVdwSimd [c + 1];
115 vCoulomb[i*numGroups + j0] += vCoulombSimd[c + 0];
116 vCoulomb[i*numGroups + j1] += vCoulombSimd[c + 1];
124 /*! \brief Dispatches the non-bonded N versus M atom cluster CPU kernels.
126 * OpenMP parallelization is performed within this function.
127 * Energy reduction, but not force and shift force reduction, is performed
128 * within this function.
130 * \param[in] nbvg The group (local/non-local) to compute interaction for
131 * \param[in,out] nbat The atomdata for the interactions
132 * \param[in] ic Non-bonded interaction constants
133 * \param[in] shiftVectors The PBC shift vectors
134 * \param[in] forceFlags Flags that tell what to compute
135 * \param[in] clearF Enum that tells if to clear the force output buffer
136 * \param[out] fshift Shift force output buffer
137 * \param[out] vCoulomb Output buffer for Coulomb energies
138 * \param[out] vVdw Output buffer for Van der Waals energies
/* NOTE(review): many lines (braces, else-branches, declarations and kernel
 * call-argument continuations) are elided in this view; the comments below
 * describe only what the visible code establishes. */
141 nbnxn_kernel_cpu(const nonbonded_verlet_group_t *nbvg,
142 nbnxn_atomdata_t *nbat,
143 const interaction_const_t &ic,
/* --- Step 1: select the Coulomb kernel flavor (coulkt) --- */
/* Reaction-field and plain cut-off electrostatics share kernel flavors */
153 if (EEL_RF(ic.eeltype) || ic.eeltype == eelCUT)
/* Ewald with tabulated exclusion correction */
159 if (nbvg->ewald_excl == ewaldexclTable)
/* Equal cut-offs vs twin-range (rcoulomb != rvdw) variants */
161 if (ic.rcoulomb == ic.rvdw)
167 coulkt = coulktTAB_TWIN;
/* Analytical Ewald exclusion correction */
172 if (ic.rcoulomb == ic.rvdw)
174 coulkt = coulktEWALD;
178 coulkt = coulktEWALD_TWIN;
183 const nbnxn_atomdata_t::Params &nbatParams = nbat->params();
/* --- Step 2: select the Van der Waals kernel flavor (vdwkt) --- */
186 if (ic.vdwtype == evdwCUT)
188 switch (ic.vdw_modifier)
/* Potential-shift: the kernel flavor also depends on the LJ
 * parameter combination rule stored in the atom data. */
191 case eintmodPOTSHIFT:
192 switch (nbatParams.comb_rule)
194 case ljcrGEOM: vdwkt = vdwktLJCUT_COMBGEOM; break;
195 case ljcrLB: vdwkt = vdwktLJCUT_COMBLB; break;
196 case ljcrNONE: vdwkt = vdwktLJCUT_COMBNONE; break;
198 GMX_RELEASE_ASSERT(false, "Unknown combination rule");
201 case eintmodFORCESWITCH:
202 vdwkt = vdwktLJFORCESWITCH;
204 case eintmodPOTSWITCH:
205 vdwkt = vdwktLJPOTSWITCH;
208 GMX_RELEASE_ASSERT(false, "Unsupported VdW interaction modifier");
/* LJ-PME: flavor depends on geometric vs Lorentz-Berthelot rules */
211 else if (ic.vdwtype == evdwPME)
213 if (ic.ljpme_comb_rule == eljpmeGEOM)
215 vdwkt = vdwktLJEWALDCOMBGEOM;
219 vdwkt = vdwktLJEWALDCOMBLB;
220 /* At setup we (should have) selected the C reference kernel */
221 GMX_RELEASE_ASSERT(nbvg->kernel_type == nbnxnk4x4_PlainC, "Only the C reference nbnxn SIMD kernel supports LJ-PME with LB combination rules");
226 GMX_RELEASE_ASSERT(false, "Unsupported VdW interaction type");
/* --- Step 3: run the selected kernel over all pairlists in parallel ---
 * There is one pairlist and one output buffer per OpenMP thread. */
229 int nnbl = nbvg->nbl_lists.nnbl;
230 NbnxnPairlistCpu * const * nbl = nbvg->nbl_lists.nbl;
232 int gmx_unused nthreads = gmx_omp_nthreads_get(emntNonbonded);
233 #pragma omp parallel for schedule(static) num_threads(nthreads)
234 for (int nb = 0; nb < nnbl; nb++)
236 // Presently, the kernels do not call C++ code that can throw,
237 // so no need for a try/catch pair in this OpenMP region.
238 nbnxn_atomdata_output_t *out = &nbat->out[nb];
/* Clear this thread's force output buffer when requested by the caller */
240 if (clearF == enbvClearFYes)
242 clear_f(nbat, nb, out->f.data());
/* With a single list the kernel can write shift forces directly into the
 * output; fshift_p's declaration/else-branch is elided in this view. */
246 if ((forceFlags & GMX_FORCE_VIRIAL) && nnbl == 1)
252 fshift_p = out->fshift.data();
254 if (clearF == enbvClearFYes)
256 clear_fshift(fshift_p);
/* Case A: no energies requested - dispatch the force-only kernels */
260 if (!(forceFlags & GMX_FORCE_ENERGY))
262 /* Don't calculate energies */
263 switch (nbvg->kernel_type)
265 case nbnxnk4x4_PlainC:
266 nbnxn_kernel_noener_ref[coulkt][vdwkt](nbl[nb], nbat,
272 #ifdef GMX_NBNXN_SIMD_2XNN
273 case nbnxnk4xN_SIMD_2xNN:
274 nbnxm_kernel_noener_simd_2xmm[coulkt][vdwkt](nbl[nb], nbat,
281 #ifdef GMX_NBNXN_SIMD_4XN
282 case nbnxnk4xN_SIMD_4xN:
283 nbnxm_kernel_noener_simd_4xm[coulkt][vdwkt](nbl[nb], nbat,
291 GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
/* Case B: energies requested with a single energy group (pair) */
294 else if (out->Vvdw.size() == 1)
296 /* A single energy group (pair) */
300 switch (nbvg->kernel_type)
302 case nbnxnk4x4_PlainC:
303 nbnxn_kernel_ener_ref[coulkt][vdwkt](nbl[nb], nbat,
311 #ifdef GMX_NBNXN_SIMD_2XNN
312 case nbnxnk4xN_SIMD_2xNN:
313 nbnxm_kernel_ener_simd_2xmm[coulkt][vdwkt](nbl[nb], nbat,
322 #ifdef GMX_NBNXN_SIMD_4XN
323 case nbnxnk4xN_SIMD_4xN:
324 nbnxm_kernel_ener_simd_4xm[coulkt][vdwkt](nbl[nb], nbat,
334 GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
/* Case C: multiple energy groups - clear the group buffers, run the
 * energy-group kernels, then (for SIMD kernels) reduce the packed
 * SIMD buffers into the scalar group-pair buffers. */
339 /* Calculate energy group contributions */
340 clearGroupEnergies(out);
/* unrollj records the j-cluster unroll of the kernel flavor chosen below,
 * needed to pick the matching reduceGroupEnergySimdBuffers instantiation */
344 switch (nbvg->kernel_type)
346 case nbnxnk4x4_PlainC:
347 unrollj = c_nbnxnCpuIClusterSize;
348 nbnxn_kernel_energrp_ref[coulkt][vdwkt](nbl[nb], nbat,
356 #ifdef GMX_NBNXN_SIMD_2XNN
357 case nbnxnk4xN_SIMD_2xNN:
358 unrollj = GMX_SIMD_REAL_WIDTH/2;
359 nbnxm_kernel_energrp_simd_2xmm[coulkt][vdwkt](nbl[nb], nbat,
368 #ifdef GMX_NBNXN_SIMD_4XN
369 case nbnxnk4xN_SIMD_4xN:
370 unrollj = GMX_SIMD_REAL_WIDTH;
371 nbnxm_kernel_energrp_simd_4xm[coulkt][vdwkt](nbl[nb], nbat,
381 GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
/* Only SIMD kernels wrote packed buffers; the C reference kernel
 * already accumulated into the scalar buffers. */
384 if (nbvg->kernel_type != nbnxnk4x4_PlainC)
389 reduceGroupEnergySimdBuffers<2>(nbatParams.nenergrp,
394 reduceGroupEnergySimdBuffers<4>(nbatParams.nenergrp,
399 reduceGroupEnergySimdBuffers<8>(nbatParams.nenergrp,
404 GMX_RELEASE_ASSERT(false, "Unsupported j-unroll size");
/* Finally, reduce the per-thread energies into the caller's buffers */
410 if (forceFlags & GMX_FORCE_ENERGY)
412 reduce_energies_over_lists(nbat, nnbl, vVdw, vCoulomb);
/* Accounts the flop cost of a nonbonded kernel call in the nrnb counters.
 * The counter index depends on the electrostatics flavor, the Ewald
 * exclusion treatment (CPU vs GPU) and whether energies were computed. */
416 static void accountFlops(t_nrnb *nrnb,
417 const nonbonded_verlet_t &nbv,
419 const interaction_const_t &ic,
422 const nonbonded_verlet_group_t &nbvg = nbv.grp[ilocality];
423 const bool usingGpuKernels = (nbvg.kernel_type == nbnxnk8x8x8_GPU);
/* Select the LJ+Coulomb kernel flop counter matching the Coulomb flavor */
425 int enr_nbnxn_kernel_ljc;
426 if (EEL_RF(ic.eeltype) || ic.eeltype == eelCUT)
428 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
/* Analytical Ewald exclusion correction: flag location differs between
 * CPU kernels (nbvg.ewald_excl) and GPU kernels (queried from gpu_nbv) */
430 else if ((!usingGpuKernels && nbvg.ewald_excl == ewaldexclAnalytical) ||
431 (usingGpuKernels && nbnxn_gpu_is_kernel_ewald_analytical(nbv.gpu_nbv)))
433 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
/* Remaining case: tabulated Ewald correction */
437 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
439 int enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
440 if (forceFlags & GMX_FORCE_ENERGY)
442 /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
443 enr_nbnxn_kernel_ljc += 1;
444 enr_nbnxn_kernel_lj += 1;
/* Charge the counters with atom-pair counts from the pairlists:
 * LJ+Coulomb pairs, LJ-only pairs, then Coulomb-only pairs. */
447 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
448 nbvg.nbl_lists.natpair_ljq);
449 inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
450 nbvg.nbl_lists.natpair_lj);
451 /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
452 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
453 nbvg.nbl_lists.natpair_q);
/* Extra per-pair costs for modified/LJ-PME interactions; the +1 selects
 * the energy-kernel variant of each ADD counter when energies were done */
455 const bool calcEnergy = ((forceFlags & GMX_FORCE_ENERGY) != 0);
456 if (ic.vdw_modifier == eintmodFORCESWITCH)
458 /* We add up the switch cost separately */
459 inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW + (calcEnergy ? 1 : 0),
460 nbvg.nbl_lists.natpair_ljq + nbvg.nbl_lists.natpair_lj);
462 if (ic.vdw_modifier == eintmodPOTSWITCH)
464 /* We add up the switch cost separately */
465 inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW + (calcEnergy ? 1 : 0),
466 nbvg.nbl_lists.natpair_ljq + nbvg.nbl_lists.natpair_lj);
468 if (ic.vdwtype == evdwPME)
470 /* We add up the LJ Ewald cost separately */
471 inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD + (calcEnergy ? 1 : 0),
472 nbvg.nbl_lists.natpair_ljq + nbvg.nbl_lists.natpair_lj);
/* Top-level nonbonded kernel dispatch for the given locality: routes to the
 * CPU cluster kernels, the asynchronous GPU launch, or the CPU reference
 * implementation of the GPU kernel layout, then accounts the flop cost.
 * NOTE(review): several parameter lines and call arguments are elided in
 * this view. */
476 void NbnxnDispatchKernel(nonbonded_verlet_t *nbv,
478 const interaction_const_t &ic,
482 gmx_enerdata_t *enerd,
485 const nonbonded_verlet_group_t &nbvg = nbv->grp[ilocality];
487 switch (nbvg.kernel_type)
/* All CPU cluster-pair kernel flavors share one dispatch routine */
489 case nbnxnk4x4_PlainC:
490 case nbnxnk4xN_SIMD_4xN:
491 case nbnxnk4xN_SIMD_2xNN:
492 nbnxn_kernel_cpu(&nbvg,
/* Energy outputs: short-range Coulomb, and Buckingham or LJ chosen by an
 * elided ternary condition — presumably the force-field VdW form. */
499 enerd->grpp.ener[egCOULSR],
501 enerd->grpp.ener[egBHAMSR] :
502 enerd->grpp.ener[egLJSR]);
/* Real GPU kernels: launch only; results are gathered elsewhere */
505 case nbnxnk8x8x8_GPU:
506 nbnxn_gpu_launch_kernel(nbv->gpu_nbv, forceFlags, ilocality);
/* CPU reference implementation of the GPU (8x8x8) pairlist layout */
509 case nbnxnk8x8x8_PlainC:
510 nbnxn_kernel_gpu_ref(nbvg.nbl_lists.nblGpu[0],
517 enerd->grpp.ener[egCOULSR],
519 enerd->grpp.ener[egBHAMSR] :
520 enerd->grpp.ener[egLJSR]);
524 GMX_RELEASE_ASSERT(false, "Invalid nonbonded kernel type passed!");
/* Record the flop cost of this call in the nrnb counters */
528 accountFlops(nrnb, *nbv, ilocality, ic, forceFlags);