*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "nb_free_energy.h"

#include "config.h"

#include <cmath>

#include <algorithm>
#include <set>

#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/nblist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/simd_math.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/fatalerror.h"

#include "nb_softcore.h"
-//! Enum for templating the soft-core treatment in the kernel
-enum class SoftCoreTreatment
+//! Scalar (non-SIMD) data types.
+struct ScalarDataTypes
{
- None, //!< No soft-core
- RPower6, //!< Soft-core with r-power = 6
- RPower48 //!< Soft-core with r-power = 48
+ using RealType = real; //!< The data type to use as real.
+ using IntType = int; //!< The data type to use as int.
+ using BoolType = bool; //!< The data type to use as bool for real value comparison.
+ static constexpr int simdRealWidth = 1; //!< The width of the RealType.
+ static constexpr int simdIntWidth = 1; //!< The width of the IntType.
};
#if GMX_SIMD_HAVE_REAL && GMX_SIMD_HAVE_INT32_ARITHMETICS
//! SIMD data types, used to instantiate the SIMD flavor of the kernel.
struct SimdDataTypes
{
    using RealType = gmx::SimdReal;  //!< The data type to use as real.
    using IntType  = gmx::SimdInt32; //!< The data type to use as int.
    using BoolType = gmx::SimdBool;  //!< The data type to use as bool for real value comparison.
    static constexpr int simdRealWidth = GMX_SIMD_REAL_WIDTH; //!< The width of the RealType.
    // The integer width matches the real width of the active precision mode.
#    if GMX_SIMD_HAVE_DOUBLE && GMX_DOUBLE
    static constexpr int simdIntWidth = GMX_SIMD_DINT32_WIDTH; //!< The width of the IntType.
#    else
    static constexpr int simdIntWidth = GMX_SIMD_FINT32_WIDTH; //!< The width of the IntType.
#    endif
};
#endif
-//! This treatment requires double precision for some computations.
-template<>
-struct SoftCoreReal<SoftCoreTreatment::RPower48>
-{
- //! Real type for soft-core calculations
- using Real = double;
-};
+/*! \brief Lower limit for square interaction distances in nonbonded kernels.
+ *
+ * This is a mimimum on r^2 to avoid overflows when computing r^6.
+ * This will only affect results for soft-cored interaction at distances smaller
+ * than 1e-6 and will limit extremely high foreign energies for overlapping atoms.
+ * Note that we could use a somewhat smaller minimum in double precision.
+ * But because invsqrt in double precision can use single precision, this number
+ * can not be much smaller, we use the same number for simplicity.
+ */
+constexpr real c_minDistanceSquared = 1.0e-12_real;
-//! Computes r^(1/p) and 1/r^(1/p) for the standard p=6
-template<SoftCoreTreatment softCoreTreatment>
-static inline void pthRoot(const real r, real* pthRoot, real* invPthRoot)
-{
- *invPthRoot = gmx::invsqrt(std::cbrt(r));
- *pthRoot = 1 / (*invPthRoot);
-}
+/*! \brief Higher limit for r^-6 used for Lennard-Jones interactions
+ *
+ * This is needed to avoid overflow of LJ energy and force terms for excluded
+ * atoms and foreign energies of hard-core states of overlapping atoms.
+ * Note that in single precision this value leaves room for C12 coefficients up to 3.4e8.
+ */
+constexpr real c_maxRInvSix = 1.0e15_real;
-// We need a double version to make the specialization below work
-#if !GMX_DOUBLE
-//! Computes r^(1/p) and 1/r^(1/p) for the standard p=6
-template<SoftCoreTreatment softCoreTreatment>
-static inline void pthRoot(const double r, real* pthRoot, double* invPthRoot)
+template<bool computeForces, class RealType>
+static inline void
+pmeCoulombCorrectionVF(const RealType rSq, const real beta, RealType* pot, RealType gmx_unused* force)
{
- *invPthRoot = gmx::invsqrt(std::cbrt(r));
- *pthRoot = 1 / (*invPthRoot);
+ const RealType brsq = rSq * beta * beta;
+ if constexpr (computeForces)
+ {
+ *force = -brsq * beta * gmx::pmeForceCorrection(brsq);
+ }
+ *pot = beta * gmx::pmePotentialCorrection(brsq);
}
-#endif
-//! Computes r^(1/p) and 1/r^(1/p) for p=48
-template<>
-inline void pthRoot<SoftCoreTreatment::RPower48>(const double r, real* pthRoot, double* invPthRoot)
+template<bool computeForces, class RealType, class BoolType>
+static inline void pmeLJCorrectionVF(const RealType rInv,
+ const RealType rSq,
+ const real ewaldLJCoeffSq,
+ const real ewaldLJCoeffSixDivSix,
+ RealType* pot,
+ RealType gmx_unused* force,
+ const BoolType mask,
+ const BoolType bIiEqJnr)
{
- *pthRoot = std::pow(r, 1.0 / 48.0);
- *invPthRoot = 1 / (*pthRoot);
+ // We mask rInv to get zero force and potential for masked out pair interactions
+ const RealType rInvSq = rInv * rInv;
+ const RealType rInvSix = rInvSq * rInvSq * rInvSq;
+ // Mask rSq to avoid underflow in exp()
+ const RealType coeffSqRSq = ewaldLJCoeffSq * gmx::selectByMask(rSq, mask);
+ const RealType expNegCoeffSqRSq = gmx::exp(-coeffSqRSq);
+ const RealType poly = 1.0_real + coeffSqRSq + 0.5_real * coeffSqRSq * coeffSqRSq;
+ if constexpr (computeForces)
+ {
+ *force = rInvSix - expNegCoeffSqRSq * (rInvSix * poly + ewaldLJCoeffSixDivSix);
+ *force = *force * rInvSq;
+ }
+ // The self interaction is the limit for r -> 0 which we need to compute separately
+ *pot = gmx::blend(
+ rInvSix * (1.0_real - expNegCoeffSqRSq * poly), 0.5_real * ewaldLJCoeffSixDivSix, bIiEqJnr);
}
-template<SoftCoreTreatment softCoreTreatment>
-static inline real calculateSigmaPow(const real sigma6)
+//! Computes r^(1/6) and 1/r^(1/6)
+template<class RealType>
+static inline void sixthRoot(const RealType r, RealType* sixthRoot, RealType* invSixthRoot)
{
- if (softCoreTreatment == SoftCoreTreatment::RPower6)
- {
- return sigma6;
- }
- else
- {
- real sigmaPow = sigma6 * sigma6; /* sigma^12 */
- sigmaPow = sigmaPow * sigmaPow; /* sigma^24 */
- sigmaPow = sigmaPow * sigmaPow; /* sigma^48 */
- return (sigmaPow);
- }
+ RealType cbrtRes = gmx::cbrt(r);
+ *invSixthRoot = gmx::invsqrt(cbrtRes);
+ *sixthRoot = gmx::inv(*invSixthRoot);
}
//! Computes (1/r)^6 from the soft-cored inverse distance factor rInvV
template<class RealType>
static inline RealType calculateRinv6(const RealType rInvV)
{
    RealType rInv6 = rInvV * rInvV;
    return (rInv6 * rInv6 * rInv6);
}
//! Computes the dispersion term c6 * r^-6
template<class RealType>
static inline RealType calculateVdw6(const RealType c6, const RealType rInv6)
{
    return (c6 * rInv6);
}
//! Computes the repulsion term c12 * r^-12
template<class RealType>
static inline RealType calculateVdw12(const RealType c12, const RealType rInv6)
{
    return (c12 * rInv6 * rInv6);
}
/* reaction-field electrostatics */
-template<class SCReal>
-static inline SCReal
-reactionFieldScalarForce(const real qq, const real rinv, const SCReal r, const real krf, const real two)
+template<class RealType>
+static inline RealType reactionFieldScalarForce(const RealType qq,
+ const RealType rInv,
+ const RealType r,
+ const real krf,
+ const real two)
{
- return (qq * (rinv - two * krf * r * r));
+ return (qq * (rInv - two * krf * r * r));
}
-template<class SCReal>
-static inline real
-reactionFieldPotential(const real qq, const real rinv, const SCReal r, const real krf, const real potentialShift)
+template<class RealType>
+static inline RealType reactionFieldPotential(const RealType qq,
+ const RealType rInv,
+ const RealType r,
+ const real krf,
+ const real potentialShift)
{
- return (qq * (rinv + krf * r * r - potentialShift));
+ return (qq * (rInv + krf * r * r - potentialShift));
}
/* Ewald electrostatics */
//! Scalar Ewald real-space force term: coulomb / r
template<class RealType>
static inline RealType ewaldScalarForce(const RealType coulomb, const RealType rInv)
{
    return (coulomb * rInv);
}
-static inline real ewaldPotential(const real coulomb, const real rinv, const real potentialShift)
+template<class RealType>
+static inline RealType ewaldPotential(const RealType coulomb, const RealType rInv, const real potentialShift)
{
- return (coulomb * (rinv - potentialShift));
+ return (coulomb * (rInv - potentialShift));
}
/* cutoff LJ */
//! Scalar Lennard-Jones force term: repulsion minus dispersion
template<class RealType>
static inline RealType lennardJonesScalarForce(const RealType v6, const RealType v12)
{
    return (v12 - v6);
}
-static inline real lennardJonesPotential(const real v6,
- const real v12,
- const real c6,
- const real c12,
- const real repulsionShift,
- const real dispersionShift,
- const real onesixth,
- const real onetwelfth)
+template<class RealType>
+static inline RealType lennardJonesPotential(const RealType v6,
+ const RealType v12,
+ const RealType c6,
+ const RealType c12,
+ const real repulsionShift,
+ const real dispersionShift,
+ const real oneSixth,
+ const real oneTwelfth)
{
- return ((v12 + c12 * repulsionShift) * onetwelfth - (v6 + c6 * dispersionShift) * onesixth);
+ return ((v12 + c12 * repulsionShift) * oneTwelfth - (v6 + c6 * dispersionShift) * oneSixth);
}
/* Ewald LJ */
-static inline real ewaldLennardJonesGridSubtract(const real c6grid, const real potentialShift, const real onesixth)
+template<class RealType>
+static inline RealType ewaldLennardJonesGridSubtract(const RealType c6grid,
+ const real potentialShift,
+ const real oneSixth)
{
- return (c6grid * potentialShift * onesixth);
+ return (c6grid * potentialShift * oneSixth);
}
/* LJ Potential switch */
-template<class SCReal>
-static inline SCReal potSwitchScalarForceMod(const SCReal fScalarInp,
- const real potential,
- const real sw,
- const SCReal r,
- const real rVdw,
- const real dsw,
- const real zero)
+template<class RealType, class BoolType>
+static inline RealType potSwitchScalarForceMod(const RealType fScalarInp,
+ const RealType potential,
+ const RealType sw,
+ const RealType r,
+ const RealType dsw,
+ const BoolType mask)
{
- if (r < rVdw)
- {
- SCReal fScalar = fScalarInp * sw - r * potential * dsw;
- return (fScalar);
- }
- return (zero);
+ /* The mask should select on rV < rVdw */
+ return (gmx::selectByMask(fScalarInp * sw - r * potential * dsw, mask));
}
-template<class SCReal>
-static inline real
-potSwitchPotentialMod(const real potentialInp, const real sw, const SCReal r, const real rVdw, const real zero)
+template<class RealType, class BoolType>
+static inline RealType potSwitchPotentialMod(const RealType potentialInp, const RealType sw, const BoolType mask)
{
- if (r < rVdw)
- {
- real potential = potentialInp * sw;
- return (potential);
- }
- return (zero);
+ /* The mask should select on rV < rVdw */
+ return (gmx::selectByMask(potentialInp * sw, mask));
}
//! Templated free-energy non-bonded kernel
-template<SoftCoreTreatment softCoreTreatment, bool scLambdasOrAlphasDiffer, bool vdwInteractionTypeIsEwald, bool elecInteractionTypeIsEwald, bool vdwModifierIsPotSwitch>
-static void nb_free_energy_kernel(const t_nblist* gmx_restrict nlist,
- rvec* gmx_restrict xx,
- gmx::ForceWithShiftForces* forceWithShiftForces,
- const t_forcerec* gmx_restrict fr,
- const t_mdatoms* gmx_restrict mdatoms,
- nb_kernel_data_t* gmx_restrict kernel_data,
- t_nrnb* gmx_restrict nrnb)
+template<typename DataTypes, KernelSoftcoreType softcoreType, bool scLambdasOrAlphasDiffer, bool vdwInteractionTypeIsEwald, bool elecInteractionTypeIsEwald, bool vdwModifierIsPotSwitch, bool computeForces>
+static void nb_free_energy_kernel(const t_nblist& nlist,
+ const gmx::ArrayRefWithPadding<const gmx::RVec>& coords,
+ const int ntype,
+ const real rlist,
+ const interaction_const_t& ic,
+ gmx::ArrayRef<const gmx::RVec> shiftvec,
+ gmx::ArrayRef<const real> nbfp,
+ gmx::ArrayRef<const real> gmx_unused nbfp_grid,
+ gmx::ArrayRef<const real> chargeA,
+ gmx::ArrayRef<const real> chargeB,
+ gmx::ArrayRef<const int> typeA,
+ gmx::ArrayRef<const int> typeB,
+ int flags,
+ gmx::ArrayRef<const real> lambda,
+ t_nrnb* gmx_restrict nrnb,
+ gmx::ArrayRefWithPadding<gmx::RVec> threadForceBuffer,
+ rvec gmx_unused* threadForceShiftBuffer,
+ gmx::ArrayRef<real> threadVc,
+ gmx::ArrayRef<real> threadVv,
+ gmx::ArrayRef<real> threadDvdl)
{
- using SCReal = typename SoftCoreReal<softCoreTreatment>::Real;
-
- constexpr bool useSoftCore = (softCoreTreatment != SoftCoreTreatment::None);
-
#define STATE_A 0
#define STATE_B 1
#define NSTATES 2
- constexpr real onetwelfth = 1.0 / 12.0;
- constexpr real onesixth = 1.0 / 6.0;
- constexpr real zero = 0.0;
- constexpr real half = 0.5;
- constexpr real one = 1.0;
- constexpr real two = 2.0;
- constexpr real six = 6.0;
+ using RealType = typename DataTypes::RealType;
+ using IntType = typename DataTypes::IntType;
+ using BoolType = typename DataTypes::BoolType;
- /* Extract pointer to non-bonded interaction constants */
- const interaction_const_t* ic = fr->ic;
+ constexpr real oneTwelfth = 1.0_real / 12.0_real;
+ constexpr real oneSixth = 1.0_real / 6.0_real;
+ constexpr real zero = 0.0_real;
+ constexpr real half = 0.5_real;
+ constexpr real one = 1.0_real;
+ constexpr real two = 2.0_real;
+ constexpr real six = 6.0_real;
// Extract pair list data
- const int nri = nlist->nri;
- const int* iinr = nlist->iinr;
- const int* jindex = nlist->jindex;
- const int* jjnr = nlist->jjnr;
- const int* shift = nlist->shift;
- const int* gid = nlist->gid;
-
- const real* shiftvec = fr->shift_vec[0];
- const real* chargeA = mdatoms->chargeA;
- const real* chargeB = mdatoms->chargeB;
- real* Vc = kernel_data->energygrp_elec;
- const int* typeA = mdatoms->typeA;
- const int* typeB = mdatoms->typeB;
- const int ntype = fr->ntype;
- const real* nbfp = fr->nbfp;
- const real* nbfp_grid = fr->ljpme_c6grid;
- real* Vv = kernel_data->energygrp_vdw;
- const real lambda_coul = kernel_data->lambda[efptCOUL];
- const real lambda_vdw = kernel_data->lambda[efptVDW];
- real* dvdl = kernel_data->dvdl;
- const real alpha_coul = fr->sc_alphacoul;
- const real alpha_vdw = fr->sc_alphavdw;
- const real lam_power = fr->sc_power;
- const real sigma6_def = fr->sc_sigma6_def;
- const real sigma6_min = fr->sc_sigma6_min;
- const bool doForces = ((kernel_data->flags & GMX_NONBONDED_DO_FORCE) != 0);
- const bool doShiftForces = ((kernel_data->flags & GMX_NONBONDED_DO_SHIFTFORCE) != 0);
- const bool doPotential = ((kernel_data->flags & GMX_NONBONDED_DO_POTENTIAL) != 0);
+ const int nri = nlist.nri;
+ gmx::ArrayRef<const int> iinr = nlist.iinr;
+ gmx::ArrayRef<const int> jindex = nlist.jindex;
+ gmx::ArrayRef<const int> jjnr = nlist.jjnr;
+ gmx::ArrayRef<const int> shift = nlist.shift;
+ gmx::ArrayRef<const int> gid = nlist.gid;
+
+ const real lambda_coul = lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)];
+ const real lambda_vdw = lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)];
+
+ // Extract softcore parameters
+ const auto& scParams = *ic.softCoreParameters;
+ const real lam_power = scParams.lambdaPower;
+ const real gmx_unused alpha_coul = scParams.alphaCoulomb;
+ const real gmx_unused alpha_vdw = scParams.alphaVdw;
+ const real gmx_unused sigma6_def = scParams.sigma6WithInvalidSigma;
+ const real gmx_unused sigma6_min = scParams.sigma6Minimum;
+
+ const real gmx_unused gapsysScaleLinpointCoul = scParams.gapsysScaleLinpointCoul;
+ const real gmx_unused gapsysScaleLinpointVdW = scParams.gapsysScaleLinpointVdW;
+ const real gmx_unused gapsysSigma6VdW = scParams.gapsysSigma6VdW;
+
+ const bool gmx_unused doShiftForces = ((flags & GMX_NONBONDED_DO_SHIFTFORCE) != 0);
+ const bool doPotential = ((flags & GMX_NONBONDED_DO_POTENTIAL) != 0);
// Extract data from interaction_const_t
- const real facel = ic->epsfac;
- const real rcoulomb = ic->rcoulomb;
- const real krf = ic->k_rf;
- const real crf = ic->c_rf;
- const real sh_lj_ewald = ic->sh_lj_ewald;
- const real rvdw = ic->rvdw;
- const real dispersionShift = ic->dispersion_shift.cpot;
- const real repulsionShift = ic->repulsion_shift.cpot;
+ const real facel = ic.epsfac;
+ const real rCoulomb = ic.rcoulomb;
+ const real krf = ic.reactionFieldCoefficient;
+ const real crf = ic.reactionFieldShift;
+ const real gmx_unused shLjEwald = ic.sh_lj_ewald;
+ const real rVdw = ic.rvdw;
+ const real dispersionShift = ic.dispersion_shift.cpot;
+ const real repulsionShift = ic.repulsion_shift.cpot;
+ const real ewaldBeta = ic.ewaldcoeff_q;
+ real gmx_unused ewaldLJCoeffSq;
+ real gmx_unused ewaldLJCoeffSixDivSix;
+ if constexpr (vdwInteractionTypeIsEwald)
+ {
+ ewaldLJCoeffSq = ic.ewaldcoeff_lj * ic.ewaldcoeff_lj;
+ ewaldLJCoeffSixDivSix = ewaldLJCoeffSq * ewaldLJCoeffSq * ewaldLJCoeffSq / six;
+ }
// Note that the nbnxm kernels do not support Coulomb potential switching at all
- GMX_ASSERT(ic->coulomb_modifier != eintmodPOTSWITCH,
+ GMX_ASSERT(ic.coulomb_modifier != InteractionModifiers::PotSwitch,
"Potential switching is not supported for Coulomb with FEP");
- real vdw_swV3, vdw_swV4, vdw_swV5, vdw_swF2, vdw_swF3, vdw_swF4;
- if (vdwModifierIsPotSwitch)
+ const real rVdwSwitch = ic.rvdw_switch;
+ real gmx_unused vdw_swV3, vdw_swV4, vdw_swV5, vdw_swF2, vdw_swF3, vdw_swF4;
+ if constexpr (vdwModifierIsPotSwitch)
{
- const real d = ic->rvdw - ic->rvdw_switch;
- vdw_swV3 = -10.0 / (d * d * d);
- vdw_swV4 = 15.0 / (d * d * d * d);
- vdw_swV5 = -6.0 / (d * d * d * d * d);
- vdw_swF2 = -30.0 / (d * d * d);
- vdw_swF3 = 60.0 / (d * d * d * d);
- vdw_swF4 = -30.0 / (d * d * d * d * d);
+ const real d = rVdw - rVdwSwitch;
+ vdw_swV3 = -10.0_real / (d * d * d);
+ vdw_swV4 = 15.0_real / (d * d * d * d);
+ vdw_swV5 = -6.0_real / (d * d * d * d * d);
+ vdw_swF2 = -30.0_real / (d * d * d);
+ vdw_swF3 = 60.0_real / (d * d * d * d);
+ vdw_swF4 = -30.0_real / (d * d * d * d * d);
}
else
{
/* Avoid warnings from stupid compilers (looking at you, Clang!) */
- vdw_swV3 = vdw_swV4 = vdw_swV5 = vdw_swF2 = vdw_swF3 = vdw_swF4 = 0.0;
+ vdw_swV3 = vdw_swV4 = vdw_swV5 = vdw_swF2 = vdw_swF3 = vdw_swF4 = zero;
}
- int icoul;
- if (ic->eeltype == eelCUT || EEL_RF(ic->eeltype))
+ NbkernelElecType icoul;
+ if (ic.eeltype == CoulombInteractionType::Cut || EEL_RF(ic.eeltype))
{
- icoul = GMX_NBKERNEL_ELEC_REACTIONFIELD;
+ icoul = NbkernelElecType::ReactionField;
}
else
{
- icoul = GMX_NBKERNEL_ELEC_NONE;
+ icoul = NbkernelElecType::None;
}
- real rcutoff_max2 = std::max(ic->rcoulomb, ic->rvdw);
- rcutoff_max2 = rcutoff_max2 * rcutoff_max2;
+ real rcutoff_max2 = std::max(ic.rcoulomb, ic.rvdw);
+ rcutoff_max2 = rcutoff_max2 * rcutoff_max2;
+ const real gmx_unused rCutoffCoul = ic.rcoulomb;
- const real* tab_ewald_F_lj = nullptr;
- const real* tab_ewald_V_lj = nullptr;
- const real* ewtab = nullptr;
- real ewtabscale = 0;
- real ewtabhalfspace = 0;
- real sh_ewald = 0;
- if (elecInteractionTypeIsEwald || vdwInteractionTypeIsEwald)
+ real gmx_unused sh_ewald = 0;
+ if constexpr (elecInteractionTypeIsEwald || vdwInteractionTypeIsEwald)
{
- const auto& tables = *ic->coulombEwaldTables;
- sh_ewald = ic->sh_ewald;
- ewtab = tables.tableFDV0.data();
- ewtabscale = tables.scale;
- ewtabhalfspace = half / ewtabscale;
- tab_ewald_F_lj = tables.tableF.data();
- tab_ewald_V_lj = tables.tableV.data();
+ sh_ewald = ic.sh_ewald;
}
/* For Ewald/PME interactions we cannot easily apply the soft-core component to
GMX_RELEASE_ASSERT(!(vdwInteractionTypeIsEwald && vdwModifierIsPotSwitch),
"Can not apply soft-core to switched Ewald potentials");
- SCReal dvdl_coul = 0; /* Needs double for sc_power==48 */
- SCReal dvdl_vdw = 0; /* Needs double for sc_power==48 */
+ const RealType minDistanceSquared(c_minDistanceSquared);
+ const RealType maxRInvSix(c_maxRInvSix);
+ const RealType gmx_unused floatMin(GMX_FLOAT_MIN);
+
+ RealType dvdlCoul(zero);
+ RealType dvdlVdw(zero);
/* Lambda factor for state A, 1-lambda*/
real LFC[NSTATES], LFV[NSTATES];
/*derivative of the lambda factor for state A and B */
real DLF[NSTATES];
- DLF[STATE_A] = -1;
- DLF[STATE_B] = 1;
+ DLF[STATE_A] = -one;
+ DLF[STATE_B] = one;
- real lfac_coul[NSTATES], dlfac_coul[NSTATES], lfac_vdw[NSTATES], dlfac_vdw[NSTATES];
- constexpr real sc_r_power = (softCoreTreatment == SoftCoreTreatment::RPower48 ? 48.0_real : 6.0_real);
+ real gmx_unused lFacCoul[NSTATES], dlFacCoul[NSTATES], lFacVdw[NSTATES], dlFacVdw[NSTATES];
+ constexpr real sc_r_power = six;
for (int i = 0; i < NSTATES; i++)
{
- lfac_coul[i] = (lam_power == 2 ? (1 - LFC[i]) * (1 - LFC[i]) : (1 - LFC[i]));
- dlfac_coul[i] = DLF[i] * lam_power / sc_r_power * (lam_power == 2 ? (1 - LFC[i]) : 1);
- lfac_vdw[i] = (lam_power == 2 ? (1 - LFV[i]) * (1 - LFV[i]) : (1 - LFV[i]));
- dlfac_vdw[i] = DLF[i] * lam_power / sc_r_power * (lam_power == 2 ? (1 - LFV[i]) : 1);
+ lFacCoul[i] = (lam_power == 2 ? (1 - LFC[i]) * (1 - LFC[i]) : (1 - LFC[i]));
+ dlFacCoul[i] = DLF[i] * lam_power / sc_r_power * (lam_power == 2 ? (1 - LFC[i]) : 1);
+ lFacVdw[i] = (lam_power == 2 ? (1 - LFV[i]) * (1 - LFV[i]) : (1 - LFV[i]));
+ dlFacVdw[i] = DLF[i] * lam_power / sc_r_power * (lam_power == 2 ? (1 - LFV[i]) : 1);
+ }
+
+ // We need pointers to real for SIMD access
+ const real* gmx_restrict x = coords.paddedConstArrayRef().data()[0];
+ real* gmx_restrict forceRealPtr;
+ if constexpr (computeForces)
+ {
+ GMX_ASSERT(nri == 0 || !threadForceBuffer.empty(), "need a valid threadForceBuffer");
+
+ forceRealPtr = threadForceBuffer.paddedArrayRef().data()[0];
+
+ if (doShiftForces)
+ {
+ GMX_ASSERT(threadForceShiftBuffer != nullptr, "need a valid threadForceShiftBuffer");
+ }
}
- // TODO: We should get rid of using pointers to real
- const real* x = xx[0];
- real* gmx_restrict f = &(forceWithShiftForces->force()[0][0]);
- real* gmx_restrict fshift = &(forceWithShiftForces->shiftForces()[0][0]);
+ const real rlistSquared = gmx::square(rlist);
+
+ bool haveExcludedPairsBeyondRlist = false;
for (int n = 0; n < nri; n++)
{
- int npair_within_cutoff = 0;
-
- const int is3 = 3 * shift[n];
- const real shX = shiftvec[is3];
- const real shY = shiftvec[is3 + 1];
- const real shZ = shiftvec[is3 + 2];
- const int nj0 = jindex[n];
- const int nj1 = jindex[n + 1];
- const int ii = iinr[n];
- const int ii3 = 3 * ii;
- const real ix = shX + x[ii3 + 0];
- const real iy = shY + x[ii3 + 1];
- const real iz = shZ + x[ii3 + 2];
- const real iqA = facel * chargeA[ii];
- const real iqB = facel * chargeB[ii];
- const int ntiA = 2 * ntype * typeA[ii];
- const int ntiB = 2 * ntype * typeB[ii];
- real vctot = 0;
- real vvtot = 0;
- real fix = 0;
- real fiy = 0;
- real fiz = 0;
-
- for (int k = nj0; k < nj1; k++)
+ bool havePairsWithinCutoff = false;
+
+ const int is = shift[n];
+ const real shX = shiftvec[is][XX];
+ const real shY = shiftvec[is][YY];
+ const real shZ = shiftvec[is][ZZ];
+ const int nj0 = jindex[n];
+ const int nj1 = jindex[n + 1];
+ const int ii = iinr[n];
+ const int ii3 = 3 * ii;
+ const real ix = shX + x[ii3 + 0];
+ const real iy = shY + x[ii3 + 1];
+ const real iz = shZ + x[ii3 + 2];
+ const real iqA = facel * chargeA[ii];
+ const real iqB = facel * chargeB[ii];
+ const int ntiA = ntype * typeA[ii];
+ const int ntiB = ntype * typeB[ii];
+ RealType vCTot(0);
+ RealType vVTot(0);
+ RealType fIX(0);
+ RealType fIY(0);
+ RealType fIZ(0);
+
+#if GMX_SIMD_HAVE_REAL
+ alignas(GMX_SIMD_ALIGNMENT) int preloadIi[DataTypes::simdRealWidth];
+ alignas(GMX_SIMD_ALIGNMENT) int gmx_unused preloadIs[DataTypes::simdRealWidth];
+#else
+ int preloadIi[DataTypes::simdRealWidth];
+ int gmx_unused preloadIs[DataTypes::simdRealWidth];
+#endif
+ for (int i = 0; i < DataTypes::simdRealWidth; i++)
{
- int tj[NSTATES];
- const int jnr = jjnr[k];
- const int j3 = 3 * jnr;
- real c6[NSTATES], c12[NSTATES], qq[NSTATES], Vcoul[NSTATES], Vvdw[NSTATES];
- real r, rinv, rp, rpm2;
- real alpha_vdw_eff, alpha_coul_eff, sigma_pow[NSTATES];
- const real dx = ix - x[j3];
- const real dy = iy - x[j3 + 1];
- const real dz = iz - x[j3 + 2];
- const real rsq = dx * dx + dy * dy + dz * dz;
- SCReal FscalC[NSTATES], FscalV[NSTATES]; /* Needs double for sc_power==48 */
-
- if (rsq >= rcutoff_max2)
+ preloadIi[i] = ii;
+ preloadIs[i] = shift[n];
+ }
+ IntType ii_s = gmx::load<IntType>(preloadIi);
+
+ for (int k = nj0; k < nj1; k += DataTypes::simdRealWidth)
+ {
+ RealType r, rInv;
+
+#if GMX_SIMD_HAVE_REAL
+ alignas(GMX_SIMD_ALIGNMENT) real preloadPairIsValid[DataTypes::simdRealWidth];
+ alignas(GMX_SIMD_ALIGNMENT) real preloadPairIncluded[DataTypes::simdRealWidth];
+ alignas(GMX_SIMD_ALIGNMENT) int32_t preloadJnr[DataTypes::simdRealWidth];
+ alignas(GMX_SIMD_ALIGNMENT) int32_t typeIndices[NSTATES][DataTypes::simdRealWidth];
+ alignas(GMX_SIMD_ALIGNMENT) real preloadQq[NSTATES][DataTypes::simdRealWidth];
+ alignas(GMX_SIMD_ALIGNMENT) real gmx_unused preloadSigma6[NSTATES][DataTypes::simdRealWidth];
+ alignas(GMX_SIMD_ALIGNMENT) real gmx_unused preloadAlphaVdwEff[DataTypes::simdRealWidth];
+ alignas(GMX_SIMD_ALIGNMENT) real gmx_unused preloadAlphaCoulEff[DataTypes::simdRealWidth];
+ alignas(GMX_SIMD_ALIGNMENT)
+ real gmx_unused preloadGapsysScaleLinpointVdW[DataTypes::simdRealWidth];
+ alignas(GMX_SIMD_ALIGNMENT)
+ real gmx_unused preloadGapsysScaleLinpointCoul[DataTypes::simdRealWidth];
+ alignas(GMX_SIMD_ALIGNMENT)
+ real gmx_unused preloadGapsysSigma6VdW[NSTATES][DataTypes::simdRealWidth];
+ alignas(GMX_SIMD_ALIGNMENT) real preloadLjPmeC6Grid[NSTATES][DataTypes::simdRealWidth];
+#else
+ real preloadPairIsValid[DataTypes::simdRealWidth];
+ real preloadPairIncluded[DataTypes::simdRealWidth];
+ int preloadJnr[DataTypes::simdRealWidth];
+ int typeIndices[NSTATES][DataTypes::simdRealWidth];
+ real preloadQq[NSTATES][DataTypes::simdRealWidth];
+ real gmx_unused preloadSigma6[NSTATES][DataTypes::simdRealWidth];
+ real gmx_unused preloadAlphaVdwEff[DataTypes::simdRealWidth];
+ real gmx_unused preloadAlphaCoulEff[DataTypes::simdRealWidth];
+ real gmx_unused preloadGapsysScaleLinpointVdW[DataTypes::simdRealWidth];
+ real gmx_unused preloadGapsysScaleLinpointCoul[DataTypes::simdRealWidth];
+ real gmx_unused preloadGapsysSigma6VdW[NSTATES][DataTypes::simdRealWidth];
+ real preloadLjPmeC6Grid[NSTATES][DataTypes::simdRealWidth];
+#endif
+ for (int j = 0; j < DataTypes::simdRealWidth; j++)
+ {
+ if (k + j < nj1)
+ {
+ preloadPairIsValid[j] = true;
+ /* Check if this pair on the exclusions list.*/
+ preloadPairIncluded[j] = (nlist.excl_fep.empty() || nlist.excl_fep[k + j]);
+ const int jnr = jjnr[k + j];
+ preloadJnr[j] = jnr;
+ typeIndices[STATE_A][j] = ntiA + typeA[jnr];
+ typeIndices[STATE_B][j] = ntiB + typeB[jnr];
+ preloadQq[STATE_A][j] = iqA * chargeA[jnr];
+ preloadQq[STATE_B][j] = iqB * chargeB[jnr];
+
+ for (int i = 0; i < NSTATES; i++)
+ {
+ if constexpr (vdwInteractionTypeIsEwald)
+ {
+ preloadLjPmeC6Grid[i][j] = nbfp_grid[2 * typeIndices[i][j]];
+ }
+ else
+ {
+ preloadLjPmeC6Grid[i][j] = 0;
+ }
+ if constexpr (softcoreType == KernelSoftcoreType::Beutler)
+ {
+ const real c6 = nbfp[2 * typeIndices[i][j]];
+ const real c12 = nbfp[2 * typeIndices[i][j] + 1];
+ if (c6 > 0 && c12 > 0)
+ {
+ /* c12 is stored scaled with 12.0 and c6 is scaled with 6.0 - correct for this */
+ preloadSigma6[i][j] = 0.5_real * c12 / c6;
+ if (preloadSigma6[i][j]
+ < sigma6_min) /* for disappearing coul and vdw with soft core at the same time */
+ {
+ preloadSigma6[i][j] = sigma6_min;
+ }
+ }
+ else
+ {
+ preloadSigma6[i][j] = sigma6_def;
+ }
+ }
+ if constexpr (softcoreType == KernelSoftcoreType::Gapsys)
+ {
+ const real c6 = nbfp[2 * typeIndices[i][j]];
+ const real c12 = nbfp[2 * typeIndices[i][j] + 1];
+ if (c6 > 0 && c12 > 0)
+ {
+ /* c12 is stored scaled with 12.0 and c6 is scaled with 6.0 - correct for this */
+ preloadGapsysSigma6VdW[i][j] = 0.5_real * c12 / c6;
+ }
+ else
+ {
+ preloadGapsysSigma6VdW[i][j] = gapsysSigma6VdW;
+ }
+ }
+ }
+ if constexpr (softcoreType == KernelSoftcoreType::Beutler)
+ {
+ /* only use softcore if one of the states has a zero endstate - softcore is for avoiding infinities!*/
+ const real c12A = nbfp[2 * typeIndices[STATE_A][j] + 1];
+ const real c12B = nbfp[2 * typeIndices[STATE_B][j] + 1];
+ if (c12A > 0 && c12B > 0)
+ {
+ preloadAlphaVdwEff[j] = 0;
+ preloadAlphaCoulEff[j] = 0;
+ }
+ else
+ {
+ preloadAlphaVdwEff[j] = alpha_vdw;
+ preloadAlphaCoulEff[j] = alpha_coul;
+ }
+ }
+ if constexpr (softcoreType == KernelSoftcoreType::Gapsys)
+ {
+ /* only use softcore if one of the states has a zero endstate - softcore is for avoiding infinities!*/
+ const real c12A = nbfp[2 * typeIndices[STATE_A][j] + 1];
+ const real c12B = nbfp[2 * typeIndices[STATE_B][j] + 1];
+ if (c12A > 0 && c12B > 0)
+ {
+ preloadGapsysScaleLinpointVdW[j] = 0;
+ preloadGapsysScaleLinpointCoul[j] = 0;
+ }
+ else
+ {
+ preloadGapsysScaleLinpointVdW[j] = gapsysScaleLinpointVdW;
+ preloadGapsysScaleLinpointCoul[j] = gapsysScaleLinpointCoul;
+ }
+ }
+ }
+ else
+ {
+ preloadJnr[j] = jjnr[k];
+ preloadPairIsValid[j] = false;
+ preloadPairIncluded[j] = false;
+ preloadAlphaVdwEff[j] = 0;
+ preloadAlphaCoulEff[j] = 0;
+ preloadGapsysScaleLinpointVdW[j] = 0;
+ preloadGapsysScaleLinpointCoul[j] = 0;
+
+ for (int i = 0; i < NSTATES; i++)
+ {
+ typeIndices[STATE_A][j] = ntiA + typeA[jjnr[k]];
+ typeIndices[STATE_B][j] = ntiB + typeB[jjnr[k]];
+ preloadLjPmeC6Grid[i][j] = 0;
+ preloadQq[i][j] = 0;
+ preloadSigma6[i][j] = 0;
+ preloadGapsysSigma6VdW[i][j] = 0;
+ }
+ }
+ }
+
+ RealType jx, jy, jz;
+ gmx::gatherLoadUTranspose<3>(reinterpret_cast<const real*>(x), preloadJnr, &jx, &jy, &jz);
+
+ const RealType pairIsValid = gmx::load<RealType>(preloadPairIsValid);
+ const RealType pairIncluded = gmx::load<RealType>(preloadPairIncluded);
+ const BoolType bPairIncluded = (pairIncluded != zero);
+ const BoolType bPairExcluded = (pairIncluded == zero && pairIsValid != zero);
+
+ const RealType dX = ix - jx;
+ const RealType dY = iy - jy;
+ const RealType dZ = iz - jz;
+ RealType rSq = dX * dX + dY * dY + dZ * dZ;
+
+ BoolType withinCutoffMask = (rSq < rcutoff_max2);
+
+ if (!gmx::anyTrue(withinCutoffMask || bPairExcluded))
{
/* We save significant time by skipping all code below.
* Note that with soft-core interactions, the actual cut-off
* check might be different. But since the soft-core distance
* is always larger than r, checking on r here is safe.
+ * Exclusions outside the cutoff can not be skipped as
+ * when using Ewald: the reciprocal-space
+ * Ewald component still needs to be subtracted.
*/
continue;
}
- npair_within_cutoff++;
-
- if (rsq > 0)
+ else
{
- /* Note that unlike in the nbnxn kernels, we do not need
- * to clamp the value of rsq before taking the invsqrt
- * to avoid NaN in the LJ calculation, since here we do
- * not calculate LJ interactions when C6 and C12 are zero.
- */
-
- rinv = gmx::invsqrt(rsq);
- r = rsq * rinv;
+ havePairsWithinCutoff = true;
}
- else
+
+ if (gmx::anyTrue(rlistSquared < rSq && bPairExcluded))
{
- /* The force at r=0 is zero, because of symmetry.
- * But note that the potential is in general non-zero,
- * since the soft-cored r will be non-zero.
- */
- rinv = 0;
- r = 0;
+ haveExcludedPairsBeyondRlist = true;
}
- if (softCoreTreatment == SoftCoreTreatment::None)
+ const IntType jnr_s = gmx::load<IntType>(preloadJnr);
+ const BoolType bIiEqJnr = gmx::cvtIB2B(ii_s == jnr_s);
+
+ RealType c6[NSTATES];
+ RealType c12[NSTATES];
+ RealType gmx_unused sigma6[NSTATES];
+ RealType qq[NSTATES];
+ RealType gmx_unused ljPmeC6Grid[NSTATES];
+ RealType gmx_unused alphaVdwEff;
+ RealType gmx_unused alphaCoulEff;
+ RealType gmx_unused gapsysScaleLinpointVdWEff;
+ RealType gmx_unused gapsysScaleLinpointCoulEff;
+ RealType gmx_unused gapsysSigma6VdWEff[NSTATES];
+ for (int i = 0; i < NSTATES; i++)
{
- /* The soft-core power p will not affect the results
- * with not using soft-core, so we use power of 0 which gives
- * the simplest math and cheapest code.
- */
- rpm2 = rinv * rinv;
- rp = 1;
+ gmx::gatherLoadTranspose<2>(nbfp.data(), typeIndices[i], &c6[i], &c12[i]);
+ qq[i] = gmx::load<RealType>(preloadQq[i]);
+ ljPmeC6Grid[i] = gmx::load<RealType>(preloadLjPmeC6Grid[i]);
+ if constexpr (softcoreType == KernelSoftcoreType::Beutler)
+ {
+ sigma6[i] = gmx::load<RealType>(preloadSigma6[i]);
+ }
+ if constexpr (softcoreType == KernelSoftcoreType::Gapsys)
+ {
+ gapsysSigma6VdWEff[i] = gmx::load<RealType>(preloadGapsysSigma6VdW[i]);
+ }
}
- if (softCoreTreatment == SoftCoreTreatment::RPower6)
+ if constexpr (softcoreType == KernelSoftcoreType::Beutler)
{
- rpm2 = rsq * rsq; /* r4 */
- rp = rpm2 * rsq; /* r6 */
+ alphaVdwEff = gmx::load<RealType>(preloadAlphaVdwEff);
+ alphaCoulEff = gmx::load<RealType>(preloadAlphaCoulEff);
}
- if (softCoreTreatment == SoftCoreTreatment::RPower48)
+ if constexpr (softcoreType == KernelSoftcoreType::Gapsys)
{
- rp = rsq * rsq * rsq; /* r6 */
- rp = rp * rp; /* r12 */
- rp = rp * rp; /* r24 */
- rp = rp * rp; /* r48 */
- rpm2 = rp / rsq; /* r46 */
+ gapsysScaleLinpointVdWEff = gmx::load<RealType>(preloadGapsysScaleLinpointVdW);
+ gapsysScaleLinpointCoulEff = gmx::load<RealType>(preloadGapsysScaleLinpointCoul);
}
- real Fscal = 0;
+ // Avoid overflow of r^-12 at distances near zero
+ rSq = gmx::max(rSq, minDistanceSquared);
+ rInv = gmx::invsqrt(rSq);
+ r = rSq * rInv;
- qq[STATE_A] = iqA * chargeA[jnr];
- qq[STATE_B] = iqB * chargeB[jnr];
+ RealType gmx_unused rp, rpm2;
+ if constexpr (softcoreType == KernelSoftcoreType::Beutler)
+ {
+ rpm2 = rSq * rSq; /* r4 */
+ rp = rpm2 * rSq; /* r6 */
+ }
+ else
+ {
+ /* The soft-core power p will not affect the results
+ * with not using soft-core, so we use power of 0 which gives
+ * the simplest math and cheapest code.
+ */
+ rpm2 = rInv * rInv;
+ rp = one;
+ }
- tj[STATE_A] = ntiA + 2 * typeA[jnr];
- tj[STATE_B] = ntiB + 2 * typeB[jnr];
+ RealType fScal(0);
- if (nlist->excl_fep == nullptr || nlist->excl_fep[k])
+ /* The following block is masked to only calculate values having bPairIncluded. If
+ * bPairIncluded is true then withinCutoffMask must also be true. */
+ if (gmx::anyTrue(withinCutoffMask && bPairIncluded))
{
- c6[STATE_A] = nbfp[tj[STATE_A]];
- c6[STATE_B] = nbfp[tj[STATE_B]];
-
+ RealType gmx_unused fScalC[NSTATES], fScalV[NSTATES];
+ RealType vCoul[NSTATES], vVdw[NSTATES];
for (int i = 0; i < NSTATES; i++)
{
- c12[i] = nbfp[tj[i] + 1];
- if (useSoftCore)
+ fScalC[i] = zero;
+ fScalV[i] = zero;
+ vCoul[i] = zero;
+ vVdw[i] = zero;
+
+ RealType gmx_unused rInvC, rInvV, rC, rV, rPInvC, rPInvV;
+
+ /* The following block is masked to require (qq[i] != 0 || c6[i] != 0 || c12[i]
+ * != 0) in addition to bPairIncluded, which in turn requires withinCutoffMask. */
+ BoolType nonZeroState = ((qq[i] != zero || c6[i] != zero || c12[i] != zero)
+ && bPairIncluded && withinCutoffMask);
+ if (gmx::anyTrue(nonZeroState))
{
- real sigma6[NSTATES];
- if ((c6[i] > 0) && (c12[i] > 0))
- {
- /* c12 is stored scaled with 12.0 and c6 is scaled with 6.0 - correct for this */
- sigma6[i] = half * c12[i] / c6[i];
- if (sigma6[i] < sigma6_min) /* for disappearing coul and vdw with soft core at the same time */
- {
- sigma6[i] = sigma6_min;
- }
- }
- else
+ if constexpr (softcoreType == KernelSoftcoreType::Beutler)
{
- sigma6[i] = sigma6_def;
- }
- sigma_pow[i] = calculateSigmaPow<softCoreTreatment>(sigma6[i]);
- }
- }
-
- if (useSoftCore)
- {
- /* only use softcore if one of the states has a zero endstate - softcore is for avoiding infinities!*/
- if ((c12[STATE_A] > 0) && (c12[STATE_B] > 0))
- {
- alpha_vdw_eff = 0;
- alpha_coul_eff = 0;
- }
- else
- {
- alpha_vdw_eff = alpha_vdw;
- alpha_coul_eff = alpha_coul;
- }
- }
-
- for (int i = 0; i < NSTATES; i++)
- {
- FscalC[i] = 0;
- FscalV[i] = 0;
- Vcoul[i] = 0;
- Vvdw[i] = 0;
+ RealType divisor = (alphaCoulEff * lFacCoul[i] * sigma6[i] + rp);
+ rPInvC = gmx::inv(divisor);
+ sixthRoot(rPInvC, &rInvC, &rC);
- real rinvC, rinvV;
- SCReal rC, rV, rpinvC, rpinvV; /* Needs double for sc_power==48 */
-
- /* Only spend time on A or B state if it is non-zero */
- if ((qq[i] != 0) || (c6[i] != 0) || (c12[i] != 0))
- {
- /* this section has to be inside the loop because of the dependence on sigma_pow */
- if (useSoftCore)
- {
- rpinvC = one / (alpha_coul_eff * lfac_coul[i] * sigma_pow[i] + rp);
- pthRoot<softCoreTreatment>(rpinvC, &rinvC, &rC);
- if (scLambdasOrAlphasDiffer)
+ if constexpr (scLambdasOrAlphasDiffer)
{
- rpinvV = one / (alpha_vdw_eff * lfac_vdw[i] * sigma_pow[i] + rp);
- pthRoot<softCoreTreatment>(rpinvV, &rinvV, &rV);
+ RealType divisor = (alphaVdwEff * lFacVdw[i] * sigma6[i] + rp);
+ rPInvV = gmx::inv(divisor);
+ sixthRoot(rPInvV, &rInvV, &rV);
}
else
{
/* We can avoid one expensive pow and one / operation */
- rpinvV = rpinvC;
- rinvV = rinvC;
+ rPInvV = rPInvC;
+ rInvV = rInvC;
rV = rC;
}
}
else
{
- rpinvC = 1;
- rinvC = rinv;
+ rPInvC = one;
+ rInvC = rInv;
rC = r;
- rpinvV = 1;
- rinvV = rinv;
+ rPInvV = one;
+ rInvV = rInv;
rV = r;
}
- /* Only process the coulomb interactions if we have charges,
- * and if we either include all entries in the list (no cutoff
+ /* Only process the coulomb interactions if we either
+ * include all entries in the list (no cutoff
* used in the kernel), or if we are within the cutoff.
*/
- bool computeElecInteraction = (elecInteractionTypeIsEwald && r < rcoulomb)
- || (!elecInteractionTypeIsEwald && rC < rcoulomb);
-
- if ((qq[i] != 0) && computeElecInteraction)
+ BoolType computeElecInteraction;
+ if constexpr (elecInteractionTypeIsEwald)
+ {
+ computeElecInteraction = (r < rCoulomb && qq[i] != zero && bPairIncluded);
+ }
+ else
+ {
+ computeElecInteraction = (rC < rCoulomb && qq[i] != zero && bPairIncluded);
+ }
+ if (gmx::anyTrue(computeElecInteraction))
{
- if (elecInteractionTypeIsEwald)
+ if constexpr (elecInteractionTypeIsEwald)
{
- Vcoul[i] = ewaldPotential(qq[i], rinvC, sh_ewald);
- FscalC[i] = ewaldScalarForce(qq[i], rinvC);
+ vCoul[i] = ewaldPotential(qq[i], rInvC, sh_ewald);
+ if constexpr (computeForces)
+ {
+ fScalC[i] = ewaldScalarForce(qq[i], rInvC);
+ }
+
+ if constexpr (softcoreType == KernelSoftcoreType::Gapsys)
+ {
+ ewaldQuadraticPotential<computeForces>(qq[i],
+ facel,
+ rC,
+ rCutoffCoul,
+ LFC[i],
+ DLF[i],
+ gapsysScaleLinpointCoulEff,
+ sh_ewald,
+ &fScalC[i],
+ &vCoul[i],
+ &dvdlCoul,
+ computeElecInteraction);
+ }
}
else
{
- Vcoul[i] = reactionFieldPotential(qq[i], rinvC, rC, krf, crf);
- FscalC[i] = reactionFieldScalarForce(qq[i], rinvC, rC, krf, two);
+ vCoul[i] = reactionFieldPotential(qq[i], rInvC, rC, krf, crf);
+ if constexpr (computeForces)
+ {
+ fScalC[i] = reactionFieldScalarForce(qq[i], rInvC, rC, krf, two);
+ }
+
+ if constexpr (softcoreType == KernelSoftcoreType::Gapsys)
+ {
+ reactionFieldQuadraticPotential<computeForces>(
+ qq[i],
+ facel,
+ rC,
+ rCutoffCoul,
+ LFC[i],
+ DLF[i],
+ gapsysScaleLinpointCoulEff,
+ krf,
+ crf,
+ &fScalC[i],
+ &vCoul[i],
+ &dvdlCoul,
+ computeElecInteraction);
+ }
+ }
+
+ vCoul[i] = gmx::selectByMask(vCoul[i], computeElecInteraction);
+ if constexpr (computeForces)
+ {
+ fScalC[i] = gmx::selectByMask(fScalC[i], computeElecInteraction);
}
}
- /* Only process the VDW interactions if we have
- * some non-zero parameters, and if we either
+ /* Only process the VDW interactions if we either
* include all entries in the list (no cutoff used
* in the kernel), or if we are within the cutoff.
*/
- bool computeVdwInteraction = (vdwInteractionTypeIsEwald && r < rvdw)
- || (!vdwInteractionTypeIsEwald && rV < rvdw);
- if ((c6[i] != 0 || c12[i] != 0) && computeVdwInteraction)
+ BoolType computeVdwInteraction;
+ if constexpr (vdwInteractionTypeIsEwald)
+ {
+ computeVdwInteraction =
+ (r < rVdw && (c6[i] != 0 || c12[i] != 0) && bPairIncluded);
+ }
+ else
+ {
+ computeVdwInteraction =
+ (rV < rVdw && (c6[i] != 0 || c12[i] != 0) && bPairIncluded);
+ }
+ if (gmx::anyTrue(computeVdwInteraction))
{
- real rinv6;
- if (softCoreTreatment == SoftCoreTreatment::RPower6)
+ RealType rInv6;
+ if constexpr (softcoreType == KernelSoftcoreType::Beutler)
{
- rinv6 = calculateRinv6<softCoreTreatment>(rpinvV);
+ rInv6 = rPInvV;
}
else
{
- rinv6 = calculateRinv6<softCoreTreatment>(rinvV);
+ rInv6 = calculateRinv6(rInvV);
+ }
+ // Avoid overflow at short distance for masked exclusions and
+ // for foreign energy calculations at a hard core end state.
+ // Note that we should limit r^-6, and thus also r^-12, and
+ // not only r^-12, as that could lead to erroneously low instead
+ // of very high foreign energies.
+ rInv6 = gmx::min(rInv6, maxRInvSix);
+ RealType vVdw6 = calculateVdw6(c6[i], rInv6);
+ RealType vVdw12 = calculateVdw12(c12[i], rInv6);
+
+ vVdw[i] = lennardJonesPotential(
+ vVdw6, vVdw12, c6[i], c12[i], repulsionShift, dispersionShift, oneSixth, oneTwelfth);
+ if constexpr (computeForces)
+ {
+ fScalV[i] = lennardJonesScalarForce(vVdw6, vVdw12);
}
- real Vvdw6 = calculateVdw6(c6[i], rinv6);
- real Vvdw12 = calculateVdw12(c12[i], rinv6);
- Vvdw[i] = lennardJonesPotential(Vvdw6, Vvdw12, c6[i], c12[i], repulsionShift,
- dispersionShift, onesixth, onetwelfth);
- FscalV[i] = lennardJonesScalarForce(Vvdw6, Vvdw12);
+ if constexpr (softcoreType == KernelSoftcoreType::Gapsys)
+ {
+ lennardJonesQuadraticPotential<computeForces>(c6[i],
+ c12[i],
+ r,
+ rSq,
+ LFV[i],
+ DLF[i],
+ gapsysSigma6VdWEff[i],
+ gapsysScaleLinpointVdWEff,
+ repulsionShift,
+ dispersionShift,
+ &fScalV[i],
+ &vVdw[i],
+ &dvdlVdw,
+ computeVdwInteraction);
+ }
- if (vdwInteractionTypeIsEwald)
+ if constexpr (vdwInteractionTypeIsEwald)
{
/* Subtract the grid potential at the cut-off */
- Vvdw[i] += ewaldLennardJonesGridSubtract(nbfp_grid[tj[i]],
- sh_lj_ewald, onesixth);
+ vVdw[i] = vVdw[i]
+ + gmx::selectByMask(ewaldLennardJonesGridSubtract(
+ ljPmeC6Grid[i], shLjEwald, oneSixth),
+ computeVdwInteraction);
+ }
+
+ if constexpr (vdwModifierIsPotSwitch)
+ {
+ RealType d = rV - rVdwSwitch;
+ BoolType zeroMask = zero < d;
+ BoolType potSwitchMask = rV < rVdw;
+ d = gmx::selectByMask(d, zeroMask);
+ const RealType d2 = d * d;
+ const RealType sw =
+ one + d2 * d * (vdw_swV3 + d * (vdw_swV4 + d * vdw_swV5));
+
+ if constexpr (computeForces)
+ {
+ const RealType dsw = d2 * (vdw_swF2 + d * (vdw_swF3 + d * vdw_swF4));
+ fScalV[i] = potSwitchScalarForceMod(
+ fScalV[i], vVdw[i], sw, rV, dsw, potSwitchMask);
+ }
+ vVdw[i] = potSwitchPotentialMod(vVdw[i], sw, potSwitchMask);
}
- if (vdwModifierIsPotSwitch)
+ vVdw[i] = gmx::selectByMask(vVdw[i], computeVdwInteraction);
+ if constexpr (computeForces)
{
- real d = rV - ic->rvdw_switch;
- d = (d > zero) ? d : zero;
- const real d2 = d * d;
- const real sw = one + d2 * d * (vdw_swV3 + d * (vdw_swV4 + d * vdw_swV5));
- const real dsw = d2 * (vdw_swF2 + d * (vdw_swF3 + d * vdw_swF4));
-
- FscalV[i] = potSwitchScalarForceMod(FscalV[i], Vvdw[i], sw, rV,
- rvdw, dsw, zero);
- Vvdw[i] = potSwitchPotentialMod(Vvdw[i], sw, rV, rvdw, zero);
+ fScalV[i] = gmx::selectByMask(fScalV[i], computeVdwInteraction);
}
}
- /* FscalC (and FscalV) now contain: dV/drC * rC
- * Now we multiply by rC^-p, so it will be: dV/drC * rC^1-p
- * Further down we first multiply by r^p-2 and then by
- * the vector r, which in total gives: dV/drC * (r/rC)^1-p
- */
- FscalC[i] *= rpinvC;
- FscalV[i] *= rpinvV;
- }
- }
+ if constexpr (computeForces)
+ {
+ /* fScalC (and fScalV) now contain: dV/drC * rC
+ * Now we multiply by rC^-6, so it will be: dV/drC * rC^-5
+ * Further down we first multiply by r^4 and then by
+ * the vector r, which in total gives: dV/drC * (r/rC)^-5
+ */
+ fScalC[i] = fScalC[i] * rPInvC;
+ fScalV[i] = fScalV[i] * rPInvV;
+ }
+ } // end of block requiring nonZeroState
+ } // end for (int i = 0; i < NSTATES; i++)
- /* Assemble A and B states */
- for (int i = 0; i < NSTATES; i++)
+ /* Assemble A and B states. */
+ BoolType assembleStates = (bPairIncluded && withinCutoffMask);
+ if (gmx::anyTrue(assembleStates))
{
- vctot += LFC[i] * Vcoul[i];
- vvtot += LFV[i] * Vvdw[i];
+ for (int i = 0; i < NSTATES; i++)
+ {
+ vCTot = vCTot + LFC[i] * vCoul[i];
+ vVTot = vVTot + LFV[i] * vVdw[i];
- Fscal += LFC[i] * FscalC[i] * rpm2;
- Fscal += LFV[i] * FscalV[i] * rpm2;
+ if constexpr (computeForces)
+ {
+ fScal = fScal + LFC[i] * fScalC[i] * rpm2;
+ fScal = fScal + LFV[i] * fScalV[i] * rpm2;
+ }
- if (useSoftCore)
- {
- dvdl_coul +=
- Vcoul[i] * DLF[i]
- + LFC[i] * alpha_coul_eff * dlfac_coul[i] * FscalC[i] * sigma_pow[i];
- dvdl_vdw += Vvdw[i] * DLF[i]
- + LFV[i] * alpha_vdw_eff * dlfac_vdw[i] * FscalV[i] * sigma_pow[i];
- }
- else
- {
- dvdl_coul += Vcoul[i] * DLF[i];
- dvdl_vdw += Vvdw[i] * DLF[i];
+ if constexpr (softcoreType == KernelSoftcoreType::Beutler)
+ {
+ dvdlCoul = dvdlCoul + vCoul[i] * DLF[i]
+ + LFC[i] * alphaCoulEff * dlFacCoul[i] * fScalC[i] * sigma6[i];
+ dvdlVdw = dvdlVdw + vVdw[i] * DLF[i]
+ + LFV[i] * alphaVdwEff * dlFacVdw[i] * fScalV[i] * sigma6[i];
+ }
+ else
+ {
+ dvdlCoul = dvdlCoul + vCoul[i] * DLF[i];
+ dvdlVdw = dvdlVdw + vVdw[i] * DLF[i];
+ }
}
}
- }
- else if (icoul == GMX_NBKERNEL_ELEC_REACTIONFIELD)
+ } // end of block requiring bPairIncluded && withinCutoffMask
+ /* In the following block bPairIncluded should be false in the masks. */
+ if (icoul == NbkernelElecType::ReactionField)
{
- /* For excluded pairs, which are only in this pair list when
- * using the Verlet scheme, we don't use soft-core.
- * As there is no singularity, there is no need for soft-core.
- */
- const real FF = -two * krf;
- real VV = krf * rsq - crf;
+ const BoolType computeReactionField = bPairExcluded;
- if (ii == jnr)
+ if (gmx::anyTrue(computeReactionField))
{
- VV *= half;
- }
+ /* For excluded pairs we don't use soft-core.
+ * As there is no singularity, there is no need for soft-core.
+ */
+ const RealType FF = -two * krf;
+ RealType VV = krf * rSq - crf;
- for (int i = 0; i < NSTATES; i++)
- {
- vctot += LFC[i] * qq[i] * VV;
- Fscal += LFC[i] * qq[i] * FF;
- dvdl_coul += DLF[i] * qq[i] * VV;
+ /* If ii == jnr the i particle (ii) has itself (jnr)
+ * in its neighborlist. This corresponds to a self-interaction
+ * that will occur twice. Scale it down by 50% to only include
+ * it once.
+ */
+ VV = VV * gmx::blend(one, half, bIiEqJnr);
+
+ for (int i = 0; i < NSTATES; i++)
+ {
+ vCTot = vCTot + gmx::selectByMask(LFC[i] * qq[i] * VV, computeReactionField);
+ fScal = fScal + gmx::selectByMask(LFC[i] * qq[i] * FF, computeReactionField);
+ dvdlCoul = dvdlCoul + gmx::selectByMask(DLF[i] * qq[i] * VV, computeReactionField);
+ }
}
}
- if (elecInteractionTypeIsEwald && r < rcoulomb)
+ const BoolType computeElecEwaldInteraction = (bPairExcluded || r < rCoulomb);
+ if (elecInteractionTypeIsEwald && gmx::anyTrue(computeElecEwaldInteraction))
{
/* See comment in the preamble. When using Ewald interactions
* (unless we use a switch modifier) we subtract the reciprocal-space
* the softcore to the entire electrostatic interaction,
* including the reciprocal-space component.
*/
- real v_lr, f_lr;
+ RealType v_lr, f_lr;
- const real ewrt = r * ewtabscale;
- int ewitab = static_cast<int>(ewrt);
- const real eweps = ewrt - ewitab;
- ewitab = 4 * ewitab;
- f_lr = ewtab[ewitab] + eweps * ewtab[ewitab + 1];
- v_lr = (ewtab[ewitab + 2] - ewtabhalfspace * eweps * (ewtab[ewitab] + f_lr));
- f_lr *= rinv;
+ pmeCoulombCorrectionVF<computeForces>(rSq, ewaldBeta, &v_lr, &f_lr);
+ if constexpr (computeForces)
+ {
+ f_lr = f_lr * rInv * rInv;
+ }
/* Note that any possible Ewald shift has already been applied in
* the normal interaction part above.
*/
- if (ii == jnr)
- {
- /* If we get here, the i particle (ii) has itself (jnr)
- * in its neighborlist. This can only happen with the Verlet
- * scheme, and corresponds to a self-interaction that will
- * occur twice. Scale it down by 50% to only include it once.
- */
- v_lr *= half;
- }
+ /* If ii == jnr the i particle (ii) has itself (jnr)
+ * in its neighborlist. This corresponds to a self-interaction
+ * that will occur twice. Scale it down by 50% to only include
+ * it once.
+ */
+ v_lr = v_lr * gmx::blend(one, half, bIiEqJnr);
for (int i = 0; i < NSTATES; i++)
{
- vctot -= LFC[i] * qq[i] * v_lr;
- Fscal -= LFC[i] * qq[i] * f_lr;
- dvdl_coul -= (DLF[i] * qq[i]) * v_lr;
+ vCTot = vCTot - gmx::selectByMask(LFC[i] * qq[i] * v_lr, computeElecEwaldInteraction);
+ if constexpr (computeForces)
+ {
+ fScal = fScal - gmx::selectByMask(LFC[i] * qq[i] * f_lr, computeElecEwaldInteraction);
+ }
+ dvdlCoul = dvdlCoul
+ - gmx::selectByMask(DLF[i] * qq[i] * v_lr, computeElecEwaldInteraction);
}
}
- if (vdwInteractionTypeIsEwald && r < rvdw)
+ const BoolType computeVdwEwaldInteraction = (bPairExcluded || r < rVdw);
+ if (vdwInteractionTypeIsEwald && gmx::anyTrue(computeVdwEwaldInteraction))
{
/* See comment in the preamble. When using LJ-Ewald interactions
* (unless we use a switch modifier) we subtract the reciprocal-space
* the softcore to the entire VdW interaction,
* including the reciprocal-space component.
*/
- /* We could also use the analytical form here
- * iso a table, but that can cause issues for
- * r close to 0 for non-interacting pairs.
- */
-
- const real rs = rsq * rinv * ewtabscale;
- const int ri = static_cast<int>(rs);
- const real frac = rs - ri;
- const real f_lr = (1 - frac) * tab_ewald_F_lj[ri] + frac * tab_ewald_F_lj[ri + 1];
- /* TODO: Currently the Ewald LJ table does not contain
- * the factor 1/6, we should add this.
- */
- const real FF = f_lr * rinv / six;
- real VV = (tab_ewald_V_lj[ri] - ewtabhalfspace * frac * (tab_ewald_F_lj[ri] + f_lr)) / six;
- if (ii == jnr)
- {
- /* If we get here, the i particle (ii) has itself (jnr)
- * in its neighborlist. This can only happen with the Verlet
- * scheme, and corresponds to a self-interaction that will
- * occur twice. Scale it down by 50% to only include it once.
- */
- VV *= half;
- }
+ RealType v_lr, f_lr;
+ pmeLJCorrectionVF<computeForces>(
+ rInv, rSq, ewaldLJCoeffSq, ewaldLJCoeffSixDivSix, &v_lr, &f_lr, computeVdwEwaldInteraction, bIiEqJnr);
+ v_lr = v_lr * oneSixth;
for (int i = 0; i < NSTATES; i++)
{
- const real c6grid = nbfp_grid[tj[i]];
- vvtot += LFV[i] * c6grid * VV;
- Fscal += LFV[i] * c6grid * FF;
- dvdl_vdw += (DLF[i] * c6grid) * VV;
+ vVTot = vVTot + gmx::selectByMask(LFV[i] * ljPmeC6Grid[i] * v_lr, computeVdwEwaldInteraction);
+ if constexpr (computeForces)
+ {
+ fScal = fScal + gmx::selectByMask(LFV[i] * ljPmeC6Grid[i] * f_lr, computeVdwEwaldInteraction);
+ }
+ dvdlVdw = dvdlVdw + gmx::selectByMask(DLF[i] * ljPmeC6Grid[i] * v_lr, computeVdwEwaldInteraction);
}
}
- if (doForces)
+ if (computeForces && gmx::anyTrue(fScal != zero))
{
- const real tx = Fscal * dx;
- const real ty = Fscal * dy;
- const real tz = Fscal * dz;
- fix = fix + tx;
- fiy = fiy + ty;
- fiz = fiz + tz;
- /* OpenMP atomics are expensive, but this kernels is also
- * expensive, so we can take this hit, instead of using
- * thread-local output buffers and extra reduction.
- *
- * All the OpenMP regions in this file are trivial and should
- * not throw, so no need for try/catch.
- */
-#pragma omp atomic
- f[j3] -= tx;
-#pragma omp atomic
- f[j3 + 1] -= ty;
-#pragma omp atomic
- f[j3 + 2] -= tz;
+ const RealType tX = fScal * dX;
+ const RealType tY = fScal * dY;
+ const RealType tZ = fScal * dZ;
+ fIX = fIX + tX;
+ fIY = fIY + tY;
+ fIZ = fIZ + tZ;
+
+ gmx::transposeScatterDecrU<3>(forceRealPtr, preloadJnr, tX, tY, tZ);
}
- }
+ } // end for (int k = nj0; k < nj1; k += DataTypes::simdRealWidth)
- /* The atomics below are expensive with many OpenMP threads.
- * Here unperturbed i-particles will usually only have a few
- * (perturbed) j-particles in the list. Thus with a buffered list
- * we can skip a significant number of i-reductions with a check.
- */
- if (npair_within_cutoff > 0)
+ if (havePairsWithinCutoff)
{
- if (doForces)
+ if constexpr (computeForces)
{
-#pragma omp atomic
- f[ii3] += fix;
-#pragma omp atomic
- f[ii3 + 1] += fiy;
-#pragma omp atomic
- f[ii3 + 2] += fiz;
- }
- if (doShiftForces)
- {
-#pragma omp atomic
- fshift[is3] += fix;
-#pragma omp atomic
- fshift[is3 + 1] += fiy;
-#pragma omp atomic
- fshift[is3 + 2] += fiz;
+ gmx::transposeScatterIncrU<3>(forceRealPtr, preloadIi, fIX, fIY, fIZ);
+
+ if (doShiftForces)
+ {
+ gmx::transposeScatterIncrU<3>(
+ reinterpret_cast<real*>(threadForceShiftBuffer), preloadIs, fIX, fIY, fIZ);
+ }
}
if (doPotential)
{
int ggid = gid[n];
-#pragma omp atomic
- Vc[ggid] += vctot;
-#pragma omp atomic
- Vv[ggid] += vvtot;
+ threadVc[ggid] += gmx::reduce(vCTot);
+ threadVv[ggid] += gmx::reduce(vVTot);
}
}
- }
+ } // end for (int n = 0; n < nri; n++)
-#pragma omp atomic
- dvdl[efptCOUL] += dvdl_coul;
-#pragma omp atomic
- dvdl[efptVDW] += dvdl_vdw;
+ if (gmx::anyTrue(dvdlCoul != zero))
+ {
+ threadDvdl[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)] += gmx::reduce(dvdlCoul);
+ }
+ if (gmx::anyTrue(dvdlVdw != zero))
+ {
+ threadDvdl[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)] += gmx::reduce(dvdlVdw);
+ }
/* Estimate flops, average for free energy stuff:
* 12 flops per outer iteration
* 150 flops per inner iteration
+ * TODO: Update the number of flops and/or use different counts for different code paths.
*/
-#pragma omp atomic
- inc_nrnb(nrnb, eNR_NBKERNEL_FREE_ENERGY, nlist->nri * 12 + nlist->jindex[nri] * 150);
+ atomicNrnbIncrement(nrnb, eNR_NBKERNEL_FREE_ENERGY, nlist.nri * 12 + nlist.jindex[nri] * 150);
+
+ if (haveExcludedPairsBeyondRlist > 0)
+ {
+ gmx_fatal(FARGS,
+ "There are perturbed non-bonded pair interactions beyond the pair-list cutoff "
+ "of %g nm, which is not supported. This can happen because the system is "
+ "unstable or because intra-molecular interactions at long distances are "
+ "excluded. If the "
+ "latter is the case, you can try to increase nstlist or rlist to avoid this. "
+ "The error is likely triggered by the use of couple-intramol=no "
+ "and the maximal distance in the decoupled molecule exceeding rlist.",
+ rlist);
+ }
}
-typedef void (*KernelFunction)(const t_nblist* gmx_restrict nlist,
- rvec* gmx_restrict xx,
- gmx::ForceWithShiftForces* forceWithShiftForces,
- const t_forcerec* gmx_restrict fr,
- const t_mdatoms* gmx_restrict mdatoms,
- nb_kernel_data_t* gmx_restrict kernel_data,
- t_nrnb* gmx_restrict nrnb);
+typedef void (*KernelFunction)(const t_nblist& nlist,
+ const gmx::ArrayRefWithPadding<const gmx::RVec>& coords,
+ const int ntype,
+ const real rlist,
+ const interaction_const_t& ic,
+ gmx::ArrayRef<const gmx::RVec> shiftvec,
+ gmx::ArrayRef<const real> nbfp,
+ gmx::ArrayRef<const real> nbfp_grid,
+ gmx::ArrayRef<const real> chargeA,
+ gmx::ArrayRef<const real> chargeB,
+ gmx::ArrayRef<const int> typeA,
+ gmx::ArrayRef<const int> typeB,
+ int flags,
+ gmx::ArrayRef<const real> lambda,
+ t_nrnb* gmx_restrict nrnb,
+ gmx::ArrayRefWithPadding<gmx::RVec> threadForceBuffer,
+ rvec* threadForceShiftBuffer,
+ gmx::ArrayRef<real> threadVc,
+ gmx::ArrayRef<real> threadVv,
+ gmx::ArrayRef<real> threadDvdl);
+
+template<KernelSoftcoreType softcoreType, bool scLambdasOrAlphasDiffer, bool vdwInteractionTypeIsEwald, bool elecInteractionTypeIsEwald, bool vdwModifierIsPotSwitch, bool computeForces>
+static KernelFunction dispatchKernelOnUseSimd(const bool useSimd)
+{
+ if (useSimd)
+ {
+#if GMX_SIMD_HAVE_REAL && GMX_SIMD_HAVE_INT32_ARITHMETICS && GMX_USE_SIMD_KERNELS
+ return (nb_free_energy_kernel<SimdDataTypes, softcoreType, scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald, vdwModifierIsPotSwitch, computeForces>);
+#else
+ return (nb_free_energy_kernel<ScalarDataTypes, softcoreType, scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald, vdwModifierIsPotSwitch, computeForces>);
+#endif
+ }
+ else
+ {
+ return (nb_free_energy_kernel<ScalarDataTypes, softcoreType, scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald, vdwModifierIsPotSwitch, computeForces>);
+ }
+}
-template<SoftCoreTreatment softCoreTreatment, bool scLambdasOrAlphasDiffer, bool vdwInteractionTypeIsEwald, bool elecInteractionTypeIsEwald>
-static KernelFunction dispatchKernelOnVdwModifier(const bool vdwModifierIsPotSwitch)
+template<KernelSoftcoreType softcoreType, bool scLambdasOrAlphasDiffer, bool vdwInteractionTypeIsEwald, bool elecInteractionTypeIsEwald, bool vdwModifierIsPotSwitch>
+static KernelFunction dispatchKernelOnComputeForces(const bool computeForces, const bool useSimd)
+{
+ if (computeForces)
+ {
+ return (dispatchKernelOnUseSimd<softcoreType, scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald, vdwModifierIsPotSwitch, true>(
+ useSimd));
+ }
+ else
+ {
+ return (dispatchKernelOnUseSimd<softcoreType, scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald, vdwModifierIsPotSwitch, false>(
+ useSimd));
+ }
+}
+
+template<KernelSoftcoreType softcoreType, bool scLambdasOrAlphasDiffer, bool vdwInteractionTypeIsEwald, bool elecInteractionTypeIsEwald>
+static KernelFunction dispatchKernelOnVdwModifier(const bool vdwModifierIsPotSwitch,
+ const bool computeForces,
+ const bool useSimd)
{
if (vdwModifierIsPotSwitch)
{
- return (nb_free_energy_kernel<softCoreTreatment, scLambdasOrAlphasDiffer,
- vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald, true>);
+ return (dispatchKernelOnComputeForces<softcoreType, scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald, true>(
+ computeForces, useSimd));
}
else
{
- return (nb_free_energy_kernel<softCoreTreatment, scLambdasOrAlphasDiffer,
- vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald, false>);
+ return (dispatchKernelOnComputeForces<softcoreType, scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald, false>(
+ computeForces, useSimd));
}
}
-template<SoftCoreTreatment softCoreTreatment, bool scLambdasOrAlphasDiffer, bool vdwInteractionTypeIsEwald>
+template<KernelSoftcoreType softcoreType, bool scLambdasOrAlphasDiffer, bool vdwInteractionTypeIsEwald>
static KernelFunction dispatchKernelOnElecInteractionType(const bool elecInteractionTypeIsEwald,
- const bool vdwModifierIsPotSwitch)
+ const bool vdwModifierIsPotSwitch,
+ const bool computeForces,
+ const bool useSimd)
{
if (elecInteractionTypeIsEwald)
{
- return (dispatchKernelOnVdwModifier<softCoreTreatment, scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, true>(
- vdwModifierIsPotSwitch));
+ return (dispatchKernelOnVdwModifier<softcoreType, scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, true>(
+ vdwModifierIsPotSwitch, computeForces, useSimd));
}
else
{
- return (dispatchKernelOnVdwModifier<softCoreTreatment, scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, false>(
- vdwModifierIsPotSwitch));
+ return (dispatchKernelOnVdwModifier<softcoreType, scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, false>(
+ vdwModifierIsPotSwitch, computeForces, useSimd));
}
}
-template<SoftCoreTreatment softCoreTreatment, bool scLambdasOrAlphasDiffer>
+template<KernelSoftcoreType softcoreType, bool scLambdasOrAlphasDiffer>
static KernelFunction dispatchKernelOnVdwInteractionType(const bool vdwInteractionTypeIsEwald,
const bool elecInteractionTypeIsEwald,
- const bool vdwModifierIsPotSwitch)
+ const bool vdwModifierIsPotSwitch,
+ const bool computeForces,
+ const bool useSimd)
{
if (vdwInteractionTypeIsEwald)
{
- return (dispatchKernelOnElecInteractionType<softCoreTreatment, scLambdasOrAlphasDiffer, true>(
- elecInteractionTypeIsEwald, vdwModifierIsPotSwitch));
+ return (dispatchKernelOnElecInteractionType<softcoreType, scLambdasOrAlphasDiffer, true>(
+ elecInteractionTypeIsEwald, vdwModifierIsPotSwitch, computeForces, useSimd));
}
else
{
- return (dispatchKernelOnElecInteractionType<softCoreTreatment, scLambdasOrAlphasDiffer, false>(
- elecInteractionTypeIsEwald, vdwModifierIsPotSwitch));
+ return (dispatchKernelOnElecInteractionType<softcoreType, scLambdasOrAlphasDiffer, false>(
+ elecInteractionTypeIsEwald, vdwModifierIsPotSwitch, computeForces, useSimd));
}
}
-template<SoftCoreTreatment softCoreTreatment>
+template<KernelSoftcoreType softcoreType>
static KernelFunction dispatchKernelOnScLambdasOrAlphasDifference(const bool scLambdasOrAlphasDiffer,
const bool vdwInteractionTypeIsEwald,
const bool elecInteractionTypeIsEwald,
- const bool vdwModifierIsPotSwitch)
+ const bool vdwModifierIsPotSwitch,
+ const bool computeForces,
+ const bool useSimd)
{
if (scLambdasOrAlphasDiffer)
{
- return (dispatchKernelOnVdwInteractionType<softCoreTreatment, true>(
- vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald, vdwModifierIsPotSwitch));
+ return (dispatchKernelOnVdwInteractionType<softcoreType, true>(
+ vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald, vdwModifierIsPotSwitch, computeForces, useSimd));
}
else
{
- return (dispatchKernelOnVdwInteractionType<softCoreTreatment, false>(
- vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald, vdwModifierIsPotSwitch));
+ return (dispatchKernelOnVdwInteractionType<softcoreType, false>(
+ vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald, vdwModifierIsPotSwitch, computeForces, useSimd));
}
}
-static KernelFunction dispatchKernel(const bool scLambdasOrAlphasDiffer,
- const bool vdwInteractionTypeIsEwald,
- const bool elecInteractionTypeIsEwald,
- const bool vdwModifierIsPotSwitch,
- const t_forcerec* fr)
+static KernelFunction dispatchKernel(const bool scLambdasOrAlphasDiffer,
+ const bool vdwInteractionTypeIsEwald,
+ const bool elecInteractionTypeIsEwald,
+ const bool vdwModifierIsPotSwitch,
+ const bool computeForces,
+ const bool useSimd,
+ const interaction_const_t& ic)
{
- if (fr->sc_alphacoul == 0 && fr->sc_alphavdw == 0)
- {
- return (dispatchKernelOnScLambdasOrAlphasDifference<SoftCoreTreatment::None>(
- scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald,
- vdwModifierIsPotSwitch));
- }
- else if (fr->sc_r_power == 6.0_real)
+ const auto& scParams = *ic.softCoreParameters;
+ if (scParams.softcoreType == SoftcoreType::Beutler)
{
- return (dispatchKernelOnScLambdasOrAlphasDifference<SoftCoreTreatment::RPower6>(
- scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald,
- vdwModifierIsPotSwitch));
+ if (scParams.alphaCoulomb == 0 && scParams.alphaVdw == 0)
+ {
+ return (dispatchKernelOnScLambdasOrAlphasDifference<KernelSoftcoreType::None>(
+ scLambdasOrAlphasDiffer,
+ vdwInteractionTypeIsEwald,
+ elecInteractionTypeIsEwald,
+ vdwModifierIsPotSwitch,
+ computeForces,
+ useSimd));
+ }
+ return (dispatchKernelOnScLambdasOrAlphasDifference<KernelSoftcoreType::Beutler>(
+ scLambdasOrAlphasDiffer,
+ vdwInteractionTypeIsEwald,
+ elecInteractionTypeIsEwald,
+ vdwModifierIsPotSwitch,
+ computeForces,
+ useSimd));
}
- else
+ else // Gapsys
{
- return (dispatchKernelOnScLambdasOrAlphasDifference<SoftCoreTreatment::RPower48>(
- scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald, elecInteractionTypeIsEwald,
- vdwModifierIsPotSwitch));
+ if (scParams.gapsysScaleLinpointCoul == 0 && scParams.gapsysScaleLinpointVdW == 0)
+ {
+ return (dispatchKernelOnScLambdasOrAlphasDifference<KernelSoftcoreType::None>(
+ scLambdasOrAlphasDiffer,
+ vdwInteractionTypeIsEwald,
+ elecInteractionTypeIsEwald,
+ vdwModifierIsPotSwitch,
+ computeForces,
+ useSimd));
+ }
+ return (dispatchKernelOnScLambdasOrAlphasDifference<KernelSoftcoreType::Gapsys>(
+ scLambdasOrAlphasDiffer,
+ vdwInteractionTypeIsEwald,
+ elecInteractionTypeIsEwald,
+ vdwModifierIsPotSwitch,
+ computeForces,
+ useSimd));
}
}
-void gmx_nb_free_energy_kernel(const t_nblist* nlist,
- rvec* xx,
- gmx::ForceWithShiftForces* ff,
- const t_forcerec* fr,
- const t_mdatoms* mdatoms,
- nb_kernel_data_t* kernel_data,
- t_nrnb* nrnb)
+void gmx_nb_free_energy_kernel(const t_nblist& nlist,
+ const gmx::ArrayRefWithPadding<const gmx::RVec>& coords,
+ const bool useSimd,
+ const int ntype,
+ const real rlist,
+ const interaction_const_t& ic,
+ gmx::ArrayRef<const gmx::RVec> shiftvec,
+ gmx::ArrayRef<const real> nbfp,
+ gmx::ArrayRef<const real> nbfp_grid,
+ gmx::ArrayRef<const real> chargeA,
+ gmx::ArrayRef<const real> chargeB,
+ gmx::ArrayRef<const int> typeA,
+ gmx::ArrayRef<const int> typeB,
+ int flags,
+ gmx::ArrayRef<const real> lambda,
+ t_nrnb* nrnb,
+ gmx::ArrayRefWithPadding<gmx::RVec> threadForceBuffer,
+ rvec* threadForceShiftBuffer,
+ gmx::ArrayRef<real> threadVc,
+ gmx::ArrayRef<real> threadVv,
+ gmx::ArrayRef<real> threadDvdl)
{
- GMX_ASSERT(EEL_PME_EWALD(fr->ic->eeltype) || fr->ic->eeltype == eelCUT || EEL_RF(fr->ic->eeltype),
+ GMX_ASSERT(EEL_PME_EWALD(ic.eeltype) || ic.eeltype == CoulombInteractionType::Cut || EEL_RF(ic.eeltype),
"Unsupported eeltype with free energy");
+ GMX_ASSERT(ic.softCoreParameters, "We need soft-core parameters");
+
+ // Not all SIMD implementations need padding, but we provide padding anyhow so we can assert
+ GMX_ASSERT(!GMX_SIMD_HAVE_REAL || threadForceBuffer.empty()
+ || threadForceBuffer.size() > threadForceBuffer.unpaddedArrayRef().ssize(),
+ "We need actual padding with at least one element for SIMD scatter operations");
- const bool vdwInteractionTypeIsEwald = (EVDW_PME(fr->ic->vdwtype));
- const bool elecInteractionTypeIsEwald = (EEL_PME_EWALD(fr->ic->eeltype));
- const bool vdwModifierIsPotSwitch = (fr->ic->vdw_modifier == eintmodPOTSWITCH);
- bool scLambdasOrAlphasDiffer = true;
+ const auto& scParams = *ic.softCoreParameters;
+ const bool vdwInteractionTypeIsEwald = (EVDW_PME(ic.vdwtype));
+ const bool elecInteractionTypeIsEwald = (EEL_PME_EWALD(ic.eeltype));
+ const bool vdwModifierIsPotSwitch = (ic.vdw_modifier == InteractionModifiers::PotSwitch);
+ const bool computeForces = ((flags & GMX_NONBONDED_DO_FORCE) != 0);
+ bool scLambdasOrAlphasDiffer = true;
- if (fr->sc_alphacoul == 0 && fr->sc_alphavdw == 0)
+ if (scParams.alphaCoulomb == 0 && scParams.alphaVdw == 0)
{
scLambdasOrAlphasDiffer = false;
}
- else if (fr->sc_r_power == 6.0_real || fr->sc_r_power == 48.0_real)
+ else
{
- if (kernel_data->lambda[efptCOUL] == kernel_data->lambda[efptVDW] && fr->sc_alphacoul == fr->sc_alphavdw)
+ if (lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Coul)]
+ == lambda[static_cast<int>(FreeEnergyPerturbationCouplingType::Vdw)]
+ && scParams.alphaCoulomb == scParams.alphaVdw)
{
scLambdasOrAlphasDiffer = false;
}
}
- else
- {
- GMX_RELEASE_ASSERT(false, "Unsupported soft-core r-power");
- }
- KernelFunction kernelFunc = dispatchKernel(scLambdasOrAlphasDiffer, vdwInteractionTypeIsEwald,
- elecInteractionTypeIsEwald, vdwModifierIsPotSwitch, fr);
- kernelFunc(nlist, xx, ff, fr, mdatoms, kernel_data, nrnb);
+
+ KernelFunction kernelFunc;
+ kernelFunc = dispatchKernel(scLambdasOrAlphasDiffer,
+ vdwInteractionTypeIsEwald,
+ elecInteractionTypeIsEwald,
+ vdwModifierIsPotSwitch,
+ computeForces,
+ useSimd,
+ ic);
+ kernelFunc(nlist,
+ coords,
+ ntype,
+ rlist,
+ ic,
+ shiftvec,
+ nbfp,
+ nbfp_grid,
+ chargeA,
+ chargeB,
+ typeA,
+ typeB,
+ flags,
+ lambda,
+ nrnb,
+ threadForceBuffer,
+ threadForceShiftBuffer,
+ threadVc,
+ threadVv,
+ threadDvdl);
}