Forcerec should not be responsible for initializing modules.
Change-Id: I666075fe03441c815b191f1eb3809e294c62d541
acSETTLE
};
-static std::vector<cginfo_mb_t> init_cginfo_mb(const gmx_mtop_t* mtop, const t_forcerec* fr, gmx_bool* bFEP_NonBonded)
+static std::vector<cginfo_mb_t> init_cginfo_mb(const gmx_mtop_t* mtop, const t_forcerec* fr)
{
gmx_bool* type_VDW;
int* a_con;
}
}
- *bFEP_NonBonded = FALSE;
-
std::vector<cginfo_mb_t> cginfoPerMolblock;
int a_offset = 0;
for (size_t mb = 0; mb < mtop->molblock.size(); mb++)
if (fr->efep != efepNO && PERTURBED(atom))
{
SET_CGINFO_FEP(atomInfo);
- *bFEP_NonBonded = TRUE;
}
}
}
const char* tabfn,
const char* tabpfn,
gmx::ArrayRef<const std::string> tabbfnm,
- const gmx_hw_info_t& hardwareInfo,
- const gmx_device_info_t* deviceInfo,
- const bool useGpuForBonded,
const bool pmeOnlyRankUsesGpu,
- real print_force,
- gmx_wallcycle* wcycle)
+ real print_force)
{
- real rtab;
- char* env;
- double dbl;
- gmx_bool bFEP_NonBonded;
-
/* By default we turn SIMD kernels on, but it might be turned off further down... */
fr->use_simd_kernels = TRUE;
fr->sc_r_power = ir->fepvals->sc_r_power;
fr->sc_sigma6_def = gmx::power6(ir->fepvals->sc_sigma);
- env = getenv("GMX_SCSIGMA_MIN");
+ char* env = getenv("GMX_SCSIGMA_MIN");
if (env != nullptr)
{
- dbl = 0;
+ double dbl = 0;
sscanf(env, "%20lf", &dbl);
fr->sc_sigma6_min = gmx::power6(dbl);
if (fp)
* in that case grompp should already have checked that we do not need
* normal tables and we only generate tables for 1-4 interactions.
*/
- rtab = ir->rlist + ir->tabext;
+ real rtab = ir->rlist + ir->tabext;
/* We want to use unmodified tables for 1-4 coulombic
* interactions, so we must in general have an extra set of
}
/* Set all the static charge group info */
- fr->cginfo_mb = init_cginfo_mb(mtop, fr, &bFEP_NonBonded);
+ fr->cginfo_mb = init_cginfo_mb(mtop, fr);
if (!DOMAINDECOMP(cr))
{
fr->cginfo = cginfo_expand(mtop->molblock.size(), fr->cginfo_mb);
fr->nthread_ewc = gmx_omp_nthreads_get(emntBonded);
snew(fr->ewc_t, fr->nthread_ewc);
- if (fr->cutoff_scheme == ecutsVERLET)
- {
- // We checked the cut-offs in grompp, but double-check here.
- // We have PME+LJcutoff kernels for rcoulomb>rvdw.
- if (EEL_PME_EWALD(ir->coulombtype) && ir->vdwtype == eelCUT)
- {
- GMX_RELEASE_ASSERT(ir->rcoulomb >= ir->rvdw,
- "With Verlet lists and PME we should have rcoulomb>=rvdw");
- }
- else
- {
- GMX_RELEASE_ASSERT(
- ir->rcoulomb == ir->rvdw,
- "With Verlet lists and no PME rcoulomb and rvdw should be identical");
- }
-
- fr->nbv = Nbnxm::init_nb_verlet(mdlog, bFEP_NonBonded, ir, fr, cr, hardwareInfo, deviceInfo,
- mtop, box, wcycle);
-
- if (useGpuForBonded)
- {
- auto stream = havePPDomainDecomposition(cr)
- ? Nbnxm::gpu_get_command_stream(
- fr->nbv->gpu_nbv, gmx::InteractionLocality::NonLocal)
- : Nbnxm::gpu_get_command_stream(fr->nbv->gpu_nbv,
- gmx::InteractionLocality::Local);
- // TODO the heap allocation is only needed while
- // t_forcerec lacks a constructor.
- fr->gpuBonded = new gmx::GpuBonded(mtop->ffparams, stream, wcycle);
- }
- }
-
if (ir->eDispCorr != edispcNO)
{
fr->dispersionCorrection = std::make_unique<DispersionCorrection>(
* \param[in] tabfn Table potential file for non-bonded interactions
* \param[in] tabpfn Table potential file for pair interactions
* \param[in] tabbfnm Table potential files for bonded interactions
- * \param[in] hardwareInfo Information about hardware
- * \param[in] deviceInfo Info about GPU device to use for short-ranged work
- * \param[in] useGpuForBonded Whether bonded interactions will run on a GPU
* \param[in] pmeOnlyRankUsesGpu Whether there is a PME task on a GPU on a PME-only rank
* \param[in] print_force Print forces for atoms with force >= print_force
- * \param[out] wcycle Pointer to cycle counter object
*/
void init_forcerec(FILE* fplog,
const gmx::MDLogger& mdlog,
const char* tabfn,
const char* tabpfn,
gmx::ArrayRef<const std::string> tabbfnm,
- const gmx_hw_info_t& hardwareInfo,
- const gmx_device_info_t* deviceInfo,
- bool useGpuForBonded,
bool pmeOnlyRankUsesGpu,
- real print_force,
- gmx_wallcycle* wcycle);
+ real print_force);
/*! \brief Divide exclusions over threads
*
bool makeGpuPairList,
const gmx::CpuInfo& cpuinfo)
{
+ // We checked the cut-offs in grompp, but double-check here.
+ // We have PME+LJcutoff kernels for rcoulomb>rvdw.
+ if (EEL_PME_EWALD(ir->coulombtype) && ir->vdwtype == eelCUT)
+ {
+ GMX_RELEASE_ASSERT(ir->rcoulomb >= ir->rvdw,
+ "With Verlet lists and PME we should have rcoulomb>=rvdw");
+ }
+ else
+ {
+ GMX_RELEASE_ASSERT(ir->rcoulomb == ir->rvdw,
+ "With Verlet lists and no PME rcoulomb and rvdw should be identical");
+ }
/* For NVE simulations, we will retain the initial list buffer */
if (EI_DYNAMICS(ir->eI) && ir->verletbuf_tol > 0 && !(EI_MD(ir->eI) && ir->etc == etcNO))
{
const bool thisRankHasPmeGpuTask = gpuTaskAssignments.thisRankHasPmeGpuTask();
std::unique_ptr<MDAtoms> mdAtoms;
std::unique_ptr<gmx_vsite_t> vsite;
+ std::unique_ptr<GpuBonded> gpuBonded;
t_nrnb nrnb;
if (thisRankHasDuty(cr, DUTY_PP))
init_forcerec(fplog, mdlog, fr, fcd, inputrec, &mtop, cr, box,
opt2fn("-table", filenames.size(), filenames.data()),
opt2fn("-tablep", filenames.size(), filenames.data()),
- opt2fns("-tableb", filenames.size(), filenames.data()), *hwinfo,
- nonbondedDeviceInfo, useGpuForBonded,
- pmeRunMode == PmeRunMode::GPU && !thisRankHasDuty(cr, DUTY_PME), pforce, wcycle);
+ opt2fns("-tableb", filenames.size(), filenames.data()),
+ pmeRunMode == PmeRunMode::GPU && !thisRankHasDuty(cr, DUTY_PME), pforce);
+
+ fr->nbv = Nbnxm::init_nb_verlet(mdlog, inputrec, fr, cr, *hwinfo, nonbondedDeviceInfo,
+ &mtop, box, wcycle);
+ if (useGpuForBonded)
+ {
+ auto stream = havePPDomainDecomposition(cr)
+ ? Nbnxm::gpu_get_command_stream(
+ fr->nbv->gpu_nbv, gmx::InteractionLocality::NonLocal)
+ : Nbnxm::gpu_get_command_stream(fr->nbv->gpu_nbv,
+ gmx::InteractionLocality::Local);
+ gpuBonded = std::make_unique<GpuBonded>(mtop.ffparams, stream, wcycle);
+ fr->gpuBonded = gpuBonded.get();
+ }
// TODO Move this to happen during domain decomposition setup,
// once stream and event handling works well with that.
mdAtoms.reset(nullptr);
globalState.reset(nullptr);
mdModules_.reset(nullptr); // destruct force providers here as they might also use the GPU
+ gpuBonded.reset(nullptr);
/* Free pinned buffers in *fr */
delete fr;
fr = nullptr;
/*! \brief Creates an Nbnxm object */
std::unique_ptr<nonbonded_verlet_t> init_nb_verlet(const gmx::MDLogger& mdlog,
- gmx_bool bFEP_NonBonded,
const t_inputrec* ir,
const t_forcerec* fr,
const t_commrec* cr,
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/pairlist_tuning.h"
#include "gromacs/simd/simd.h"
+#include "gromacs/topology/mtop_util.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/logger.h"
}
std::unique_ptr<nonbonded_verlet_t> init_nb_verlet(const gmx::MDLogger& mdlog,
- gmx_bool bFEP_NonBonded,
const t_inputrec* ir,
const t_forcerec* fr,
const t_commrec* cr,
const bool haveMultipleDomains = (DOMAINDECOMP(cr) && cr->dd->nnodes > 1);
+ bool bFEP_NonBonded = (fr->efep != efepNO) && haveFepPerturbedNBInteractions(mtop);
PairlistParams pairlistParams(kernelSetup.kernelType, bFEP_NonBonded, ir->rlist,
havePPDomainDecomposition(cr));
gmx_mtop_finalize(mtop);
}
+
+/* Returns whether \p mtop contains any atom whose A- and B-state
+ * non-bonded parameters differ, i.e. whether perturbed non-bonded
+ * (FEP) interactions are present in the topology.
+ *
+ * Note: atoms are stored once per molecule type, so each used molecule
+ * type needs to be scanned only once, not once per molecule copy.
+ */
+bool haveFepPerturbedNBInteractions(const gmx_mtop_t* mtop)
+{
+    for (const gmx_molblock_t& molb : mtop->molblock)
+    {
+        // A block with zero molecules contributes no atoms to the system,
+        // so its molecule type must not trigger a positive result.
+        if (molb.nmol <= 0)
+        {
+            continue;
+        }
+        const gmx_moltype_t& molt = mtop->moltype[molb.type];
+        for (int a = 0; a < molt.atoms.nr; a++)
+        {
+            if (PERTURBED(molt.atoms.atom[a]))
+            {
+                return true;
+            }
+        }
+    }
+    return false;
+}
*/
void convertAtomsToMtop(t_symtab* symtab, char** name, t_atoms* atoms, gmx_mtop_t* mtop);
+/*! \brief Checks whether the topology contains perturbed non-bonded interactions.
+ *
+ * Only the topology is inspected here; whether FEP is actually enabled
+ * for the run (the free-energy setting) is checked by the caller.
+ *
+ * \param[in] mtop Molecular topology.
+ * \returns Whether any atom in \p mtop has perturbed non-bonded parameters.
+ */
+bool haveFepPerturbedNBInteractions(const gmx_mtop_t* mtop);
+
#endif