#include <config.h>
#endif
-#include "smalloc.h"
+#include "gromacs/utility/smalloc.h"
+#include "types/commrec.h"
#include "network.h"
#include "calcgrid.h"
#include "pme.h"
while (sp <= 1.001*pme_lb->setup[pme_lb->cur].spacing || !grid_ok);
set->rcut_coulomb = pme_lb->cut_spacing*sp;
+ if (set->rcut_coulomb < pme_lb->rcut_coulomb_start)
+ {
+ /* This is unlikely, but can happen when e.g. continuing from
+ * a checkpoint after equilibration where the box shrank a lot.
+ * We want to avoid rcoulomb getting smaller than rvdw
+ * and there might be more issues with decreasing rcoulomb.
+ */
+ set->rcut_coulomb = pme_lb->rcut_coulomb_start;
+ }
if (pme_lb->cutoff_scheme == ecutsVERLET)
{
        /* The Ewald coefficient is inversely proportional to the cut-off */
set->ewaldcoeff_q =
pme_lb->setup[0].ewaldcoeff_q*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
+ /* We set ewaldcoeff_lj in set, even when LJ-PME is not used */
+ set->ewaldcoeff_lj =
+ pme_lb->setup[0].ewaldcoeff_lj*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
set->count = 0;
set->cycles = 0;
ic->rlistlong = set->rlistlong;
ir->nstcalclr = set->nstcalclr;
ic->ewaldcoeff_q = set->ewaldcoeff_q;
+ /* TODO: centralize the code that sets the potentials shifts */
if (ic->coulomb_modifier == eintmodPOTSHIFT)
{
ic->sh_ewald = gmx_erfc(ic->ewaldcoeff_q*ic->rcoulomb);
}
+ if (EVDW_PME(ic->vdwtype))
+ {
+ /* We have PME for both Coulomb and VdW, set rvdw equal to rcoulomb */
+ ic->rvdw = set->rcut_coulomb;
+ ic->ewaldcoeff_lj = set->ewaldcoeff_lj;
+ if (ic->vdw_modifier == eintmodPOTSHIFT)
+ {
+ real crc2;
+
+ ic->dispersion_shift.cpot = -pow(ic->rvdw, -6.0);
+ ic->repulsion_shift.cpot = -pow(ic->rvdw, -12.0);
+ ic->sh_invrc6 = -ic->dispersion_shift.cpot;
+ crc2 = sqr(ic->ewaldcoeff_lj*ic->rvdw);
+ ic->sh_lj_ewald = (exp(-crc2)*(1 + crc2 + 0.5*crc2*crc2) - 1)*pow(ic->rvdw, -6.0);
+ }
+ }
bUsesSimpleTables = uses_simple_tables(ir->cutoff_scheme, nbv, 0);
if (pme_lb->cutoff_scheme == ecutsVERLET &&
{
md_print_warn(cr, fplog,
"NOTE: PME load balancing increased the non-bonded workload by more than 50%%.\n"
- " For better performance use (more) PME nodes (mdrun -npme),\n"
- " or in case you are beyond the scaling limit, use less nodes in total.\n");
+ " For better performance, use (more) PME ranks (mdrun -npme),\n"
+ " or if you are beyond the scaling limit, use fewer total ranks (or nodes).\n");
}
else
{