#include "gromacs/topology/topology.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
+#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/pleasecite.h"
#include "gromacs/utility/smalloc.h"
gmx_mtop_ilistloop_t iloop;
t_ilist *il;
char *ptr;
+ int type_min, type_max;
dd = &(fcd->disres);
fprintf(fplog, "Initializing the distance restraints\n");
}
-
- if (ir->eDisre == edrEnsemble)
- {
- gmx_fatal(FARGS, "Sorry, distance restraints with ensemble averaging over multiple molecules in one system are not functional in this version of GROMACS");
- }
-
dd->dr_weighting = ir->eDisreWeighting;
dd->dr_fc = ir->dr_fc;
if (EI_DYNAMICS(ir->eI))
}
else
{
+ /* We store the r^-6 time averages in an array that is indexed
+ * with the local disres iatom index, so this doesn't work with DD.
+ * Note that DD is not initialized yet here, so we check for PAR(cr),
+ * but there are probably also issues with e.g. NM MPI parallelization.
+ */
+ if (cr && PAR(cr))
+ {
+ gmx_fatal(FARGS, "Time-averaged distance restraints are not supported with MPI parallelization. You can use OpenMP parallelization on a single node.");
+ }
+
dd->dr_bMixed = ir->bDisreMixed;
dd->ETerm = std::exp(-(ir->delta_t/ir->dr_tau));
}
dd->nres = 0;
dd->npair = 0;
+ type_min = INT_MAX;
+ type_max = 0;
iloop = gmx_mtop_ilistloop_init(mtop);
while (gmx_mtop_ilistloop_next(iloop, &il, &nmol))
{
+ if (nmol > 1 && ir->eDisre != edrEnsemble)
+ {
+ gmx_fatal(FARGS, "NMR distance restraints with multiple copies of the same molecule are currently only supported with ensemble averaging. If you just want to restrain distances between atom pairs using a flat-bottomed potential, use a restraint potential (bonds type 10) instead.");
+ }
+
np = 0;
for (fa = 0; fa < il[F_DISRES].nr; fa += 3)
{
+ int type;
+
+ type = il[F_DISRES].iatoms[fa];
+
np++;
- npair = mtop->ffparams.iparams[il[F_DISRES].iatoms[fa]].disres.npair;
+ npair = mtop->ffparams.iparams[type].disres.npair;
if (np == npair)
{
- dd->nres += (ir->eDisre == edrEnsemble ? 1 : nmol)*npair;
+ dd->nres += (ir->eDisre == edrEnsemble ? 1 : nmol);
dd->npair += nmol*npair;
np = 0;
+
+ type_min = std::min(type_min, type);
+ type_max = std::max(type_max, type);
}
}
}
- if (cr && PAR(cr))
+ if (cr && PAR(cr) && ir->nstdisreout > 0)
{
- /* Temporary check, will be removed when disre is implemented with DD */
- const char *notestr = "NOTE: atoms involved in distance restraints should be within the same domain. If this is not the case mdrun generates a fatal error. If you encounter this, use a single MPI rank (Verlet+OpenMP+GPUs work fine).";
+ /* With DD we currently only have local pair information available */
+ gmx_fatal(FARGS, "With MPI parallelization distance-restraint pair output is not supported. Use nstdisreout=0 or use OpenMP parallelization on a single node.");
+ }
- if (MASTER(cr))
- {
- fprintf(stderr, "\n%s\n\n", notestr);
- }
- if (fplog)
- {
- fprintf(fplog, "%s\n", notestr);
- }
+ /* For communicating and/or reducing (sums of) r^-6 for pairs over threads
+ * we use multiple arrays in t_disresdata. We need to have unique indices
+ * for each restraint that work over threads and MPI ranks. To this end
+ * we use the type index. These should all be in one block and there should
+ * be dd->nres types, but we check for this here.
+ * This setup currently does not allow for multiple copies of the same
+ * molecule without ensemble averaging, this is checked for above.
+ */
+ GMX_RELEASE_ASSERT(type_max - type_min + 1 == dd->nres, "All distance restraint parameter entries in the topology should be consecutive");
- if (dd->dr_tau != 0 || ir->eDisre == edrEnsemble ||
- dd->nres != dd->npair)
- {
- gmx_fatal(FARGS, "Time or ensemble averaged or multiple pair distance restraints do not work (yet) with domain decomposition, use a single MPI rank%s", cr->ms ? " per simulation" : "");
- }
- if (ir->nstdisreout != 0)
- {
- if (fplog)
- {
- fprintf(fplog, "\nWARNING: Can not write distance restraint data to energy file with domain decomposition\n\n");
- }
- if (MASTER(cr))
- {
- fprintf(stderr, "\nWARNING: Can not write distance restraint data to energy file with domain decomposition\n");
- }
- ir->nstdisreout = 0;
- }
- }
+ dd->type_min = type_min;
snew(dd->rt, dd->npair);
}
fprintf(fplog, "\n");
}
- snew(dd->Rtl_6, dd->nres);
#endif
}
else
{
dd->nsystems = 1;
+ }
+
+ if (dd->nsystems == 1)
+ {
dd->Rtl_6 = dd->Rt_6;
}
+ else
+ {
+ snew(dd->Rtl_6, dd->nres);
+ }
if (dd->npair > 0)
{
}
}
-void calc_disres_R_6(int nfa, const t_iatom forceatoms[], const t_iparams ip[],
+void calc_disres_R_6(const t_commrec *cr,
+ int nfa, const t_iatom forceatoms[],
const rvec x[], const t_pbc *pbc,
t_fcdata *fcd, history_t *hist)
{
- int ai, aj;
- int fa, res, pair;
- int type, npair, np;
rvec dx;
real *rt, *rm3tav, *Rtl_6, *Rt_6, *Rtav_6;
- real rt_1, rt_3, rt2;
t_disresdata *dd;
- real ETerm, ETerm1, cf1 = 0, cf2 = 0, invn = 0;
+ real ETerm, ETerm1, cf1 = 0, cf2 = 0;
gmx_bool bTav;
dd = &(fcd->disres);
cf2 = 1.0/(1.0 - dd->exp_min_t_tau);
}
- if (dd->nsystems > 1)
+ for (int res = 0; res < dd->nres; res++)
{
- invn = 1.0/dd->nsystems;
+ Rtav_6[res] = 0.0;
+ Rt_6[res] = 0.0;
}
/* 'loop' over all atom pairs (pair_nr=fa/3) involved in restraints, *
* the total number of atoms pairs is nfa/3 */
- res = 0;
- fa = 0;
- while (fa < nfa)
+ for (int fa = 0; fa < nfa; fa += 3)
{
- type = forceatoms[fa];
- npair = ip[type].disres.npair;
+ int type = forceatoms[fa];
+ int res = type - dd->type_min;
+ int pair = fa/3;
+ int ai = forceatoms[fa+1];
+ int aj = forceatoms[fa+2];
- Rtav_6[res] = 0.0;
- Rt_6[res] = 0.0;
+ if (pbc)
+ {
+ pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
+ }
+ else
+ {
+ rvec_sub(x[ai], x[aj], dx);
+ }
+ real rt2 = iprod(dx, dx);
+ real rt_1 = gmx::invsqrt(rt2);
+ real rt_3 = rt_1*rt_1*rt_1;
- /* Loop over the atom pairs of 'this' restraint */
- np = 0;
- while (fa < nfa && np < npair)
+ rt[pair] = rt2*rt_1;
+ if (bTav)
{
- pair = fa/3;
- ai = forceatoms[fa+1];
- aj = forceatoms[fa+2];
+ /* Here we update rm3tav in t_fcdata using the data
+ * in history_t.
+ * Thus the results stay correct when this routine
+ * is called multiple times.
+ */
+ rm3tav[pair] = cf2*((ETerm - cf1)*hist->disre_rm3tav[pair] +
+ ETerm1*rt_3);
+ }
+ else
+ {
+ rm3tav[pair] = rt_3;
+ }
- if (pbc)
- {
- pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
- }
- else
- {
- rvec_sub(x[ai], x[aj], dx);
- }
- rt2 = iprod(dx, dx);
- rt_1 = gmx::invsqrt(rt2);
- rt_3 = rt_1*rt_1*rt_1;
+ /* Currently divide_bondeds_over_threads() ensures that pairs within
+ * the same restraint get assigned to the same thread, so we could
+ * run this loop thread-parallel.
+ */
+ Rt_6[res] += rt_3*rt_3;
+ Rtav_6[res] += rm3tav[pair]*rm3tav[pair];
+ }
- rt[pair] = std::sqrt(rt2);
- if (bTav)
- {
- /* Here we update rm3tav in t_fcdata using the data
- * in history_t.
- * Thus the results stay correct when this routine
- * is called multiple times.
- */
- rm3tav[pair] = cf2*((ETerm - cf1)*hist->disre_rm3tav[pair] +
- ETerm1*rt_3);
- }
- else
- {
- rm3tav[pair] = rt_3;
- }
+ /* NOTE: Rt_6 and Rtav_6 are stored consecutively in memory */
+ if (cr && DOMAINDECOMP(cr))
+ {
+ gmx_sum(2*dd->nres, dd->Rt_6, cr);
+ }
- Rt_6[res] += rt_3*rt_3;
- Rtav_6[res] += rm3tav[pair]*rm3tav[pair];
+ if (fcd->disres.nsystems > 1)
+ {
+ real invn = 1.0/dd->nsystems;
- fa += 3;
- np++;
- }
- if (dd->nsystems > 1)
+ for (int res = 0; res < dd->nres; res++)
{
Rtl_6[res] = Rt_6[res];
Rt_6[res] *= invn;
Rtav_6[res] *= invn;
}
- res++;
+ GMX_ASSERT(cr != NULL && cr->ms != NULL, "We need multisim with nsystems>1");
+ gmx_sum_sim(2*dd->nres, dd->Rt_6, cr->ms);
+
+ if (DOMAINDECOMP(cr))
+ {
+ gmx_bcast(2*dd->nres, dd->Rt_6, cr);
+ }
}
+
+ /* Store the base forceatoms pointer, so we can re-calculate the pair
+ * index in ta_disres() for indexing pair data in t_disresdata when
+ * using thread parallelization.
+ */
+ dd->forceatomsStart = forceatoms;
+
+ dd->sumviol = 0;
}
real ta_disres(int nfa, const t_iatom forceatoms[], const t_iparams ip[],
{
const real seven_three = 7.0/3.0;
- int ai, aj;
- int fa, res, npair, p, pair, ki = CENTRAL, m;
- int type;
rvec dx;
real weight_rt_1;
real smooth_fc, Rt, Rtav, rt2, *Rtl_6, *Rt_6, *Rtav_6;
/* 'loop' over all atom pairs (pair_nr=fa/3) involved in restraints, *
* the total number of atoms pairs is nfa/3 */
- res = 0;
- fa = 0;
- while (fa < nfa)
+ int faOffset = static_cast<int>(forceatoms - dd->forceatomsStart);
+ for (int fa = 0; fa < nfa; fa += 3)
{
- type = forceatoms[fa];
- /* Take action depending on restraint, calculate scalar force */
- npair = ip[type].disres.npair;
- up1 = ip[type].disres.up1;
- up2 = ip[type].disres.up2;
- low = ip[type].disres.low;
- k0 = smooth_fc*ip[type].disres.kfac;
+ int type = forceatoms[fa];
+ int npair = ip[type].disres.npair;
+ up1 = ip[type].disres.up1;
+ up2 = ip[type].disres.up2;
+ low = ip[type].disres.low;
+ k0 = smooth_fc*ip[type].disres.kfac;
+
+ int res = type - dd->type_min;
/* save some flops when there is only one pair */
if (ip[type].disres.type != 2)
}
else
{
- /* When rtype=2 use instantaneous not ensemble avereged distance */
+ /* When rtype=2 use instantaneous not ensemble averaged distance */
bConservative = (npair > 1);
bMixed = FALSE;
Rt = gmx::invsixthroot(Rtl_6[res]);
if (bViolation)
{
+ /* Add 1/npair energy and violation for each of the npair pairs */
+ real pairFac = 1/static_cast<real>(npair);
+
/* NOTE:
* there is no real potential when time averaging is applied
*/
- vtot += 0.5*k0*gmx::square(tav_viol);
- if (1/vtot == 0)
- {
- printf("vtot is inf: %f\n", vtot);
- }
+ vtot += 0.5*k0*gmx::square(tav_viol)*pairFac;
if (!bMixed)
{
f_scal = -k0*tav_viol;
- violtot += fabs(tav_viol);
+ violtot += fabs(tav_viol)*pairFac;
}
else
{
{
mixed_viol = std::sqrt(tav_viol*instant_viol);
f_scal = -k0*mixed_viol;
- violtot += mixed_viol;
+ violtot += mixed_viol*pairFac;
}
}
}
/* Exert the force ... */
- /* Loop over the atom pairs of 'this' restraint */
- for (p = 0; p < npair; p++)
+ int pair = (faOffset + fa)/3;
+ int ai = forceatoms[fa+1];
+ int aj = forceatoms[fa+2];
+ int ki = CENTRAL;
+ if (pbc)
{
- pair = fa/3;
- ai = forceatoms[fa+1];
- aj = forceatoms[fa+2];
+ ki = pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
+ }
+ else
+ {
+ rvec_sub(x[ai], x[aj], dx);
+ }
+ rt2 = iprod(dx, dx);
+
+ weight_rt_1 = gmx::invsqrt(rt2);
- if (pbc)
+ if (bConservative)
+ {
+ if (!dr_bMixed)
{
- ki = pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
+ weight_rt_1 *= std::pow(dd->rm3tav[pair], seven_three);
}
else
{
- rvec_sub(x[ai], x[aj], dx);
- }
- rt2 = iprod(dx, dx);
-
- weight_rt_1 = gmx::invsqrt(rt2);
-
- if (bConservative)
- {
- if (!dr_bMixed)
- {
- weight_rt_1 *= std::pow(dd->rm3tav[pair], seven_three);
- }
- else
- {
- weight_rt_1 *= tav_viol_Rtav7*std::pow(dd->rm3tav[pair], seven_three)+
- instant_viol_Rtav7/(dd->rt[pair]*gmx::power6(dd->rt[pair]));
- }
+ weight_rt_1 *= tav_viol_Rtav7*std::pow(dd->rm3tav[pair], seven_three)+
+ instant_viol_Rtav7/(dd->rt[pair]*gmx::power6(dd->rt[pair]));
}
+ }
- fk_scal = f_scal*weight_rt_1;
+ fk_scal = f_scal*weight_rt_1;
- if (g)
- {
- ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
- ki = IVEC2IS(dt);
- }
+ if (g)
+ {
+ ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
+ ki = IVEC2IS(dt);
+ }
- for (m = 0; m < DIM; m++)
- {
- fij = fk_scal*dx[m];
+ for (int m = 0; m < DIM; m++)
+ {
+ fij = fk_scal*dx[m];
- f[ai][m] += fij;
- f[aj][m] -= fij;
- fshift[ki][m] += fij;
- fshift[CENTRAL][m] -= fij;
- }
- fa += 3;
+ f[ai][m] += fij;
+ f[aj][m] -= fij;
+ fshift[ki][m] += fij;
+ fshift[CENTRAL][m] -= fij;
}
}
- else
- {
- /* No violation so force and potential contributions */
- fa += 3*npair;
- }
- res++;
}
- dd->sumviol = violtot;
+#pragma omp atomic
+ dd->sumviol += violtot;
/* Return energy */
return vtot;
#include "listed-forces.h"
-#include "config.h"
-
#include <assert.h>
#include <algorithm>
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/force_flags.h"
+#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/fcdata.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
(ftype < F_GB12 || ftype > F_GB14);
}
-void calc_listed(const struct gmx_multisim_t *ms,
+void calc_listed(const t_commrec *cr,
struct gmx_wallcycle *wcycle,
const t_idef *idef,
const rvec x[], history_t *hist,
if ((idef->il[F_POSRES].nr > 0) ||
(idef->il[F_FBPOSRES].nr > 0) ||
- (idef->il[F_ORIRES].nr > 0) ||
- (idef->il[F_DISRES].nr > 0))
+ fcd->orires.nr > 0 ||
+ fcd->disres.nres > 0)
{
/* TODO Use of restraints triggers further function calls
inside the loop over calc_one_bond(), but those are too
}
/* Do pre force calculation stuff which might require communication */
- if (idef->il[F_ORIRES].nr > 0)
+ if (fcd->orires.nr > 0)
{
enerd->term[F_ORIRESDEV] =
- calc_orires_dev(ms, idef->il[F_ORIRES].nr,
+ calc_orires_dev(cr->ms, idef->il[F_ORIRES].nr,
idef->il[F_ORIRES].iatoms,
idef->iparams, md, x,
pbc_null, fcd, hist);
}
- if (idef->il[F_DISRES].nr)
+ if (fcd->disres.nres > 0)
{
- calc_disres_R_6(idef->il[F_DISRES].nr,
+ calc_disres_R_6(cr,
+ idef->il[F_DISRES].nr,
idef->il[F_DISRES].iatoms,
- idef->iparams, x, pbc_null,
+ x, pbc_null,
fcd, hist);
-#if GMX_MPI
- if (fcd->disres.nsystems > 1)
- {
- gmx_sum_sim(2*fcd->disres.nres, fcd->disres.Rt_6, ms);
- }
-#endif
}
wallcycle_sub_stop(wcycle, ewcsRESTRAINTS);
do_force_listed(struct gmx_wallcycle *wcycle,
matrix box,
const t_lambda *fepvals,
- const struct gmx_multisim_t *ms,
+ const t_commrec *cr,
const t_idef *idef,
const rvec x[],
history_t *hist,
/* Not enough flops to bother counting */
set_pbc(&pbc_full, fr->ePBC, box);
}
- calc_listed(ms, wcycle, idef, x, hist, f, fr, pbc, &pbc_full,
+ calc_listed(cr, wcycle, idef, x, hist, f, fr, pbc, &pbc_full,
graph, enerd, nrnb, lambda, md, fcd,
global_atom_index, flags);