#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/mdatoms.h"
#include "gromacs/mdlib/mdrun.h"
+#include "gromacs/mdlib/mdsetup.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nsgrid.h"
#include "gromacs/topology/block.h"
#include "gromacs/topology/idef.h"
#include "gromacs/topology/ifunc.h"
+#include "gromacs/topology/mtop_lookup.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/basedefinitions.h"
gmx_incons("The state does not the domain decomposition state");
}
- state->ncg_gl = dd->ncg_home;
- if (state->ncg_gl > state->cg_gl_nalloc)
- {
- state->cg_gl_nalloc = over_alloc_dd(state->ncg_gl);
- srenew(state->cg_gl, state->cg_gl_nalloc);
- }
- for (i = 0; i < state->ncg_gl; i++)
+ state->cg_gl.resize(dd->ncg_home);
+ for (i = 0; i < dd->ncg_home; i++)
{
state->cg_gl[i] = dd->index_gl[i];
}
}
}
+/*! \brief Returns the number of atoms for which mdatoms entries are set.
+ *
+ * This is the last (cumulative) entry of dd->comm->nat, i.e. all atoms
+ * known on this rank, as detailed in the comment in the body.
+ *
+ * \param[in] dd  The domain decomposition struct.
+ */
+int dd_natoms_mdatoms(const gmx_domdec_t *dd)
+{
+ /* We currently set mdatoms entries for all atoms:
+ * local + non-local + communicated for vsite + constraints
+ */
+
+ return dd->comm->nat[ddnatNR - 1];
+}
+
int dd_natoms_vsite(const gmx_domdec_t *dd)
{
return dd->comm->nat[ddnatVSITE];
cgs_gl = &dd->comm->cgs_gl;
- ncg_home = state_local->ncg_gl;
- cg = state_local->cg_gl;
+ ncg_home = state_local->cg_gl.size();
+ cg = state_local->cg_gl.data();
nat_home = 0;
for (i = 0; i < ncg_home; i++)
{
}
static void dd_collect_vec_sendrecv(gmx_domdec_t *dd,
- rvec *lv, rvec *v)
+ const rvec *lv, rvec *v)
{
gmx_domdec_master_t *ma;
int n, i, c, a, nalloc = 0;
if (!DDMASTER(dd))
{
#if GMX_MPI
- MPI_Send(lv, dd->nat_home*sizeof(rvec), MPI_BYTE, DDMASTERRANK(dd),
- dd->rank, dd->mpi_comm_all);
+ MPI_Send(const_cast<void *>(static_cast<const void *>(lv)), dd->nat_home*sizeof(rvec), MPI_BYTE,
+ DDMASTERRANK(dd), dd->rank, dd->mpi_comm_all);
#endif
}
else
}
static void dd_collect_vec_gatherv(gmx_domdec_t *dd,
- rvec *lv, rvec *v)
+ const rvec *lv, rvec *v)
{
gmx_domdec_master_t *ma;
int *rcounts = NULL, *disps = NULL;
}
}
-void dd_collect_vec(gmx_domdec_t *dd,
- t_state *state_local, rvec *lv, rvec *v)
+void dd_collect_vec(gmx_domdec_t *dd,
+ t_state *state_local,
+ const PaddedRVecVector *localVector,
+ rvec *v)
{
dd_collect_cg(dd, state_local);
+ const rvec *lv = as_rvec_array(localVector->data());
+
if (dd->nnodes <= GMX_DD_NNODES_SENDRECV)
{
dd_collect_vec_sendrecv(dd, lv, v);
}
}
+/*! \brief Overload of dd_collect_vec() that writes into a PaddedRVecVector.
+ *
+ * Forwards to the rvec* variant, passing the raw rvec array of \p vector.
+ * NOTE(review): assumes \p vector is already sized to receive the collected
+ * data on the master rank -- confirm with callers.
+ */
+void dd_collect_vec(gmx_domdec_t *dd,
+ t_state *state_local,
+ const PaddedRVecVector *localVector,
+ PaddedRVecVector *vector)
+{
+ dd_collect_vec(dd, state_local, localVector, as_rvec_array(vector->data()));
+}
+
void dd_collect_state(gmx_domdec_t *dd,
t_state *state_local, t_state *state)
switch (est)
{
case estX:
- dd_collect_vec(dd, state_local, state_local->x, state->x);
+ dd_collect_vec(dd, state_local, &state_local->x, &state->x);
break;
case estV:
- dd_collect_vec(dd, state_local, state_local->v, state->v);
+ dd_collect_vec(dd, state_local, &state_local->v, &state->v);
break;
case est_SDX_NOTSUPPORTED:
break;
case estCGP:
- dd_collect_vec(dd, state_local, state_local->cg_p, state->cg_p);
+ dd_collect_vec(dd, state_local, &state_local->cg_p, &state->cg_p);
break;
case estDISRE_INITF:
case estDISRE_RM3TAV:
}
}
-static void dd_realloc_state(t_state *state, rvec **f, int nalloc)
+static void dd_resize_state(t_state *state, PaddedRVecVector *f, int natoms)
{
int est;
if (debug)
{
- fprintf(debug, "Reallocating state: currently %d, required %d, allocating %d\n", state->nalloc, nalloc, over_alloc_dd(nalloc));
+ fprintf(debug, "Resizing state: currently %d, required %d\n", state->natoms, natoms);
}
- state->nalloc = over_alloc_dd(nalloc);
-
for (est = 0; est < estNR; est++)
{
if (EST_DISTR(est) && (state->flags & (1<<est)))
switch (est)
{
case estX:
- srenew(state->x, state->nalloc + 1);
+ state->x.resize(natoms + 1);
break;
case estV:
- srenew(state->v, state->nalloc + 1);
+ state->v.resize(natoms + 1);
break;
case est_SDX_NOTSUPPORTED:
break;
case estCGP:
- srenew(state->cg_p, state->nalloc + 1);
+ state->cg_p.resize(natoms + 1);
break;
case estDISRE_INITF:
case estDISRE_RM3TAV:
/* No reallocation required */
break;
default:
- gmx_incons("Unknown state entry encountered in dd_realloc_state");
+ gmx_incons("Unknown state entry encountered in dd_resize_state");
}
}
}
if (f != NULL)
{
- srenew(*f, state->nalloc);
+ (*f).resize(natoms + 1);
}
}
-static void dd_check_alloc_ncg(t_forcerec *fr, t_state *state, rvec **f,
- int nalloc)
+/*! \brief Ensures capacity for \p numChargeGroups charge groups.
+ *
+ * Grows fr->cginfo (and fr->cg_cm with the group cutoff scheme) with
+ * over-allocation via over_alloc_dd() when the current capacity is too
+ * small. With the Verlet scheme, where x in state is used for the atom
+ * communication instead of charge groups, state and \p f are resized
+ * through dd_resize_state() (now unconditionally; the resize call itself
+ * is expected to handle the no-growth case).
+ */
+static void dd_check_alloc_ncg(t_forcerec *fr,
+ t_state *state,
+ PaddedRVecVector *f,
+ int numChargeGroups)
{
- if (nalloc > fr->cg_nalloc)
+ if (numChargeGroups > fr->cg_nalloc)
{
if (debug)
{
- fprintf(debug, "Reallocating forcerec: currently %d, required %d, allocating %d\n", fr->cg_nalloc, nalloc, over_alloc_dd(nalloc));
+ fprintf(debug, "Reallocating forcerec: currently %d, required %d, allocating %d\n", fr->cg_nalloc, numChargeGroups, over_alloc_dd(numChargeGroups));
}
- fr->cg_nalloc = over_alloc_dd(nalloc);
+ fr->cg_nalloc = over_alloc_dd(numChargeGroups);
srenew(fr->cginfo, fr->cg_nalloc);
if (fr->cutoff_scheme == ecutsGROUP)
{
srenew(fr->cg_cm, fr->cg_nalloc);
}
}
- if (fr->cutoff_scheme == ecutsVERLET && nalloc > state->nalloc)
+ if (fr->cutoff_scheme == ecutsVERLET)
{
/* We don't use charge groups, we use x in state to set up
* the atom communication.
*/
- dd_realloc_state(state, f, nalloc);
+ dd_resize_state(state, f, numChargeGroups);
}
}
static void dd_distribute_dfhist(gmx_domdec_t *dd, df_history_t *dfhist)
{
- int i;
+ if (dfhist == NULL)
+ {
+ return;
+ }
+
dd_bcast(dd, sizeof(int), &dfhist->bEquil);
dd_bcast(dd, sizeof(int), &dfhist->nlambda);
dd_bcast(dd, sizeof(real), &dfhist->wl_delta);
dd_bcast(dd, sizeof(real)*nlam, dfhist->sum_minvar);
dd_bcast(dd, sizeof(real)*nlam, dfhist->sum_variance);
- for (i = 0; i < nlam; i++)
+ for (int i = 0; i < nlam; i++)
{
dd_bcast(dd, sizeof(real)*nlam, dfhist->accum_p[i]);
dd_bcast(dd, sizeof(real)*nlam, dfhist->accum_m[i]);
static void dd_distribute_state(gmx_domdec_t *dd, t_block *cgs,
t_state *state, t_state *state_local,
- rvec **f)
+ PaddedRVecVector *f)
{
int i, j, nh;
copy_mat(state->boxv, state_local->boxv);
copy_mat(state->svir_prev, state_local->svir_prev);
copy_mat(state->fvir_prev, state_local->fvir_prev);
- copy_df_history(&state_local->dfhist, &state->dfhist);
+ if (state->dfhist != NULL)
+ {
+ copy_df_history(state_local->dfhist, state->dfhist);
+ }
for (i = 0; i < state_local->ngtc; i++)
{
for (j = 0; j < nh; j++)
}
}
}
- dd_bcast(dd, ((efptNR)*sizeof(real)), state_local->lambda);
+ dd_bcast(dd, ((efptNR)*sizeof(real)), state_local->lambda.data());
dd_bcast(dd, sizeof(int), &state_local->fep_state);
dd_bcast(dd, sizeof(real), &state_local->veta);
dd_bcast(dd, sizeof(real), &state_local->vol0);
dd_bcast(dd, sizeof(state_local->boxv), state_local->boxv);
dd_bcast(dd, sizeof(state_local->svir_prev), state_local->svir_prev);
dd_bcast(dd, sizeof(state_local->fvir_prev), state_local->fvir_prev);
- dd_bcast(dd, ((state_local->ngtc*nh)*sizeof(double)), state_local->nosehoover_xi);
- dd_bcast(dd, ((state_local->ngtc*nh)*sizeof(double)), state_local->nosehoover_vxi);
- dd_bcast(dd, state_local->ngtc*sizeof(double), state_local->therm_integral);
- dd_bcast(dd, ((state_local->nnhpres*nh)*sizeof(double)), state_local->nhpres_xi);
- dd_bcast(dd, ((state_local->nnhpres*nh)*sizeof(double)), state_local->nhpres_vxi);
+ dd_bcast(dd, ((state_local->ngtc*nh)*sizeof(double)), state_local->nosehoover_xi.data());
+ dd_bcast(dd, ((state_local->ngtc*nh)*sizeof(double)), state_local->nosehoover_vxi.data());
+ dd_bcast(dd, state_local->ngtc*sizeof(double), state_local->therm_integral.data());
+ dd_bcast(dd, ((state_local->nnhpres*nh)*sizeof(double)), state_local->nhpres_xi.data());
+ dd_bcast(dd, ((state_local->nnhpres*nh)*sizeof(double)), state_local->nhpres_vxi.data());
/* communicate df_history -- required for restarting from checkpoint */
- dd_distribute_dfhist(dd, &state_local->dfhist);
+ dd_distribute_dfhist(dd, state_local->dfhist);
+
+ dd_resize_state(state_local, f, dd->nat_home);
- if (dd->nat_home > state_local->nalloc)
- {
- dd_realloc_state(state_local, f, dd->nat_home);
- }
for (i = 0; i < estNR; i++)
{
if (EST_DISTR(i) && (state_local->flags & (1<<i)))
switch (i)
{
case estX:
- dd_distribute_vec(dd, cgs, state->x, state_local->x);
+ dd_distribute_vec(dd, cgs, as_rvec_array(state->x.data()), as_rvec_array(state_local->x.data()));
break;
case estV:
- dd_distribute_vec(dd, cgs, state->v, state_local->v);
+ dd_distribute_vec(dd, cgs, as_rvec_array(state->v.data()), as_rvec_array(state_local->v.data()));
break;
case est_SDX_NOTSUPPORTED:
break;
case estCGP:
- dd_distribute_vec(dd, cgs, state->cg_p, state_local->cg_p);
+ dd_distribute_vec(dd, cgs, as_rvec_array(state->cg_p.data()), as_rvec_array(state_local->cg_p.data()));
break;
case estDISRE_INITF:
case estDISRE_RM3TAV:
char fname[STRLEN], buf[22];
FILE *out;
int i, ii, resnr, c;
- char *atomname, *resname;
+ const char *atomname, *resname;
real b;
gmx_domdec_t *dd;
fprintf(out, "TITLE %s\n", title);
gmx_write_pdb_box(out, dd->bScrewPBC ? epbcSCREW : epbcXYZ, box);
+ int molb = 0;
for (i = 0; i < natoms; i++)
{
ii = dd->gatindex[i];
- gmx_mtop_atominfo_global(mtop, ii, &atomname, &resnr, &resname);
+ mtopGetAtomAndResidueName(mtop, ii, &molb, &atomname, &resnr, &resname, nullptr);
if (i < dd->comm->nat[ddnatZONE])
{
c = 0;
}
static void rebuild_cgindex(gmx_domdec_t *dd,
- const int *gcgs_index, t_state *state)
+ const int *gcgs_index, const t_state *state)
{
- int nat, i, *ind, *dd_cg_gl, *cgindex, cg_gl;
+ int * gmx_restrict dd_cg_gl = dd->index_gl;
+ int * gmx_restrict cgindex = dd->cgindex;
+ int nat = 0;
- ind = state->cg_gl;
- dd_cg_gl = dd->index_gl;
- cgindex = dd->cgindex;
- nat = 0;
+ /* Copy back the global charge group indices from state
+ * and rebuild the local charge group to atom index.
+ */
cgindex[0] = nat;
- for (i = 0; i < state->ncg_gl; i++)
+ for (unsigned int i = 0; i < state->cg_gl.size(); i++)
{
cgindex[i] = nat;
- cg_gl = ind[i];
+ int cg_gl = state->cg_gl[i];
dd_cg_gl[i] = cg_gl;
nat += gcgs_index[cg_gl+1] - gcgs_index[cg_gl];
}
- cgindex[i] = nat;
+ cgindex[state->cg_gl.size()] = nat;
- dd->ncg_home = state->ncg_gl;
+ dd->ncg_home = state->cg_gl.size();
dd->nat_home = nat;
set_zones_ncg_home(dd);
if (pos_d >= limit1[d])
{
cg_move_error(fplog, dd, step, cg, d, 1,
- cg_cm != state->x, limitd[d],
+ cg_cm != as_rvec_array(state->x.data()), limitd[d],
cg_cm[cg], cm_new, pos_d);
}
dev[d] = 1;
if (pos_d < limit0[d])
{
cg_move_error(fplog, dd, step, cg, d, -1,
- cg_cm != state->x, limitd[d],
+ cg_cm != as_rvec_array(state->x.data()), limitd[d],
cg_cm[cg], cm_new, pos_d);
}
dev[d] = -1;
static void dd_redistribute_cg(FILE *fplog, gmx_int64_t step,
gmx_domdec_t *dd, ivec tric_dir,
- t_state *state, rvec **f,
+ t_state *state, PaddedRVecVector *f,
t_forcerec *fr,
gmx_bool bCompact,
t_nrnb *nrnb,
cgindex,
( thread *dd->ncg_home)/nthread,
((thread+1)*dd->ncg_home)/nthread,
- fr->cutoff_scheme == ecutsGROUP ? cg_cm : state->x,
+ fr->cutoff_scheme == ecutsGROUP ? cg_cm : as_rvec_array(state->x.data()),
move);
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
*/
home_pos_cg =
compact_and_copy_vec_cg(dd->ncg_home, move, cgindex,
- nvec, state->x, comm, FALSE);
+ nvec, as_rvec_array(state->x.data()), comm, FALSE);
if (bCompact)
{
home_pos_cg -= *ncg_moved;
vec = 0;
home_pos_at =
compact_and_copy_vec_at(dd->ncg_home, move, cgindex,
- nvec, vec++, state->x, comm, bCompact);
+ nvec, vec++, as_rvec_array(state->x.data()),
+ comm, bCompact);
if (bV)
{
compact_and_copy_vec_at(dd->ncg_home, move, cgindex,
- nvec, vec++, state->v, comm, bCompact);
+ nvec, vec++, as_rvec_array(state->v.data()),
+ comm, bCompact);
}
if (bCGP)
{
compact_and_copy_vec_at(dd->ncg_home, move, cgindex,
- nvec, vec++, state->cg_p, comm, bCompact);
+ nvec, vec++, as_rvec_array(state->cg_p.data()),
+ comm, bCompact);
}
if (bCompact)
nvr += i;
}
+ dd_check_alloc_ncg(fr, state, f, home_pos_cg + ncg_recv);
+ if (fr->cutoff_scheme == ecutsGROUP)
+ {
+ /* Here we resize to more than necessary and shrink later */
+ dd_resize_state(state, f, home_pos_at + ncg_recv*MAX_CGCGSIZE);
+ }
+
/* Process the received charge groups */
buf_pos = 0;
for (cg = 0; cg < ncg_recv; cg++)
dd->index_gl[home_pos_cg] = comm->buf_int[cg*DD_CGIBS];
dd->cgindex[home_pos_cg+1] = dd->cgindex[home_pos_cg] + nrcg;
/* Copy the state from the buffer */
- dd_check_alloc_ncg(fr, state, f, home_pos_cg+1);
if (fr->cutoff_scheme == ecutsGROUP)
{
cg_cm = fr->cg_cm;
comm->bLocalCG[dd->index_gl[home_pos_cg]] = TRUE;
}
- if (home_pos_at+nrcg > state->nalloc)
- {
- dd_realloc_state(state, f, home_pos_at+nrcg);
- }
for (i = 0; i < nrcg; i++)
{
copy_rvec(comm->vbuf.v[buf_pos++],
dd->ncg_home = home_pos_cg;
dd->nat_home = home_pos_at;
+ if (fr->cutoff_scheme == ecutsGROUP && !bCompact)
+ {
+ /* We overallocated before, we need to set the right size here */
+ dd_resize_state(state, f, dd->nat_home);
+ }
+
if (debug)
{
fprintf(debug,
dd = cr->dd;
set_ddbox(dd, FALSE, cr, ir, state->box,
- TRUE, &dd->comm->cgs_gl, state->x, &ddbox);
+ TRUE, &dd->comm->cgs_gl, as_rvec_array(state->x.data()), &ddbox);
LocallyLimited = 0;
static void setup_dd_communication(gmx_domdec_t *dd,
matrix box, gmx_ddbox_t *ddbox,
- t_forcerec *fr, t_state *state, rvec **f)
+ t_forcerec *fr,
+ t_state *state, PaddedRVecVector *f)
{
int dim_ind, dim, dim0, dim1, dim2, dimd, p, nat_tot;
int nzone, nzone_send, zone, zonei, cg0, cg1;
cg_cm = fr->cg_cm;
break;
case ecutsVERLET:
- cg_cm = state->x;
+ cg_cm = as_rvec_array(state->x.data());
break;
default:
gmx_incons("unimplemented");
}
else
{
- cg_cm = state->x;
+ cg_cm = as_rvec_array(state->x.data());
}
/* Communicate cg_cm */
if (cd->bInPlace)
switch (i)
{
case estX:
- order_vec_atom(dd->ncg_home, cgindex, cgsort, state->x, vbuf);
+ order_vec_atom(dd->ncg_home, cgindex, cgsort, as_rvec_array(state->x.data()), vbuf);
break;
case estV:
- order_vec_atom(dd->ncg_home, cgindex, cgsort, state->v, vbuf);
+ order_vec_atom(dd->ncg_home, cgindex, cgsort, as_rvec_array(state->v.data()), vbuf);
break;
case est_SDX_NOTSUPPORTED:
break;
case estCGP:
- order_vec_atom(dd->ncg_home, cgindex, cgsort, state->cg_p, vbuf);
+ order_vec_atom(dd->ncg_home, cgindex, cgsort, as_rvec_array(state->cg_p.data()), vbuf);
break;
case estLD_RNG:
case estLD_RNGI:
const gmx_mtop_t *top_global,
const t_inputrec *ir,
t_state *state_local,
- rvec **f,
+ PaddedRVecVector *f,
t_mdatoms *mdatoms,
gmx_localtop_t *top_local,
t_forcerec *fr,
ncgindex_set = 0;
set_ddbox(dd, bMasterState, cr, ir, state_global->box,
- TRUE, cgs_gl, state_global->x, &ddbox);
+ TRUE, cgs_gl, as_rvec_array(state_global->x.data()), &ddbox);
get_cg_distribution(fplog, dd, cgs_gl,
- state_global->box, &ddbox, state_global->x);
+ state_global->box, &ddbox, as_rvec_array(state_global->x.data()));
dd_distribute_state(dd, cgs_gl,
state_global, state_local, f);
if (fr->cutoff_scheme == ecutsGROUP)
{
calc_cgcm(fplog, 0, dd->ncg_home,
- &top_local->cgs, state_local->x, fr->cg_cm);
+ &top_local->cgs, as_rvec_array(state_local->x.data()), fr->cg_cm);
}
inc_nrnb(nrnb, eNR_CGCM, dd->nat_home);
{
/* Redetermine the cg COMs */
calc_cgcm(fplog, 0, dd->ncg_home,
- &top_local->cgs, state_local->x, fr->cg_cm);
+ &top_local->cgs, as_rvec_array(state_local->x.data()), fr->cg_cm);
}
inc_nrnb(nrnb, eNR_CGCM, dd->nat_home);
dd_set_cginfo(dd->index_gl, 0, dd->ncg_home, fr, comm->bLocalCG);
set_ddbox(dd, bMasterState, cr, ir, state_local->box,
- TRUE, &top_local->cgs, state_local->x, &ddbox);
+ TRUE, &top_local->cgs, as_rvec_array(state_local->x.data()), &ddbox);
bRedist = dlbIsOn(comm);
}
copy_rvec(comm->box_size, ddbox.box_size);
}
set_ddbox(dd, bMasterState, cr, ir, state_local->box,
- bNStGlobalComm, &top_local->cgs, state_local->x, &ddbox);
+ bNStGlobalComm, &top_local->cgs, as_rvec_array(state_local->x.data()), &ddbox);
bBoxChanged = TRUE;
bRedist = TRUE;
0, dd->ncg_home,
comm->zones.dens_zone0,
fr->cginfo,
- state_local->x,
+ as_rvec_array(state_local->x.data()),
ncg_moved, bRedist ? comm->moved : NULL,
fr->nbv->grp[eintLocal].kernel_type,
fr->nbv->grp[eintLocal].nbat);
}
dd_sort_state(dd, fr->cg_cm, fr, state_local,
bResortAll ? -1 : ncg_home_old);
+
+ /* After sorting and compacting we set the correct size */
+ dd_resize_state(state_local, f, dd->nat_home);
+
/* Rebuild all the indices */
ga2la_clear(dd->ga2la);
ncgindex_set = 0;
/*
write_dd_pdb("dd_home",step,"dump",top_global,cr,
- -1,state_local->x,state_local->box);
+ -1,as_rvec_array(state_local->x.data()),state_local->box);
*/
wallcycle_sub_start(wcycle, ewcsDD_MAKETOP);
dd_make_local_top(dd, &comm->zones, dd->npbcdim, state_local->box,
comm->cellsize_min, np,
fr,
- fr->cutoff_scheme == ecutsGROUP ? fr->cg_cm : state_local->x,
+ fr->cutoff_scheme == ecutsGROUP ? fr->cg_cm : as_rvec_array(state_local->x.data()),
vsite, top_global, top_local);
wallcycle_sub_stop(wcycle, ewcsDD_MAKETOP);
* or constraint communication.
*/
state_local->natoms = comm->nat[ddnatNR-1];
- if (state_local->natoms > state_local->nalloc)
- {
- dd_realloc_state(state_local, f, state_local->natoms);
- }
+
+ dd_resize_state(state_local, f, state_local->natoms);
if (fr->bF_NoVirSum)
{
forcerec_set_ranges(fr, dd->ncg_home, dd->ncg_tot,
dd->nat_tot, comm->nat[ddnatCON], nat_f_novirsum);
- /* We make the all mdatoms up to nat_tot_con.
- * We could save some work by only setting invmass
- * between nat_tot and nat_tot_con.
- */
- /* This call also sets the new number of home particles to dd->nat_home */
- atoms2md(top_global, ir,
- comm->nat[ddnatCON], dd->gatindex, dd->nat_home, mdatoms);
-
- /* Now we have the charges we can sort the FE interactions */
- dd_sort_local_top(dd, mdatoms, top_local);
-
- if (vsite != NULL)
- {
- /* Now we have updated mdatoms, we can do the last vsite bookkeeping */
- split_vsites_over_threads(top_local->idef.il, top_local->idef.iparams,
- mdatoms, FALSE, vsite);
- }
+ /* Update atom data for mdatoms and several algorithms */
+ mdAlgorithmsSetupAtomData(cr, ir, top_global, top_local, fr,
+ NULL, mdatoms, vsite, NULL);
if (ir->implicit_solvent)
{
make_local_gb(cr, fr->born, ir->gb_algorithm);
}
- setup_bonded_threading(fr, &top_local->idef);
-
if (!(cr->duty & DUTY_PME))
{
/* Send the charges and/or c6/sigmas to our PME only node */
* the last vsite construction, we need to communicate the constructing
* atom coordinates again (for spreading the forces this MD step).
*/
- dd_move_x_vsites(dd, state_local->box, state_local->x);
+ dd_move_x_vsites(dd, state_local->box, as_rvec_array(state_local->x.data()));
wallcycle_sub_stop(wcycle, ewcsDD_TOPOTHER);
if (comm->nstDDDump > 0 && step % comm->nstDDDump == 0)
{
- dd_move_x(dd, state_local->box, state_local->x);
+ dd_move_x(dd, state_local->box, as_rvec_array(state_local->x.data()));
write_dd_pdb("dd_dump", step, "dump", top_global, cr,
- -1, state_local->x, state_local->box);
+ -1, as_rvec_array(state_local->x.data()), state_local->box);
}
/* Store the partitioning step */