dd->comm->master_cg_ddp_count = state_local->ddp_count;
}
-static void dd_collect_vec_sendrecv(gmx_domdec_t *dd,
- const rvec *lv, rvec *v)
+static void dd_collect_vec_sendrecv(gmx_domdec_t *dd,
+ gmx::ArrayRef<const gmx::RVec> lv,
+ gmx::ArrayRef<gmx::RVec> v)
{
gmx_domdec_master_t *ma;
int n, i, c, a, nalloc = 0;
if (!DDMASTER(dd))
{
#if GMX_MPI
- MPI_Send(const_cast<void *>(static_cast<const void *>(lv)), dd->nat_home*sizeof(rvec), MPI_BYTE,
+ MPI_Send(const_cast<void *>(static_cast<const void *>(lv.data())), dd->nat_home*sizeof(rvec), MPI_BYTE,
DDMASTERRANK(dd), dd->rank, dd->mpi_comm_all);
#endif
}
}
}
-static void dd_collect_vec_gatherv(gmx_domdec_t *dd,
- const rvec *lv, rvec *v)
+static void dd_collect_vec_gatherv(gmx_domdec_t *dd,
+ gmx::ArrayRef<const gmx::RVec> lv,
+ gmx::ArrayRef<gmx::RVec> v)
{
gmx_domdec_master_t *ma;
int *rcounts = nullptr, *disps = nullptr;
buf = ma->vbuf;
}
- dd_gatherv(dd, dd->nat_home*sizeof(rvec), lv, rcounts, disps, buf);
+ dd_gatherv(dd, dd->nat_home*sizeof(rvec), lv.data(), rcounts, disps, buf);
if (DDMASTER(dd))
{
}
}
-void dd_collect_vec(gmx_domdec_t *dd,
- const t_state *state_local,
- const PaddedRVecVector *localVector,
- rvec *v)
+void dd_collect_vec(gmx_domdec_t *dd,
+ const t_state *state_local,
+ gmx::ArrayRef<const gmx::RVec> lv,
+ gmx::ArrayRef<gmx::RVec> v)
{
dd_collect_cg(dd, state_local);
- const rvec *lv = as_rvec_array(localVector->data());
-
if (dd->nnodes <= GMX_DD_NNODES_SENDRECV)
{
dd_collect_vec_sendrecv(dd, lv, v);
}
}
-void dd_collect_vec(gmx_domdec_t *dd,
- const t_state *state_local,
- const PaddedRVecVector *localVector,
- PaddedRVecVector *vector)
-{
- dd_collect_vec(dd, state_local, localVector,
- DDMASTER(dd) ? as_rvec_array(vector->data()) : nullptr);
-}
-
void dd_collect_state(gmx_domdec_t *dd,
const t_state *state_local, t_state *state)
}
if (state_local->flags & (1 << estX))
{
- dd_collect_vec(dd, state_local, &state_local->x, &state->x);
+ gmx::ArrayRef<gmx::RVec> globalXRef = state ? gmx::makeArrayRef(state->x) : gmx::EmptyArrayRef();
+ dd_collect_vec(dd, state_local, state_local->x, globalXRef);
}
if (state_local->flags & (1 << estV))
{
- dd_collect_vec(dd, state_local, &state_local->v, &state->v);
+ gmx::ArrayRef<gmx::RVec> globalVRef = state ? gmx::makeArrayRef(state->v) : gmx::EmptyArrayRef();
+ dd_collect_vec(dd, state_local, state_local->v, globalVRef);
}
if (state_local->flags & (1 << estCGP))
{
- dd_collect_vec(dd, state_local, &state_local->cg_p, &state->cg_p);
+ gmx::ArrayRef<gmx::RVec> globalCgpRef = state ? gmx::makeArrayRef(state->cg_p) : gmx::EmptyArrayRef();
+ dd_collect_vec(dd, state_local, state_local->cg_p, globalCgpRef);
}
}
#include "gromacs/topology/block.h"
#include "gromacs/topology/idef.h"
#include "gromacs/topology/topology.h"
+#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"
int gpu_id);
/*! \brief Collects local rvec arrays \p lv to \p v on the master rank */
-void dd_collect_vec(struct gmx_domdec_t *dd,
- const t_state *state_local,
- const PaddedRVecVector *lv,
- rvec *v);
-
-/*! \brief Collects local rvec arrays \p lv to \p v on the master rank */
-void dd_collect_vec(struct gmx_domdec_t *dd,
- const t_state *state_local,
- const PaddedRVecVector *lv,
- PaddedRVecVector *v);
+void dd_collect_vec(struct gmx_domdec_t *dd,
+ const t_state *state_local,
+ gmx::ArrayRef<const gmx::RVec> lv,
+ gmx::ArrayRef<gmx::RVec> v);
/*! \brief Collects the local state \p state_local to \p state on the master rank */
void dd_collect_state(struct gmx_domdec_t *dd,
* and std::vector<real>. Using real * is deprecated and this routine
* will simplify a lot when only std::vector needs to be supported.
*
+ * The routine is generic to vectors with different allocators,
+ * because as part of reading a checkpoint there are vectors whose
+ * size is not known until reading has progressed far enough, so a
+ * resize method must be called.
+ *
* When not listing, we use either v or vector, depending on which is !=NULL.
* If nval >= 0, nval is used; on read this should match the passed value.
 * If nval < 0, *nptr (with v) or vector->size() is used. On read using v,
* the value is stored in nptr
*/
-template<typename T>
+template<typename T, typename AllocatorType>
static int doVectorLow(XDR *xd, StatePart part, int ecpt, int sflags,
int nval, int *nptr,
- T **v, std::vector<T> *vector,
+ T **v, std::vector<T, AllocatorType> *vector,
FILE *list, CptElementType cptElementType)
{
GMX_RELEASE_ASSERT(list != nullptr || (v != nullptr && vector == nullptr) || (v == nullptr && vector != nullptr), "Without list, we should have exactly one of v and vector != NULL");
gmx::ArrayRef<real> vector, FILE *list)
{
real *v_real = vector.data();
- return doVectorLow<real>(xd, part, ecpt, sflags, vector.size(), nullptr, &v_real, nullptr, list, CptElementType::real);
+ return doVectorLow < real, std::allocator < real>>(xd, part, ecpt, sflags, vector.size(), nullptr, &v_real, nullptr, list, CptElementType::real);
}
-//! \brief Read/Write a PaddedRVecVector.
-static int doPaddedRvecVector(XDR *xd, StatePart part, int ecpt, int sflags,
- gmx::PaddedRVecVector *v, int numAtoms, FILE *list)
+//! \brief Read/Write a vector whose value_type is RVec and whose allocator is \c AllocatorType.
+template <typename AllocatorType>
+static int doRvecVector(XDR *xd, StatePart part, int ecpt, int sflags,
+ std::vector<gmx::RVec, AllocatorType> *v, int numAtoms, FILE *list)
{
const int numReals = numAtoms*DIM;
}
else
{
- return doVectorLow<real>(xd, part, ecpt, sflags, numReals, nullptr, nullptr, nullptr, list, CptElementType::real);
+ // Use the rebind facility to change the value_type of the
+ // allocator from RVec to real.
+ using realAllocator = typename AllocatorType::template rebind<real>::other;
+ return doVectorLow<real, realAllocator>(xd, part, ecpt, sflags, numReals, nullptr, nullptr, nullptr, list, CptElementType::real);
}
}
static int do_cpte_reals(XDR *xd, StatePart part, int ecpt, int sflags,
int n, real **v, FILE *list)
{
- return doVectorLow<real>(xd, part, ecpt, sflags, n, nullptr, v, nullptr, list, CptElementType::real);
+ return doVectorLow < real, std::allocator < real>>(xd, part, ecpt, sflags, n, nullptr, v, nullptr, list, CptElementType::real);
}
/* This function does the same as do_cpte_reals,
static int do_cpte_n_reals(XDR *xd, StatePart part, int ecpt, int sflags,
int *n, real **v, FILE *list)
{
- return doVectorLow<real>(xd, part, ecpt, sflags, -1, n, v, nullptr, list, CptElementType::real);
+ return doVectorLow < real, std::allocator < real>>(xd, part, ecpt, sflags, -1, n, v, nullptr, list, CptElementType::real);
}
static int do_cpte_real(XDR *xd, StatePart part, int ecpt, int sflags,
real *r, FILE *list)
{
- return doVectorLow<real>(xd, part, ecpt, sflags, 1, nullptr, &r, nullptr, list, CptElementType::real);
+ return doVectorLow < real, std::allocator < real>>(xd, part, ecpt, sflags, 1, nullptr, &r, nullptr, list, CptElementType::real);
}
static int do_cpte_ints(XDR *xd, StatePart part, int ecpt, int sflags,
int n, int **v, FILE *list)
{
- return doVectorLow<int>(xd, part, ecpt, sflags, n, nullptr, v, nullptr, list, CptElementType::integer);
+ return doVectorLow < int, std::allocator < int>>(xd, part, ecpt, sflags, n, nullptr, v, nullptr, list, CptElementType::integer);
}
static int do_cpte_int(XDR *xd, StatePart part, int ecpt, int sflags,
static int do_cpte_doubles(XDR *xd, StatePart part, int ecpt, int sflags,
int n, double **v, FILE *list)
{
- return doVectorLow<double>(xd, part, ecpt, sflags, n, nullptr, v, nullptr, list, CptElementType::real);
+ return doVectorLow < double, std::allocator < double>>(xd, part, ecpt, sflags, n, nullptr, v, nullptr, list, CptElementType::real);
}
static int do_cpte_double(XDR *xd, StatePart part, int ecpt, int sflags,
int ret;
vr = &(v[0][0]);
- ret = doVectorLow<real>(xd, part, ecpt, sflags,
- DIM*DIM, nullptr, &vr, nullptr, nullptr, CptElementType::matrix3x3);
+ ret = doVectorLow < real, std::allocator < real>>(xd, part, ecpt, sflags,
+ DIM*DIM, nullptr, &vr, nullptr, nullptr, CptElementType::matrix3x3);
if (list && ret == 0)
{
}
for (i = 0; i < n; i++)
{
- reti = doVectorLow<real>(xd, part, ecpt, sflags, n, nullptr, &(v[i]), nullptr, nullptr, CptElementType::matrix3x3);
+ reti = doVectorLow < real, std::allocator < real>>(xd, part, ecpt, sflags, n, nullptr, &(v[i]), nullptr, nullptr, CptElementType::matrix3x3);
if (list && reti == 0)
{
sprintf(name, "%s[%d]", entryName(part, ecpt), i);
}
}
}
- ret = doVectorLow<real>(xd, part, ecpt, sflags,
- nf*DIM*DIM, nullptr, &vr, nullptr, nullptr,
- CptElementType::matrix3x3);
+ ret = doVectorLow < real, std::allocator < real>>(xd, part, ecpt, sflags,
+ nf*DIM*DIM, nullptr, &vr, nullptr, nullptr,
+ CptElementType::matrix3x3);
for (i = 0; i < nf; i++)
{
for (j = 0; j < DIM; j++)
case estBAROS_INT: ret = do_cpte_double(xd, part, i, sflags, &state->baros_integral, list); break;
case estVETA: ret = do_cpte_real(xd, part, i, sflags, &state->veta, list); break;
case estVOL0: ret = do_cpte_real(xd, part, i, sflags, &state->vol0, list); break;
- case estX: ret = doPaddedRvecVector(xd, part, i, sflags, &state->x, state->natoms, list); break;
- case estV: ret = doPaddedRvecVector(xd, part, i, sflags, &state->v, state->natoms, list); break;
+ case estX: ret = doRvecVector(xd, part, i, sflags, &state->x, state->natoms, list); break;
+ case estV: ret = doRvecVector(xd, part, i, sflags, &state->v, state->natoms, list); break;
/* The RNG entries are no longer written,
* the next 4 lines are only for reading old files.
*/
fr->bX = (state.flags & (1<<estX));
if (fr->bX)
{
- fr->x = getRvecArrayFromPaddedRVecVector(&state.x, state.natoms);
+ fr->x = makeRvecArray(state.x, state.natoms);
}
fr->bV = (state.flags & (1<<estV));
if (fr->bV)
{
- fr->v = getRvecArrayFromPaddedRVecVector(&state.v, state.natoms);
+ fr->v = makeRvecArray(state.v, state.natoms);
}
fr->bF = FALSE;
fr->bBox = (state.flags & (1<<estBOX));
#include "gromacs/topology/index.h"
#include "gromacs/topology/topology.h"
#include "gromacs/trajectory/trajectoryframe.h"
+#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/arraysize.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
{
int i, j;
int d;
- rvec com, new_com, shift, box_center;
+ rvec com, shift, box_center;
real m;
double mtot;
t_pbc pbc;
svmul(1.0/mtot, com, com);
/* check if COM is outside box */
- copy_rvec(com, new_com);
+ gmx::RVec newCom;
+ copy_rvec(com, newCom);
+ auto newComArrayRef = gmx::arrayRefFromArray(&newCom, 1);
switch (unitcell_enum)
{
case euRect:
- put_atoms_in_box(ePBC, box, 1, &new_com);
+ put_atoms_in_box(ePBC, box, newComArrayRef);
break;
case euTric:
- put_atoms_in_triclinic_unitcell(ecenter, box, 1, &new_com);
+ put_atoms_in_triclinic_unitcell(ecenter, box, newComArrayRef);
break;
case euCompact:
- put_atoms_in_compact_unitcell(ePBC, ecenter, box, 1, &new_com);
+ put_atoms_in_compact_unitcell(ePBC, ecenter, box, newComArrayRef);
break;
}
- rvec_sub(new_com, com, shift);
+ rvec_sub(newCom, com, shift);
if (norm2(shift) > 0)
{
if (debug)
int d, presnr;
real m;
double mtot;
- rvec box_center, com, new_com, shift;
+ rvec box_center, com, shift;
static const int NOTSET = -12347;
calc_box_center(ecenter, box, box_center);
svmul(1.0/mtot, com, com);
/* check if COM is outside box */
- copy_rvec(com, new_com);
+ gmx::RVec newCom;
+ copy_rvec(com, newCom);
+ auto newComArrayRef = gmx::arrayRefFromArray(&newCom, 1);
switch (unitcell_enum)
{
case euRect:
- put_atoms_in_box(ePBC, box, 1, &new_com);
+ put_atoms_in_box(ePBC, box, newComArrayRef);
break;
case euTric:
- put_atoms_in_triclinic_unitcell(ecenter, box, 1, &new_com);
+ put_atoms_in_triclinic_unitcell(ecenter, box, newComArrayRef);
break;
case euCompact:
- put_atoms_in_compact_unitcell(ePBC, ecenter, box, 1, &new_com);
+ put_atoms_in_compact_unitcell(ePBC, ecenter, box, newComArrayRef);
break;
}
- rvec_sub(new_com, com, shift);
+ rvec_sub(newCom, com, shift);
if (norm2(shift))
{
if (debug)
}
}
+ auto positionsArrayRef = gmx::arrayRefFromArray(reinterpret_cast<gmx::RVec *>(fr.x), natoms);
if (bPBCcomAtom)
{
switch (unitcell_enum)
{
case euRect:
- put_atoms_in_box(ePBC, fr.box, natoms, fr.x);
+ put_atoms_in_box(ePBC, fr.box, positionsArrayRef);
break;
case euTric:
- put_atoms_in_triclinic_unitcell(ecenter, fr.box, natoms, fr.x);
+ put_atoms_in_triclinic_unitcell(ecenter, fr.box, positionsArrayRef);
break;
case euCompact:
put_atoms_in_compact_unitcell(ePBC, ecenter, fr.box,
- natoms, fr.x);
+ positionsArrayRef);
break;
}
}
#include "gromacs/math/vectypes.h"
#include "gromacs/utility/alignedallocator.h"
+#include "gromacs/utility/arrayref.h"
namespace gmx
{
* by automated padding on resize() of the vector.
* \todo Undo the move of allocator.h and alignedallocator.h from the internal
* to be public API applied in Change-Id: Ifb8dacf, needed to use
- * AlginedAllocationPolicy here, when replacing std::vector here.
+ * AlignedAllocationPolicy here, when replacing std::vector here.
*/
-using PaddedRVecVector = std::vector < gmx::RVec, gmx::Allocator < gmx::RVec, AlignedAllocationPolicy > >;
+using PaddedRVecVector = std::vector < RVec, Allocator < RVec, AlignedAllocationPolicy > >;
/*! \brief Returns the padded size for PaddedRVecVector given the number of atoms
*
return std::max(simdScatterAccessSize, simdFlatAccesSize);
}
+/*! \brief Temporary definition of a type usable for SIMD-style loads of RVec quantities from a view.
+ *
+ * \todo Find a more permanent solution that permits the update code to safely
+ * use a padded, aligned array-ref type. */
+template <typename T>
+using PaddedArrayRef = ArrayRef<T>;
+
} // namespace gmx
-// TODO This is a hack to avoid littering gmx:: all over code that is
+// TODO These are hacks to avoid littering gmx:: all over code that is
// almost all destined to move into the gmx namespace at some point.
// An alternative would be about 20 files with using statements.
using gmx::PaddedRVecVector;
}
}
-static void bcastPaddedRVecVector(const t_commrec *cr, PaddedRVecVector *v, int numAtoms)
+template <typename AllocatorType>
+static void bcastPaddedRVecVector(const t_commrec *cr, std::vector<gmx::RVec, AllocatorType> *v, int numAtoms)
{
v->resize(gmx::paddedRVecVectorSize(numAtoms));
nblock_bc(cr, numAtoms, as_rvec_array(v->data()));
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2012,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2012,2014,2015,2017, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
-void calc_mu(int start, int homenr, rvec x[], real q[], real qB[],
+void calc_mu(int start, int homenr, gmx::ArrayRef<gmx::RVec> x, real q[], real qB[],
int nChargePerturbed,
dvec mu, dvec mu_B)
{
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2010,2014,2015, by the GROMACS development team, led by
+ * Copyright (c) 2010,2014,2015,2017, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include <stdio.h>
#include "gromacs/math/vectypes.h"
+#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void calc_mu(int start, int homenr, rvec x[], real q[], real qB[],
+void calc_mu(int start, int homenr, gmx::ArrayRef<gmx::RVec> x, real q[], real qB[],
int nChargePerturbed,
dvec mu, dvec mu_B);
gmx_bool read_mu(FILE *fp, rvec mu, real *vol);
/* Return true on success */
-#ifdef __cplusplus
-}
-#endif
-
#endif
gmx_int64_t step, struct t_nrnb *nrnb, gmx_wallcycle_t wcycle,
gmx_localtop_t *top,
gmx_groups_t *groups,
- matrix box, PaddedRVecVector *coordinates, history_t *hist,
- PaddedRVecVector *force,
+ matrix box, gmx::PaddedArrayRef<gmx::RVec> coordinates, history_t *hist,
+ gmx::PaddedArrayRef<gmx::RVec> force,
tensor vir_force,
t_mdatoms *mdatoms,
gmx_enerdata_t *enerd, t_fcdata *fcd,
gmx_int64_t step, double t,
t_state *state_local, t_state *state_global,
ObservablesHistory *observablesHistory,
- PaddedRVecVector *f_local)
+ gmx::ArrayRef<gmx::RVec> f_local)
{
rvec *f_global;
{
if (mdof_flags & (MDOF_X | MDOF_X_COMPRESSED))
{
- dd_collect_vec(cr->dd, state_local, &state_local->x,
- MASTER(cr) ? &state_global->x : nullptr);
+ gmx::ArrayRef<gmx::RVec> globalXRef = MASTER(cr) ? gmx::makeArrayRef(state_global->x) : gmx::EmptyArrayRef();
+ dd_collect_vec(cr->dd, state_local, state_local->x, globalXRef);
}
if (mdof_flags & MDOF_V)
{
- dd_collect_vec(cr->dd, state_local, &state_local->v,
- MASTER(cr) ? &state_global->v : nullptr);
+ gmx::ArrayRef<gmx::RVec> globalVRef = MASTER(cr) ? gmx::makeArrayRef(state_global->v) : gmx::EmptyArrayRef();
+ dd_collect_vec(cr->dd, state_local, state_local->v, globalVRef);
}
}
f_global = of->f_global;
if (mdof_flags & MDOF_F)
{
- dd_collect_vec(cr->dd, state_local, f_local, f_global);
+ dd_collect_vec(cr->dd, state_local, f_local, gmx::arrayRefFromArray(reinterpret_cast<gmx::RVec *>(f_global), f_local.size()));
}
}
else
/* We have the whole state locally: copy the local state pointer */
state_global = state_local;
- f_global = as_rvec_array(f_local->data());
+ f_global = as_rvec_array(f_local.data());
}
if (MASTER(cr))
#include "gromacs/math/paddedvector.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/timing/wallcycle.h"
+#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
class energyhistory_t;
gmx_int64_t step, double t,
t_state *state_local, t_state *state_global,
ObservablesHistory *observablesHistory,
- PaddedRVecVector *f_local);
+ gmx::ArrayRef<gmx::RVec> f_local);
#define MDOF_X (1<<0)
#define MDOF_V (1<<1)
mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags,
top_global, step, (double)step,
&state->s, state_global, observablesHistory,
- &state->f);
+ state->f);
if (confout != nullptr && MASTER(cr))
{
*/
do_force(fplog, cr, inputrec,
count, nrnb, wcycle, top, &top_global->groups,
- ems->s.box, &ems->s.x, &ems->s.hist,
- &ems->f, force_vir, mdAtoms->mdatoms(), enerd, fcd,
+ ems->s.box, ems->s.x, &ems->s.hist,
+ ems->f, force_vir, mdAtoms->mdatoms(), enerd, fcd,
ems->s.lambda, graph, fr, vsite, mu_tot, t, nullptr, TRUE,
GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES |
GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY |
}
mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags,
- top_global, step, (real)step, &ems.s, state_global, observablesHistory, &ems.f);
+ top_global, step, (real)step, &ems.s, state_global, observablesHistory, ems.f);
/* Do the linesearching in the direction dx[point][0..(n-1)] */
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/topology/mtop_lookup.h"
#include "gromacs/topology/mtop_util.h"
+#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/arraysize.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
xnew[ZZ] = zo+dz;
}
-static void directional_sd(const PaddedRVecVector *xold, PaddedRVecVector *xnew, const rvec acc_dir[],
- int homenr, real step)
+static void directional_sd(gmx::ArrayRef<const gmx::RVec> xold,
+ gmx::ArrayRef<gmx::RVec> xnew,
+ const rvec acc_dir[], int homenr, real step)
{
- const rvec *xo = as_rvec_array(xold->data());
- rvec *xn = as_rvec_array(xnew->data());
+ const rvec *xo = as_rvec_array(xold.data());
+ rvec *xn = as_rvec_array(xnew.data());
for (int i = 0; i < homenr; i++)
{
}
}
-static void shell_pos_sd(const PaddedRVecVector * gmx_restrict xcur,
- PaddedRVecVector * gmx_restrict xnew,
- const PaddedRVecVector *f,
+static void shell_pos_sd(gmx::ArrayRef<const gmx::RVec> xcur,
+ gmx::ArrayRef<gmx::RVec> xnew,
+ gmx::ArrayRef<const gmx::RVec> f,
int ns, t_shell s[], int count)
{
const real step_scale_min = 0.8,
{
for (d = 0; d < DIM; d++)
{
- dx = (*xcur)[shell][d] - s[i].xold[d];
- df = (*f)[shell][d] - s[i].fold[d];
+ dx = xcur[shell][d] - s[i].xold[d];
+ df = f[shell][d] - s[i].fold[d];
/* -dx/df gets used to generate an interpolated value, but would
* cause a NaN if df were binary-equal to zero. Values close to
* zero won't cause problems (because of the min() and max()), so
#endif
}
}
- copy_rvec((*xcur)[shell], s[i].xold);
- copy_rvec((*f)[shell], s[i].fold);
+ copy_rvec(xcur [shell], s[i].xold);
+ copy_rvec(f[shell], s[i].fold);
- do_1pos3((*xnew)[shell], (*xcur)[shell], (*f)[shell], s[i].step);
+ do_1pos3(xnew[shell], xcur[shell], f[shell], s[i].step);
if (gmx_debug_at)
{
fprintf(debug, "shell[%d] = %d\n", i, shell);
- pr_rvec(debug, 0, "fshell", (*f)[shell], DIM, TRUE);
- pr_rvec(debug, 0, "xold", (*xcur)[shell], DIM, TRUE);
+ pr_rvec(debug, 0, "fshell", f[shell], DIM, TRUE);
+ pr_rvec(debug, 0, "xold", xcur[shell], DIM, TRUE);
pr_rvec(debug, 0, "step", s[i].step, DIM, TRUE);
- pr_rvec(debug, 0, "xnew", (*xnew)[shell], DIM, TRUE);
+ pr_rvec(debug, 0, "xnew", xnew[shell], DIM, TRUE);
}
}
#ifdef PRINT_STEP
}
-static real rms_force(t_commrec *cr, const PaddedRVecVector *force, int ns, t_shell s[],
+static real rms_force(t_commrec *cr, gmx::ArrayRef<const gmx::RVec> force, int ns, t_shell s[],
int ndir, real *sf_dir, real *Epot)
{
double buf[4];
- const rvec *f = as_rvec_array(force->data());
+ const rvec *f = as_rvec_array(force.data());
buf[0] = *sf_dir;
for (int i = 0; i < ns; i++)
return (ntot ? std::sqrt(buf[0]/ntot) : 0);
}
-static void check_pbc(FILE *fp, PaddedRVecVector x, int shell)
+static void check_pbc(FILE *fp, gmx::ArrayRef<gmx::RVec> x, int shell)
{
int m, now;
}
}
-static void dump_shells(FILE *fp, PaddedRVecVector x, PaddedRVecVector f, real ftol, int ns, t_shell s[])
+static void dump_shells(FILE *fp, gmx::ArrayRef<gmx::RVec> x, gmx::ArrayRef<gmx::RVec> f, real ftol, int ns, t_shell s[])
{
int i, shell;
real ft2, ff2;
shfc->f[i].resize(gmx::paddedRVecVectorSize(nat));
}
- /* Create pointer that we can swap */
- PaddedRVecVector *pos[2];
- PaddedRVecVector *force[2];
+ /* Create views that we can swap */
+ gmx::PaddedArrayRef<gmx::RVec> pos[2];
+ gmx::PaddedArrayRef<gmx::RVec> force[2];
for (i = 0; (i < 2); i++)
{
- pos[i] = &shfc->x[i];
- force[i] = &shfc->f[i];
+ pos[i] = shfc->x[i];
+ force[i] = shfc->f[i];
}
if (bDoNS && inputrec->ePBC != epbcNONE && !DOMAINDECOMP(cr))
*/
if (inputrec->cutoff_scheme == ecutsVERLET)
{
- put_atoms_in_box_omp(fr->ePBC, state->box, md->homenr, as_rvec_array(state->x.data()));
+ auto xRef = makeArrayRef(state->x);
+ put_atoms_in_box_omp(fr->ePBC, state->box, xRef.subArray(0, md->homenr));
}
else
{
pr_rvecs(debug, 0, "x b4 do_force", as_rvec_array(state->x.data()), homenr);
}
do_force(fplog, cr, inputrec, mdstep, nrnb, wcycle, top, groups,
- state->box, &state->x, &state->hist,
+ state->box, state->x, &state->hist,
force[Min], force_vir, md, enerd, fcd,
state->lambda, graph,
fr, vsite, mu_tot, t, nullptr, bBornRadii,
{
init_adir(fplog, shfc,
constr, idef, inputrec, cr, dd_ac1, mdstep, md, end,
- shfc->x_old, as_rvec_array(state->x.data()), as_rvec_array(state->x.data()), as_rvec_array(force[Min]->data()),
+ shfc->x_old, as_rvec_array(state->x.data()), as_rvec_array(state->x.data()), as_rvec_array(force[Min].data()),
shfc->acc_dir,
fr->bMolPBC, state->box, state->lambda, &dum, nrnb);
Epot[Min] = enerd->term[F_EPOT];
- df[Min] = rms_force(cr, &shfc->f[Min], nshell, shell, nflexcon, &sf_dir, &Epot[Min]);
+ df[Min] = rms_force(cr, shfc->f[Min], nshell, shell, nflexcon, &sf_dir, &Epot[Min]);
df[Try] = 0;
if (debug)
{
if (gmx_debug_at)
{
- pr_rvecs(debug, 0, "force0", as_rvec_array(force[Min]->data()), md->nr);
+ pr_rvecs(debug, 0, "force0", as_rvec_array(force[Min].data()), md->nr);
}
if (nshell+nflexcon > 0)
* shell positions are updated, therefore the other particles must
* be set here.
*/
- *pos[Min] = state->x;
- *pos[Try] = state->x;
+ pos[Min] = state->x;
+ pos[Try] = state->x;
}
if (bVerbose && MASTER(cr))
{
if (vsite)
{
- construct_vsites(vsite, as_rvec_array(pos[Min]->data()),
+ construct_vsites(vsite, as_rvec_array(pos[Min].data()),
inputrec->delta_t, as_rvec_array(state->v.data()),
idef->iparams, idef->il,
fr->ePBC, fr->bMolPBC, cr, state->box);
{
init_adir(fplog, shfc,
constr, idef, inputrec, cr, dd_ac1, mdstep, md, end,
- x_old, as_rvec_array(state->x.data()), as_rvec_array(pos[Min]->data()), as_rvec_array(force[Min]->data()), acc_dir,
+ x_old, as_rvec_array(state->x.data()), as_rvec_array(pos[Min].data()), as_rvec_array(force[Min].data()), acc_dir,
fr->bMolPBC, state->box, state->lambda, &dum, nrnb);
directional_sd(pos[Min], pos[Try], acc_dir, end, fr->fc_stepsize);
/* do_force expected the charge groups to be in the box */
if (graph)
{
- unshift_self(graph, state->box, as_rvec_array(pos[Try]->data()));
+ unshift_self(graph, state->box, as_rvec_array(pos[Try].data()));
}
if (gmx_debug_at)
{
- pr_rvecs(debug, 0, "RELAX: pos[Min] ", as_rvec_array(pos[Min]->data()), homenr);
- pr_rvecs(debug, 0, "RELAX: pos[Try] ", as_rvec_array(pos[Try]->data()), homenr);
+ pr_rvecs(debug, 0, "RELAX: pos[Min] ", as_rvec_array(pos[Min].data()), homenr);
+ pr_rvecs(debug, 0, "RELAX: pos[Try] ", as_rvec_array(pos[Try].data()), homenr);
}
/* Try the new positions */
do_force(fplog, cr, inputrec, 1, nrnb, wcycle,
if (gmx_debug_at)
{
- pr_rvecs(debug, 0, "RELAX: force[Min]", as_rvec_array(force[Min]->data()), homenr);
- pr_rvecs(debug, 0, "RELAX: force[Try]", as_rvec_array(force[Try]->data()), homenr);
+ pr_rvecs(debug, 0, "RELAX: force[Min]", as_rvec_array(force[Min].data()), homenr);
+ pr_rvecs(debug, 0, "RELAX: force[Try]", as_rvec_array(force[Try].data()), homenr);
}
sf_dir = 0;
if (nflexcon)
{
init_adir(fplog, shfc,
constr, idef, inputrec, cr, dd_ac1, mdstep, md, end,
- x_old, as_rvec_array(state->x.data()), as_rvec_array(pos[Try]->data()), as_rvec_array(force[Try]->data()), acc_dir,
+ x_old, as_rvec_array(state->x.data()), as_rvec_array(pos[Try].data()), as_rvec_array(force[Try].data()), acc_dir,
fr->bMolPBC, state->box, state->lambda, &dum, nrnb);
for (i = 0; i < end; i++)
{
if (gmx_debug_at)
{
- pr_rvecs(debug, 0, "F na do_force", as_rvec_array(force[Try]->data()), homenr);
+ pr_rvecs(debug, 0, "F na do_force", as_rvec_array(force[Try].data()), homenr);
}
if (gmx_debug_at)
{
fprintf(debug, "SHELL ITER %d\n", count);
- dump_shells(debug, *pos[Try], *force[Try], ftol, nshell, shell);
+ dump_shells(debug, pos[Try], force[Try], ftol, nshell, shell);
}
}
}
/* Copy back the coordinates and the forces */
- state->x = *pos[Min];
- *f = *force[Min];
+ std::copy(pos[Min].begin(), pos[Min].end(), state->x.begin());
+ std::copy(force[Min].begin(), force[Min].end(), f->begin());
}
void done_shellfc(FILE *fplog, gmx_shellfc_t *shfc, gmx_int64_t numSteps)
gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
gmx_localtop_t *top,
gmx_groups_t gmx_unused *groups,
- matrix box, rvec x[], history_t *hist,
- PaddedRVecVector *force,
+ matrix box, gmx::PaddedArrayRef<gmx::RVec> x, history_t *hist,
+ gmx::PaddedArrayRef<gmx::RVec> force,
tensor vir_force,
t_mdatoms *mdatoms,
gmx_enerdata_t *enerd, t_fcdata *fcd,
if (bCalcCGCM)
{
- put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
+ put_atoms_in_box_omp(fr->ePBC, box, x.subArray(0, homenr));
inc_nrnb(nrnb, eNR_SHIFTX, homenr);
}
else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
{
- unshift_self(graph, box, x);
+ unshift_self(graph, box, as_rvec_array(x.data()));
}
}
* we do not need to worry about shifting.
*/
wallcycle_start(wcycle, ewcPP_PMESENDX);
- gmx_pme_send_coordinates(cr, box, x,
+ gmx_pme_send_coordinates(cr, box, as_rvec_array(x.data()),
lambda[efptCOUL], lambda[efptVDW],
(flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
step);
if (useGpuPme)
{
- launchPmeGpuSpread(fr->pmedata, box, x, flags, wcycle);
+ launchPmeGpuSpread(fr->pmedata, box, as_rvec_array(x.data()), flags, wcycle);
}
/* do gridding for pair search */
if (graph && bStateChanged)
{
/* Calculate intramolecular shift vectors to make molecules whole */
- mk_mshift(fplog, graph, fr->ePBC, box, x);
+ mk_mshift(fplog, graph, fr->ePBC, box, as_rvec_array(x.data()));
}
clear_rvec(vzero);
wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
0, vzero, box_diag,
- 0, mdatoms->homenr, -1, fr->cginfo, x,
+ 0, mdatoms->homenr, -1, fr->cginfo, as_rvec_array(x.data()),
0, nullptr,
nbv->grp[eintLocal].kernel_type,
nbv->nbat);
{
wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
- fr->cginfo, x,
+ fr->cginfo, as_rvec_array(x.data()),
nbv->grp[eintNonlocal].kernel_type,
nbv->nbat);
wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
{
wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
- nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
+ nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, as_rvec_array(x.data()),
nbv->nbat);
wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
else
{
wallcycle_start(wcycle, ewcMOVEX);
- dd_move_x(cr->dd, box, x);
+ dd_move_x(cr->dd, box, as_rvec_array(x.data()));
wallcycle_stop(wcycle, ewcMOVEX);
wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
- nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
+ nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, as_rvec_array(x.data()),
nbv->nbat);
wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
if (inputrec->bRot)
{
wallcycle_start(wcycle, ewcROT);
- do_rotation(cr, inputrec, box, x, t, step, bNS);
+ do_rotation(cr, inputrec, box, as_rvec_array(x.data()), t, step, bNS);
wallcycle_stop(wcycle, ewcROT);
}
/* Temporary solution until all routines take PaddedRVecVector */
- rvec *f = as_rvec_array(force->data());
+ rvec *f = as_rvec_array(force.data());
/* Start the force cycle counter.
* Note that a different counter is used for dynamic load balancing.
*/
wallcycle_start(wcycle, ewcFORCE);
- gmx::ArrayRef<gmx::RVec> forceRef = *force;
+ gmx::ArrayRef<gmx::RVec> forceRef = force;
if (bDoForces)
{
/* If we need to compute the virial, we might need a separate
if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
{
do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
- fr, x, f, mdatoms,
+ fr, as_rvec_array(x.data()), f, mdatoms,
inputrec->fepvals, lambda,
enerd, flags, nrnb, wcycle);
}
fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
{
do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
- fr, x, f, mdatoms,
+ fr, as_rvec_array(x.data()), f, mdatoms,
inputrec->fepvals, lambda,
enerd, flags, nrnb, wcycle);
}
/* update QMMMrec, if necessary */
if (fr->bQMMM)
{
- update_QMMMrec(cr, fr, x, mdatoms, box);
+ update_QMMMrec(cr, fr, as_rvec_array(x.data()), mdatoms, box);
}
/* Compute the bonded and non-bonded energies and optionally forces */
do_force_lowlevel(fr, inputrec, &(top->idef),
cr, nrnb, wcycle, mdatoms,
- x, hist, f, &forceWithVirial, enerd, fcd, top, fr->born,
+ as_rvec_array(x.data()), hist, f, &forceWithVirial, enerd, fcd, top, fr->born,
bBornRadii, box,
inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
flags, &cycles_pme);
wallcycle_stop(wcycle, ewcFORCE);
computeSpecialForces(fplog, cr, inputrec, step, t, wcycle,
- fr->forceProviders, box, x, mdatoms, lambda,
+ fr->forceProviders, box, as_rvec_array(x.data()), mdatoms, lambda,
flags, &forceWithVirial, enerd,
ed, bNS);
if (vsite && !(fr->haveDirectVirialContributions && !(flags & GMX_FORCE_VIRIAL)))
{
wallcycle_start(wcycle, ewcVSITESPREAD);
- spread_vsite_f(vsite, x, f, fr->fshift, FALSE, nullptr, nrnb,
+ spread_vsite_f(vsite, as_rvec_array(x.data()), f, fr->fshift, FALSE, nullptr, nrnb,
&top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
wallcycle_stop(wcycle, ewcVSITESPREAD);
}
if (flags & GMX_FORCE_VIRIAL)
{
/* Calculation of the virial must be done after vsites! */
- calc_virial(0, mdatoms->homenr, x, f,
+ calc_virial(0, mdatoms->homenr, as_rvec_array(x.data()), f,
vir_force, graph, box, nrnb, fr, inputrec->ePBC);
}
}
if (bDoForces)
{
post_process_forces(cr, step, nrnb, wcycle,
- top, box, x, f, &forceWithVirial,
+ top, box, as_rvec_array(x.data()), f, &forceWithVirial,
vir_force, mdatoms, graph, fr, vsite,
flags);
}
gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
gmx_localtop_t *top,
gmx_groups_t *groups,
- matrix box, rvec x[], history_t *hist,
- PaddedRVecVector *force,
+ matrix box, gmx::PaddedArrayRef<gmx::RVec> x, history_t *hist,
+ gmx::PaddedArrayRef<gmx::RVec> force,
tensor vir_force,
t_mdatoms *mdatoms,
gmx_enerdata_t *enerd, t_fcdata *fcd,
if (bCalcCGCM)
{
put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
- &(top->cgs), x, fr->cg_cm);
+ &(top->cgs), as_rvec_array(x.data()), fr->cg_cm);
inc_nrnb(nrnb, eNR_CGCM, homenr);
inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
}
else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
{
- unshift_self(graph, box, x);
+ unshift_self(graph, box, as_rvec_array(x.data()));
}
}
else if (bCalcCGCM)
{
- calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
+ calc_cgcm(fplog, cg0, cg1, &(top->cgs), as_rvec_array(x.data()), fr->cg_cm);
inc_nrnb(nrnb, eNR_CGCM, homenr);
}
* we do not need to worry about shifting.
*/
wallcycle_start(wcycle, ewcPP_PMESENDX);
- gmx_pme_send_coordinates(cr, box, x,
+ gmx_pme_send_coordinates(cr, box, as_rvec_array(x.data()),
lambda[efptCOUL], lambda[efptVDW],
(flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
step);
if (DOMAINDECOMP(cr))
{
wallcycle_start(wcycle, ewcMOVEX);
- dd_move_x(cr->dd, box, x);
+ dd_move_x(cr->dd, box, as_rvec_array(x.data()));
wallcycle_stop(wcycle, ewcMOVEX);
/* No GPU support, no move_x overlap, so reopen the balance region here */
if (ddOpenBalanceRegion == DdOpenBalanceRegionBeforeForceComputation::yes)
if (graph && bStateChanged)
{
/* Calculate intramolecular shift vectors to make molecules whole */
- mk_mshift(fplog, graph, fr->ePBC, box, x);
+ mk_mshift(fplog, graph, fr->ePBC, box, as_rvec_array(x.data()));
}
/* Do the actual neighbour searching */
if (inputrec->implicit_solvent && bNS)
{
make_gb_nblist(cr, inputrec->gb_algorithm,
- x, box, fr, &top->idef, graph, fr->born);
+ as_rvec_array(x.data()), box, fr, &top->idef, graph, fr->born);
}
if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
if (inputrec->bRot)
{
wallcycle_start(wcycle, ewcROT);
- do_rotation(cr, inputrec, box, x, t, step, bNS);
+ do_rotation(cr, inputrec, box, as_rvec_array(x.data()), t, step, bNS);
wallcycle_stop(wcycle, ewcROT);
}
/* Temporary solution until all routines take PaddedRVecVector */
- rvec *f = as_rvec_array(force->data());
+ rvec *f = as_rvec_array(force.data());
/* Start the force cycle counter.
* Note that a different counter is used for dynamic load balancing.
*/
wallcycle_start(wcycle, ewcFORCE);
- gmx::ArrayRef<gmx::RVec> forceRef = *force;
+ gmx::ArrayRef<gmx::RVec> forceRef = force;
if (bDoForces)
{
/* If we need to compute the virial, we might need a separate
/* update QMMMrec, if necessary */
if (fr->bQMMM)
{
- update_QMMMrec(cr, fr, x, mdatoms, box);
+ update_QMMMrec(cr, fr, as_rvec_array(x.data()), mdatoms, box);
}
/* Compute the bonded and non-bonded energies and optionally forces */
do_force_lowlevel(fr, inputrec, &(top->idef),
cr, nrnb, wcycle, mdatoms,
- x, hist, f, &forceWithVirial, enerd, fcd, top, fr->born,
+ as_rvec_array(x.data()), hist, f, &forceWithVirial, enerd, fcd, top, fr->born,
bBornRadii, box,
inputrec->fepvals, lambda,
graph, &(top->excls), fr->mu_tot,
}
computeSpecialForces(fplog, cr, inputrec, step, t, wcycle,
- fr->forceProviders, box, x, mdatoms, lambda,
+ fr->forceProviders, box, as_rvec_array(x.data()), mdatoms, lambda,
flags, &forceWithVirial, enerd,
ed, bNS);
if (vsite && !(fr->haveDirectVirialContributions && !(flags & GMX_FORCE_VIRIAL)))
{
wallcycle_start(wcycle, ewcVSITESPREAD);
- spread_vsite_f(vsite, x, f, fr->fshift, FALSE, nullptr, nrnb,
+ spread_vsite_f(vsite, as_rvec_array(x.data()), f, fr->fshift, FALSE, nullptr, nrnb,
&top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
wallcycle_stop(wcycle, ewcVSITESPREAD);
}
if (flags & GMX_FORCE_VIRIAL)
{
/* Calculation of the virial must be done after vsites! */
- calc_virial(0, mdatoms->homenr, x, f,
+ calc_virial(0, mdatoms->homenr, as_rvec_array(x.data()), f,
vir_force, graph, box, nrnb, fr, inputrec->ePBC);
}
}
if (bDoForces)
{
post_process_forces(cr, step, nrnb, wcycle,
- top, box, x, f, &forceWithVirial,
+ top, box, as_rvec_array(x.data()), f, &forceWithVirial,
vir_force, mdatoms, graph, fr, vsite,
flags);
}
gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
gmx_localtop_t *top,
gmx_groups_t *groups,
- matrix box, PaddedRVecVector *coordinates, history_t *hist,
- PaddedRVecVector *force,
+ matrix box, gmx::PaddedArrayRef<gmx::RVec> x, history_t *hist,
+ gmx::PaddedArrayRef<gmx::RVec> force,
tensor vir_force,
t_mdatoms *mdatoms,
gmx_enerdata_t *enerd, t_fcdata *fcd,
flags &= ~GMX_FORCE_NONBONDED;
}
- GMX_ASSERT(coordinates->size() >= gmx::paddedRVecVectorSize(fr->natoms_force), "coordinates should be padded");
- GMX_ASSERT(force->size() >= gmx::paddedRVecVectorSize(fr->natoms_force), "force should be padded");
-
- rvec *x = as_rvec_array(coordinates->data());
+ GMX_ASSERT(x.size() >= gmx::paddedRVecVectorSize(fr->natoms_force), "coordinates should be padded");
+ GMX_ASSERT(force.size() >= gmx::paddedRVecVectorSize(fr->natoms_force), "force should be padded");
switch (inputrec->cutoff_scheme)
{
low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
}
-void put_atoms_in_box_omp(int ePBC, const matrix box, int natoms, rvec x[])
+void put_atoms_in_box_omp(int ePBC, const matrix box, gmx::ArrayRef<gmx::RVec> x)
{
int t, nth;
nth = gmx_omp_nthreads_get(emntDefault);
{
try
{
- int offset, len;
-
- offset = (natoms*t )/nth;
- len = (natoms*(t + 1))/nth - offset;
- put_atoms_in_box(ePBC, box, len, x + offset);
+ size_t natoms = x.size();
+ size_t offset = (natoms*t )/nth;
+ size_t len = (natoms*(t + 1))/nth - offset;
+ put_atoms_in_box(ePBC, box, x.subArray(offset, len));
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}
* in tools.
* \param[in] ePBC The pbc type
* \param[in] box The simulation box
- * \param[in] natoms The number of atoms
* \param[inout] x The coordinates of the atoms
*/
-void put_atoms_in_box_omp(int ePBC, const matrix box, int natoms, rvec x[]);
+void put_atoms_in_box_omp(int ePBC, const matrix box, gmx::ArrayRef<gmx::RVec> x);
cr->nnodes = 1;
do_force(fplog, cr, inputrec,
step, nrnb, wcycle, top, &top_global->groups,
- state_global->box, &state_global->x, &state_global->hist,
- &f, force_vir, mdatoms, enerd, fcd,
+ state_global->box, state_global->x, &state_global->hist,
+ f, force_vir, mdatoms, enerd, fcd,
state_global->lambda,
nullptr, fr, nullptr, mu_tot, t, nullptr, FALSE,
GMX_FORCE_NONBONDED | GMX_FORCE_ENERGY |
#include "gromacs/utility/smalloc.h"
void
-do_md_trajectory_writing(FILE *fplog,
- t_commrec *cr,
- int nfile,
- const t_filenm fnm[],
- gmx_int64_t step,
- gmx_int64_t step_rel,
- double t,
- t_inputrec *ir,
- t_state *state,
- t_state *state_global,
- ObservablesHistory *observablesHistory,
- gmx_mtop_t *top_global,
- t_forcerec *fr,
- gmx_mdoutf_t outf,
- t_mdebin *mdebin,
- gmx_ekindata_t *ekind,
- PaddedRVecVector *f,
- int *nchkpt,
- gmx_bool bCPT,
- gmx_bool bRerunMD,
- gmx_bool bLastStep,
- gmx_bool bDoConfOut,
- gmx_bool bSumEkinhOld
+do_md_trajectory_writing(FILE *fplog,
+ t_commrec *cr,
+ int nfile,
+ const t_filenm fnm[],
+ gmx_int64_t step,
+ gmx_int64_t step_rel,
+ double t,
+ t_inputrec *ir,
+ t_state *state,
+ t_state *state_global,
+ ObservablesHistory *observablesHistory,
+ gmx_mtop_t *top_global,
+ t_forcerec *fr,
+ gmx_mdoutf_t outf,
+ t_mdebin *mdebin,
+ gmx_ekindata_t *ekind,
+ gmx::ArrayRef<gmx::RVec> f,
+ int *nchkpt,
+ gmx_bool bCPT,
+ gmx_bool bRerunMD,
+ gmx_bool bLastStep,
+ gmx_bool bDoConfOut,
+ gmx_bool bSumEkinhOld
)
{
int mdof_flags;
gmx_mdoutf_t outf,
t_mdebin *mdebin,
struct gmx_ekindata_t *ekind,
- PaddedRVecVector *f,
+ gmx::ArrayRef<gmx::RVec> f,
int *nchkpt,
gmx_bool bCPT,
gmx_bool bRerunMD,
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/invertmatrix.h"
+#include "gromacs/math/paddedvector.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
static void dump_it_all(FILE gmx_unused *fp, const char gmx_unused *title,
int gmx_unused natoms,
- const gmx_unused PaddedRVecVector *x,
- const gmx_unused PaddedRVecVector *xp,
- const gmx_unused PaddedRVecVector *v,
- const gmx_unused PaddedRVecVector *f)
+ gmx::PaddedArrayRef<gmx::RVec> gmx_unused x,
+ gmx::PaddedArrayRef<gmx::RVec> gmx_unused xp,
+ gmx::PaddedArrayRef<gmx::RVec> gmx_unused v,
+ gmx::PaddedArrayRef<gmx::RVec> gmx_unused f)
{
#ifdef DEBUG
if (fp)
}
}
-void update_constraints(FILE *fplog,
- gmx_int64_t step,
- real *dvdlambda, /* the contribution to be added to the bonded interactions */
- t_inputrec *inputrec, /* input record and box stuff */
- t_mdatoms *md,
- t_state *state,
- gmx_bool bMolPBC,
- t_graph *graph,
- PaddedRVecVector *force, /* forces on home particles */
- t_idef *idef,
- tensor vir_part,
- t_commrec *cr,
- t_nrnb *nrnb,
- gmx_wallcycle_t wcycle,
- gmx_update_t *upd,
- gmx_constr_t constr,
- gmx_bool bFirstHalf,
- gmx_bool bCalcVir)
+void update_constraints(FILE *fplog,
+ gmx_int64_t step,
+ real *dvdlambda, /* the contribution to be added to the bonded interactions */
+ t_inputrec *inputrec, /* input record and box stuff */
+ t_mdatoms *md,
+ t_state *state,
+ gmx_bool bMolPBC,
+ t_graph *graph,
+ gmx::PaddedArrayRef<gmx::RVec> force, /* forces on home particles */
+ t_idef *idef,
+ tensor vir_part,
+ t_commrec *cr,
+ t_nrnb *nrnb,
+ gmx_wallcycle_t wcycle,
+ gmx_update_t *upd,
+ gmx_constr_t constr,
+ gmx_bool bFirstHalf,
+ gmx_bool bCalcVir)
{
gmx_bool bLastStep, bLog = FALSE, bEner = FALSE, bDoConstr = FALSE;
tensor vir_con;
where();
dump_it_all(fplog, "After Shake",
- state->natoms, &state->x, &upd->xp, &state->v, force);
+ state->natoms, state->x, upd->xp, state->v, force);
if (bCalcVir)
{
inputrec->opts.acc, inputrec->opts.nFreeze,
md->invmass, md->ptype,
md->cFREEZE, md->cACC, md->cTC,
- as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()), as_rvec_array(state->v.data()), as_rvec_array(force->data()),
+ as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()), as_rvec_array(state->v.data()), as_rvec_array(force.data()),
bDoConstr, FALSE,
step, inputrec->ld_seed,
DOMAINDECOMP(cr) ? cr->dd->gatindex : nullptr);
wallcycle_stop(wcycle, ewcUPDATE);
dump_it_all(fplog, "After unshift",
- state->natoms, &state->x, &upd->xp, &state->v, force);
+ state->natoms, state->x, upd->xp, state->v, force);
}
/* ############# END the update of velocities and positions ######### */
}
}
where();
dump_it_all(fplog, "After update",
- state->natoms, &state->x, &upd->xp, &state->v, nullptr);
+ state->natoms, state->x, upd->xp, state->v, gmx::EmptyArrayRef());
}
-void update_coords(FILE *fplog,
- gmx_int64_t step,
- t_inputrec *inputrec, /* input record and box stuff */
- t_mdatoms *md,
- t_state *state,
- PaddedRVecVector *f, /* forces on home particles */
- t_fcdata *fcd,
- gmx_ekindata_t *ekind,
- matrix M,
- gmx_update_t *upd,
- int UpdatePart,
- t_commrec *cr, /* these shouldn't be here -- need to think about it */
- gmx_constr_t constr)
+void update_coords(FILE *fplog,
+ gmx_int64_t step,
+ t_inputrec *inputrec, /* input record and box stuff */
+ t_mdatoms *md,
+ t_state *state,
+ gmx::PaddedArrayRef<gmx::RVec> f, /* forces on home particles */
+ t_fcdata *fcd,
+ gmx_ekindata_t *ekind,
+ matrix M,
+ gmx_update_t *upd,
+ int UpdatePart,
+ t_commrec *cr, /* these shouldn't be here -- need to think about it */
+ gmx_constr_t constr)
{
gmx_bool bDoConstr = (nullptr != constr);
/* ############# START The update of velocities and positions ######### */
where();
dump_it_all(fplog, "Before update",
- state->natoms, &state->x, &upd->xp, &state->v, f);
+ state->natoms, state->x, upd->xp, state->v, f);
int nth = gmx_omp_nthreads_get(emntUpdate);
GMX_ASSERT(state->x.size() >= homenrSimdPadded, "state->x needs to be padded for SIMD access");
GMX_ASSERT(upd->xp.size() >= homenrSimdPadded, "upd->xp needs to be padded for SIMD access");
GMX_ASSERT(state->v.size() >= homenrSimdPadded, "state->v needs to be padded for SIMD access");
- GMX_ASSERT(f->size() >= homenrSimdPadded, "f needs to be padded for SIMD access");
+ GMX_ASSERT(f.size() >= homenrSimdPadded, "f needs to be padded for SIMD access");
#endif
const rvec *x_rvec = as_rvec_array(state->x.data());
rvec *xp_rvec = as_rvec_array(upd->xp.data());
rvec *v_rvec = as_rvec_array(state->v.data());
- const rvec *f_rvec = as_rvec_array(f->data());
+ const rvec *f_rvec = as_rvec_array(f.data());
switch (inputrec->eI)
{
#include "gromacs/math/paddedvector.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/timing/wallcycle.h"
+#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"
t_nrnb *nrnb,
gmx_update_t *upd);
-void update_coords(FILE *fplog,
- gmx_int64_t step,
- t_inputrec *inputrec, /* input record and box stuff */
- t_mdatoms *md,
- t_state *state,
- PaddedRVecVector *f, /* forces on home particles */
- t_fcdata *fcd,
- gmx_ekindata_t *ekind,
- matrix M,
- gmx_update_t *upd,
- int bUpdatePart,
- t_commrec *cr, /* these shouldn't be here -- need to think about it */
- gmx_constr *constr);
+void update_coords(FILE *fplog,
+ gmx_int64_t step,
+ t_inputrec *inputrec, /* input record and box stuff */
+ t_mdatoms *md,
+ t_state *state,
+ gmx::PaddedArrayRef<gmx::RVec> f, /* forces on home particles */
+ t_fcdata *fcd,
+ gmx_ekindata_t *ekind,
+ matrix M,
+ gmx_update_t *upd,
+ int bUpdatePart,
+ t_commrec *cr, /* these shouldn't be here -- need to think about it */
+ gmx_constr *constr);
/* Return TRUE if OK, FALSE in case of Shake Error */
extern gmx_bool update_randomize_velocities(t_inputrec *ir, gmx_int64_t step, const t_commrec *cr, t_mdatoms *md, t_state *state, gmx_update_t *upd, gmx_constr *constr);
-void update_constraints(FILE *fplog,
- gmx_int64_t step,
- real *dvdlambda, /* FEP stuff */
- t_inputrec *inputrec, /* input record and box stuff */
- t_mdatoms *md,
- t_state *state,
- gmx_bool bMolPBC,
- t_graph *graph,
- PaddedRVecVector *force, /* forces on home particles */
- t_idef *idef,
- tensor vir_part,
- t_commrec *cr,
- t_nrnb *nrnb,
- gmx_wallcycle_t wcycle,
- gmx_update_t *upd,
- gmx_constr *constr,
- gmx_bool bFirstHalf,
- gmx_bool bCalcVir);
+void update_constraints(FILE *fplog,
+ gmx_int64_t step,
+ real *dvdlambda, /* FEP stuff */
+ t_inputrec *inputrec, /* input record and box stuff */
+ t_mdatoms *md,
+ t_state *state,
+ gmx_bool bMolPBC,
+ t_graph *graph,
+ gmx::ArrayRef<gmx::RVec> force, /* forces on home particles */
+ t_idef *idef,
+ tensor vir_part,
+ t_commrec *cr,
+ t_nrnb *nrnb,
+ gmx_wallcycle_t wcycle,
+ gmx_update_t *upd,
+ gmx_constr *constr,
+ gmx_bool bFirstHalf,
+ gmx_bool bCalcVir);
/* Return TRUE if OK, FALSE in case of Shake Error */
}
}
-rvec *getRvecArrayFromPaddedRVecVector(const PaddedRVecVector *v,
- unsigned int n)
+rvec *makeRvecArray(gmx::ArrayRef<const gmx::RVec> v,
+ unsigned int n)
{
- GMX_ASSERT(v->size() >= n, "We can't copy more elements than the vector size");
+ GMX_ASSERT(v.size() >= n, "We can't copy more elements than the vector size");
rvec *dest;
snew(dest, n);
- const rvec *vPtr = as_rvec_array(v->data());
+ const rvec *vPtr = as_rvec_array(v.data());
for (unsigned int i = 0; i < n; i++)
{
copy_rvec(vPtr[i], dest[i]);
#include "gromacs/math/paddedvector.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/md_enums.h"
+#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"
void comp_state(const t_state *st1, const t_state *st2, gmx_bool bRMSD, real ftol, real abstol);
/*! \brief Allocates an rvec pointer and copy the contents of v to it */
-rvec *getRvecArrayFromPaddedRVecVector(const PaddedRVecVector *v,
- unsigned int n);
+rvec *makeRvecArray(gmx::ArrayRef<const gmx::RVec> v,
+ unsigned int n);
/*! \brief Determine the relative box components
*
return edge;
}
-void put_atoms_in_box(int ePBC, const matrix box, int natoms, rvec x[])
+void put_atoms_in_box(int ePBC, const matrix box, gmx::ArrayRef<gmx::RVec> x)
{
- int npbcdim, i, m, d;
+ int npbcdim, m, d;
if (ePBC == epbcSCREW)
{
if (TRICLINIC(box))
{
- for (i = 0; (i < natoms); i++)
+ for (size_t i = 0; (i < x.size()); ++i)
{
for (m = npbcdim-1; m >= 0; m--)
{
}
else
{
- for (i = 0; i < natoms; i++)
+ for (size_t i = 0; (i < x.size()); ++i)
{
for (d = 0; d < npbcdim; d++)
{
}
void put_atoms_in_triclinic_unitcell(int ecenter, const matrix box,
- int natoms, rvec x[])
+ gmx::ArrayRef<gmx::RVec> x)
{
rvec box_center, shift_center;
real shm01, shm02, shm12, shift;
- int i, m, d;
+ int m, d;
calc_box_center(ecenter, box, box_center);
shift_center[1] = shm12*shift_center[2];
shift_center[2] = 0;
- for (i = 0; (i < natoms); i++)
+ for (size_t i = 0; (i < x.size()); ++i)
{
for (m = DIM-1; m >= 0; m--)
{
}
void put_atoms_in_compact_unitcell(int ePBC, int ecenter, const matrix box,
- int natoms, rvec x[])
+ gmx::ArrayRef<gmx::RVec> x)
{
t_pbc pbc;
rvec box_center, dx;
- int i;
set_pbc(&pbc, ePBC, box);
}
calc_box_center(ecenter, box, box_center);
- for (i = 0; i < natoms; i++)
+ for (size_t i = 0; (i < x.size()); ++i)
{
pbc_dx(&pbc, x[i], box_center, dx);
rvec_add(box_center, dx, x[i]);
#include <stdio.h>
#include "gromacs/math/vectypes.h"
+#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"
struct gmx_domdec_t;
-#ifdef __cplusplus
-extern "C" {
-#endif
-
enum {
epbcXYZ, epbcNONE, epbcXY, epbcSCREW, epbcNR
};
* Also works for triclinic cells.
* \param[in] ePBC The pbc type
* \param[in] box The simulation box
- * \param[in] natoms The number of atoms
* \param[inout] x The coordinates of the atoms
*/
-void put_atoms_in_box(int ePBC, const matrix box, int natoms, rvec x[]);
+void put_atoms_in_box(int ePBC, const matrix box, gmx::ArrayRef<gmx::RVec> x);
/*! \brief Put atoms inside triclinic box
*
* box center as calculated by calc_box_center.
* \param[in] ecenter The pbc center type
* \param[in] box The simulation box
- * \param[in] natoms The number of atoms
* \param[inout] x The coordinates of the atoms
*/
void put_atoms_in_triclinic_unitcell(int ecenter, const matrix box,
- int natoms, rvec x[]);
+ gmx::ArrayRef<gmx::RVec> x);
/*! \brief Put atoms inside the unitcell
*
* \param[in] ePBC The pbc type
* \param[in] ecenter The pbc center type
* \param[in] box The simulation box
- * \param[in] natoms The number of atoms
* \param[inout] x The coordinates of the atoms
*/
void put_atoms_in_compact_unitcell(int ePBC, int ecenter,
const matrix box,
- int natoms, rvec x[]);
-
-#ifdef __cplusplus
-}
-#endif
+ gmx::ArrayRef<gmx::RVec> x);
#endif
#include "gromacs/awh/awh.h"
#include "gromacs/commandline/filenm.h"
+#include "gromacs/compat/make_unique.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_network.h"
#include "gromacs/domdec/domdec_struct.h"
{
top = dd_init_local_top(top_global);
- stateInstance = std::unique_ptr<t_state>(new t_state);
+ stateInstance = gmx::compat::make_unique<t_state>();
state = stateInstance.get();
dd_init_local_state(cr->dd, state_global, state);
}
* Check comments in sim_util.c
*/
do_force(fplog, cr, ir, step, nrnb, wcycle, top, groups,
- state->box, &state->x, &state->hist,
- &f, force_vir, mdatoms, enerd, fcd,
+ state->box, state->x, &state->hist,
+ f, force_vir, mdatoms, enerd, fcd,
state->lambda, graph,
fr, vsite, mu_tot, t, ed, bBornRadii,
(bNS ? GMX_FORCE_NS : 0) | force_flags,
trotter_update(ir, step, ekind, enerd, state, total_vir, mdatoms, &MassQ, trotter_seq, ettTSEQ1);
}
- update_coords(fplog, step, ir, mdatoms, state, &f, fcd,
+ update_coords(fplog, step, ir, mdatoms, state, f, fcd,
ekind, M, upd, etrtVELOCITY1,
cr, constr);
{
wallcycle_stop(wcycle, ewcUPDATE);
update_constraints(fplog, step, nullptr, ir, mdatoms,
- state, fr->bMolPBC, graph, &f,
+ state, fr->bMolPBC, graph, f,
&top->idef, shake_vir,
cr, nrnb, wcycle, upd, constr,
TRUE, bCalcVir);
do_md_trajectory_writing(fplog, cr, nfile, fnm, step, step_rel, t,
ir, state, state_global, observablesHistory,
top_global, fr,
- outf, mdebin, ekind, &f,
+ outf, mdebin, ekind, f,
&nchkpt,
bCPT, bRerunMD, bLastStep,
mdrunOptions.writeConfout,
if (constr && bIfRandomize)
{
update_constraints(fplog, step, nullptr, ir, mdatoms,
- state, fr->bMolPBC, graph, &f,
+ state, fr->bMolPBC, graph, f,
&top->idef, tmp_vir,
cr, nrnb, wcycle, upd, constr,
TRUE, bCalcVir);
if (EI_VV(ir->eI))
{
/* velocity half-step update */
- update_coords(fplog, step, ir, mdatoms, state, &f, fcd,
+ update_coords(fplog, step, ir, mdatoms, state, f, fcd,
ekind, M, upd, etrtVELOCITY2,
cr, constr);
}
copy_rvecn(as_rvec_array(state->x.data()), cbuf, 0, state->natoms);
}
- update_coords(fplog, step, ir, mdatoms, state, &f, fcd,
+ update_coords(fplog, step, ir, mdatoms, state, f, fcd,
ekind, M, upd, etrtPOSITION, cr, constr);
wallcycle_stop(wcycle, ewcUPDATE);
update_constraints(fplog, step, &dvdl_constr, ir, mdatoms, state,
- fr->bMolPBC, graph, &f,
+ fr->bMolPBC, graph, f,
&top->idef, shake_vir,
cr, nrnb, wcycle, upd, constr,
FALSE, bCalcVir);
/* now we know the scaling, we can compute the positions again again */
copy_rvecn(cbuf, as_rvec_array(state->x.data()), 0, state->natoms);
- update_coords(fplog, step, ir, mdatoms, state, &f, fcd,
+ update_coords(fplog, step, ir, mdatoms, state, f, fcd,
ekind, M, upd, etrtPOSITION, cr, constr);
wallcycle_stop(wcycle, ewcUPDATE);
* physically? I'm thinking they are just errors, but not completely sure.
* For now, will call without actually constraining, constr=NULL*/
update_constraints(fplog, step, nullptr, ir, mdatoms,
- state, fr->bMolPBC, graph, &f,
+ state, fr->bMolPBC, graph, f,
&top->idef, tmp_vir,
cr, nrnb, wcycle, upd, nullptr,
FALSE, bCalcVir);
*nat = man->natom;
if (ncount == man->nSkip)
{
+ auto atomsArrayRef = gmx::arrayRefFromArray(reinterpret_cast<gmx::RVec *>(man->x), man->natom);
switch (man->molw->boxtype)
{
case esbTri:
- put_atoms_in_triclinic_unitcell(ecenterDEF, man->box, man->natom, man->x);
+ put_atoms_in_triclinic_unitcell(ecenterDEF, man->box, atomsArrayRef);
break;
case esbTrunc:
put_atoms_in_compact_unitcell(man->molw->ePBC, ecenterDEF, man->box,
- man->natom, man->x);
+ atomsArrayRef);
break;
case esbRect:
case esbNone: