From: Joe Jordan Date: Tue, 16 Mar 2021 18:03:19 +0000 (+0000) Subject: Use RVec and std::array in gmx_domdec_comm_t X-Git-Url: http://biod.pnpi.spb.ru/gitweb/?a=commitdiff_plain;h=c258e95e9eba209ccbb59078f9318ffe3e2386db;p=alexxy%2Fgromacs.git Use RVec and std::array in gmx_domdec_comm_t Where possible, rvecs have been changed to RVecs and pointers changed to std::arrays. --- diff --git a/src/gromacs/domdec/domdec_internal.h b/src/gromacs/domdec/domdec_internal.h index 680515174d..41730c5c1b 100644 --- a/src/gromacs/domdec/domdec_internal.h +++ b/src/gromacs/domdec/domdec_internal.h @@ -583,35 +583,35 @@ struct gmx_domdec_comm_t // NOLINT (clang-analyzer-optin.performance.Padding) /**< Cut-off for multi-body interactions, also 2-body bonded when \p cutoff_mody > \p cutoff */ real cutoff_mbody = 0; /**< The minimum guaranteed cell-size, Cartesian indexing */ - rvec cellsize_min = {}; + gmx::RVec cellsize_min = { 0, 0, 0 }; /**< The minimum guaranteed cell-size with dlb=auto */ - rvec cellsize_min_dlb = {}; + gmx::RVec cellsize_min_dlb = { 0, 0, 0 }; /**< The lower limit for the DD cell size with DLB */ real cellsize_limit = 0; /**< Effectively no NB cut-off limit with DLB for systems without PBC? */ - gmx_bool bVacDLBNoLimit = false; + bool bVacDLBNoLimit = false; /** With PME load balancing we set limits on DLB */ - gmx_bool bPMELoadBalDLBLimits = false; + bool bPMELoadBalDLBLimits = false; /** DLB needs to take into account that we want to allow this maximum * cut-off (for PME load balancing), this could limit cell boundaries. 
*/ real PMELoadBal_max_cutoff = 0; /**< box lower corner, required with dim's without pbc and -gcom */ - rvec box0 = {}; + gmx::RVec box0 = { 0, 0, 0 }; /**< box size, required with dim's without pbc and -gcom */ - rvec box_size = {}; + gmx::RVec box_size = { 0, 0, 0 }; /**< The DD cell lower corner, in triclinic space */ - rvec cell_x0 = {}; + gmx::RVec cell_x0 = { 0, 0, 0 }; /**< The DD cell upper corner, in triclinic space */ - rvec cell_x1 = {}; + gmx::RVec cell_x1 = { 0, 0, 0 }; /**< The old \p cell_x0, to check cg displacements */ - rvec old_cell_x0 = {}; + gmx::RVec old_cell_x0 = { 0, 0, 0 }; /**< The old \p cell_x1, to check cg displacements */ - rvec old_cell_x1 = {}; + gmx::RVec old_cell_x1 = { 0, 0, 0 }; /** The communication setup and charge group boundaries for the zones */ gmx_domdec_zones_t zones; @@ -621,12 +621,12 @@ struct gmx_domdec_comm_t // NOLINT (clang-analyzer-optin.performance.Padding) * dynamic load balancing. */ /**< Zone limits for dim 1 with staggered grids */ - gmx_ddzone_t zone_d1[2]; + std::array<gmx_ddzone_t, 2> zone_d1; /**< Zone limits for dim 2 with staggered grids */ gmx_ddzone_t zone_d2[2][2]; /** The coordinate/force communication setup and indices */ - gmx_domdec_comm_dim_t cd[DIM]; + std::array<gmx_domdec_comm_dim_t, DIM> cd; /** Restricts the maximum number of cells to communicate with in one dimension * * Dynamic load balancing is not permitted to change sizes if it @@ -639,7 +639,7 @@ struct gmx_domdec_comm_t // NOLINT (clang-analyzer-optin.performance.Padding) int64_t master_cg_ddp_count = 0; /** The number of cg's received from the direct neighbors */ - int zone_ncg1[DD_MAXZONE] = { 0 }; + std::array<int, DD_MAXZONE> zone_ncg1 = { 0 }; /** The atom ranges in the local state */ DDAtomRanges atomRanges; @@ -687,11 +687,11 @@ struct gmx_domdec_comm_t // NOLINT (clang-analyzer-optin.performance.Padding) /* Cycle counters over nstlist steps */ /**< Total cycles counted */ - float cycl[ddCyclNr] = {}; + std::array<float, ddCyclNr> cycl = { 0 }; /**< The number of cycle recordings */ - int
cycl_n[ddCyclNr] = {}; + std::array<int, ddCyclNr> cycl_n = { 0 }; /**< The maximum cycle count */ - float cycl_max[ddCyclNr] = {}; + std::array<float, ddCyclNr> cycl_max = { 0 }; /**< Total flops counted */ double flop = 0.0; /**< The number of flop recordings */ @@ -727,7 +727,7 @@ struct gmx_domdec_comm_t // NOLINT (clang-analyzer-optin.performance.Padding) /**< Max \p load_sum over the ranks */ double load_max = 0.0; /**< Was load balancing limited, per DD dim */ - ivec load_lim = {}; + gmx::IVec load_lim = { 0, 0, 0 }; /**< Total time on PP done during PME overlap time */ double load_mdf = 0.0; /**< Total time on our PME-only rank */ diff --git a/src/gromacs/domdec/partition.cpp b/src/gromacs/domdec/partition.cpp index cbf2ff5c30..a4e994e490 100644 --- a/src/gromacs/domdec/partition.cpp +++ b/src/gromacs/domdec/partition.cpp @@ -491,8 +491,8 @@ static void dd_set_cginfo(gmx::ArrayRef<const int> index_gl, int cg0, int cg1, t static void make_dd_indices(gmx_domdec_t* dd, const int atomStart) { const int numZones = dd->comm->zones.n; - const int* zone2cg = dd->comm->zones.cg_range.data(); - const int* zone_ncg1 = dd->comm->zone_ncg1; + gmx::ArrayRef<const int> zone2cg = dd->comm->zones.cg_range; + gmx::ArrayRef<const int> zone_ncg1 = dd->comm->zone_ncg1; gmx::ArrayRef<const int> globalAtomGroupIndices = dd->globalAtomGroupIndices; std::vector<int>& globalAtomIndices = dd->globalAtomIndices; diff --git a/src/gromacs/mdlib/nsgrid.cpp b/src/gromacs/mdlib/nsgrid.cpp index 1bfe85acf0..4b821c6484 100644 --- a/src/gromacs/mdlib/nsgrid.cpp +++ b/src/gromacs/mdlib/nsgrid.cpp @@ -112,8 +112,8 @@ void get_nsgrid_boundaries(int nboundeddim, matrix box, gmx_domdec_t* dd, gmx_ddbox_t* ddbox, - rvec* gr0, - rvec* gr1, + gmx::RVec* gr0, + gmx::RVec* gr1, int ncg, rvec* cgcm, rvec grid_x0, diff --git a/src/gromacs/mdlib/nsgrid.h b/src/gromacs/mdlib/nsgrid.h index 7de1184bb2..421b917b30 100644 --- a/src/gromacs/mdlib/nsgrid.h +++ b/src/gromacs/mdlib/nsgrid.h @@ -73,8 +73,8 @@ void get_nsgrid_boundaries(int nboundeddim, matrix box, struct gmx_domdec_t* dd,
gmx_ddbox_t* ddbox, - rvec* gr0, - rvec* gr1, + gmx::RVec* gr0, + gmx::RVec* gr1, int ncg, rvec* cgcm, rvec grid_x0,