Remove dead members from gmx_pme_t

Drop the unused sum_qgrid_tmp/sum_qgrid_dd_tmp work buffers and the
unused fftgrid_*/cfftgrid_* dimension members. While there, hold the
EwaldBoxZScaler in a std::unique_ptr so gmx_pme_destroy no longer needs
an explicit delete, turn the fixed-size pmegrid and overlap C arrays
into std::array, convert several gmx_bool flags to bool, and remove the
now-unneeded basedefinitions.h include.
author Joe Jordan <ejjordan12@gmail.com>
Mon, 24 May 2021 12:34:55 +0000 (12:34 +0000)
committer Paul Bauer <paul.bauer.q@gmail.com>
Mon, 24 May 2021 12:34:55 +0000 (12:34 +0000)
src/gromacs/ewald/pme.cpp
src/gromacs/ewald/pme_gpu_internal.cpp
src/gromacs/ewald/pme_internal.h

index ba72fb1a9ef446381d56ccd8fceeec818c9faf8d..5eba296172ab3e8a38b58484b70facbeedbbdfbe 100644 (file)
@@ -550,9 +550,6 @@ gmx_pme_t* gmx_pme_init(const t_commrec*     cr,
 
     gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());
 
-    pme->sum_qgrid_tmp    = nullptr;
-    pme->sum_qgrid_dd_tmp = nullptr;
-
     pme->buf_nalloc = 0;
 
     pme->nnodes  = 1;
@@ -686,8 +683,8 @@ gmx_pme_t* gmx_pme_init(const t_commrec*     cr,
 
     // The box requires scaling with nwalls = 2, we store that condition as well
     // as the scaling factor
-    delete pme->boxScaler;
-    pme->boxScaler = new EwaldBoxZScaler(inputrecPbcXY2Walls(ir), ir->wall_ewald_zfac);
+    pme->boxScaler = std::make_unique<EwaldBoxZScaler>(
+            EwaldBoxZScaler(inputrecPbcXY2Walls(ir), ir->wall_ewald_zfac));
 
     /* If we violate restrictions, generate a fatal error here */
     gmx_pme_check_restrictions(
@@ -1655,8 +1652,6 @@ void gmx_pme_destroy(gmx_pme_t* pme)
         return;
     }
 
-    delete pme->boxScaler;
-
     sfree(pme->nnx);
     sfree(pme->nny);
     sfree(pme->nnz);
@@ -1692,9 +1687,6 @@ void gmx_pme_destroy(gmx_pme_t* pme)
         pme_free_all_work(&pme->solve_work, pme->nthread);
     }
 
-    sfree(pme->sum_qgrid_tmp);
-    sfree(pme->sum_qgrid_dd_tmp);
-
     destroy_pme_spline_work(pme->spline_work);
 
     if (pme->gpu != nullptr)
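
The pme.cpp hunks above move boxScaler from a manually managed raw pointer to std::unique_ptr, which is what lets the explicit delete in gmx_pme_destroy disappear. Below is a minimal, self-contained sketch of that ownership pattern; BoxScaler and PmeLike are illustrative stand-ins rather than GROMACS types, and the constructor arguments only mirror the shape of the real EwaldBoxZScaler call.

    #include <memory>

    // Illustrative stand-in for EwaldBoxZScaler; the real constructor takes the
    // two-wall condition and the wall Ewald z-scaling factor (see gmx_pme_init above).
    class BoxScaler
    {
    public:
        BoxScaler(bool usesTwoWalls, double wallZScaleFactor) :
            usesTwoWalls_(usesTwoWalls), wallZScaleFactor_(wallZScaleFactor)
        {
        }

    private:
        bool   usesTwoWalls_;
        double wallZScaleFactor_;
    };

    // Illustrative stand-in for gmx_pme_t: the owning smart pointer replaces the
    // raw BoxScaler* member, so no explicit delete is needed on destruction.
    struct PmeLike
    {
        std::unique_ptr<BoxScaler> boxScaler;
    };

    int main()
    {
        PmeLike pme;
        // The committed code copy-constructs from a temporary:
        //     std::make_unique<EwaldBoxZScaler>(EwaldBoxZScaler(...))
        // Forwarding the constructor arguments directly is equivalent and avoids the copy:
        pme.boxScaler = std::make_unique<BoxScaler>(/*usesTwoWalls=*/true, /*wallZScaleFactor=*/3.0);
        // The BoxScaler is destroyed automatically when pme goes out of scope.
    }
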
index 9700d01f941336e94d4fce061973252c35dd53b1..d0e54ea1012194a573b7914c2d1c676d14cdbb43 100644 (file)
@@ -828,7 +828,7 @@ static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
     pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
     pmeGpu->common->runMode       = pme->runMode;
     pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
-    pmeGpu->common->boxScaler     = pme->boxScaler;
+    pmeGpu->common->boxScaler     = pme->boxScaler.get();
 }
 
 /*! \libinternal \brief
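
With ownership now inside gmx_pme_t, the GPU-side common data keeps only a non-owning view, hence the .get() in the hunk above. A minimal sketch of that owner/observer split follows; Owner, View and Scaler are illustrative names, not GROMACS types.

    #include <memory>

    struct Scaler
    {
    };

    // Owns the Scaler for its whole lifetime, like gmx_pme_t owns boxScaler.
    struct Owner
    {
        std::unique_ptr<Scaler> scaler = std::make_unique<Scaler>();
    };

    // Observes the Scaler without owning it, like pmeGpu->common->boxScaler.
    struct View
    {
        Scaler* scaler = nullptr;
    };

    int main()
    {
        Owner owner;
        View  view;
        // .get() exposes the raw pointer without transferring ownership,
        // so no second delete can happen; the View must not outlive the Owner.
        view.scaler = owner.scaler.get();
    }
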
index c97f9e2948460c11c51c1ac2d70349ffa53112a1..ce4684e4cb1c16c0ce9432c2c0c04014e47eab92 100644 (file)
@@ -61,7 +61,6 @@
 #include "gromacs/math/gmxcomplex.h"
 #include "gromacs/utility/alignedallocator.h"
 #include "gromacs/utility/arrayref.h"
-#include "gromacs/utility/basedefinitions.h"
 #include "gromacs/utility/defaultinitializationallocator.h"
 #include "gromacs/utility/gmxassert.h"
 #include "gromacs/utility/gmxmpi.h"
@@ -74,7 +73,7 @@ typedef struct gmx_parallel_3dfft* gmx_parallel_3dfft_t;
 struct t_commrec;
 struct t_inputrec;
 struct PmeGpu;
-
+class EwaldBoxZScaler;
 enum class PmeRunMode;
 enum class LongRangeVdW : int;
 
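
Because the header now only stores a std::unique_ptr<EwaldBoxZScaler>, the forward declaration added in this hunk is sufficient here; the complete class definition only has to be visible in translation units that destroy or reset the pointer (pme.cpp, which presumably includes it). A minimal sketch of the idiom follows, using the illustrative names Scaler and Holder and the common variant of declaring the destructor in the header and defining it where the type is complete.

    #include <memory>

    // "Header" part: a forward declaration is enough to hold a unique_ptr member.
    class Scaler;

    struct Holder
    {
        std::unique_ptr<Scaler> scaler;
        ~Holder(); // declared here, defined below where Scaler is complete
    };

    // "Source" part: the destroying translation unit sees the full type.
    class Scaler
    {
    };

    Holder::~Holder() = default; // unique_ptr's default deleter needs the complete type here

    int main()
    {
        Holder holder;
        holder.scaler = std::make_unique<Scaler>();
    } // holder's destructor runs where Scaler is complete, so this is well-formed
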
@@ -86,8 +85,8 @@ enum class LongRangeVdW : int;
 
 //@{
 /*! \brief Flags that indicate the number of PME grids in use */
-#define DO_Q 2           /* Electrostatic grids have index q<2 */
-#define DO_Q_AND_LJ 4    /* non-LB LJ grids have index 2 <= q < 4 */
+#define DO_Q 2 /* Electrostatic grids have index q<2 */
+#define DO_Q_AND_LJ 4 /* non-LB LJ grids have index 2 <= q < 4 */
 #define DO_Q_AND_LJ_LB 9 /* With LB rules we need a total of 2+7 grids */
 //@}
 
@@ -321,21 +320,21 @@ struct gmx_pme_t
     MPI_Datatype rvec_mpi; /* the pme vector's MPI type */
 #endif
 
-    gmx_bool bUseThreads; /* Does any of the PME ranks have nthread>1 ?  */
-    int      nthread;     /* The number of threads doing PME on our rank */
+    bool bUseThreads; /* Does any of the PME ranks have nthread>1 ?  */
+    int  nthread;     /* The number of threads doing PME on our rank */
 
-    gmx_bool bPPnode;   /* Node also does particle-particle forces */
-    bool     doCoulomb; /* Apply PME to electrostatics */
-    bool     doLJ;      /* Apply PME to Lennard-Jones r^-6 interactions */
-    gmx_bool bFEP;      /* Compute Free energy contribution */
-    gmx_bool bFEP_q;
-    gmx_bool bFEP_lj;
-    int      nkx, nky, nkz; /* Grid dimensions */
-    gmx_bool bP3M;          /* Do P3M: optimize the influence function */
-    int      pme_order;
-    real     ewaldcoeff_q;  /* Ewald splitting coefficient for Coulomb */
-    real     ewaldcoeff_lj; /* Ewald splitting coefficient for r^-6 */
-    real     epsilon_r;
+    bool bPPnode;   /* Node also does particle-particle forces */
+    bool doCoulomb; /* Apply PME to electrostatics */
+    bool doLJ;      /* Apply PME to Lennard-Jones r^-6 interactions */
+    bool bFEP;      /* Compute Free energy contribution */
+    bool bFEP_q;
+    bool bFEP_lj;
+    int  nkx, nky, nkz; /* Grid dimensions */
+    bool bP3M;          /* Do P3M: optimize the influence function */
+    int  pme_order;
+    real ewaldcoeff_q;  /* Ewald splitting coefficient for Coulomb */
+    real ewaldcoeff_lj; /* Ewald splitting coefficient for r^-6 */
+    real epsilon_r;
 
 
     enum PmeRunMode runMode; /* Which codepath is the PME runner taking - CPU, GPU, mixed;
@@ -355,22 +354,22 @@ struct gmx_pme_t
                   */
 
 
-    class EwaldBoxZScaler* boxScaler; /**< The scaling data Ewald uses with walls (set at pme_init constant for the entire run) */
+    std::unique_ptr<EwaldBoxZScaler> boxScaler; /**< The scaling data Ewald uses with walls (set at pme_init constant for the entire run) */
 
 
     LongRangeVdW ljpme_combination_rule; /* Type of combination rule in LJ-PME */
 
     int ngrids; /* number of grids we maintain for pmegrid, (c)fftgrid and pfft_setups*/
 
-    pmegrids_t pmegrid[DO_Q_AND_LJ_LB]; /* Grids on which we do spreading/interpolation,
-                                         * includes overlap Grid indices are ordered as
-                                         * follows:
-                                         * 0: Coloumb PME, state A
-                                         * 1: Coloumb PME, state B
-                                         * 2-8: LJ-PME
-                                         * This can probably be done in a better way
-                                         * but this simple hack works for now
-                                         */
+    std::array<pmegrids_t, DO_Q_AND_LJ_LB> pmegrid; /* Grids on which we do spreading/interpolation,
+                                                     * includes overlap. Grid indices are ordered as
+                                                     * follows:
+                                                     * 0: Coulomb PME, state A
+                                                     * 1: Coulomb PME, state B
+                                                     * 2-8: LJ-PME
+                                                     * This can probably be done in a better way
+                                                     * but this simple hack works for now
+                                                     */
 
     /* The PME coefficient spreading grid sizes/strides, includes pme_order-1 */
     int pmegrid_nx, pmegrid_ny, pmegrid_nz;
@@ -385,13 +384,9 @@ struct gmx_pme_t
     pme_spline_work* spline_work;
 
     real** fftgrid; /* Grids for FFT. With 1D FFT decomposition this can be a pointer */
-    /* inside the interpolation grid, but separate for 2D PME decomp. */
-    int fftgrid_nx, fftgrid_ny, fftgrid_nz;
 
     t_complex** cfftgrid; /* Grids for complex FFT data */
 
-    int cfftgrid_nx, cfftgrid_ny, cfftgrid_nz;
-
     gmx_parallel_3dfft_t* pfft_setup;
 
     int * nnx, *nny, *nnz;
@@ -409,7 +404,7 @@ struct gmx_pme_t
     FastVector<real> lb_buf1;
     FastVector<real> lb_buf2;
 
-    pme_overlap_t overlap[2]; /* Indexed on dimension, 0=x, 1=y */
+    std::array<pme_overlap_t, 2> overlap; /* Indexed on dimension, 0=x, 1=y */
 
     /* Atom step for energy only calculation in gmx_pme_calc_energy() */
     std::unique_ptr<PmeAtomComm> atc_energy;
@@ -421,10 +416,6 @@ struct gmx_pme_t
 
     /* thread local work data for solve_pme */
     struct pme_solve_work_t* solve_work;
-
-    /* Work data for sum_qgrid */
-    real* sum_qgrid_tmp;
-    real* sum_qgrid_dd_tmp;
 };
 
 //! @endcond
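
The remaining pme_internal.h hunks swap gmx_bool for plain bool (which is what makes the basedefinitions.h include removable) and turn the fixed-size C arrays pmegrid[DO_Q_AND_LJ_LB] and overlap[2] into std::array. The sketch below shows what the std::array form buys in isolation; GridLike, OverlapLike and PmeLike are illustrative stand-ins for the real structs.

    #include <array>
    #include <cstddef>

    // Illustrative stand-ins for the grid and overlap structs.
    struct GridLike
    {
        int nx = 0, ny = 0, nz = 0;
    };
    struct OverlapLike
    {
        int commSize = 0;
    };

    constexpr std::size_t c_numGrids   = 9; // DO_Q_AND_LJ_LB: 2 Coulomb + 7 LJ grids
    constexpr std::size_t c_numOverlap = 2; // one entry per decomposed dimension (x, y)

    struct PmeLike
    {
        // std::array keeps the element count in the type, value-initializes like any
        // other class-type member, and provides size() and iterators, while staying
        // layout-compatible with the raw C arrays it replaces.
        std::array<GridLike, c_numGrids>      pmegrid{};
        std::array<OverlapLike, c_numOverlap> overlap{};
        bool                                  bPPnode = false; // plain bool instead of gmx_bool
    };

    int main()
    {
        PmeLike pme;
        for (GridLike& grid : pme.pmegrid) // range-for works directly on std::array
        {
            grid.nx = 4;
        }
    }
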