#include "gromacs/math/gmxcomplex.h"
#include "gromacs/utility/alignedallocator.h"
#include "gromacs/utility/arrayref.h"
-#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/defaultinitializationallocator.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
struct t_commrec;
struct t_inputrec;
struct PmeGpu;
-
+class EwaldBoxZScaler;
enum class PmeRunMode;
enum class LongRangeVdW : int;
//@{
/*! \brief Flags that indicate the number of PME grids in use */
-#define DO_Q 2 /* Electrostatic grids have index q<2 */
-#define DO_Q_AND_LJ 4 /* non-LB LJ grids have index 2 <= q < 4 */
+#define DO_Q 2 /* Electrostatic grids have index q<2 */
+#define DO_Q_AND_LJ 4 /* non-LB LJ grids have index 2 <= q < 4 */
#define DO_Q_AND_LJ_LB 9 /* With LB rules we need a total of 2+7 grids */
//@}
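/* A minimal sketch (not GROMACS code; the helper name is hypothetical and the
 * LongRangeVdW::LB enumerator is assumed from the forward declaration above)
 * of how a grid count would follow from these flags, using the doLJ and
 * ljpme_combination_rule members declared further down in the struct:
 *
 *   int chooseGridCount(bool doLJ, LongRangeVdW rule)
 *   {
 *       if (!doLJ)
 *       {
 *           return DO_Q; // only the two Coulomb grids (states A and B)
 *       }
 *       // LB combination rules need 2 + 7 grids, other rules 2 + 2
 *       return (rule == LongRangeVdW::LB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ;
 *   }
 */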
MPI_Datatype rvec_mpi; /* the pme vector's MPI type */
#endif
- gmx_bool bUseThreads; /* Does any of the PME ranks have nthread>1 ? */
- int nthread; /* The number of threads doing PME on our rank */
+ bool bUseThreads; /* Does any of the PME ranks have nthread>1 ? */
+ int nthread; /* The number of threads doing PME on our rank */
- gmx_bool bPPnode; /* Node also does particle-particle forces */
- bool doCoulomb; /* Apply PME to electrostatics */
- bool doLJ; /* Apply PME to Lennard-Jones r^-6 interactions */
- gmx_bool bFEP; /* Compute Free energy contribution */
- gmx_bool bFEP_q;
- gmx_bool bFEP_lj;
- int nkx, nky, nkz; /* Grid dimensions */
- gmx_bool bP3M; /* Do P3M: optimize the influence function */
- int pme_order;
- real ewaldcoeff_q; /* Ewald splitting coefficient for Coulomb */
- real ewaldcoeff_lj; /* Ewald splitting coefficient for r^-6 */
- real epsilon_r;
+ bool bPPnode; /* Node also does particle-particle forces */
+ bool doCoulomb; /* Apply PME to electrostatics */
+ bool doLJ; /* Apply PME to Lennard-Jones r^-6 interactions */
+ bool bFEP; /* Compute Free energy contribution */
+ bool bFEP_q;
+ bool bFEP_lj;
+ int nkx, nky, nkz; /* Grid dimensions */
+ bool bP3M; /* Do P3M: optimize the influence function */
+ int pme_order;
+ real ewaldcoeff_q; /* Ewald splitting coefficient for Coulomb */
+ real ewaldcoeff_lj; /* Ewald splitting coefficient for r^-6 */
+ real epsilon_r;
enum PmeRunMode runMode; /* Which codepath the PME runner is taking: CPU, GPU, or mixed */
- class EwaldBoxZScaler* boxScaler; /**< The scaling data Ewald uses with walls (set at pme_init constant for the entire run) */
+ std::unique_ptr<EwaldBoxZScaler> boxScaler; /**< The scaling data Ewald uses with walls (set at pme_init, constant for the entire run) */
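/* Hedged sketch of the ownership change above (not the actual pme_init code;
 * constructor arguments are elided because they are defined elsewhere):
 *
 *   pme->boxScaler = std::make_unique<EwaldBoxZScaler>(...);
 *
 * The explicit delete a raw pointer would have required then disappears. Since
 * EwaldBoxZScaler is only forward-declared in this header, the destructor of
 * the enclosing struct has to be defined in a translation unit that sees the
 * full class definition, so the std::unique_ptr member can destroy it.
 */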
LongRangeVdW ljpme_combination_rule; /* Type of combination rule in LJ-PME */
int ngrids; /* number of grids we maintain for pmegrid, (c)fftgrid and pfft_setups*/
- pmegrids_t pmegrid[DO_Q_AND_LJ_LB]; /* Grids on which we do spreading/interpolation,
- * includes overlap Grid indices are ordered as
- * follows:
- * 0: Coloumb PME, state A
- * 1: Coloumb PME, state B
- * 2-8: LJ-PME
- * This can probably be done in a better way
- * but this simple hack works for now
- */
+ std::array<pmegrids_t, DO_Q_AND_LJ_LB> pmegrid; /* Grids on which we do spreading/interpolation,
+ * includes overlap. Grid indices are ordered as
+ * follows:
+ * 0: Coulomb PME, state A
+ * 1: Coulomb PME, state B
+ * 2-8: LJ-PME
+ * This can probably be done in a better way
+ * but this simple hack works for now
+ */
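/* Illustrative use of the ordering above (a sketch, not a quote from the
 * spread/solve code): Coulomb grids occupy q = 0 (state A) and q = 1 (state B),
 * and LJ-PME grids start at q = DO_Q, with ngrids (declared below) giving the
 * number actually in use:
 *
 *   for (int q = 0; q < pme->ngrids; ++q)
 *   {
 *       pmegrids_t& grids = pme->pmegrid[q]; // std::array indexing, same as the old C array
 *       if (q < DO_Q)
 *       {
 *           // Coulomb grid: q == 0 is state A, q == 1 is state B
 *       }
 *       else
 *       {
 *           // LJ-PME grid
 *       }
 *   }
 */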
/* The PME coefficient spreading grid sizes/strides, includes pme_order-1 */
int pmegrid_nx, pmegrid_ny, pmegrid_nz;
pme_spline_work* spline_work;
real** fftgrid; /* Grids for FFT. With 1D FFT decomposition this can be a pointer */
- /* inside the interpolation grid, but separate for 2D PME decomp. */
- int fftgrid_nx, fftgrid_ny, fftgrid_nz;
t_complex** cfftgrid; /* Grids for complex FFT data */
- int cfftgrid_nx, cfftgrid_ny, cfftgrid_nz;
-
gmx_parallel_3dfft_t* pfft_setup;
int * nnx, *nny, *nnz;
/* Work buffers used for LJ-PME with Lorentz-Berthelot combination rules */
FastVector<real> lb_buf1;
FastVector<real> lb_buf2;
- pme_overlap_t overlap[2]; /* Indexed on dimension, 0=x, 1=y */
+ std::array<pme_overlap_t, 2> overlap; /* Indexed on dimension, 0=x, 1=y */
/* Atom step for energy only calculation in gmx_pme_calc_energy() */
std::unique_ptr<PmeAtomComm> atc_energy;
/* thread local work data for solve_pme */
struct pme_solve_work_t* solve_work;
-
- /* Work data for sum_qgrid */
- real* sum_qgrid_tmp;
- real* sum_qgrid_dd_tmp;
};
//! @endcond