/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function declarations necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_ewald
 */
/* TODO This file is a temporary holding area for stuff local to the
 * PME code, before it acquires some more normal ewald/file.c and
 * ewald/file.h structure. In a future clean-up, get rid of this file
 * so that the module builds in the normal way. */
#ifndef GMX_EWALD_PME_INTERNAL_H
#define GMX_EWALD_PME_INTERNAL_H

#include <memory>
#include <vector>

#include "gromacs/math/gmxcomplex.h"
#include "gromacs/utility/alignedallocator.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/defaultinitializationallocator.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"

#include "spline_vectors.h"
//! A repeat of typedef from parallel_3dfft.h
typedef struct gmx_parallel_3dfft* gmx_parallel_3dfft_t;

//! Forward declaration of the GPU-side PME data, used only via pointer below
struct PmeGpu;

enum class PmeRunMode;
enum class LongRangeVdW : int;
//! Grid indices for A state for charge and Lennard-Jones C6
#define PME_GRID_QA 0
#define PME_GRID_C6A 2
//! Grid indices for B state for charge and Lennard-Jones C6
#define PME_GRID_QB 1
#define PME_GRID_C6B 3

/*! \brief Flags that indicate the number of PME grids in use */
#define DO_Q 2           /* Electrostatic grids have index q<2 */
#define DO_Q_AND_LJ 4    /* non-LB LJ grids have index 2 <= q < 4 */
#define DO_Q_AND_LJ_LB 9 /* With LB rules we need a total of 2+7 grids */
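
/* Illustrative note (added for clarity, not part of the upstream header): the Coulomb
 * grids occupy indices 0 and 1 and the LJ grids follow. With LB combination rules one
 * grid is needed per binomial term of ((sigma_i + sigma_j)/2)^6, i.e. seven LJ grids on
 * top of the two Coulomb grids. A minimal compile-time sketch of that count:
 */
static_assert(DO_Q_AND_LJ_LB == DO_Q + 7, "LB rules use 2 Coulomb grids plus 7 LJ grids");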
/*! \brief Pascal triangle coefficients scaled with (1/2)^6 for LJ-PME with LB-rules */
static const real lb_scale_factor[] = { 1.0 / 64, 6.0 / 64, 15.0 / 64, 20.0 / 64,
                                        15.0 / 64, 6.0 / 64, 1.0 / 64 };

/*! \brief Pascal triangle coefficients used in solve_pme_lj_yzx, only need to do 4 calculations due to symmetry */
static const real lb_scale_factor_symm[] = { 2.0 / 64, 12.0 / 64, 30.0 / 64, 20.0 / 64 };
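
/* Explanatory sketch (an addition, not upstream code): lb_scale_factor is the binomial
 * row C(6,k)/2^6 = {1, 6, 15, 20, 15, 6, 1}/64. Because the row is symmetric, the solver
 * can fold terms k and 6-k together, which is where lb_scale_factor_symm comes from:
 *   lb_scale_factor_symm[k] == 2*lb_scale_factor[k] for k = 0..2, and
 *   lb_scale_factor_symm[3] == lb_scale_factor[3]  for the middle term.
 */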
/*! \brief We only define a maximum to be able to use local arrays without allocation.
 * An order larger than 12 should never be needed, even for test cases.
 * If needed it can be changed here.
 */
#define PME_ORDER_MAX 12
/* Temporary suppression until these structs become opaque and don't live in
 * a header that is included by other headers. Also, until then I have no
 * idea what some of the names mean. */

//! @cond Doxygen_Suppress
/*! \brief Data structure for grid communication */
struct pme_grid_comm_t
{
    int send_id;       //!< Source rank id
    int recv_id;       //!< Destination rank id
    int recv_size = 0; //!< Receive buffer width, used with OpenMP
};

/*! \brief Data structure for grid overlap communication in a single dimension */
struct pme_overlap_t
{
    MPI_Comm         mpi_comm;  //!< MPI communicator
    int              nnodes;    //!< Number of ranks
    int              nodeid;    //!< Unique rank identifier
    std::vector<int> s2g0;      //!< The local interpolation grid start
    std::vector<int> s2g1;      //!< The local interpolation grid end
    int              send_size; //!< Send buffer width, used with OpenMP
    std::vector<pme_grid_comm_t> comm_data; //!< All the individual communication data for each rank
    std::vector<real>            sendbuf;   //!< Shared buffer for sending
    std::vector<real>            recvbuf;   //!< Shared buffer for receiving
};

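/* Explanatory note (added, not upstream code): s2g0[r] and s2g1[r] delimit rank r's slab
 * of the interpolation grid along this dimension. Spreading with interpolation order
 * pme_order writes up to pme_order-1 cells beyond the slab boundary, so those overlap
 * regions are exchanged and summed with neighboring ranks as described by comm_data,
 * using sendbuf and recvbuf as shared staging buffers. */
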
//! std::vector with SIMD-aligned element storage
template<typename T>
using AlignedVector = std::vector<T, gmx::AlignedAllocator<T>>;

//! std::vector that default-initializes on resize, avoiding costly zeroing of large buffers
template<typename T>
using FastVector = std::vector<T, gmx::DefaultInitializationAllocator<T>>;
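
/* Usage sketch (illustrative only; the names below are examples, not upstream members):
 *   FastVector<real>    coefficientBuffer; // resized every step without zero-filling
 *   AlignedVector<real> splineBufferZ;     // alignment guaranteed for SIMD loads
 * FastVector is used below for per-atom data that is overwritten each step anyway, while
 * AlignedVector is used where SIMD kernels load the data directly. */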
/*! \brief Data structure for organizing particle allocation to threads */
struct AtomToThreadMap
{
    //! Cumulative counts of the number of particles per thread
    int* n = nullptr;
    //! Storage buffer for n
    std::vector<int> nBuffer;
    //! Particle indices ordered on thread index (n)
    FastVector<int> i;
};

/*! \internal
 * \brief Coefficients for theta or dtheta
 */
class SplineCoefficients
{
public:
    //! Reallocate for use with up to nalloc coefficients
    void realloc(int nalloc);

    //! Pointers to the coefficient buffer for x, y, z
    splinevec coefficients = { nullptr };

private:
    //! Storage for x coefficients
    std::vector<real> bufferX_;
    //! Storage for y coefficients
    std::vector<real> bufferY_;
    //! Storage for z coefficients, aligned for SIMD load
    AlignedVector<real> bufferZ_;
};

/*! \brief Data structure for beta-spline interpolation */
struct splinedata_t
{
    SplineCoefficients theta;
    SplineCoefficients dtheta;
};

/*! \brief PME slab MPI communication setup */
struct SlabCommSetup
{
    //! The nodes to send x and q to with DD
    int node_dest;
    //! The nodes to receive x and q from with DD
    int node_src;
    //! Index for commnode into the buffers
    int buf_index;
    //! The number of atoms to receive
    int rcount;
};

/*! \internal
 * \brief Data structure for coordinating transfers between PME ranks along one dimension
 *
 * Also used for passing coordinates, coefficients and forces to and from PME routines.
 */
class PmeAtomComm
{
public:
    //! Constructor, \p PmeMpiCommunicator is the communicator for this dimension
    PmeAtomComm(MPI_Comm PmeMpiCommunicator, int numThreads, int pmeOrder, int dimIndex, bool doSpread);

    //! Sets the atom count and, when necessary, resizes the atom buffers
    void setNumAtoms(int numAtoms);

    //! Returns the atom count
    int numAtoms() const { return numAtoms_; }

    //! Returns the number of atoms to send to each rank
    gmx::ArrayRef<int> sendCount()
    {
        GMX_ASSERT(!count_thread.empty(), "Need at least one thread_count");
        return count_thread[0];
    }
    //! The index of the dimension, 0=x, 1=y
    int dimind = 0;
    //! The number of slabs and ranks this dimension is decomposed over
    int nslab = 1;
    //! Our MPI rank index
    int nodeid = 0;
    //! Communicator for this dimension
    MPI_Comm mpi_comm;

    //! Communication setup for each slab, only present with nslab > 1
    std::vector<SlabCommSetup> slabCommSetup;
    //! The maximum communication distance counted in MPI ranks
    int maxshift = 0;

    //! The target slab index for each particle
    FastVector<int> pd;
    //! Target particle counts for each slab, for each thread
    std::vector<std::vector<int>> count_thread;

private:
    //! The number of atoms
    int numAtoms_ = 0;

public:
    //! The coordinates
    gmx::ArrayRef<const gmx::RVec> x;
    //! The coefficient, charges or LJ C6
    gmx::ArrayRef<const real> coefficient;
    //! The forces
    gmx::ArrayRef<gmx::RVec> f;
    //! Coordinate buffer, used only with nslab > 1
    FastVector<gmx::RVec> xBuffer;
    //! Coefficient buffer, used only with nslab > 1
    FastVector<real> coefficientBuffer;
    //! Force buffer, used only with nslab > 1
    FastVector<gmx::RVec> fBuffer;
    //! Tells whether these coordinates are used for spreading
    bool bSpread;
    //! The grid index per atom
    FastVector<gmx::IVec> idx;
    //! Fractional atom coordinates relative to the lower cell boundary
    FastVector<gmx::RVec> fractx;

    //! The number of threads to use in PME
    int nthread;
    //! Thread index for each atom
    FastVector<int>              thread_idx;
    std::vector<AtomToThreadMap> threadMap;
    std::vector<splinedata_t>    spline;
};

/*! \brief Data structure for a single PME grid */
struct pmegrid_t
{
    ivec  ci;     /* The spatial location of this grid         */
    ivec  n;      /* The used size of *grid, including order-1 */
    ivec  offset; /* The grid offset from the full node grid   */
    int   order;  /* PME spreading order                       */
    ivec  s;      /* The allocated size of *grid, s >= n       */
    real* grid;   /* The grid for the local thread, size n     */
};

/*! \brief Data structures for PME grids */
struct pmegrids_t
{
    pmegrid_t  grid;         /* The full node grid (non thread-local)            */
    int        nthread;      /* The number of threads operating on this grid     */
    ivec       nc;           /* The local spatial decomposition over the threads */
    pmegrid_t* grid_th;      /* Array of grids for each thread                   */
    real*      grid_all;     /* Allocated array for the grids in *grid_th        */
    int*       g2t[DIM];     /* The grid to thread index                         */
    ivec       nthread_comm; /* The number of threads to communicate with        */
};

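/* Explanatory note (added, not upstream code): when nthread > 1 each thread spreads onto
 * its own pmegrid_t in grid_th (all backed by the single grid_all allocation), and the
 * thread-local grids are then reduced into the full node grid; g2t maps grid lines to the
 * owning thread and nthread_comm bounds how many neighboring threads take part in that
 * reduction. */
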
/*! \brief Data structure for spline-interpolation working buffers */
struct pme_spline_work;

/*! \brief Data structure for working buffers */
struct pme_solve_work_t;
/*! \brief Master PME data structure */
struct gmx_pme_t
{ //NOLINT(clang-analyzer-optin.performance.Padding)
    int ndecompdim; /* The number of decomposition dimensions */
    int nodeid;     /* Our nodeid in mpi->mpi_comm */
    int nnodes;     /* The number of nodes doing PME */

    MPI_Comm     mpi_comm_d[2]; /* Indexed on dimension, 0=x, 1=y */
    MPI_Datatype rvec_mpi;      /* the pme vector's MPI type */
    gmx_bool bUseThreads; /* Do any of the PME ranks have nthread > 1? */
    int      nthread;     /* The number of threads doing PME on our rank */

    gmx_bool bPPnode;   /* Node also does particle-particle forces */
    bool     doCoulomb; /* Apply PME to electrostatics */
    bool     doLJ;      /* Apply PME to Lennard-Jones r^-6 interactions */
    gmx_bool bFEP;      /* Compute the free-energy contribution */
    int nkx, nky, nkz; /* Grid dimensions */

    gmx_bool bP3M; /* Do P3M: optimize the influence function */

    real ewaldcoeff_q;  /* Ewald splitting coefficient for Coulomb */
    real ewaldcoeff_lj; /* Ewald splitting coefficient for r^-6 */

    enum PmeRunMode runMode; /* Which codepath the PME runner is taking - CPU, GPU, mixed;
                              * TODO: this is information that should be owned by the task
                              * scheduler, and ideally not be duplicated here.
                              */
    PmeGpu* gpu; /* A pointer to the GPU data.
                  * TODO: this should be a unique or a shared pointer.
                  * Currently in practice there is a single gmx_pme_t instance, while the code
                  * is partially set up to handle many of them. The PME tuning calls gmx_pme_reinit(),
                  * which fully reinitializes the one and only PME structure anew, while maybe
                  * keeping the old grid buffers if they were already large enough.
                  * This choice should be made explicit in a later refactoring:
                  * do we store many PME objects for different grid sizes,
                  * or a single PME object that handles different grid sizes gracefully?
                  */

    class EwaldBoxZScaler* boxScaler; /**< The scaling data Ewald uses with walls (set at pme_init, constant for the entire run) */
    LongRangeVdW ljpme_combination_rule; /* Type of combination rule in LJ-PME */

    int ngrids; /* Number of grids we maintain for pmegrid, (c)fftgrid and pfft_setup */

    pmegrids_t pmegrid[DO_Q_AND_LJ_LB]; /* Grids on which we do spreading/interpolation,
                                         * includes overlap. Grid indices are ordered as
                                         * follows:
                                         * 0: Coulomb PME, state A
                                         * 1: Coulomb PME, state B
                                         * 2-8: LJ-PME
                                         * This can probably be done in a better way
                                         * but this simple hack works for now.
                                         */
    /* The PME coefficient spreading grid sizes/strides, includes pme_order-1 */
    int pmegrid_nx, pmegrid_ny, pmegrid_nz;
    /* pmegrid_nz might be larger than strictly necessary to ensure
     * memory alignment, pmegrid_nz_base gives the real base size.
     */
    int pmegrid_nz_base;
    /* The local PME grid starting indices */
    int pmegrid_start_ix, pmegrid_start_iy, pmegrid_start_iz;
    /* Work data for spreading and gathering */
    pme_spline_work* spline_work;

    real** fftgrid; /* Grids for FFT. With 1D FFT decomposition this can be a pointer
                     * inside the interpolation grid, but separate for 2D PME decomposition. */
    int fftgrid_nx, fftgrid_ny, fftgrid_nz;

    t_complex** cfftgrid; /* Grids for complex FFT data */

    int cfftgrid_nx, cfftgrid_ny, cfftgrid_nz;

    gmx_parallel_3dfft_t* pfft_setup;

    int * nnx, *nny, *nnz;
    real *fshx, *fshy, *fshz;

    std::vector<PmeAtomComm> atc; /* Indexed on decomposition index */
    /* Buffers to store data for local atoms for L-B combination rule
     * calculations in LJ-PME. lb_buf1 stores either the coefficients
     * for spreading/gathering (in serial), or the C6 coefficient for
     * local atoms (in parallel). lb_buf2 is only used in parallel,
     * and stores the sigma values for local atoms. */
    FastVector<real> lb_buf1;
    FastVector<real> lb_buf2;
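
    /* Explanatory note (an addition, not upstream code): with Lorentz-Berthelot rules the
     * cross term C6_ij involves ((sigma_i + sigma_j)/2)^6, which is expanded binomially
     * using lb_scale_factor, one LJ grid per power of sigma. The per-atom C6 and sigma
     * values staged in lb_buf1 and lb_buf2 are what those per-grid spreading coefficients
     * are built from. */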
    pme_overlap_t overlap[2]; /* Indexed on dimension, 0=x, 1=y */

    /* Atom step for energy only calculation in gmx_pme_calc_energy() */
    std::unique_ptr<PmeAtomComm> atc_energy;

    /* Communication buffers */
    rvec* bufv;       /* Communication buffer */
    real* bufr;       /* Communication buffer */
    int   buf_nalloc; /* The communication buffer size */
    /* Thread-local work data for solve_pme */
    struct pme_solve_work_t* solve_work;

    /* Work data for sum_qgrid */
    real* sum_qgrid_dd_tmp;