/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function declarations necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_ewald
 */
/* TODO This file is a temporary holding area for stuff local to the
 * PME code, before it acquires a more conventional ewald/file.c and
 * ewald/file.h structure. In a future clean-up, get rid of this file
 * so that the module has a more conventional layout. */
#ifndef GMX_EWALD_PME_INTERNAL_H
#define GMX_EWALD_PME_INTERNAL_H
#include <memory>
#include <vector>

#include "gromacs/math/gmxcomplex.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/defaultinitializationallocator.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/smalloc.h"

#include "pme_gpu_types_host.h"
//! A repeat of the typedef from parallel_3dfft.h
typedef struct gmx_parallel_3dfft* gmx_parallel_3dfft_t;
//! Grid index for the A state Lennard-Jones C6 coefficients (the charge grids use indices 0 and 1)
#define PME_GRID_C6A 2
/*! \brief Flags that indicate the number of PME grids in use */
#define DO_Q 2           /* Electrostatic grids have index q < 2 */
#define DO_Q_AND_LJ 4    /* Non-LB LJ grids have index 2 <= q < 4 */
#define DO_Q_AND_LJ_LB 9 /* With LB rules we need a total of 2+7 grids */
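
/* A minimal sketch (illustration only, not a GROMACS routine) of how the flags
 * above map to a grid count: plain electrostatics needs DO_Q grids, LJ-PME with
 * geometric combination rules needs DO_Q_AND_LJ, and Lorentz-Berthelot (LB)
 * combination rules need the full DO_Q_AND_LJ_LB set of 2+7 grids. */
static inline int exampleGridCount(bool useLJPme, bool useLBCombinationRules)
{
    if (!useLJPme)
    {
        return DO_Q;
    }
    return useLBCombinationRules ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ;
}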
/*! \brief Pascal triangle coefficients scaled with (1/2)^6 for LJ-PME with LB-rules */
static const real lb_scale_factor[] = { 1.0 / 64, 6.0 / 64, 15.0 / 64, 20.0 / 64,
                                        15.0 / 64, 6.0 / 64, 1.0 / 64 };
/*! \brief Pascal triangle coefficients used in solve_pme_lj_yzx; due to symmetry only 4 terms need to be computed */
static const real lb_scale_factor_symm[] = { 2.0 / 64, 12.0 / 64, 30.0 / 64, 20.0 / 64 };
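
/* Illustration only (hypothetical helper, not part of GROMACS): the LB rule uses
 * sigma_ij = (sigma_i + sigma_j)/2, and the binomial expansion
 *   sigma_ij^6 = (1/64) * sum_{k=0..6} C(6,k) * sigma_i^k * sigma_j^(6-k)
 * is exactly the weighting stored in lb_scale_factor above. lb_scale_factor_symm
 * folds the k and 6-k terms together (2/64, 12/64, 30/64 plus the central 20/64). */
static inline real exampleCombinedSigma6(real sigmaI, real sigmaJ)
{
    real powI[7], powJ[7]; /* powI[k] = sigmaI^k, powJ[k] = sigmaJ^k */
    powI[0] = powJ[0] = 1;
    for (int k = 1; k <= 6; k++)
    {
        powI[k] = powI[k - 1] * sigmaI;
        powJ[k] = powJ[k - 1] * sigmaJ;
    }
    real sum = 0;
    for (int k = 0; k <= 6; k++)
    {
        sum += lb_scale_factor[k] * powI[k] * powJ[6 - k];
    }
    return sum; /* equals ((sigmaI + sigmaJ)/2)^6 */
}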
/*! \brief We only define a maximum to be able to use local arrays without allocation.
 * An order larger than 12 should never be needed, even for test cases.
 * If needed it can be changed here.
 */
#define PME_ORDER_MAX 12
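
/* Illustration only (hypothetical helper, not a GROMACS routine): the compile-time
 * bound lets spread/gather code keep the per-atom spline weights for one dimension
 * in a fixed-size local array instead of allocating; order <= PME_ORDER_MAX is assumed. */
static inline void exampleZeroSplineWeights(real (&theta)[PME_ORDER_MAX], int order)
{
    for (int k = 0; k < order; k++)
    {
        theta[k] = 0;
    }
}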
/*! \brief As gmx_pme_init, but takes most settings, except the grid/Ewald coefficients, from
 * pme_src. This is only called when the PME cut-off/grid size changes.
 */
void gmx_pme_reinit(struct gmx_pme_t** pmedata,
                    const t_commrec*   cr,
                    struct gmx_pme_t*  pme_src,
                    const t_inputrec*  ir,
                    const ivec         grid_size,
                    real               ewaldcoeff_q,
                    real               ewaldcoeff_lj);
/* Temporary suppression until these structs become opaque and don't live in
 * a header that is included by other headers. Also, until then, the meaning of
 * some of the names remains unclear. */

//! @cond Doxygen_Suppress
/*! \brief Data structure for grid communication */
struct pme_grid_comm_t
{
    int send_id;       //!< Source rank id
    int recv_id;       //!< Destination rank id
    int recv_size = 0; //!< Receive buffer width, used with OpenMP
};
/*! \brief Data structure for grid overlap communication in a single dimension */
struct pme_overlap_t
{
    MPI_Comm                     mpi_comm;  //!< MPI communicator
    int                          nnodes;    //!< Number of ranks
    int                          nodeid;    //!< Unique rank identifier
    std::vector<int>             s2g0;      //!< The local interpolation grid start
    std::vector<int>             s2g1;      //!< The local interpolation grid end
    int                          send_size; //!< Send buffer width, used with OpenMP
    std::vector<pme_grid_comm_t> comm_data; //!< All the individual communication data for each rank
    std::vector<real>            sendbuf;   //!< Shared buffer for sending
    std::vector<real>            recvbuf;   //!< Shared buffer for receiving
};
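
/* Illustration only (hypothetical helper, not part of GROMACS), assuming s2g1
 * stores one-past-the-end grid indices: the number of interpolation grid lines
 * a given rank handles in this dimension, including its overlap region. */
static inline int exampleLocalGridWidth(const pme_overlap_t& overlap, int rank)
{
    return overlap.s2g1[rank] - overlap.s2g0[rank];
}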
//! Vector with SIMD-aligned storage
template<typename T>
using AlignedVector = std::vector<T, gmx::AlignedAllocator<T>>;

//! Vector that avoids zero-initialization of new elements on resize
template<typename T>
using FastVector = std::vector<T, gmx::DefaultInitializationAllocator<T>>;
/*! \brief Data structure for organizing particle allocation to threads */
struct AtomToThreadMap
{
    //! Cumulative counts of the number of particles per thread
    int* n = nullptr;
    //! Storage buffer for n
    std::vector<int> nBuffer;
    //! Particle indices ordered on thread index (n)
    FastVector<int> i;
};
/*! \brief Helper typedef for spline vectors */
typedef real* splinevec[DIM];
/*! \internal
 * \brief Coefficients for theta or dtheta
 */
class SplineCoefficients
{
public:
    //! Reallocate for use with up to nalloc coefficients
    void realloc(int nalloc);

    //! Pointers to the coefficient buffer for x, y, z
    splinevec coefficients = { nullptr };

private:
    //! Storage for x coefficients
    std::vector<real> bufferX_;
    //! Storage for y coefficients
    std::vector<real> bufferY_;
    //! Storage for z coefficients, aligned for SIMD load
    AlignedVector<real> bufferZ_;
};
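
/* Illustration only (hypothetical helper, not part of GROMACS), assuming that
 * realloc(n) provides room for n coefficients in each of the DIM dimensions:
 * fill all spline coefficients with one value. */
static inline void exampleFillSplineCoefficients(SplineCoefficients* sc, int n, real value)
{
    sc->realloc(n);
    for (int d = 0; d < DIM; d++)
    {
        for (int k = 0; k < n; k++)
        {
            sc->coefficients[d][k] = value;
        }
    }
}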
/*! \brief Data structure for beta-spline interpolation */
struct splinedata_t
{
    SplineCoefficients theta;
    SplineCoefficients dtheta;
};
/*! \brief PME slab MPI communication setup */
struct SlabCommSetup
{
    //! The rank to send x and q to with DD
    int node_dest;
    //! The rank to receive x and q from with DD
    int node_src;
    //! Index for commnode into the buffers
    int buf_index;
    //! The number of atoms to receive
    int rcount;
};
/*! \internal
 * \brief Data structure for coordinating transfers between PME ranks along one dimension
 *
 * Also used for passing coordinates, coefficients and forces to and from PME routines.
 */
class PmeAtomComm
{
public:
    //! Constructor, \p PmeMpiCommunicator is the communicator for this dimension
    PmeAtomComm(MPI_Comm PmeMpiCommunicator, int numThreads, int pmeOrder, int dimIndex, bool doSpread);

    //! Sets the atom count and, when necessary, resizes the atom buffers
    void setNumAtoms(int numAtoms);

    //! Returns the atom count
    int numAtoms() const { return numAtoms_; }

    //! Returns the number of atoms to send to each rank
    gmx::ArrayRef<int> sendCount()
    {
        GMX_ASSERT(!count_thread.empty(), "Need at least one thread_count");
        return count_thread[0];
    }
    //! The index of the dimension, 0=x, 1=y
    int dimind = 0;
    //! The number of slabs and ranks this dimension is decomposed over
    int nslab = 1;
    //! Our MPI rank index
    int nodeid = 0;
    //! Communicator for this dimension
    MPI_Comm mpi_comm;

    //! Communication setup for each slab, only present with nslab > 1
    std::vector<SlabCommSetup> slabCommSetup;
    //! The maximum communication distance counted in MPI ranks
    int maxshift = 0;

    //! The target slab index for each particle
    FastVector<int> pd;
    //! Target particle counts for each slab, for each thread
    std::vector<std::vector<int>> count_thread;
private:
    //! The number of atoms
    int numAtoms_ = 0;

public:
    //! The coordinates
    gmx::ArrayRef<const gmx::RVec> x;
    //! The coefficient, charges or LJ C6
    gmx::ArrayRef<const real> coefficient;
    //! The forces
    gmx::ArrayRef<gmx::RVec> f;
    //! Coordinate buffer, used only with nslab > 1
    FastVector<gmx::RVec> xBuffer;
    //! Coefficient buffer, used only with nslab > 1
    FastVector<real> coefficientBuffer;
    //! Force buffer, used only with nslab > 1
    FastVector<gmx::RVec> fBuffer;
    //! Tells whether these coordinates are used for spreading
    bool bSpread;
    //! The grid index per atom
    FastVector<gmx::IVec> idx;
    //! Fractional atom coordinates relative to the lower cell boundary
    FastVector<gmx::RVec> fractx;

    //! The number of threads to use in PME
    int nthread;
    //! Thread index for each atom
    FastVector<int> thread_idx;
    //! Mapping of atoms to threads
    std::vector<AtomToThreadMap> threadMap;
    //! Spline interpolation data for each thread
    std::vector<splinedata_t> spline;
};
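
/* Illustration only (hypothetical helper, not part of GROMACS): before spreading,
 * a caller records the atom count on the per-dimension atom-communication object,
 * which resizes the coordinate/coefficient/force views as needed. */
static inline void exampleSetAtomCount(PmeAtomComm* atc, int nAtoms)
{
    atc->setNumAtoms(nAtoms);
    GMX_ASSERT(atc->numAtoms() == nAtoms, "The atom count should have been recorded");
}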
/*! \brief Data structure for a single PME grid */
struct pmegrid_t
{
    ivec  ci;     /* The spatial location of this grid               */
    ivec  n;      /* The used size of *grid, including order-1       */
    ivec  offset; /* The grid offset from the full node grid         */
    int   order;  /* PME spreading order                             */
    ivec  s;      /* The allocated size of *grid, s >= n             */
    real* grid;   /* The local thread's grid, of used size n         */
};
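
/* Illustration only (hypothetical helper, not part of GROMACS), assuming the
 * usual x-major layout with z varying fastest and the allocated sizes in s used
 * as strides: the linear index of local grid point (ix, iy, iz) in *grid. */
static inline int exampleGridIndex(const pmegrid_t& g, int ix, int iy, int iz)
{
    return (ix * g.s[YY] + iy) * g.s[ZZ] + iz;
}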
/*! \brief Data structure for the PME grids (the full grid plus the per-thread grids) */
struct pmegrids_t
{
    pmegrid_t  grid;         /* The full node grid (non thread-local)            */
    int        nthread;      /* The number of threads operating on this grid     */
    ivec       nc;           /* The local spatial decomposition over the threads */
    pmegrid_t* grid_th;      /* Array of grids for each thread                   */
    real*      grid_all;     /* Allocated array for the grids in *grid_th        */
    int*       g2t[DIM];     /* The grid to thread index                         */
    ivec       nthread_comm; /* The number of threads to communicate with        */
};
/*! \brief Data structure for spline-interpolation working buffers */
struct pme_spline_work;

/*! \brief Data structure for working buffers */
struct pme_solve_work_t;
/*! \brief Master PME data structure */
struct gmx_pme_t
{                   //NOLINT(clang-analyzer-optin.performance.Padding)
    int ndecompdim; /* The number of decomposition dimensions */
    int nodeid;     /* Our nodeid in mpi->mpi_comm */
    int nnodes;     /* The number of nodes doing PME */

    MPI_Comm     mpi_comm_d[2]; /* Indexed on dimension, 0=x, 1=y */
    MPI_Datatype rvec_mpi;      /* the pme vector's MPI type */
    gmx_bool bUseThreads; /* Does any PME rank have nthread > 1 ? */
    int      nthread;     /* The number of threads doing PME on our rank */

    gmx_bool bPPnode;   /* Node also does particle-particle forces */
    bool     doCoulomb; /* Apply PME to electrostatics */
    bool     doLJ;      /* Apply PME to Lennard-Jones r^-6 interactions */
    gmx_bool bFEP;      /* Compute the free-energy contribution */

    int      nkx, nky, nkz; /* Grid dimensions */
    gmx_bool bP3M;          /* Do P3M: optimize the influence function */

    real ewaldcoeff_q;  /* Ewald splitting coefficient for Coulomb */
    real ewaldcoeff_lj; /* Ewald splitting coefficient for r^-6 */
    enum PmeRunMode runMode; /* Which codepath the PME runner is taking: CPU, GPU or mixed.
                              * TODO: this is information that should be owned by the task
                              * scheduler, and ideally not be duplicated here.
                              */

    PmeGpu* gpu; /* A pointer to the GPU data.
                  * TODO: this should be a unique or shared pointer.
                  * Currently in practice there is a single gmx_pme_t instance, while the code
                  * is partially set up to handle many of them. The PME tuning calls gmx_pme_reinit(),
                  * which fully reinitializes the one and only PME structure anew, while possibly
                  * keeping the old grid buffers if they were already large enough.
                  * This choice should be made explicit in the later refactoring:
                  * do we store many PME objects for different grid sizes,
                  * or a single PME object that handles different grid sizes gracefully?
                  */
    class EwaldBoxZScaler* boxScaler; /**< The scaling data Ewald uses with walls (set at pme_init, constant for the entire run) */

    int ljpme_combination_rule; /* Type of combination rule in LJ-PME */

    int ngrids; /* Number of grids we maintain for pmegrid, (c)fftgrid and pfft_setup */

    pmegrids_t pmegrid[DO_Q_AND_LJ_LB]; /* Grids on which we do spreading/interpolation,
                                         * includes overlap. Grid indices are ordered as
                                         * follows:
                                         * 0: Coulomb PME, state A
                                         * 1: Coulomb PME, state B
                                         * 2-8: LJ-PME
                                         * This can probably be done in a better way
                                         * but this simple hack works for now.
                                         */
    /* The PME coefficient spreading grid sizes/strides, includes pme_order-1 */
    int pmegrid_nx, pmegrid_ny, pmegrid_nz;
    /* pmegrid_nz might be larger than strictly necessary to ensure
     * memory alignment; pmegrid_nz_base gives the real base size.
     */
    int pmegrid_nz_base;
    /* The local PME grid starting indices */
    int pmegrid_start_ix, pmegrid_start_iy, pmegrid_start_iz;

    /* Work data for spreading and gathering */
    pme_spline_work* spline_work;

    real** fftgrid; /* Grids for FFT. With 1D FFT decomposition this can be a pointer
                     * inside the interpolation grid, but it is separate for 2D PME decomposition. */
    int fftgrid_nx, fftgrid_ny, fftgrid_nz;

    t_complex** cfftgrid; /* Grids for complex FFT data */
    int cfftgrid_nx, cfftgrid_ny, cfftgrid_nz;

    gmx_parallel_3dfft_t* pfft_setup;

    int *nnx, *nny, *nnz;
    real *fshx, *fshy, *fshz;
    std::vector<PmeAtomComm> atc; /* Indexed on decomposition index */

    /* Buffers to store data for local atoms for L-B combination rule
     * calculations in LJ-PME. lb_buf1 stores either the coefficients
     * for spreading/gathering (in serial), or the C6 coefficients for
     * local atoms (in parallel). lb_buf2 is only used in parallel,
     * and stores the sigma values for local atoms. */
    FastVector<real> lb_buf1;
    FastVector<real> lb_buf2;

    pme_overlap_t overlap[2]; /* Indexed on dimension, 0=x, 1=y */

    /* Atom step for energy-only calculation in gmx_pme_calc_energy() */
    std::unique_ptr<PmeAtomComm> atc_energy;

    /* Communication buffers */
    rvec* bufv;       /* Communication buffer */
    real* bufr;       /* Communication buffer */
    int   buf_nalloc; /* The communication buffer size */

    /* Thread-local work data for solve_pme */
    struct pme_solve_work_t* solve_work;

    /* Work data for sum_qgrid */
    real* sum_qgrid_dd_tmp;
};
//! @endcond

/*! \brief
 * Finds out whether PME is currently running on a GPU.
 * TODO: should this be removed eventually?
 *
 * \param[in] pme  The PME structure.
 * \returns        True if PME runs on GPU currently, false otherwise.
 */
inline bool pme_gpu_active(const gmx_pme_t* pme)
{
    return (pme != nullptr) && (pme->runMode != PmeRunMode::CPU);
}
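
/* A minimal sketch (hypothetical helper, not part of GROMACS) of how callers can
 * branch on the run mode exposed here, e.g. to pick the CPU or GPU
 * spread/solve/gather path or to label the backend in log output. */
static inline const char* examplePmeBackendName(const gmx_pme_t* pme)
{
    return pme_gpu_active(pme) ? "GPU (or mixed mode)" : "CPU";
}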
/*! \brief Tell our PME-only node to switch to a new grid size */
void gmx_pme_send_switchgrid(const t_commrec* cr, ivec grid_size, real ewaldcoeff_q, real ewaldcoeff_lj);