/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief This file defines the PME CUDA-specific data structure,
 * various compile-time constants shared among the PME CUDA kernels,
 * and also names some PME CUDA memory management routines.
 * TODO: consider changing defines into variables where possible; have inline getters.
 *
 * \author Aleksei Iupinov
 */

#ifndef GMX_EWALD_PME_CUH
#define GMX_EWALD_PME_CUH

#include <cassert>

#include "pme-gpu-constants.h"
#include "pme-gpu-internal.h"
#include "pme-gpu-types.h"
#include "pme-gpu-types-host.h"
#include "pme-gpu-types-host-impl.h"
#include "pme-timings.cuh"

/*! \internal \brief
 * Gets the base of the unique index to an element in a spline parameter buffer (theta/dtheta),
 * which is laid out for GPU spread/gather kernels. The base only corresponds to the atom index
 * within the execution block. Feed the result into getSplineParamIndex() to get a full index.
 * TODO: it is likely that both parameters can simply be replaced with a single atom index, as
 * they are derived from it. Do that, verifying that the generated code is not bloated, and/or
 * revise the spline indexing scheme. Removing the warp dependency would also be nice (and would
 * probably coincide with removing PME_SPREADGATHER_ATOMS_PER_WARP).
 *
 * \tparam order            PME interpolation order.
 * \param[in] warpIndex     Warp index with respect to the block.
 * \param[in] atomWarpIndex Atom index with respect to the warp
 *                          (from 0 to PME_SPREADGATHER_ATOMS_PER_WARP - 1).
 *
 * \returns Index into theta or dtheta array using GPU layout.
 */
template <const int order>
int __host__ __device__ __forceinline__ getSplineParamIndexBase(int warpIndex, int atomWarpIndex)
{
    assert((atomWarpIndex >= 0) && (atomWarpIndex < PME_SPREADGATHER_ATOMS_PER_WARP));
    const int dimIndex    = 0;
    const int splineIndex = 0;
    // The zeroes are here to preserve the full index formula for reference
    return (((splineIndex + order * warpIndex) * DIM + dimIndex) * PME_SPREADGATHER_ATOMS_PER_WARP + atomWarpIndex);
}

/*! \internal \brief
 * Gets a unique index to an element in a spline parameter buffer (theta/dtheta),
 * which is laid out for GPU spread/gather kernels. The index is relative to the execution block,
 * in the range [0, atomsPerBlock * order * DIM).
 * This function consumes the result of getSplineParamIndexBase() and adjusts it for \p dimIndex
 * and \p splineIndex.
 *
 * \tparam order             PME interpolation order.
 * \param[in] paramIndexBase Must be the result of getSplineParamIndexBase().
 * \param[in] dimIndex       Dimension index (from 0 to 2).
 * \param[in] splineIndex    Spline contribution index (from 0 to \p order - 1).
 *
 * \returns Index into theta or dtheta array using GPU layout.
 */
template <const int order>
int __host__ __device__ __forceinline__ getSplineParamIndex(int paramIndexBase, int dimIndex, int splineIndex)
{
    assert((dimIndex >= XX) && (dimIndex < DIM));
    assert((splineIndex >= 0) && (splineIndex < order));
    return (paramIndexBase + (splineIndex * DIM + dimIndex) * PME_SPREADGATHER_ATOMS_PER_WARP);
}
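/*
 * Illustrative usage of the two indexing helpers above (an added sketch, not part of the
 * original header; names such as atomIndexLocal, dimIndex, splineIndex and sm_theta are
 * hypothetical kernel-local variables). A spread/gather kernel would typically derive the
 * warp-relative indices from a block-local atom index and then combine both helpers:
 *
 *     const int   atomWarpIndex = atomIndexLocal % PME_SPREADGATHER_ATOMS_PER_WARP;
 *     const int   warpIndex     = atomIndexLocal / PME_SPREADGATHER_ATOMS_PER_WARP;
 *     const int   indexBase     = getSplineParamIndexBase<order>(warpIndex, atomWarpIndex);
 *     const int   thetaIndex    = getSplineParamIndex<order>(indexBase, dimIndex, splineIndex);
 *     const float theta         = sm_theta[thetaIndex]; // sm_theta: shared-memory spline values
 */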
/*! \internal \brief
 * An inline CUDA function for checking the global atom data indices against the atom data array sizes.
 *
 * \param[in] atomDataIndex The atom data index.
 * \param[in] nAtomData     The atom data array element count.
 * \returns Non-0 if the index is within bounds (or PME data padding is enabled), 0 otherwise.
 *
 * This is called from the spline_and_spread and gather PME kernels.
 * The goal is to isolate the global range checks, and allow avoiding them with c_usePadding enabled.
 */
int __device__ __forceinline__ pme_gpu_check_atom_data_index(const int atomDataIndex, const int nAtomData)
{
    return c_usePadding ? 1 : (atomDataIndex < nAtomData);
}

/*! \internal \brief
 * An inline CUDA function for skipping zero-charge atoms.
 *
 * \param[in] coefficient The atom charge.
 * \returns Non-0 if the atom should be processed, 0 otherwise.
 *
 * This is called from the spline_and_spread and gather PME kernels.
 */
int __device__ __forceinline__ pme_gpu_check_atom_charge(const float coefficient)
{
    assert(isfinite(coefficient));
    return c_skipNeutralAtoms ? (coefficient != 0.0f) : 1;
}

/*! \internal \brief
 * Given a possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
 * to minimize the number of unused blocks.
 */
template <typename PmeGpu>
dim3 __host__ inline pmeGpuCreateGrid(const PmeGpu *pmeGpu, int blockCount)
{
    // How many maximum widths in X do we need (hopefully just one)
    const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
    // Trying to make things even
    const int colCount = (blockCount + minRowCount - 1) / minRowCount;
    GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
    GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount, "pmeGpuCreateGrid: excessive blocks");
    return dim3(colCount, minRowCount);
}
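/*
 * Worked example for pmeGpuCreateGrid() above (illustrative numbers, not from the original
 * header). With pmeGpu->maxGridWidthX == 65535 and blockCount == 100000:
 *
 *     minRowCount = (100000 + 65535 - 1) / 65535 = 2       // rows needed at maximum width
 *     colCount    = (100000 + 2 - 1) / 2         = 50000   // blocks split evenly over the rows
 *
 * yielding dim3(50000, 2) - exactly 100000 blocks, whereas a naive dim3(65535, 2) launch
 * would schedule 131070 blocks and leave 31070 of them idle.
 */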
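/*
 * Illustrative usage of the two atom-guard helpers pme_gpu_check_atom_data_index() and
 * pme_gpu_check_atom_charge() defined above (an added sketch; atomIndexGlobal, coefficient
 * and the kernelParams field names are assumptions, not part of the original header).
 * A per-atom kernel body would typically be guarded as:
 *
 *     const int globalCheck = pme_gpu_check_atom_data_index(atomIndexGlobal, kernelParams.atoms.nAtoms);
 *     const int chargeCheck = pme_gpu_check_atom_charge(coefficient);
 *     if (chargeCheck & globalCheck)
 *     {
 *         // spread/gather work for this atom
 *     }
 */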
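/*
 * A minimal sketch (assumed CUDA runtime API usage, not from the original header) of how one
 * of the texture objects in the PmeGpuCudaKernelParams struct below could be bound to a float
 * table such as grid.d_fractShiftsTable; tableSize is a hypothetical element count:
 *
 *     cudaResourceDesc rd = {};
 *     rd.resType                = cudaResourceTypeLinear;
 *     rd.res.linear.devPtr      = kernelParams.grid.d_fractShiftsTable;
 *     rd.res.linear.desc        = cudaCreateChannelDesc<float>();
 *     rd.res.linear.sizeInBytes = tableSize * sizeof(float);
 *     cudaTextureDesc td = {};
 *     td.readMode = cudaReadModeElementType;
 *     cudaCreateTextureObject(&kernelParams.fractShiftsTableTexture, &rd, &td, nullptr);
 */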
/*! \internal \brief
 * A single structure encompassing all the PME data used in CUDA kernels.
 * This inherits from PmeGpuKernelParamsBase and adds a couple of cudaTextureObject_t handles,
 * which we would like to avoid in plain C++.
 */
struct PmeGpuCudaKernelParams : PmeGpuKernelParamsBase
{
    /* These are CUDA texture objects, related to the grid size. */
    /*! \brief CUDA texture object for accessing grid.d_fractShiftsTable */
    cudaTextureObject_t fractShiftsTableTexture;
    /*! \brief CUDA texture object for accessing grid.d_gridlineIndicesTable */
    cudaTextureObject_t gridlineIndicesTableTexture;
};

#endif