2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2019, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
35 /*! \libinternal \file
37 * \brief Declarations for CUDA implementation of Leap-Frog.
39 * \todo Reconsider naming towards using "gpu" suffix instead of "cuda".
41 * \author Artem Zhmurov <zhmurov@gmail.com>
43 * \ingroup module_mdlib
46 #ifndef GMX_MDLIB_LEAPFROG_CUDA_CUH
47 #define GMX_MDLIB_LEAPFROG_CUDA_CUH
49 #include "gromacs/gpu_utils/gputraits.cuh"
50 #include "gromacs/gpu_utils/hostallocator.h"
51 #include "gromacs/mdtypes/group.h"
52 #include "gromacs/mdtypes/mdatom.h"
53 #include "gromacs/pbcutil/pbc.h"
54 #include "gromacs/pbcutil/pbc_aiuc.h"
55 #include "gromacs/utility/arrayref.h"
56 #include "gromacs/utility/classhelpers.h"
65 /*! \brief Constructor.
67 * \param[in] commandStream Device command stream to use.
69 LeapFrogCuda(CommandStream commandStream);
75 * Converts PBC data from t_pbc into the PbcAiuc format and stores the latter.
77 * \param[in] pbc The PBC data in t_pbc format.
79 void setPbc(const t_pbc* pbc);
83 * Integrates the equation of motion using Leap-Frog algorithm.
84 * Updates coordinates and velocities on the GPU. The current coordinates are saved for constraints.
86 * \param[in,out] d_x Coordinates to update
87 * \param[out]    d_xp                      Place to save the values of initial coordinates to.
88 * \param[in,out] d_v Velocities (will be updated).
89 * \param[in] d_f Forces.
90 * \param[in] dt Timestep.
91 * \param[in] doTempCouple If the temperature coupling should be applied.
92 * \param[in] tcstat Temperature coupling data.
93 * \param[in]     doPressureCouple          If the pressure coupling should be applied.
94 * \param[in] dtPressureCouple Period between pressure coupling steps
95 * \param[in] velocityScalingMatrix Parrinello-Rahman velocity scaling matrix
97 void integrate(const float3* d_x,
102 const bool doTempCouple,
103 gmx::ArrayRef<const t_grp_tcstat> tcstat,
104 const bool doPressureCouple,
105 const float dtPressureCouple,
106 const matrix velocityScalingMatrix);
108 /*! \brief Set the integrator
110 * Allocates memory for inverse masses and, if needed, for temperature scaling factor(s)
111 * and temperature coupling groups. Copies inverse masses and temperature coupling groups
114 * \param[in] md MD atoms, from which inverse masses are taken.
115 * \param[in] numTempScaleValues Number of temperature scale groups.
116 * \param[in] tempScaleGroups Maps the atom index to temperature scale value.
118 void set(const t_mdatoms& md, int numTempScaleValues, const unsigned short* tempScaleGroups);
120 /*! \brief Class with hardware-specific interfaces and implementations.*/
125 CommandStream commandStream_;
126 //! CUDA kernel launch config
127 KernelLaunchConfig kernelLaunchConfig_;
128 //! Periodic boundary data
133 //! 1/mass for all atoms (GPU)
134 real* d_inverseMasses_;
135 //! Current size of the reciprocal masses array
136 int numInverseMasses_ = -1;
137 //! Maximum size of the reciprocal masses array
138 int numInverseMassesAlloc_ = -1;
140 //! Number of temperature coupling groups (zero = no coupling)
141 int numTempScaleValues_ = 0;
142 /*! \brief Array with temperature scaling factors.
143 * This is temporary solution to remap data from t_grp_tcstat into plain array
144 * \todo Replace with better solution.
146 gmx::HostVector<float> h_lambdas_;
147 //! Device-side temperature scaling factors
149 //! Current size of the array with temperature scaling factors (lambdas)
150 int numLambdas_ = -1;
151 //! Maximum size of the array with temperature scaling factors (lambdas)
152 int numLambdasAlloc_ = -1;
155 //! Array that maps atom index onto the temperature scaling group to get scaling parameter
156 unsigned short* d_tempScaleGroups_;
157 //! Current size of the temperature coupling groups array
158 int numTempScaleGroups_ = -1;
159 //! Maximum size of the temperature coupling groups array
160 int numTempScaleGroupsAlloc_ = -1;
162 //! Vector with diagonal elements of the pressure coupling velocity rescale factors
163 float3 velocityScalingMatrixDiagonal_;