2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
37 * \brief Implements backend-specific functions of the update-constraints in CUDA.
39 * \author Artem Zhmurov <zhmurov@gmail.com>
41 * \ingroup module_mdlib
43 #include "update_constrain_gpu_internal.h"
45 #include "gromacs/gpu_utils/cudautils.cuh"
46 #include "gromacs/gpu_utils/typecasts.cuh"
47 #include "gromacs/gpu_utils/vectype_ops.cuh"
/*!\brief Number of CUDA threads in a block
 *
 * Used as blockSize[0] by the launcher below; grid size is derived from it.
 * \todo Check if using smaller block size will lead to better performance.
 */
constexpr static int c_threadsPerBlock = 256;
//! Maximum number of threads in a block (for __launch_bounds__)
constexpr static int c_maxThreadsPerBlock = c_threadsPerBlock;
/*! \brief Scale coordinates by an upper-triangular scaling matrix.
 *
 * One thread handles one atom; expects a 1D launch with at least
 * \p numAtoms threads in total (out-of-range threads exit early).
 *
 * \param[in]     numAtoms      Number of atoms to process.
 * \param[in,out] gm_x          Device buffer of coordinates, scaled in place.
 * \param[in]     scalingMatrix Triangular scaling matrix (only the xx, yx, zx,
 *                              yy, zy, zz components are read).
 */
__launch_bounds__(c_maxThreadsPerBlock) __global__
        static void scaleCoordinates_kernel(const int numAtoms,
                                            float3* __restrict__ gm_x,
                                            const ScalingMatrix scalingMatrix)
{
    const int atomIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if (atomIndex < numAtoms)
    {
        const float3 oldX = gm_x[atomIndex];
        float3       newX;
        // Triangular matrix-vector product: each component only couples to
        // itself and "later" components of the old position.
        newX.x = scalingMatrix.xx * oldX.x + scalingMatrix.yx * oldX.y + scalingMatrix.zx * oldX.z;
        newX.y = scalingMatrix.yy * oldX.y + scalingMatrix.zy * oldX.z;
        newX.z = scalingMatrix.zz * oldX.z;
        gm_x[atomIndex] = newX;
    }
}
/*! \brief Launch the coordinate-scaling kernel on the given stream.
 *
 * Configures a 1D launch of \ref c_threadsPerBlock threads per block with
 * enough blocks to cover all atoms, runs \ref scaleCoordinates_kernel, and
 * then synchronizes the stream.
 *
 * \param[in]     numAtoms      Number of atoms to scale.
 * \param[in,out] d_coordinates Device buffer with coordinates, scaled in place.
 * \param[in]     mu            Scaling matrix.
 * \param[in]     deviceStream  Stream to launch the kernel in.
 */
void launchScaleCoordinatesKernel(const int numAtoms,
                                  DeviceBuffer<Float3> d_coordinates,
                                  const ScalingMatrix  mu,
                                  const DeviceStream&  deviceStream)
{
    KernelLaunchConfig kernelLaunchConfig;

    kernelLaunchConfig.blockSize[0]     = c_threadsPerBlock;
    kernelLaunchConfig.blockSize[1]     = 1;
    kernelLaunchConfig.blockSize[2]     = 1;
    kernelLaunchConfig.sharedMemorySize = 0;

    // Ceil-division so that a partial final block covers the remainder.
    kernelLaunchConfig.gridSize[0] = (numAtoms + c_threadsPerBlock - 1) / c_threadsPerBlock;

    const auto kernelArgs = prepareGpuKernelArguments(
            scaleCoordinates_kernel, kernelLaunchConfig, &numAtoms, asFloat3Pointer(&d_coordinates), &mu);
    // Fix: the launch call was missing its config, stream, timing-event and
    // argument-pack parameters, which made it uncompilable.
    launchGpuKernel(scaleCoordinates_kernel,
                    kernelLaunchConfig,
                    deviceStream,
                    nullptr,
                    "scaleCoordinates_kernel",
                    kernelArgs);
    // TODO: Although this only happens on the pressure coupling steps, this synchronization
    // can affect the performance if nstpcouple is small. See Issue #4018
    deviceStream.synchronize();
}