/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements GPU Force Reduction using CUDA
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_mdlib
 */
#include "gmxpre.h"

#include "gpuforcereduction_impl_internal.h"

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"

namespace gmx
{

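//! \brief Number of CUDA threads in a block (a typical default for simple per-atom kernels)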
constexpr static int c_threadsPerBlock = 128;
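
/*! \brief Per-atom force reduction kernel.
 *
 * For each atom i, computes
 *     gm_fTotal[i] (+)= gm_nbnxmForce[gm_cell[i]] (+ rvecForceToAdd[i]),
 * where gm_cell maps an atom index into the nbnxm cell ordering in which
 * the nbnxm force buffer is stored.
 *
 * \tparam addRvecForce    Whether rvecForceToAdd is added to the result.
 * \tparam accumulateForce Whether the result accumulates into, rather than
 *                         overwrites, the destination force.
 */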
template<bool addRvecForce, bool accumulateForce>
static __global__ void reduceKernel(const float3* __restrict__ gm_nbnxmForce,
                                    const float3* __restrict__ rvecForceToAdd,
                                    float3*                    gm_fTotal,
                                    const int* __restrict__ gm_cell,
                                    const int                  numAtoms)
{
    // map particle-level parallelism to 1D CUDA thread and block index
    const int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;

    // perform addition for each particle
    if (threadIndex < numAtoms)
    {
        float3* gm_fDest = &gm_fTotal[threadIndex];
        float3  temp;

        // Accumulate or set nbnxm force
        if (accumulateForce)
        {
            temp = *gm_fDest;
            temp += gm_nbnxmForce[gm_cell[threadIndex]];
        }
        else
        {
            temp = gm_nbnxmForce[gm_cell[threadIndex]];
        }

        if (addRvecForce)
        {
            temp += rvecForceToAdd[threadIndex];
        }

        *gm_fDest = temp;
    }
}
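
/*! \brief Launches the force-reduction kernel.
 *
 * Picks the reduceKernel instantiation matching \p addRvecForce and
 * \p accumulate and launches it asynchronously on \p deviceStream,
 * reducing into \p d_baseForce starting at index \p atomStart.
 */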
void launchForceReductionKernel(int                        numAtoms,
                                int                        atomStart,
                                bool                       addRvecForce,
                                bool                       accumulate,
                                const DeviceBuffer<Float3> d_nbnxmForceToAdd,
                                const DeviceBuffer<Float3> d_rvecForceToAdd,
                                DeviceBuffer<Float3>       d_baseForce,
                                DeviceBuffer<int>          d_cell,
                                const DeviceStream&        deviceStream)
{
    float3* d_baseForcePtr      = &(asFloat3(d_baseForce)[atomStart]);
    float3* d_nbnxmForcePtr     = asFloat3(d_nbnxmForceToAdd);
    float3* d_rvecForceToAddPtr = &(asFloat3(d_rvecForceToAdd)[atomStart]);

    // Configure and launch kernel
    KernelLaunchConfig config;
    config.blockSize[0] = c_threadsPerBlock;
    config.blockSize[1] = 1;
    config.blockSize[2] = 1;
    // Round up so that every atom gets a thread; any excess threads are
    // guarded by the bounds check in the kernel
    config.gridSize[0]      = ((numAtoms + 1) + c_threadsPerBlock - 1) / c_threadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;

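    // The runtime flags become compile-time constants in the selected
    // instantiation, so the corresponding branches inside the kernel compile away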
    auto kernelFn = addRvecForce
                            ? (accumulate ? reduceKernel<true, true> : reduceKernel<true, false>)
                            : (accumulate ? reduceKernel<false, true> : reduceKernel<false, false>);

    const auto kernelArgs = prepareGpuKernelArguments(
            kernelFn, config, &d_nbnxmForcePtr, &d_rvecForceToAddPtr, &d_baseForcePtr, &d_cell, &numAtoms);

    launchGpuKernel(kernelFn, config, deviceStream, nullptr, "Force Reduction", kernelArgs);
}

} // namespace gmx