Make LINCS setup code platform-agnostic
src/gromacs/mdlib/update_constrain_gpu_impl.cu
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements the update and constraints class using CUDA.
 *
 * The class combines the leap-frog integrator with LINCS and SETTLE constraints.
 *
 * \todo The computational procedures in members should be integrated to improve
 *       performance.
 *
 * \author Artem Zhmurov <zhmurov@gmail.com>
 *
 * \ingroup module_mdlib
 */
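
/* A minimal usage sketch, assuming the caller (e.g., the md runner) has already
 * prepared the device buffers and the topology/input-record data; the variable
 * names below simply mirror the parameters of the public methods in this file.
 *
 * \code
 *   gmx::UpdateConstrainGpu updater(
 *           ir, mtop, numTempScaleValues, deviceContext, deviceStream, xUpdatedOnDevice, wcycle);
 *   updater.set(d_x, d_v, d_f, idef, md);
 *   updater.setPbc(pbcType, box);
 *   // Each MD step, once the forces are ready on the device:
 *   updater.integrate(fReadyOnDevice, dt, updateVelocities, computeVirial, virial,
 *                     doTemperatureScaling, tcstat, doParrinelloRahman, dtPressureCouple,
 *                     prVelocityScalingMatrix);
 * \endcode
 */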
#include "gmxpre.h"

#include "update_constrain_gpu_impl.h"

#include <assert.h>
#include <stdio.h>

#include <cmath>

#include <algorithm>

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/device_stream.h"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/gputraits.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/mdlib/leapfrog_gpu.h"
#include "gromacs/mdlib/lincs_gpu.h"
#include "gromacs/mdlib/settle_gpu.cuh"
#include "gromacs/mdlib/update_constrain_gpu.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/timing/wallcycle.h"

namespace gmx
{
/*! \brief Number of CUDA threads in a block
 *
 * \todo Check if using a smaller block size will lead to better performance.
 */
constexpr static int c_threadsPerBlock = 256;
//! Maximum number of threads in a block (for __launch_bounds__)
constexpr static int c_maxThreadsPerBlock = c_threadsPerBlock;

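/*! \brief Scales coordinates with a triangular scaling matrix.
 *
 * Only the diagonal and the yx, zx, zy off-diagonal elements of
 * \p scalingMatrix are used, consistent with the triangular box-matrix
 * convention used by GROMACS.
 *
 * \param[in]     numAtoms       Number of atoms to scale.
 * \param[in,out] gm_x           Device buffer with the coordinates to scale.
 * \param[in]     scalingMatrix  Scaling matrix (passed to the kernel by value).
 */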
__launch_bounds__(c_maxThreadsPerBlock) __global__
        static void scaleCoordinates_kernel(const int numAtoms,
                                            float3* __restrict__ gm_x,
                                            const ScalingMatrix scalingMatrix)
{
    int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if (threadIndex < numAtoms)
    {
        float3 x = gm_x[threadIndex];

        x.x = scalingMatrix.xx * x.x + scalingMatrix.yx * x.y + scalingMatrix.zx * x.z;
        x.y = scalingMatrix.yy * x.y + scalingMatrix.zy * x.z;
        x.z = scalingMatrix.zz * x.z;

        gm_x[threadIndex] = x;
    }
}

void UpdateConstrainGpu::Impl::integrate(GpuEventSynchronizer*             fReadyOnDevice,
                                         const real                        dt,
                                         const bool                        updateVelocities,
                                         const bool                        computeVirial,
                                         tensor                            virial,
                                         const bool                        doTemperatureScaling,
                                         gmx::ArrayRef<const t_grp_tcstat> tcstat,
                                         const bool                        doParrinelloRahman,
                                         const float                       dtPressureCouple,
                                         const matrix                      prVelocityScalingMatrix)
{
    wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
    wallcycle_sub_start(wcycle_, WallCycleSubCounter::LaunchGpuUpdateConstrain);

    // Clear the virial matrix.
    // TODO There is no point in having a separate virial matrix for constraints.
    clear_mat(virial);

    // Make sure that the forces are ready on the device before proceeding with the update.
    fReadyOnDevice->enqueueWaitEvent(deviceStream_);

    // The integrator is expected to save a copy of the current coordinates in d_xp_ and to
    // write the updated ones into d_x_. The d_xp_ buffer is only needed by the constraints.
    integrator_->integrate(
            d_x_, d_xp_, d_v_, d_f_, dt, doTemperatureScaling, tcstat, doParrinelloRahman, dtPressureCouple, prVelocityScalingMatrix);
    // The constraints need both the coordinates before (d_x_) and after (d_xp_) the update.
    // However, after the constraints are applied, d_x_ can be discarded, so we intentionally
    // swap d_x_ and d_xp_ here to avoid a d_xp_ -> d_x_ copy after the constraints. Note that
    // the integrator saves them in this swapped order as well.
    lincsGpu_->apply(d_xp_, d_x_, updateVelocities, d_v_, 1.0 / dt, computeVirial, virial, pbcAiuc_);
    settleGpu_->apply(d_xp_, d_x_, updateVelocities, d_v_, 1.0 / dt, computeVirial, virial, pbcAiuc_);

    // scaledVirial -> virial (the methods above return scaled values)
    float scaleFactor = 0.5f / (dt * dt);
    for (int i = 0; i < DIM; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            virial[i][j] = scaleFactor * virial[i][j];
        }
    }

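    // Mark the event so that consumers of the updated coordinates can synchronize on it.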
    coordinatesReady_->markEvent(deviceStream_);

    wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchGpuUpdateConstrain);
    wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
}

void UpdateConstrainGpu::Impl::scaleCoordinates(const matrix scalingMatrix)
{
    wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
    wallcycle_sub_start(wcycle_, WallCycleSubCounter::LaunchGpuUpdateConstrain);

    ScalingMatrix mu(scalingMatrix);

    const auto kernelArgs = prepareGpuKernelArguments(
            scaleCoordinates_kernel, coordinateScalingKernelLaunchConfig_, &numAtoms_, &d_x_, &mu);
    launchGpuKernel(scaleCoordinates_kernel,
                    coordinateScalingKernelLaunchConfig_,
                    deviceStream_,
                    nullptr,
                    "scaleCoordinates_kernel",
                    kernelArgs);
    // TODO: Although this only happens on pressure-coupling steps, this synchronization
    //       can affect performance if nstpcouple is small.
    deviceStream_.synchronize();

    wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchGpuUpdateConstrain);
    wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
}

void UpdateConstrainGpu::Impl::scaleVelocities(const matrix scalingMatrix)
{
    wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
    wallcycle_sub_start(wcycle_, WallCycleSubCounter::LaunchGpuUpdateConstrain);

    ScalingMatrix mu(scalingMatrix);

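    // Velocities are scaled with the same kernel that scales the coordinates; only the
    // device buffer passed to it differs.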
    const auto kernelArgs = prepareGpuKernelArguments(
            scaleCoordinates_kernel, coordinateScalingKernelLaunchConfig_, &numAtoms_, &d_v_, &mu);
    launchGpuKernel(scaleCoordinates_kernel,
                    coordinateScalingKernelLaunchConfig_,
                    deviceStream_,
                    nullptr,
                    "scaleCoordinates_kernel",
                    kernelArgs);
    // TODO: Although this only happens on pressure-coupling steps, this synchronization
    //       can affect performance if nstpcouple is small.
    deviceStream_.synchronize();

    wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchGpuUpdateConstrain);
    wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
}

UpdateConstrainGpu::Impl::Impl(const t_inputrec&     ir,
                               const gmx_mtop_t&     mtop,
                               const int             numTempScaleValues,
                               const DeviceContext&  deviceContext,
                               const DeviceStream&   deviceStream,
                               GpuEventSynchronizer* xUpdatedOnDevice,
                               gmx_wallcycle*        wcycle) :
    deviceContext_(deviceContext),
    deviceStream_(deviceStream),
    coordinatesReady_(xUpdatedOnDevice),
    wcycle_(wcycle)
{
    GMX_ASSERT(xUpdatedOnDevice != nullptr, "The event synchronizer cannot be nullptr.");

    integrator_ = std::make_unique<LeapFrogGpu>(deviceContext_, deviceStream_, numTempScaleValues);
    lincsGpu_ = std::make_unique<LincsGpu>(ir.nLincsIter, ir.nProjOrder, deviceContext_, deviceStream_);
    settleGpu_ = std::make_unique<SettleGpu>(mtop, deviceContext_, deviceStream_);

    coordinateScalingKernelLaunchConfig_.blockSize[0]     = c_threadsPerBlock;
    coordinateScalingKernelLaunchConfig_.blockSize[1]     = 1;
    coordinateScalingKernelLaunchConfig_.blockSize[2]     = 1;
    coordinateScalingKernelLaunchConfig_.sharedMemorySize = 0;
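    // The grid size depends on the number of atoms, so it is set later, in set().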
}

UpdateConstrainGpu::Impl::~Impl() {}

void UpdateConstrainGpu::Impl::set(DeviceBuffer<Float3>          d_x,
                                   DeviceBuffer<Float3>          d_v,
                                   const DeviceBuffer<Float3>    d_f,
                                   const InteractionDefinitions& idef,
                                   const t_mdatoms&              md)
{
    // TODO wallcycle
    wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
    wallcycle_sub_start(wcycle_, WallCycleSubCounter::LaunchGpuUpdateConstrain);

    GMX_ASSERT(d_x != nullptr, "Coordinates device buffer should not be null.");
    GMX_ASSERT(d_v != nullptr, "Velocities device buffer should not be null.");
    GMX_ASSERT(d_f != nullptr, "Forces device buffer should not be null.");

    d_x_ = d_x;
    d_v_ = d_v;
    d_f_ = d_f;

    numAtoms_ = md.nr;

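    // Scratch buffer for the saved coordinates; reallocated only when the current
    // allocation cannot hold numAtoms_ elements.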
    reallocateDeviceBuffer(&d_xp_, numAtoms_, &numXp_, &numXpAlloc_, deviceContext_);

    reallocateDeviceBuffer(
            &d_inverseMasses_, numAtoms_, &numInverseMasses_, &numInverseMassesAlloc_, deviceContext_);

    // The integrator should also update something here, but it does not even have a method for this yet.
    integrator_->set(numAtoms_, md.invmass, md.cTC);
    lincsGpu_->set(idef, numAtoms_, md.invmass);
    settleGpu_->set(idef);

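    // Round the grid size up so that every atom is covered by one thread.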
    coordinateScalingKernelLaunchConfig_.gridSize[0] =
            (numAtoms_ + c_threadsPerBlock - 1) / c_threadsPerBlock;

    wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchGpuUpdateConstrain);
    wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
}

void UpdateConstrainGpu::Impl::setPbc(const PbcType pbcType, const matrix box)
{
    // TODO wallcycle
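    // Pack the box into the compact PbcAiuc representation used by the GPU kernels.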
    setPbcAiuc(numPbcDimensions(pbcType), box, &pbcAiuc_);
}

GpuEventSynchronizer* UpdateConstrainGpu::Impl::getCoordinatesReadySync()
{
    return coordinatesReady_;
}

UpdateConstrainGpu::UpdateConstrainGpu(const t_inputrec&     ir,
                                       const gmx_mtop_t&     mtop,
                                       const int             numTempScaleValues,
                                       const DeviceContext&  deviceContext,
                                       const DeviceStream&   deviceStream,
                                       GpuEventSynchronizer* xUpdatedOnDevice,
                                       gmx_wallcycle*        wcycle) :
    impl_(new Impl(ir, mtop, numTempScaleValues, deviceContext, deviceStream, xUpdatedOnDevice, wcycle))
{
}

UpdateConstrainGpu::~UpdateConstrainGpu() = default;

void UpdateConstrainGpu::integrate(GpuEventSynchronizer*             fReadyOnDevice,
                                   const real                        dt,
                                   const bool                        updateVelocities,
                                   const bool                        computeVirial,
                                   tensor                            virialScaled,
                                   const bool                        doTemperatureScaling,
                                   gmx::ArrayRef<const t_grp_tcstat> tcstat,
                                   const bool                        doParrinelloRahman,
                                   const float                       dtPressureCouple,
                                   const matrix                      prVelocityScalingMatrix)
{
    impl_->integrate(fReadyOnDevice,
                     dt,
                     updateVelocities,
                     computeVirial,
                     virialScaled,
                     doTemperatureScaling,
                     tcstat,
                     doParrinelloRahman,
                     dtPressureCouple,
                     prVelocityScalingMatrix);
}

void UpdateConstrainGpu::scaleCoordinates(const matrix scalingMatrix)
{
    impl_->scaleCoordinates(scalingMatrix);
}

void UpdateConstrainGpu::scaleVelocities(const matrix scalingMatrix)
{
    impl_->scaleVelocities(scalingMatrix);
}

void UpdateConstrainGpu::set(DeviceBuffer<Float3>          d_x,
                             DeviceBuffer<Float3>          d_v,
                             const DeviceBuffer<Float3>    d_f,
                             const InteractionDefinitions& idef,
                             const t_mdatoms&              md)
{
    impl_->set(d_x, d_v, d_f, idef, md);
}

void UpdateConstrainGpu::setPbc(const PbcType pbcType, const matrix box)
{
    impl_->setPbc(pbcType, box);
}

GpuEventSynchronizer* UpdateConstrainGpu::getCoordinatesReadySync()
{
    return impl_->getCoordinatesReadySync();
}

bool UpdateConstrainGpu::isNumCoupledConstraintsSupported(const gmx_mtop_t& mtop)
{
    return LincsGpu::isNumCoupledConstraintsSupported(mtop);
}

} // namespace gmx