/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements GPU bonded lists for CUDA
 *
 * \author Berk Hess <hess@kth.se>
 * \author Szilárd Páll <pall.szilard@gmail.com>
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \author Magnus Lundborg <lundborg.magnus@gmail.com>
 *
 * \ingroup module_listed_forces
 */
#include "gmxpre.h"

#include "gpubonded_impl.h"

#include "gromacs/gpu_utils/cuda_arch_utils.cuh"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/device_stream.h"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/topology/forcefieldparameters.h"

namespace gmx
{
// Number of CUDA threads in a block
constexpr static int c_threadsPerBlock = 256;

// ---- GpuBonded::Impl
GpuBonded::Impl::Impl(const gmx_ffparams_t& ffparams,
                      const float           electrostaticsScaleFactor,
                      const DeviceContext&  deviceContext,
                      const DeviceStream&   deviceStream,
                      gmx_wallcycle*        wcycle) :
    deviceContext_(deviceContext), deviceStream_(deviceStream)
{
    GMX_RELEASE_ASSERT(deviceStream.isValid(),
                       "Can't run GPU version of bonded forces in stream that is not valid.");

    static_assert(
            c_threadsPerBlock >= c_numShiftVectors,
            "Threads per block in GPU bonded must be >= c_numShiftVectors for the virial kernel "
            "(calcVir=true)");

    wcycle_ = wcycle;
    allocateDeviceBuffer(&d_forceParams_, ffparams.numTypes(), deviceContext_);
    // This could be an async transfer (if the source is pinned), so
    // long as it uses the same stream as the kernels and we are happy
    // to consume additional pinned pages.
    copyToDeviceBuffer(&d_forceParams_,
                       ffparams.iparams.data(),
                       0,
                       ffparams.numTypes(),
                       deviceStream_,
                       GpuApiCallBehavior::Sync,
                       nullptr);
    vTot_.resize(F_NRE);
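    // One energy accumulation slot per interaction function type,
    // cleared here and again after each energy read-back.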
    allocateDeviceBuffer(&d_vTot_, F_NRE, deviceContext_);
    clearDeviceBufferAsync(&d_vTot_, 0, F_NRE, deviceStream_);
    kernelParams_.electrostaticsScaleFactor = electrostaticsScaleFactor;
    kernelParams_.d_forceParams             = d_forceParams_;
    kernelParams_.d_xq                      = d_xq_;
    kernelParams_.d_f                       = d_f_;
    kernelParams_.d_fShift                  = d_fShift_;
    kernelParams_.d_vTot                    = d_vTot_;
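    // Mark all interaction-type slots as empty; they are filled in by
    // updateInteractionListsAndDeviceBuffers() once lists are available.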
    for (int i = 0; i < numFTypesOnGpu; i++)
    {
        kernelParams_.d_iatoms[i]        = nullptr;
        kernelParams_.fTypeRangeStart[i] = 0;
        kernelParams_.fTypeRangeEnd[i]   = -1;
    }

    int fTypeRangeEnd = kernelParams_.fTypeRangeEnd[numFTypesOnGpu - 1];
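    // Set up the kernel launch configuration. The grid size is
    // recomputed in updateInteractionListsAndDeviceBuffers() whenever
    // the assigned thread ranges change.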
    kernelLaunchConfig_.blockSize[0] = c_threadsPerBlock;
    kernelLaunchConfig_.blockSize[1] = 1;
    kernelLaunchConfig_.blockSize[2] = 1;
    kernelLaunchConfig_.gridSize[0]  = (fTypeRangeEnd + c_threadsPerBlock) / c_threadsPerBlock;
    kernelLaunchConfig_.gridSize[1]  = 1;
    kernelLaunchConfig_.gridSize[2]  = 1;
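    // Dynamic shared memory: one float3 per shift vector for
    // shift-force accumulation, plus three floats of reduction
    // scratch per warp.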
    kernelLaunchConfig_.sharedMemorySize =
            c_numShiftVectors * sizeof(float3) + (c_threadsPerBlock / warp_size) * 3 * sizeof(float);
}

GpuBonded::Impl::~Impl()
{
    for (int fType : fTypesOnGpu)
    {
        if (d_iLists_[fType].iatoms)
        {
            freeDeviceBuffer(&d_iLists_[fType].iatoms);
            d_iLists_[fType].iatoms = nullptr;
        }
    }

    freeDeviceBuffer(&d_forceParams_);
    freeDeviceBuffer(&d_vTot_);
}

//! Return whether function type \p fType in \p idef has perturbed interactions
static bool fTypeHasPerturbedEntries(const InteractionDefinitions& idef, int fType)
{
    GMX_ASSERT(idef.ilsort == ilsortNO_FE || idef.ilsort == ilsortFE_SORTED,
               "Perturbed interactions should be sorted here");

    const InteractionList& ilist = idef.il[fType];
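    // With ilsortFE_SORTED the perturbed interactions are sorted to
    // the end of each list, so a type has perturbed entries exactly
    // when its non-perturbed count is smaller than the list size.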
    return (idef.ilsort != ilsortNO_FE && idef.numNonperturbedInteractions[fType] != ilist.size());
}

//! Converts \p src with atom indices in state order to \p dest in nbnxn order
static void convertIlistToNbnxnOrder(const InteractionList& src,
                                     HostInteractionList*   dest,
                                     int                    numAtomsPerInteraction,
                                     ArrayRef<const int>    nbnxnAtomOrder)
{
    GMX_ASSERT(src.empty() || !nbnxnAtomOrder.empty(), "We need the nbnxn atom order");

    dest->iatoms.resize(src.size());

    // TODO use OpenMP to parallelise this loop
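    // Each interaction is stored as one parameter-type index followed
    // by numAtomsPerInteraction atom indices; only the atom indices
    // need remapping.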
    for (int i = 0; i < src.size(); i += 1 + numAtomsPerInteraction)
    {
        dest->iatoms[i] = src.iatoms[i];
        for (int a = 0; a < numAtomsPerInteraction; a++)
        {
            dest->iatoms[i + 1 + a] = nbnxnAtomOrder[src.iatoms[i + 1 + a]];
        }
    }
}

//! Returns \p input rounded up to the closest multiple of \p factor.
static inline int roundUpToFactor(const int input, const int factor)
{
    GMX_ASSERT(factor > 0, "The factor to round up to must be > 0.");

    int remainder = input % factor;

    if (remainder == 0)
    {
        return input;
    }
    return (input + (factor - remainder));
}

// TODO Consider whether this function should be a factory method that
// makes an object that is the only one capable of the device
// operations needed for the lifetime of an interaction list. This
// would be harder to misuse than GpuBonded, and exchange the problem
// of naming this method for the problem of what to name the
// BondedDeviceInteractionListHandler type.

/*! Divides bonded interactions over GPU threads.
 *  The bonded interactions are assigned by interaction type to GPU threads. The interaction
 *  types are assigned in blocks that are multiples of warp_size. The beginning and end
 *  (thread index) of each interaction type are stored in kernelParams_. Pointers to the
 *  relevant data structures on the GPU are also stored in kernelParams_.
 *
 *  \todo Use DeviceBuffer for the d_xqPtr.
 */
void GpuBonded::Impl::updateInteractionListsAndDeviceBuffers(ArrayRef<const int> nbnxnAtomOrder,
                                                             const InteractionDefinitions& idef,
                                                             void*              d_xqPtr,
                                                             DeviceBuffer<RVec> d_fPtr,
                                                             DeviceBuffer<RVec> d_fShiftPtr)
{
    // TODO wallcycle sub start
    haveInteractions_ = false;
    int fTypesCounter = 0;

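    // Walk the GPU-supported interaction types, upload the lists that
    // can run on the GPU, and record each type's thread range in the
    // kernel parameters.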
    for (int fType : fTypesOnGpu)
    {
        auto& iList = iLists_[fType];

        /* Perturbation is not implemented in the GPU bonded kernels.
         * But instead of doing all interactions on the CPU, we can
         * still easily handle the types that have no perturbed
         * interactions on the GPU. */
        if (!idef.il[fType].empty() && !fTypeHasPerturbedEntries(idef, fType))
        {
            haveInteractions_ = true;

            convertIlistToNbnxnOrder(idef.il[fType], &iList, NRAL(fType), nbnxnAtomOrder);
        }
        else
        {
            iList.iatoms.clear();
        }

        // Update the device if necessary. This can leave some
        // allocations on the device when the host size decreases to
        // zero, which is OK, since we deallocate everything at the
        // end.
        if (iList.size() > 0)
        {
            t_ilist& d_iList = d_iLists_[fType];

            reallocateDeviceBuffer(
                    &d_iList.iatoms, iList.size(), &d_iList.nr, &d_iList.nalloc, deviceContext_);

            copyToDeviceBuffer(&d_iList.iatoms,
                               iList.iatoms.data(),
                               0,
                               iList.size(),
                               deviceStream_,
                               GpuApiCallBehavior::Async,
                               nullptr);

            kernelParams_.fTypesOnGpu[fTypesCounter]    = fType;
            kernelParams_.numFTypeIAtoms[fTypesCounter] = iList.size();
            int numBonds = iList.size() / (interaction_function[fType].nratoms + 1);
            kernelParams_.numFTypeBonds[fTypesCounter] = numBonds;
            kernelParams_.d_iatoms[fTypesCounter]      = d_iLists_[fType].iatoms;
            if (fTypesCounter == 0)
            {
                kernelParams_.fTypeRangeStart[fTypesCounter] = 0;
            }
            else
            {
                kernelParams_.fTypeRangeStart[fTypesCounter] =
                        kernelParams_.fTypeRangeEnd[fTypesCounter - 1] + 1;
            }
            kernelParams_.fTypeRangeEnd[fTypesCounter] = kernelParams_.fTypeRangeStart[fTypesCounter]
                                                         + roundUpToFactor(numBonds, warp_size) - 1;
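            // Each type's thread range is padded to a whole number of
            // warps, so no warp ever processes two interaction types.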
            GMX_ASSERT(numBonds > 0
                               || kernelParams_.fTypeRangeEnd[fTypesCounter]
                                          <= kernelParams_.fTypeRangeStart[fTypesCounter],
                       "Invalid GPU listed forces setup. numBonds must be > 0 if there are threads "
                       "allocated to do work on that interaction function type.");
            GMX_ASSERT(kernelParams_.fTypeRangeStart[fTypesCounter] % warp_size == 0
                               && (kernelParams_.fTypeRangeEnd[fTypesCounter] + 1) % warp_size == 0,
                       "The bonded interactions must be assigned to the GPU in blocks of warp size.");
        }

        fTypesCounter++;
    }

    int fTypeRangeEnd               = kernelParams_.fTypeRangeEnd[numFTypesOnGpu - 1];
    kernelLaunchConfig_.gridSize[0] = (fTypeRangeEnd + c_threadsPerBlock) / c_threadsPerBlock;
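    // The coordinate buffer is passed as an opaque pointer and
    // reinterpreted as float4 (x, y, z followed by the charge).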
    d_xq_     = static_cast<float4*>(d_xqPtr);
    d_f_      = asFloat3(d_fPtr);
    d_fShift_ = asFloat3(d_fShiftPtr);

    kernelParams_.d_xq          = d_xq_;
    kernelParams_.d_f           = d_f_;
    kernelParams_.d_fShift      = d_fShift_;
    kernelParams_.d_forceParams = d_forceParams_;
    kernelParams_.d_vTot        = d_vTot_;

    // TODO wallcycle sub stop
}

void GpuBonded::Impl::setPbc(PbcType pbcType, const matrix box, bool canMoleculeSpanPbc)
{
    PbcAiuc pbcAiuc;
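    // When molecules cannot span a periodic boundary, the bonded
    // kernels need no PBC treatment, so zero PBC dimensions are used.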
    setPbcAiuc(canMoleculeSpanPbc ? numPbcDimensions(pbcType) : 0, box, &pbcAiuc);
    kernelParams_.pbcAiuc = pbcAiuc;
}

bool GpuBonded::Impl::haveInteractions() const
{
    return haveInteractions_;
}

void GpuBonded::Impl::launchEnergyTransfer()
{
    GMX_ASSERT(haveInteractions_,
               "No GPU bonded interactions, so no energies will be computed, so transfer should "
               "not be called");

    wallcycle_sub_start_nocount(wcycle_, WallCycleSubCounter::LaunchGpuBonded);
    // TODO add conditional on whether there has been any compute (and make sure host buffer doesn't contain garbage)
    float* h_vTot = vTot_.data();
    copyFromDeviceBuffer(h_vTot, &d_vTot_, 0, F_NRE, deviceStream_, GpuApiCallBehavior::Async, nullptr);
    wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchGpuBonded);
}

void GpuBonded::Impl::waitAccumulateEnergyTerms(gmx_enerdata_t* enerd)
{
    GMX_ASSERT(haveInteractions_,
               "No GPU bonded interactions, so no energies will be computed or transferred, so "
               "accumulation should not occur");

    wallcycle_start(wcycle_, WallCycleCounter::WaitGpuBonded);
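    // Block until the asynchronous D2H energy copy in this stream has
    // completed before accumulating the energies on the host.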
    cudaError_t stat = cudaStreamSynchronize(deviceStream_.stream());
    CU_RET_ERR(stat, "D2H transfer of bonded energies failed");
    wallcycle_stop(wcycle_, WallCycleCounter::WaitGpuBonded);

    for (int fType : fTypesOnGpu)
    {
        if (fType != F_LJ14 && fType != F_COUL14)
        {
            enerd->term[fType] += vTot_[fType];
        }
    }

    // Note: We do not support energy groups here
    gmx_grppairener_t* grppener = &enerd->grpp;
    GMX_RELEASE_ASSERT(grppener->nener == 1, "No energy group support for bondeds on the GPU");
    grppener->energyGroupPairTerms[NonBondedEnergyTerms::LJ14][0] += vTot_[F_LJ14];
    grppener->energyGroupPairTerms[NonBondedEnergyTerms::Coulomb14][0] += vTot_[F_COUL14];
}

void GpuBonded::Impl::clearEnergies()
{
    wallcycle_start_nocount(wcycle_, WallCycleCounter::LaunchGpu);
    wallcycle_sub_start_nocount(wcycle_, WallCycleSubCounter::LaunchGpuBonded);
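    // Zero the device-side energy accumulation buffer asynchronously
    // in the bonded stream.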
    clearDeviceBufferAsync(&d_vTot_, 0, F_NRE, deviceStream_);
    wallcycle_sub_stop(wcycle_, WallCycleSubCounter::LaunchGpuBonded);
    wallcycle_stop(wcycle_, WallCycleCounter::LaunchGpu);
}

// ---- GpuBonded

GpuBonded::GpuBonded(const gmx_ffparams_t& ffparams,
                     const float           electrostaticsScaleFactor,
                     const DeviceContext&  deviceContext,
                     const DeviceStream&   deviceStream,
                     gmx_wallcycle*        wcycle) :
    impl_(new Impl(ffparams, electrostaticsScaleFactor, deviceContext, deviceStream, wcycle))
{
}

GpuBonded::~GpuBonded() = default;

void GpuBonded::updateInteractionListsAndDeviceBuffers(ArrayRef<const int>           nbnxnAtomOrder,
                                                       const InteractionDefinitions& idef,
                                                       void*                         d_xq,
                                                       DeviceBuffer<RVec>            d_f,
                                                       DeviceBuffer<RVec>            d_fShift)
{
    impl_->updateInteractionListsAndDeviceBuffers(nbnxnAtomOrder, idef, d_xq, d_f, d_fShift);
}

void GpuBonded::setPbc(PbcType pbcType, const matrix box, bool canMoleculeSpanPbc)
{
    impl_->setPbc(pbcType, box, canMoleculeSpanPbc);
}

bool GpuBonded::haveInteractions() const
{
    return impl_->haveInteractions();
}

void GpuBonded::setPbcAndlaunchKernel(PbcType                  pbcType,
                                      const matrix             box,
                                      bool                     canMoleculeSpanPbc,
                                      const gmx::StepWorkload& stepWork)
{
    setPbc(pbcType, box, canMoleculeSpanPbc);
    launchKernel(stepWork);
}

void GpuBonded::launchEnergyTransfer()
{
    impl_->launchEnergyTransfer();
}

void GpuBonded::waitAccumulateEnergyTerms(gmx_enerdata_t* enerd)
{
    impl_->waitAccumulateEnergyTerms(enerd);
}

void GpuBonded::clearEnergies()
{
    impl_->clearEnergies();
}

} // namespace gmx