/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements GPU bonded lists for CUDA
 *
 * \author Berk Hess <hess@kth.se>
 * \author Szilárd Páll <pall.szilard@gmail.com>
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \author Magnus Lundborg <lundborg.magnus@gmail.com>
 *
 * \ingroup module_listed_forces
 */
49 #include "gpubonded_impl.h"
51 #include "gromacs/gpu_utils/cuda_arch_utils.cuh"
52 #include "gromacs/gpu_utils/cudautils.cuh"
53 #include "gromacs/gpu_utils/devicebuffer.h"
54 #include "gromacs/mdtypes/enerdata.h"
55 #include "gromacs/topology/forcefieldparameters.h"
62 // ---- GpuBonded::Impl
//! Constructor: uploads the force-field parameters to the device and
//! initializes the per-function-type interaction lists and kernel parameters.
GpuBonded::Impl::Impl(const gmx_ffparams_t &ffparams,
                      void                 *streamPtr)
{
    // NOTE(review): streamPtr is assumed to point at a valid CommandStream -- confirm with callers.
    stream_ = *static_cast<CommandStream*>(streamPtr);

    allocateDeviceBuffer(&d_forceParams_, ffparams.numTypes(), nullptr);
    // This could be an async transfer (if the source is pinned), so
    // long as it uses the same stream as the kernels and we are happy
    // to consume additional pinned pages.
    copyToDeviceBuffer(&d_forceParams_, ffparams.iparams.data(),
                       0, ffparams.numTypes(),
                       stream_, GpuApiCallBehavior::Sync, nullptr);

    // Device-side energy accumulation buffer, one slot per interaction
    // function type, cleared asynchronously in the bonded stream.
    allocateDeviceBuffer(&d_vTot_, F_NRE, nullptr);
    clearDeviceBufferAsync(&d_vTot_, 0, F_NRE, stream_);

    // All device-side interaction lists start out empty.
    for (int fType = 0; fType < F_NRE; fType++)
    {
        d_iLists_[fType].nr     = 0;
        d_iLists_[fType].iatoms = nullptr;
        d_iLists_[fType].nalloc = 0;
    }

    // Cache the device pointers in the kernel parameter struct; the
    // coordinate/force pointers are filled in later by
    // updateInteractionListsAndDeviceBuffers().
    kernelParams_.d_forceParams = d_forceParams_;
    kernelParams_.d_xq          = d_xq_;
    kernelParams_.d_f           = d_f_;
    kernelParams_.d_fShift      = d_fShift_;
    kernelParams_.d_vTot        = d_vTot_;
    for (int i = 0; i < numFTypesOnGpu; i++)
    {
        kernelParams_.d_iatoms[i]        = nullptr;
        kernelParams_.fTypeRangeStart[i] = 0;
        kernelParams_.fTypeRangeEnd[i]   = -1;
    }
}
//! Destructor: frees all device buffers owned by this object.
GpuBonded::Impl::~Impl()
{
    // Only the types in fTypesOnGpu can have device-side lists allocated.
    for (int fType : fTypesOnGpu)
    {
        if (d_iLists_[fType].iatoms)
        {
            freeDeviceBuffer(&d_iLists_[fType].iatoms);
            d_iLists_[fType].iatoms = nullptr;
        }
    }

    freeDeviceBuffer(&d_forceParams_);
    freeDeviceBuffer(&d_vTot_);
}
//! Return whether function type \p fType in \p idef has perturbed interactions
static bool fTypeHasPerturbedEntries(const t_idef &idef,
                                     int           fType)
{
    GMX_ASSERT(idef.ilsort == ilsortNO_FE || idef.ilsort == ilsortFE_SORTED,
               "Perturbed interactions should be sorted here");

    const t_ilist &ilist = idef.il[fType];

    // With FE-sorted lists the perturbed entries are grouped at the end,
    // so the list has perturbed entries iff the non-perturbed count
    // differs from the total count.
    return (idef.ilsort != ilsortNO_FE && ilist.nr_nonperturbed != ilist.nr);
}
//! Converts \p src with atom indices in state order to \p dest in nbnxn order
static void convertIlistToNbnxnOrder(const t_ilist       &src,
                                     HostInteractionList *dest,
                                     int                  numAtomsPerInteraction,
                                     ArrayRef<const int>  nbnxnAtomOrder)
{
    GMX_ASSERT(src.size() == 0 || !nbnxnAtomOrder.empty(), "We need the nbnxn atom order");

    dest->iatoms.resize(src.size());

    // Each interaction is stored as one parameter-type index followed by
    // numAtomsPerInteraction atom indices; the type index is copied
    // unchanged, the atom indices are remapped to the nbnxn ordering.
    // TODO use OpenMP to parallelise this loop
    for (int i = 0; i < src.size(); i += 1 + numAtomsPerInteraction)
    {
        dest->iatoms[i] = src.iatoms[i];
        for (int a = 0; a < numAtomsPerInteraction; a++)
        {
            dest->iatoms[i + 1 + a] = nbnxnAtomOrder[src.iatoms[i + 1 + a]];
        }
    }
}
//! Returns \p input rounded up to the closest multiple of \p factor.
static inline int roundUpToFactor(const int input, const int factor)
{
    GMX_ASSERT(factor > 0, "The factor to round up to must be > 0.");

    int remainder = input % factor;

    // An exact multiple must be returned unchanged; without this check the
    // function would incorrectly add a full extra factor.
    if (remainder == 0)
    {
        return input;
    }
    return input + (factor - remainder);
}
162 // TODO Consider whether this function should be a factory method that
163 // makes an object that is the only one capable of the device
164 // operations needed for the lifetime of an interaction list. This
165 // would be harder to misuse than GpuBonded, and exchange the problem
166 // of naming this method for the problem of what to name the
167 // BondedDeviceInteractionListHandler type.
/*! \brief Divides bonded interactions over threads and GPU.
 *
 * The bonded interactions are assigned by interaction type to GPU threads. The interaction
 * types are assigned in blocks sized as <warp_size>. The beginning and end (thread index) of each
 * interaction type are stored in kernelParams_. Pointers to the relevant data structures on the
 * GPU are also stored in kernelParams_.
 */
void
GpuBonded::Impl::updateInteractionListsAndDeviceBuffers(ArrayRef<const int>  nbnxnAtomOrder,
                                                        const t_idef        &idef,
                                                        void                *d_xqPtr,
                                                        void                *d_fPtr,
                                                        void                *d_fShiftPtr)
{
    // TODO wallcycle sub start
    haveInteractions_ = false;
    int fTypesCounter = 0;

    for (int fType : fTypesOnGpu)
    {
        auto &iList = iLists_[fType];

        /* Perturbation is not implemented in the GPU bonded kernels.
         * But instead of doing all interactions on the CPU, we can
         * still easily handle the types that have no perturbed
         * interactions on the GPU. */
        if (idef.il[fType].nr > 0 && !fTypeHasPerturbedEntries(idef, fType))
        {
            haveInteractions_ = true;

            convertIlistToNbnxnOrder(idef.il[fType],
                                     &iList,
                                     NRAL(fType), nbnxnAtomOrder);
        }
        else
        {
            iList.iatoms.clear();
        }

        // Update the device if necessary. This can leave some
        // allocations on the device when the host size decreases to
        // zero, which is OK, since we deallocate everything at the
        // end.
        if (iList.size() > 0)
        {
            t_ilist &d_iList = d_iLists_[fType];

            reallocateDeviceBuffer(&d_iList.iatoms, iList.size(), &d_iList.nr, &d_iList.nalloc, nullptr);

            copyToDeviceBuffer(&d_iList.iatoms, iList.iatoms.data(),
                               0, iList.size(),
                               stream_, GpuApiCallBehavior::Async, nullptr);
        }
        kernelParams_.fTypesOnGpu[fTypesCounter]    = fType;
        kernelParams_.numFTypeIAtoms[fTypesCounter] = iList.size();
        int numBonds = iList.size() / (interaction_function[fType].nratoms + 1);
        kernelParams_.numFTypeBonds[fTypesCounter]  = numBonds;
        kernelParams_.d_iatoms[fTypesCounter]       = d_iLists_[fType].iatoms;
        // Thread ranges are padded to whole warps so that each warp only
        // ever works on a single interaction type.
        if (fTypesCounter == 0)
        {
            kernelParams_.fTypeRangeStart[fTypesCounter] = 0;
        }
        else
        {
            kernelParams_.fTypeRangeStart[fTypesCounter] = kernelParams_.fTypeRangeEnd[fTypesCounter - 1] + 1;
        }
        kernelParams_.fTypeRangeEnd[fTypesCounter] = kernelParams_.fTypeRangeStart[fTypesCounter] + roundUpToFactor(numBonds, warp_size) - 1;

        GMX_ASSERT(numBonds > 0 || kernelParams_.fTypeRangeEnd[fTypesCounter] <= kernelParams_.fTypeRangeStart[fTypesCounter],
                   "Invalid GPU listed forces setup. numBonds must be > 0 if there are threads allocated to do work on that interaction function type.");
        GMX_ASSERT(kernelParams_.fTypeRangeStart[fTypesCounter] % warp_size == 0 && (kernelParams_.fTypeRangeEnd[fTypesCounter] + 1) % warp_size == 0,
                   "The bonded interactions must be assigned to the GPU in blocks of warp size.");

        // Without this increment every type would overwrite slot 0.
        fTypesCounter++;
    }

    d_xq_     = static_cast<float4 *>(d_xqPtr);
    d_f_      = static_cast<fvec *>(d_fPtr);
    d_fShift_ = static_cast<fvec *>(d_fShiftPtr);

    kernelParams_.d_xq          = d_xq_;
    kernelParams_.d_f           = d_f_;
    kernelParams_.d_fShift      = d_fShift_;
    kernelParams_.d_forceParams = d_forceParams_;
    kernelParams_.d_vTot        = d_vTot_;

    // TODO wallcycle sub stop
}
//! Returns whether there are bonded interactions assigned to the GPU.
bool
GpuBonded::Impl::haveInteractions() const
{
    return haveInteractions_;
}
//! Launches the asynchronous device-to-host copy of the bonded energy terms.
void
GpuBonded::Impl::launchEnergyTransfer()
{
    // TODO should wrap with ewcLAUNCH_GPU
    GMX_ASSERT(haveInteractions_, "No GPU bonded interactions, so no energies will be computed, so transfer should not be called");

    float *h_vTot = vTot_.data();
    // Async copy; accumulateEnergyTerms() synchronizes the stream before
    // the host values are read.
    copyFromDeviceBuffer(h_vTot, &d_vTot_,
                         0, F_NRE,
                         stream_, GpuApiCallBehavior::Async, nullptr);
}
//! Waits for the energy transfer to complete and adds the terms to \p enerd.
void
GpuBonded::Impl::accumulateEnergyTerms(gmx_enerdata_t *enerd)
{
    // TODO should wrap with some kind of wait counter, so not all
    // wait goes in to the "Rest" counter
    GMX_ASSERT(haveInteractions_, "No GPU bonded interactions, so no energies will be computed or transferred, so accumulation should not occur");

    // Block until the async D2H copy issued by launchEnergyTransfer()
    // has finished, so vTot_ holds valid data.
    cudaError_t stat = cudaStreamSynchronize(stream_);
    CU_RET_ERR(stat, "D2H transfer of bonded energies failed");

    for (int fType : fTypesOnGpu)
    {
        // The 1-4 terms go into the group-pair energies below instead.
        if (fType != F_LJ14 && fType != F_COUL14)
        {
            enerd->term[fType] += vTot_[fType];
        }
    }

    // Note: We do not support energy groups here
    gmx_grppairener_t *grppener = &enerd->grpp;
    GMX_RELEASE_ASSERT(grppener->nener == 1, "No energy group support for bondeds on the GPU");
    grppener->ener[egLJ14][0]   += vTot_[F_LJ14];
    grppener->ener[egCOUL14][0] += vTot_[F_COUL14];
}
//! Asynchronously clears the device-side energy accumulation buffer.
void
GpuBonded::Impl::clearEnergies()
{
    // TODO should wrap with ewcLAUNCH_GPU
    clearDeviceBufferAsync(&d_vTot_, 0, F_NRE, stream_);
}
// ---- GpuBonded (public interface, forwards to Impl)

//! Constructor: creates the implementation object.
GpuBonded::GpuBonded(const gmx_ffparams_t &ffparams,
                     void                 *streamPtr)
    : impl_(new Impl(ffparams, streamPtr))
{
}

GpuBonded::~GpuBonded() = default;
//! Forwards to Impl::updateInteractionListsAndDeviceBuffers().
void
GpuBonded::updateInteractionListsAndDeviceBuffers(ArrayRef<const int>  nbnxnAtomOrder,
                                                  const t_idef        &idef,
                                                  void                *d_xq,
                                                  void                *d_f,
                                                  void                *d_fShift)
{
    impl_->updateInteractionListsAndDeviceBuffers
        (nbnxnAtomOrder, idef, d_xq, d_f, d_fShift);
}
//! Returns whether there are bonded interactions assigned to the GPU.
bool
GpuBonded::haveInteractions() const
{
    return impl_->haveInteractions();
}
//! Forwards to Impl::launchEnergyTransfer().
void
GpuBonded::launchEnergyTransfer()
{
    impl_->launchEnergyTransfer();
}
//! Forwards to Impl::accumulateEnergyTerms().
void
GpuBonded::accumulateEnergyTerms(gmx_enerdata_t *enerd)
{
    impl_->accumulateEnergyTerms(enerd);
}
//! Forwards to Impl::clearEnergies().
void
GpuBonded::clearEnergies()
{
    impl_->clearEnergies();
}