/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements GPU bonded lists for CUDA
 *
 * \author Berk Hess <hess@kth.se>
 * \author Szilárd Páll <pall.szilard@gmail.com>
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \author Magnus Lundborg <lundborg.magnus@gmail.com>
 *
 * \ingroup module_listed_forces
 */
#include "gmxpre.h"

#include "gpubonded_impl.h"

#include "gromacs/gpu_utils/cuda_arch_utils.cuh"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/topology/forcefieldparameters.h"

namespace gmx
{

// ---- GpuBonded::Impl
GpuBonded::Impl::Impl(const gmx_ffparams_t& ffparams, void* streamPtr, gmx_wallcycle* wcycle)
{
    stream_ = *static_cast<CommandStream*>(streamPtr);
    wcycle_ = wcycle;

    allocateDeviceBuffer(&d_forceParams_, ffparams.numTypes(), deviceContext_);
    // This could be an async transfer (if the source is pinned), so
    // long as it uses the same stream as the kernels and we are happy
    // to consume additional pinned pages.
    copyToDeviceBuffer(&d_forceParams_, ffparams.iparams.data(), 0, ffparams.numTypes(), stream_,
                       GpuApiCallBehavior::Sync, nullptr);
    // Size the host-side energy buffer to match the device buffer
    vTot_.resize(F_NRE);
    allocateDeviceBuffer(&d_vTot_, F_NRE, deviceContext_);
    clearDeviceBufferAsync(&d_vTot_, 0, F_NRE, stream_);

    kernelParams_.d_forceParams = d_forceParams_;
    kernelParams_.d_xq          = d_xq_;
    kernelParams_.d_f           = d_f_;
    kernelParams_.d_fShift      = d_fShift_;
    kernelParams_.d_vTot        = d_vTot_;
    for (int i = 0; i < numFTypesOnGpu; i++)
    {
        // No interaction lists yet: null pointers and empty thread ranges
        kernelParams_.d_iatoms[i]        = nullptr;
        kernelParams_.fTypeRangeStart[i] = 0;
        kernelParams_.fTypeRangeEnd[i]   = -1;
    }
}
GpuBonded::Impl::~Impl()
{
    for (int fType : fTypesOnGpu)
    {
        if (d_iLists_[fType].iatoms)
        {
            freeDeviceBuffer(&d_iLists_[fType].iatoms);
            d_iLists_[fType].iatoms = nullptr;
        }
    }

    freeDeviceBuffer(&d_forceParams_);
    freeDeviceBuffer(&d_vTot_);
}
//! Return whether function type \p fType in \p idef has perturbed interactions
static bool fTypeHasPerturbedEntries(const InteractionDefinitions& idef, int fType)
{
    GMX_ASSERT(idef.ilsort == ilsortNO_FE || idef.ilsort == ilsortFE_SORTED,
               "Perturbed interactions should be sorted here");

    const InteractionList& ilist = idef.il[fType];

    return (idef.ilsort != ilsortNO_FE && idef.numNonperturbedInteractions[fType] != ilist.size());
}
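
/* Illustration (a sketch, not part of the original sources): with
 * ilsort == ilsortFE_SORTED the perturbed interactions are sorted to the
 * end of il[fType], so comparing numNonperturbedInteractions[fType]
 * against ilist.size() detects perturbed entries without scanning the
 * list itself. */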
//! Converts \p src with atom indices in state order to \p dest in nbnxn order
static void convertIlistToNbnxnOrder(const InteractionList& src,
                                     HostInteractionList*   dest,
                                     int                    numAtomsPerInteraction,
                                     ArrayRef<const int>    nbnxnAtomOrder)
{
    GMX_ASSERT(src.size() == 0 || !nbnxnAtomOrder.empty(), "We need the nbnxn atom order");

    dest->iatoms.resize(src.size());

    // TODO use OpenMP to parallelise this loop
    for (int i = 0; i < src.size(); i += 1 + numAtomsPerInteraction)
    {
        dest->iatoms[i] = src.iatoms[i];
        for (int a = 0; a < numAtomsPerInteraction; a++)
        {
            dest->iatoms[i + 1 + a] = nbnxnAtomOrder[src.iatoms[i + 1 + a]];
        }
    }
}
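
/* Worked example (hypothetical values, for illustration only): for a
 * two-atom interaction type such as harmonic bonds, iatoms is laid out as
 *
 *   { paramType0, ai0, aj0, paramType1, ai1, aj1, ... }
 *
 * With nbnxnAtomOrder = { 2, 0, 1 }, an input chunk { 5, 0, 1 } becomes
 * { 5, 2, 0 }: the parameter-type index passes through unchanged and only
 * the atom indices are remapped to nbnxn order.
 */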
//! Returns \p input rounded up to the closest multiple of \p factor.
static inline int roundUpToFactor(const int input, const int factor)
{
    GMX_ASSERT(factor > 0, "The factor to round up to must be > 0.");

    int remainder = input % factor;

    if (remainder == 0)
    {
        return input;
    }
    return (input + (factor - remainder));
}
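
/* Sanity-check values (illustrative only, not a test from the sources):
 *
 *   roundUpToFactor(0, 32)  -> 0
 *   roundUpToFactor(1, 32)  -> 32
 *   roundUpToFactor(32, 32) -> 32
 *   roundUpToFactor(33, 32) -> 64
 *
 * This guarantees each interaction type below occupies a whole number of
 * warp_size-sized blocks of threads in the bonded kernel.
 */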
// TODO Consider whether this function should be a factory method that
// makes an object that is the only one capable of the device
// operations needed for the lifetime of an interaction list. This
// would be harder to misuse than GpuBonded, and exchange the problem
// of naming this method for the problem of what to name the
// BondedDeviceInteractionListHandler type.
/*! Divides bonded interactions over GPU threads.
 *
 * The bonded interactions are assigned by interaction type to GPU threads.
 * The interaction types are assigned in blocks sized as warp_size. The
 * beginning and end (thread index) of each interaction type are stored in
 * kernelParams_. Pointers to the relevant data structures on the GPU are
 * also stored in kernelParams_.
 *
 * \todo Use DeviceBuffer for the d_xqPtr.
 */
void GpuBonded::Impl::updateInteractionListsAndDeviceBuffers(ArrayRef<const int> nbnxnAtomOrder,
                                                             const InteractionDefinitions& idef,
                                                             void*                         d_xqPtr,
                                                             DeviceBuffer<RVec>            d_fPtr,
                                                             DeviceBuffer<RVec>            d_fShiftPtr)
{
    // TODO wallcycle sub start
    haveInteractions_ = false;
    int fTypesCounter = 0;

    for (int fType : fTypesOnGpu)
    {
        auto& iList = iLists_[fType];

        /* Perturbation is not implemented in the GPU bonded kernels.
         * But instead of doing all interactions on the CPU, we can
         * still easily handle the types that have no perturbed
         * interactions on the GPU. */
        if (!idef.il[fType].empty() && !fTypeHasPerturbedEntries(idef, fType))
        {
            haveInteractions_ = true;

            convertIlistToNbnxnOrder(idef.il[fType], &iList, NRAL(fType), nbnxnAtomOrder);
        }
        else
        {
            iList.iatoms.clear();
        }

        // Update the device if necessary. This can leave some
        // allocations on the device when the host size decreases to
        // zero, which is OK, since we deallocate everything at the
        // end.
        if (iList.size() > 0)
        {
            t_ilist& d_iList = d_iLists_[fType];

            reallocateDeviceBuffer(&d_iList.iatoms, iList.size(), &d_iList.nr, &d_iList.nalloc,
                                   deviceContext_);

            copyToDeviceBuffer(&d_iList.iatoms, iList.iatoms.data(), 0, iList.size(), stream_,
                               GpuApiCallBehavior::Async, nullptr);
        }
        kernelParams_.fTypesOnGpu[fTypesCounter]    = fType;
        kernelParams_.numFTypeIAtoms[fTypesCounter] = iList.size();
        int numBonds = iList.size() / (interaction_function[fType].nratoms + 1);
        kernelParams_.numFTypeBonds[fTypesCounter] = numBonds;
        kernelParams_.d_iatoms[fTypesCounter]      = d_iLists_[fType].iatoms;
        if (fTypesCounter == 0)
        {
            kernelParams_.fTypeRangeStart[fTypesCounter] = 0;
        }
        else
        {
            kernelParams_.fTypeRangeStart[fTypesCounter] =
                    kernelParams_.fTypeRangeEnd[fTypesCounter - 1] + 1;
        }
        kernelParams_.fTypeRangeEnd[fTypesCounter] = kernelParams_.fTypeRangeStart[fTypesCounter]
                                                     + roundUpToFactor(numBonds, warp_size) - 1;

        GMX_ASSERT(numBonds > 0
                           || kernelParams_.fTypeRangeEnd[fTypesCounter]
                                      <= kernelParams_.fTypeRangeStart[fTypesCounter],
                   "Invalid GPU listed forces setup. numBonds must be > 0 if there are threads "
                   "allocated to do work on that interaction function type.");
        GMX_ASSERT(kernelParams_.fTypeRangeStart[fTypesCounter] % warp_size == 0
                           && (kernelParams_.fTypeRangeEnd[fTypesCounter] + 1) % warp_size == 0,
                   "The bonded interactions must be assigned to the GPU in blocks of warp size.");

        fTypesCounter++;
    }
    d_xq_     = static_cast<float4*>(d_xqPtr);
    d_f_      = asFloat3(d_fPtr);
    d_fShift_ = asFloat3(d_fShiftPtr);

    kernelParams_.d_xq          = d_xq_;
    kernelParams_.d_f           = d_f_;
    kernelParams_.d_fShift      = d_fShift_;
    kernelParams_.d_forceParams = d_forceParams_;
    kernelParams_.d_vTot        = d_vTot_;

    // TODO wallcycle sub stop
}
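
/* Worked example of the range assignment above (hypothetical counts,
 * assuming warp_size == 32): with two fTypes on the GPU, the first with
 * 90 bonds and the second with 5 bonds,
 *
 *   fTypeRangeStart[0] = 0    fTypeRangeEnd[0] = 95    (90 rounded up to 96)
 *   fTypeRangeStart[1] = 96   fTypeRangeEnd[1] = 127   (5 rounded up to 32)
 *
 * Each fType thus starts on a warp boundary, and a GPU thread can locate
 * its fType by comparing its global index against these inclusive ranges.
 */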
bool GpuBonded::Impl::haveInteractions() const
{
    return haveInteractions_;
}
void GpuBonded::Impl::launchEnergyTransfer()
{
    GMX_ASSERT(haveInteractions_,
               "No GPU bonded interactions, so no energies will be computed, so transfer should "
               "not be called");

    wallcycle_sub_start_nocount(wcycle_, ewcsLAUNCH_GPU_BONDED);
    // TODO add conditional on whether there has been any compute (and make sure host buffer doesn't contain garbage)
    float* h_vTot = vTot_.data();
    copyFromDeviceBuffer(h_vTot, &d_vTot_, 0, F_NRE, stream_, GpuApiCallBehavior::Async, nullptr);
    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_GPU_BONDED);
}
void GpuBonded::Impl::waitAccumulateEnergyTerms(gmx_enerdata_t* enerd)
{
    GMX_ASSERT(haveInteractions_,
               "No GPU bonded interactions, so no energies will be computed or transferred, so "
               "accumulation should not occur");

    wallcycle_start(wcycle_, ewcWAIT_GPU_BONDED);
    cudaError_t stat = cudaStreamSynchronize(stream_);
    CU_RET_ERR(stat, "D2H transfer of bonded energies failed");
    wallcycle_stop(wcycle_, ewcWAIT_GPU_BONDED);

    for (int fType : fTypesOnGpu)
    {
        if (fType != F_LJ14 && fType != F_COUL14)
        {
            enerd->term[fType] += vTot_[fType];
        }
    }

    // Note: We do not support energy groups here
    gmx_grppairener_t* grppener = &enerd->grpp;
    GMX_RELEASE_ASSERT(grppener->nener == 1, "No energy group support for bondeds on the GPU");
    grppener->ener[egLJ14][0] += vTot_[F_LJ14];
    grppener->ener[egCOUL14][0] += vTot_[F_COUL14];
}
void GpuBonded::Impl::clearEnergies()
{
    wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle_, ewcsLAUNCH_GPU_BONDED);
    clearDeviceBufferAsync(&d_vTot_, 0, F_NRE, stream_);
    wallcycle_sub_stop(wcycle_, ewcsLAUNCH_GPU_BONDED);
    wallcycle_stop(wcycle_, ewcLAUNCH_GPU);
}
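
/* Typical call sequence (a sketch; the real call sites live elsewhere in
 * the MD loop and pass real device buffers and energy structures):
 *
 *   gpuBonded.updateInteractionListsAndDeviceBuffers(nbnxnAtomOrder, idef, d_xq, d_f, d_fShift);
 *   // ... bonded kernel launch and force consumption ...
 *   gpuBonded.launchEnergyTransfer();
 *   gpuBonded.waitAccumulateEnergyTerms(enerd);
 *   gpuBonded.clearEnergies();
 */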
// ---- GpuBonded

GpuBonded::GpuBonded(const gmx_ffparams_t& ffparams, void* streamPtr, gmx_wallcycle* wcycle) :
    impl_(new Impl(ffparams, streamPtr, wcycle))
{
}

GpuBonded::~GpuBonded() = default;
void GpuBonded::updateInteractionListsAndDeviceBuffers(ArrayRef<const int>           nbnxnAtomOrder,
                                                       const InteractionDefinitions& idef,
                                                       void*                         d_xq,
                                                       DeviceBuffer<RVec>            d_f,
                                                       DeviceBuffer<RVec>            d_fShift)
{
    impl_->updateInteractionListsAndDeviceBuffers(nbnxnAtomOrder, idef, d_xq, d_f, d_fShift);
}
bool GpuBonded::haveInteractions() const
{
    return impl_->haveInteractions();
}

void GpuBonded::launchEnergyTransfer()
{
    impl_->launchEnergyTransfer();
}

void GpuBonded::waitAccumulateEnergyTerms(gmx_enerdata_t* enerd)
{
    impl_->waitAccumulateEnergyTerms(enerd);
}

void GpuBonded::clearEnergies()
{
    impl_->clearEnergies();
}

} // namespace gmx