/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define CUDA implementation of nbnxn_gpu_data_mgmt.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
// TODO We would like to move this down, but the way NbnxmGpu
// is currently declared means this has to be before gpu_types.h
#include "nbnxm_cuda_types.h"

// TODO Remove this comment when the above order issue is resolved
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/device_stream_manager.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/pmalloc_cuda.h"
#include "gromacs/hardware/device_information.h"
#include "gromacs/hardware/device_management.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/gridset.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/nbnxm_gpu_data_mgmt.h"
#include "gromacs/nbnxm/pairlistsets.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"

#include "nbnxm_cuda.h"
/* This is a heuristically determined parameter for the Kepler and Maxwell
 * architectures: the minimum size of ci lists is obtained by multiplying this
 * constant with the number of multiprocessors on the current device.
 * Since the maximum number of blocks per multiprocessor is 16, the ideal
 * count for small systems is 32 or 48 blocks per multiprocessor. Because
 * there are some fluctuations in the generated block counts, we use
 * a target of 44 instead of the ideal value of 48.
 */
static unsigned int gpu_min_ci_balanced_factor = 44;
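
/* Worked example (illustrative device size, not a measured value): on a GPU
 * with 80 multiprocessors, gpu_min_ci_balanced() below returns
 * 44 * 80 = 3520 as the minimum ci list size to aim for when balancing.
 */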
static void nbnxn_cuda_clear_e_fshift(NbnxmGpu* nb);
/*! Initializes the atomdata structure the first time; it only gets filled at
    pair-search. */
static void init_atomdata_first(NBAtomData* ad, int ntypes, const DeviceContext& deviceContext)
{
    ad->numTypes = ntypes;
    allocateDeviceBuffer(&ad->shiftVec, SHIFTS, deviceContext);
    ad->shiftVecUploaded = false;

    allocateDeviceBuffer(&ad->fShift, SHIFTS, deviceContext);
    allocateDeviceBuffer(&ad->eLJ, 1, deviceContext);
    allocateDeviceBuffer(&ad->eElec, 1, deviceContext);

    /* initialize to nullptr pointers to data that is not allocated here and will
       need reallocation in gpu_init_atomdata */
    ad->xq = nullptr;
    ad->f  = nullptr;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    ad->numAtoms      = -1;
    ad->numAtomsAlloc = -1;
}
/*! Initializes the nonbonded parameter data structure. */
static void init_nbparam(NBParamGpu*                     nbp,
                         const interaction_const_t*      ic,
                         const PairlistParams&           listParams,
                         const nbnxn_atomdata_t::Params& nbatParams,
                         const DeviceContext&            deviceContext)
{
    const int ntypes = nbatParams.numTypes;

    set_cutoff_parameters(nbp, ic, listParams);

    /* The kernel code supports LJ combination rules (geometric and LB) for
     * all kernel types, but we only generate kernels for the combination
     * rules that are useful in practice. Currently that means LJ combination
     * rule (geometric and LB) kernels for plain cut-off LJ only. On Maxwell
     * the force-only kernels speed up 15% with PME and 20% with RF, while the
     * other kernels speed up about half as much. For LJ force-switch the
     * geometric rule would give a 7% speed-up, but this combination is rarely
     * used. LJ force-switch with the LB rule is more common, but gives only a
     * 1% speed-up.
     */
    nbp->vdwType  = nbnxmGpuPickVdwKernelType(ic, nbatParams.ljCombinationRule);
    nbp->elecType = nbnxmGpuPickElectrostaticsKernelType(ic, deviceContext.deviceInfo());

    /* generate table for PME */
    nbp->coulomb_tab = nullptr;
    if (nbp->elecType == ElecType::EwaldTab || nbp->elecType == ElecType::EwaldTabTwin)
    {
        GMX_RELEASE_ASSERT(ic->coulombEwaldTables, "Need valid Coulomb Ewald correction tables");
        init_ewald_coulomb_force_table(*ic->coulombEwaldTables, nbp, deviceContext);
    }

    /* set up LJ parameter lookup table */
    if (!useLjCombRule(nbp->vdwType))
    {
        initParamLookupTable(
                &nbp->nbfp, &nbp->nbfp_texobj, nbatParams.nbfp.data(), 2 * ntypes * ntypes, deviceContext);
    }

    /* set up LJ-PME parameter lookup table */
    if (ic->vdwtype == VanDerWaalsType::Pme)
    {
        initParamLookupTable(
                &nbp->nbfp_comb, &nbp->nbfp_comb_texobj, nbatParams.nbfp_comb.data(), 2 * ntypes, deviceContext);
    }
}
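
/* Indexing sketch (an assumption inferred from the table sizes used above,
 * for illustration only): the (c6, c12) pair for atom types ti and tj lives at
 *     c6  = nbfp[2 * (ti * ntypes + tj)]
 *     c12 = nbfp[2 * (ti * ntypes + tj) + 1]
 * which is why the full lookup table holds 2 * ntypes * ntypes elements,
 * while the per-type combination table nbfp_comb holds 2 * ntypes.
 */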
/*! Initializes simulation constant data. */
static void cuda_init_const(NbnxmGpu*                       nb,
                            const interaction_const_t*      ic,
                            const PairlistParams&           listParams,
                            const nbnxn_atomdata_t::Params& nbatParams)
{
    init_atomdata_first(nb->atdat, nbatParams.numTypes, *nb->deviceContext_);
    init_nbparam(nb->nbparam, ic, listParams, nbatParams, *nb->deviceContext_);

    /* clear energy and shift force outputs */
    nbnxn_cuda_clear_e_fshift(nb);
}
NbnxmGpu* gpu_init(const gmx::DeviceStreamManager& deviceStreamManager,
                   const interaction_const_t*      ic,
                   const PairlistParams&           listParams,
                   const nbnxn_atomdata_t*         nbat,
                   bool                            bLocalAndNonlocal)
{
    auto nb            = new NbnxmGpu();
    nb->deviceContext_ = &deviceStreamManager.context();
    snew(nb->atdat, 1);
    snew(nb->nbparam, 1);
    snew(nb->plist[InteractionLocality::Local], 1);
    if (bLocalAndNonlocal)
    {
        snew(nb->plist[InteractionLocality::NonLocal], 1);
    }

    nb->bUseTwoStreams = bLocalAndNonlocal;

    nb->timers = new Nbnxm::GpuTimers();
    snew(nb->timings, 1);

    /* pinned host staging buffers for the energy and shift force outputs */
    pmalloc((void**)&nb->nbst.eLJ, sizeof(*nb->nbst.eLJ));
    pmalloc((void**)&nb->nbst.eElec, sizeof(*nb->nbst.eElec));
    pmalloc((void**)&nb->nbst.fShift, SHIFTS * sizeof(*nb->nbst.fShift));

    init_plist(nb->plist[InteractionLocality::Local]);

    /* local/non-local GPU streams */
    GMX_RELEASE_ASSERT(deviceStreamManager.streamIsValid(gmx::DeviceStreamType::NonBondedLocal),
                       "Local non-bonded stream should be initialized to use GPU for non-bonded.");
    nb->deviceStreams[InteractionLocality::Local] =
            &deviceStreamManager.stream(gmx::DeviceStreamType::NonBondedLocal);
    if (nb->bUseTwoStreams)
    {
        init_plist(nb->plist[InteractionLocality::NonLocal]);

        /* Note that the device we're running on does not have to support
         * priorities, because we are querying the priority range, which in
         * this case will be a single value.
         */
        GMX_RELEASE_ASSERT(deviceStreamManager.streamIsValid(gmx::DeviceStreamType::NonBondedNonLocal),
                           "Non-local non-bonded stream should be initialized to use GPU for "
                           "non-bonded with domain decomposition.");
        nb->deviceStreams[InteractionLocality::NonLocal] =
                &deviceStreamManager.stream(gmx::DeviceStreamType::NonBondedNonLocal);
    }

    /* WARNING: CUDA timings are incorrect with multiple streams.
     * This is the main reason why they are disabled by default.
     */
    // TODO: Consider turning on by default when we can detect nr of streams.
    nb->bDoTime = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);

    if (nb->bDoTime)
    {
        init_timings(nb->timings);
    }
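
    /* Usage sketch (the shell command is illustrative; the environment
     * variable is the one checked above):
     *   GMX_ENABLE_GPU_TIMING=1 gmx mdrun -deffnm sim
     * turns on the timing regions opened/closed throughout this file.
     */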

    /* set the kernel type for the current GPU */
    /* pick L1 cache configuration */
    cuda_set_cacheconfig();

    cuda_init_const(nb, ic, listParams, nbat->params());

    nb->atomIndicesSize       = 0;
    nb->atomIndicesSize_alloc = 0;
    nb->ncxy_na               = 0;
    nb->ncxy_na_alloc         = 0;
    nb->ncxy_ind              = 0;
    nb->ncxy_ind_alloc        = 0;

    if (debug)
    {
        fprintf(debug, "Initialized CUDA data structures.\n");
    }

    return nb;
}
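
/* Minimal lifecycle sketch (hypothetical caller; in mdrun these calls are
 * spread across setup, pair search, and cleanup, and the variable names here
 * are illustrative):
 *   NbnxmGpu* nb = gpu_init(streamManager, ic, listParams, nbat, haveDomDec);
 *   gpu_init_atomdata(nb, nbat);      // after each pair search
 *   gpu_upload_shiftvec(nb, nbat);    // when the box may have changed
 *   gpu_clear_outputs(nb, computeVirial);
 *   ...
 *   gpu_free(nb);
 */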
void gpu_upload_shiftvec(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom)
{
    NBAtomData*         adat        = nb->atdat;
    const DeviceStream& localStream = *nb->deviceStreams[InteractionLocality::Local];

    /* only if we have a dynamic box */
    if (nbatom->bDynamicBox || !adat->shiftVecUploaded)
    {
        static_assert(sizeof(adat->shiftVec[0]) == sizeof(nbatom->shift_vec[0]),
                      "Sizes of host- and device-side shift vectors should be the same.");
        copyToDeviceBuffer(&adat->shiftVec,
                           reinterpret_cast<const Float3*>(nbatom->shift_vec.data()),
                           0,
                           SHIFTS,
                           localStream,
                           GpuApiCallBehavior::Async,
                           nullptr);
        adat->shiftVecUploaded = true;
    }
}
/*! Clears the first natoms_clear elements of the GPU nonbonded force output array. */
static void nbnxn_cuda_clear_f(NbnxmGpu* nb, int natoms_clear)
{
    NBAtomData*         adat        = nb->atdat;
    const DeviceStream& localStream = *nb->deviceStreams[InteractionLocality::Local];
    clearDeviceBufferAsync(&adat->f, 0, natoms_clear, localStream);
}
/*! Clears nonbonded shift force output array and energy outputs on the GPU. */
static void nbnxn_cuda_clear_e_fshift(NbnxmGpu* nb)
{
    NBAtomData*         adat        = nb->atdat;
    const DeviceStream& localStream = *nb->deviceStreams[InteractionLocality::Local];

    clearDeviceBufferAsync(&adat->fShift, 0, SHIFTS, localStream);
    clearDeviceBufferAsync(&adat->eLJ, 0, 1, localStream);
    clearDeviceBufferAsync(&adat->eElec, 0, 1, localStream);
}
void gpu_clear_outputs(NbnxmGpu* nb, bool computeVirial)
{
    nbnxn_cuda_clear_f(nb, nb->atdat->numAtoms);
    /* clear shift force array and energies if the outputs were
       used in the current step */
    if (computeVirial)
    {
        nbnxn_cuda_clear_e_fshift(nb);
    }
}
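
/* Usage sketch (illustrative; in mdrun the flag comes from the scheduler's
 * per-step StepWorkload):
 *   gpu_clear_outputs(nb, stepWork.computeVirial);
 * clears the force output every step, but the energy and shift force outputs
 * only on steps where they were actually used.
 */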
void gpu_init_atomdata(NbnxmGpu* nb, const nbnxn_atomdata_t* nbat)
{
    int                  nalloc, natoms;
    bool                 realloced;
    bool                 bDoTime       = nb->bDoTime;
    Nbnxm::GpuTimers*    timers        = nb->timers;
    NBAtomData*          d_atdat       = nb->atdat;
    const DeviceContext& deviceContext = *nb->deviceContext_;
    const DeviceStream&  localStream   = *nb->deviceStreams[InteractionLocality::Local];

    natoms    = nbat->numAtoms();
    realloced = false;

    if (bDoTime)
    {
        /* time async copy */
        timers->atdat.openTimingRegion(localStream);
    }

    /* need to reallocate if we have to copy more atoms than the amount of space
       available, and only allocate if we haven't initialized yet, i.e. d_atdat->numAtomsAlloc == -1 */
    if (natoms > d_atdat->numAtomsAlloc)
    {
        nalloc = over_alloc_small(natoms);

        /* free up first if the arrays have already been initialized */
        if (d_atdat->numAtomsAlloc != -1)
        {
            freeDeviceBuffer(&d_atdat->f);
            freeDeviceBuffer(&d_atdat->xq);
            freeDeviceBuffer(&d_atdat->atomTypes);
            freeDeviceBuffer(&d_atdat->ljComb);
        }

        allocateDeviceBuffer(&d_atdat->f, nalloc, deviceContext);
        allocateDeviceBuffer(&d_atdat->xq, nalloc, deviceContext);
        if (useLjCombRule(nb->nbparam->vdwType))
        {
            allocateDeviceBuffer(&d_atdat->ljComb, nalloc, deviceContext);
        }
        else
        {
            allocateDeviceBuffer(&d_atdat->atomTypes, nalloc, deviceContext);
        }

        d_atdat->numAtomsAlloc = nalloc;
        realloced              = true;
    }

    d_atdat->numAtoms      = natoms;
    d_atdat->numAtomsLocal = nbat->natoms_local;

    /* need to clear GPU f output if realloc happened */
    if (realloced)
    {
        nbnxn_cuda_clear_f(nb, nalloc);
    }

    if (useLjCombRule(nb->nbparam->vdwType))
    {
        static_assert(sizeof(d_atdat->ljComb[0]) == sizeof(Float2),
                      "Size of the LJ parameters element should be equal to the size of float2.");
        copyToDeviceBuffer(&d_atdat->ljComb,
                           reinterpret_cast<const Float2*>(nbat->params().lj_comb.data()),
                           0,
                           natoms,
                           localStream,
                           GpuApiCallBehavior::Async,
                           bDoTime ? timers->atdat.fetchNextEvent() : nullptr);
    }
    else
    {
        static_assert(sizeof(d_atdat->atomTypes[0]) == sizeof(nbat->params().type[0]),
                      "Sizes of host- and device-side atom types should be the same.");
        copyToDeviceBuffer(&d_atdat->atomTypes,
                           nbat->params().type.data(),
                           0,
                           natoms,
                           localStream,
                           GpuApiCallBehavior::Async,
                           bDoTime ? timers->atdat.fetchNextEvent() : nullptr);
    }

    if (bDoTime)
    {
        timers->atdat.closeTimingRegion(localStream);
    }
}
void gpu_free(NbnxmGpu* nb)
{
    if (nb == nullptr)
    {
        return;
    }

    NBAtomData* atdat   = nb->atdat;
    NBParamGpu* nbparam = nb->nbparam;

    if (nbparam->coulomb_tab
        && (nbparam->elecType == ElecType::EwaldTab || nbparam->elecType == ElecType::EwaldTabTwin))
    {
        destroyParamLookupTable(&nbparam->coulomb_tab, nbparam->coulomb_tab_texobj);
    }

    if (!useLjCombRule(nb->nbparam->vdwType))
    {
        destroyParamLookupTable(&nbparam->nbfp, nbparam->nbfp_texobj);
    }

    if (nbparam->vdwType == VdwType::EwaldGeom || nbparam->vdwType == VdwType::EwaldLB)
    {
        destroyParamLookupTable(&nbparam->nbfp_comb, nbparam->nbfp_comb_texobj);
    }

    freeDeviceBuffer(&atdat->shiftVec);
    freeDeviceBuffer(&atdat->fShift);

    freeDeviceBuffer(&atdat->eLJ);
    freeDeviceBuffer(&atdat->eElec);

    freeDeviceBuffer(&atdat->f);
    freeDeviceBuffer(&atdat->xq);
    freeDeviceBuffer(&atdat->atomTypes);
    freeDeviceBuffer(&atdat->ljComb);

    /* Free plist */
    auto* plist = nb->plist[InteractionLocality::Local];
    freeDeviceBuffer(&plist->sci);
    freeDeviceBuffer(&plist->cj4);
    freeDeviceBuffer(&plist->imask);
    freeDeviceBuffer(&plist->excl);
    sfree(plist);
    if (nb->bUseTwoStreams)
    {
        auto* plist_nl = nb->plist[InteractionLocality::NonLocal];
        freeDeviceBuffer(&plist_nl->sci);
        freeDeviceBuffer(&plist_nl->cj4);
        freeDeviceBuffer(&plist_nl->imask);
        freeDeviceBuffer(&plist_nl->excl);
        sfree(plist_nl);
    }

    /* Free nbst */
    pfree(nb->nbst.eLJ);
    nb->nbst.eLJ = nullptr;

    pfree(nb->nbst.eElec);
    nb->nbst.eElec = nullptr;

    pfree(nb->nbst.fShift);
    nb->nbst.fShift = nullptr;

    sfree(atdat);
    sfree(nbparam);
    sfree(nb->timings);
    delete nb;

    if (debug)
    {
        fprintf(debug, "Cleaned up CUDA data structures.\n");
    }
}
int gpu_min_ci_balanced(NbnxmGpu* nb)
{
    return nb != nullptr ? gpu_min_ci_balanced_factor * nb->deviceContext_->deviceInfo().prop.multiProcessorCount
                         : 0;
}
void* gpu_get_xq(NbnxmGpu* nb)
{
    GMX_ASSERT(nb != nullptr, "nb pointer must be valid");

    return static_cast<void*>(nb->atdat->xq);
}
DeviceBuffer<gmx::RVec> gpu_get_f(NbnxmGpu* nb)
{
    GMX_ASSERT(nb != nullptr, "nb pointer must be valid");

    return reinterpret_cast<DeviceBuffer<gmx::RVec>>(nb->atdat->f);
}
DeviceBuffer<gmx::RVec> gpu_get_fshift(NbnxmGpu* nb)
{
    GMX_ASSERT(nb != nullptr, "nb pointer must be valid");

    return reinterpret_cast<DeviceBuffer<gmx::RVec>>(nb->atdat->fShift);
}
/* Initialization for X buffer operations on GPU. */
/* TODO Remove explicit pinning from host arrays from here and manage in a more natural way */
void nbnxn_gpu_init_x_to_nbat_x(const Nbnxm::GridSet& gridSet, NbnxmGpu* gpu_nbv)
{
    const DeviceStream& localStream   = *gpu_nbv->deviceStreams[InteractionLocality::Local];
    bool                bDoTime       = gpu_nbv->bDoTime;
    const int           maxNumColumns = gridSet.numColumnsMax();

    reallocateDeviceBuffer(&gpu_nbv->cxy_na,
                           maxNumColumns * gridSet.grids().size(),
                           &gpu_nbv->ncxy_na,
                           &gpu_nbv->ncxy_na_alloc,
                           *gpu_nbv->deviceContext_);
    reallocateDeviceBuffer(&gpu_nbv->cxy_ind,
                           maxNumColumns * gridSet.grids().size(),
                           &gpu_nbv->ncxy_ind,
                           &gpu_nbv->ncxy_ind_alloc,
                           *gpu_nbv->deviceContext_);

    for (unsigned int g = 0; g < gridSet.grids().size(); g++)
    {
        const Nbnxm::Grid& grid = gridSet.grids()[g];

        const int  numColumns      = grid.numColumns();
        const int* atomIndices     = gridSet.atomIndices().data();
        const int  atomIndicesSize = gridSet.atomIndices().size();
        const int* cxy_na          = grid.cxy_na().data();
        const int* cxy_ind         = grid.cxy_ind().data();

        reallocateDeviceBuffer(&gpu_nbv->atomIndices,
                               atomIndicesSize,
                               &gpu_nbv->atomIndicesSize,
                               &gpu_nbv->atomIndicesSize_alloc,
                               *gpu_nbv->deviceContext_);

        if (atomIndicesSize > 0)
        {
            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(localStream);
            }

            copyToDeviceBuffer(&gpu_nbv->atomIndices,
                               atomIndices,
                               0,
                               atomIndicesSize,
                               localStream,
                               GpuApiCallBehavior::Async,
                               nullptr);

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(localStream);
            }
        }

        if (numColumns > 0)
        {
            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(localStream);
            }

            int* destPtr = &gpu_nbv->cxy_na[maxNumColumns * g];
            copyToDeviceBuffer(
                    &destPtr, cxy_na, 0, numColumns, localStream, GpuApiCallBehavior::Async, nullptr);

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(localStream);
            }

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(localStream);
            }

            destPtr = &gpu_nbv->cxy_ind[maxNumColumns * g];
            copyToDeviceBuffer(
                    &destPtr, cxy_ind, 0, numColumns, localStream, GpuApiCallBehavior::Async, nullptr);

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(localStream);
            }
        }
    }

    if (gpu_nbv->bUseTwoStreams)
    {
        // The above data is transferred on the local stream but is a
        // dependency of the nonlocal stream (specifically the nonlocal X
        // buf ops kernel). We therefore set a dependency to ensure
        // that the nonlocal stream waits on the local stream here.
        // This call records an event in the local stream:
        gpu_nbv->misc_ops_and_local_H2D_done.markEvent(
                *gpu_nbv->deviceStreams[Nbnxm::InteractionLocality::Local]);
        // ...and this call instructs the nonlocal stream to wait on that event:
        gpu_nbv->misc_ops_and_local_H2D_done.enqueueWaitEvent(
                *gpu_nbv->deviceStreams[Nbnxm::InteractionLocality::NonLocal]);
    }
}
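
/* A minimal standalone sketch of the same event-based cross-stream dependency
 * pattern in the plain CUDA runtime API (the stream and event variables are
 * illustrative locals, not GROMACS objects; GpuEventSynchronizer provides the
 * markEvent()/enqueueWaitEvent() abstraction used above):
 *
 *   cudaEvent_t localDone;
 *   cudaEventCreateWithFlags(&localDone, cudaEventDisableTiming);
 *   cudaEventRecord(localDone, localStream);           // mark local-stream work
 *   cudaStreamWaitEvent(nonLocalStream, localDone, 0); // nonlocal stream waits
 */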