/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define CUDA implementation of nbnxn_gpu_data_mgmt.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gmxpre.h"

#include <cassert>
#include <cstdio>
#include <cstdlib>

// TODO We would like to move this down, but the way NbnxmGpu
// is currently declared means this has to be before gpu_types.h
#include "nbnxm_cuda_types.h"

// TODO Remove this comment when the above order issue is resolved
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/device_stream_manager.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/pmalloc_cuda.h"
#include "gromacs/hardware/device_information.h"
#include "gromacs/hardware/device_management.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/gridset.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/nbnxm_gpu_data_mgmt.h"
#include "gromacs/nbnxm/pairlistsets.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"

#include "nbnxm_cuda.h"

namespace Nbnxm
{
/* This is a heuristically determined parameter for the Kepler and Maxwell
 * architectures: the minimum size of ci lists is obtained by multiplying
 * this constant with the number of multiprocessors on the current device.
 * Since the maximum number of blocks per multiprocessor is 16, the ideal
 * count for small systems is 32 or 48 blocks per multiprocessor. Because
 * the generated block counts fluctuate a bit, we use a target of 44
 * instead of the ideal value of 48.
 */
static unsigned int gpu_min_ci_balanced_factor = 44;
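
/* Worked example (illustrative only; the multiprocessor count is an assumed
 * device property): on a GPU with 80 multiprocessors, gpu_min_ci_balanced()
 * below returns 44 * 80 = 3520, i.e. the pair search aims to produce at
 * least ~3520 ci list entries so that all multiprocessors can be kept busy
 * even for small systems.
 */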
static void nbnxn_cuda_clear_e_fshift(NbnxmGpu* nb);
/*! Initializes the atomdata structure the first time; it only gets filled at pair-search. */
static void init_atomdata_first(cu_atomdata_t* ad, int ntypes, const DeviceContext& deviceContext)
{
    ad->ntypes = ntypes;
    allocateDeviceBuffer(&ad->shift_vec, SHIFTS, deviceContext);
    ad->bShiftVecUploaded = false;

    allocateDeviceBuffer(&ad->fshift, SHIFTS, deviceContext);
    allocateDeviceBuffer(&ad->e_lj, 1, deviceContext);
    allocateDeviceBuffer(&ad->e_el, 1, deviceContext);

    /* initialize to nullptr pointers to data that is not allocated here and will
       need reallocation in nbnxn_cuda_init_atomdata */
    ad->xq = nullptr;
    ad->f  = nullptr;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    ad->natoms = -1;
    ad->nalloc = -1;
}
/*! Initializes the nonbonded parameter data structure. */
static void init_nbparam(NBParamGpu*                     nbp,
                         const interaction_const_t*      ic,
                         const PairlistParams&           listParams,
                         const nbnxn_atomdata_t::Params& nbatParams,
                         const DeviceContext&            deviceContext)
{
    const int ntypes = nbatParams.numTypes;

    set_cutoff_parameters(nbp, ic, listParams);
    /* The kernel code supports LJ combination rules (geometric and LB) for
     * all kernel types, but we only generate useful combination rule kernels.
     * We currently only use LJ combination rule (geometric and LB) kernels
     * for plain cut-off LJ. On Maxwell the force-only kernels speed up 15%
     * with PME and 20% with RF; the other kernels speed up about half as much.
     * For LJ force-switch the geometric rule would give a 7% speed-up, but this
     * combination is rarely used. LJ force-switch with the LB rule is more
     * common, but gives only a 1% speed-up.
     */
    if (ic->vdwtype == evdwCUT)
    {
        switch (ic->vdw_modifier)
        {
            case eintmodNONE:
            case eintmodPOTSHIFT:
                switch (nbatParams.comb_rule)
                {
                    case ljcrNONE: nbp->vdwtype = evdwTypeCUT; break;
                    case ljcrGEOM: nbp->vdwtype = evdwTypeCUTCOMBGEOM; break;
                    case ljcrLB: nbp->vdwtype = evdwTypeCUTCOMBLB; break;
                    default:
                        gmx_incons(
                                "The requested LJ combination rule is not implemented in the CUDA "
                                "GPU accelerated kernels!");
                }
                break;
            case eintmodFORCESWITCH: nbp->vdwtype = evdwTypeFSWITCH; break;
            case eintmodPOTSWITCH: nbp->vdwtype = evdwTypePSWITCH; break;
            default:
                gmx_incons(
                        "The requested VdW interaction modifier is not implemented in the CUDA GPU "
                        "accelerated kernels!");
        }
    }
    else if (ic->vdwtype == evdwPME)
    {
        if (ic->ljpme_comb_rule == ljcrGEOM)
        {
            assert(nbatParams.comb_rule == ljcrGEOM);
            nbp->vdwtype = evdwTypeEWALDGEOM;
        }
        else
        {
            assert(nbatParams.comb_rule == ljcrLB);
            nbp->vdwtype = evdwTypeEWALDLB;
        }
    }
    else
    {
        gmx_incons("The requested VdW type is not implemented in the CUDA GPU accelerated kernels!");
    }

    if (ic->eeltype == eelCUT)
    {
        nbp->eeltype = eelTypeCUT;
    }
    else if (EEL_RF(ic->eeltype))
    {
        nbp->eeltype = eelTypeRF;
    }
    else if ((EEL_PME(ic->eeltype) || ic->eeltype == eelEWALD))
    {
        nbp->eeltype = nbnxn_gpu_pick_ewald_kernel_type(*ic);
    }
    else
    {
        /* Shouldn't happen, as this is checked when choosing the Verlet scheme */
        gmx_incons(
                "The requested electrostatics type is not implemented in the CUDA GPU accelerated "
                "kernels!");
    }

    /* generate table for PME */
    nbp->coulomb_tab = nullptr;
    if (nbp->eeltype == eelTypeEWALD_TAB || nbp->eeltype == eelTypeEWALD_TAB_TWIN)
    {
        GMX_RELEASE_ASSERT(ic->coulombEwaldTables, "Need valid Coulomb Ewald correction tables");
        init_ewald_coulomb_force_table(*ic->coulombEwaldTables, nbp, deviceContext);
    }
    /* set up LJ parameter lookup table */
    if (!useLjCombRule(nbp->vdwtype))
    {
        initParamLookupTable(&nbp->nbfp, &nbp->nbfp_texobj, nbatParams.nbfp.data(),
                             2 * ntypes * ntypes, deviceContext);
    }

    /* set up LJ-PME parameter lookup table */
    if (ic->vdwtype == evdwPME)
    {
        initParamLookupTable(&nbp->nbfp_comb, &nbp->nbfp_comb_texobj, nbatParams.nbfp_comb.data(),
                             2 * ntypes, deviceContext);
    }
}
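
/* Layout sketch for the lookup tables above (the (c6, c12) pair packing is
 * assumed from the 2 * ntypes * ntypes and 2 * ntypes element counts):
 *
 *     nbfp[2 * (ti * ntypes + tj)]     -> C6  of type pair (ti, tj)
 *     nbfp[2 * (ti * ntypes + tj) + 1] -> C12 of type pair (ti, tj)
 *
 * and analogously nbfp_comb holds one (c6, c12) pair per type for LJ-PME.
 */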
/*! Re-generates the GPU Ewald force table, resets rlist, and updates the
 *  electrostatic type, switching to twin cut-off (or back) if needed.
 */
void gpu_pme_loadbal_update_param(const nonbonded_verlet_t* nbv, const interaction_const_t* ic)
{
    if (!nbv || !nbv->useGpu())
    {
        return;
    }

    NbnxmGpu*   nb  = nbv->gpu_nbv;
    NBParamGpu* nbp = nbv->gpu_nbv->nbparam;

    set_cutoff_parameters(nbp, ic, nbv->pairlistSets().params());

    nbp->eeltype = nbnxn_gpu_pick_ewald_kernel_type(*ic);

    GMX_RELEASE_ASSERT(ic->coulombEwaldTables, "Need valid Coulomb Ewald correction tables");
    init_ewald_coulomb_force_table(*ic->coulombEwaldTables, nbp, *nb->deviceContext_);
}
/*! Initializes simulation constant data. */
static void cuda_init_const(NbnxmGpu*                       nb,
                            const interaction_const_t*      ic,
                            const PairlistParams&           listParams,
                            const nbnxn_atomdata_t::Params& nbatParams)
{
    init_atomdata_first(nb->atdat, nbatParams.numTypes, *nb->deviceContext_);
    init_nbparam(nb->nbparam, ic, listParams, nbatParams, *nb->deviceContext_);

    /* clear energy and shift force outputs */
    nbnxn_cuda_clear_e_fshift(nb);
}
NbnxmGpu* gpu_init(const gmx::DeviceStreamManager& deviceStreamManager,
                   const interaction_const_t*      ic,
                   const PairlistParams&           listParams,
                   const nbnxn_atomdata_t*         nbat,
                   bool                            bLocalAndNonlocal)
{
    cudaError_t stat;

    auto nb            = new NbnxmGpu();
    nb->deviceContext_ = &deviceStreamManager.context();
    snew(nb->atdat, 1);
    snew(nb->nbparam, 1);
    snew(nb->plist[InteractionLocality::Local], 1);
    if (bLocalAndNonlocal)
    {
        snew(nb->plist[InteractionLocality::NonLocal], 1);
    }

    nb->bUseTwoStreams = bLocalAndNonlocal;

    nb->timers = new cu_timers_t();
    snew(nb->timings, 1);

    /* init nbst */
    pmalloc((void**)&nb->nbst.e_lj, sizeof(*nb->nbst.e_lj));
    pmalloc((void**)&nb->nbst.e_el, sizeof(*nb->nbst.e_el));
    pmalloc((void**)&nb->nbst.fshift, SHIFTS * sizeof(*nb->nbst.fshift));
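
    /* These staging buffers are page-locked (pinned) host memory (pmalloc is
       backed by cudaMallocHost), which is what allows the energy and shift
       force device-to-host copies issued each step to run asynchronously. */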
    init_plist(nb->plist[InteractionLocality::Local]);

    /* local/non-local GPU streams */
    GMX_RELEASE_ASSERT(deviceStreamManager.streamIsValid(gmx::DeviceStreamType::NonBondedLocal),
                       "Local non-bonded stream should be initialized to use GPU for non-bonded.");
    nb->deviceStreams[InteractionLocality::Local] =
            &deviceStreamManager.stream(gmx::DeviceStreamType::NonBondedLocal);
    if (nb->bUseTwoStreams)
    {
        init_plist(nb->plist[InteractionLocality::NonLocal]);

        /* Note that the device we're running on does not have to support
         * priorities, because we are querying the priority range, which in
         * this case will be a single value.
         */
        GMX_RELEASE_ASSERT(deviceStreamManager.streamIsValid(gmx::DeviceStreamType::NonBondedNonLocal),
                           "Non-local non-bonded stream should be initialized to use GPU for "
                           "non-bonded with domain decomposition.");
        nb->deviceStreams[InteractionLocality::NonLocal] =
                &deviceStreamManager.stream(gmx::DeviceStreamType::NonBondedNonLocal);
    }
    /* init events for synchronization (timing disabled for performance reasons!) */
    stat = cudaEventCreateWithFlags(&nb->nonlocal_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on nonlocal_done failed");
    stat = cudaEventCreateWithFlags(&nb->misc_ops_and_local_H2D_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on misc_ops_and_local_H2D_done failed");

    nb->xNonLocalCopyD2HDone = new GpuEventSynchronizer();

    /* WARNING: CUDA timings are incorrect with multiple streams.
     *          This is the main reason why they are disabled by default.
     */
    // TODO: Consider turning on by default when we can detect nr of streams.
    nb->bDoTime = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);

    if (nb->bDoTime)
    {
        init_timings(nb->timings);
    }
    /* set the kernel type for the current GPU */
    /* pick L1 cache configuration */
    cuda_set_cacheconfig();

    cuda_init_const(nb, ic, listParams, nbat->params());

    nb->atomIndicesSize       = 0;
    nb->atomIndicesSize_alloc = 0;
    nb->ncxy_na               = 0;
    nb->ncxy_na_alloc         = 0;
    nb->ncxy_ind              = 0;
    nb->ncxy_ind_alloc        = 0;

    if (debug)
    {
        fprintf(debug, "Initialized CUDA data structures.\n");
    }

    return nb;
}
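
/* Usage sketch (illustrative only; the real call sequence lives in the mdrun
 * setup and search code, and the plist/nbat objects here are assumed to come
 * from the pair search):
 *
 *     NbnxmGpu* gpuNbv = gpu_init(deviceStreamManager, ic, listParams, nbat,
 *                                 haveNonlocalWork);
 *     gpu_init_atomdata(gpuNbv, nbat);                          // after search
 *     gpu_init_pairlist(gpuNbv, plist, InteractionLocality::Local);
 *     ...
 *     gpu_free(gpuNbv);                                         // at teardown
 */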
void gpu_init_pairlist(NbnxmGpu* nb, const NbnxnPairlistGpu* h_plist, const InteractionLocality iloc)
{
    char sbuf[STRLEN];

    bool                bDoTime      = (nb->bDoTime && !h_plist->sci.empty());
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
    gpu_plist*          d_plist      = nb->plist[iloc];

    if (d_plist->na_c < 0)
    {
        d_plist->na_c = h_plist->na_ci;
    }
    else
    {
        if (d_plist->na_c != h_plist->na_ci)
        {
            sprintf(sbuf, "In init_plist: the #atoms per cell has changed (from %d to %d)",
                    d_plist->na_c, h_plist->na_ci);
            gmx_incons(sbuf);
        }
    }

    gpu_timers_t::Interaction& iTimers = nb->timers->interaction[iloc];

    if (bDoTime)
    {
        iTimers.pl_h2d.openTimingRegion(deviceStream);
        iTimers.didPairlistH2D = true;
    }

    const DeviceContext& deviceContext = *nb->deviceContext_;

    reallocateDeviceBuffer(&d_plist->sci, h_plist->sci.size(), &d_plist->nsci, &d_plist->sci_nalloc,
                           deviceContext);
    copyToDeviceBuffer(&d_plist->sci, h_plist->sci.data(), 0, h_plist->sci.size(), deviceStream,
                       GpuApiCallBehavior::Async, bDoTime ? iTimers.pl_h2d.fetchNextEvent() : nullptr);

    reallocateDeviceBuffer(&d_plist->cj4, h_plist->cj4.size(), &d_plist->ncj4, &d_plist->cj4_nalloc,
                           deviceContext);
    copyToDeviceBuffer(&d_plist->cj4, h_plist->cj4.data(), 0, h_plist->cj4.size(), deviceStream,
                       GpuApiCallBehavior::Async, bDoTime ? iTimers.pl_h2d.fetchNextEvent() : nullptr);

    reallocateDeviceBuffer(&d_plist->imask, h_plist->cj4.size() * c_nbnxnGpuClusterpairSplit,
                           &d_plist->nimask, &d_plist->imask_nalloc, deviceContext);

    reallocateDeviceBuffer(&d_plist->excl, h_plist->excl.size(), &d_plist->nexcl,
                           &d_plist->excl_nalloc, deviceContext);
    copyToDeviceBuffer(&d_plist->excl, h_plist->excl.data(), 0, h_plist->excl.size(), deviceStream,
                       GpuApiCallBehavior::Async, bDoTime ? iTimers.pl_h2d.fetchNextEvent() : nullptr);

    if (bDoTime)
    {
        iTimers.pl_h2d.closeTimingRegion(deviceStream);
    }

    /* the next use of this list will be the first one, so we need to prune */
    d_plist->haveFreshList = true;
}
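
/* Note on the flag set above (a summary of the intended flow, not of this
 * function): while haveFreshList is true the list still contains all pairs
 * within rlist; the first pruning kernel launch after this point strips pairs
 * outside the actual interaction range and clears the flag, after which
 * rolling pruning keeps the list lean until the next search.
 */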
void gpu_upload_shiftvec(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom)
{
    cu_atomdata_t*      adat        = nb->atdat;
    const DeviceStream& localStream = *nb->deviceStreams[InteractionLocality::Local];

    /* only if we have a dynamic box */
    if (nbatom->bDynamicBox || !adat->bShiftVecUploaded)
    {
        static_assert(sizeof(adat->shift_vec[0]) == sizeof(nbatom->shift_vec[0]),
                      "Sizes of host- and device-side shift vectors should be the same.");
        copyToDeviceBuffer(&adat->shift_vec, reinterpret_cast<const float3*>(nbatom->shift_vec.data()),
                           0, SHIFTS, localStream, GpuApiCallBehavior::Async, nullptr);
        adat->bShiftVecUploaded = true;
    }
}
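
/* For a static box the shift vectors are constant over the whole run, so a
 * single upload suffices; with a dynamic box (e.g. pressure coupling) they
 * change whenever the box does, hence the re-upload on every call above. */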
/*! Clears the first natoms_clear elements of the GPU nonbonded force output array. */
static void nbnxn_cuda_clear_f(NbnxmGpu* nb, int natoms_clear)
{
    cu_atomdata_t*      adat        = nb->atdat;
    const DeviceStream& localStream = *nb->deviceStreams[InteractionLocality::Local];
    clearDeviceBufferAsync(&adat->f, 0, natoms_clear, localStream);
}

/*! Clears nonbonded shift force output array and energy outputs on the GPU. */
static void nbnxn_cuda_clear_e_fshift(NbnxmGpu* nb)
{
    cu_atomdata_t*      adat        = nb->atdat;
    const DeviceStream& localStream = *nb->deviceStreams[InteractionLocality::Local];

    clearDeviceBufferAsync(&adat->fshift, 0, SHIFTS, localStream);
    clearDeviceBufferAsync(&adat->e_lj, 0, 1, localStream);
    clearDeviceBufferAsync(&adat->e_el, 0, 1, localStream);
}
void gpu_clear_outputs(NbnxmGpu* nb, bool computeVirial)
{
    nbnxn_cuda_clear_f(nb, nb->atdat->natoms);
    /* clear shift force array and energies if the outputs were
       used in the current step */
    if (computeVirial)
    {
        nbnxn_cuda_clear_e_fshift(nb);
    }
}
void gpu_init_atomdata(NbnxmGpu* nb, const nbnxn_atomdata_t* nbat)
{
    int                  nalloc, natoms;
    bool                 realloced;
    bool                 bDoTime       = nb->bDoTime;
    cu_timers_t*         timers        = nb->timers;
    cu_atomdata_t*       d_atdat       = nb->atdat;
    const DeviceContext& deviceContext = *nb->deviceContext_;
    const DeviceStream&  localStream   = *nb->deviceStreams[InteractionLocality::Local];

    natoms    = nbat->numAtoms();
    realloced = false;

    if (bDoTime)
    {
        /* time async copy */
        timers->atdat.openTimingRegion(localStream);
    }

    /* need to reallocate if we have to copy more atoms than the amount of space
       available; only allocate if we haven't initialized yet, i.e. d_atdat->natoms == -1 */
    if (natoms > d_atdat->nalloc)
    {
        nalloc = over_alloc_small(natoms);

        /* free up first if the arrays have already been initialized */
        if (d_atdat->nalloc != -1)
        {
            freeDeviceBuffer(&d_atdat->f);
            freeDeviceBuffer(&d_atdat->xq);
            freeDeviceBuffer(&d_atdat->atom_types);
            freeDeviceBuffer(&d_atdat->lj_comb);
        }

        allocateDeviceBuffer(&d_atdat->f, nalloc, deviceContext);
        allocateDeviceBuffer(&d_atdat->xq, nalloc, deviceContext);
        if (useLjCombRule(nb->nbparam->vdwtype))
        {
            allocateDeviceBuffer(&d_atdat->lj_comb, nalloc, deviceContext);
        }
        else
        {
            allocateDeviceBuffer(&d_atdat->atom_types, nalloc, deviceContext);
        }

        d_atdat->nalloc = nalloc;
        realloced       = true;
    }

    d_atdat->natoms       = natoms;
    d_atdat->natoms_local = nbat->natoms_local;

    /* need to clear GPU f output if realloc happened */
    if (realloced)
    {
        nbnxn_cuda_clear_f(nb, nalloc);
    }

    if (useLjCombRule(nb->nbparam->vdwtype))
    {
        static_assert(sizeof(d_atdat->lj_comb[0]) == sizeof(float2),
                      "Size of the LJ parameters element should be equal to the size of float2.");
        copyToDeviceBuffer(&d_atdat->lj_comb,
                           reinterpret_cast<const float2*>(nbat->params().lj_comb.data()), 0,
                           natoms, localStream, GpuApiCallBehavior::Async, nullptr);
    }
    else
    {
        static_assert(sizeof(d_atdat->atom_types[0]) == sizeof(nbat->params().type[0]),
                      "Sizes of host- and device-side atom types should be the same.");
        copyToDeviceBuffer(&d_atdat->atom_types, nbat->params().type.data(), 0, natoms, localStream,
                           GpuApiCallBehavior::Async, nullptr);
    }

    if (bDoTime)
    {
        timers->atdat.closeTimingRegion(localStream);
    }
}
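
/* Allocation-growth note (illustrative): over_alloc_small() pads the request
 * by a modest factor (around 20% in current GROMACS; treat the exact factor
 * as an implementation detail), so a slowly growing atom count does not
 * trigger a device reallocation, plus the force-buffer clear that follows it,
 * on every domain repartitioning.
 */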
void gpu_free(NbnxmGpu* nb)
{
    cudaError_t    stat;
    cu_atomdata_t* atdat;
    NBParamGpu*    nbparam;

    if (nb == nullptr)
    {
        return;
    }

    atdat   = nb->atdat;
    nbparam = nb->nbparam;

    if (nbparam->coulomb_tab
        && (nbparam->eeltype == eelTypeEWALD_TAB || nbparam->eeltype == eelTypeEWALD_TAB_TWIN))
    {
        destroyParamLookupTable(&nbparam->coulomb_tab, nbparam->coulomb_tab_texobj);
    }

    stat = cudaEventDestroy(nb->nonlocal_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on nb->nonlocal_done");
    stat = cudaEventDestroy(nb->misc_ops_and_local_H2D_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on nb->misc_ops_and_local_H2D_done");

    if (!useLjCombRule(nb->nbparam->vdwtype))
    {
        destroyParamLookupTable(&nbparam->nbfp, nbparam->nbfp_texobj);
    }

    if (nbparam->vdwtype == evdwTypeEWALDGEOM || nbparam->vdwtype == evdwTypeEWALDLB)
    {
        destroyParamLookupTable(&nbparam->nbfp_comb, nbparam->nbfp_comb_texobj);
    }

    freeDeviceBuffer(&atdat->shift_vec);
    freeDeviceBuffer(&atdat->fshift);
    freeDeviceBuffer(&atdat->e_lj);
    freeDeviceBuffer(&atdat->e_el);
    freeDeviceBuffer(&atdat->f);
    freeDeviceBuffer(&atdat->xq);
    freeDeviceBuffer(&atdat->atom_types);
    freeDeviceBuffer(&atdat->lj_comb);

    /* Free plist */
    auto* plist = nb->plist[InteractionLocality::Local];
    freeDeviceBuffer(&plist->sci);
    freeDeviceBuffer(&plist->cj4);
    freeDeviceBuffer(&plist->imask);
    freeDeviceBuffer(&plist->excl);
    sfree(plist);
    if (nb->bUseTwoStreams)
    {
        auto* plist_nl = nb->plist[InteractionLocality::NonLocal];
        freeDeviceBuffer(&plist_nl->sci);
        freeDeviceBuffer(&plist_nl->cj4);
        freeDeviceBuffer(&plist_nl->imask);
        freeDeviceBuffer(&plist_nl->excl);
        sfree(plist_nl);
    }

    /* Free nbst */
    pfree(nb->nbst.e_lj);
    nb->nbst.e_lj = nullptr;

    pfree(nb->nbst.e_el);
    nb->nbst.e_el = nullptr;

    pfree(nb->nbst.fshift);
    nb->nbst.fshift = nullptr;

    sfree(atdat);
    sfree(nbparam);
    sfree(nb->timings);
    delete nb->timers;
    delete nb;

    if (debug)
    {
        fprintf(debug, "Cleaned up CUDA data structures.\n");
    }
}
//! This function is documented in the header file
gmx_wallclock_gpu_nbnxn_t* gpu_get_timings(NbnxmGpu* nb)
{
    return (nb != nullptr && nb->bDoTime) ? nb->timings : nullptr;
}

void gpu_reset_timings(nonbonded_verlet_t* nbv)
{
    if (nbv->gpu_nbv && nbv->gpu_nbv->bDoTime)
    {
        init_timings(nbv->gpu_nbv->timings);
    }
}
int gpu_min_ci_balanced(NbnxmGpu* nb)
{
    return nb != nullptr ? gpu_min_ci_balanced_factor * nb->deviceContext_->deviceInfo().prop.multiProcessorCount
                         : 0;
}
gmx_bool gpu_is_kernel_ewald_analytical(const NbnxmGpu* nb)
{
    return ((nb->nbparam->eeltype == eelTypeEWALD_ANA) || (nb->nbparam->eeltype == eelTypeEWALD_ANA_TWIN));
}
void* gpu_get_xq(NbnxmGpu* nb)
{
    assert(nb);

    return static_cast<void*>(nb->atdat->xq);
}

DeviceBuffer<gmx::RVec> gpu_get_f(NbnxmGpu* nb)
{
    assert(nb);

    return reinterpret_cast<DeviceBuffer<gmx::RVec>>(nb->atdat->f);
}

DeviceBuffer<gmx::RVec> gpu_get_fshift(NbnxmGpu* nb)
{
    assert(nb);

    return reinterpret_cast<DeviceBuffer<gmx::RVec>>(nb->atdat->fshift);
}
/* Initialization for X buffer operations on GPU. */
/* TODO Remove explicit pinning of host arrays from here and manage in a more natural way. */
void nbnxn_gpu_init_x_to_nbat_x(const Nbnxm::GridSet& gridSet, NbnxmGpu* gpu_nbv)
{
    const DeviceStream& deviceStream  = *gpu_nbv->deviceStreams[InteractionLocality::Local];
    bool                bDoTime       = gpu_nbv->bDoTime;
    const int           maxNumColumns = gridSet.numColumnsMax();

    reallocateDeviceBuffer(&gpu_nbv->cxy_na, maxNumColumns * gridSet.grids().size(),
                           &gpu_nbv->ncxy_na, &gpu_nbv->ncxy_na_alloc, *gpu_nbv->deviceContext_);
    reallocateDeviceBuffer(&gpu_nbv->cxy_ind, maxNumColumns * gridSet.grids().size(),
                           &gpu_nbv->ncxy_ind, &gpu_nbv->ncxy_ind_alloc, *gpu_nbv->deviceContext_);

    for (unsigned int g = 0; g < gridSet.grids().size(); g++)
    {
        const Nbnxm::Grid& grid = gridSet.grids()[g];

        const int  numColumns      = grid.numColumns();
        const int* atomIndices     = gridSet.atomIndices().data();
        const int  atomIndicesSize = gridSet.atomIndices().size();
        const int* cxy_na          = grid.cxy_na().data();
        const int* cxy_ind         = grid.cxy_ind().data();

        reallocateDeviceBuffer(&gpu_nbv->atomIndices, atomIndicesSize, &gpu_nbv->atomIndicesSize,
                               &gpu_nbv->atomIndicesSize_alloc, *gpu_nbv->deviceContext_);

        if (atomIndicesSize > 0)
        {
            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(deviceStream);
            }

            copyToDeviceBuffer(&gpu_nbv->atomIndices, atomIndices, 0, atomIndicesSize, deviceStream,
                               GpuApiCallBehavior::Async, nullptr);

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(deviceStream);
            }
        }

        if (numColumns > 0)
        {
            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(deviceStream);
            }

            int* destPtr = &gpu_nbv->cxy_na[maxNumColumns * g];
            copyToDeviceBuffer(&destPtr, cxy_na, 0, numColumns, deviceStream,
                               GpuApiCallBehavior::Async, nullptr);

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(deviceStream);
            }

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(deviceStream);
            }

            destPtr = &gpu_nbv->cxy_ind[maxNumColumns * g];
            copyToDeviceBuffer(&destPtr, cxy_ind, 0, numColumns, deviceStream,
                               GpuApiCallBehavior::Async, nullptr);

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(deviceStream);
            }
        }
    }

    // The above data is transferred on the local stream but is a
    // dependency of the nonlocal stream (specifically the nonlocal X
    // buffer ops kernel). We therefore set a dependency to ensure
    // that the nonlocal stream waits on the local stream here.
    // This call records an event in the local stream:
    nbnxnInsertNonlocalGpuDependency(gpu_nbv, Nbnxm::InteractionLocality::Local);
    // ...and this call instructs the nonlocal stream to wait on that event:
    nbnxnInsertNonlocalGpuDependency(gpu_nbv, Nbnxm::InteractionLocality::NonLocal);
}
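
/* For reference, a minimal sketch of the event-based ordering that
 * nbnxnInsertNonlocalGpuDependency() is expected to provide on top of the raw
 * CUDA API (the event and stream names here are hypothetical; the real event
 * lives in NbnxmGpu and is created with cudaEventDisableTiming):
 *
 *     cudaEvent_t localDone;
 *     cudaEventCreateWithFlags(&localDone, cudaEventDisableTiming);
 *     cudaEventRecord(localDone, localStream);            // Local locality
 *     cudaStreamWaitEvent(nonLocalStream, localDone, 0);  // NonLocal locality
 *
 * This orders the two streams on the device without blocking the host.
 */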
/* Initialization for F buffer operations on GPU. */
void nbnxn_gpu_init_add_nbat_f_to_f(const int*                  cell,
                                    NbnxmGpu*                   gpu_nbv,
                                    int                         natoms_total,
                                    GpuEventSynchronizer* const localReductionDone)
{
    const DeviceStream& deviceStream = *gpu_nbv->deviceStreams[InteractionLocality::Local];

    GMX_ASSERT(localReductionDone, "localReductionDone should be a valid pointer");
    gpu_nbv->localFReductionDone = localReductionDone;

    if (natoms_total > 0)
    {
        reallocateDeviceBuffer(&gpu_nbv->cell, natoms_total, &gpu_nbv->ncell, &gpu_nbv->ncell_alloc,
                               *gpu_nbv->deviceContext_);
        copyToDeviceBuffer(&gpu_nbv->cell, cell, 0, natoms_total, deviceStream,
                           GpuApiCallBehavior::Async, nullptr);
    }
}

} // namespace Nbnxm