Simplify LJ parameter lookup
[alexxy/gromacs.git] / src / gromacs / nbnxm / cuda / nbnxm_cuda_data_mgmt.cu
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \file
 *  \brief Define CUDA implementation of nbnxn_gpu_data_mgmt.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gmxpre.h"

#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

// TODO We would like to move this down, but the way NbnxmGpu
//      is currently declared means this has to be before gpu_types.h
#include "nbnxm_cuda_types.h"

// TODO Remove this comment when the above order issue is resolved
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/device_stream_manager.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/pmalloc_cuda.h"
#include "gromacs/hardware/device_information.h"
#include "gromacs/hardware/device_management.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/gridset.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/nbnxm_gpu_data_mgmt.h"
#include "gromacs/nbnxm/pairlistsets.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"

#include "nbnxm_cuda.h"

namespace Nbnxm
{

/* This is a heuristically determined parameter for the Kepler and Maxwell
 * architectures: the minimum size of ci lists is obtained by multiplying
 * this constant with the number of multiprocessors on the current device.
 * Since the maximum number of blocks per multiprocessor is 16, the ideal
 * count for small systems is 32 or 48 blocks per multiprocessor. Because
 * there are some fluctuations in the generated block counts, we use
 * a target of 44 instead of the ideal value of 48.
 */
static unsigned int gpu_min_ci_balanced_factor = 44;

/* Fw. decl. */
static void nbnxn_cuda_clear_e_fshift(NbnxmGpu* nb);

/*! Initializes the atomdata structure the first time; it only gets filled at
    pair search. */
static void init_atomdata_first(NBAtomData* ad, int ntypes, const DeviceContext& deviceContext)
{
    ad->numTypes = ntypes;
    allocateDeviceBuffer(&ad->shiftVec, SHIFTS, deviceContext);
    ad->shiftVecUploaded = false;

    allocateDeviceBuffer(&ad->fShift, SHIFTS, deviceContext);
    allocateDeviceBuffer(&ad->eLJ, 1, deviceContext);
    allocateDeviceBuffer(&ad->eElec, 1, deviceContext);

    /* initialize to nullptr the pointers to data that is not allocated here and will
       need reallocation in gpu_init_atomdata */
    ad->xq = nullptr;
    ad->f  = nullptr;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    ad->numAtoms      = -1;
    ad->numAtomsAlloc = -1;
}

/*! Initializes the nonbonded parameter data structure. */
static void init_nbparam(NBParamGpu*                     nbp,
                         const interaction_const_t*      ic,
                         const PairlistParams&           listParams,
                         const nbnxn_atomdata_t::Params& nbatParams,
                         const DeviceContext&            deviceContext)
{
    const int ntypes = nbatParams.numTypes;

    set_cutoff_parameters(nbp, ic, listParams);

    /* The kernel code supports LJ combination rules (geometric and LB) for
     * all kernel types, but we only generate combination rule kernels where
     * they are useful. We currently only use LJ combination rule (geometric
     * and LB) kernels for plain cut-off LJ. On Maxwell the force-only kernels
     * speed up by 15% with PME and 20% with RF; the other kernels speed up
     * about half as much. For LJ force-switch the geometric rule would give a
     * 7% speed-up, but this combination is rarely used. LJ force-switch with
     * the LB rule is more common, but gives only a 1% speed-up.
     */
    nbp->vdwType  = nbnxmGpuPickVdwKernelType(ic, nbatParams.ljCombinationRule);
    nbp->elecType = nbnxmGpuPickElectrostaticsKernelType(ic, deviceContext.deviceInfo());

    /* generate table for PME */
    nbp->coulomb_tab = nullptr;
    if (nbp->elecType == ElecType::EwaldTab || nbp->elecType == ElecType::EwaldTabTwin)
    {
        GMX_RELEASE_ASSERT(ic->coulombEwaldTables, "Need valid Coulomb Ewald correction tables");
        init_ewald_coulomb_force_table(*ic->coulombEwaldTables, nbp, deviceContext);
    }

    /* set up LJ parameter lookup table */
    if (!useLjCombRule(nbp->vdwType))
    {
        static_assert(sizeof(decltype(nbp->nbfp)) == 2 * sizeof(decltype(*nbatParams.nbfp.data())),
                      "Mismatch in the size of host / device data types");
        initParamLookupTable(&nbp->nbfp,
                             &nbp->nbfp_texobj,
                             reinterpret_cast<const Float2*>(nbatParams.nbfp.data()),
                             ntypes * ntypes,
                             deviceContext);
    }

    /* set up LJ-PME parameter lookup table */
    if (ic->vdwtype == VanDerWaalsType::Pme)
    {
        static_assert(sizeof(decltype(nbp->nbfp_comb))
                              == 2 * sizeof(decltype(*nbatParams.nbfp_comb.data())),
                      "Mismatch in the size of host / device data types");
        initParamLookupTable(&nbp->nbfp_comb,
                             &nbp->nbfp_comb_texobj,
                             reinterpret_cast<const Float2*>(nbatParams.nbfp_comb.data()),
                             ntypes,
                             deviceContext);
    }
}

/*! Initializes simulation constant data. */
static void cuda_init_const(NbnxmGpu*                       nb,
                            const interaction_const_t*      ic,
                            const PairlistParams&           listParams,
                            const nbnxn_atomdata_t::Params& nbatParams)
{
    init_atomdata_first(nb->atdat, nbatParams.numTypes, *nb->deviceContext_);
    init_nbparam(nb->nbparam, ic, listParams, nbatParams, *nb->deviceContext_);

    /* clear energy and shift force outputs */
    nbnxn_cuda_clear_e_fshift(nb);
}

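/* Initializes the GPU data structures for nonbonded interactions: constant data,
 * the local pair list (plus the non-local one when bLocalAndNonlocal is set),
 * host-side staging buffers, timers, and the device streams to use. */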
NbnxmGpu* gpu_init(const gmx::DeviceStreamManager& deviceStreamManager,
                   const interaction_const_t*      ic,
                   const PairlistParams&           listParams,
                   const nbnxn_atomdata_t*         nbat,
                   bool                            bLocalAndNonlocal)
{
    auto nb            = new NbnxmGpu();
    nb->deviceContext_ = &deviceStreamManager.context();
    snew(nb->atdat, 1);
    snew(nb->nbparam, 1);
    snew(nb->plist[InteractionLocality::Local], 1);
    if (bLocalAndNonlocal)
    {
        snew(nb->plist[InteractionLocality::NonLocal], 1);
    }

    nb->bUseTwoStreams = bLocalAndNonlocal;

    nb->timers = new Nbnxm::GpuTimers();
    snew(nb->timings, 1);

    /* init nbst */
    pmalloc((void**)&nb->nbst.eLJ, sizeof(*nb->nbst.eLJ));
    pmalloc((void**)&nb->nbst.eElec, sizeof(*nb->nbst.eElec));
    pmalloc((void**)&nb->nbst.fShift, SHIFTS * sizeof(*nb->nbst.fShift));

    init_plist(nb->plist[InteractionLocality::Local]);

    /* local/non-local GPU streams */
    GMX_RELEASE_ASSERT(deviceStreamManager.streamIsValid(gmx::DeviceStreamType::NonBondedLocal),
                       "Local non-bonded stream should be initialized to use GPU for non-bonded.");
    nb->deviceStreams[InteractionLocality::Local] =
            &deviceStreamManager.stream(gmx::DeviceStreamType::NonBondedLocal);
    if (nb->bUseTwoStreams)
    {
        init_plist(nb->plist[InteractionLocality::NonLocal]);

        /* Note that the device we're running on does not have to support
         * priorities, because we are querying the priority range which in this
         * case will be a single value.
         */
        GMX_RELEASE_ASSERT(deviceStreamManager.streamIsValid(gmx::DeviceStreamType::NonBondedNonLocal),
                           "Non-local non-bonded stream should be initialized to use GPU for "
                           "non-bonded with domain decomposition.");
        nb->deviceStreams[InteractionLocality::NonLocal] =
                &deviceStreamManager.stream(gmx::DeviceStreamType::NonBondedNonLocal);
    }

    /* WARNING: CUDA timings are incorrect with multiple streams.
     *          This is the main reason why they are disabled by default.
     */
    // TODO: Consider turning on by default when we can detect nr of streams.
    nb->bDoTime = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);

    if (nb->bDoTime)
    {
        init_timings(nb->timings);
    }

    /* set the kernel type for the current GPU */
    /* pick L1 cache configuration */
    cuda_set_cacheconfig();

    cuda_init_const(nb, ic, listParams, nbat->params());

    nb->atomIndicesSize       = 0;
    nb->atomIndicesSize_alloc = 0;
    nb->ncxy_na               = 0;
    nb->ncxy_na_alloc         = 0;
    nb->ncxy_ind              = 0;
    nb->ncxy_ind_alloc        = 0;

    if (debug)
    {
        fprintf(debug, "Initialized CUDA data structures.\n");
    }

    return nb;
}

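/* Copies the PBC shift vectors to the device; done on every call when the box is
 * dynamic, otherwise only on the first call. */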
void gpu_upload_shiftvec(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom)
{
    NBAtomData*         adat        = nb->atdat;
    const DeviceStream& localStream = *nb->deviceStreams[InteractionLocality::Local];

    /* only upload if we have a dynamic box or the shift vectors have not been uploaded yet */
    if (nbatom->bDynamicBox || !adat->shiftVecUploaded)
    {
        static_assert(sizeof(adat->shiftVec[0]) == sizeof(nbatom->shift_vec[0]),
                      "Sizes of host- and device-side shift vectors should be the same.");
        copyToDeviceBuffer(&adat->shiftVec,
                           reinterpret_cast<const Float3*>(nbatom->shift_vec.data()),
                           0,
                           SHIFTS,
                           localStream,
                           GpuApiCallBehavior::Async,
                           nullptr);
        adat->shiftVecUploaded = true;
    }
}

/*! Clears the first natoms_clear elements of the GPU nonbonded force output array. */
static void nbnxn_cuda_clear_f(NbnxmGpu* nb, int natoms_clear)
{
    NBAtomData*         adat        = nb->atdat;
    const DeviceStream& localStream = *nb->deviceStreams[InteractionLocality::Local];
    clearDeviceBufferAsync(&adat->f, 0, natoms_clear, localStream);
}

/*! Clears nonbonded shift force output array and energy outputs on the GPU. */
static void nbnxn_cuda_clear_e_fshift(NbnxmGpu* nb)
{
    NBAtomData*         adat        = nb->atdat;
    const DeviceStream& localStream = *nb->deviceStreams[InteractionLocality::Local];

    clearDeviceBufferAsync(&adat->fShift, 0, SHIFTS, localStream);
    clearDeviceBufferAsync(&adat->eLJ, 0, 1, localStream);
    clearDeviceBufferAsync(&adat->eElec, 0, 1, localStream);
}

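/* Clears the force output array and, when the virial or energies are computed in
 * the current step, also the shift force and energy outputs. */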
void gpu_clear_outputs(NbnxmGpu* nb, bool computeVirial)
{
    nbnxn_cuda_clear_f(nb, nb->atdat->numAtoms);
    /* clear shift force array and energies if the outputs were
       used in the current step */
    if (computeVirial)
    {
        nbnxn_cuda_clear_e_fshift(nb);
    }
}

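/* (Re)allocates the per-atom device arrays when needed and copies the atom types
 * or the LJ combination-rule parameters for the current set of atoms. */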
void gpu_init_atomdata(NbnxmGpu* nb, const nbnxn_atomdata_t* nbat)
{
    int                  nalloc, natoms;
    bool                 realloced;
    bool                 bDoTime       = nb->bDoTime;
    Nbnxm::GpuTimers*    timers        = nb->timers;
    NBAtomData*          d_atdat       = nb->atdat;
    const DeviceContext& deviceContext = *nb->deviceContext_;
    const DeviceStream&  localStream   = *nb->deviceStreams[InteractionLocality::Local];

    natoms    = nbat->numAtoms();
    realloced = false;

    if (bDoTime)
    {
        /* time async copy */
        timers->atdat.openTimingRegion(localStream);
    }

    /* need to reallocate if we have to copy more atoms than the amount of space
       available; we only need to free first if the arrays have already been initialized,
       i.e. d_atdat->numAtomsAlloc != -1 */
    if (natoms > d_atdat->numAtomsAlloc)
    {
        nalloc = over_alloc_small(natoms);

        /* free up first if the arrays have already been initialized */
        if (d_atdat->numAtomsAlloc != -1)
        {
            freeDeviceBuffer(&d_atdat->f);
            freeDeviceBuffer(&d_atdat->xq);
            freeDeviceBuffer(&d_atdat->atomTypes);
            freeDeviceBuffer(&d_atdat->ljComb);
        }

        allocateDeviceBuffer(&d_atdat->f, nalloc, deviceContext);
        allocateDeviceBuffer(&d_atdat->xq, nalloc, deviceContext);
        if (useLjCombRule(nb->nbparam->vdwType))
        {
            allocateDeviceBuffer(&d_atdat->ljComb, nalloc, deviceContext);
        }
        else
        {
            allocateDeviceBuffer(&d_atdat->atomTypes, nalloc, deviceContext);
        }

        d_atdat->numAtomsAlloc = nalloc;
        realloced              = true;
    }

    d_atdat->numAtoms      = natoms;
    d_atdat->numAtomsLocal = nbat->natoms_local;

    /* need to clear GPU f output if realloc happened */
    if (realloced)
    {
        nbnxn_cuda_clear_f(nb, nalloc);
    }

    if (useLjCombRule(nb->nbparam->vdwType))
    {
        static_assert(sizeof(d_atdat->ljComb[0]) == sizeof(Float2),
                      "Size of the LJ parameters element should be equal to the size of float2.");
        copyToDeviceBuffer(&d_atdat->ljComb,
                           reinterpret_cast<const Float2*>(nbat->params().lj_comb.data()),
                           0,
                           natoms,
                           localStream,
                           GpuApiCallBehavior::Async,
                           nullptr);
    }
    else
    {
        static_assert(sizeof(d_atdat->atomTypes[0]) == sizeof(nbat->params().type[0]),
                      "Sizes of host- and device-side atom types should be the same.");
        copyToDeviceBuffer(&d_atdat->atomTypes,
                           nbat->params().type.data(),
                           0,
                           natoms,
                           localStream,
                           GpuApiCallBehavior::Async,
                           nullptr);
    }

    if (bDoTime)
    {
        timers->atdat.closeTimingRegion(localStream);
    }
}

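/* Frees all device buffers, parameter lookup tables, pair lists and host-side
 * staging memory owned by the NbnxmGpu object, then deletes the object itself. */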
void gpu_free(NbnxmGpu* nb)
{
    if (nb == nullptr)
    {
        return;
    }

    NBAtomData* atdat   = nb->atdat;
    NBParamGpu* nbparam = nb->nbparam;

    if ((nbparam->coulomb_tab != nullptr)
        && (nbparam->elecType == ElecType::EwaldTab || nbparam->elecType == ElecType::EwaldTabTwin))
    {
        destroyParamLookupTable(&nbparam->coulomb_tab, nbparam->coulomb_tab_texobj);
    }

    delete nb->timers;

    if (!useLjCombRule(nb->nbparam->vdwType))
    {
        destroyParamLookupTable(&nbparam->nbfp, nbparam->nbfp_texobj);
    }

    if (nbparam->vdwType == VdwType::EwaldGeom || nbparam->vdwType == VdwType::EwaldLB)
    {
        destroyParamLookupTable(&nbparam->nbfp_comb, nbparam->nbfp_comb_texobj);
    }

    freeDeviceBuffer(&atdat->shiftVec);
    freeDeviceBuffer(&atdat->fShift);

    freeDeviceBuffer(&atdat->eLJ);
    freeDeviceBuffer(&atdat->eElec);

    freeDeviceBuffer(&atdat->f);
    freeDeviceBuffer(&atdat->xq);
    freeDeviceBuffer(&atdat->atomTypes);
    freeDeviceBuffer(&atdat->ljComb);

    /* Free plist */
    auto* plist = nb->plist[InteractionLocality::Local];
    freeDeviceBuffer(&plist->sci);
    freeDeviceBuffer(&plist->cj4);
    freeDeviceBuffer(&plist->imask);
    freeDeviceBuffer(&plist->excl);
    sfree(plist);
    if (nb->bUseTwoStreams)
    {
        auto* plist_nl = nb->plist[InteractionLocality::NonLocal];
        freeDeviceBuffer(&plist_nl->sci);
        freeDeviceBuffer(&plist_nl->cj4);
        freeDeviceBuffer(&plist_nl->imask);
        freeDeviceBuffer(&plist_nl->excl);
        sfree(plist_nl);
    }

    /* Free nbst */
    pfree(nb->nbst.eLJ);
    nb->nbst.eLJ = nullptr;

    pfree(nb->nbst.eElec);
    nb->nbst.eElec = nullptr;

    pfree(nb->nbst.fShift);
    nb->nbst.fShift = nullptr;

    sfree(atdat);
    sfree(nbparam);
    sfree(nb->timings);
    delete nb;

    if (debug)
    {
        fprintf(debug, "Cleaned up CUDA data structures.\n");
    }
}

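/* Returns the minimum ci pair-list size required to obtain good load balancing,
 * estimated from the multiprocessor count of the current device. */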
int gpu_min_ci_balanced(NbnxmGpu* nb)
{
    return nb != nullptr ? gpu_min_ci_balanced_factor * nb->deviceContext_->deviceInfo().prop.multiProcessorCount
                         : 0;
}

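/* Returns the device pointer to the packed coordinate + charge array. */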
void* gpu_get_xq(NbnxmGpu* nb)
{
    assert(nb);

    return static_cast<void*>(nb->atdat->xq);
}

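/* Returns the device buffer holding the nonbonded force output. */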
DeviceBuffer<gmx::RVec> gpu_get_f(NbnxmGpu* nb)
{
    assert(nb);

    return reinterpret_cast<DeviceBuffer<gmx::RVec>>(nb->atdat->f);
}

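/* Returns the device buffer holding the shift force output. */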
DeviceBuffer<gmx::RVec> gpu_get_fshift(NbnxmGpu* nb)
{
    assert(nb);

    return reinterpret_cast<DeviceBuffer<gmx::RVec>>(nb->atdat->fShift);
}

/* Initialization for X buffer operations on GPU. */
/* TODO Remove explicit pinning from host arrays from here and manage in a more natural way */
void nbnxn_gpu_init_x_to_nbat_x(const Nbnxm::GridSet& gridSet, NbnxmGpu* gpu_nbv)
{
    const DeviceStream& localStream   = *gpu_nbv->deviceStreams[InteractionLocality::Local];
    bool                bDoTime       = gpu_nbv->bDoTime;
    const int           maxNumColumns = gridSet.numColumnsMax();

    reallocateDeviceBuffer(&gpu_nbv->cxy_na,
                           maxNumColumns * gridSet.grids().size(),
                           &gpu_nbv->ncxy_na,
                           &gpu_nbv->ncxy_na_alloc,
                           *gpu_nbv->deviceContext_);
    reallocateDeviceBuffer(&gpu_nbv->cxy_ind,
                           maxNumColumns * gridSet.grids().size(),
                           &gpu_nbv->ncxy_ind,
                           &gpu_nbv->ncxy_ind_alloc,
                           *gpu_nbv->deviceContext_);

    for (unsigned int g = 0; g < gridSet.grids().size(); g++)
    {

        const Nbnxm::Grid& grid = gridSet.grids()[g];

        const int  numColumns      = grid.numColumns();
        const int* atomIndices     = gridSet.atomIndices().data();
        const int  atomIndicesSize = gridSet.atomIndices().size();
        const int* cxy_na          = grid.cxy_na().data();
        const int* cxy_ind         = grid.cxy_ind().data();

        reallocateDeviceBuffer(&gpu_nbv->atomIndices,
                               atomIndicesSize,
                               &gpu_nbv->atomIndicesSize,
                               &gpu_nbv->atomIndicesSize_alloc,
                               *gpu_nbv->deviceContext_);

        if (atomIndicesSize > 0)
        {

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(localStream);
            }

            copyToDeviceBuffer(&gpu_nbv->atomIndices,
                               atomIndices,
                               0,
                               atomIndicesSize,
                               localStream,
                               GpuApiCallBehavior::Async,
                               nullptr);

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(localStream);
            }
        }

        if (numColumns > 0)
        {
            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(localStream);
            }

            int* destPtr = &gpu_nbv->cxy_na[maxNumColumns * g];
            copyToDeviceBuffer(
                    &destPtr, cxy_na, 0, numColumns, localStream, GpuApiCallBehavior::Async, nullptr);

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(localStream);
            }

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(localStream);
            }

            destPtr = &gpu_nbv->cxy_ind[maxNumColumns * g];
            copyToDeviceBuffer(
                    &destPtr, cxy_ind, 0, numColumns, localStream, GpuApiCallBehavior::Async, nullptr);

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(localStream);
            }
        }
    }


    if (gpu_nbv->bUseTwoStreams)
    {
        // The above data is transferred on the local stream but is a
        // dependency of the nonlocal stream (specifically the nonlocal X
        // buf ops kernel).  We therefore set a dependency to ensure
        // that the nonlocal stream waits on the local stream here.
        // This call records an event in the local stream:
        gpu_nbv->misc_ops_and_local_H2D_done.markEvent(
                *gpu_nbv->deviceStreams[Nbnxm::InteractionLocality::Local]);
        // ...and this call instructs the nonlocal stream to wait on that event:
        gpu_nbv->misc_ops_and_local_H2D_done.enqueueWaitEvent(
                *gpu_nbv->deviceStreams[Nbnxm::InteractionLocality::NonLocal]);
    }
}

} // namespace Nbnxm