/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define CUDA implementation of nbnxn_gpu_data_mgmt.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
// TODO We would like to move this down, but the way gmx_nbnxn_gpu_t
// is currently declared means this has to be before gpu_types.h
#include "nbnxm_cuda_types.h"

// TODO Remove this comment when the above order issue is resolved
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/pmalloc_cuda.h"
#include "gromacs/hardware/gpu_hw_info.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/gridset.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/pairlistsets.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"

#include "nbnxm_cuda.h"
/* This is a heuristically determined parameter, tuned for the Kepler and Maxwell
 * architectures, that sets the minimum size of ci lists: the minimum is obtained
 * by multiplying this constant with the number of multiprocessors on the current
 * device. Since the maximum number of blocks per multiprocessor is 16, the ideal
 * count for small systems is 32 or 48 blocks per multiprocessor. Because there is
 * a bit of fluctuation in the generated block counts, we use a target of 44
 * instead of the ideal value of 48.
 */
static unsigned int gpu_min_ci_balanced_factor = 44;
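/* For example, on a device with 80 multiprocessors this yields a minimum balanced
 * list size of 44*80 = 3520 ci entries (see gpu_min_ci_balanced() below). */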
static void nbnxn_cuda_clear_e_fshift(gmx_nbnxn_cuda_t *nb);

static void nbnxn_cuda_free_nbparam_table(cu_nbparam_t *nbparam);
/*! \brief Return whether combination rules are used.
 *
 * \param[in] nbparam  Pointer to the nonbonded parameter struct.
 * \return true if combination rules are used in this run, false otherwise.
 */
static inline bool useLjCombRule(const cu_nbparam_t *nbparam)
    return (nbparam->vdwtype == evdwCuCUTCOMBGEOM ||
            nbparam->vdwtype == evdwCuCUTCOMBLB);
/*! \brief Initializes the Ewald Coulomb correction GPU table.
 *
 *  Tabulates the Ewald Coulomb force and initializes the size/scale
 *  and the table GPU array. If called with an already allocated table,
 *  it just re-uploads the table.
 */
static void init_ewald_coulomb_force_table(const EwaldCorrectionTables &tables,
    if (nbp->coulomb_tab != nullptr)
        nbnxn_cuda_free_nbparam_table(nbp);

    nbp->coulomb_tab_scale = tables.scale;
    initParamLookupTable(nbp->coulomb_tab, nbp->coulomb_tab_texobj,
                         tables.tableF.data(), tables.tableF.size());
/*! Initializes the atomdata structure the first time; the data itself only gets filled at a later stage. */
static void init_atomdata_first(cu_atomdata_t *ad, int ntypes)
    stat = cudaMalloc((void**)&ad->shift_vec, SHIFTS*sizeof(*ad->shift_vec));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->shift_vec");
    ad->bShiftVecUploaded = false;

    stat = cudaMalloc((void**)&ad->fshift, SHIFTS*sizeof(*ad->fshift));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->fshift");

    stat = cudaMalloc((void**)&ad->e_lj, sizeof(*ad->e_lj));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->e_lj");
    stat = cudaMalloc((void**)&ad->e_el, sizeof(*ad->e_el));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->e_el");

    /* initialize to nullptr pointers to data that is not allocated here and will
       need reallocation in nbnxn_cuda_init_atomdata */

    /* size -1 indicates that the respective array hasn't been initialized yet */
/*! Selects the Ewald kernel type, analytical on SM 3.0 and later, tabulated on
    earlier GPUs, single or twin cut-off. */
static int pick_ewald_kernel_type(const interaction_const_t &ic)
    bool bTwinCut = (ic.rcoulomb != ic.rvdw);
    bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;

    /* Benchmarking/development environment variables to force the use of
       analytical or tabulated Ewald kernel. */
    bForceAnalyticalEwald = (getenv("GMX_CUDA_NB_ANA_EWALD") != nullptr);
    bForceTabulatedEwald  = (getenv("GMX_CUDA_NB_TAB_EWALD") != nullptr);

    if (bForceAnalyticalEwald && bForceTabulatedEwald)
        gmx_incons("Both analytical and tabulated Ewald CUDA non-bonded kernels "
                   "requested through environment variables.");

    /* By default use analytical Ewald. */
    bUseAnalyticalEwald = true;
    if (bForceAnalyticalEwald)
        fprintf(debug, "Using analytical Ewald CUDA kernels\n");
    else if (bForceTabulatedEwald)
        bUseAnalyticalEwald = false;
        fprintf(debug, "Using tabulated Ewald CUDA kernels\n");

    /* Use twin cut-off kernels if requested by bTwinCut or if the env. var.
       forces it (use it for debugging/benchmarking only). */
    if (!bTwinCut && (getenv("GMX_CUDA_NB_EWALD_TWINCUT") == nullptr))
        kernel_type = bUseAnalyticalEwald ? eelCuEWALD_ANA : eelCuEWALD_TAB;
        kernel_type = bUseAnalyticalEwald ? eelCuEWALD_ANA_TWIN : eelCuEWALD_TAB_TWIN;
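    /* Example (benchmarking/debugging only): setting both GMX_CUDA_NB_TAB_EWALD and
     * GMX_CUDA_NB_EWALD_TWINCUT in the environment forces the tabulated twin
     * cut-off kernel, e.g.:
     *   GMX_CUDA_NB_TAB_EWALD=1 GMX_CUDA_NB_EWALD_TWINCUT=1 gmx mdrun ...
     */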
/*! Copies all parameters related to the cut-off from ic to nbp */
static void set_cutoff_parameters(cu_nbparam_t              *nbp,
                                  const interaction_const_t *ic,
                                  const PairlistParams      &listParams)
    nbp->ewald_beta        = ic->ewaldcoeff_q;
    nbp->sh_ewald          = ic->sh_ewald;
    nbp->epsfac            = ic->epsfac;
    nbp->two_k_rf          = 2.0 * ic->k_rf;
    nbp->c_rf              = ic->c_rf;
    nbp->rvdw_sq           = ic->rvdw * ic->rvdw;
    nbp->rcoulomb_sq       = ic->rcoulomb * ic->rcoulomb;
    nbp->rlistOuter_sq     = listParams.rlistOuter * listParams.rlistOuter;
    nbp->rlistInner_sq     = listParams.rlistInner * listParams.rlistInner;
    nbp->useDynamicPruning = listParams.useDynamicPruning;

    nbp->sh_lj_ewald       = ic->sh_lj_ewald;
    nbp->ewaldcoeff_lj     = ic->ewaldcoeff_lj;

    nbp->rvdw_switch       = ic->rvdw_switch;
    nbp->dispersion_shift  = ic->dispersion_shift;
    nbp->repulsion_shift   = ic->repulsion_shift;
    nbp->vdw_switch        = ic->vdw_switch;
/*! Initializes the nonbonded parameter data structure. */
static void init_nbparam(cu_nbparam_t                   *nbp,
                         const interaction_const_t      *ic,
                         const PairlistParams           &listParams,
                         const nbnxn_atomdata_t::Params &nbatParams)
    ntypes = nbatParams.numTypes;

    set_cutoff_parameters(nbp, ic, listParams);

    /* The kernel code supports LJ combination rules (geometric and LB) for
     * all kernel types, but we only generate useful combination rule kernels.
     * We currently only use LJ combination rule (geometric and LB) kernels
     * for plain cut-off LJ. On Maxwell the force-only kernels speed up 15%
     * with PME and 20% with RF, the other kernels speed up about half as much.
     * For LJ force-switch the geometric rule would give 7% speed-up, but this
     * combination is rarely used. LJ force-switch with LB rule is more common,
     * but gives only 1% speed-up.
     */
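    /* Consequently, only plain cut-off LJ with a matching combination rule maps to
     * the evdwCuCUTCOMB* kernel flavors below; for all other vdW settings
     * useLjCombRule() is false and the per type-pair nbfp lookup table set up
     * further down is used instead. */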
    if (ic->vdwtype == evdwCUT)
        switch (ic->vdw_modifier)
            case eintmodPOTSHIFT:
                switch (nbatParams.comb_rule)
                        nbp->vdwtype = evdwCuCUT;
                        nbp->vdwtype = evdwCuCUTCOMBGEOM;
                        nbp->vdwtype = evdwCuCUTCOMBLB;
                        gmx_incons("The requested LJ combination rule is not implemented in the CUDA GPU accelerated kernels!");
            case eintmodFORCESWITCH:
                nbp->vdwtype = evdwCuFSWITCH;
            case eintmodPOTSWITCH:
                nbp->vdwtype = evdwCuPSWITCH;
                gmx_incons("The requested VdW interaction modifier is not implemented in the CUDA GPU accelerated kernels!");
    else if (ic->vdwtype == evdwPME)
        if (ic->ljpme_comb_rule == ljcrGEOM)
            assert(nbatParams.comb_rule == ljcrGEOM);
            nbp->vdwtype = evdwCuEWALDGEOM;
            assert(nbatParams.comb_rule == ljcrLB);
            nbp->vdwtype = evdwCuEWALDLB;
        gmx_incons("The requested VdW type is not implemented in the CUDA GPU accelerated kernels!");

    if (ic->eeltype == eelCUT)
        nbp->eeltype = eelCuCUT;
    else if (EEL_RF(ic->eeltype))
        nbp->eeltype = eelCuRF;
    else if ((EEL_PME(ic->eeltype) || ic->eeltype == eelEWALD))
        nbp->eeltype = pick_ewald_kernel_type(*ic);
        /* Shouldn't happen, as this is checked when choosing the Verlet scheme */
        gmx_incons("The requested electrostatics type is not implemented in the CUDA GPU accelerated kernels!");

    /* generate table for PME */
    nbp->coulomb_tab = nullptr;
    if (nbp->eeltype == eelCuEWALD_TAB || nbp->eeltype == eelCuEWALD_TAB_TWIN)
        GMX_RELEASE_ASSERT(ic->coulombEwaldTables, "Need valid Coulomb Ewald correction tables");
        init_ewald_coulomb_force_table(*ic->coulombEwaldTables, nbp);

    /* set up LJ parameter lookup table */
    if (!useLjCombRule(nbp))
        initParamLookupTable(nbp->nbfp, nbp->nbfp_texobj,
                             nbatParams.nbfp.data(), 2*ntypes*ntypes);
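    /* The nbfp table holds two Lennard-Jones coefficients per i,j type pair,
     * which is why 2*ntypes*ntypes elements are uploaded above. */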
    /* set up LJ-PME parameter lookup table */
    if (ic->vdwtype == evdwPME)
        initParamLookupTable(nbp->nbfp_comb, nbp->nbfp_comb_texobj,
                             nbatParams.nbfp_comb.data(), 2*ntypes);
/*! Re-generates the GPU Ewald force table, resets rlist, and updates the
 *  electrostatics type, switching to twin cut-off (or back) if needed. */
void gpu_pme_loadbal_update_param(const nonbonded_verlet_t    *nbv,
                                  const interaction_const_t   *ic)
    if (!nbv || !nbv->useGpu())
    cu_nbparam_t *nbp = nbv->gpu_nbv->nbparam;

    set_cutoff_parameters(nbp, ic, nbv->pairlistSets().params());

    nbp->eeltype = pick_ewald_kernel_type(*ic);

    GMX_RELEASE_ASSERT(ic->coulombEwaldTables, "Need valid Coulomb Ewald correction tables");
    init_ewald_coulomb_force_table(*ic->coulombEwaldTables, nbp);
/*! Initializes the pair list data structure. */
static void init_plist(cu_plist_t *pl)
    /* initialize to nullptr pointers to data that is not allocated here and will
       need reallocation in nbnxn_gpu_init_pairlist */

    /* size -1 indicates that the respective array hasn't been initialized yet */
    pl->imask_nalloc  = -1;
    pl->excl_nalloc   = -1;
    pl->haveFreshList = false;

/*! Initializes the timings data structure. */
static void init_timings(gmx_wallclock_gpu_nbnxn_t *t)
    for (i = 0; i < 2; i++)
        for (j = 0; j < 2; j++)
            t->ktime[i][j].t = 0.0;
            t->ktime[i][j].c = 0;

    t->pruneTime.t        = 0.0;
    t->dynamicPruneTime.c = 0;
    t->dynamicPruneTime.t = 0.0;
/*! Initializes simulation constant data. */
static void cuda_init_const(gmx_nbnxn_cuda_t               *nb,
                            const interaction_const_t      *ic,
                            const PairlistParams           &listParams,
                            const nbnxn_atomdata_t::Params &nbatParams)
    init_atomdata_first(nb->atdat, nbatParams.numTypes);
    init_nbparam(nb->nbparam, ic, listParams, nbatParams);

    /* clear energy and shift force outputs */
    nbnxn_cuda_clear_e_fshift(nb);

gpu_init(const gmx_device_info_t   *deviceInfo,
         const interaction_const_t *ic,
         const PairlistParams      &listParams,
         const nbnxn_atomdata_t    *nbat,
         gmx_bool                   bLocalAndNonlocal)
    gmx_nbnxn_cuda_t *nb;

    snew(nb->nbparam, 1);
    snew(nb->plist[InteractionLocality::Local], 1);
    if (bLocalAndNonlocal)
        snew(nb->plist[InteractionLocality::NonLocal], 1);

    nb->bUseTwoStreams = bLocalAndNonlocal;

    nb->timers = new cu_timers_t();
    snew(nb->timings, 1);

    pmalloc((void**)&nb->nbst.e_lj, sizeof(*nb->nbst.e_lj));
    pmalloc((void**)&nb->nbst.e_el, sizeof(*nb->nbst.e_el));
    pmalloc((void**)&nb->nbst.fshift, SHIFTS * sizeof(*nb->nbst.fshift));

    init_plist(nb->plist[InteractionLocality::Local]);

    /* set device info, just point it to the right GPU among the detected ones */
    nb->dev_info = deviceInfo;

    /* local/non-local GPU streams */
    stat = cudaStreamCreate(&nb->stream[InteractionLocality::Local]);
    CU_RET_ERR(stat, "cudaStreamCreate on stream[InteractionLocality::Local] failed");
    if (nb->bUseTwoStreams)
        init_plist(nb->plist[InteractionLocality::NonLocal]);

        /* Note that the device we're running on does not have to support
         * priorities, because we are querying the priority range, which in this
         * case will be a single value.
         */
        int highest_priority;
        stat = cudaDeviceGetStreamPriorityRange(nullptr, &highest_priority);
        CU_RET_ERR(stat, "cudaDeviceGetStreamPriorityRange failed");

        stat = cudaStreamCreateWithPriority(&nb->stream[InteractionLocality::NonLocal],
        CU_RET_ERR(stat, "cudaStreamCreateWithPriority on stream[InteractionLocality::NonLocal] failed");
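        /* The non-local stream is created with the highest stream priority available
         * on the device (queried above), while the local stream uses the default
         * priority; this lets the GPU scheduler favor non-local work. */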
    /* init events for synchronization (timing disabled for performance reasons!) */
    stat = cudaEventCreateWithFlags(&nb->nonlocal_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on nonlocal_done failed");
    stat = cudaEventCreateWithFlags(&nb->misc_ops_and_local_H2D_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on misc_ops_and_local_H2D_done failed");

    nb->xAvailableOnDevice   = new GpuEventSynchronizer();
    nb->xNonLocalCopyD2HDone = new GpuEventSynchronizer();

    /* WARNING: CUDA timings are incorrect with multiple streams.
     * This is the main reason why they are disabled by default.
     */
    // TODO: Consider turning on by default when we can detect nr of streams.
    nb->bDoTime = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
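    /* GPU timing can thus be enabled by setting the GMX_ENABLE_GPU_TIMING environment
     * variable (any value); per the warning above, the reported timings are only
     * reliable when a single stream is in use. */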
    init_timings(nb->timings);

    /* set the kernel type for the current GPU */
    /* pick L1 cache configuration */
    cuda_set_cacheconfig();

    cuda_init_const(nb, ic, listParams, nbat->params());

    nb->natoms_alloc          = 0;
    nb->atomIndicesSize       = 0;
    nb->atomIndicesSize_alloc = 0;
    nb->ncxy_na_alloc         = 0;
    nb->ncxy_ind_alloc        = 0;
    nb->nfrvec_alloc          = 0;

    fprintf(debug, "Initialized CUDA data structures.\n");

void gpu_init_pairlist(gmx_nbnxn_cuda_t          *nb,
                       const NbnxnPairlistGpu    *h_plist,
                       const InteractionLocality  iloc)
    bool          bDoTime = (nb->bDoTime && !h_plist->sci.empty());
    cudaStream_t  stream  = nb->stream[iloc];
    cu_plist_t   *d_plist = nb->plist[iloc];

    if (d_plist->na_c < 0)
        d_plist->na_c = h_plist->na_ci;
        if (d_plist->na_c != h_plist->na_ci)
            sprintf(sbuf, "In cu_init_plist: the #atoms per cell has changed (from %d to %d)",
                    d_plist->na_c, h_plist->na_ci);

    gpu_timers_t::Interaction &iTimers = nb->timers->interaction[iloc];

        iTimers.pl_h2d.openTimingRegion(stream);
        iTimers.didPairlistH2D = true;

    Context context = nullptr;
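    /* Reallocate the device buffers if needed and copy the sci, cj4 and excl pairlist
     * data asynchronously on this locality's stream; the imask buffer is only resized
     * below, it is not copied from the host. */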
    reallocateDeviceBuffer(&d_plist->sci, h_plist->sci.size(),
                           &d_plist->nsci, &d_plist->sci_nalloc, context);
    copyToDeviceBuffer(&d_plist->sci, h_plist->sci.data(), 0, h_plist->sci.size(),
                       stream, GpuApiCallBehavior::Async,
                       bDoTime ? iTimers.pl_h2d.fetchNextEvent() : nullptr);

    reallocateDeviceBuffer(&d_plist->cj4, h_plist->cj4.size(),
                           &d_plist->ncj4, &d_plist->cj4_nalloc, context);
    copyToDeviceBuffer(&d_plist->cj4, h_plist->cj4.data(), 0, h_plist->cj4.size(),
                       stream, GpuApiCallBehavior::Async,
                       bDoTime ? iTimers.pl_h2d.fetchNextEvent() : nullptr);

    reallocateDeviceBuffer(&d_plist->imask, h_plist->cj4.size()*c_nbnxnGpuClusterpairSplit,
                           &d_plist->nimask, &d_plist->imask_nalloc, context);

    reallocateDeviceBuffer(&d_plist->excl, h_plist->excl.size(),
                           &d_plist->nexcl, &d_plist->excl_nalloc, context);
    copyToDeviceBuffer(&d_plist->excl, h_plist->excl.data(), 0, h_plist->excl.size(),
                       stream, GpuApiCallBehavior::Async,
                       bDoTime ? iTimers.pl_h2d.fetchNextEvent() : nullptr);

        iTimers.pl_h2d.closeTimingRegion(stream);

    /* the next use of this list will be the first one, so we need to prune */
    d_plist->haveFreshList = true;
void gpu_upload_shiftvec(gmx_nbnxn_cuda_t       *nb,
                         const nbnxn_atomdata_t *nbatom)
    cu_atomdata_t *adat = nb->atdat;
    cudaStream_t   ls   = nb->stream[InteractionLocality::Local];

    /* only if we have a dynamic box */
    if (nbatom->bDynamicBox || !adat->bShiftVecUploaded)
        cu_copy_H2D_async(adat->shift_vec, nbatom->shift_vec.data(),
                          SHIFTS * sizeof(*adat->shift_vec), ls);
        adat->bShiftVecUploaded = true;

/*! Clears the first natoms_clear elements of the GPU nonbonded force output array. */
static void nbnxn_cuda_clear_f(gmx_nbnxn_cuda_t *nb, int natoms_clear)
    cu_atomdata_t *adat = nb->atdat;
    cudaStream_t   ls   = nb->stream[InteractionLocality::Local];

    stat = cudaMemsetAsync(adat->f, 0, natoms_clear * sizeof(*adat->f), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on f failed");

/*! Clears nonbonded shift force output array and energy outputs on the GPU. */
static void nbnxn_cuda_clear_e_fshift(gmx_nbnxn_cuda_t *nb)
    cu_atomdata_t *adat = nb->atdat;
    cudaStream_t   ls   = nb->stream[InteractionLocality::Local];

    stat = cudaMemsetAsync(adat->fshift, 0, SHIFTS * sizeof(*adat->fshift), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on fshift failed");
    stat = cudaMemsetAsync(adat->e_lj, 0, sizeof(*adat->e_lj), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on e_lj failed");
    stat = cudaMemsetAsync(adat->e_el, 0, sizeof(*adat->e_el), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on e_el failed");
void gpu_clear_outputs(gmx_nbnxn_cuda_t *nb,
    nbnxn_cuda_clear_f(nb, nb->atdat->natoms);
    /* clear shift force array and energies if the outputs were
       used in the current step */
        nbnxn_cuda_clear_e_fshift(nb);

void gpu_init_atomdata(gmx_nbnxn_cuda_t       *nb,
                       const nbnxn_atomdata_t *nbat)
    bool           bDoTime = nb->bDoTime;
    cu_timers_t   *timers  = nb->timers;
    cu_atomdata_t *d_atdat = nb->atdat;
    cudaStream_t   ls      = nb->stream[InteractionLocality::Local];

    natoms = nbat->numAtoms();

        /* time async copy */
        timers->atdat.openTimingRegion(ls);

    /* need to reallocate if we have to copy more atoms than the amount of space
       available and only allocate if we haven't initialized yet, i.e. d_atdat->nalloc == -1 */
    if (natoms > d_atdat->nalloc)
        nalloc = over_alloc_small(natoms);

        /* free up first if the arrays have already been initialized */
        if (d_atdat->nalloc != -1)
            freeDeviceBuffer(&d_atdat->f);
            freeDeviceBuffer(&d_atdat->xq);
            freeDeviceBuffer(&d_atdat->atom_types);
            freeDeviceBuffer(&d_atdat->lj_comb);

        stat = cudaMalloc((void **)&d_atdat->f, nalloc*sizeof(*d_atdat->f));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->f");
        stat = cudaMalloc((void **)&d_atdat->xq, nalloc*sizeof(*d_atdat->xq));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->xq");
        if (useLjCombRule(nb->nbparam))
            stat = cudaMalloc((void **)&d_atdat->lj_comb, nalloc*sizeof(*d_atdat->lj_comb));
            CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->lj_comb");
            stat = cudaMalloc((void **)&d_atdat->atom_types, nalloc*sizeof(*d_atdat->atom_types));
            CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->atom_types");

        d_atdat->nalloc = nalloc;

    d_atdat->natoms       = natoms;
    d_atdat->natoms_local = nbat->natoms_local;

    /* need to clear GPU f output if realloc happened */
        nbnxn_cuda_clear_f(nb, nalloc);

    if (useLjCombRule(nb->nbparam))
        cu_copy_H2D_async(d_atdat->lj_comb, nbat->params().lj_comb.data(),
                          natoms*sizeof(*d_atdat->lj_comb), ls);
        cu_copy_H2D_async(d_atdat->atom_types, nbat->params().type.data(),
                          natoms*sizeof(*d_atdat->atom_types), ls);

        timers->atdat.closeTimingRegion(ls);
static void nbnxn_cuda_free_nbparam_table(cu_nbparam_t *nbparam)
    if (nbparam->eeltype == eelCuEWALD_TAB || nbparam->eeltype == eelCuEWALD_TAB_TWIN)
        destroyParamLookupTable(nbparam->coulomb_tab, nbparam->coulomb_tab_texobj);

void gpu_free(gmx_nbnxn_cuda_t *nb)
    cu_atomdata_t *atdat;
    cu_nbparam_t  *nbparam;

    nbparam = nb->nbparam;

    nbnxn_cuda_free_nbparam_table(nbparam);

    stat = cudaEventDestroy(nb->nonlocal_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on nb->nonlocal_done");
    stat = cudaEventDestroy(nb->misc_ops_and_local_H2D_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on nb->misc_ops_and_local_H2D_done");

    /* The non-local counters/stream (second in the array) are needed only with DD. */
    for (int i = 0; i <= (nb->bUseTwoStreams ? 1 : 0); i++)
        stat = cudaStreamDestroy(nb->stream[i]);
        CU_RET_ERR(stat, "cudaStreamDestroy failed on stream");

    if (!useLjCombRule(nb->nbparam))
        destroyParamLookupTable(nbparam->nbfp, nbparam->nbfp_texobj);

    if (nbparam->vdwtype == evdwCuEWALDGEOM || nbparam->vdwtype == evdwCuEWALDLB)
        destroyParamLookupTable(nbparam->nbfp_comb, nbparam->nbfp_comb_texobj);

    stat = cudaFree(atdat->shift_vec);
    CU_RET_ERR(stat, "cudaFree failed on atdat->shift_vec");
    stat = cudaFree(atdat->fshift);
    CU_RET_ERR(stat, "cudaFree failed on atdat->fshift");

    stat = cudaFree(atdat->e_lj);
    CU_RET_ERR(stat, "cudaFree failed on atdat->e_lj");
    stat = cudaFree(atdat->e_el);
    CU_RET_ERR(stat, "cudaFree failed on atdat->e_el");

    freeDeviceBuffer(&atdat->f);
    freeDeviceBuffer(&atdat->xq);
    freeDeviceBuffer(&atdat->atom_types);
    freeDeviceBuffer(&atdat->lj_comb);

    auto *plist = nb->plist[InteractionLocality::Local];
    freeDeviceBuffer(&plist->sci);
    freeDeviceBuffer(&plist->cj4);
    freeDeviceBuffer(&plist->imask);
    freeDeviceBuffer(&plist->excl);

    if (nb->bUseTwoStreams)
        auto *plist_nl = nb->plist[InteractionLocality::NonLocal];
        freeDeviceBuffer(&plist_nl->sci);
        freeDeviceBuffer(&plist_nl->cj4);
        freeDeviceBuffer(&plist_nl->imask);
        freeDeviceBuffer(&plist_nl->excl);

    pfree(nb->nbst.e_lj);
    nb->nbst.e_lj = nullptr;

    pfree(nb->nbst.e_el);
    nb->nbst.e_el = nullptr;

    pfree(nb->nbst.fshift);
    nb->nbst.fshift = nullptr;

    fprintf(debug, "Cleaned up CUDA data structures.\n");

//! This function is documented in the header file
gmx_wallclock_gpu_nbnxn_t *gpu_get_timings(gmx_nbnxn_cuda_t *nb)
    return (nb != nullptr && nb->bDoTime) ? nb->timings : nullptr;

void gpu_reset_timings(nonbonded_verlet_t* nbv)
    if (nbv->gpu_nbv && nbv->gpu_nbv->bDoTime)
        init_timings(nbv->gpu_nbv->timings);

int gpu_min_ci_balanced(gmx_nbnxn_cuda_t *nb)
    return nb != nullptr ?
           gpu_min_ci_balanced_factor*nb->dev_info->prop.multiProcessorCount : 0;
gmx_bool gpu_is_kernel_ewald_analytical(const gmx_nbnxn_cuda_t *nb)
    return ((nb->nbparam->eeltype == eelCuEWALD_ANA) ||
            (nb->nbparam->eeltype == eelCuEWALD_ANA_TWIN));

void *gpu_get_command_stream(gmx_nbnxn_gpu_t           *nb,
                             const InteractionLocality  iloc)
    return static_cast<void *>(&nb->stream[iloc]);

void *gpu_get_xq(gmx_nbnxn_gpu_t *nb)
    return static_cast<void *>(nb->atdat->xq);

void *gpu_get_f(gmx_nbnxn_gpu_t *nb)
    return static_cast<void *>(nb->atdat->f);

rvec *gpu_get_fshift(gmx_nbnxn_gpu_t *nb)
    return reinterpret_cast<rvec *>(nb->atdat->fshift);

/* Initialization for X buffer operations on GPU. */
/* TODO Remove explicit pinning of host arrays from here and manage it in a more natural way */
void nbnxn_gpu_init_x_to_nbat_x(const Nbnxm::GridSet &gridSet,
                                gmx_nbnxn_gpu_t      *gpu_nbv)
    cudaStream_t stream        = gpu_nbv->stream[InteractionLocality::Local];
    bool         bDoTime       = gpu_nbv->bDoTime;
    const int    maxNumColumns = gridSet.numColumnsMax();

    reallocateDeviceBuffer(&gpu_nbv->cxy_na, maxNumColumns*gridSet.grids().size(),
                           &gpu_nbv->ncxy_na, &gpu_nbv->ncxy_na_alloc, nullptr);
    reallocateDeviceBuffer(&gpu_nbv->cxy_ind, maxNumColumns*gridSet.grids().size(),
                           &gpu_nbv->ncxy_ind, &gpu_nbv->ncxy_ind_alloc, nullptr);
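    /* cxy_na and cxy_ind are laid out per grid with a fixed stride of maxNumColumns:
     * grid g occupies elements [g*maxNumColumns, g*maxNumColumns + numColumns), as
     * used by the per-grid copies in the loop below. */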
    for (unsigned int g = 0; g < gridSet.grids().size(); g++)
        const Nbnxm::Grid &grid = gridSet.grids()[g];

        const int  numColumns        = grid.numColumns();
        const int *atomIndices       = gridSet.atomIndices().data();
        const int  atomIndicesSize   = gridSet.atomIndices().size();
        const int *cxy_na            = grid.cxy_na().data();
        const int *cxy_ind           = grid.cxy_ind().data();
        // TODO Should be done once per gridset
        const int  numRealAtomsTotal = gridSet.numRealAtomsTotal();

        reallocateDeviceBuffer(&gpu_nbv->xrvec, numRealAtomsTotal, &gpu_nbv->natoms, &gpu_nbv->natoms_alloc, nullptr);
        reallocateDeviceBuffer(&gpu_nbv->atomIndices, atomIndicesSize, &gpu_nbv->atomIndicesSize, &gpu_nbv->atomIndicesSize_alloc, nullptr);

        if (atomIndicesSize > 0)
            gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(stream);

            copyToDeviceBuffer(&gpu_nbv->atomIndices, atomIndices, 0, atomIndicesSize, stream, GpuApiCallBehavior::Async, nullptr);

            gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(stream);

            gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(stream);

            int* destPtr = &gpu_nbv->cxy_na[maxNumColumns*g];
            copyToDeviceBuffer(&destPtr, cxy_na, 0, numColumns, stream, GpuApiCallBehavior::Async, nullptr);

            gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(stream);

            gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(stream);

            destPtr = &gpu_nbv->cxy_ind[maxNumColumns*g];
            copyToDeviceBuffer(&destPtr, cxy_ind, 0, numColumns, stream, GpuApiCallBehavior::Async, nullptr);

            gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(stream);

    // The above data is transferred on the local stream but is a
    // dependency of the nonlocal stream (specifically the nonlocal X
    // buf ops kernel). We therefore set a dependency to ensure
    // that the nonlocal stream waits on the local stream here.
    // This call records an event in the local stream:
    nbnxnInsertNonlocalGpuDependency(gpu_nbv, Nbnxm::InteractionLocality::Local);
    // ...and this call instructs the nonlocal stream to wait on that event:
    nbnxnInsertNonlocalGpuDependency(gpu_nbv, Nbnxm::InteractionLocality::NonLocal);

/* Initialization for F buffer operations on GPU. */
void nbnxn_gpu_init_add_nbat_f_to_f(const int        *cell,
                                    gmx_nbnxn_gpu_t  *gpu_nbv,
    cudaStream_t stream = gpu_nbv->stream[InteractionLocality::Local];

    reallocateDeviceBuffer(&gpu_nbv->frvec, natoms_total, &gpu_nbv->nfrvec, &gpu_nbv->nfrvec_alloc, nullptr);

    if (natoms_total > 0)
        reallocateDeviceBuffer(&gpu_nbv->cell, natoms_total, &gpu_nbv->ncell, &gpu_nbv->ncell_alloc, nullptr);
        copyToDeviceBuffer(&gpu_nbv->cell, cell, 0, natoms_total, stream, GpuApiCallBehavior::Async, nullptr);