/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define CUDA implementation of nbnxn_gpu_data_mgmt.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gmxpre.h"

#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
// TODO We would like to move this down, but the way NbnxmGpu
// is currently declared means this has to be before gpu_types.h
#include "nbnxm_cuda_types.h"

// TODO Remove this comment when the above order issue is resolved
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/device_stream_manager.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/gpu_utils/pmalloc_cuda.h"
#include "gromacs/hardware/gpu_hw_info.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/gridset.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/pairlistsets.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"

#include "nbnxm_cuda.h"

namespace Nbnxm
{
/* This is a heuristically determined parameter for the Kepler and Maxwell
 * architectures: the minimum size of the ci lists is obtained by multiplying
 * this constant by the number of multiprocessors on the current device.
 * Since the maximum number of blocks per multiprocessor is 16, the ideal
 * count for small systems is 32 or 48 blocks per multiprocessor. Because
 * there are some fluctuations in the generated block counts, we use
 * a target of 44 instead of the ideal value of 48.
 */
static unsigned int gpu_min_ci_balanced_factor = 44;
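
/* Illustrative example (numbers assumed, not from the source): on a device
 * with 80 multiprocessors, gpu_min_ci_balanced() below returns
 * 44 * 80 = 3520 as the minimum ci list size considered well balanced. */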
/* Fw. decl. */
static void nbnxn_cuda_clear_e_fshift(NbnxmGpu* nb);

/* Fw. decl. */
static void nbnxn_cuda_free_nbparam_table(cu_nbparam_t* nbparam);
/*! \brief Returns whether combination rules are used.
 *
 * \param[in] nbparam  Pointer to the nonbonded parameter struct.
 * \return true if combination rules are used in this run, false otherwise
 */
static inline bool useLjCombRule(const cu_nbparam_t* nbparam)
{
    return (nbparam->vdwtype == evdwCuCUTCOMBGEOM || nbparam->vdwtype == evdwCuCUTCOMBLB);
}
/*! \brief Initializes the Ewald Coulomb correction GPU table.

    Tabulates the Ewald Coulomb force and initializes the size/scale
    and the table GPU array. If called with an already allocated table,
    it just re-uploads the table.
 */
static void init_ewald_coulomb_force_table(const EwaldCorrectionTables& tables, cu_nbparam_t* nbp)
{
    if (nbp->coulomb_tab != nullptr)
    {
        nbnxn_cuda_free_nbparam_table(nbp);
    }

    nbp->coulomb_tab_scale = tables.scale;
    initParamLookupTable(nbp->coulomb_tab, nbp->coulomb_tab_texobj, tables.tableF.data(),
                         tables.tableF.size());
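
    /* Note: initParamLookupTable() allocates the device buffer, uploads
     * tables.tableF, and (on architectures where textures are used) creates
     * the texture object through which the kernels read the table. */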
}

/*! Initializes the atomdata structure first time; it only gets filled at
    pair-search. */
static void init_atomdata_first(cu_atomdata_t* ad, int ntypes)
{
    cudaError_t stat;

    ad->ntypes = ntypes;
    stat       = cudaMalloc((void**)&ad->shift_vec, SHIFTS * sizeof(*ad->shift_vec));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->shift_vec");
    ad->bShiftVecUploaded = false;

    stat = cudaMalloc((void**)&ad->fshift, SHIFTS * sizeof(*ad->fshift));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->fshift");

    stat = cudaMalloc((void**)&ad->e_lj, sizeof(*ad->e_lj));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->e_lj");
    stat = cudaMalloc((void**)&ad->e_el, sizeof(*ad->e_el));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->e_el");

    /* initialize to nullptr the pointers to data that is not allocated here and will
       need reallocation in nbnxn_cuda_init_atomdata */
    ad->xq = nullptr;
    ad->f  = nullptr;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    ad->natoms = -1;
    ad->nalloc = -1;
}
/*! Selects the Ewald kernel type, analytical on SM 3.0 and later, tabulated on
    earlier GPUs, single or twin cut-off. */
static int pick_ewald_kernel_type(const interaction_const_t& ic)
{
    bool bTwinCut = (ic.rcoulomb != ic.rvdw);
    bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
    int  kernel_type;

    /* Benchmarking/development environment variables to force the use of
       analytical or tabulated Ewald kernel. */
    bForceAnalyticalEwald = (getenv("GMX_CUDA_NB_ANA_EWALD") != nullptr);
    bForceTabulatedEwald  = (getenv("GMX_CUDA_NB_TAB_EWALD") != nullptr);

    if (bForceAnalyticalEwald && bForceTabulatedEwald)
    {
        gmx_incons(
                "Both analytical and tabulated Ewald CUDA non-bonded kernels "
                "requested through environment variables.");
    }

    /* By default use analytical Ewald. */
    bUseAnalyticalEwald = true;
    if (bForceAnalyticalEwald)
    {
        if (debug)
        {
            fprintf(debug, "Using analytical Ewald CUDA kernels\n");
        }
    }
    else if (bForceTabulatedEwald)
    {
        bUseAnalyticalEwald = false;

        if (debug)
        {
            fprintf(debug, "Using tabulated Ewald CUDA kernels\n");
        }
    }

    /* Use twin cut-off kernels if requested by bTwinCut or if the env. var.
       forces it (use it for debugging/benchmarking only). */
    if (!bTwinCut && (getenv("GMX_CUDA_NB_EWALD_TWINCUT") == nullptr))
    {
        kernel_type = bUseAnalyticalEwald ? eelCuEWALD_ANA : eelCuEWALD_TAB;
    }
    else
    {
        kernel_type = bUseAnalyticalEwald ? eelCuEWALD_ANA_TWIN : eelCuEWALD_TAB_TWIN;
    }

    return kernel_type;
}
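
/* Usage sketch (benchmarking/debugging only; hypothetical command line):
 *   GMX_CUDA_NB_TAB_EWALD=1 GMX_CUDA_NB_EWALD_TWINCUT=1 gmx mdrun ...
 * selects eelCuEWALD_TAB_TWIN even where the analytical single cut-off
 * kernel would be the default. */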
/*! Copies all parameters related to the cut-off from ic to nbp */
static void set_cutoff_parameters(cu_nbparam_t* nbp, const interaction_const_t* ic, const PairlistParams& listParams)
{
    nbp->ewald_beta        = ic->ewaldcoeff_q;
    nbp->sh_ewald          = ic->sh_ewald;
    nbp->epsfac            = ic->epsfac;
    nbp->two_k_rf          = 2.0 * ic->k_rf;
    nbp->c_rf              = ic->c_rf;
    nbp->rvdw_sq           = ic->rvdw * ic->rvdw;
    nbp->rcoulomb_sq       = ic->rcoulomb * ic->rcoulomb;
    nbp->rlistOuter_sq     = listParams.rlistOuter * listParams.rlistOuter;
    nbp->rlistInner_sq     = listParams.rlistInner * listParams.rlistInner;
    nbp->useDynamicPruning = listParams.useDynamicPruning;

    nbp->sh_lj_ewald   = ic->sh_lj_ewald;
    nbp->ewaldcoeff_lj = ic->ewaldcoeff_lj;

    nbp->rvdw_switch      = ic->rvdw_switch;
    nbp->dispersion_shift = ic->dispersion_shift;
    nbp->repulsion_shift  = ic->repulsion_shift;
    nbp->vdw_switch       = ic->vdw_switch;
}
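
/* Note: the cut-offs and list radii above are stored squared (rvdw_sq,
 * rcoulomb_sq, rlist*_sq) because the kernels compare squared pair
 * distances, which avoids computing a sqrt in the inner loop. */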
/*! Initializes the nonbonded parameter data structure. */
static void init_nbparam(cu_nbparam_t*                   nbp,
                         const interaction_const_t*      ic,
                         const PairlistParams&           listParams,
                         const nbnxn_atomdata_t::Params& nbatParams)
{
    int ntypes;

    ntypes = nbatParams.numTypes;

    set_cutoff_parameters(nbp, ic, listParams);

    /* The kernel code supports LJ combination rules (geometric and LB) for
     * all kernel types, but we only generate useful combination rule kernels.
     * We currently only use LJ combination rule (geometric and LB) kernels
     * for plain cut-off LJ. On Maxwell the force-only kernels speed up by 15%
     * with PME and 20% with RF; the other kernels speed up by about half as much.
     * For LJ force-switch the geometric rule would give a 7% speed-up, but this
     * combination is rarely used. LJ force-switch with LB rule is more common,
     * but gives only a 1% speed-up.
     */
    if (ic->vdwtype == evdwCUT)
    {
        switch (ic->vdw_modifier)
        {
            case eintmodNONE:
            case eintmodPOTSHIFT:
                switch (nbatParams.comb_rule)
                {
                    case ljcrNONE: nbp->vdwtype = evdwCuCUT; break;
                    case ljcrGEOM: nbp->vdwtype = evdwCuCUTCOMBGEOM; break;
                    case ljcrLB: nbp->vdwtype = evdwCuCUTCOMBLB; break;
                    default:
                        gmx_incons(
                                "The requested LJ combination rule is not implemented in the CUDA "
                                "GPU accelerated kernels!");
                }
                break;
            case eintmodFORCESWITCH: nbp->vdwtype = evdwCuFSWITCH; break;
            case eintmodPOTSWITCH: nbp->vdwtype = evdwCuPSWITCH; break;
            default:
                gmx_incons(
                        "The requested VdW interaction modifier is not implemented in the CUDA GPU "
                        "accelerated kernels!");
        }
    }
    else if (ic->vdwtype == evdwPME)
    {
        if (ic->ljpme_comb_rule == ljcrGEOM)
        {
            assert(nbatParams.comb_rule == ljcrGEOM);
            nbp->vdwtype = evdwCuEWALDGEOM;
        }
        else
        {
            assert(nbatParams.comb_rule == ljcrLB);
            nbp->vdwtype = evdwCuEWALDLB;
        }
    }
    else
    {
        gmx_incons(
                "The requested VdW type is not implemented in the CUDA GPU accelerated kernels!");
    }

    if (ic->eeltype == eelCUT)
    {
        nbp->eeltype = eelCuCUT;
    }
    else if (EEL_RF(ic->eeltype))
    {
        nbp->eeltype = eelCuRF;
    }
    else if ((EEL_PME(ic->eeltype) || ic->eeltype == eelEWALD))
    {
        nbp->eeltype = pick_ewald_kernel_type(*ic);
    }
    else
    {
        /* Shouldn't happen, as this is checked when choosing Verlet-scheme */
        gmx_incons(
                "The requested electrostatics type is not implemented in the CUDA GPU accelerated "
                "kernels!");
    }

    /* generate table for PME */
    nbp->coulomb_tab = nullptr;
    if (nbp->eeltype == eelCuEWALD_TAB || nbp->eeltype == eelCuEWALD_TAB_TWIN)
    {
        GMX_RELEASE_ASSERT(ic->coulombEwaldTables, "Need valid Coulomb Ewald correction tables");
        init_ewald_coulomb_force_table(*ic->coulombEwaldTables, nbp);
    }

    /* set up LJ parameter lookup table */
    if (!useLjCombRule(nbp))
    {
        initParamLookupTable(nbp->nbfp, nbp->nbfp_texobj, nbatParams.nbfp.data(), 2 * ntypes * ntypes);
    }

    /* set up LJ-PME parameter lookup table */
    if (ic->vdwtype == evdwPME)
    {
        initParamLookupTable(nbp->nbfp_comb, nbp->nbfp_comb_texobj, nbatParams.nbfp_comb.data(), 2 * ntypes);
    }
}
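
/* Note on the lookup-table sizes above: nbfp stores a (C6, C12) pair for
 * every pair of atom types, hence 2 * ntypes * ntypes entries, while
 * nbfp_comb stores one (C6, C12) pair per type, hence 2 * ntypes entries. */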
/*! Re-generates the GPU Ewald force table, resets rlist, and updates the
 *  electrostatics type switching to twin cut-off (or back) if needed. */
void gpu_pme_loadbal_update_param(const nonbonded_verlet_t* nbv, const interaction_const_t* ic)
{
    if (!nbv || !nbv->useGpu())
    {
        return;
    }
    cu_nbparam_t* nbp = nbv->gpu_nbv->nbparam;

    set_cutoff_parameters(nbp, ic, nbv->pairlistSets().params());

    nbp->eeltype = pick_ewald_kernel_type(*ic);

    GMX_RELEASE_ASSERT(ic->coulombEwaldTables, "Need valid Coulomb Ewald correction tables");
    init_ewald_coulomb_force_table(*ic->coulombEwaldTables, nbp);
}
/*! Initializes the pair list data structure. */
static void init_plist(cu_plist_t* pl)
{
    /* initialize to nullptr pointers to data that is not allocated here and will
       need reallocation in nbnxn_gpu_init_pairlist */
    pl->sci   = nullptr;
    pl->cj4   = nullptr;
    pl->imask = nullptr;
    pl->excl  = nullptr;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    pl->na_c          = -1;
    pl->nsci          = -1;
    pl->sci_nalloc    = -1;
    pl->ncj4          = -1;
    pl->cj4_nalloc    = -1;
    pl->nimask        = -1;
    pl->imask_nalloc  = -1;
    pl->nexcl         = -1;
    pl->excl_nalloc   = -1;
    pl->haveFreshList = false;
}
/*! Initializes the timings data structure. */
static void init_timings(gmx_wallclock_gpu_nbnxn_t* t)
{
    int i, j;

    t->nb_h2d_t = 0.0;
    t->nb_d2h_t = 0.0;
    t->nb_c     = 0;
    t->pl_h2d_t = 0.0;
    t->pl_h2d_c = 0;

    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            t->ktime[i][j].t = 0.0;
            t->ktime[i][j].c = 0;
        }
    }
    t->pruneTime.c        = 0;
    t->pruneTime.t        = 0.0;
    t->dynamicPruneTime.c = 0;
    t->dynamicPruneTime.t = 0.0;
}
/*! Initializes simulation constant data. */
static void cuda_init_const(NbnxmGpu*                       nb,
                            const interaction_const_t*      ic,
                            const PairlistParams&           listParams,
                            const nbnxn_atomdata_t::Params& nbatParams)
{
    init_atomdata_first(nb->atdat, nbatParams.numTypes);
    init_nbparam(nb->nbparam, ic, listParams, nbatParams);

    /* clear energy and shift force outputs */
    nbnxn_cuda_clear_e_fshift(nb);
}
NbnxmGpu* gpu_init(const gmx::DeviceStreamManager& deviceStreamManager,
                   const interaction_const_t*      ic,
                   const PairlistParams&           listParams,
                   const nbnxn_atomdata_t*         nbat,
                   bool                            bLocalAndNonlocal)
{
    cudaError_t stat;

    auto nb            = new NbnxmGpu();
    nb->deviceContext_ = &deviceStreamManager.context();
    snew(nb->atdat, 1);
    snew(nb->nbparam, 1);
    snew(nb->plist[InteractionLocality::Local], 1);
    if (bLocalAndNonlocal)
    {
        snew(nb->plist[InteractionLocality::NonLocal], 1);
    }

    nb->bUseTwoStreams = bLocalAndNonlocal;

    nb->timers = new cu_timers_t();
    snew(nb->timings, 1);

    pmalloc((void**)&nb->nbst.e_lj, sizeof(*nb->nbst.e_lj));
    pmalloc((void**)&nb->nbst.e_el, sizeof(*nb->nbst.e_el));
    pmalloc((void**)&nb->nbst.fshift, SHIFTS * sizeof(*nb->nbst.fshift));
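
    /* pmalloc() returns page-locked (pinned) host memory; pinning is what
     * allows the asynchronous device-to-host copies of the energy and
     * shift-force outputs to overlap with other work. */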

    init_plist(nb->plist[InteractionLocality::Local]);

    /* local/non-local GPU streams */
    GMX_RELEASE_ASSERT(deviceStreamManager.streamIsValid(gmx::DeviceStreamType::NonBondedLocal),
                       "Local non-bonded stream should be initialized to use GPU for non-bonded.");
    nb->deviceStreams[InteractionLocality::Local] =
            &deviceStreamManager.stream(gmx::DeviceStreamType::NonBondedLocal);
    if (nb->bUseTwoStreams)
    {
        init_plist(nb->plist[InteractionLocality::NonLocal]);

        /* Note that the device we're running on does not have to support
         * priorities, because we are querying the priority range which in this
         * case will be a single value.
         */
        GMX_RELEASE_ASSERT(deviceStreamManager.streamIsValid(gmx::DeviceStreamType::NonBondedNonLocal),
                           "Non-local non-bonded stream should be initialized to use GPU for "
                           "non-bonded with domain decomposition.");
        nb->deviceStreams[InteractionLocality::NonLocal] =
                &deviceStreamManager.stream(gmx::DeviceStreamType::NonBondedNonLocal);
    }

    /* init events for synchronization (timing disabled for performance reasons!) */
    stat = cudaEventCreateWithFlags(&nb->nonlocal_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on nonlocal_done failed");
    stat = cudaEventCreateWithFlags(&nb->misc_ops_and_local_H2D_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on misc_ops_and_local_H2D_done failed");

    nb->xNonLocalCopyD2HDone = new GpuEventSynchronizer();

    /* WARNING: CUDA timings are incorrect with multiple streams.
     *          This is the main reason why they are disabled by default.
     */
    // TODO: Consider turning on by default when we can detect nr of streams.
    nb->bDoTime = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
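
    /* E.g. running with GMX_ENABLE_GPU_TIMING=1 set in the environment turns
     * the timings on; per the warning above they are only reliable when a
     * single stream is in use. */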

    if (nb->bDoTime)
    {
        init_timings(nb->timings);
    }

    /* set the kernel type for the current GPU */
    /* pick L1 cache configuration */
    cuda_set_cacheconfig();

    cuda_init_const(nb, ic, listParams, nbat->params());

    nb->atomIndicesSize       = 0;
    nb->atomIndicesSize_alloc = 0;
    nb->ncxy_na               = 0;
    nb->ncxy_na_alloc         = 0;
    nb->ncxy_ind              = 0;
    nb->ncxy_ind_alloc        = 0;

    if (debug)
    {
        fprintf(debug, "Initialized CUDA data structures.\n");
    }

    return nb;
}
void gpu_init_pairlist(NbnxmGpu* nb, const NbnxnPairlistGpu* h_plist, const InteractionLocality iloc)
{
    char                sbuf[STRLEN];
    bool                bDoTime      = (nb->bDoTime && !h_plist->sci.empty());
    const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
    cu_plist_t*         d_plist      = nb->plist[iloc];

    if (d_plist->na_c < 0)
    {
        d_plist->na_c = h_plist->na_ci;
    }
    else
    {
        if (d_plist->na_c != h_plist->na_ci)
        {
            sprintf(sbuf, "In gpu_init_pairlist: the #atoms per cell has changed (from %d to %d)",
                    d_plist->na_c, h_plist->na_ci);
            gmx_incons(sbuf);
        }
    }

    gpu_timers_t::Interaction& iTimers = nb->timers->interaction[iloc];

    if (bDoTime)
    {
        iTimers.pl_h2d.openTimingRegion(deviceStream);
        iTimers.didPairlistH2D = true;
    }

    const DeviceContext& deviceContext = *nb->deviceContext_;

    reallocateDeviceBuffer(&d_plist->sci, h_plist->sci.size(), &d_plist->nsci, &d_plist->sci_nalloc,
                           deviceContext);
    copyToDeviceBuffer(&d_plist->sci, h_plist->sci.data(), 0, h_plist->sci.size(), deviceStream,
                       GpuApiCallBehavior::Async, bDoTime ? iTimers.pl_h2d.fetchNextEvent() : nullptr);

    reallocateDeviceBuffer(&d_plist->cj4, h_plist->cj4.size(), &d_plist->ncj4, &d_plist->cj4_nalloc,
                           deviceContext);
    copyToDeviceBuffer(&d_plist->cj4, h_plist->cj4.data(), 0, h_plist->cj4.size(), deviceStream,
                       GpuApiCallBehavior::Async, bDoTime ? iTimers.pl_h2d.fetchNextEvent() : nullptr);

    reallocateDeviceBuffer(&d_plist->imask, h_plist->cj4.size() * c_nbnxnGpuClusterpairSplit,
                           &d_plist->nimask, &d_plist->imask_nalloc, deviceContext);
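
    /* Note: unlike sci, cj4 and excl, no host-to-device copy is issued for
     * imask; it is presumably filled on the device by the pair-list pruning
     * kernels. */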

    reallocateDeviceBuffer(&d_plist->excl, h_plist->excl.size(), &d_plist->nexcl,
                           &d_plist->excl_nalloc, deviceContext);
    copyToDeviceBuffer(&d_plist->excl, h_plist->excl.data(), 0, h_plist->excl.size(), deviceStream,
                       GpuApiCallBehavior::Async, bDoTime ? iTimers.pl_h2d.fetchNextEvent() : nullptr);

    if (bDoTime)
    {
        iTimers.pl_h2d.closeTimingRegion(deviceStream);
    }

    /* the next use of this list will be the first one, so we need to prune */
    d_plist->haveFreshList = true;
}
void gpu_upload_shiftvec(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom)
{
    cu_atomdata_t* adat = nb->atdat;
    cudaStream_t   ls   = nb->deviceStreams[InteractionLocality::Local]->stream();

    /* only if we have a dynamic box */
    if (nbatom->bDynamicBox || !adat->bShiftVecUploaded)
    {
        cu_copy_H2D_async(adat->shift_vec, nbatom->shift_vec.data(), SHIFTS * sizeof(*adat->shift_vec), ls);
        adat->bShiftVecUploaded = true;
    }
}
/*! Clears the first natoms_clear elements of the GPU nonbonded force output array. */
static void nbnxn_cuda_clear_f(NbnxmGpu* nb, int natoms_clear)
{
    cudaError_t    stat;
    cu_atomdata_t* adat = nb->atdat;
    cudaStream_t   ls   = nb->deviceStreams[InteractionLocality::Local]->stream();

    stat = cudaMemsetAsync(adat->f, 0, natoms_clear * sizeof(*adat->f), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on f failed");
}
/*! Clears nonbonded shift force output array and energy outputs on the GPU. */
static void nbnxn_cuda_clear_e_fshift(NbnxmGpu* nb)
{
    cudaError_t    stat;
    cu_atomdata_t* adat = nb->atdat;
    cudaStream_t   ls   = nb->deviceStreams[InteractionLocality::Local]->stream();

    stat = cudaMemsetAsync(adat->fshift, 0, SHIFTS * sizeof(*adat->fshift), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on fshift failed");
    stat = cudaMemsetAsync(adat->e_lj, 0, sizeof(*adat->e_lj), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on e_lj failed");
    stat = cudaMemsetAsync(adat->e_el, 0, sizeof(*adat->e_el), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on e_el failed");
}
void gpu_clear_outputs(NbnxmGpu* nb, bool computeVirial)
{
    nbnxn_cuda_clear_f(nb, nb->atdat->natoms);
    /* clear shift force array and energies if the outputs were
       used in the current step */
    if (computeVirial)
    {
        nbnxn_cuda_clear_e_fshift(nb);
    }
}
void gpu_init_atomdata(NbnxmGpu* nb, const nbnxn_atomdata_t* nbat)
{
    cudaError_t         stat;
    int                 nalloc, natoms;
    bool                realloced;
    bool                bDoTime      = nb->bDoTime;
    cu_timers_t*        timers       = nb->timers;
    cu_atomdata_t*      d_atdat      = nb->atdat;
    const DeviceStream& deviceStream = *nb->deviceStreams[InteractionLocality::Local];

    natoms    = nbat->numAtoms();
    realloced = false;

    if (bDoTime)
    {
        /* time async copy */
        timers->atdat.openTimingRegion(deviceStream);
    }

    /* need to reallocate if we have to copy more atoms than the amount of space
       available and only allocate if we haven't initialized yet, i.e. d_atdat->nalloc == -1 */
    if (natoms > d_atdat->nalloc)
    {
        nalloc = over_alloc_small(natoms);

        /* free up first if the arrays have already been initialized */
        if (d_atdat->nalloc != -1)
        {
            freeDeviceBuffer(&d_atdat->f);
            freeDeviceBuffer(&d_atdat->xq);
            freeDeviceBuffer(&d_atdat->atom_types);
            freeDeviceBuffer(&d_atdat->lj_comb);
        }

        stat = cudaMalloc((void**)&d_atdat->f, nalloc * sizeof(*d_atdat->f));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->f");
        stat = cudaMalloc((void**)&d_atdat->xq, nalloc * sizeof(*d_atdat->xq));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->xq");
        if (useLjCombRule(nb->nbparam))
        {
            stat = cudaMalloc((void**)&d_atdat->lj_comb, nalloc * sizeof(*d_atdat->lj_comb));
            CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->lj_comb");
        }
        else
        {
            stat = cudaMalloc((void**)&d_atdat->atom_types, nalloc * sizeof(*d_atdat->atom_types));
            CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->atom_types");
        }

        d_atdat->nalloc = nalloc;
        realloced       = true;
    }

    d_atdat->natoms       = natoms;
    d_atdat->natoms_local = nbat->natoms_local;

    /* need to clear GPU f output if realloc happened */
    if (realloced)
    {
        nbnxn_cuda_clear_f(nb, nalloc);
    }

    if (useLjCombRule(nb->nbparam))
    {
        cu_copy_H2D_async(d_atdat->lj_comb, nbat->params().lj_comb.data(),
                          natoms * sizeof(*d_atdat->lj_comb), deviceStream.stream());
    }
    else
    {
        cu_copy_H2D_async(d_atdat->atom_types, nbat->params().type.data(),
                          natoms * sizeof(*d_atdat->atom_types), deviceStream.stream());
    }

    if (bDoTime)
    {
        timers->atdat.closeTimingRegion(deviceStream);
    }
}
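
/* Design note: over_alloc_small() above over-allocates the device buffers so
 * that small fluctuations in the local atom count, e.g. under domain
 * decomposition, do not force a reallocation at every repartitioning. */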
static void nbnxn_cuda_free_nbparam_table(cu_nbparam_t* nbparam)
{
    if (nbparam->eeltype == eelCuEWALD_TAB || nbparam->eeltype == eelCuEWALD_TAB_TWIN)
    {
        destroyParamLookupTable(nbparam->coulomb_tab, nbparam->coulomb_tab_texobj);
    }
}
void gpu_free(NbnxmGpu* nb)
{
    cudaError_t    stat;
    cu_atomdata_t* atdat;
    cu_nbparam_t*  nbparam;

    if (nb == nullptr)
    {
        return;
    }

    atdat   = nb->atdat;
    nbparam = nb->nbparam;

    nbnxn_cuda_free_nbparam_table(nbparam);

    stat = cudaEventDestroy(nb->nonlocal_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on nb->nonlocal_done");
    stat = cudaEventDestroy(nb->misc_ops_and_local_H2D_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on nb->misc_ops_and_local_H2D_done");

    delete nb->timers;

    if (!useLjCombRule(nb->nbparam))
    {
        destroyParamLookupTable(nbparam->nbfp, nbparam->nbfp_texobj);
    }

    if (nbparam->vdwtype == evdwCuEWALDGEOM || nbparam->vdwtype == evdwCuEWALDLB)
    {
        destroyParamLookupTable(nbparam->nbfp_comb, nbparam->nbfp_comb_texobj);
    }

    stat = cudaFree(atdat->shift_vec);
    CU_RET_ERR(stat, "cudaFree failed on atdat->shift_vec");
    stat = cudaFree(atdat->fshift);
    CU_RET_ERR(stat, "cudaFree failed on atdat->fshift");

    stat = cudaFree(atdat->e_lj);
    CU_RET_ERR(stat, "cudaFree failed on atdat->e_lj");
    stat = cudaFree(atdat->e_el);
    CU_RET_ERR(stat, "cudaFree failed on atdat->e_el");

    freeDeviceBuffer(&atdat->f);
    freeDeviceBuffer(&atdat->xq);
    freeDeviceBuffer(&atdat->atom_types);
    freeDeviceBuffer(&atdat->lj_comb);

    auto* plist = nb->plist[InteractionLocality::Local];
    freeDeviceBuffer(&plist->sci);
    freeDeviceBuffer(&plist->cj4);
    freeDeviceBuffer(&plist->imask);
    freeDeviceBuffer(&plist->excl);
    sfree(plist);
    if (nb->bUseTwoStreams)
    {
        auto* plist_nl = nb->plist[InteractionLocality::NonLocal];
        freeDeviceBuffer(&plist_nl->sci);
        freeDeviceBuffer(&plist_nl->cj4);
        freeDeviceBuffer(&plist_nl->imask);
        freeDeviceBuffer(&plist_nl->excl);
        sfree(plist_nl);
    }

    pfree(nb->nbst.e_lj);
    nb->nbst.e_lj = nullptr;

    pfree(nb->nbst.e_el);
    nb->nbst.e_el = nullptr;

    pfree(nb->nbst.fshift);
    nb->nbst.fshift = nullptr;

    sfree(atdat);
    sfree(nbparam);
    sfree(nb->timings);
    delete nb;

    if (debug)
    {
        fprintf(debug, "Cleaned up CUDA data structures.\n");
    }
}
//! This function is documented in the header file
gmx_wallclock_gpu_nbnxn_t* gpu_get_timings(NbnxmGpu* nb)
{
    return (nb != nullptr && nb->bDoTime) ? nb->timings : nullptr;
}
void gpu_reset_timings(nonbonded_verlet_t* nbv)
{
    if (nbv->gpu_nbv && nbv->gpu_nbv->bDoTime)
    {
        init_timings(nbv->gpu_nbv->timings);
    }
}
int gpu_min_ci_balanced(NbnxmGpu* nb)
{
    return nb != nullptr ? gpu_min_ci_balanced_factor * nb->deviceContext_->deviceInfo().prop.multiProcessorCount
                         : 0;
}
gmx_bool gpu_is_kernel_ewald_analytical(const NbnxmGpu* nb)
{
    return ((nb->nbparam->eeltype == eelCuEWALD_ANA) || (nb->nbparam->eeltype == eelCuEWALD_ANA_TWIN));
}
void* gpu_get_xq(NbnxmGpu* nb)
{
    assert(nb);

    return static_cast<void*>(nb->atdat->xq);
}

DeviceBuffer<gmx::RVec> gpu_get_f(NbnxmGpu* nb)
{
    assert(nb);

    return reinterpret_cast<DeviceBuffer<gmx::RVec>>(nb->atdat->f);
}

DeviceBuffer<gmx::RVec> gpu_get_fshift(NbnxmGpu* nb)
{
    assert(nb);

    return reinterpret_cast<DeviceBuffer<gmx::RVec>>(nb->atdat->fshift);
}
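
/* Note on the casts above: GPU-enabled builds use single precision, so
 * gmx::RVec is three packed floats and is layout-compatible with the
 * underlying float3 device allocations, which is what makes the
 * reinterpret_casts safe. */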
/* Initialization for X buffer operations on GPU. */
/* TODO Remove explicit pinning from host arrays from here and manage in a more natural way*/
void nbnxn_gpu_init_x_to_nbat_x(const Nbnxm::GridSet& gridSet, NbnxmGpu* gpu_nbv)
{
    const DeviceStream& deviceStream  = *gpu_nbv->deviceStreams[InteractionLocality::Local];
    bool                bDoTime       = gpu_nbv->bDoTime;
    const int           maxNumColumns = gridSet.numColumnsMax();

    reallocateDeviceBuffer(&gpu_nbv->cxy_na, maxNumColumns * gridSet.grids().size(),
                           &gpu_nbv->ncxy_na, &gpu_nbv->ncxy_na_alloc, *gpu_nbv->deviceContext_);
    reallocateDeviceBuffer(&gpu_nbv->cxy_ind, maxNumColumns * gridSet.grids().size(),
                           &gpu_nbv->ncxy_ind, &gpu_nbv->ncxy_ind_alloc, *gpu_nbv->deviceContext_);

    for (unsigned int g = 0; g < gridSet.grids().size(); g++)
    {
        const Nbnxm::Grid& grid = gridSet.grids()[g];

        const int  numColumns      = grid.numColumns();
        const int* atomIndices     = gridSet.atomIndices().data();
        const int  atomIndicesSize = gridSet.atomIndices().size();
        const int* cxy_na          = grid.cxy_na().data();
        const int* cxy_ind         = grid.cxy_ind().data();

        reallocateDeviceBuffer(&gpu_nbv->atomIndices, atomIndicesSize, &gpu_nbv->atomIndicesSize,
                               &gpu_nbv->atomIndicesSize_alloc, *gpu_nbv->deviceContext_);

        if (atomIndicesSize > 0)
        {
            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(deviceStream);
            }

            copyToDeviceBuffer(&gpu_nbv->atomIndices, atomIndices, 0, atomIndicesSize, deviceStream,
                               GpuApiCallBehavior::Async, nullptr);

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(deviceStream);
            }
        }

        if (numColumns > 0)
        {
            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(deviceStream);
            }

            int* destPtr = &gpu_nbv->cxy_na[maxNumColumns * g];
            copyToDeviceBuffer(&destPtr, cxy_na, 0, numColumns, deviceStream,
                               GpuApiCallBehavior::Async, nullptr);

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(deviceStream);
            }

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(deviceStream);
            }

            destPtr = &gpu_nbv->cxy_ind[maxNumColumns * g];
            copyToDeviceBuffer(&destPtr, cxy_ind, 0, numColumns, deviceStream,
                               GpuApiCallBehavior::Async, nullptr);

            if (bDoTime)
            {
                gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(deviceStream);
            }
        }
    }

    // The above data is transferred on the local stream but is a
    // dependency of the nonlocal stream (specifically the nonlocal X
    // buf ops kernel). We therefore set a dependency to ensure
    // that the nonlocal stream waits on the local stream here.
    // This call records an event in the local stream:
    nbnxnInsertNonlocalGpuDependency(gpu_nbv, Nbnxm::InteractionLocality::Local);
    // ...and this call instructs the nonlocal stream to wait on that event:
    nbnxnInsertNonlocalGpuDependency(gpu_nbv, Nbnxm::InteractionLocality::NonLocal);
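
    /* Sketch of the mechanism (illustrative, not the actual implementation):
     *   cudaEventRecord(dep, localStream);            // done by the Local call
     *   cudaStreamWaitEvent(nonLocalStream, dep, 0);  // done by the NonLocal call
     */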

    return;
}

/* Initialization for F buffer operations on GPU. */
void nbnxn_gpu_init_add_nbat_f_to_f(const int*                  cell,
                                    NbnxmGpu*                   gpu_nbv,
                                    int                         natoms_total,
                                    GpuEventSynchronizer* const localReductionDone)
{
    const DeviceStream& deviceStream = *gpu_nbv->deviceStreams[InteractionLocality::Local];

    GMX_ASSERT(localReductionDone, "localReductionDone should be a valid pointer");
    gpu_nbv->localFReductionDone = localReductionDone;

    if (natoms_total > 0)
    {
        reallocateDeviceBuffer(&gpu_nbv->cell, natoms_total, &gpu_nbv->ncell, &gpu_nbv->ncell_alloc,
                               *gpu_nbv->deviceContext_);
        copyToDeviceBuffer(&gpu_nbv->cell, cell, 0, natoms_total, deviceStream,
                           GpuApiCallBehavior::Async, nullptr);
    }
}