/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \file
 *  \brief Define CUDA implementation of nbnxn_gpu_data_mgmt.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
47 #include "gromacs/gpu_utils/cudautils.cuh"
48 #include "gromacs/gpu_utils/gpu_utils.h"
49 #include "gromacs/gpu_utils/pmalloc_cuda.h"
50 #include "gromacs/hardware/gpu_hw_info.h"
51 #include "gromacs/math/vectypes.h"
52 #include "gromacs/mdlib/force_flags.h"
53 #include "gromacs/mdlib/nb_verlet.h"
54 #include "gromacs/mdlib/nbnxn_consts.h"
55 #include "gromacs/mdlib/nbnxn_gpu_common_utils.h"
56 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
57 #include "gromacs/mdtypes/interaction_const.h"
58 #include "gromacs/mdtypes/md_enums.h"
59 #include "gromacs/pbcutil/ishift.h"
60 #include "gromacs/timing/gpu_timing.h"
61 #include "gromacs/utility/basedefinitions.h"
62 #include "gromacs/utility/cstringutil.h"
63 #include "gromacs/utility/fatalerror.h"
64 #include "gromacs/utility/real.h"
65 #include "gromacs/utility/smalloc.h"
67 #include "nbnxn_cuda.h"
68 #include "nbnxn_cuda_types.h"
/* This is a heuristically determined parameter for the Fermi, Kepler
 * and Maxwell architectures: the minimum size of ci lists is obtained by
 * multiplying this constant by the number of multiprocessors on the
 * current device.
 * Since the maximum number of blocks per multiprocessor is 16, the ideal
 * count for small systems is 32 or 48 blocks per multiprocessor. Because
 * there is a bit of fluctuation in the generated block counts, we use
 * a target of 44 instead of the ideal value of 48.
 */
static unsigned int gpu_min_ci_balanced_factor = 44;
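
/* Illustrative arithmetic (not used by the code): on a device with 16
 * multiprocessors this heuristic yields a minimum balanced list size of
 * 44 * 16 = 704 ci entries; see nbnxn_gpu_min_ci_balanced() at the end
 * of this file for where this multiplication actually happens. */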
static void nbnxn_cuda_clear_e_fshift(gmx_nbnxn_cuda_t *nb);

static void nbnxn_cuda_free_nbparam_table(cu_nbparam_t            *nbparam,
                                          const gmx_device_info_t *dev_info);
/*! \brief Return whether combination rules are used.
 *
 * \param[in] nbparam  pointer to the nonbonded parameter struct
 * \return             true if combination rules are used in this run, false otherwise
 */
static inline bool useLjCombRule(const cu_nbparam_t *nbparam)
{
    return (nbparam->vdwtype == evdwCuCUTCOMBGEOM ||
            nbparam->vdwtype == evdwCuCUTCOMBLB);
}
/*! \brief Initializes the Ewald Coulomb correction GPU table.

    Tabulates the Ewald Coulomb force and initializes the size/scale
    and the table GPU array. If called with an already allocated table,
    it just re-uploads the table.
 */
static void init_ewald_coulomb_force_table(const interaction_const_t *ic,
                                           cu_nbparam_t              *nbp,
                                           const gmx_device_info_t   *dev_info)
{
    if (nbp->coulomb_tab != NULL)
    {
        nbnxn_cuda_free_nbparam_table(nbp, dev_info);
    }

    nbp->coulomb_tab_scale = ic->tabq_scale;
    initParamLookupTable(nbp->coulomb_tab, nbp->coulomb_tab_texobj,
                         &nbnxn_cuda_get_coulomb_tab_texref(),
                         ic->tabq_coul_F, ic->tabq_size, dev_info);
}
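
/* A minimal device-side sketch of how such a table is typically consumed,
 * assuming linear interpolation between adjacent entries; the lookup the
 * kernels actually use lives in the kernel utility headers, so this block
 * is illustrative only and kept out of the build.
 */
#if 0
static __forceinline__ __device__
float interpolate_coulomb_force_r(const float *coulomb_tab,
                                  float        coulomb_tab_scale,
                                  float        r)
{
    float normalized = coulomb_tab_scale * r;  /* fractional table index */
    int   index      = (int)normalized;        /* lower table entry      */
    float fraction   = normalized - index;     /* interpolation weight   */

    /* linear interpolation between entries index and index + 1 */
    return (1.0f - fraction)*coulomb_tab[index] + fraction*coulomb_tab[index + 1];
}
#endif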
/*! Initializes the atomdata structure first time, it only gets filled at
    pair-search. */
static void init_atomdata_first(cu_atomdata_t *ad, int ntypes)
{
    cudaError_t stat;

    ad->ntypes = ntypes;
    stat       = cudaMalloc((void**)&ad->shift_vec, SHIFTS*sizeof(*ad->shift_vec));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->shift_vec");
    ad->bShiftVecUploaded = false;

    stat = cudaMalloc((void**)&ad->fshift, SHIFTS*sizeof(*ad->fshift));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->fshift");

    stat = cudaMalloc((void**)&ad->e_lj, sizeof(*ad->e_lj));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->e_lj");
    stat = cudaMalloc((void**)&ad->e_el, sizeof(*ad->e_el));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->e_el");

    /* initialize to NULL pointers to data that is not allocated here and will
       need reallocation in nbnxn_cuda_init_atomdata */
    ad->xq = NULL;
    ad->f  = NULL;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    ad->natoms = -1;
    ad->nalloc = -1;
}
/*! Selects the Ewald kernel type, analytical on SM 3.0 and later, tabulated on
    earlier GPUs, single or twin cut-off. */
static int pick_ewald_kernel_type(bool                     bTwinCut,
                                  const gmx_device_info_t *dev_info)
{
    bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
    int  kernel_type;

    /* Benchmarking/development environment variables to force the use of
       analytical or tabulated Ewald kernel. */
    bForceAnalyticalEwald = (getenv("GMX_CUDA_NB_ANA_EWALD") != NULL);
    bForceTabulatedEwald  = (getenv("GMX_CUDA_NB_TAB_EWALD") != NULL);

    if (bForceAnalyticalEwald && bForceTabulatedEwald)
    {
        gmx_incons("Both analytical and tabulated Ewald CUDA non-bonded kernels "
                   "requested through environment variables.");
    }

    /* By default, on SM 3.0 and later use analytical Ewald, on earlier tabulated. */
    if ((dev_info->prop.major >= 3 || bForceAnalyticalEwald) && !bForceTabulatedEwald)
    {
        bUseAnalyticalEwald = true;

        if (debug)
        {
            fprintf(debug, "Using analytical Ewald CUDA kernels\n");
        }
    }
    else
    {
        bUseAnalyticalEwald = false;

        if (debug)
        {
            fprintf(debug, "Using tabulated Ewald CUDA kernels\n");
        }
    }

    /* Use twin cut-off kernels if requested by bTwinCut or the env. var.
       forces it (use it for debugging/benchmarking only). */
    if (!bTwinCut && (getenv("GMX_CUDA_NB_EWALD_TWINCUT") == NULL))
    {
        kernel_type = bUseAnalyticalEwald ? eelCuEWALD_ANA : eelCuEWALD_TAB;
    }
    else
    {
        kernel_type = bUseAnalyticalEwald ? eelCuEWALD_ANA_TWIN : eelCuEWALD_TAB_TWIN;
    }

    return kernel_type;
}
/*! Copies all parameters related to the cut-off from ic to nbp */
static void set_cutoff_parameters(cu_nbparam_t              *nbp,
                                  const interaction_const_t *ic,
                                  const NbnxnListParameters *listParams)
{
    nbp->ewald_beta        = ic->ewaldcoeff_q;
    nbp->sh_ewald          = ic->sh_ewald;
    nbp->epsfac            = ic->epsfac;
    nbp->two_k_rf          = 2.0 * ic->k_rf;
    nbp->c_rf              = ic->c_rf;
    nbp->rvdw_sq           = ic->rvdw * ic->rvdw;
    nbp->rcoulomb_sq       = ic->rcoulomb * ic->rcoulomb;
    nbp->rlistOuter_sq     = listParams->rlistOuter * listParams->rlistOuter;
    nbp->rlistInner_sq     = listParams->rlistInner * listParams->rlistInner;
    nbp->useDynamicPruning = listParams->useDynamicPruning;

    nbp->sh_lj_ewald       = ic->sh_lj_ewald;
    nbp->ewaldcoeff_lj     = ic->ewaldcoeff_lj;

    nbp->rvdw_switch       = ic->rvdw_switch;
    nbp->dispersion_shift  = ic->dispersion_shift;
    nbp->repulsion_shift   = ic->repulsion_shift;
    nbp->vdw_switch        = ic->vdw_switch;
}
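
/* Why the squared values: the kernels compare squared pair distances
 * directly against these constants, which avoids a sqrt per pair. A hedged
 * sketch of the pattern (the names here are illustrative, not the kernels'
 * own):
 */
#if 0
static __forceinline__ __device__
bool within_cutoff(float3 xi, float3 xj, float rcoulomb_sq)
{
    float3 rv = make_float3(xi.x - xj.x, xi.y - xj.y, xi.z - xj.z);
    float  r2 = rv.x*rv.x + rv.y*rv.y + rv.z*rv.z; /* squared distance */

    return r2 < rcoulomb_sq;                       /* no sqrt needed   */
}
#endif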
/*! Initializes the nonbonded parameter data structure. */
static void init_nbparam(cu_nbparam_t              *nbp,
                         const interaction_const_t *ic,
                         const NbnxnListParameters *listParams,
                         const nbnxn_atomdata_t    *nbat,
                         const gmx_device_info_t   *dev_info)
{
    int ntypes;

    ntypes = nbat->ntype;

    set_cutoff_parameters(nbp, ic, listParams);

    /* The kernel code supports LJ combination rules (geometric and LB) for
     * all kernel types, but we only generate useful combination rule kernels.
     * We currently only use LJ combination rule (geometric and LB) kernels
     * for plain cut-off LJ. On Maxwell the force only kernels speed up 15%
     * with PME and 20% with RF, the other kernels speed up about half as much.
     * For LJ force-switch the geometric rule would give 7% speed-up, but this
     * combination is rarely used. LJ force-switch with LB rule is more common,
     * but gives only 1% speed-up.
     */
    if (ic->vdwtype == evdwCUT)
    {
        switch (ic->vdw_modifier)
        {
            case eintmodNONE:
            case eintmodPOTSHIFT:
                switch (nbat->comb_rule)
                {
                    case ljcrNONE:
                        nbp->vdwtype = evdwCuCUT;
                        break;
                    case ljcrGEOM:
                        nbp->vdwtype = evdwCuCUTCOMBGEOM;
                        break;
                    case ljcrLB:
                        nbp->vdwtype = evdwCuCUTCOMBLB;
                        break;
                    default:
                        gmx_incons("The requested LJ combination rule is not implemented in the CUDA GPU accelerated kernels!");
                }
                break;
            case eintmodFORCESWITCH:
                nbp->vdwtype = evdwCuFSWITCH;
                break;
            case eintmodPOTSWITCH:
                nbp->vdwtype = evdwCuPSWITCH;
                break;
            default:
                gmx_incons("The requested VdW interaction modifier is not implemented in the CUDA GPU accelerated kernels!");
        }
    }
    else if (ic->vdwtype == evdwPME)
    {
        if (ic->ljpme_comb_rule == ljcrGEOM)
        {
            assert(nbat->comb_rule == ljcrGEOM);
            nbp->vdwtype = evdwCuEWALDGEOM;
        }
        else
        {
            assert(nbat->comb_rule == ljcrLB);
            nbp->vdwtype = evdwCuEWALDLB;
        }
    }
    else
    {
        gmx_incons("The requested VdW type is not implemented in the CUDA GPU accelerated kernels!");
    }

    if (ic->eeltype == eelCUT)
    {
        nbp->eeltype = eelCuCUT;
    }
    else if (EEL_RF(ic->eeltype))
    {
        nbp->eeltype = eelCuRF;
    }
    else if ((EEL_PME(ic->eeltype) || ic->eeltype == eelEWALD))
    {
        /* Initially rcoulomb == rvdw, so it's surely not twin cut-off. */
        nbp->eeltype = pick_ewald_kernel_type(false, dev_info);
    }
    else
    {
        /* Shouldn't happen, as this is checked when choosing Verlet-scheme */
        gmx_incons("The requested electrostatics type is not implemented in the CUDA GPU accelerated kernels!");
    }

    /* generate table for PME */
    nbp->coulomb_tab = NULL;
    if (nbp->eeltype == eelCuEWALD_TAB || nbp->eeltype == eelCuEWALD_TAB_TWIN)
    {
        init_ewald_coulomb_force_table(ic, nbp, dev_info);
    }

    /* set up LJ parameter lookup table */
    if (!useLjCombRule(nbp))
    {
        initParamLookupTable(nbp->nbfp, nbp->nbfp_texobj,
                             &nbnxn_cuda_get_nbfp_texref(),
                             nbat->nbfp, 2*ntypes*ntypes, dev_info);
    }

    /* set up LJ-PME parameter lookup table */
    if (ic->vdwtype == evdwPME)
    {
        initParamLookupTable(nbp->nbfp_comb, nbp->nbfp_comb_texobj,
                             &nbnxn_cuda_get_nbfp_comb_texref(),
                             nbat->nbfp_comb, 2*ntypes, dev_info);
    }
}
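
/* The eeltype and vdwtype indices set above are what the launch code later
 * uses to pick a kernel flavor from 2D tables indexed as [eeltype][vdwtype].
 * A hedged, illustrative sketch of that dispatch (the real tables and
 * selection function live in nbnxn_cuda.cu; the names here are simplified):
 */
#if 0
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,
                                     const cu_nbparam_t,
                                     const cu_plist_t,
                                     bool);

static nbnxn_cu_kfunc_ptr_t
select_nbnxn_kernel_sketch(const nbnxn_cu_kfunc_ptr_t kfunc_ptr[eelCuNR][evdwCuNR],
                           const cu_nbparam_t        *nbp)
{
    /* each electrostatics/VdW flavor combination maps to exactly one
       compiled kernel instantiation */
    return kfunc_ptr[nbp->eeltype][nbp->vdwtype];
}
#endif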
/*! Re-generates the GPU Ewald force table, resets rlist, and updates the
 *  electrostatic type, switching to twin cut-off (or back) if needed. */
void nbnxn_gpu_pme_loadbal_update_param(const nonbonded_verlet_t  *nbv,
                                        const interaction_const_t *ic,
                                        const NbnxnListParameters *listParams)
{
    if (!nbv || nbv->grp[0].kernel_type != nbnxnk8x8x8_GPU)
    {
        return;
    }
    gmx_nbnxn_cuda_t *nb  = nbv->gpu_nbv;
    cu_nbparam_t     *nbp = nb->nbparam;

    set_cutoff_parameters(nbp, ic, listParams);

    nbp->eeltype = pick_ewald_kernel_type(ic->rcoulomb != ic->rvdw,
                                          nb->dev_info);

    init_ewald_coulomb_force_table(ic, nb->nbparam, nb->dev_info);
}
/*! Initializes the pair list data structure. */
static void init_plist(cu_plist_t *pl)
{
    /* initialize to NULL pointers to data that is not allocated here and will
       need reallocation in nbnxn_gpu_init_pairlist */
    pl->sci   = NULL;
    pl->cj4   = NULL;
    pl->imask = NULL;
    pl->excl  = NULL;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    pl->na_c          = -1;
    pl->nsci          = -1;
    pl->sci_nalloc    = -1;
    pl->ncj4          = -1;
    pl->cj4_nalloc    = -1;
    pl->nimask        = -1;
    pl->imask_nalloc  = -1;
    pl->nexcl         = -1;
    pl->excl_nalloc   = -1;
    pl->haveFreshList = false;
}
/*! Initializes the timer data structure. */
static void init_timers(cu_timers_t *t, bool bUseTwoStreams)
{
    /* The non-local counters/stream (second in the array) are needed only with DD. */
    for (int i = 0; i <= (bUseTwoStreams ? 1 : 0); i++)
    {
        t->didPairlistH2D[i]  = false;
        t->didPrune[i]        = false;
        t->didRollingPrune[i] = false;
    }
}
/*! Initializes the timings data structure. */
static void init_timings(gmx_wallclock_gpu_nbnxn_t *t)
{
    int i, j;

    t->nb_h2d_t = 0.0;
    t->nb_d2h_t = 0.0;
    t->nb_c     = 0;
    t->pl_h2d_t = 0.0;
    t->pl_h2d_c = 0;
    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            t->ktime[i][j].t = 0.0;
            t->ktime[i][j].c = 0;
        }
    }
    t->pruneTime.c        = 0;
    t->pruneTime.t        = 0.0;
    t->dynamicPruneTime.c = 0;
    t->dynamicPruneTime.t = 0.0;
}
/*! Initializes simulation constant data. */
static void nbnxn_cuda_init_const(gmx_nbnxn_cuda_t          *nb,
                                  const interaction_const_t *ic,
                                  const NbnxnListParameters *listParams,
                                  const nbnxn_atomdata_t    *nbat)
{
    init_atomdata_first(nb->atdat, nbat->ntype);
    init_nbparam(nb->nbparam, ic, listParams, nbat, nb->dev_info);

    /* clear energy and shift force outputs */
    nbnxn_cuda_clear_e_fshift(nb);
}
void nbnxn_gpu_init(gmx_nbnxn_cuda_t         **p_nb,
                    const gmx_device_info_t   *deviceInfo,
                    const interaction_const_t *ic,
                    const NbnxnListParameters *listParams,
                    const nbnxn_atomdata_t    *nbat,
                    int                        rank,
                    gmx_bool                   bLocalAndNonlocal)
{
    cudaError_t       stat;
    gmx_nbnxn_cuda_t *nb;

    if (p_nb == NULL)
    {
        return;
    }

    snew(nb, 1);
    snew(nb->atdat, 1);
    snew(nb->nbparam, 1);
    snew(nb->plist[eintLocal], 1);
    if (bLocalAndNonlocal)
    {
        snew(nb->plist[eintNonlocal], 1);
    }

    nb->bUseTwoStreams = bLocalAndNonlocal;

    nb->timers = new cu_timers_t();
    snew(nb->timings, 1);

    /* init nbst */
    pmalloc((void**)&nb->nbst.e_lj, sizeof(*nb->nbst.e_lj));
    pmalloc((void**)&nb->nbst.e_el, sizeof(*nb->nbst.e_el));
    pmalloc((void**)&nb->nbst.fshift, SHIFTS * sizeof(*nb->nbst.fshift));

    init_plist(nb->plist[eintLocal]);

    /* set device info, just point it to the right GPU among the detected ones */
    nb->dev_info = deviceInfo;

    /* local/non-local GPU streams */
    stat = cudaStreamCreate(&nb->stream[eintLocal]);
    CU_RET_ERR(stat, "cudaStreamCreate on stream[eintLocal] failed");
    if (nb->bUseTwoStreams)
    {
        init_plist(nb->plist[eintNonlocal]);

        /* Note that the device we're running on does not have to support
         * priorities, because we are querying the priority range which in this
         * case will be a single value.
         */
        int highest_priority;
        stat = cudaDeviceGetStreamPriorityRange(NULL, &highest_priority);
        CU_RET_ERR(stat, "cudaDeviceGetStreamPriorityRange failed");

        stat = cudaStreamCreateWithPriority(&nb->stream[eintNonlocal],
                                            cudaStreamDefault,
                                            highest_priority);
        CU_RET_ERR(stat, "cudaStreamCreateWithPriority on stream[eintNonlocal] failed");
    }

    /* init events for synchronization (timing disabled for performance reasons!) */
    stat = cudaEventCreateWithFlags(&nb->nonlocal_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on nonlocal_done failed");
    stat = cudaEventCreateWithFlags(&nb->misc_ops_and_local_H2D_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on misc_ops_and_local_H2D_done failed");

    /* WARNING: CUDA timings are incorrect with multiple streams.
     *          This is the main reason why they are disabled by default.
     */
    // TODO: Consider turning on by default when we can detect nr of streams.
    nb->bDoTime = (getenv("GMX_ENABLE_GPU_TIMING") != NULL);

    if (nb->bDoTime)
    {
        init_timers(nb->timers, nb->bUseTwoStreams);
        init_timings(nb->timings);
    }

    /* set the kernel type for the current GPU */
    /* pick L1 cache configuration */
    nbnxn_cuda_set_cacheconfig(nb->dev_info);

    nbnxn_cuda_init_const(nb, ic, listParams, nbat);

    *p_nb = nb;

    if (debug)
    {
        fprintf(debug, "Initialized CUDA data structures.\n");
    }
}
void nbnxn_gpu_init_pairlist(gmx_nbnxn_cuda_t       *nb,
                             const nbnxn_pairlist_t *h_plist,
                             int                     iloc)
{
    if (canSkipWork(nb, iloc))
    {
        return;
    }

    char          sbuf[STRLEN];
    bool          bDoTime = nb->bDoTime;
    cudaStream_t  stream  = nb->stream[iloc];
    cu_plist_t   *d_plist = nb->plist[iloc];

    if (d_plist->na_c < 0)
    {
        d_plist->na_c = h_plist->na_ci;
    }
    else
    {
        if (d_plist->na_c != h_plist->na_ci)
        {
            sprintf(sbuf, "In cu_init_plist: the #atoms per cell has changed (from %d to %d)",
                    d_plist->na_c, h_plist->na_ci);
            gmx_incons(sbuf);
        }
    }

    if (bDoTime)
    {
        nb->timers->pl_h2d[iloc].openTimingRegion(stream);
        nb->timers->didPairlistH2D[iloc] = true;
    }

    cu_realloc_buffered((void **)&d_plist->sci, h_plist->sci, sizeof(*d_plist->sci),
                        &d_plist->nsci, &d_plist->sci_nalloc,
                        h_plist->nsci,
                        stream, true);

    cu_realloc_buffered((void **)&d_plist->cj4, h_plist->cj4, sizeof(*d_plist->cj4),
                        &d_plist->ncj4, &d_plist->cj4_nalloc,
                        h_plist->ncj4,
                        stream, true);

    /* this call only allocates space on the device (no data is transferred) */
    cu_realloc_buffered((void **)&d_plist->imask, NULL, sizeof(*d_plist->imask),
                        &d_plist->nimask, &d_plist->imask_nalloc,
                        h_plist->ncj4*c_nbnxnGpuClusterpairSplit,
                        stream, true);

    cu_realloc_buffered((void **)&d_plist->excl, h_plist->excl, sizeof(*d_plist->excl),
                        &d_plist->nexcl, &d_plist->excl_nalloc,
                        h_plist->nexcl,
                        stream, true);

    if (bDoTime)
    {
        nb->timers->pl_h2d[iloc].closeTimingRegion(stream);
    }

    /* the next use of this list will be the first one, so we need to prune */
    d_plist->haveFreshList = true;
}
void nbnxn_gpu_upload_shiftvec(gmx_nbnxn_cuda_t       *nb,
                               const nbnxn_atomdata_t *nbatom)
{
    cu_atomdata_t *adat = nb->atdat;
    cudaStream_t   ls   = nb->stream[eintLocal];

    /* only if we have a dynamic box */
    if (nbatom->bDynamicBox || !adat->bShiftVecUploaded)
    {
        cu_copy_H2D_async(adat->shift_vec, nbatom->shift_vec,
                          SHIFTS * sizeof(*adat->shift_vec), ls);
        adat->bShiftVecUploaded = true;
    }
}
/*! Clears the first natoms_clear elements of the GPU nonbonded force output array. */
static void nbnxn_cuda_clear_f(gmx_nbnxn_cuda_t *nb, int natoms_clear)
{
    cudaError_t    stat;
    cu_atomdata_t *adat = nb->atdat;
    cudaStream_t   ls   = nb->stream[eintLocal];

    stat = cudaMemsetAsync(adat->f, 0, natoms_clear * sizeof(*adat->f), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on f failed");
}
/*! Clears nonbonded shift force output array and energy outputs on the GPU. */
static void nbnxn_cuda_clear_e_fshift(gmx_nbnxn_cuda_t *nb)
{
    cudaError_t    stat;
    cu_atomdata_t *adat = nb->atdat;
    cudaStream_t   ls   = nb->stream[eintLocal];

    stat = cudaMemsetAsync(adat->fshift, 0, SHIFTS * sizeof(*adat->fshift), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on fshift failed");
    stat = cudaMemsetAsync(adat->e_lj, 0, sizeof(*adat->e_lj), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on e_lj failed");
    stat = cudaMemsetAsync(adat->e_el, 0, sizeof(*adat->e_el), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on e_el failed");
}
void nbnxn_gpu_clear_outputs(gmx_nbnxn_cuda_t *nb, int flags)
{
    nbnxn_cuda_clear_f(nb, nb->atdat->natoms);
    /* clear shift force array and energies if the outputs were
       used in the current step */
    if (flags & GMX_FORCE_VIRIAL)
    {
        nbnxn_cuda_clear_e_fshift(nb);
    }
}
void nbnxn_gpu_init_atomdata(gmx_nbnxn_cuda_t       *nb,
                             const nbnxn_atomdata_t *nbat)
{
    cudaError_t    stat;
    int            nalloc, natoms;
    bool           realloced;
    bool           bDoTime = nb->bDoTime;
    cu_timers_t   *timers  = nb->timers;
    cu_atomdata_t *d_atdat = nb->atdat;
    cudaStream_t   ls      = nb->stream[eintLocal];

    natoms    = nbat->natoms;
    realloced = false;

    if (bDoTime)
    {
        /* time async copy */
        timers->atdat.openTimingRegion(ls);
    }

    /* need to reallocate if we have to copy more atoms than the amount of space
       available and only allocate if we haven't initialized yet, i.e. d_atdat->natoms == -1 */
    if (natoms > d_atdat->nalloc)
    {
        nalloc = over_alloc_small(natoms);

        /* free up first if the arrays have already been initialized */
        if (d_atdat->nalloc != -1)
        {
            cu_free_buffered(d_atdat->f, &d_atdat->natoms, &d_atdat->nalloc);
            cu_free_buffered(d_atdat->xq);
            cu_free_buffered(d_atdat->atom_types);
            cu_free_buffered(d_atdat->lj_comb);
        }

        stat = cudaMalloc((void **)&d_atdat->f, nalloc*sizeof(*d_atdat->f));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->f");
        stat = cudaMalloc((void **)&d_atdat->xq, nalloc*sizeof(*d_atdat->xq));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->xq");
        if (useLjCombRule(nb->nbparam))
        {
            stat = cudaMalloc((void **)&d_atdat->lj_comb, nalloc*sizeof(*d_atdat->lj_comb));
            CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->lj_comb");
        }
        else
        {
            stat = cudaMalloc((void **)&d_atdat->atom_types, nalloc*sizeof(*d_atdat->atom_types));
            CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->atom_types");
        }

        d_atdat->nalloc = nalloc;
        realloced       = true;
    }

    d_atdat->natoms       = natoms;
    d_atdat->natoms_local = nbat->natoms_local;

    /* need to clear GPU f output if realloc happened */
    if (realloced)
    {
        nbnxn_cuda_clear_f(nb, nalloc);
    }

    if (useLjCombRule(nb->nbparam))
    {
        cu_copy_H2D_async(d_atdat->lj_comb, nbat->lj_comb,
                          natoms*sizeof(*d_atdat->lj_comb), ls);
    }
    else
    {
        cu_copy_H2D_async(d_atdat->atom_types, nbat->type,
                          natoms*sizeof(*d_atdat->atom_types), ls);
    }

    if (bDoTime)
    {
        timers->atdat.closeTimingRegion(ls);
    }
}
static void nbnxn_cuda_free_nbparam_table(cu_nbparam_t            *nbparam,
                                          const gmx_device_info_t *dev_info)
{
    if (nbparam->eeltype == eelCuEWALD_TAB || nbparam->eeltype == eelCuEWALD_TAB_TWIN)
    {
        destroyParamLookupTable(nbparam->coulomb_tab, nbparam->coulomb_tab_texobj,
                                &nbnxn_cuda_get_coulomb_tab_texref(), dev_info);
    }
}
void nbnxn_gpu_free(gmx_nbnxn_cuda_t *nb)
{
    cudaError_t    stat;
    cu_atomdata_t *atdat;
    cu_nbparam_t  *nbparam;
    cu_plist_t    *plist, *plist_nl;

    if (nb == NULL)
    {
        return;
    }

    atdat    = nb->atdat;
    nbparam  = nb->nbparam;
    plist    = nb->plist[eintLocal];
    plist_nl = nb->plist[eintNonlocal];

    nbnxn_cuda_free_nbparam_table(nbparam, nb->dev_info);

    stat = cudaEventDestroy(nb->nonlocal_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on timers->nonlocal_done");
    stat = cudaEventDestroy(nb->misc_ops_and_local_H2D_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on timers->misc_ops_and_local_H2D_done");

    delete nb->timers;

    /* The non-local counters/stream (second in the array) are needed only with DD. */
    for (int i = 0; i <= (nb->bUseTwoStreams ? 1 : 0); i++)
    {
        stat = cudaStreamDestroy(nb->stream[i]);
        CU_RET_ERR(stat, "cudaStreamDestroy failed on stream");
    }

    if (!useLjCombRule(nb->nbparam))
    {
        destroyParamLookupTable(nbparam->nbfp, nbparam->nbfp_texobj,
                                &nbnxn_cuda_get_nbfp_texref(), nb->dev_info);
    }

    if (nbparam->vdwtype == evdwCuEWALDGEOM || nbparam->vdwtype == evdwCuEWALDLB)
    {
        destroyParamLookupTable(nbparam->nbfp_comb, nbparam->nbfp_comb_texobj,
                                &nbnxn_cuda_get_nbfp_comb_texref(), nb->dev_info);
    }

    stat = cudaFree(atdat->shift_vec);
    CU_RET_ERR(stat, "cudaFree failed on atdat->shift_vec");
    stat = cudaFree(atdat->fshift);
    CU_RET_ERR(stat, "cudaFree failed on atdat->fshift");

    stat = cudaFree(atdat->e_lj);
    CU_RET_ERR(stat, "cudaFree failed on atdat->e_lj");
    stat = cudaFree(atdat->e_el);
    CU_RET_ERR(stat, "cudaFree failed on atdat->e_el");

    cu_free_buffered(atdat->f, &atdat->natoms, &atdat->nalloc);
    cu_free_buffered(atdat->xq);
    cu_free_buffered(atdat->atom_types, &atdat->ntypes);
    cu_free_buffered(atdat->lj_comb);

    cu_free_buffered(plist->sci, &plist->nsci, &plist->sci_nalloc);
    cu_free_buffered(plist->cj4, &plist->ncj4, &plist->cj4_nalloc);
    cu_free_buffered(plist->imask, &plist->nimask, &plist->imask_nalloc);
    cu_free_buffered(plist->excl, &plist->nexcl, &plist->excl_nalloc);
    if (nb->bUseTwoStreams)
    {
        cu_free_buffered(plist_nl->sci, &plist_nl->nsci, &plist_nl->sci_nalloc);
        cu_free_buffered(plist_nl->cj4, &plist_nl->ncj4, &plist_nl->cj4_nalloc);
        cu_free_buffered(plist_nl->imask, &plist_nl->nimask, &plist_nl->imask_nalloc);
        cu_free_buffered(plist_nl->excl, &plist_nl->nexcl, &plist_nl->excl_nalloc);
    }

    sfree(atdat);
    sfree(nbparam);
    sfree(plist);
    if (nb->bUseTwoStreams)
    {
        sfree(plist_nl);
    }
    sfree(nb->timings);
    sfree(nb);

    if (debug)
    {
        fprintf(debug, "Cleaned up CUDA data structures.\n");
    }
}
//! This function is documented in the header file
gmx_wallclock_gpu_nbnxn_t *nbnxn_gpu_get_timings(gmx_nbnxn_cuda_t *nb)
{
    return (nb != nullptr && nb->bDoTime) ? nb->timings : nullptr;
}
void nbnxn_gpu_reset_timings(nonbonded_verlet_t* nbv)
{
    if (nbv->gpu_nbv && nbv->gpu_nbv->bDoTime)
    {
        init_timings(nbv->gpu_nbv->timings);
    }
}
int nbnxn_gpu_min_ci_balanced(gmx_nbnxn_cuda_t *nb)
{
    return nb != NULL ?
           gpu_min_ci_balanced_factor*nb->dev_info->prop.multiProcessorCount : 0;
}
gmx_bool nbnxn_gpu_is_kernel_ewald_analytical(const gmx_nbnxn_cuda_t *nb)
{
    return ((nb->nbparam->eeltype == eelCuEWALD_ANA) ||
            (nb->nbparam->eeltype == eelCuEWALD_ANA_TWIN));
}