/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <math.h>

#include "gmx_fatal.h"
#include "smalloc.h"
#include "tables.h"
#include "typedefs.h"
#include "types/enums.h"
#include "types/nb_verlet.h"
#include "types/interaction_const.h"
#include "types/force_flags.h"
#include "../nbnxn_consts.h"
#include "gmx_detect_hardware.h"

#include "nbnxn_cuda_types.h"
#include "../../gmxlib/cuda_tools/cudautils.cuh"
#include "nbnxn_cuda_data_mgmt.h"
#include "pmalloc_cuda.h"
#include "gpu_utils.h"

#include "gromacs/utility/common.h"
static bool bUseCudaEventBlockingSync = false; /* makes the CPU thread block */

/* This is a heuristically determined parameter for the Fermi architecture:
 * the minimum number of ci lists needed for good load balance, obtained by
 * multiplying this constant by the number of multiprocessors on the current
 * device.
 */
static unsigned int gpu_min_ci_balanced_factor = 40;
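
/* Worked example: on a device with 15 multiprocessors this gives a minimum
 * of 40*15 = 600 ci lists; see nbnxn_cuda_min_ci_balanced() at the end of
 * this file, which performs exactly this multiplication.
 */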

/* Functions from nbnxn_cuda.cu */
extern void nbnxn_cuda_set_cacheconfig(cuda_dev_info_t *devinfo);
extern const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_nbfp_texref();
extern const struct texture<float, 1, cudaReadModeElementType> &nbnxn_cuda_get_coulomb_tab_texref();

/* We should actually be using md_print_warn in md_logging.c,
 * but we can't include mpi.h in CUDA code. */
static void md_print_warn(FILE *fplog, const char *fmt, ...)
{
    va_list ap;

    /* We should only print to stderr on the master node;
     * in most cases fplog is only set on the master node, so this works. */
    va_start(ap, fmt);
    fprintf(stderr, "\n");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    va_end(ap);

    if (fplog != NULL)
    {
        va_start(ap, fmt);
        vfprintf(fplog, fmt, ap);
        fprintf(fplog, "\n");
        va_end(ap);
    }
}

static void nbnxn_cuda_clear_e_fshift(nbnxn_cuda_ptr_t cu_nb);

/*! Tabulates the Ewald Coulomb force and initializes the size/scale
    and the table GPU array. If called with an already allocated table,
    it just re-uploads the table.
 */
static void init_ewald_coulomb_force_table(cu_nbparam_t          *nbp,
                                           const cuda_dev_info_t *dev_info)
{
    float       *ftmp, *coul_tab;
    int          tabsize;
    double       tabscale;
    cudaError_t  stat;

    tabsize  = GPU_EWALD_COULOMB_FORCE_TABLE_SIZE;
    /* Subtract 2 instead of 1 to avoid access out of range due to rounding */
    tabscale = (tabsize - 2) / sqrt(nbp->rcoulomb_sq);

    pmalloc((void**)&ftmp, tabsize*sizeof(*ftmp));

    table_spline3_fill_ewald_lr(ftmp, NULL, NULL, tabsize,
                                1/tabscale, nbp->ewald_beta);

    /* If the table pointer == NULL the table is generated the first time =>
       the array pointer will be saved to nbparam and the texture is bound.
     */
    coul_tab = nbp->coulomb_tab;
    if (coul_tab == NULL)
    {
        stat = cudaMalloc((void **)&coul_tab, tabsize*sizeof(*coul_tab));
        CU_RET_ERR(stat, "cudaMalloc failed on coul_tab");

        nbp->coulomb_tab = coul_tab;

#ifdef TEXOBJ_SUPPORTED
        /* Only devices of CC >= 3.0 (Kepler and later) support texture objects */
        if (dev_info->prop.major >= 3)
        {
            cudaResourceDesc rd;
            memset(&rd, 0, sizeof(rd));
            rd.resType                = cudaResourceTypeLinear;
            rd.res.linear.devPtr      = nbp->coulomb_tab;
            rd.res.linear.desc.f      = cudaChannelFormatKindFloat;
            rd.res.linear.desc.x      = 32;
            rd.res.linear.sizeInBytes = tabsize*sizeof(*coul_tab);

            cudaTextureDesc td;
            memset(&td, 0, sizeof(td));
            td.readMode = cudaReadModeElementType;
            stat        = cudaCreateTextureObject(&nbp->coulomb_tab_texobj, &rd, &td, NULL);
            CU_RET_ERR(stat, "cudaCreateTextureObject on coulomb_tab_texobj failed");
        }
        else
#endif  /* TEXOBJ_SUPPORTED */
        {
            GMX_UNUSED_VALUE(dev_info);
            cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
            stat = cudaBindTexture(NULL, &nbnxn_cuda_get_coulomb_tab_texref(),
                                   coul_tab, &cd, tabsize*sizeof(*coul_tab));
            CU_RET_ERR(stat, "cudaBindTexture on coulomb_tab_texref failed");
        }
    }

    cu_copy_H2D(coul_tab, ftmp, tabsize*sizeof(*coul_tab));

    nbp->coulomb_tab_size  = tabsize;
    nbp->coulomb_tab_scale = tabscale;

    pfree(ftmp);
}
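
/* Illustrative device-side counterpart (a sketch, not the kernels' actual
 * implementation, which lives with the nbnxn CUDA kernels): the table is
 * indexed with r*coulomb_tab_scale and interpolated between the two nearest
 * entries, which is why tabscale above leaves a 2-entry safety margin.
 *
 *   // assumed-illustrative helper; fetches via the texture reference bound above
 *   static __device__ float interpolate_coulomb_force_r(float r, float scale)
 *   {
 *       float normalized = scale * r;
 *       int   index      = (int)normalized;
 *       float fract2     = normalized - index;
 *
 *       return (1.0f - fract2)*tex1Dfetch(nbnxn_cuda_get_coulomb_tab_texref(), index)
 *              + fract2*tex1Dfetch(nbnxn_cuda_get_coulomb_tab_texref(), index + 1);
 *   }
 */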

/*! Initializes the atomdata structure the first time; it only gets filled
    at pair search. */
static void init_atomdata_first(cu_atomdata_t *ad, int ntypes)
{
    cudaError_t stat;

    ad->ntypes = ntypes;
    stat       = cudaMalloc((void**)&ad->shift_vec, SHIFTS*sizeof(*ad->shift_vec));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->shift_vec");
    ad->bShiftVecUploaded = false;

    stat = cudaMalloc((void**)&ad->fshift, SHIFTS*sizeof(*ad->fshift));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->fshift");

    stat = cudaMalloc((void**)&ad->e_lj, sizeof(*ad->e_lj));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->e_lj");
    stat = cudaMalloc((void**)&ad->e_el, sizeof(*ad->e_el));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->e_el");

    /* initialize to NULL pointers to data that is not allocated here and will
       need reallocation in nbnxn_cuda_init_atomdata */
    ad->xq = NULL;
    ad->f  = NULL;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    ad->natoms = -1;
    ad->nalloc = -1;
}

/*! Selects the Ewald kernel type: analytical on SM 3.0 and later, tabulated
    on earlier GPUs; single or twin cut-off. */
static int pick_ewald_kernel_type(bool                   bTwinCut,
                                  const cuda_dev_info_t *dev_info)
{
    bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
    int  kernel_type;

    /* Benchmarking/development environment variables to force the use of
       analytical or tabulated Ewald kernel. */
    bForceAnalyticalEwald = (getenv("GMX_CUDA_NB_ANA_EWALD") != NULL);
    bForceTabulatedEwald  = (getenv("GMX_CUDA_NB_TAB_EWALD") != NULL);

    if (bForceAnalyticalEwald && bForceTabulatedEwald)
    {
        gmx_incons("Both analytical and tabulated Ewald CUDA non-bonded kernels "
                   "requested through environment variables.");
    }

    /* By default, on SM 3.0 and later use analytical Ewald, on earlier tabulated. */
    if ((dev_info->prop.major >= 3 || bForceAnalyticalEwald) && !bForceTabulatedEwald)
    {
        bUseAnalyticalEwald = true;
        if (debug)
        {
            fprintf(debug, "Using analytical Ewald CUDA kernels\n");
        }
    }
    else
    {
        bUseAnalyticalEwald = false;
        if (debug)
        {
            fprintf(debug, "Using tabulated Ewald CUDA kernels\n");
        }
    }

    /* Use twin cut-off kernels if requested by bTwinCut or the env. var.
       forces it (use it for debugging/benchmarking only). */
    if (!bTwinCut && (getenv("GMX_CUDA_NB_EWALD_TWINCUT") == NULL))
    {
        kernel_type = bUseAnalyticalEwald ? eelCuEWALD_ANA : eelCuEWALD_TAB;
    }
    else
    {
        kernel_type = bUseAnalyticalEwald ? eelCuEWALD_ANA_TWIN : eelCuEWALD_TAB_TWIN;
    }

    return kernel_type;
}
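
/* For benchmarking, the selection above can be steered from the shell, e.g.:
 *   GMX_CUDA_NB_ANA_EWALD=1      force the analytical Ewald kernels
 *   GMX_CUDA_NB_TAB_EWALD=1      force the tabulated Ewald kernels
 *   GMX_CUDA_NB_EWALD_TWINCUT=1  force the twin cut-off kernel flavor
 * Setting both ANA and TAB trips the gmx_incons() check above.
 */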

/*! Initializes the nonbonded parameter data structure. */
static void init_nbparam(cu_nbparam_t              *nbp,
                         const interaction_const_t *ic,
                         const nbnxn_atomdata_t    *nbat,
                         const cuda_dev_info_t     *dev_info)
{
    cudaError_t stat;
    int         ntypes, nnbfp;

    ntypes = nbat->ntype;

    nbp->ewald_beta  = ic->ewaldcoeff_q;
    nbp->sh_ewald    = ic->sh_ewald;
    nbp->epsfac      = ic->epsfac;
    nbp->two_k_rf    = 2.0 * ic->k_rf;
    nbp->c_rf        = ic->c_rf;
    nbp->rvdw_sq     = ic->rvdw * ic->rvdw;
    nbp->rcoulomb_sq = ic->rcoulomb * ic->rcoulomb;
    nbp->rlist_sq    = ic->rlist * ic->rlist;

    nbp->rvdw_switch      = ic->rvdw_switch;
    nbp->dispersion_shift = ic->dispersion_shift;
    nbp->repulsion_shift  = ic->repulsion_shift;
    nbp->vdw_switch       = ic->vdw_switch;

    switch (ic->vdw_modifier)
    {
        case eintmodNONE:
        case eintmodPOTSHIFT:
            nbp->vdwtype = evdwCuCUT;
            break;
        case eintmodFORCESWITCH:
            nbp->vdwtype = evdwCuFSWITCH;
            break;
        case eintmodPOTSWITCH:
            nbp->vdwtype = evdwCuPSWITCH;
            break;
        default:
            gmx_incons("The requested VdW interaction modifier is not implemented in the CUDA GPU accelerated kernels!");
            break;
    }

    if (ic->eeltype == eelCUT)
    {
        nbp->eeltype = eelCuCUT;
    }
    else if (EEL_RF(ic->eeltype))
    {
        nbp->eeltype = eelCuRF;
    }
    else if ((EEL_PME(ic->eeltype) || ic->eeltype == eelEWALD))
    {
        /* Initially rcoulomb == rvdw, so it's surely not twin cut-off. */
        nbp->eeltype = pick_ewald_kernel_type(false, dev_info);
    }
    else
    {
        /* Shouldn't happen, as this is checked when choosing the Verlet scheme */
        gmx_incons("The requested electrostatics type is not implemented in the CUDA GPU accelerated kernels!");
    }

    /* generate table for PME */
    nbp->coulomb_tab = NULL;
    if (nbp->eeltype == eelCuEWALD_TAB || nbp->eeltype == eelCuEWALD_TAB_TWIN)
    {
        init_ewald_coulomb_force_table(nbp, dev_info);
    }

    nnbfp = 2*ntypes*ntypes;
    stat  = cudaMalloc((void **)&nbp->nbfp, nnbfp*sizeof(*nbp->nbfp));
    CU_RET_ERR(stat, "cudaMalloc failed on nbp->nbfp");
    cu_copy_H2D(nbp->nbfp, nbat->nbfp, nnbfp*sizeof(*nbp->nbfp));

#ifdef TEXOBJ_SUPPORTED
    /* Only devices of CC >= 3.0 (Kepler and later) support texture objects */
    if (dev_info->prop.major >= 3)
    {
        cudaResourceDesc rd;
        memset(&rd, 0, sizeof(rd));
        rd.resType                = cudaResourceTypeLinear;
        rd.res.linear.devPtr      = nbp->nbfp;
        rd.res.linear.desc.f      = cudaChannelFormatKindFloat;
        rd.res.linear.desc.x      = 32;
        rd.res.linear.sizeInBytes = nnbfp*sizeof(*nbp->nbfp);

        cudaTextureDesc td;
        memset(&td, 0, sizeof(td));
        td.readMode = cudaReadModeElementType;
        stat        = cudaCreateTextureObject(&nbp->nbfp_texobj, &rd, &td, NULL);
        CU_RET_ERR(stat, "cudaCreateTextureObject on nbfp_texobj failed");
    }
    else
#endif  /* TEXOBJ_SUPPORTED */
    {
        cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
        stat = cudaBindTexture(NULL, &nbnxn_cuda_get_nbfp_texref(),
                               nbp->nbfp, &cd, nnbfp*sizeof(*nbp->nbfp));
        CU_RET_ERR(stat, "cudaBindTexture on nbfp_texref failed");
    }
}
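
/* Layout note (inferred from nnbfp = 2*ntypes*ntypes above): nbfp stores the
 * LJ parameters interleaved per type pair, so consumers index it as, e.g.:
 *
 *   c6  = nbfp[2*(ntypes*typei + typej)];
 *   c12 = nbfp[2*(ntypes*typei + typej) + 1];
 */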

/*! Re-generates the GPU Ewald force table, resets rlist, and updates the
 *  electrostatic type, switching to twin cut-off (or back) if needed. */
void nbnxn_cuda_pme_loadbal_update_param(nbnxn_cuda_ptr_t           cu_nb,
                                         const interaction_const_t *ic)
{
    cu_nbparam_t *nbp = cu_nb->nbparam;

    nbp->rlist_sq    = ic->rlist * ic->rlist;
    nbp->rcoulomb_sq = ic->rcoulomb * ic->rcoulomb;
    nbp->ewald_beta  = ic->ewaldcoeff_q;

    nbp->eeltype = pick_ewald_kernel_type(ic->rcoulomb != ic->rvdw,
                                          cu_nb->dev_info);

    init_ewald_coulomb_force_table(cu_nb->nbparam, cu_nb->dev_info);
}

/*! Initializes the pair list data structure. */
static void init_plist(cu_plist_t *pl)
{
    /* initialize to NULL pointers to data that is not allocated here and will
       need reallocation in nbnxn_cuda_init_pairlist */
    pl->sci  = NULL;
    pl->cj4  = NULL;
    pl->excl = NULL;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    pl->na_c        = -1;
    pl->nsci        = -1;
    pl->sci_nalloc  = -1;
    pl->ncj4        = -1;
    pl->cj4_nalloc  = -1;
    pl->nexcl       = -1;
    pl->excl_nalloc = -1;
    pl->bDoPrune    = false;
}

/*! Initializes the timer data structure. */
static void init_timers(cu_timers_t *t, bool bUseTwoStreams)
{
    cudaError_t stat;
    int         eventflags = ( bUseCudaEventBlockingSync ? cudaEventBlockingSync : cudaEventDefault );

    stat = cudaEventCreateWithFlags(&(t->start_atdat), eventflags);
    CU_RET_ERR(stat, "cudaEventCreate on start_atdat failed");
    stat = cudaEventCreateWithFlags(&(t->stop_atdat), eventflags);
    CU_RET_ERR(stat, "cudaEventCreate on stop_atdat failed");

    /* The non-local counters/stream (second in the array) are needed only with DD. */
    for (int i = 0; i <= (bUseTwoStreams ? 1 : 0); i++)
    {
        stat = cudaEventCreateWithFlags(&(t->start_nb_k[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on start_nb_k failed");
        stat = cudaEventCreateWithFlags(&(t->stop_nb_k[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on stop_nb_k failed");

        stat = cudaEventCreateWithFlags(&(t->start_pl_h2d[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on start_pl_h2d failed");
        stat = cudaEventCreateWithFlags(&(t->stop_pl_h2d[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on stop_pl_h2d failed");

        stat = cudaEventCreateWithFlags(&(t->start_nb_h2d[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on start_nb_h2d failed");
        stat = cudaEventCreateWithFlags(&(t->stop_nb_h2d[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on stop_nb_h2d failed");

        stat = cudaEventCreateWithFlags(&(t->start_nb_d2h[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on start_nb_d2h failed");
        stat = cudaEventCreateWithFlags(&(t->stop_nb_d2h[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on stop_nb_d2h failed");
    }
}
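
/* A minimal sketch of how a start/stop pair created above is consumed when
 * timing is enabled (the real accounting lives in the kernel launch/wait
 * code; "timings", "iplist" and "ikernel" here are illustrative names):
 *
 *   float ms;
 *   stat = cudaEventElapsedTime(&ms, t->start_nb_k[i], t->stop_nb_k[i]);
 *   CU_RET_ERR(stat, "cudaEventElapsedTime failed");
 *   timings->ktime[iplist][ikernel].t += ms;
 *   timings->ktime[iplist][ikernel].c += 1;
 */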

/*! Initializes the timings data structure. */
static void init_timings(wallclock_gpu_t *t)
{
    int i, j;

    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            t->ktime[i][j].t = 0.0;
            t->ktime[i][j].c = 0;
        }
    }
}

void nbnxn_cuda_init(FILE                 *fplog,
                     nbnxn_cuda_ptr_t     *p_cu_nb,
                     const gmx_gpu_info_t *gpu_info,
                     const gmx_gpu_opt_t  *gpu_opt,
                     int                   my_gpu_index,
                     gmx_bool              bLocalAndNonlocal)
{
    cudaError_t      stat;
    nbnxn_cuda_ptr_t nb;
    char             sbuf[STRLEN];
    bool             bStreamSync, bNoStreamSync, bTMPIAtomics, bX86, bOldDriver;
    int              cuda_drv_ver;

    snew(nb, 1);
    snew(nb->atdat, 1);
    snew(nb->nbparam, 1);
    snew(nb->plist[eintLocal], 1);
    if (bLocalAndNonlocal)
    {
        snew(nb->plist[eintNonlocal], 1);
    }

    nb->bUseTwoStreams = bLocalAndNonlocal;

    snew(nb->timers, 1);
    snew(nb->timings, 1);

    pmalloc((void**)&nb->nbst.e_lj, sizeof(*nb->nbst.e_lj));
    pmalloc((void**)&nb->nbst.e_el, sizeof(*nb->nbst.e_el));
    pmalloc((void**)&nb->nbst.fshift, SHIFTS * sizeof(*nb->nbst.fshift));

    init_plist(nb->plist[eintLocal]);

    /* set device info, just point it to the right GPU among the detected ones */
    nb->dev_info = &gpu_info->cuda_dev[get_gpu_device_id(gpu_info, gpu_opt, my_gpu_index)];

    /* local/non-local GPU streams */
    stat = cudaStreamCreate(&nb->stream[eintLocal]);
    CU_RET_ERR(stat, "cudaStreamCreate on stream[eintLocal] failed");
    if (nb->bUseTwoStreams)
    {
        init_plist(nb->plist[eintNonlocal]);

        /* CUDA stream priorities are available in the CUDA RT 5.5 API.
         * Note that the device we're running on does not have to support
         * priorities, because we are querying the priority range, which in
         * that case will be a single value.
         */
#if CUDA_VERSION >= 5500
        {
            int highest_priority;
            stat = cudaDeviceGetStreamPriorityRange(NULL, &highest_priority);
            CU_RET_ERR(stat, "cudaDeviceGetStreamPriorityRange failed");

            stat = cudaStreamCreateWithPriority(&nb->stream[eintNonlocal],
                                                cudaStreamDefault,
                                                highest_priority);
            CU_RET_ERR(stat, "cudaStreamCreateWithPriority on stream[eintNonlocal] failed");
        }
#else
        stat = cudaStreamCreate(&nb->stream[eintNonlocal]);
        CU_RET_ERR(stat, "cudaStreamCreate on stream[eintNonlocal] failed");
#endif
    }

    /* init events for synchronization (timing disabled for performance reasons!) */
    stat = cudaEventCreateWithFlags(&nb->nonlocal_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on nonlocal_done failed");
    stat = cudaEventCreateWithFlags(&nb->misc_ops_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on misc_ops_done failed");

    /* On GPUs with ECC enabled, cudaStreamSynchronize shows a large overhead
     * (which increases with shorter time/step) caused by a known CUDA driver bug.
     * To work around the issue we'll use an (admittedly fragile) memory polling
     * wait to preserve performance. This requires support for atomic
     * operations and only works on x86/x86_64.
     * With polling wait, event timing also needs to be disabled.
     *
     * The overhead is greatly reduced in API v5.0 drivers and the improvement
     * is independent of runtime version. Hence, with API v5.0 drivers and later
     * we won't switch to polling.
     *
     * NOTE: Unfortunately, this is known to fail when GPUs are shared by (t)MPI
     * ranks, so we will also disable it in that case.
     */
    bStreamSync   = getenv("GMX_CUDA_STREAMSYNC") != NULL;
    bNoStreamSync = getenv("GMX_NO_CUDA_STREAMSYNC") != NULL;

#ifdef TMPI_ATOMICS
    bTMPIAtomics = true;
#else
    bTMPIAtomics = false;
#endif

#ifdef GMX_TARGET_X86
    bX86 = true;
#else
    bX86 = false;
#endif

    if (bStreamSync && bNoStreamSync)
    {
        gmx_fatal(FARGS, "Conflicting environment variables: both GMX_CUDA_STREAMSYNC and GMX_NO_CUDA_STREAMSYNC defined");
    }

    stat = cudaDriverGetVersion(&cuda_drv_ver);
    CU_RET_ERR(stat, "cudaDriverGetVersion failed");

    bOldDriver = (cuda_drv_ver < 5000);

    if ((nb->dev_info->prop.ECCEnabled == 1) && bOldDriver)
    {
        /* Polling wait should be used instead of cudaStreamSynchronize only if:
         *   - ECC is ON & driver is old (checked above),
         *   - we're on x86/x86_64,
         *   - atomics are available, and
         *   - GPUs are not being shared.
         */
        bool bShouldUsePollSync = (bX86 && bTMPIAtomics &&
                                   (gmx_count_gpu_dev_shared(gpu_opt) < 1));

        if (bStreamSync)
        {
            nb->bUseStreamSync = true;

            /* only warn if polling should be used */
            if (bShouldUsePollSync)
            {
                md_print_warn(fplog,
                              "NOTE: Using a GPU with ECC enabled and CUDA driver API version <5.0, but\n"
                              "      cudaStreamSynchronize waiting is forced by the GMX_CUDA_STREAMSYNC env. var.\n");
            }
        }
        else
        {
            nb->bUseStreamSync = !bShouldUsePollSync;

            if (bShouldUsePollSync)
            {
                md_print_warn(fplog,
                              "NOTE: Using a GPU with ECC enabled and CUDA driver API version <5.0, known to\n"
                              "      cause performance loss. Switching to the alternative polling GPU wait.\n"
                              "      If you encounter issues, switch back to standard GPU waiting by setting\n"
                              "      the GMX_CUDA_STREAMSYNC environment variable.\n");
            }
            else
            {
                /* Tell the user that the ECC+old driver combination can be bad */
                sprintf(sbuf,
                        "NOTE: Using a GPU with ECC enabled and CUDA driver API version <5.0.\n"
                        "      A known bug in this driver version can cause performance loss.\n"
                        "      However, the polling wait workaround cannot be used because\n%s\n"
                        "      Consider updating the driver or turning ECC off.",
                        (bX86 && bTMPIAtomics) ?
                        "      GPU(s) are being oversubscribed." :
                        "      atomic operations are not supported by the platform/CPU+compiler.");
                md_print_warn(fplog, sbuf);
            }
        }
    }
    else
    {
        if (bNoStreamSync)
        {
            nb->bUseStreamSync = false;
            md_print_warn(fplog,
                          "NOTE: Polling wait for GPU synchronization requested by GMX_NO_CUDA_STREAMSYNC\n");
        }
        else
        {
            /* no/off ECC, cudaStreamSynchronize not turned off by env. var. */
            nb->bUseStreamSync = true;
        }
    }

    /* CUDA timing disabled as event timers don't work:
       - with multiple streams = domain-decomposition;
       - with the polling wait hack (without cudaStreamSynchronize);
       - when turned off by GMX_DISABLE_CUDA_TIMING.
     */
    nb->bDoTime = (!nb->bUseTwoStreams && nb->bUseStreamSync &&
                   (getenv("GMX_DISABLE_CUDA_TIMING") == NULL));

    if (nb->bDoTime)
    {
        init_timers(nb->timers, nb->bUseTwoStreams);
        init_timings(nb->timings);
    }

    /* set the kernel type for the current GPU */
    /* pick L1 cache configuration */
    nbnxn_cuda_set_cacheconfig(nb->dev_info);

    *p_cu_nb = nb;

    if (debug)
    {
        fprintf(debug, "Initialized CUDA data structures.\n");
    }
}

void nbnxn_cuda_init_const(nbnxn_cuda_ptr_t                cu_nb,
                           const interaction_const_t      *ic,
                           const nonbonded_verlet_group_t *nbv_group)
{
    init_atomdata_first(cu_nb->atdat, nbv_group[0].nbat->ntype);
    init_nbparam(cu_nb->nbparam, ic, nbv_group[0].nbat, cu_nb->dev_info);

    /* clear energy and shift force outputs */
    nbnxn_cuda_clear_e_fshift(cu_nb);
}

void nbnxn_cuda_init_pairlist(nbnxn_cuda_ptr_t        cu_nb,
                              const nbnxn_pairlist_t *h_plist,
                              int                     iloc)
{
    char          sbuf[STRLEN];
    cudaError_t   stat;
    bool          bDoTime = cu_nb->bDoTime;
    cudaStream_t  stream  = cu_nb->stream[iloc];
    cu_plist_t   *d_plist = cu_nb->plist[iloc];

    if (d_plist->na_c < 0)
    {
        d_plist->na_c = h_plist->na_ci;
    }
    else if (d_plist->na_c != h_plist->na_ci)
    {
        sprintf(sbuf, "In cu_init_plist: the #atoms per cell has changed (from %d to %d)",
                d_plist->na_c, h_plist->na_ci);
        gmx_incons(sbuf);
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(cu_nb->timers->start_pl_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    cu_realloc_buffered((void **)&d_plist->sci, h_plist->sci, sizeof(*d_plist->sci),
                        &d_plist->nsci, &d_plist->sci_nalloc,
                        h_plist->nsci, stream, true);

    cu_realloc_buffered((void **)&d_plist->cj4, h_plist->cj4, sizeof(*d_plist->cj4),
                        &d_plist->ncj4, &d_plist->cj4_nalloc,
                        h_plist->ncj4, stream, true);

    cu_realloc_buffered((void **)&d_plist->excl, h_plist->excl, sizeof(*d_plist->excl),
                        &d_plist->nexcl, &d_plist->excl_nalloc,
                        h_plist->nexcl, stream, true);

    if (bDoTime)
    {
        stat = cudaEventRecord(cu_nb->timers->stop_pl_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* need to prune the pair list during the next step */
    d_plist->bDoPrune = true;
}

void nbnxn_cuda_upload_shiftvec(nbnxn_cuda_ptr_t        cu_nb,
                                const nbnxn_atomdata_t *nbatom)
{
    cu_atomdata_t *adat = cu_nb->atdat;
    cudaStream_t   ls   = cu_nb->stream[eintLocal];

    /* only if we have a dynamic box */
    if (nbatom->bDynamicBox || !adat->bShiftVecUploaded)
    {
        cu_copy_H2D_async(adat->shift_vec, nbatom->shift_vec,
                          SHIFTS * sizeof(*adat->shift_vec), ls);
        adat->bShiftVecUploaded = true;
    }
}

/*! Clears the first natoms_clear elements of the GPU nonbonded force output array. */
static void nbnxn_cuda_clear_f(nbnxn_cuda_ptr_t cu_nb, int natoms_clear)
{
    cudaError_t    stat;
    cu_atomdata_t *adat = cu_nb->atdat;
    cudaStream_t   ls   = cu_nb->stream[eintLocal];

    stat = cudaMemsetAsync(adat->f, 0, natoms_clear * sizeof(*adat->f), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on f failed");
}

/*! Clears the nonbonded shift force output array and energy outputs on the GPU. */
static void nbnxn_cuda_clear_e_fshift(nbnxn_cuda_ptr_t cu_nb)
{
    cudaError_t    stat;
    cu_atomdata_t *adat = cu_nb->atdat;
    cudaStream_t   ls   = cu_nb->stream[eintLocal];

    stat = cudaMemsetAsync(adat->fshift, 0, SHIFTS * sizeof(*adat->fshift), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on fshift failed");
    stat = cudaMemsetAsync(adat->e_lj, 0, sizeof(*adat->e_lj), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on e_lj failed");
    stat = cudaMemsetAsync(adat->e_el, 0, sizeof(*adat->e_el), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on e_el failed");
}

void nbnxn_cuda_clear_outputs(nbnxn_cuda_ptr_t cu_nb, int flags)
{
    nbnxn_cuda_clear_f(cu_nb, cu_nb->atdat->natoms);
    /* clear shift force array and energies if the outputs were
       used in the current step */
    if (flags & GMX_FORCE_VIRIAL)
    {
        nbnxn_cuda_clear_e_fshift(cu_nb);
    }
}

void nbnxn_cuda_init_atomdata(nbnxn_cuda_ptr_t        cu_nb,
                              const nbnxn_atomdata_t *nbat)
{
    cudaError_t    stat;
    int            nalloc, natoms;
    bool           realloced = false;
    bool           bDoTime   = cu_nb->bDoTime;
    cu_timers_t   *timers    = cu_nb->timers;
    cu_atomdata_t *d_atdat   = cu_nb->atdat;
    cudaStream_t   ls        = cu_nb->stream[eintLocal];

    natoms = nbat->natoms;

    if (bDoTime)
    {
        /* time async copy */
        stat = cudaEventRecord(timers->start_atdat, ls);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* need to reallocate if we have to copy more atoms than the amount of space
       available and only allocate if we haven't initialized yet, i.e. d_atdat->natoms == -1 */
    if (natoms > d_atdat->nalloc)
    {
        nalloc = over_alloc_small(natoms);

        /* free up first if the arrays have already been initialized */
        if (d_atdat->nalloc != -1)
        {
            cu_free_buffered(d_atdat->f, &d_atdat->natoms, &d_atdat->nalloc);
            cu_free_buffered(d_atdat->xq);
            cu_free_buffered(d_atdat->atom_types);
        }

        stat = cudaMalloc((void **)&d_atdat->f, nalloc*sizeof(*d_atdat->f));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->f");
        stat = cudaMalloc((void **)&d_atdat->xq, nalloc*sizeof(*d_atdat->xq));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->xq");

        stat = cudaMalloc((void **)&d_atdat->atom_types, nalloc*sizeof(*d_atdat->atom_types));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->atom_types");

        d_atdat->nalloc = nalloc;
        realloced       = true;
    }

    d_atdat->natoms       = natoms;
    d_atdat->natoms_local = nbat->natoms_local;

    /* need to clear GPU f output if realloc happened */
    if (realloced)
    {
        nbnxn_cuda_clear_f(cu_nb, nalloc);
    }

    cu_copy_H2D_async(d_atdat->atom_types, nbat->type,
                      natoms*sizeof(*d_atdat->atom_types), ls);

    if (bDoTime)
    {
        stat = cudaEventRecord(timers->stop_atdat, ls);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
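
/* over_alloc_small() requests more space than strictly needed so that small
 * fluctuations in natoms between pair searches do not trigger a free/malloc
 * cycle on every call. A sketch of the amortized-growth idea (the exact rule
 * lives in the GROMACS allocation utilities, not here):
 *
 *   nalloc = (int)(1.2*natoms) + 100;   // assumed-illustrative growth rule
 */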

void nbnxn_cuda_free(nbnxn_cuda_ptr_t cu_nb)
{
    cudaError_t      stat;
    cu_atomdata_t   *atdat;
    cu_nbparam_t    *nbparam;
    cu_plist_t      *plist, *plist_nl;
    cu_timers_t     *timers;

    if (cu_nb == NULL)
    {
        return;
    }

    atdat    = cu_nb->atdat;
    nbparam  = cu_nb->nbparam;
    plist    = cu_nb->plist[eintLocal];
    plist_nl = cu_nb->plist[eintNonlocal];
    timers   = cu_nb->timers;

    if (nbparam->eeltype == eelCuEWALD_TAB || nbparam->eeltype == eelCuEWALD_TAB_TWIN)
    {
#ifdef TEXOBJ_SUPPORTED
        /* Only devices of CC >= 3.0 (Kepler and later) support texture objects */
        if (cu_nb->dev_info->prop.major >= 3)
        {
            stat = cudaDestroyTextureObject(nbparam->coulomb_tab_texobj);
            CU_RET_ERR(stat, "cudaDestroyTextureObject on coulomb_tab_texobj failed");
        }
        else
#endif  /* TEXOBJ_SUPPORTED */
        {
            stat = cudaUnbindTexture(nbnxn_cuda_get_coulomb_tab_texref());
            CU_RET_ERR(stat, "cudaUnbindTexture on coulomb_tab_texref failed");
        }
        cu_free_buffered(nbparam->coulomb_tab, &nbparam->coulomb_tab_size);
    }

    stat = cudaEventDestroy(cu_nb->nonlocal_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on nonlocal_done");
    stat = cudaEventDestroy(cu_nb->misc_ops_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on misc_ops_done");

    stat = cudaEventDestroy(timers->start_atdat);
    CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_atdat");
    stat = cudaEventDestroy(timers->stop_atdat);
    CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_atdat");

    /* The non-local counters/stream (second in the array) are needed only with DD. */
    for (int i = 0; i <= (cu_nb->bUseTwoStreams ? 1 : 0); i++)
    {
        stat = cudaEventDestroy(timers->start_nb_k[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_nb_k");
        stat = cudaEventDestroy(timers->stop_nb_k[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_nb_k");

        stat = cudaEventDestroy(timers->start_pl_h2d[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_pl_h2d");
        stat = cudaEventDestroy(timers->stop_pl_h2d[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_pl_h2d");

        stat = cudaStreamDestroy(cu_nb->stream[i]);
        CU_RET_ERR(stat, "cudaStreamDestroy failed on stream");

        stat = cudaEventDestroy(timers->start_nb_h2d[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_nb_h2d");
        stat = cudaEventDestroy(timers->stop_nb_h2d[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_nb_h2d");

        stat = cudaEventDestroy(timers->start_nb_d2h[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_nb_d2h");
        stat = cudaEventDestroy(timers->stop_nb_d2h[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_nb_d2h");
    }

#ifdef TEXOBJ_SUPPORTED
    /* Only devices of CC >= 3.0 (Kepler and later) support texture objects */
    if (cu_nb->dev_info->prop.major >= 3)
    {
        stat = cudaDestroyTextureObject(nbparam->nbfp_texobj);
        CU_RET_ERR(stat, "cudaDestroyTextureObject on nbfp_texobj failed");
    }
    else
#endif  /* TEXOBJ_SUPPORTED */
    {
        stat = cudaUnbindTexture(nbnxn_cuda_get_nbfp_texref());
        CU_RET_ERR(stat, "cudaUnbindTexture on nbfp_texref failed");
    }
    cu_free_buffered(nbparam->nbfp);

    stat = cudaFree(atdat->shift_vec);
    CU_RET_ERR(stat, "cudaFree failed on atdat->shift_vec");
    stat = cudaFree(atdat->fshift);
    CU_RET_ERR(stat, "cudaFree failed on atdat->fshift");

    stat = cudaFree(atdat->e_lj);
    CU_RET_ERR(stat, "cudaFree failed on atdat->e_lj");
    stat = cudaFree(atdat->e_el);
    CU_RET_ERR(stat, "cudaFree failed on atdat->e_el");

    cu_free_buffered(atdat->f, &atdat->natoms, &atdat->nalloc);
    cu_free_buffered(atdat->xq);
    cu_free_buffered(atdat->atom_types, &atdat->ntypes);

    cu_free_buffered(plist->sci, &plist->nsci, &plist->sci_nalloc);
    cu_free_buffered(plist->cj4, &plist->ncj4, &plist->cj4_nalloc);
    cu_free_buffered(plist->excl, &plist->nexcl, &plist->excl_nalloc);
    if (cu_nb->bUseTwoStreams)
    {
        cu_free_buffered(plist_nl->sci, &plist_nl->nsci, &plist_nl->sci_nalloc);
        cu_free_buffered(plist_nl->cj4, &plist_nl->ncj4, &plist_nl->cj4_nalloc);
        cu_free_buffered(plist_nl->excl, &plist_nl->nexcl, &plist_nl->excl_nalloc);
    }

    sfree(atdat);
    sfree(nbparam);
    sfree(plist);
    if (cu_nb->bUseTwoStreams)
    {
        sfree(plist_nl);
    }
    sfree(timers);
    sfree(cu_nb->timings);
    sfree(cu_nb);

    if (debug)
    {
        fprintf(debug, "Cleaned up CUDA data structures.\n");
    }
}

void cu_synchstream_atdat(nbnxn_cuda_ptr_t cu_nb, int iloc)
{
    cudaError_t  stat;
    cudaStream_t stream = cu_nb->stream[iloc];

    stat = cudaStreamWaitEvent(stream, cu_nb->timers->stop_atdat, 0);
    CU_RET_ERR(stat, "cudaStreamWaitEvent failed");
}

wallclock_gpu_t * nbnxn_cuda_get_timings(nbnxn_cuda_ptr_t cu_nb)
{
    return (cu_nb != NULL && cu_nb->bDoTime) ? cu_nb->timings : NULL;
}

void nbnxn_cuda_reset_timings(nbnxn_cuda_ptr_t cu_nb)
{
    if (cu_nb->bDoTime)
    {
        init_timings(cu_nb->timings);
    }
}

int nbnxn_cuda_min_ci_balanced(nbnxn_cuda_ptr_t cu_nb)
{
    return cu_nb != NULL ?
           gpu_min_ci_balanced_factor*cu_nb->dev_info->prop.multiProcessorCount : 0;
}

gmx_bool nbnxn_cuda_is_kernel_ewald_analytical(const nbnxn_cuda_ptr_t cu_nb)
{
    return ((cu_nb->nbparam->eeltype == eelCuEWALD_ANA) ||
            (cu_nb->nbparam->eeltype == eelCuEWALD_ANA_TWIN));
}
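
/* Typical host-side call sequence for this module (illustrative):
 *
 *   nbnxn_cuda_ptr_t cu_nb;
 *   nbnxn_cuda_init(fplog, &cu_nb, gpu_info, gpu_opt, my_gpu_index, bDomDec);
 *   nbnxn_cuda_init_const(cu_nb, ic, nbv_grp);            // parameters + tables
 *   nbnxn_cuda_init_atomdata(cu_nb, nbat);                // after each pair search
 *   nbnxn_cuda_init_pairlist(cu_nb, h_plist, eintLocal);  // upload pair list(s)
 *   ...
 *   nbnxn_cuda_free(cu_nb);                               // release all GPU resources
 */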