/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>

#include "gmx_fatal.h"
#include "smalloc.h"
#include "tables.h"
#include "typedefs.h"
#include "types/nb_verlet.h"
#include "types/interaction_const.h"
#include "types/force_flags.h"
#include "../nbnxn_consts.h"

#include "nbnxn_cuda_types.h"
#include "../../gmxlib/cuda_tools/cudautils.cuh"
#include "nbnxn_cuda_data_mgmt.h"
#include "pmalloc_cuda.h"
#include "gpu_utils.h"
static bool bUseCudaEventBlockingSync = false; /* makes the CPU thread block */

/* Heuristically determined parameter for the Fermi architecture: the minimum
 * size of ci lists is obtained by multiplying this constant by the number of
 * multiprocessors on the current device.
 */
static unsigned int gpu_min_ci_balanced_factor = 40;
/* Functions from nbnxn_cuda.cu */
extern void nbnxn_cuda_set_cacheconfig(cuda_dev_info_t *devinfo);
extern const struct texture<float, 1, cudaReadModeElementType>& nbnxn_cuda_get_nbfp_texref();
extern const struct texture<float, 1, cudaReadModeElementType>& nbnxn_cuda_get_coulomb_tab_texref();
/* We should actually be using md_print_warn in md_logging.c,
 * but we can't include mpi.h in CUDA code.
 */
static void md_print_warn(FILE *fplog, const char *fmt, ...)
{
    va_list ap;

    if (fplog != NULL)
    {
        /* We should only print to stderr on the master node,
         * in most cases fplog is only set on the master node, so this works.
         */
        va_start(ap, fmt);
        fprintf(stderr, "\n");
        vfprintf(stderr, fmt, ap);
        fprintf(stderr, "\n");
        va_end(ap);

        va_start(ap, fmt);
        vfprintf(fplog, fmt, ap);
        va_end(ap);
    }
}
/* Fw. decl. */
static void nbnxn_cuda_clear_e_fshift(nbnxn_cuda_ptr_t cu_nb);
/*! Tabulates the Ewald Coulomb force and initializes the size/scale
    and the table GPU array. If called with an already allocated table,
    it just re-uploads the table.
 */
static void init_ewald_coulomb_force_table(cu_nbparam_t *nbp)
{
    float       *ftmp, *coul_tab;
    int          tabsize;
    double       tabscale;
    cudaError_t  stat;

    tabsize = GPU_EWALD_COULOMB_FORCE_TABLE_SIZE;
    /* Subtract 2 instead of 1 to avoid access out of range due to rounding */
    tabscale = (tabsize - 2) / sqrt(nbp->rcoulomb_sq);

    pmalloc((void**)&ftmp, tabsize*sizeof(*ftmp));

    table_spline3_fill_ewald_lr(ftmp, NULL, NULL, tabsize,
                                1/tabscale, nbp->ewald_beta);

    /* If the table pointer == NULL the table is generated the first time =>
       the array pointer will be saved to nbparam and the texture is bound.
     */
    coul_tab = nbp->coulomb_tab;
    if (coul_tab == NULL)
    {
        stat = cudaMalloc((void **)&coul_tab, tabsize*sizeof(*coul_tab));
        CU_RET_ERR(stat, "cudaMalloc failed on coul_tab");

        nbp->coulomb_tab = coul_tab;

        cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
        stat = cudaBindTexture(NULL, &nbnxn_cuda_get_coulomb_tab_texref(),
                               coul_tab, &cd, tabsize*sizeof(*coul_tab));
        CU_RET_ERR(stat, "cudaBindTexture on coul_tab failed");
    }

    cu_copy_H2D(coul_tab, ftmp, tabsize*sizeof(*coul_tab));

    nbp->coulomb_tab_size  = tabsize;
    nbp->coulomb_tab_scale = tabscale;

    pfree(ftmp);
}
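
#if 0
/* Illustrative host-side sketch (not the actual kernel code): with the scale
 * chosen above, a distance r in [0, rcoulomb] maps to the fractional table
 * coordinate x = r*tabscale, and reserving two points (tabsize - 2) keeps the
 * i+1 access of the linear interpolation in range even after rounding: */
static float sketch_coulomb_table_lookup(const float *tab, float tabscale, float r)
{
    float x    = r*tabscale;
    int   i    = (int)x;            /* i <= tabsize - 2 for r <= rcoulomb */
    float frac = x - (float)i;

    return (1.0f - frac)*tab[i] + frac*tab[i + 1];
}
#endif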
/*! Initializes the atomdata structure the first time; it only gets filled
    at pair-search time. */
static void init_atomdata_first(cu_atomdata_t *ad, int ntypes)
{
    cudaError_t stat;

    ad->ntypes = ntypes;
    stat = cudaMalloc((void**)&ad->shift_vec, SHIFTS*sizeof(*ad->shift_vec));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->shift_vec");
    ad->bShiftVecUploaded = false;

    stat = cudaMalloc((void**)&ad->fshift, SHIFTS*sizeof(*ad->fshift));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->fshift");

    stat = cudaMalloc((void**)&ad->e_lj, sizeof(*ad->e_lj));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->e_lj");
    stat = cudaMalloc((void**)&ad->e_el, sizeof(*ad->e_el));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->e_el");

    /* initialize to NULL pointers to data that is not allocated here and will
       need reallocation in nbnxn_cuda_init_atomdata */
    ad->xq = NULL;
    ad->f  = NULL;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    ad->natoms = -1;
    ad->nalloc = -1;
}
/*! Initializes the nonbonded parameter data structure. */
static void init_nbparam(cu_nbparam_t              *nbp,
                         const interaction_const_t *ic,
                         const nonbonded_verlet_t  *nbv)
{
    cudaError_t stat;
    int         ntypes, nnbfp;

    ntypes = nbv->grp[0].nbat->ntype;

    nbp->ewald_beta  = ic->ewaldcoeff;
    nbp->sh_ewald    = ic->sh_ewald;
    nbp->epsfac      = ic->epsfac;
    nbp->two_k_rf    = 2.0 * ic->k_rf;
    nbp->c_rf        = ic->c_rf;
    nbp->rvdw_sq     = ic->rvdw * ic->rvdw;
    nbp->rcoulomb_sq = ic->rcoulomb * ic->rcoulomb;
    nbp->rlist_sq    = ic->rlist * ic->rlist;
    nbp->sh_invrc6   = ic->sh_invrc6;

    if (ic->eeltype == eelCUT)
    {
        nbp->eeltype = eelCuCUT;
    }
    else if (EEL_RF(ic->eeltype))
    {
        nbp->eeltype = eelCuRF;
    }
    else if ((EEL_PME(ic->eeltype) || ic->eeltype == eelEWALD))
    {
        /* Initially rcoulomb == rvdw, so it's surely not twin cut-off, unless
           forced by the env. var. (used only for benchmarking). */
        if (getenv("GMX_CUDA_NB_EWALD_TWINCUT") == NULL)
        {
            nbp->eeltype = eelCuEWALD;
        }
        else
        {
            nbp->eeltype = eelCuEWALD_TWIN;
        }
    }
    else
    {
        /* Shouldn't happen, as this is checked when choosing Verlet-scheme */
        gmx_incons("The requested electrostatics type is not implemented in the CUDA GPU accelerated kernels!");
    }

    /* generate table for PME */
    if (nbp->eeltype == eelCuEWALD)
    {
        nbp->coulomb_tab = NULL;
        init_ewald_coulomb_force_table(nbp);
    }

    nnbfp = 2*ntypes*ntypes;
    stat  = cudaMalloc((void **)&nbp->nbfp, nnbfp*sizeof(*nbp->nbfp));
    CU_RET_ERR(stat, "cudaMalloc failed on nbp->nbfp");
    cu_copy_H2D(nbp->nbfp, nbv->grp[0].nbat->nbfp, nnbfp*sizeof(*nbp->nbfp));

    cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
    stat = cudaBindTexture(NULL, &nbnxn_cuda_get_nbfp_texref(),
                           nbp->nbfp, &cd, nnbfp*sizeof(*nbp->nbfp));
    CU_RET_ERR(stat, "cudaBindTexture on nbfp failed");
}
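
#if 0
/* Host-side sketch (an assumption about the layout implied by
 * nnbfp = 2*ntypes*ntypes): the table holds an interleaved (c6, c12) pair per
 * atom-type combination, so a parameter fetch amounts to: */
static void sketch_get_lj_params(const float *nbfp, int ntypes,
                                 int typei, int typej,
                                 float *c6, float *c12)
{
    int idx = 2*(ntypes*typei + typej);

    *c6  = nbfp[idx];
    *c12 = nbfp[idx + 1];
}
#endif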
/*! Re-generates the GPU Ewald force table, resets rlist, and updates the
 *  electrostatic type, switching to twin cut-off (or back) if needed. */
void nbnxn_cuda_pme_loadbal_update_param(nbnxn_cuda_ptr_t cu_nb,
                                         const interaction_const_t *ic)
{
    cu_nbparam_t *nbp = cu_nb->nbparam;

    nbp->rlist_sq    = ic->rlist * ic->rlist;
    nbp->rcoulomb_sq = ic->rcoulomb * ic->rcoulomb;
    nbp->ewald_beta  = ic->ewaldcoeff;

    /* When switching to/from twin cut-off, the electrostatics type needs updating.
       (The env. var. that forces twin cut-off is for benchmarking only!) */
    if (ic->rcoulomb == ic->rvdw &&
        getenv("GMX_CUDA_NB_EWALD_TWINCUT") == NULL)
    {
        nbp->eeltype = eelCuEWALD;
    }
    else
    {
        nbp->eeltype = eelCuEWALD_TWIN;
    }

    init_ewald_coulomb_force_table(cu_nb->nbparam);
}
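
#if 0
/* Hypothetical caller sketch: during PME load balancing the tuning code
 * adjusts the Coulomb cut-off and Ewald coefficient in the interaction
 * constants, then pushes the new values to the GPU (the names below are
 * illustrative, not the actual tuning code): */
static void sketch_apply_pme_tuning(nbnxn_cuda_ptr_t cu_nb,
                                    interaction_const_t *ic,
                                    real rcoulomb_new, real ewaldcoeff_new)
{
    ic->rcoulomb   = rcoulomb_new;
    ic->ewaldcoeff = ewaldcoeff_new;

    /* re-tabulates the force table and switches to/from twin cut-off */
    nbnxn_cuda_pme_loadbal_update_param(cu_nb, ic);
}
#endif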
/*! Initializes the pair list data structure. */
static void init_plist(cu_plist_t *pl)
{
    /* initialize to NULL pointers to data that is not allocated here and will
       need reallocation in nbnxn_cuda_init_pairlist */
    pl->sci = NULL;  pl->cj4 = NULL;  pl->excl = NULL;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    pl->na_c  = -1;
    pl->nsci  = -1;  pl->sci_nalloc  = -1;
    pl->ncj4  = -1;  pl->cj4_nalloc  = -1;
    pl->nexcl = -1;  pl->excl_nalloc = -1;
    pl->bDoPrune = false;
}
/*! Initializes the timer data structure. */
static void init_timers(cu_timers_t *t, bool bUseTwoStreams)
{
    cudaError_t stat;
    int         eventflags = (bUseCudaEventBlockingSync ? cudaEventBlockingSync : cudaEventDefault);
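
    /* Note: with cudaEventBlockingSync, cudaEventSynchronize on these events
     * yields the calling CPU thread to the OS instead of busy-waiting, which
     * lowers CPU load at the cost of wake-up latency; cudaEventDefault gives
     * the lowest-latency spin wait. */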

    stat = cudaEventCreateWithFlags(&(t->start_atdat), eventflags);
    CU_RET_ERR(stat, "cudaEventCreate on start_atdat failed");
    stat = cudaEventCreateWithFlags(&(t->stop_atdat), eventflags);
    CU_RET_ERR(stat, "cudaEventCreate on stop_atdat failed");

    /* The non-local counters/stream (second in the array) are needed only with DD. */
    for (int i = 0; i <= (bUseTwoStreams ? 1 : 0); i++)
    {
        stat = cudaEventCreateWithFlags(&(t->start_nb_k[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on start_nb_k failed");
        stat = cudaEventCreateWithFlags(&(t->stop_nb_k[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on stop_nb_k failed");

        stat = cudaEventCreateWithFlags(&(t->start_pl_h2d[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on start_pl_h2d failed");
        stat = cudaEventCreateWithFlags(&(t->stop_pl_h2d[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on stop_pl_h2d failed");

        stat = cudaEventCreateWithFlags(&(t->start_nb_h2d[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on start_nb_h2d failed");
        stat = cudaEventCreateWithFlags(&(t->stop_nb_h2d[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on stop_nb_h2d failed");

        stat = cudaEventCreateWithFlags(&(t->start_nb_d2h[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on start_nb_d2h failed");
        stat = cudaEventCreateWithFlags(&(t->stop_nb_d2h[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on stop_nb_d2h failed");
    }
}
/*! Initializes the timings data structure. */
static void init_timings(wallclock_gpu_t *t)
{
    int i, j;

    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            t->ktime[i][j].t = 0.0;
            t->ktime[i][j].c = 0;
        }
    }
}
/* Decide which kernel version to use (default or legacy) based on:
 *  - CUDA version used for compilation
 *  - non-bonded kernel selector environment variables
 *  - GPU architecture version
 */
static int pick_nbnxn_kernel_version(FILE            *fplog,
                                     cuda_dev_info_t *devinfo)
{
    bool bForceLegacyKernel, bForceDefaultKernel, bCUDA40, bCUDA32;
    char sbuf[STRLEN];
    int  kver;

    /* Legacy kernel (former k2), kept for backward compatibility as it is
       faster than the default with CUDA 3.2/4.0 on Fermi (not on Kepler). */
    bForceLegacyKernel  = (getenv("GMX_CUDA_NB_LEGACY") != NULL);
    /* default kernel (former k3). */
    bForceDefaultKernel = (getenv("GMX_CUDA_NB_DEFAULT") != NULL);

    if ((unsigned)(bForceLegacyKernel + bForceDefaultKernel) > 1)
    {
        gmx_fatal(FARGS, "Multiple CUDA non-bonded kernels requested; to manually pick a kernel set only one \n"
                  "of the following environment variables: \n"
                  "GMX_CUDA_NB_DEFAULT, GMX_CUDA_NB_LEGACY");
    }

    bCUDA32 = bCUDA40 = false;
#if CUDA_VERSION == 3200
    bCUDA32 = true;
    sprintf(sbuf, "3.2");
#elif CUDA_VERSION == 4000
    bCUDA40 = true;
    sprintf(sbuf, "4.0");
#endif

    /* default is default ;) */
    kver = eNbnxnCuKDefault;

    /* Consider switching to legacy kernels only on Fermi */
    if (devinfo->prop.major < 3 && (bCUDA32 || bCUDA40))
    {
        /* use legacy kernel unless something else is forced by an env. var */
        if (bForceDefaultKernel)
        {
            md_print_warn(fplog,
                          "NOTE: CUDA %s compilation detected; with this compiler version the legacy\n"
                          "      non-bonded kernels perform best. However, the default kernels were\n"
                          "      selected by the GMX_CUDA_NB_DEFAULT environment variable.\n"
                          "      For best performance upgrade your CUDA toolkit.\n",
                          sbuf);
        }
        else
        {
            kver = eNbnxnCuKLegacy;
        }
    }
    else
    {
        /* issue note if the non-default kernel is forced by an env. var */
        if (bForceLegacyKernel)
        {
            md_print_warn(fplog,
                          "NOTE: Legacy non-bonded CUDA kernels selected by the GMX_CUDA_NB_LEGACY\n"
                          "      env. var. Consider using the default kernels which should be faster!\n");

            kver = eNbnxnCuKLegacy;
        }
    }

    return kver;
}
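
/* Illustrative shell usage (assumed typical invocation; for benchmarking only):
 *     GMX_CUDA_NB_LEGACY=1  mdrun ...   forces the legacy kernels
 *     GMX_CUDA_NB_DEFAULT=1 mdrun ...   forces the default kernels
 * Setting both triggers the gmx_fatal() above. */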
void nbnxn_cuda_init(FILE             *fplog,
                     nbnxn_cuda_ptr_t *p_cu_nb,
                     gmx_gpu_info_t   *gpu_info, int my_gpu_index,
                     gmx_bool          bLocalAndNonlocal)
{
    cudaError_t      stat;
    nbnxn_cuda_ptr_t nb;
    char             sbuf[STRLEN];
    bool             bStreamSync, bNoStreamSync, bTMPIAtomics, bX86, bOldDriver;
    int              cuda_drv_ver;

    if (p_cu_nb == NULL) return;

    snew(nb, 1);
    snew(nb->atdat, 1);
    snew(nb->nbparam, 1);
    snew(nb->plist[eintLocal], 1);
    if (bLocalAndNonlocal)
    {
        snew(nb->plist[eintNonlocal], 1);
    }

    nb->bUseTwoStreams = bLocalAndNonlocal;

    snew(nb->timers, 1);
    snew(nb->timings, 1);

    /* init staging buffers for the nonbonded outputs */
    pmalloc((void**)&nb->nbst.e_lj, sizeof(*nb->nbst.e_lj));
    pmalloc((void**)&nb->nbst.e_el, sizeof(*nb->nbst.e_el));
    pmalloc((void**)&nb->nbst.fshift, SHIFTS * sizeof(*nb->nbst.fshift));
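
    /* Note: pmalloc() (see pmalloc_cuda.h) is assumed to hand back page-locked
     * (pinned) host memory; the staging buffers above receive asynchronous
     * device-to-host copies of the energy/shift-force outputs, and such copies
     * only overlap with GPU work when the host buffer is pinned. */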

    init_plist(nb->plist[eintLocal]);

    /* local/non-local GPU streams */
    stat = cudaStreamCreate(&nb->stream[eintLocal]);
    CU_RET_ERR(stat, "cudaStreamCreate on stream[eintLocal] failed");
    if (nb->bUseTwoStreams)
    {
        init_plist(nb->plist[eintNonlocal]);
        stat = cudaStreamCreate(&nb->stream[eintNonlocal]);
        CU_RET_ERR(stat, "cudaStreamCreate on stream[eintNonlocal] failed");
    }

    /* init events for synchronization (timing disabled for performance reasons!) */
    stat = cudaEventCreateWithFlags(&nb->nonlocal_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on nonlocal_done failed");
    stat = cudaEventCreateWithFlags(&nb->misc_ops_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on misc_ops_done failed");

    /* set device info, just point it to the right GPU among the detected ones */
    nb->dev_info = &gpu_info->cuda_dev[get_gpu_device_id(gpu_info, my_gpu_index)];

    /* On GPUs with ECC enabled, cudaStreamSynchronize shows a large overhead
     * (which increases with shorter time/step) caused by a known CUDA driver bug.
     * To work around the issue we'll use an (admittedly fragile) memory polling
     * waiting to preserve performance. This requires support for atomic
     * operations and only works on x86/x86_64.
     * With polling wait event-timing also needs to be disabled.
     *
     * The overhead is greatly reduced in API v5.0 drivers and the improvement
     * is independent of runtime version. Hence, with API v5.0 drivers and later
     * we won't switch to polling.
     *
     * NOTE: Unfortunately, this is known to fail when GPUs are shared by (t)MPI
     * ranks, so we will also disable it in that case.
     */
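
    /* Illustrative sketch of the polling wait (an assumption: the real
     * implementation lives with the kernel launch/wait code, not here).
     * The last GPU task in a stream sets a flag word in mapped, page-locked
     * host memory, and the host busy-waits on it instead of calling
     * cudaStreamSynchronize:
     *
     *     while (*poll_word == 0)  // volatile unsigned int*, written by GPU
     *     {
     *         // spin; needs x86 cache coherence and atomics to be safe
     *     }
     */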

    bStreamSync   = getenv("GMX_CUDA_STREAMSYNC") != NULL;
    bNoStreamSync = getenv("GMX_NO_CUDA_STREAMSYNC") != NULL;

#ifdef TMPI_ATOMICS
    bTMPIAtomics = true;
#else
    bTMPIAtomics = false;
#endif

#if defined(i386) || defined(__x86_64__)
    bX86 = true;
#else
    bX86 = false;
#endif

    if (bStreamSync && bNoStreamSync)
    {
        gmx_fatal(FARGS, "Conflicting environment variables: both GMX_CUDA_STREAMSYNC and GMX_NO_CUDA_STREAMSYNC defined");
    }

    stat = cudaDriverGetVersion(&cuda_drv_ver);
    CU_RET_ERR(stat, "cudaDriverGetVersion failed");
    bOldDriver = (cuda_drv_ver < 5000);

    if (nb->dev_info->prop.ECCEnabled == 1)
    {
        if (bStreamSync)
        {
            nb->bUseStreamSync = true;

            /* only warn if polling should be used */
            if (bOldDriver && !gpu_info->bDevShare)
            {
                md_print_warn(fplog,
                              "NOTE: Using a GPU with ECC enabled and CUDA driver API version <5.0, but\n"
                              "      cudaStreamSynchronize waiting is forced by the GMX_CUDA_STREAMSYNC env. var.\n");
            }
        }
        else
        {
            /* Can/should turn off cudaStreamSynchronize wait only if
             *  - we're on x86/x86_64;
             *  - atomics are available;
             *  - GPUs are not being shared;
             *  - and the driver is old. */
            nb->bUseStreamSync =
                (bX86 && bTMPIAtomics && !gpu_info->bDevShare && bOldDriver) ?
                false : true;

            if (!nb->bUseStreamSync)
            {
                md_print_warn(fplog,
                              "NOTE: Using a GPU with ECC enabled and CUDA driver API version <5.0, known to\n"
                              "      cause performance loss. Switching to the alternative polling GPU waiting.\n"
                              "      If you encounter issues, switch back to standard GPU waiting by setting\n"
                              "      the GMX_CUDA_STREAMSYNC environment variable.\n");
            }
            else if (bOldDriver)
            {
                /* Tell the user that the ECC+old driver combination can be bad */
                sprintf(sbuf,
                        "NOTE: Using a GPU with ECC enabled and CUDA driver API version <5.0. A bug in this\n"
                        "      driver can cause performance loss.\n"
                        "      However, the polling waiting workaround cannot be used because\n%s\n"
                        "      Consider updating the driver or turning ECC off.",
                        (!bX86 || !bTMPIAtomics) ?
                        "      atomic operations are not supported by the platform/CPU+compiler." :
                        "      GPU(s) are being oversubscribed.");
                md_print_warn(fplog, sbuf);
            }
        }
    }
    else
    {
        if (bNoStreamSync)
        {
            nb->bUseStreamSync = false;

            md_print_warn(fplog,
                          "NOTE: Polling wait for GPU synchronization requested by GMX_NO_CUDA_STREAMSYNC\n");
        }
        else
        {
            /* no/off ECC, cudaStreamSynchronize not turned off by env. var. */
            nb->bUseStreamSync = true;
        }
    }

    /* CUDA timing disabled as event timers don't work:
       - with multiple streams = domain-decomposition;
       - with the polling waiting hack (without cudaStreamSynchronize);
       - when turned off by GMX_DISABLE_CUDA_TIMING. */
    nb->bDoTime = (!nb->bUseTwoStreams && nb->bUseStreamSync &&
                   (getenv("GMX_DISABLE_CUDA_TIMING") == NULL));

    init_timers(nb->timers, nb->bUseTwoStreams);
    init_timings(nb->timings);

    /* set the kernel type for the current GPU */
    nb->kernel_ver = pick_nbnxn_kernel_version(fplog, nb->dev_info);
    /* pick L1 cache configuration */
    nbnxn_cuda_set_cacheconfig(nb->dev_info);

    *p_cu_nb = nb;

    if (debug)
    {
        fprintf(debug, "Initialized CUDA data structures.\n");
    }
}
void nbnxn_cuda_init_const(nbnxn_cuda_ptr_t           cu_nb,
                           const interaction_const_t *ic,
                           const nonbonded_verlet_t  *nbv)
{
    init_atomdata_first(cu_nb->atdat, nbv->grp[0].nbat->ntype);
    init_nbparam(cu_nb->nbparam, ic, nbv);

    /* clear energy and shift force outputs */
    nbnxn_cuda_clear_e_fshift(cu_nb);
}
void nbnxn_cuda_init_pairlist(nbnxn_cuda_ptr_t        cu_nb,
                              const nbnxn_pairlist_t *h_plist,
                              int                     iloc)
{
    char          sbuf[STRLEN];
    cudaError_t   stat;
    bool          bDoTime = cu_nb->bDoTime;
    cudaStream_t  stream  = cu_nb->stream[iloc];
    cu_plist_t   *d_plist = cu_nb->plist[iloc];

    if (d_plist->na_c < 0)
    {
        d_plist->na_c = h_plist->na_ci;
    }
    else
    {
        if (d_plist->na_c != h_plist->na_ci)
        {
            sprintf(sbuf, "In cu_init_plist: the #atoms per cell has changed (from %d to %d)",
                    d_plist->na_c, h_plist->na_ci);
            gmx_incons(sbuf);
        }
    }

    if (bDoTime)
    {
        stat = cudaEventRecord(cu_nb->timers->start_pl_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    cu_realloc_buffered((void **)&d_plist->sci, h_plist->sci, sizeof(*d_plist->sci),
                        &d_plist->nsci, &d_plist->sci_nalloc,
                        h_plist->nsci,
                        stream, true);

    cu_realloc_buffered((void **)&d_plist->cj4, h_plist->cj4, sizeof(*d_plist->cj4),
                        &d_plist->ncj4, &d_plist->cj4_nalloc,
                        h_plist->ncj4,
                        stream, true);

    cu_realloc_buffered((void **)&d_plist->excl, h_plist->excl, sizeof(*d_plist->excl),
                        &d_plist->nexcl, &d_plist->excl_nalloc,
                        h_plist->nexcl,
                        stream, true);

    if (bDoTime)
    {
        stat = cudaEventRecord(cu_nb->timers->stop_pl_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* need to prune the pair list during the next step */
    d_plist->bDoPrune = true;
}
void nbnxn_cuda_upload_shiftvec(nbnxn_cuda_ptr_t        cu_nb,
                                const nbnxn_atomdata_t *nbatom)
{
    cu_atomdata_t *adat = cu_nb->atdat;
    cudaStream_t   ls   = cu_nb->stream[eintLocal];

    /* only if we have a dynamic box */
    if (nbatom->bDynamicBox || !adat->bShiftVecUploaded)
    {
        cu_copy_H2D_async(adat->shift_vec, nbatom->shift_vec,
                          SHIFTS * sizeof(*adat->shift_vec), ls);
        adat->bShiftVecUploaded = true;
    }
}
/*! Clears the first natoms_clear elements of the GPU nonbonded force output array. */
static void nbnxn_cuda_clear_f(nbnxn_cuda_ptr_t cu_nb, int natoms_clear)
{
    cudaError_t    stat;
    cu_atomdata_t *adat = cu_nb->atdat;
    cudaStream_t   ls   = cu_nb->stream[eintLocal];

    stat = cudaMemsetAsync(adat->f, 0, natoms_clear * sizeof(*adat->f), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on f failed");
}

/*! Clears nonbonded shift force output array and energy outputs on the GPU. */
static void nbnxn_cuda_clear_e_fshift(nbnxn_cuda_ptr_t cu_nb)
{
    cudaError_t    stat;
    cu_atomdata_t *adat = cu_nb->atdat;
    cudaStream_t   ls   = cu_nb->stream[eintLocal];

    stat = cudaMemsetAsync(adat->fshift, 0, SHIFTS * sizeof(*adat->fshift), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on fshift failed");
    stat = cudaMemsetAsync(adat->e_lj, 0, sizeof(*adat->e_lj), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on e_lj failed");
    stat = cudaMemsetAsync(adat->e_el, 0, sizeof(*adat->e_el), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on e_el failed");
}

void nbnxn_cuda_clear_outputs(nbnxn_cuda_ptr_t cu_nb, int flags)
{
    nbnxn_cuda_clear_f(cu_nb, cu_nb->atdat->natoms);
    /* clear shift force array and energies if the outputs were
       used in the current step */
    if (flags & GMX_FORCE_VIRIAL)
    {
        nbnxn_cuda_clear_e_fshift(cu_nb);
    }
}
void nbnxn_cuda_init_atomdata(nbnxn_cuda_ptr_t        cu_nb,
                              const nbnxn_atomdata_t *nbat)
{
    cudaError_t    stat;
    int            nalloc, natoms;
    bool           realloced = false;
    bool           bDoTime   = cu_nb->bDoTime;
    cu_timers_t   *timers    = cu_nb->timers;
    cu_atomdata_t *d_atdat   = cu_nb->atdat;
    cudaStream_t   ls        = cu_nb->stream[eintLocal];

    natoms = nbat->natoms;

    if (bDoTime)
    {
        /* time async copy */
        stat = cudaEventRecord(timers->start_atdat, ls);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }

    /* need to reallocate if we have to copy more atoms than the amount of space
       available and only allocate if we haven't initialized yet, i.e. d_atdat->natoms == -1 */
    if (natoms > d_atdat->nalloc)
    {
        nalloc = over_alloc_small(natoms);

        /* free up first if the arrays have already been initialized */
        if (d_atdat->nalloc != -1)
        {
            cu_free_buffered(d_atdat->f, &d_atdat->natoms, &d_atdat->nalloc);
            cu_free_buffered(d_atdat->xq);
            cu_free_buffered(d_atdat->atom_types);
        }

        stat = cudaMalloc((void **)&d_atdat->f, nalloc*sizeof(*d_atdat->f));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->f");
        stat = cudaMalloc((void **)&d_atdat->xq, nalloc*sizeof(*d_atdat->xq));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->xq");

        stat = cudaMalloc((void **)&d_atdat->atom_types, nalloc*sizeof(*d_atdat->atom_types));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->atom_types");

        d_atdat->nalloc = nalloc;
        realloced = true;
    }
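
    /* Sketch of the grow-only pattern used above (illustrative; assumes
     * over_alloc_small() returns the request plus some headroom, so slow
     * growth of natoms does not force a device realloc at every search step):
     *
     *     if (req > nalloc)               // current capacity exceeded
     *     {
     *         if (nalloc != -1) free old; // -1 = "never allocated" sentinel
     *         nalloc = over_alloc_small(req);
     *         allocate nalloc elements;
     *     }
     */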

    d_atdat->natoms       = natoms;
    d_atdat->natoms_local = nbat->natoms_local;

    /* need to clear GPU f output if realloc happened */
    if (realloced)
    {
        nbnxn_cuda_clear_f(cu_nb, nalloc);
    }

    cu_copy_H2D_async(d_atdat->atom_types, nbat->type,
                      natoms*sizeof(*d_atdat->atom_types), ls);

    if (bDoTime)
    {
        stat = cudaEventRecord(timers->stop_atdat, ls);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    }
}
void nbnxn_cuda_free(FILE *fplog, nbnxn_cuda_ptr_t cu_nb)
{
    cudaError_t      stat;
    cu_atomdata_t   *atdat;
    cu_nbparam_t    *nbparam;
    cu_plist_t      *plist, *plist_nl;
    cu_timers_t     *timers;

    if (cu_nb == NULL) return;

    atdat    = cu_nb->atdat;
    nbparam  = cu_nb->nbparam;
    plist    = cu_nb->plist[eintLocal];
    plist_nl = cu_nb->plist[eintNonlocal];
    timers   = cu_nb->timers;

    if (nbparam->eeltype == eelCuEWALD || nbparam->eeltype == eelCuEWALD_TWIN)
    {
        stat = cudaUnbindTexture(nbnxn_cuda_get_coulomb_tab_texref());
        CU_RET_ERR(stat, "cudaUnbindTexture on coulomb_tab failed");
        cu_free_buffered(nbparam->coulomb_tab, &nbparam->coulomb_tab_size);
    }

    stat = cudaEventDestroy(cu_nb->nonlocal_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on timers->nonlocal_done");
    stat = cudaEventDestroy(cu_nb->misc_ops_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on timers->misc_ops_done");

    stat = cudaEventDestroy(timers->start_atdat);
    CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_atdat");
    stat = cudaEventDestroy(timers->stop_atdat);
    CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_atdat");

    /* The non-local counters/stream (second in the array) are needed only with DD. */
    for (int i = 0; i <= (cu_nb->bUseTwoStreams ? 1 : 0); i++)
    {
        stat = cudaEventDestroy(timers->start_nb_k[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_nb_k");
        stat = cudaEventDestroy(timers->stop_nb_k[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_nb_k");

        stat = cudaEventDestroy(timers->start_pl_h2d[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_pl_h2d");
        stat = cudaEventDestroy(timers->stop_pl_h2d[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_pl_h2d");

        stat = cudaStreamDestroy(cu_nb->stream[i]);
        CU_RET_ERR(stat, "cudaStreamDestroy failed on stream");

        stat = cudaEventDestroy(timers->start_nb_h2d[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_nb_h2d");
        stat = cudaEventDestroy(timers->stop_nb_h2d[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_nb_h2d");

        stat = cudaEventDestroy(timers->start_nb_d2h[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_nb_d2h");
        stat = cudaEventDestroy(timers->stop_nb_d2h[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_nb_d2h");
    }

    stat = cudaUnbindTexture(nbnxn_cuda_get_nbfp_texref());
    CU_RET_ERR(stat, "cudaUnbindTexture on nbfp failed");
    cu_free_buffered(nbparam->nbfp);

    stat = cudaFree(atdat->shift_vec);
    CU_RET_ERR(stat, "cudaFree failed on atdat->shift_vec");
    stat = cudaFree(atdat->fshift);
    CU_RET_ERR(stat, "cudaFree failed on atdat->fshift");

    stat = cudaFree(atdat->e_lj);
    CU_RET_ERR(stat, "cudaFree failed on atdat->e_lj");
    stat = cudaFree(atdat->e_el);
    CU_RET_ERR(stat, "cudaFree failed on atdat->e_el");

    cu_free_buffered(atdat->f, &atdat->natoms, &atdat->nalloc);
    cu_free_buffered(atdat->xq);
    cu_free_buffered(atdat->atom_types, &atdat->ntypes);

    cu_free_buffered(plist->sci, &plist->nsci, &plist->sci_nalloc);
    cu_free_buffered(plist->cj4, &plist->ncj4, &plist->cj4_nalloc);
    cu_free_buffered(plist->excl, &plist->nexcl, &plist->excl_nalloc);
    if (cu_nb->bUseTwoStreams)
    {
        cu_free_buffered(plist_nl->sci, &plist_nl->nsci, &plist_nl->sci_nalloc);
        cu_free_buffered(plist_nl->cj4, &plist_nl->ncj4, &plist_nl->cj4_nalloc);
        cu_free_buffered(plist_nl->excl, &plist_nl->nexcl, &plist_nl->excl_nalloc);
    }

    sfree(atdat);
    sfree(nbparam);
    sfree(plist);
    if (cu_nb->bUseTwoStreams)
    {
        sfree(plist_nl);
    }
    sfree(cu_nb->timers);
    sfree(cu_nb->timings);

    if (debug)
    {
        fprintf(debug, "Cleaned up CUDA data structures.\n");
    }
}
/*! Synchronizes the stream given by iloc with the atomdata H2D transfer event. */
void cu_synchstream_atdat(nbnxn_cuda_ptr_t cu_nb, int iloc)
{
    cudaError_t  stat;
    cudaStream_t stream = cu_nb->stream[iloc];

    stat = cudaStreamWaitEvent(stream, cu_nb->timers->stop_atdat, 0);
    CU_RET_ERR(stat, "cudaStreamWaitEvent failed");
}
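
/* Note: cudaStreamWaitEvent makes all work subsequently submitted to 'stream'
 * wait for the recorded stop_atdat event without blocking the host, i.e. it
 * orders e.g. the non-local stream after the atomdata H2D copy. */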
wallclock_gpu_t * nbnxn_cuda_get_timings(nbnxn_cuda_ptr_t cu_nb)
{
    return (cu_nb != NULL && cu_nb->bDoTime) ? cu_nb->timings : NULL;
}

void nbnxn_cuda_reset_timings(nbnxn_cuda_ptr_t cu_nb)
{
    if (cu_nb->bDoTime)
    {
        init_timings(cu_nb->timings);
    }
}

int nbnxn_cuda_min_ci_balanced(nbnxn_cuda_ptr_t cu_nb)
{
    return cu_nb != NULL ?
           gpu_min_ci_balanced_factor*cu_nb->dev_info->prop.multiProcessorCount : 0;
}
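
/* Worked example (illustrative): on a 14-multiprocessor Fermi card such as a
 * Tesla C2070, this returns 40*14 = 560, i.e. pair lists are considered
 * balanced only when at least ~560 ci list entries are available to spread
 * over the multiprocessors. */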