1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
4 * This source code is part of
8 * GROningen MAchine for Chemical Simulations
10 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
11 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
12 * Copyright (c) 2001-2012, The GROMACS development team,
13 * check out http://www.gromacs.org for more information.
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version 2
18 * of the License, or (at your option) any later version.
20 * If you want to redistribute modifications, please consider that
21 * scientific software is very special. Version control is crucial -
22 * bugs must be traceable. We will be happy to consider code for
23 * inclusion in the official distribution, but derived work must not
24 * be called official GROMACS. Details are found in the README & COPYING
25 * files - if they are missing, get the official version at www.gromacs.org.
27 * To help us fund GROMACS development, we humbly ask that you cite
28 * the papers on the package - you can find them in the top README file.
30 * For more info, check our website at http://www.gromacs.org
33 * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
45 #include "gmx_fatal.h"
49 #include "types/nb_verlet.h"
50 #include "types/interaction_const.h"
51 #include "types/force_flags.h"
52 #include "../nbnxn_consts.h"
54 #include "nbnxn_cuda_types.h"
55 #include "../../gmxlib/cuda_tools/cudautils.cuh"
56 #include "nbnxn_cuda_data_mgmt.h"
57 #include "pmalloc_cuda.h"
58 #include "gpu_utils.h"
60 static bool bUseCudaEventBlockingSync = false; /* makes the CPU thread block */
62 /* This is a heuristically determined parameter for the Fermi architecture: the
63 * minimum size of ci lists is obtained by multiplying this constant with the # of
64 * multiprocessors on the current device.
66 static unsigned int gpu_min_ci_balanced_factor = 40;
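/* Illustrative example (hypothetical device, numbers not taken from this file):
 * on a Fermi GPU with 16 multiprocessors the factor above yields a minimum
 * balanced ci-list size of
 *     40 * 16 = 640
 * which is the value nbnxn_cuda_min_ci_balanced() at the end of this file
 * would return for such a device.
 */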
68 /* Functions from nbnxn_cuda.cu */
69 extern void nbnxn_cuda_set_cacheconfig(cuda_dev_info_t *devinfo);
70 extern const struct texture<float, 1, cudaReadModeElementType>& nbnxn_cuda_get_nbfp_texref();
71 extern const struct texture<float, 1, cudaReadModeElementType>& nbnxn_cuda_get_coulomb_tab_texref();
73 /* We should actually be using md_print_warn in md_logging.c,
74 * but we can't include mpi.h in CUDA code.
76 static void md_print_warn(FILE *fplog,
83 /* We should only print to stderr on the master node;
84 * in most cases fplog is only set on the master node, so this works.
87 fprintf(stderr, "\n");
88 vfprintf(stderr, fmt, ap);
89 fprintf(stderr, "\n");
94 vfprintf(fplog, fmt, ap);
102 static void nbnxn_cuda_clear_e_fshift(nbnxn_cuda_ptr_t cu_nb);
105 /*! Tabulates the Ewald Coulomb force and initializes the size/scale
106 and the table GPU array. If called with an already allocated table,
107 it just re-uploads the table.
109 static void init_ewald_coulomb_force_table(cu_nbparam_t *nbp)
111 float *ftmp, *coul_tab;
116 tabsize = GPU_EWALD_COULOMB_FORCE_TABLE_SIZE;
117 /* Subtract 2 instead of 1 to avoid access out of range due to rounding */
118 tabscale = (tabsize - 2) / sqrt(nbp->rcoulomb_sq);
120 pmalloc((void**)&ftmp, tabsize*sizeof(*ftmp));
122 table_spline3_fill_ewald_lr(ftmp, NULL, NULL, tabsize,
123 1/tabscale, nbp->ewald_beta);
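/* Worked example of the scale above (assumed numbers, not from this file):
 * with a 1536-point table and rcoulomb = 1.0 nm,
 *     tabscale = (1536 - 2) / sqrt(1.0*1.0) = 1534 table points per nm,
 * i.e. a spacing of roughly 0.00065 nm; the kernels turn a distance r into a
 * table index as r*tabscale (hence the 1/tabscale spacing passed above).
 */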
125 /* If the table pointer == NULL the table is generated the first time =>
126 the array pointer will be saved to nbparam and the texture is bound.
128 coul_tab = nbp->coulomb_tab;
129 if (coul_tab == NULL)
131 stat = cudaMalloc((void **)&coul_tab, tabsize*sizeof(*coul_tab));
132 CU_RET_ERR(stat, "cudaMalloc failed on coul_tab");
134 nbp->coulomb_tab = coul_tab;
136 cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
137 stat = cudaBindTexture(NULL, &nbnxn_cuda_get_coulomb_tab_texref(),
138 coul_tab, &cd, tabsize*sizeof(*coul_tab));
139 CU_RET_ERR(stat, "cudaBindTexture on coul_tab failed");
142 cu_copy_H2D(coul_tab, ftmp, tabsize*sizeof(*coul_tab));
144 nbp->coulomb_tab_size = tabsize;
145 nbp->coulomb_tab_scale = tabscale;
151 /*! Initializes the atomdata structure first time; it only gets filled at pair search. */
153 static void init_atomdata_first(cu_atomdata_t *ad, int ntypes)
158 stat = cudaMalloc((void**)&ad->shift_vec, SHIFTS*sizeof(*ad->shift_vec));
159 CU_RET_ERR(stat, "cudaMalloc failed on ad->shift_vec");
160 ad->bShiftVecUploaded = false;
162 stat = cudaMalloc((void**)&ad->fshift, SHIFTS*sizeof(*ad->fshift));
163 CU_RET_ERR(stat, "cudaMalloc failed on ad->fshift");
165 stat = cudaMalloc((void**)&ad->e_lj, sizeof(*ad->e_lj));
166 CU_RET_ERR(stat, "cudaMalloc failed on ad->e_lj");
167 stat = cudaMalloc((void**)&ad->e_el, sizeof(*ad->e_el));
168 CU_RET_ERR(stat, "cudaMalloc failed on ad->e_el");
170 /* initialize to NULL pointers to data that is not allocated here and will
171 need reallocation in nbnxn_cuda_init_atomdata */
175 /* size -1 indicates that the respective array hasn't been initialized yet */
180 /*! Initializes the nonbonded parameter data structure. */
181 static void init_nbparam(cu_nbparam_t *nbp,
182 const interaction_const_t *ic,
183 const nonbonded_verlet_t *nbv)
188 ntypes = nbv->grp[0].nbat->ntype;
190 nbp->ewald_beta = ic->ewaldcoeff;
191 nbp->sh_ewald = ic->sh_ewald;
192 nbp->epsfac = ic->epsfac;
193 nbp->two_k_rf = 2.0 * ic->k_rf;
194 nbp->c_rf = ic->c_rf;
195 nbp->rvdw_sq = ic->rvdw * ic->rvdw;
196 nbp->rcoulomb_sq= ic->rcoulomb * ic->rcoulomb;
197 nbp->rlist_sq = ic->rlist * ic->rlist;
198 nbp->sh_invrc6 = ic->sh_invrc6;
200 if (ic->eeltype == eelCUT)
202 nbp->eeltype = eelCuCUT;
204 else if (EEL_RF(ic->eeltype))
206 nbp->eeltype = eelCuRF;
208 else if ((EEL_PME(ic->eeltype) || ic->eeltype==eelEWALD))
210 /* Initially rcoulomb == rvdw, so it's surely not twin cut-off, unless
211 forced by the env. var. (used only for benchmarking). */
212 if (getenv("GMX_CUDA_NB_EWALD_TWINCUT") == NULL)
214 nbp->eeltype = eelCuEWALD;
218 nbp->eeltype = eelCuEWALD_TWIN;
223 /* Shouldn't happen, as this is checked when choosing the Verlet scheme */
224 gmx_incons("The requested electrostatics type is not implemented in the CUDA GPU accelerated kernels!");
227 /* generate table for PME */
228 if (nbp->eeltype == eelCuEWALD)
230 nbp->coulomb_tab = NULL;
231 init_ewald_coulomb_force_table(nbp);
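/* Usage sketch for the selection above (assumed typical invocation, not part
 * of this file): the twin cut-off Ewald kernel can be forced for benchmarking
 * by exporting the environment variable before launching mdrun, e.g.
 *     GMX_CUDA_NB_EWALD_TWINCUT=1 mdrun ...
 * otherwise eelCuEWALD is chosen here because rcoulomb == rvdw at init time.
 */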
234 nnbfp = 2*ntypes*ntypes;
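/* Illustrative size (hypothetical ntypes): nbfp holds two Lennard-Jones
 * parameters (c6, c12) per i,j type pair, hence 2*ntypes*ntypes floats;
 * e.g. ntypes = 5 gives nnbfp = 2*5*5 = 50.
 */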
235 stat = cudaMalloc((void **)&nbp->nbfp, nnbfp*sizeof(*nbp->nbfp));
236 CU_RET_ERR(stat, "cudaMalloc failed on nbp->nbfp");
237 cu_copy_H2D(nbp->nbfp, nbv->grp[0].nbat->nbfp, nnbfp*sizeof(*nbp->nbfp));
239 cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
240 stat = cudaBindTexture(NULL, &nbnxn_cuda_get_nbfp_texref(),
241 nbp->nbfp, &cd, nnbfp*sizeof(*nbp->nbfp));
242 CU_RET_ERR(stat, "cudaBindTexture on nbfp failed");
245 /*! Re-generates the GPU Ewald force table, resets rlist, and updates the
246 * electrostatic type, switching to twin cut-off (or back) if needed. */
247 void nbnxn_cuda_pme_loadbal_update_param(nbnxn_cuda_ptr_t cu_nb,
248 const interaction_const_t *ic)
250 cu_nbparam_t *nbp = cu_nb->nbparam;
252 nbp->rlist_sq = ic->rlist * ic->rlist;
253 nbp->rcoulomb_sq = ic->rcoulomb * ic->rcoulomb;
254 nbp->ewald_beta = ic->ewaldcoeff;
256 /* When switching to/from twin cut-off, the electrostatics type needs updating.
257 (The env. var. that forces twin cut-off is for benchmarking only!) */
258 if (ic->rcoulomb == ic->rvdw &&
259 getenv("GMX_CUDA_NB_EWALD_TWINCUT") == NULL)
261 nbp->eeltype = eelCuEWALD;
265 nbp->eeltype = eelCuEWALD_TWIN;
268 init_ewald_coulomb_force_table(cu_nb->nbparam);
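/* Usage sketch (hypothetical values; the actual call sites are in the PME
 * load-balancing code, not in this file):
 *     ic->rcoulomb   = 1.1;   // cut-off scaled up by PME load balancing
 *     ic->ewaldcoeff = ...;   // beta recomputed elsewhere for the new cut-off
 *     nbnxn_cuda_pme_loadbal_update_param(cu_nb, ic);
 * After the call, rlist/rcoulomb/beta on the GPU match ic again, the Ewald
 * table is re-generated for the new beta, and the electrostatics type is
 * switched to/from twin cut-off as decided above.
 */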
271 /*! Initializes the pair list data structure. */
272 static void init_plist(cu_plist_t *pl)
274 /* initialize to NULL pointers to data that is not allocated here and will
275 need reallocation in nbnxn_cuda_init_pairlist */
280 /* size -1 indicates that the respective array hasn't been initialized yet */
287 pl->excl_nalloc = -1;
288 pl->bDoPrune = false;
291 /*! Initializes the timer data structure. */
292 static void init_timers(cu_timers_t *t, bool bUseTwoStreams)
295 int eventflags = ( bUseCudaEventBlockingSync ? cudaEventBlockingSync: cudaEventDefault );
297 stat = cudaEventCreateWithFlags(&(t->start_atdat), eventflags);
298 CU_RET_ERR(stat, "cudaEventCreate on start_atdat failed");
299 stat = cudaEventCreateWithFlags(&(t->stop_atdat), eventflags);
300 CU_RET_ERR(stat, "cudaEventCreate on stop_atdat failed");
302 /* The non-local counters/stream (second in the array) are needed only with DD. */
303 for (int i = 0; i <= (bUseTwoStreams ? 1 : 0); i++)
305 stat = cudaEventCreateWithFlags(&(t->start_nb_k[i]), eventflags);
306 CU_RET_ERR(stat, "cudaEventCreate on start_nb_k failed");
307 stat = cudaEventCreateWithFlags(&(t->stop_nb_k[i]), eventflags);
308 CU_RET_ERR(stat, "cudaEventCreate on stop_nb_k failed");
311 stat = cudaEventCreateWithFlags(&(t->start_pl_h2d[i]), eventflags);
312 CU_RET_ERR(stat, "cudaEventCreate on start_pl_h2d failed");
313 stat = cudaEventCreateWithFlags(&(t->stop_pl_h2d[i]), eventflags);
314 CU_RET_ERR(stat, "cudaEventCreate on stop_pl_h2d failed");
316 stat = cudaEventCreateWithFlags(&(t->start_nb_h2d[i]), eventflags);
317 CU_RET_ERR(stat, "cudaEventCreate on start_nb_h2d failed");
318 stat = cudaEventCreateWithFlags(&(t->stop_nb_h2d[i]), eventflags);
319 CU_RET_ERR(stat, "cudaEventCreate on stop_nb_h2d failed");
321 stat = cudaEventCreateWithFlags(&(t->start_nb_d2h[i]), eventflags);
322 CU_RET_ERR(stat, "cudaEventCreate on start_nb_d2h failed");
323 stat = cudaEventCreateWithFlags(&(t->stop_nb_d2h[i]), eventflags);
324 CU_RET_ERR(stat, "cudaEventCreate on stop_nb_d2h failed");
328 /*! Initializes the timings data structure. */
329 static void init_timings(wallclock_gpu_t *t)
338 for (i = 0; i < 2; i++)
340 for(j = 0; j < 2; j++)
342 t->ktime[i][j].t = 0.0;
343 t->ktime[i][j].c = 0;
348 /* Decide which kernel version to use (default or legacy) based on:
349 * - CUDA version used for compilation
350 * - non-bonded kernel selector environment variables
351 * - GPU architecture version
353 static int pick_nbnxn_kernel_version(FILE *fplog,
354 cuda_dev_info_t *devinfo)
356 bool bForceLegacyKernel, bForceDefaultKernel, bCUDA40, bCUDA32;
360 /* Legacy kernel (former k2), kept for backward compatibility as it is
361 faster than the default with CUDA 3.2/4.0 on Fermi (not on Kepler). */
362 bForceLegacyKernel = (getenv("GMX_CUDA_NB_LEGACY") != NULL);
363 /* default kernel (former k3). */
364 bForceDefaultKernel = (getenv("GMX_CUDA_NB_DEFAULT") != NULL);
366 if ((unsigned)(bForceLegacyKernel + bForceDefaultKernel) > 1)
368 gmx_fatal(FARGS, "Multiple CUDA non-bonded kernels requested; to manually pick a kernel set only one \n"
369 "of the following environment variables: \n"
370 "GMX_CUDA_NB_DEFAULT, GMX_CUDA_NB_LEGACY");
373 bCUDA32 = bCUDA40 = false;
374 #if CUDA_VERSION == 3200
376 sprintf(sbuf, "3.2");
377 #elif CUDA_VERSION == 4000
379 sprintf(sbuf, "4.0");
382 /* default is default ;) */
383 kver = eNbnxnCuKDefault;
385 /* Consider switching to legacy kernels only on Fermi */
386 if (devinfo->prop.major < 3 && (bCUDA32 || bCUDA40))
388 /* use legacy kernel unless something else is forced by an env. var */
389 if (bForceDefaultKernel)
392 "NOTE: CUDA %s compilation detected; with this compiler version the legacy\n"
393 " non-bonded kernels perform best. However, the default kernels were\n"
394 " selected by the GMX_CUDA_NB_DEFAULT environment variable.\n"
395 " For best performance upgrade your CUDA toolkit.\n",
400 kver = eNbnxnCuKLegacy;
405 /* issue note if the non-default kernel is forced by an env. var */
406 if (bForceLegacyKernel)
409 "NOTE: Legacy non-bonded CUDA kernels selected by the GMX_CUDA_NB_LEGACY\n"
410 " env. var. Consider using using the default kernels which should be faster!\n");
412 kver = eNbnxnCuKLegacy;
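/* Decision examples implied by the logic above (devices are illustrative):
 *   - CUDA 3.2 or 4.0 toolkit, Fermi GPU (prop.major < 3), no env. vars
 *       -> eNbnxnCuKLegacy;
 *   - same setup with GMX_CUDA_NB_DEFAULT set
 *       -> eNbnxnCuKDefault, and the upgrade note above is printed;
 *   - CUDA 5.0 toolkit with a Fermi GPU, or any toolkit with a Kepler GPU
 *       -> eNbnxnCuKDefault, unless GMX_CUDA_NB_LEGACY forces the legacy
 *          kernels (in which case the note above is printed).
 */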
419 void nbnxn_cuda_init(FILE *fplog,
420 nbnxn_cuda_ptr_t *p_cu_nb,
421 gmx_gpu_info_t *gpu_info, int my_gpu_index,
422 gmx_bool bLocalAndNonlocal)
427 bool bStreamSync, bNoStreamSync, bTMPIAtomics, bX86, bOldDriver;
432 if (p_cu_nb == NULL) return;
436 snew(nb->nbparam, 1);
437 snew(nb->plist[eintLocal], 1);
438 if (bLocalAndNonlocal)
440 snew(nb->plist[eintNonlocal], 1);
443 nb->bUseTwoStreams = bLocalAndNonlocal;
446 snew(nb->timings, 1);
449 pmalloc((void**)&nb->nbst.e_lj, sizeof(*nb->nbst.e_lj));
450 pmalloc((void**)&nb->nbst.e_el, sizeof(*nb->nbst.e_el));
451 pmalloc((void**)&nb->nbst.fshift, SHIFTS * sizeof(*nb->nbst.fshift));
453 init_plist(nb->plist[eintLocal]);
455 /* local/non-local GPU streams */
456 stat = cudaStreamCreate(&nb->stream[eintLocal]);
457 CU_RET_ERR(stat, "cudaStreamCreate on stream[eintLocal] failed");
458 if (nb->bUseTwoStreams)
460 init_plist(nb->plist[eintNonlocal]);
461 stat = cudaStreamCreate(&nb->stream[eintNonlocal]);
462 CU_RET_ERR(stat, "cudaStreamCreate on stream[eintNonlocal] failed");
465 /* init events for synchronization (timing disabled for performance reasons!) */
466 stat = cudaEventCreateWithFlags(&nb->nonlocal_done, cudaEventDisableTiming);
467 CU_RET_ERR(stat, "cudaEventCreate on nonlocal_done failed");
468 stat = cudaEventCreateWithFlags(&nb->misc_ops_done, cudaEventDisableTiming);
469 CU_RET_ERR(stat, "cudaEventCreate on misc_ops_done failed");
471 /* set device info, just point it to the right GPU among the detected ones */
472 nb->dev_info = &gpu_info->cuda_dev[get_gpu_device_id(gpu_info, my_gpu_index)];
474 /* On GPUs with ECC enabled, cudaStreamSynchronize shows a large overhead
475 * (which increases with shorter time/step) caused by a known CUDA driver bug.
476 * To work around the issue we'll use an (admittedly fragile) memory polling
477 * waiting to preserve performance. This requires support for atomic
478 * operations and only works on x86/x86_64.
479 * With polling wait event-timing also needs to be disabled.
481 * The overhead is greatly reduced in API v5.0 drivers and the improvement
482 * is independent of runtime version. Hence, with API v5.0 drivers and later
483 * we won't switch to polling.
485 * NOTE: Unfortunately, this is known to fail when GPUs are shared by (t)MPI
486 * ranks, so we will also disable it in that case.
489 bStreamSync = getenv("GMX_CUDA_STREAMSYNC") != NULL;
490 bNoStreamSync = getenv("GMX_NO_CUDA_STREAMSYNC") != NULL;
495 bTMPIAtomics = false;
498 #if defined(i386) || defined(__x86_64__)
504 if (bStreamSync && bNoStreamSync)
506 gmx_fatal(FARGS, "Conflicting environment variables: both GMX_CUDA_STREAMSYNC and GMX_NO_CUDA_STREAMSYNC defined");
509 stat = cudaDriverGetVersion(&cuda_drv_ver);
510 CU_RET_ERR(stat, "cudaDriverGetVersion failed");
511 bOldDriver = (cuda_drv_ver < 5000);
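/* Example of the encoding returned by cudaDriverGetVersion (values are
 * illustrative): a CUDA 4.2 driver reports 4020 and a CUDA 5.0 driver reports
 * 5000, so bOldDriver is true for any driver older than the 5.0 API.
 */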
513 if (nb->dev_info->prop.ECCEnabled == 1)
517 nb->bUseStreamSync = true;
519 /* only warn if polling should be used */
520 if (bOldDriver && !gpu_info->bDevShare)
523 "NOTE: Using a GPU with ECC enabled and CUDA driver API version <5.0, but\n"
524 " cudaStreamSynchronize waiting is forced by the GMX_CUDA_STREAMSYNC env. var.\n");
529 /* Can/should turn off cudaStreamSynchronize wait only if
530 * - we're on x86/x86_64
531 * - atomics are available
532 * - GPUs are not being shared
533 * - and driver is old. */
535 (bX86 && bTMPIAtomics && !gpu_info->bDevShare && bOldDriver) ?
538 if (nb->bUseStreamSync)
541 "NOTE: Using a GPU with ECC enabled and CUDA driver API version <5.0, known to\n"
542 " cause performance loss. Switching to the alternative polling GPU waiting.\n"
543 " If you encounter issues, switch back to standard GPU waiting by setting\n"
544 " the GMX_CUDA_STREAMSYNC environment variable.\n");
548 /* Tell the user that the ECC+old driver combination can be bad */
550 "NOTE: Using a GPU with ECC enabled and CUDA driver API version <5.0. A bug in this\n"
551 " driver can cause performance loss.\n"
552 " However, the polling waiting workaround can not be used because\n%s\n"
553 " Consider updating the driver or turning ECC off.",
554 (!bX86 || !bTMPIAtomics) ?
555 " atomic operations are not supported by the platform/CPU+compiler." :
556 " GPU(s) are being oversubscribed.");
557 md_print_warn(fplog, sbuf);
565 nb->bUseStreamSync = false;
568 "NOTE: Polling wait for GPU synchronization requested by GMX_NO_CUDA_STREAMSYNC\n");
572 /* no/off ECC, cudaStreamSynchronize not turned off by env. var. */
573 nb->bUseStreamSync = true;
577 /* CUDA timing disabled as event timers don't work:
578 - with multiple streams (i.e. with domain decomposition);
579 - with the polling waiting hack (without cudaStreamSynchronize);
580 - when turned off by GMX_DISABLE_CUDA_TIMING.
582 nb->bDoTime = (!nb->bUseTwoStreams && nb->bUseStreamSync &&
583 (getenv("GMX_DISABLE_CUDA_TIMING") == NULL));
587 init_timers(nb->timers, nb->bUseTwoStreams);
588 init_timings(nb->timings);
591 /* set the kernel type for the current GPU */
592 nb->kernel_ver = pick_nbnxn_kernel_version(fplog, nb->dev_info);
593 /* pick L1 cache configuration */
594 nbnxn_cuda_set_cacheconfig(nb->dev_info);
600 fprintf(debug, "Initialized CUDA data structures.\n");
604 void nbnxn_cuda_init_const(nbnxn_cuda_ptr_t cu_nb,
605 const interaction_const_t *ic,
606 const nonbonded_verlet_t *nbv)
608 init_atomdata_first(cu_nb->atdat, nbv->grp[0].nbat->ntype);
609 init_nbparam(cu_nb->nbparam, ic, nbv);
611 /* clear energy and shift force outputs */
612 nbnxn_cuda_clear_e_fshift(cu_nb);
615 void nbnxn_cuda_init_pairlist(nbnxn_cuda_ptr_t cu_nb,
616 const nbnxn_pairlist_t *h_plist,
621 bool bDoTime = cu_nb->bDoTime;
622 cudaStream_t stream = cu_nb->stream[iloc];
623 cu_plist_t *d_plist = cu_nb->plist[iloc];
625 if (d_plist->na_c < 0)
627 d_plist->na_c = h_plist->na_ci;
631 if (d_plist->na_c != h_plist->na_ci)
633 sprintf(sbuf, "In cu_init_plist: the #atoms per cell has changed (from %d to %d)",
634 d_plist->na_c, h_plist->na_ci);
641 stat = cudaEventRecord(cu_nb->timers->start_pl_h2d[iloc], stream);
642 CU_RET_ERR(stat, "cudaEventRecord failed");
645 cu_realloc_buffered((void **)&d_plist->sci, h_plist->sci, sizeof(*d_plist->sci),
646 &d_plist->nsci, &d_plist->sci_nalloc,
650 cu_realloc_buffered((void **)&d_plist->cj4, h_plist->cj4, sizeof(*d_plist->cj4),
651 &d_plist->ncj4, &d_plist->cj4_nalloc,
655 cu_realloc_buffered((void **)&d_plist->excl, h_plist->excl, sizeof(*d_plist->excl),
656 &d_plist->nexcl, &d_plist->excl_nalloc,
662 stat = cudaEventRecord(cu_nb->timers->stop_pl_h2d[iloc], stream);
663 CU_RET_ERR(stat, "cudaEventRecord failed");
666 /* need to prune the pair list during the next step */
667 d_plist->bDoPrune = true;
670 void nbnxn_cuda_upload_shiftvec(nbnxn_cuda_ptr_t cu_nb,
671 const nbnxn_atomdata_t *nbatom)
673 cu_atomdata_t *adat = cu_nb->atdat;
674 cudaStream_t ls = cu_nb->stream[eintLocal];
676 /* only if we have a dynamic box */
677 if (nbatom->bDynamicBox || !adat->bShiftVecUploaded)
679 cu_copy_H2D_async(adat->shift_vec, nbatom->shift_vec,
680 SHIFTS * sizeof(*adat->shift_vec), ls);
681 adat->bShiftVecUploaded = true;
685 /*! Clears the first natoms_clear elements of the GPU nonbonded force output array. */
686 static void nbnxn_cuda_clear_f(nbnxn_cuda_ptr_t cu_nb, int natoms_clear)
689 cu_atomdata_t *adat = cu_nb->atdat;
690 cudaStream_t ls = cu_nb->stream[eintLocal];
692 stat = cudaMemsetAsync(adat->f, 0, natoms_clear * sizeof(*adat->f), ls);
693 CU_RET_ERR(stat, "cudaMemsetAsync on f failed");
696 /*! Clears nonbonded shift force output array and energy outputs on the GPU. */
697 static void nbnxn_cuda_clear_e_fshift(nbnxn_cuda_ptr_t cu_nb)
700 cu_atomdata_t *adat = cu_nb->atdat;
701 cudaStream_t ls = cu_nb->stream[eintLocal];
703 stat = cudaMemsetAsync(adat->fshift, 0, SHIFTS * sizeof(*adat->fshift), ls);
704 CU_RET_ERR(stat, "cudaMemsetAsync on fshift failed");
705 stat = cudaMemsetAsync(adat->e_lj, 0, sizeof(*adat->e_lj), ls);
706 CU_RET_ERR(stat, "cudaMemsetAsync on e_lj failed");
707 stat = cudaMemsetAsync(adat->e_el, 0, sizeof(*adat->e_el), ls);
708 CU_RET_ERR(stat, "cudaMemsetAsync on e_el failed");
711 void nbnxn_cuda_clear_outputs(nbnxn_cuda_ptr_t cu_nb, int flags)
713 nbnxn_cuda_clear_f(cu_nb, cu_nb->atdat->natoms);
714 /* clear shift force array and energies if the outputs were
715 used in the current step */
716 if (flags & GMX_FORCE_VIRIAL)
718 nbnxn_cuda_clear_e_fshift(cu_nb);
722 void nbnxn_cuda_init_atomdata(nbnxn_cuda_ptr_t cu_nb,
723 const nbnxn_atomdata_t *nbat)
728 bool bDoTime = cu_nb->bDoTime;
729 cu_timers_t *timers = cu_nb->timers;
730 cu_atomdata_t *d_atdat = cu_nb->atdat;
731 cudaStream_t ls = cu_nb->stream[eintLocal];
733 natoms = nbat->natoms;
738 /* time async copy */
739 stat = cudaEventRecord(timers->start_atdat, ls);
740 CU_RET_ERR(stat, "cudaEventRecord failed");
743 /* need to reallocate if we have to copy more atoms than the amount of space
744 available and only allocate if we haven't initialized yet, i.e. d_atdat->nalloc == -1 */
745 if (natoms > d_atdat->nalloc)
747 nalloc = over_alloc_small(natoms);
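/* Illustrative effect (the exact growth factor is defined by over_alloc_small(),
 * not here): the allocation is made somewhat larger than natoms, e.g. room for
 * roughly 4200 atoms when natoms == 3500, so that small step-to-step changes in
 * the atom count do not trigger a free/malloc cycle at every pair search.
 */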
749 /* free up first if the arrays have already been initialized */
750 if (d_atdat->nalloc != -1)
752 cu_free_buffered(d_atdat->f, &d_atdat->natoms, &d_atdat->nalloc);
753 cu_free_buffered(d_atdat->xq);
754 cu_free_buffered(d_atdat->atom_types);
757 stat = cudaMalloc((void **)&d_atdat->f, nalloc*sizeof(*d_atdat->f));
758 CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->f");
759 stat = cudaMalloc((void **)&d_atdat->xq, nalloc*sizeof(*d_atdat->xq));
760 CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->xq");
762 stat = cudaMalloc((void **)&d_atdat->atom_types, nalloc*sizeof(*d_atdat->atom_types));
763 CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->atom_types");
765 d_atdat->nalloc = nalloc;
769 d_atdat->natoms = natoms;
770 d_atdat->natoms_local = nbat->natoms_local;
772 /* need to clear GPU f output if realloc happened */
775 nbnxn_cuda_clear_f(cu_nb, nalloc);
778 cu_copy_H2D_async(d_atdat->atom_types, nbat->type,
779 natoms*sizeof(*d_atdat->atom_types), ls);
783 stat = cudaEventRecord(timers->stop_atdat, ls);
784 CU_RET_ERR(stat, "cudaEventRecord failed");
788 void nbnxn_cuda_free(FILE *fplog, nbnxn_cuda_ptr_t cu_nb)
791 cu_atomdata_t *atdat;
792 cu_nbparam_t *nbparam;
793 cu_plist_t *plist, *plist_nl;
796 if (cu_nb == NULL) return;
798 atdat = cu_nb->atdat;
799 nbparam = cu_nb->nbparam;
800 plist = cu_nb->plist[eintLocal];
801 plist_nl = cu_nb->plist[eintNonlocal];
802 timers = cu_nb->timers;
804 if (nbparam->eeltype == eelCuEWALD || nbparam->eeltype == eelCuEWALD_TWIN)
806 stat = cudaUnbindTexture(nbnxn_cuda_get_coulomb_tab_texref());
807 CU_RET_ERR(stat, "cudaUnbindTexture on coulomb_tab failed");
808 cu_free_buffered(nbparam->coulomb_tab, &nbparam->coulomb_tab_size);
811 stat = cudaEventDestroy(cu_nb->nonlocal_done);
812 CU_RET_ERR(stat, "cudaEventDestroy failed on cu_nb->nonlocal_done");
813 stat = cudaEventDestroy(cu_nb->misc_ops_done);
814 CU_RET_ERR(stat, "cudaEventDestroy failed on cu_nb->misc_ops_done");
818 stat = cudaEventDestroy(timers->start_atdat);
819 CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_atdat");
820 stat = cudaEventDestroy(timers->stop_atdat);
821 CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_atdat");
823 /* The non-local counters/stream (second in the array) are needed only with DD. */
824 for (int i = 0; i <= (cu_nb->bUseTwoStreams ? 1 : 0); i++)
826 stat = cudaEventDestroy(timers->start_nb_k[i]);
827 CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_nb_k");
828 stat = cudaEventDestroy(timers->stop_nb_k[i]);
829 CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_nb_k");
831 stat = cudaEventDestroy(timers->start_pl_h2d[i]);
832 CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_pl_h2d");
833 stat = cudaEventDestroy(timers->stop_pl_h2d[i]);
834 CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_pl_h2d");
836 stat = cudaStreamDestroy(cu_nb->stream[i]);
837 CU_RET_ERR(stat, "cudaStreamDestroy failed on stream");
839 stat = cudaEventDestroy(timers->start_nb_h2d[i]);
840 CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_nb_h2d");
841 stat = cudaEventDestroy(timers->stop_nb_h2d[i]);
842 CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_nb_h2d");
844 stat = cudaEventDestroy(timers->start_nb_d2h[i]);
845 CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_nb_d2h");
846 stat = cudaEventDestroy(timers->stop_nb_d2h[i]);
847 CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_nb_d2h");
851 stat = cudaUnbindTexture(nbnxn_cuda_get_nbfp_texref());
852 CU_RET_ERR(stat, "cudaUnbindTexture on nbfp failed");
853 cu_free_buffered(nbparam->nbfp);
855 stat = cudaFree(atdat->shift_vec);
856 CU_RET_ERR(stat, "cudaFree failed on atdat->shift_vec");
857 stat = cudaFree(atdat->fshift);
858 CU_RET_ERR(stat, "cudaFree failed on atdat->fshift");
860 stat = cudaFree(atdat->e_lj);
861 CU_RET_ERR(stat, "cudaFree failed on atdat->e_lj");
862 stat = cudaFree(atdat->e_el);
863 CU_RET_ERR(stat, "cudaFree failed on atdat->e_el");
865 cu_free_buffered(atdat->f, &atdat->natoms, &atdat->nalloc);
866 cu_free_buffered(atdat->xq);
867 cu_free_buffered(atdat->atom_types, &atdat->ntypes);
869 cu_free_buffered(plist->sci, &plist->nsci, &plist->sci_nalloc);
870 cu_free_buffered(plist->cj4, &plist->ncj4, &plist->cj4_nalloc);
871 cu_free_buffered(plist->excl, &plist->nexcl, &plist->excl_nalloc);
872 if (cu_nb->bUseTwoStreams)
874 cu_free_buffered(plist_nl->sci, &plist_nl->nsci, &plist_nl->sci_nalloc);
875 cu_free_buffered(plist_nl->cj4, &plist_nl->ncj4, &plist_nl->cj4_nalloc);
876 cu_free_buffered(plist_nl->excl, &plist_nl->nexcl, &plist_nl->excl_nalloc);
882 if (cu_nb->bUseTwoStreams)
887 sfree(cu_nb->timings);
892 fprintf(debug, "Cleaned up CUDA data structures.\n");
896 void cu_synchstream_atdat(nbnxn_cuda_ptr_t cu_nb, int iloc)
899 cudaStream_t stream = cu_nb->stream[iloc];
901 stat = cudaStreamWaitEvent(stream, cu_nb->timers->stop_atdat, 0);
902 CU_RET_ERR(stat, "cudaStreamWaitEvent failed");
905 wallclock_gpu_t * nbnxn_cuda_get_timings(nbnxn_cuda_ptr_t cu_nb)
907 return (cu_nb != NULL && cu_nb->bDoTime) ? cu_nb->timings : NULL;
910 void nbnxn_cuda_reset_timings(nbnxn_cuda_ptr_t cu_nb)
914 init_timings(cu_nb->timings);
918 int nbnxn_cuda_min_ci_balanced(nbnxn_cuda_ptr_t cu_nb)
920 return cu_nb != NULL ?
921 gpu_min_ci_balanced_factor*cu_nb->dev_info->prop.multiProcessorCount : 0;