/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmx_fatal.h"

#include "types/nb_verlet.h"
#include "types/interaction_const.h"
#include "types/force_flags.h"
#include "../nbnxn_consts.h"

#include "nbnxn_cuda_types.h"
#include "../../gmxlib/cuda_tools/cudautils.cuh"
#include "nbnxn_cuda_data_mgmt.h"
#include "pmalloc_cuda.h"
#include "gpu_utils.h"

static bool bUseCudaEventBlockingSync = false; /* makes the CPU thread block */
/* This is a heuristically determined parameter for the Fermi architecture: the
 * minimum size of ci lists is obtained by multiplying this constant with the
 * number of multiprocessors on the current device.
 */
static unsigned int gpu_min_ci_balanced_factor = 40;
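/* Illustrative sketch (not part of the original source; the device figures are
 * assumptions): on a hypothetical Fermi GPU with 14 multiprocessors, the
 * balancing limit returned by nbnxn_cuda_min_ci_balanced() at the end of this
 * file would be
 *     40 * 14 = 560
 * sci list entries, i.e. pair lists shorter than this are considered too small
 * to keep the GPU busy. */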
/* Functions from nbnxn_cuda.cu */
extern void nbnxn_cuda_set_cacheconfig(cuda_dev_info_t *devinfo);
extern const struct texture<float, 1, cudaReadModeElementType>& nbnxn_cuda_get_nbfp_texref();
extern const struct texture<float, 1, cudaReadModeElementType>& nbnxn_cuda_get_coulomb_tab_texref();

static void nbnxn_cuda_clear_e_fshift(nbnxn_cuda_ptr_t cu_nb);
/*! Tabulates the Ewald Coulomb force and initializes the size/scale
    and the table GPU array. If called with an already allocated table,
    it just re-uploads the table.
 */
static void init_ewald_coulomb_force_table(cu_nbparam_t *nbp)
    float *ftmp, *coul_tab;

    tabsize  = GPU_EWALD_COULOMB_FORCE_TABLE_SIZE;
    /* Subtract 2 instead of 1 to avoid access out of range due to rounding */
    tabscale = (tabsize - 2) / sqrt(nbp->rcoulomb_sq);

    pmalloc((void**)&ftmp, tabsize*sizeof(*ftmp));

    table_spline3_fill_ewald_lr(ftmp, NULL, NULL, tabsize,
                                1/tabscale, nbp->ewald_beta);

    /* If the table pointer == NULL the table is generated the first time =>
       the array pointer will be saved to nbparam and the texture is bound.
     */
    coul_tab = nbp->coulomb_tab;
    if (coul_tab == NULL)
        stat = cudaMalloc((void **)&coul_tab, tabsize*sizeof(*coul_tab));
        CU_RET_ERR(stat, "cudaMalloc failed on coul_tab");

        nbp->coulomb_tab = coul_tab;

        cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
        stat = cudaBindTexture(NULL, &nbnxn_cuda_get_coulomb_tab_texref(),
                               coul_tab, &cd, tabsize*sizeof(*coul_tab));
        CU_RET_ERR(stat, "cudaBindTexture on coul_tab failed");

    cu_copy_H2D(coul_tab, ftmp, tabsize*sizeof(*coul_tab));

    nbp->coulomb_tab_size  = tabsize;
    nbp->coulomb_tab_scale = tabscale;
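    /* Worked example (a sketch, not from the original source; the numbers are
     * assumptions for illustration): with a table of 1536 points and
     * rcoulomb = 1.0 nm, the scale above becomes
     *     tabscale = (1536 - 2) / sqrt(1.0) = 1534 points/nm,
     * so table_spline3_fill_ewald_lr() tabulates the Ewald force on a grid of
     * 1/tabscale ~= 0.00065 nm, which the kernels interpolate through the
     * texture bound above. */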
/*! Initializes the atomdata structure the first time; the per-atom arrays
    (xq, f, atom_types) only get allocated and filled later, in
    nbnxn_cuda_init_atomdata. */
static void init_atomdata_first(cu_atomdata_t *ad, int ntypes)
    stat = cudaMalloc((void**)&ad->shift_vec, SHIFTS*sizeof(*ad->shift_vec));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->shift_vec");
    ad->bShiftVecUploaded = false;

    stat = cudaMalloc((void**)&ad->fshift, SHIFTS*sizeof(*ad->fshift));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->fshift");

    stat = cudaMalloc((void**)&ad->e_lj, sizeof(*ad->e_lj));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->e_lj");
    stat = cudaMalloc((void**)&ad->e_el, sizeof(*ad->e_el));
    CU_RET_ERR(stat, "cudaMalloc failed on ad->e_el");
    /* initialize to NULL pointers to data that is not allocated here and will
       need reallocation in nbnxn_cuda_init_atomdata */

    /* size -1 indicates that the respective array hasn't been initialized yet */
/*! Initializes the nonbonded parameter data structure. */
static void init_nbparam(cu_nbparam_t *nbp,
                         const interaction_const_t *ic,
                         const nonbonded_verlet_t *nbv)
    ntypes = nbv->grp[0].nbat->ntype;

    nbp->ewald_beta  = ic->ewaldcoeff;
    nbp->sh_ewald    = ic->sh_ewald;
    nbp->epsfac      = ic->epsfac;
    nbp->two_k_rf    = 2.0 * ic->k_rf;
    nbp->c_rf        = ic->c_rf;
    nbp->rvdw_sq     = ic->rvdw * ic->rvdw;
    nbp->rcoulomb_sq = ic->rcoulomb * ic->rcoulomb;
    nbp->rlist_sq    = ic->rlist * ic->rlist;
    nbp->sh_invrc6   = ic->sh_invrc6;

    if (ic->eeltype == eelCUT)
        nbp->eeltype = eelCuCUT;
    else if (EEL_RF(ic->eeltype))
        nbp->eeltype = eelCuRF;
    else if ((EEL_PME(ic->eeltype) || ic->eeltype == eelEWALD))
        /* Initially rcoulomb == rvdw, so it's surely not twin cut-off, unless
           forced by the env. var. (used only for benchmarking). */
        if (getenv("GMX_CUDA_NB_EWALD_TWINCUT") == NULL)
            nbp->eeltype = eelCuEWALD;
            nbp->eeltype = eelCuEWALD_TWIN;

        /* Shouldn't happen, as this is checked when choosing Verlet-scheme */
        gmx_incons("The requested electrostatics type is not implemented in the CUDA GPU accelerated kernels!");
    /* generate table for PME */
    if (nbp->eeltype == eelCuEWALD)
        nbp->coulomb_tab = NULL;
        init_ewald_coulomb_force_table(nbp);

    nnbfp = 2*ntypes*ntypes;
    stat  = cudaMalloc((void **)&nbp->nbfp, nnbfp*sizeof(*nbp->nbfp));
    CU_RET_ERR(stat, "cudaMalloc failed on nbp->nbfp");
    cu_copy_H2D(nbp->nbfp, nbv->grp[0].nbat->nbfp, nnbfp*sizeof(*nbp->nbfp));

    cudaChannelFormatDesc cd = cudaCreateChannelDesc<float>();
    stat = cudaBindTexture(NULL, &nbnxn_cuda_get_nbfp_texref(),
                           nbp->nbfp, &cd, nnbfp*sizeof(*nbp->nbfp));
    CU_RET_ERR(stat, "cudaBindTexture on nbfp failed");
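    /* Note (added for clarity, not in the original source): nbfp stores the
     * Lennard-Jones parameters as interleaved (c6, c12) pairs for every i,j
     * type combination, hence the 2*ntypes*ntypes elements copied above. A
     * hypothetical system with 3 atom types would therefore upload
     * 2*3*3 = 18 floats. */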
/*! Re-generates the GPU Ewald force table, resets rlist, and updates the
 *  electrostatics type switching to twin cut-off (or back) if needed. */
void nbnxn_cuda_pme_loadbal_update_param(nbnxn_cuda_ptr_t cu_nb,
                                         const interaction_const_t *ic)
    cu_nbparam_t *nbp = cu_nb->nbparam;

    nbp->rlist_sq    = ic->rlist * ic->rlist;
    nbp->rcoulomb_sq = ic->rcoulomb * ic->rcoulomb;
    nbp->ewald_beta  = ic->ewaldcoeff;

    /* When switching to/from twin cut-off, the electrostatics type needs updating.
       (The env. var. that forces twin cut-off is for benchmarking only!) */
    if (ic->rcoulomb == ic->rvdw &&
        getenv("GMX_CUDA_NB_EWALD_TWINCUT") == NULL)
        nbp->eeltype = eelCuEWALD;
        nbp->eeltype = eelCuEWALD_TWIN;

    init_ewald_coulomb_force_table(cu_nb->nbparam);
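    /* Usage note (a sketch, not part of the original source): this function is
     * intended to be called from the PME load-balancing machinery after it has
     * changed the cut-off, e.g. along the lines of
     *
     *     ic->rcoulomb   = new_cutoff;                              // hypothetical caller code
     *     ic->ewaldcoeff = calc_ewaldcoeff(ic->rcoulomb, ewald_rtol);
     *     nbnxn_cuda_pme_loadbal_update_param(cu_nb, ic);
     *
     * so that the device-side parameters and the Ewald force table stay
     * consistent with the new cut-off. The variable names and calc_ewaldcoeff()
     * call above are assumptions for illustration. */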
/*! Initializes the pair list data structure. */
static void init_plist(cu_plist_t *pl)
    /* initialize to NULL pointers to data that is not allocated here and will
       need reallocation in nbnxn_cuda_init_pairlist */

    /* size -1 indicates that the respective array hasn't been initialized yet */

    pl->excl_nalloc = -1;
    pl->bDoPrune    = false;
/*! Initializes the timer data structure. */
static void init_timers(cu_timers_t *t, bool bUseTwoStreams)
    int eventflags = ( bUseCudaEventBlockingSync ? cudaEventBlockingSync : cudaEventDefault );

    stat = cudaEventCreateWithFlags(&(t->start_atdat), eventflags);
    CU_RET_ERR(stat, "cudaEventCreate on start_atdat failed");
    stat = cudaEventCreateWithFlags(&(t->stop_atdat), eventflags);
    CU_RET_ERR(stat, "cudaEventCreate on stop_atdat failed");

    /* The non-local counters/stream (second in the array) are needed only with DD. */
    for (int i = 0; i <= (bUseTwoStreams ? 1 : 0); i++)
        stat = cudaEventCreateWithFlags(&(t->start_nb_k[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on start_nb_k failed");
        stat = cudaEventCreateWithFlags(&(t->stop_nb_k[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on stop_nb_k failed");

        stat = cudaEventCreateWithFlags(&(t->start_pl_h2d[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on start_pl_h2d failed");
        stat = cudaEventCreateWithFlags(&(t->stop_pl_h2d[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on stop_pl_h2d failed");

        stat = cudaEventCreateWithFlags(&(t->start_nb_h2d[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on start_nb_h2d failed");
        stat = cudaEventCreateWithFlags(&(t->stop_nb_h2d[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on stop_nb_h2d failed");

        stat = cudaEventCreateWithFlags(&(t->start_nb_d2h[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on start_nb_d2h failed");
        stat = cudaEventCreateWithFlags(&(t->stop_nb_d2h[i]), eventflags);
        CU_RET_ERR(stat, "cudaEventCreate on stop_nb_d2h failed");
/*! Initializes the timings data structure. */
static void init_timings(wallclock_gpu_t *t)

    for (i = 0; i < 2; i++)
        for (j = 0; j < 2; j++)
            t->ktime[i][j].t = 0.0;
            t->ktime[i][j].c = 0;
/* Decide which kernel version to use (default or legacy) based on:
 *  - non-bonded kernel selector environment variables
 *  - GPU SM version (TODO)
 */
static int pick_nbnxn_kernel_version()
    bool bLegacyKernel, bDefaultKernel, bCUDA40, bCUDA32;

    /* legacy kernel (former k2), kept for now for backward compatibility,
       faster than the default with CUDA 3.2/4.0 (TODO: on Kepler?). */
    bLegacyKernel  = (getenv("GMX_CUDA_NB_LEGACY") != NULL);
    /* default kernel (former k3). */
    bDefaultKernel = (getenv("GMX_CUDA_NB_DEFAULT") != NULL);

    if ((unsigned)(bLegacyKernel + bDefaultKernel) > 1)
        gmx_fatal(FARGS, "Multiple CUDA non-bonded kernels requested; to manually pick a kernel set only one \n"
                  "of the following environment variables: \n"
                  "GMX_CUDA_NB_DEFAULT, GMX_CUDA_NB_LEGACY");
    bCUDA32 = bCUDA40 = false;
#if CUDA_VERSION == 3200
    sprintf(sbuf, "3.2");
#elif CUDA_VERSION == 4000
    sprintf(sbuf, "4.0");

    /* default is default ;) */
    kver = eNbnxnCuKDefault;

    if (bCUDA32 || bCUDA40)
        /* use legacy kernel unless something else is forced by an env. var */
                "\nNOTE: CUDA %s compilation detected; with this compiler version the legacy\n"
                "      non-bonded kernels perform best. However, the default kernels were\n"
                "      selected by the GMX_CUDA_NB_DEFAULT environment variable.\n"
                "      For best performance upgrade your CUDA toolkit.",
            kver = eNbnxnCuKLegacy;

        /* issue a note if the non-default kernel is forced by an env. var */
                "\nNOTE: Legacy non-bonded CUDA kernels were selected by the GMX_CUDA_NB_LEGACY\n"
                "      env. var. Consider using the default kernels which should be faster!\n");

        kver = eNbnxnCuKLegacy;
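    /* Summary (added as a reading aid, not in the original source):
     *   - compiled with CUDA 3.2 or 4.0 -> legacy kernels, unless
     *     GMX_CUDA_NB_DEFAULT is set (then a note suggests upgrading CUDA);
     *   - any other CUDA version        -> default kernels, unless
     *     GMX_CUDA_NB_LEGACY is set (then the note above is printed);
     *   setting both env. vars is rejected with gmx_fatal() earlier in this
     *   function. */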
void nbnxn_cuda_init(FILE *fplog,
                     nbnxn_cuda_ptr_t *p_cu_nb,
                     gmx_gpu_info_t *gpu_info, int my_gpu_index,
                     gmx_bool bLocalAndNonlocal)

    bool bStreamSync, bNoStreamSync, bTMPIAtomics, bX86;

    if (p_cu_nb == NULL) return;

    snew(nb->nbparam, 1);
    snew(nb->plist[eintLocal], 1);
    if (bLocalAndNonlocal)
        snew(nb->plist[eintNonlocal], 1);
    nb->bUseTwoStreams = bLocalAndNonlocal;

    snew(nb->timings, 1);

    pmalloc((void**)&nb->nbst.e_lj, sizeof(*nb->nbst.e_lj));
    pmalloc((void**)&nb->nbst.e_el, sizeof(*nb->nbst.e_el));
    pmalloc((void**)&nb->nbst.fshift, SHIFTS * sizeof(*nb->nbst.fshift));

    init_plist(nb->plist[eintLocal]);

    /* local/non-local GPU streams */
    stat = cudaStreamCreate(&nb->stream[eintLocal]);
    CU_RET_ERR(stat, "cudaStreamCreate on stream[eintLocal] failed");
    if (nb->bUseTwoStreams)
        init_plist(nb->plist[eintNonlocal]);
        stat = cudaStreamCreate(&nb->stream[eintNonlocal]);
        CU_RET_ERR(stat, "cudaStreamCreate on stream[eintNonlocal] failed");
    /* init events for synchronization (timing disabled for performance reasons!) */
    stat = cudaEventCreateWithFlags(&nb->nonlocal_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on nonlocal_done failed");
    stat = cudaEventCreateWithFlags(&nb->misc_ops_done, cudaEventDisableTiming);
    CU_RET_ERR(stat, "cudaEventCreate on misc_ops_done failed");

    /* set device info, just point it to the right GPU among the detected ones */
    nb->dev_info = &gpu_info->cuda_dev[get_gpu_device_id(gpu_info, my_gpu_index)];
    /* On GPUs with ECC enabled, cudaStreamSynchronize shows a large overhead
     * (which increases with shorter time/step) caused by a known CUDA driver bug.
     * To work around the issue we'll use an (admittedly fragile) memory polling
     * waiting to preserve performance. This requires support for atomic
     * operations and only works on x86/x86_64.
     * With polling wait, event-based timing also needs to be disabled.
     */
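    /* Decision summary (added for clarity, not in the original source; it
     * mirrors the branches below and is a reading aid only):
     *
     *   ECC on  + GMX_CUDA_STREAMSYNC set          -> sync + note about the perf. loss
     *   ECC on  + polling possible (x86 + atomics) -> poll  (bUseStreamSync = false)
     *   ECC on  + polling not possible             -> sync + warning
     *   ECC off                                    -> sync, unless GMX_NO_CUDA_STREAMSYNC
     *                                                 forces polling
     */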
    bStreamSync   = getenv("GMX_CUDA_STREAMSYNC") != NULL;
    bNoStreamSync = getenv("GMX_NO_CUDA_STREAMSYNC") != NULL;

    bTMPIAtomics = false;

#if defined(i386) || defined(__x86_64__)

    if (bStreamSync && bNoStreamSync)
        gmx_fatal(FARGS, "Conflicting environment variables: both GMX_CUDA_STREAMSYNC and GMX_NO_CUDA_STREAMSYNC defined");

    if (nb->dev_info->prop.ECCEnabled == 1)
            nb->bUseStreamSync = true;

                    "NOTE: Using a GPU with ECC enabled, but cudaStreamSynchronize-based waiting is\n"
                    "      forced by the GMX_CUDA_STREAMSYNC env. var. Due to a CUDA bug, this \n"
                    "      combination causes performance loss.");
            fprintf(stderr, "\n%s\n", sbuf);

                fprintf(fplog, "\n%s\n", sbuf);

            /* can use polling wait only on x86/x86_64 *if* atomics are available */
            nb->bUseStreamSync = ((bX86 && bTMPIAtomics) == false);

                    "Using a GPU with ECC on; the standard cudaStreamSynchronize waiting, due to a\n"
                    "      CUDA bug, causes performance loss when used in combination with ECC.\n"
                    "      However, the polling waiting workaround can not be used as it is only\n"
                    "      supported on x86/x86_64, but not on the current architecture.");
            gmx_warning("%s\n", sbuf);

                fprintf(fplog, "\n%s\n", sbuf);

        else if (bTMPIAtomics)
                    "NOTE: Using a GPU with ECC enabled; will use polling waiting.\n");

                    "Using a GPU with ECC on; the standard cudaStreamSynchronize waiting, due to a\n"
                    "      CUDA bug, causes performance loss when used in combination with ECC.\n"
                    "      However, the polling waiting workaround can not be used as atomic\n"
                    "      operations are not supported by the current CPU+compiler combination.");
            gmx_warning("%s\n", sbuf);

                fprintf(fplog, "\n%s\n", sbuf);

            nb->bUseStreamSync = false;

                    "NOTE: Using a GPU with no/disabled ECC, but cudaStreamSynchronize-based waiting\n"
                    "      is turned off and polling turned on by the GMX_NO_CUDA_STREAMSYNC env. var.");
            fprintf(stderr, "\n%s\n", sbuf);

                fprintf(fplog, "\n%s\n", sbuf);

            /* no/off ECC, cudaStreamSynchronize not turned off by env. var. */
            nb->bUseStreamSync = true;
    /* CUDA timing disabled as event timers don't work:
       - with multiple streams, i.e. with domain decomposition;
       - with the polling waiting hack (without cudaStreamSynchronize);
       - when turned off by GMX_DISABLE_CUDA_TIMING.
     */
    nb->bDoTime = (!nb->bUseTwoStreams && nb->bUseStreamSync &&
                   (getenv("GMX_DISABLE_CUDA_TIMING") == NULL));
    init_timers(nb->timers, nb->bUseTwoStreams);
    init_timings(nb->timings);

    /* set the kernel type for the current GPU */
    nb->kernel_ver = pick_nbnxn_kernel_version();
    /* pick L1 cache configuration */
    nbnxn_cuda_set_cacheconfig(nb->dev_info);

        fprintf(debug, "Initialized CUDA data structures.\n");
void nbnxn_cuda_init_const(nbnxn_cuda_ptr_t cu_nb,
                           const interaction_const_t *ic,
                           const nonbonded_verlet_t *nbv)
    init_atomdata_first(cu_nb->atdat, nbv->grp[0].nbat->ntype);
    init_nbparam(cu_nb->nbparam, ic, nbv);

    /* clear energy and shift force outputs */
    nbnxn_cuda_clear_e_fshift(cu_nb);
void nbnxn_cuda_init_pairlist(nbnxn_cuda_ptr_t cu_nb,
                              const nbnxn_pairlist_t *h_plist,

    bool          bDoTime = cu_nb->bDoTime;
    cudaStream_t  stream  = cu_nb->stream[iloc];
    cu_plist_t   *d_plist = cu_nb->plist[iloc];

    if (d_plist->na_c < 0)
        d_plist->na_c = h_plist->na_ci;

    if (d_plist->na_c != h_plist->na_ci)
        sprintf(sbuf, "In cu_init_plist: the #atoms per cell has changed (from %d to %d)",
                d_plist->na_c, h_plist->na_ci);
        stat = cudaEventRecord(cu_nb->timers->start_pl_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");

    cu_realloc_buffered((void **)&d_plist->sci, h_plist->sci, sizeof(*d_plist->sci),
                        &d_plist->nsci, &d_plist->sci_nalloc,

    cu_realloc_buffered((void **)&d_plist->cj4, h_plist->cj4, sizeof(*d_plist->cj4),
                        &d_plist->ncj4, &d_plist->cj4_nalloc,

    cu_realloc_buffered((void **)&d_plist->excl, h_plist->excl, sizeof(*d_plist->excl),
                        &d_plist->nexcl, &d_plist->excl_nalloc,
        stat = cudaEventRecord(cu_nb->timers->stop_pl_h2d[iloc], stream);
        CU_RET_ERR(stat, "cudaEventRecord failed");

    /* need to prune the pair list during the next step */
    d_plist->bDoPrune = true;
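    /* Layout note (added for clarity, not from the original source): sci holds
     * the i-super-cluster entries of the pair list, cj4 the j-clusters packed
     * four at a time, and excl the exclusion/interaction bit masks; all three
     * are (re)allocated on the device and copied from the host list above via
     * cu_realloc_buffered(). */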
void nbnxn_cuda_upload_shiftvec(nbnxn_cuda_ptr_t cu_nb,
                                const nbnxn_atomdata_t *nbatom)
    cu_atomdata_t *adat = cu_nb->atdat;
    cudaStream_t   ls   = cu_nb->stream[eintLocal];

    /* only if we have a dynamic box */
    if (nbatom->bDynamicBox || !adat->bShiftVecUploaded)
        cu_copy_H2D_async(adat->shift_vec, nbatom->shift_vec,
                          SHIFTS * sizeof(*adat->shift_vec), ls);
        adat->bShiftVecUploaded = true;
/*! Clears the first natoms_clear elements of the GPU nonbonded force output array. */
static void nbnxn_cuda_clear_f(nbnxn_cuda_ptr_t cu_nb, int natoms_clear)
    cu_atomdata_t *adat = cu_nb->atdat;
    cudaStream_t   ls   = cu_nb->stream[eintLocal];

    stat = cudaMemsetAsync(adat->f, 0, natoms_clear * sizeof(*adat->f), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on f failed");

/*! Clears nonbonded shift force output array and energy outputs on the GPU. */
static void nbnxn_cuda_clear_e_fshift(nbnxn_cuda_ptr_t cu_nb)
    cu_atomdata_t *adat = cu_nb->atdat;
    cudaStream_t   ls   = cu_nb->stream[eintLocal];

    stat = cudaMemsetAsync(adat->fshift, 0, SHIFTS * sizeof(*adat->fshift), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on fshift failed");
    stat = cudaMemsetAsync(adat->e_lj, 0, sizeof(*adat->e_lj), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on e_lj failed");
    stat = cudaMemsetAsync(adat->e_el, 0, sizeof(*adat->e_el), ls);
    CU_RET_ERR(stat, "cudaMemsetAsync on e_el failed");
void nbnxn_cuda_clear_outputs(nbnxn_cuda_ptr_t cu_nb, int flags)
    nbnxn_cuda_clear_f(cu_nb, cu_nb->atdat->natoms);
    /* clear shift force array and energies if the outputs were
       used in the current step */
    if (flags & GMX_FORCE_VIRIAL)
        nbnxn_cuda_clear_e_fshift(cu_nb);
void nbnxn_cuda_init_atomdata(nbnxn_cuda_ptr_t cu_nb,
                              const nbnxn_atomdata_t *nbat)

    bool           bDoTime = cu_nb->bDoTime;
    cu_timers_t   *timers  = cu_nb->timers;
    cu_atomdata_t *d_atdat = cu_nb->atdat;
    cudaStream_t   ls      = cu_nb->stream[eintLocal];

    natoms = nbat->natoms;

        /* time async copy */
        stat = cudaEventRecord(timers->start_atdat, ls);
        CU_RET_ERR(stat, "cudaEventRecord failed");
    /* need to reallocate if we have to copy more atoms than the amount of space
       available, and only allocate if we haven't initialized yet, i.e. d_atdat->nalloc == -1 */
    if (natoms > d_atdat->nalloc)
        nalloc = over_alloc_small(natoms);
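        /* Note (added for clarity; the growth factor is an assumption based on
         * the usual GROMACS over-allocation convention): over_alloc_small()
         * requests somewhat more space than natoms (roughly 20% extra), so that
         * small fluctuations in the atom count between pair-search steps do not
         * trigger a cudaFree/cudaMalloc cycle every time. */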
        /* free up first if the arrays have already been initialized */
        if (d_atdat->nalloc != -1)
            cu_free_buffered(d_atdat->f, &d_atdat->natoms, &d_atdat->nalloc);
            cu_free_buffered(d_atdat->xq);
            cu_free_buffered(d_atdat->atom_types);

        stat = cudaMalloc((void **)&d_atdat->f, nalloc*sizeof(*d_atdat->f));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->f");
        stat = cudaMalloc((void **)&d_atdat->xq, nalloc*sizeof(*d_atdat->xq));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->xq");

        stat = cudaMalloc((void **)&d_atdat->atom_types, nalloc*sizeof(*d_atdat->atom_types));
        CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->atom_types");

        d_atdat->nalloc = nalloc;

    d_atdat->natoms       = natoms;
    d_atdat->natoms_local = nbat->natoms_local;

    /* need to clear GPU f output if realloc happened */
        nbnxn_cuda_clear_f(cu_nb, nalloc);

    cu_copy_H2D_async(d_atdat->atom_types, nbat->type,
                      natoms*sizeof(*d_atdat->atom_types), ls);

        stat = cudaEventRecord(timers->stop_atdat, ls);
        CU_RET_ERR(stat, "cudaEventRecord failed");
void nbnxn_cuda_free(FILE *fplog, nbnxn_cuda_ptr_t cu_nb)
    cu_atomdata_t *atdat;
    cu_nbparam_t  *nbparam;
    cu_plist_t    *plist, *plist_nl;

    if (cu_nb == NULL) return;

    atdat    = cu_nb->atdat;
    nbparam  = cu_nb->nbparam;
    plist    = cu_nb->plist[eintLocal];
    plist_nl = cu_nb->plist[eintNonlocal];
    timers   = cu_nb->timers;

    if (nbparam->eeltype == eelCuEWALD || nbparam->eeltype == eelCuEWALD_TWIN)
        stat = cudaUnbindTexture(nbnxn_cuda_get_coulomb_tab_texref());
        CU_RET_ERR(stat, "cudaUnbindTexture on coulomb_tab failed");
        cu_free_buffered(nbparam->coulomb_tab, &nbparam->coulomb_tab_size);

    stat = cudaEventDestroy(cu_nb->nonlocal_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on timers->nonlocal_done");
    stat = cudaEventDestroy(cu_nb->misc_ops_done);
    CU_RET_ERR(stat, "cudaEventDestroy failed on timers->misc_ops_done");
    stat = cudaEventDestroy(timers->start_atdat);
    CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_atdat");
    stat = cudaEventDestroy(timers->stop_atdat);
    CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_atdat");

    /* The non-local counters/stream (second in the array) are needed only with DD. */
    for (int i = 0; i <= (cu_nb->bUseTwoStreams ? 1 : 0); i++)
        stat = cudaEventDestroy(timers->start_nb_k[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_nb_k");
        stat = cudaEventDestroy(timers->stop_nb_k[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_nb_k");

        stat = cudaEventDestroy(timers->start_pl_h2d[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_pl_h2d");
        stat = cudaEventDestroy(timers->stop_pl_h2d[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_pl_h2d");

        stat = cudaStreamDestroy(cu_nb->stream[i]);
        CU_RET_ERR(stat, "cudaStreamDestroy failed on stream");

        stat = cudaEventDestroy(timers->start_nb_h2d[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_nb_h2d");
        stat = cudaEventDestroy(timers->stop_nb_h2d[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_nb_h2d");

        stat = cudaEventDestroy(timers->start_nb_d2h[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->start_nb_d2h");
        stat = cudaEventDestroy(timers->stop_nb_d2h[i]);
        CU_RET_ERR(stat, "cudaEventDestroy failed on timers->stop_nb_d2h");
    stat = cudaUnbindTexture(nbnxn_cuda_get_nbfp_texref());
    CU_RET_ERR(stat, "cudaUnbindTexture on nbfp failed");
    cu_free_buffered(nbparam->nbfp);
    stat = cudaFree(atdat->shift_vec);
    CU_RET_ERR(stat, "cudaFree failed on atdat->shift_vec");
    stat = cudaFree(atdat->fshift);
    CU_RET_ERR(stat, "cudaFree failed on atdat->fshift");

    stat = cudaFree(atdat->e_lj);
    CU_RET_ERR(stat, "cudaFree failed on atdat->e_lj");
    stat = cudaFree(atdat->e_el);
    CU_RET_ERR(stat, "cudaFree failed on atdat->e_el");

    cu_free_buffered(atdat->f, &atdat->natoms, &atdat->nalloc);
    cu_free_buffered(atdat->xq);
    cu_free_buffered(atdat->atom_types, &atdat->ntypes);

    cu_free_buffered(plist->sci, &plist->nsci, &plist->sci_nalloc);
    cu_free_buffered(plist->cj4, &plist->ncj4, &plist->cj4_nalloc);
    cu_free_buffered(plist->excl, &plist->nexcl, &plist->excl_nalloc);
    if (cu_nb->bUseTwoStreams)
        cu_free_buffered(plist_nl->sci, &plist_nl->nsci, &plist_nl->sci_nalloc);
        cu_free_buffered(plist_nl->cj4, &plist_nl->ncj4, &plist_nl->cj4_nalloc);
        cu_free_buffered(plist_nl->excl, &plist_nl->nexcl, &plist_nl->excl_nalloc);

        fprintf(debug, "Cleaned up CUDA data structures.\n");
void cu_synchstream_atdat(nbnxn_cuda_ptr_t cu_nb, int iloc)
    cudaStream_t stream = cu_nb->stream[iloc];

    stat = cudaStreamWaitEvent(stream, cu_nb->timers->stop_atdat, 0);
    CU_RET_ERR(stat, "cudaStreamWaitEvent failed");

wallclock_gpu_t * nbnxn_cuda_get_timings(nbnxn_cuda_ptr_t cu_nb)
    return (cu_nb != NULL && cu_nb->bDoTime) ? cu_nb->timings : NULL;

void nbnxn_cuda_reset_timings(nbnxn_cuda_ptr_t cu_nb)
    init_timings(cu_nb->timings);

int nbnxn_cuda_min_ci_balanced(nbnxn_cuda_ptr_t cu_nb)
    return cu_nb != NULL ?
           gpu_min_ci_balanced_factor*cu_nb->dev_info->prop.multiProcessorCount : 0;