* \param[in] aloc Atom locality flag.
*/
GPU_FUNC_QUALIFIER
-void gpu_copy_xq_to_gpu(gmx_nbnxm_gpu_t gmx_unused* nb,
+void gpu_copy_xq_to_gpu(NbnxmGpu gmx_unused* nb,
const struct nbnxn_atomdata_t gmx_unused* nbdata,
gmx::AtomLocality gmx_unused aloc) GPU_FUNC_TERM;
*
*/
GPU_FUNC_QUALIFIER
-void gpu_launch_kernel(gmx_nbnxm_gpu_t gmx_unused* nb,
+void gpu_launch_kernel(NbnxmGpu gmx_unused* nb,
const gmx::StepWorkload gmx_unused& stepWork,
gmx::InteractionLocality gmx_unused iloc) GPU_FUNC_TERM;
* \param[in] numParts Number of parts the pair list is split into in the rolling kernel.
*/
GPU_FUNC_QUALIFIER
-void gpu_launch_kernel_pruneonly(gmx_nbnxm_gpu_t gmx_unused* nb,
+void gpu_launch_kernel_pruneonly(NbnxmGpu gmx_unused* nb,
gmx::InteractionLocality gmx_unused iloc,
int gmx_unused numParts) GPU_FUNC_TERM;
* (and energies/shift forces if required).
*/
GPU_FUNC_QUALIFIER
-void gpu_launch_cpyback(gmx_nbnxm_gpu_t gmx_unused* nb,
+void gpu_launch_cpyback(NbnxmGpu gmx_unused* nb,
nbnxn_atomdata_t gmx_unused* nbatom,
const gmx::StepWorkload gmx_unused& stepWork,
gmx::AtomLocality gmx_unused aloc) GPU_FUNC_TERM;
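/* Usage sketch (illustrative, not part of this header): on a regular MD
 * step the caller is assumed to launch these back-to-back on the local
 * stream; nb, nbdata, nbatom, stepWork and numRollingParts are
 * hypothetical caller-owned names.
 * \code
 *   gpu_copy_xq_to_gpu(nb, nbdata, gmx::AtomLocality::Local);
 *   gpu_launch_kernel(nb, stepWork, gmx::InteractionLocality::Local);
 *   // on pruning steps, prune the pair list in rolling parts:
 *   gpu_launch_kernel_pruneonly(nb, gmx::InteractionLocality::Local, numRollingParts);
 *   gpu_launch_cpyback(nb, nbatom, stepWork, gmx::AtomLocality::Local);
 * \endcode
 */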
* \returns True if the nonbonded tasks associated with \p aloc locality have completed
*/
GPU_FUNC_QUALIFIER
-bool gpu_try_finish_task(gmx_nbnxm_gpu_t gmx_unused* nb,
+bool gpu_try_finish_task(NbnxmGpu gmx_unused* nb,
const gmx::StepWorkload gmx_unused& stepWork,
gmx::AtomLocality gmx_unused aloc,
real gmx_unused* e_lj,
* \param[out] shiftForces Shift forces buffer to accumulate into
* \param[out] wcycle Pointer to wallcycle data structure */
GPU_FUNC_QUALIFIER
-float gpu_wait_finish_task(gmx_nbnxm_gpu_t gmx_unused* nb,
+float gpu_wait_finish_task(NbnxmGpu gmx_unused* nb,
const gmx::StepWorkload gmx_unused& stepWork,
gmx::AtomLocality gmx_unused aloc,
real gmx_unused* e_lj,
* Called on the NS step and performs (re-)allocations and memory copies.
*/
CUDA_FUNC_QUALIFIER
void nbnxn_gpu_init_x_to_nbat_x(const Nbnxm::GridSet gmx_unused& gridSet,
- gmx_nbnxm_gpu_t gmx_unused* gpu_nbv) CUDA_FUNC_TERM;
+ NbnxmGpu gmx_unused* gpu_nbv) CUDA_FUNC_TERM;
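/* Usage sketch (illustrative): assumed to be called once per search
 * step, after the grids are rebuilt and before the per-step coordinate
 * conversions; gridSet and gpu_nbv are hypothetical caller-owned names.
 * \code
 *   nbnxn_gpu_init_x_to_nbat_x(gridSet, gpu_nbv);
 * \endcode
 */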
/*! \brief X buffer operations on GPU: performs conversion from rvec to nb format.
*
CUDA_FUNC_QUALIFIER
void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid gmx_unused& grid,
bool gmx_unused setFillerCoords,
- gmx_nbnxm_gpu_t gmx_unused* gpu_nbv,
+ NbnxmGpu gmx_unused* gpu_nbv,
DeviceBuffer<float> gmx_unused d_x,
GpuEventSynchronizer gmx_unused* xReadyOnDevice,
gmx::AtomLocality gmx_unused locality,
* \param[in] interactionLocality Local or NonLocal sync point
*/
CUDA_FUNC_QUALIFIER
-void nbnxnInsertNonlocalGpuDependency(const gmx_nbnxm_gpu_t gmx_unused* nb,
+void nbnxnInsertNonlocalGpuDependency(const NbnxmGpu gmx_unused* nb,
gmx::InteractionLocality gmx_unused interactionLocality) CUDA_FUNC_TERM;
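/* Usage sketch (illustrative, assuming a domain-decomposition run): the
 * dependency is inserted on the local stream so that subsequent
 * non-local work waits on it; nb is a hypothetical caller-owned pointer.
 * \code
 *   nbnxnInsertNonlocalGpuDependency(nb, gmx::InteractionLocality::Local);
 * \endcode
 */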
/*! \brief Set up internal flags that indicate what type of short-range work there is.
* \param[in] iLocality Interaction locality identifier
*/
GPU_FUNC_QUALIFIER
-void setupGpuShortRangeWork(gmx_nbnxm_gpu_t gmx_unused* nb,
+void setupGpuShortRangeWork(NbnxmGpu gmx_unused* nb,
const gmx::GpuBonded gmx_unused* gpuBonded,
gmx::InteractionLocality gmx_unused iLocality) GPU_FUNC_TERM;
* \param[in] aLocality Atom locality identifier
*/
GPU_FUNC_QUALIFIER
-bool haveGpuShortRangeWork(const gmx_nbnxm_gpu_t gmx_unused* nb, gmx::AtomLocality gmx_unused aLocality)
+bool haveGpuShortRangeWork(const NbnxmGpu gmx_unused* nb, gmx::AtomLocality gmx_unused aLocality)
GPU_FUNC_TERM_WITH_RETURN(false);
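/* Usage sketch (illustrative): the flags are assumed to be set up once
 * per search step and queried on regular steps; nb and gpuBonded are
 * hypothetical caller-owned objects.
 * \code
 *   setupGpuShortRangeWork(nb, gpuBonded, gmx::InteractionLocality::NonLocal);
 *   // later, per step:
 *   if (haveGpuShortRangeWork(nb, gmx::AtomLocality::NonLocal))
 *   {
 *       // wait for / consume the corresponding GPU results
 *   }
 * \endcode
 */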
/*! \brief Initialization for F buffer operations on GPU */
CUDA_FUNC_QUALIFIER
void nbnxn_gpu_init_add_nbat_f_to_f(const int gmx_unused* cell,
- gmx_nbnxm_gpu_t gmx_unused* gpu_nbv,
+ NbnxmGpu gmx_unused* gpu_nbv,
int gmx_unused natoms_total,
GpuEventSynchronizer gmx_unused* localReductionDone) CUDA_FUNC_TERM;
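/* Usage sketch (illustrative): like the X buffer ops, the F-reduction
 * setup is assumed to run on search steps; cell, natomsTotal and
 * localReductionDone are hypothetical caller-owned names.
 * \code
 *   nbnxn_gpu_init_add_nbat_f_to_f(cell, gpu_nbv, natomsTotal, localReductionDone);
 * \endcode
 */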
CUDA_FUNC_QUALIFIER
void nbnxn_gpu_add_nbat_f_to_f(gmx::AtomLocality gmx_unused atomLocality,
DeviceBuffer<float> gmx_unused totalForcesDevice,
- gmx_nbnxm_gpu_t gmx_unused* gpu_nbv,
+ NbnxmGpu gmx_unused* gpu_nbv,
void gmx_unused* pmeForcesDevice,
gmx::ArrayRef<GpuEventSynchronizer* const> gmx_unused dependencyList,
int gmx_unused atomStart,
* \param[in] nb The nonbonded data GPU structure
*/
CUDA_FUNC_QUALIFIER
-void nbnxn_wait_x_on_device(gmx_nbnxm_gpu_t gmx_unused* nb) CUDA_FUNC_TERM;
+void nbnxn_wait_x_on_device(NbnxmGpu gmx_unused* nb) CUDA_FUNC_TERM;
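/* Usage sketch (illustrative): blocks the host until the device-side
 * coordinate operations tracked by nb have completed; nb is a
 * hypothetical caller-owned pointer.
 * \code
 *   nbnxn_wait_x_on_device(nb);
 * \endcode
 */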
} // namespace Nbnxm
#endif