Some code blocks still needed cleanup to satisfy clang-tidy.
These issues came up during a manual run in preparation for the CI job.
// NOTE(review): top-level const on the by-value lambdaQ parameter was dropped
// from this declaration per clang-tidy (readability-avoid-const-params-in-decls);
// it does not affect the function's signature or its definition.
GPU_FUNC_QUALIFIER void pme_gpu_launch_spread(gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                              GpuEventSynchronizer* GPU_FUNC_ARGUMENT(xReadyOnDevice),
                                              gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle),
                                              real GPU_FUNC_ARGUMENT(lambdaQ)) GPU_FUNC_TERM;
/*! \brief
* Launches middle stages of PME (FFT R2C, solving, FFT C2R) either on GPU or on CPU, depending on the run mode.
*/
// NOTE(review): top-level const on the by-value lambdaQ parameter was dropped
// from this declaration per clang-tidy (readability-avoid-const-params-in-decls).
GPU_FUNC_QUALIFIER void pme_gpu_launch_gather(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                              gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle),
                                              real GPU_FUNC_ARGUMENT(lambdaQ)) GPU_FUNC_TERM;
/*! \brief
* Attempts to complete PME GPU tasks.
gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle),
gmx::ForceWithVirial* GPU_FUNC_ARGUMENT(forceWithVirial),
gmx_enerdata_t* GPU_FUNC_ARGUMENT(enerd),
- const real GPU_FUNC_ARGUMENT(lambdaQ),
+ real GPU_FUNC_ARGUMENT(lambdaQ),
GpuTaskCompletion GPU_FUNC_ARGUMENT(completionKind))
GPU_FUNC_TERM_WITH_RETURN(false);
gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle),
gmx::ForceWithVirial* GPU_FUNC_ARGUMENT(forceWithVirial),
gmx_enerdata_t* GPU_FUNC_ARGUMENT(enerd),
- const real GPU_FUNC_ARGUMENT(lambdaQ)) GPU_FUNC_TERM;
+ real GPU_FUNC_ARGUMENT(lambdaQ)) GPU_FUNC_TERM;
/*! \brief
* The PME GPU reinitialization function that is called both at the end of any PME computation and on any load balancing.
// time needed for that checking, but do not yet record that the
// gather has occurred.
bool needToSynchronize = true;
- constexpr bool c_streamQuerySupported = bool(GMX_GPU_CUDA);
+ constexpr bool c_streamQuerySupported = GMX_GPU_CUDA;
// TODO: implement c_streamQuerySupported with an additional GpuEventSynchronizer per stream (#2521)
if ((completionKind == GpuTaskCompletion::Check) && c_streamQuerySupported)
* \param[in] pmeGpu The PME GPU structure.
* \param[in] gridIndex The index of the grid on which to perform the calculations.
*/
- GpuParallel3dFft(const PmeGpu* pmeGpu, const int gridIndex);
+ GpuParallel3dFft(const PmeGpu* pmeGpu, int gridIndex);
/*! \brief Destroys the FFT plans. */
~GpuParallel3dFft();
/*! \brief Performs the FFT transform in given direction
float** GPU_FUNC_ARGUMENT(h_grids),
bool GPU_FUNC_ARGUMENT(computeSplines),
bool GPU_FUNC_ARGUMENT(spreadCharges),
- const real GPU_FUNC_ARGUMENT(lambda)) GPU_FUNC_TERM;
+ real GPU_FUNC_ARGUMENT(lambda)) GPU_FUNC_TERM;
/*! \libinternal \brief
* 3D FFT R2C/C2R routine.
* \param[in,out] deviceBuffer Device buffer to store data in.
*/
template<typename ValueType>
-void destroyParamLookupTable(DeviceBuffer<ValueType>* deviceBuffer, DeviceTexture& /* deviceTexture*/)
+void destroyParamLookupTable(DeviceBuffer<ValueType>* deviceBuffer, const DeviceTexture& /* deviceTexture*/)
{
freeDeviceBuffer(deviceBuffer);
}
/* DtoH f */
GMX_ASSERT(sizeof(*nbatom->out[0].f.data()) == sizeof(float),
"The host force buffer should be in single precision to match device data size.");
- copyFromDeviceBuffer(&nbatom->out[0].f.data()[adat_begin * DIM], &adat->f, adat_begin * DIM,
+ copyFromDeviceBuffer(&nbatom->out[0].f[adat_begin * DIM], &adat->f, adat_begin * DIM,
adat_len * DIM, deviceStream, GpuApiCallBehavior::Async,
bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
}
/* Free kernels */
+ // NOLINTNEXTLINE(bugprone-sizeof-expression)
int kernel_count = sizeof(nb->kernel_ener_noprune_ptr) / sizeof(nb->kernel_ener_noprune_ptr[0][0]);
free_kernels(nb->kernel_ener_noprune_ptr[0], kernel_count);
+ // NOLINTNEXTLINE(bugprone-sizeof-expression)
kernel_count = sizeof(nb->kernel_ener_prune_ptr) / sizeof(nb->kernel_ener_prune_ptr[0][0]);
free_kernels(nb->kernel_ener_prune_ptr[0], kernel_count);
+ // NOLINTNEXTLINE(bugprone-sizeof-expression)
kernel_count = sizeof(nb->kernel_noener_noprune_ptr) / sizeof(nb->kernel_noener_noprune_ptr[0][0]);
free_kernels(nb->kernel_noener_noprune_ptr[0], kernel_count);
+ // NOLINTNEXTLINE(bugprone-sizeof-expression)
kernel_count = sizeof(nb->kernel_noener_prune_ptr) / sizeof(nb->kernel_noener_prune_ptr[0][0]);
free_kernels(nb->kernel_noener_prune_ptr[0], kernel_count);