Fix CUDA clang-tidy issues
author Mark Abraham <mark.j.abraham@gmail.com>
Fri, 22 Oct 2021 05:37:14 +0000 (07:37 +0200)
committer Artem Zhmurov <zhmurov@gmail.com>
Fri, 22 Oct 2021 07:16:18 +0000 (07:16 +0000)
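
Bind the loop variable in the range-for over PP ranks by const reference,
move the NOLINT for bugprone-sizeof-expression onto the sizeof expression
it suppresses, and make the bool/char conversions of
kernelParams->usePipeline explicit in both directions.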
src/gromacs/ewald/pme_coordinate_receiver_gpu_impl.cu
src/gromacs/ewald/pme_gpu_internal.cpp

diff --git a/src/gromacs/ewald/pme_coordinate_receiver_gpu_impl.cu b/src/gromacs/ewald/pme_coordinate_receiver_gpu_impl.cu
index 5b1fa48adef94e3ae66b5269898606aaf3d899c3..d1b9c01557aa728932c9b5aa03452bf19ae5d212 100644
@@ -63,7 +63,7 @@ PmeCoordinateReceiverGpu::Impl::Impl(MPI_Comm                     comm,
 {
     // Create streams to manage pipelining
     ppCommManagers_.reserve(ppRanks.size());
-    for (auto& ppRank : ppRanks)
+    for (const auto& ppRank : ppRanks)
     {
         ppCommManagers_.emplace_back(PpCommManager{
                 ppRank,
@@ -110,9 +110,8 @@ void PmeCoordinateReceiverGpu::Impl::receiveCoordinatesSynchronizerFromPpCudaDir
 
 #if GMX_MPI
     // Receive event from PP task
-    // NOLINTNEXTLINE(bugprone-sizeof-expression)
     MPI_Irecv(&ppCommManagers_[ppRank].sync,
-              sizeof(GpuEventSynchronizer*),
+              sizeof(GpuEventSynchronizer*), // NOLINT(bugprone-sizeof-expression)
               MPI_BYTE,
               ppRank,
               0,
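
The two changes above target different checks. The sketch below is
self-contained, with a hypothetical PpRankInfo type standing in for the
GROMACS per-rank data. It shows both patterns: a range-for variable bound
by const reference so each element is read in place rather than copied
(likely the performance-for-range-copy check), and an intentional sizeof
over a pointer type annotated on the offending line itself, since
bugprone-sizeof-expression treats sizeof(T*) as a probable typo for
sizeof(T).

    #include <cstddef>
    #include <string>
    #include <vector>

    struct PpRankInfo // hypothetical stand-in for the per-rank data
    {
        int         rankId;
        std::string name; // non-trivial member makes a by-value loop copy expensive
    };

    int main()
    {
        std::vector<PpRankInfo> ppRanks = { { 0, "pp0" }, { 1, "pp1" } };

        // Binding by value would copy each element; clang-tidy suggests a
        // const reference when the loop body only reads the element.
        for (const auto& ppRank : ppRanks)
        {
            (void)ppRank.rankId;
        }

        // sizeof on a pointer type is usually a bug (sizeof(T) intended), so
        // bugprone-sizeof-expression flags it; when the pointer size really
        // is wanted, as when receiving a pointer value over MPI, a NOLINT on
        // that exact line records the intent.
        std::size_t recvSize = sizeof(PpRankInfo*); // NOLINT(bugprone-sizeof-expression)

        return recvSize == sizeof(void*) ? 0 : 1;
    }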
diff --git a/src/gromacs/ewald/pme_gpu_internal.cpp b/src/gromacs/ewald/pme_gpu_internal.cpp
index 2f7da67a3a0524ec5a60e4761c7cbd361b733e0d..35fff48b6aee68f439c6298ec76c4c0e35e04d79 100644
@@ -594,7 +594,7 @@ static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceCon
      */
 
 #if GMX_GPU_CUDA
-    pmeGpu->kernelParams->usePipeline       = false;
+    pmeGpu->kernelParams->usePipeline       = char(false);
     pmeGpu->kernelParams->pipelineAtomStart = 0;
     pmeGpu->kernelParams->pipelineAtomEnd   = 0;
     pmeGpu->maxGridWidthX                   = deviceContext.deviceInfo().prop.maxGridSize[0];
@@ -1387,10 +1387,10 @@ void pme_gpu_spread(const PmeGpu*                  pmeGpu,
     pme_gpu_start_timing(pmeGpu, timingId);
     auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
 
-    kernelParamsPtr->usePipeline = computeSplines && spreadCharges && useGpuDirectComm
-                                   && (pmeCoordinateReceiverGpu->ppCommNumSenderRanks() > 1)
-                                   && !writeGlobalOrSaveSplines;
-    if (kernelParamsPtr->usePipeline)
+    kernelParamsPtr->usePipeline = char(computeSplines && spreadCharges && useGpuDirectComm
+                                        && (pmeCoordinateReceiverGpu->ppCommNumSenderRanks() > 1)
+                                        && !writeGlobalOrSaveSplines);
+    if (kernelParamsPtr->usePipeline != 0)
     {
         int numStagesInPipeline = pmeCoordinateReceiverGpu->ppCommNumSenderRanks();
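
Both hunks in this file deal with the same pattern, presumably flagged by
readability-implicit-bool-conversion: usePipeline is evidently stored as a
char rather than a bool in the kernel parameter struct (its declaration is
not part of this diff), so assigning a bool to it and testing it in an if
both involve implicit conversions that clang-tidy reports. A minimal
sketch, assuming a char-typed flag:

    struct KernelParamsSketch // hypothetical, not the real PME kernel params struct
    {
        char usePipeline; // flag stored as char rather than bool
    };

    int main()
    {
        KernelParamsSketch params{};

        const bool computeSplines = true;
        const bool spreadCharges  = true;

        // Implicit bool -> char would be flagged; the explicit char(...)
        // cast states the narrowing on purpose, mirroring the change above.
        params.usePipeline = char(computeSplines && spreadCharges);

        // Implicit char -> bool in a condition would also be flagged; the
        // explicit comparison against 0 keeps the test unambiguous.
        if (params.usePipeline != 0)
        {
            return 0;
        }
        return 1;
    }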