Fix clang-tidy complaints
author Andrey Alekseenko <al42and@gmail.com>
Tue, 8 Jun 2021 12:19:05 +0000 (15:19 +0300)
committer Andrey Alekseenko <al42and@gmail.com>
Tue, 8 Jun 2021 12:19:05 +0000 (15:19 +0300)
Introduced in 6cd214d6 (MR !1591)
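
Two checks are involved: readability-container-size-empty, which prefers calling empty() over comparing size() against zero, and bugprone-sizeof-expression, which is suppressed locally with NOLINTNEXTLINE (see the sketch after the diffs below). A minimal sketch of the first change, using hypothetical names rather than the actual GROMACS types:

    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for the per-rank force buffers; not the GROMACS type.
    void ensureBuffers(std::vector<float>& localForcePtr, std::size_t numPpRanks)
    {
        // clang-tidy readability-container-size-empty flags
        //     if (localForcePtr.size() == 0)
        // and prefers the equivalent
        if (localForcePtr.empty())
        {
            localForcePtr.resize(numPpRanks);
        }
    }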

src/gromacs/ewald/pme_force_sender_gpu_impl.cu
src/gromacs/ewald/pme_pp_comm_gpu_impl.cu

diff --git a/src/gromacs/ewald/pme_force_sender_gpu_impl.cu b/src/gromacs/ewald/pme_force_sender_gpu_impl.cu
index b2d32ab8840a553f4688ae400e8554c89aa76970..5356683083ebec4ffb31b75e4596b12433a11795 100644
--- a/src/gromacs/ewald/pme_force_sender_gpu_impl.cu
+++ b/src/gromacs/ewald/pme_force_sender_gpu_impl.cu
@@ -91,7 +91,7 @@ void PmeForceSenderGpu::Impl::setForceSendBuffer(DeviceBuffer<Float3> d_f)
 
 #if GMX_MPI
 
-    if (localForcePtr_.size() == 0)
+    if (localForcePtr_.empty())
     {
         localForcePtr_.resize(ppRanks_.size());
     }
@@ -132,6 +132,7 @@ void PmeForceSenderGpu::Impl::sendFToPpCudaDirect(int ppRank, int numAtoms)
                                        ppCommStream_[ppRank]->stream());
     CU_RET_ERR(stat, "cudaMemcpyAsync on Recv from PME CUDA direct data transfer failed");
     ppCommEvent_[ppRank]->markEvent(*ppCommStream_[ppRank]);
+    // NOLINTNEXTLINE(bugprone-sizeof-expression)
     MPI_Send(&ppCommEvent_[ppRank], sizeof(GpuEventSynchronizer*), MPI_BYTE, ppRank, 0, comm_);
 #else
     GMX_UNUSED_VALUE(ppRank);
diff --git a/src/gromacs/ewald/pme_pp_comm_gpu_impl.cu b/src/gromacs/ewald/pme_pp_comm_gpu_impl.cu
index 66131a808ad82ffc7f9b725a004f3160eae4bbf4..8acb6aa671679978a0d6379f0ddba05056dd0299 100644
--- a/src/gromacs/ewald/pme_pp_comm_gpu_impl.cu
+++ b/src/gromacs/ewald/pme_pp_comm_gpu_impl.cu
@@ -108,6 +108,7 @@ void PmePpCommGpu::Impl::receiveForceFromPmeCudaDirect(float3* recvPtr, bool rec
     // Receive event from PME task after PME->PP force data push has
     // been scheduled and enqueue this to PP stream.
     GpuEventSynchronizer* eventptr;
+    // NOLINTNEXTLINE(bugprone-sizeof-expression)
     MPI_Recv(&eventptr, sizeof(GpuEventSynchronizer*), MPI_BYTE, pmeRank_, 0, comm_, MPI_STATUS_IGNORE);
     eventptr->enqueueWaitEvent(pmePpCommStream_);
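
For context, bugprone-sizeof-expression warns when sizeof is applied to a pointer type, since that is usually a mistake (the size of the pointed-to object was probably intended). Here the pointer value itself is intentionally transmitted, which only makes sense when the sending and receiving ranks share an address space, as with thread-MPI, so sizeof(GpuEventSynchronizer*) is the intended message size and the warning is suppressed. A minimal, self-contained sketch of the same pattern, using a placeholder Event type instead of the real GpuEventSynchronizer:

    #include <mpi.h>

    struct Event {}; // placeholder for GpuEventSynchronizer

    // Deliberately send the pointer *value* to another rank in the same
    // address space; sizeof(Event*) is the intended message size, so the
    // clang-tidy warning is a false positive and is suppressed locally.
    void sendEventPtr(Event* event, int dstRank, MPI_Comm comm)
    {
        // NOLINTNEXTLINE(bugprone-sizeof-expression)
        MPI_Send(&event, sizeof(Event*), MPI_BYTE, dstRank, 0, comm);
    }

    void recvEventPtr(Event** event, int srcRank, MPI_Comm comm)
    {
        // NOLINTNEXTLINE(bugprone-sizeof-expression)
        MPI_Recv(event, sizeof(Event*), MPI_BYTE, srcRank, 0, comm, MPI_STATUS_IGNORE);
    }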