Fix style violations

Reorder constructor initializer lists to match the member declaration
order, and pass the modifiable pmeCpuForceBuffer argument by pointer
instead of by non-const reference.
author    Mark Abraham <mark.j.abraham@gmail.com>
          Mon, 2 Aug 2021 08:59:07 +0000 (08:59 +0000)
committer Artem Zhmurov <zhmurov@gmail.com>
          Mon, 2 Aug 2021 08:59:07 +0000 (08:59 +0000)
src/gromacs/ewald/pme_force_sender_gpu_impl.cu
src/gromacs/ewald/pme_pp_comm_gpu.h
src/gromacs/ewald/pme_pp_comm_gpu_impl.cpp
src/gromacs/ewald/pme_pp_comm_gpu_impl.cu
src/gromacs/ewald/pme_pp_comm_gpu_impl.h
src/gromacs/mdrun/runner.cpp

diff --git a/src/gromacs/ewald/pme_force_sender_gpu_impl.cu b/src/gromacs/ewald/pme_force_sender_gpu_impl.cu
index 64b3440d2e1bb9f11054a2706934f5de88d126c5..105437e7f80c4905c7f1cdf35748a26832254af2 100644
--- a/src/gromacs/ewald/pme_force_sender_gpu_impl.cu
+++ b/src/gromacs/ewald/pme_force_sender_gpu_impl.cu
@@ -62,11 +62,11 @@ PmeForceSenderGpu::Impl::Impl(GpuEventSynchronizer*  pmeForcesReady,
     pmeForcesReady_(pmeForcesReady),
     comm_(comm),
     ppRanks_(ppRanks),
-    deviceContext_(deviceContext),
     ppCommStream_(ppRanks.size()),
     ppCommEvent_(ppRanks.size()),
-    pmeRemoteGpuForcePtr_(ppRanks.size()),
-    pmeRemoteCpuForcePtr_(ppRanks.size())
+    deviceContext_(deviceContext),
+    pmeRemoteCpuForcePtr_(ppRanks.size()),
+    pmeRemoteGpuForcePtr_(ppRanks.size())
 {
     // Create streams and events to manage pushing of force buffers to remote PP ranks
     std::unique_ptr<DeviceStream>         stream;
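
The initializer-list reshuffle above matters because C++ initializes non-static
data members in declaration order, not in the order they appear in the
constructor's initializer list; GCC and Clang warn about the mismatch
(-Wreorder). A minimal sketch of the warning, using a hypothetical class rather
than the GROMACS one:

    class Widget
    {
    public:
        // Members are initialized in declaration order (a_, then b_),
        // no matter how the initializer list below is written.
        Widget(int a, int b) :
            b_(b), // -Wreorder: listed first, but initialized second
            a_(a)
        {
        }

    private:
        int a_;
        int b_;
    };

Reordering the list to match the declarations, as done above, silences the
warning without changing behavior.
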
diff --git a/src/gromacs/ewald/pme_pp_comm_gpu.h b/src/gromacs/ewald/pme_pp_comm_gpu.h
index a11f185a1237ce93460b778d4e11d6ff017e5ebd..ca7cdceef90084b3044b5b304e017f552083ded8 100644
--- a/src/gromacs/ewald/pme_pp_comm_gpu.h
+++ b/src/gromacs/ewald/pme_pp_comm_gpu.h
@@ -75,7 +75,7 @@ public:
      */
     PmePpCommGpu(MPI_Comm                comm,
                  int                     pmeRank,
-                 std::vector<gmx::RVec>& pmeCpuForceBuffer,
+                 std::vector<gmx::RVec>* pmeCpuForceBuffer,
                  const DeviceContext&    deviceContext,
                  const DeviceStream&     deviceStream);
     ~PmePpCommGpu();
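
The signature change from reference to pointer reflects the GROMACS style rule
that arguments a function may modify are passed by pointer, making the mutation
visible at the call site. A minimal sketch of the convention, with hypothetical
names:

    #include <vector>

    // Hypothetical function showing the pointer-for-output convention.
    void appendZero(std::vector<float>* buffer)
    {
        buffer->push_back(0.0F); // pointer arguments use ->, not .
    }

    int main()
    {
        std::vector<float> forces;
        appendZero(&forces); // the & flags that forces may be modified
        return 0;
    }
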
diff --git a/src/gromacs/ewald/pme_pp_comm_gpu_impl.cpp b/src/gromacs/ewald/pme_pp_comm_gpu_impl.cpp
index 0a5c60a4dd452af9d4b001cfbb64c236f6d946ee..3b8377fbb8eb426b632a3965c54be256020b9bbb 100644
--- a/src/gromacs/ewald/pme_pp_comm_gpu_impl.cpp
+++ b/src/gromacs/ewald/pme_pp_comm_gpu_impl.cpp
@@ -64,7 +64,7 @@ class PmePpCommGpu::Impl
 /*!\brief Constructor stub. */
 PmePpCommGpu::PmePpCommGpu(MPI_Comm /* comm */,
                            int /* pmeRank */,
-                           std::vector<gmx::RVec>& /* pmeCpuForceBuffer */,
+                           std::vector<gmx::RVec>* /* pmeCpuForceBuffer */,
                            const DeviceContext& /* deviceContext */,
                            const DeviceStream& /* deviceStream */) :
     impl_(nullptr)
diff --git a/src/gromacs/ewald/pme_pp_comm_gpu_impl.cu b/src/gromacs/ewald/pme_pp_comm_gpu_impl.cu
index 50e9a4189fc82426205a0405cbbddfacd67f86ce..e7d05b01f5fb8f5edc9a84045c71278c0bbb007d 100644
--- a/src/gromacs/ewald/pme_pp_comm_gpu_impl.cu
+++ b/src/gromacs/ewald/pme_pp_comm_gpu_impl.cu
@@ -61,7 +61,7 @@ namespace gmx
 
 PmePpCommGpu::Impl::Impl(MPI_Comm                comm,
                          int                     pmeRank,
-                         std::vector<gmx::RVec>& pmeCpuForceBuffer,
+                         std::vector<gmx::RVec>* pmeCpuForceBuffer,
                          const DeviceContext&    deviceContext,
                          const DeviceStream&     deviceStream) :
     deviceContext_(deviceContext),
@@ -89,7 +89,7 @@ void PmePpCommGpu::Impl::reinit(int size)
         MPI_Recv(&remotePmeXBuffer_, sizeof(float3*), MPI_BYTE, pmeRank_, 0, comm_, MPI_STATUS_IGNORE);
         // send host and device force buffer addresses to PME rank
         MPI_Send(&d_pmeForces_, sizeof(float3*), MPI_BYTE, pmeRank_, 0, comm_);
-        RVec* pmeCpuForceBufferData = pmeCpuForceBuffer_.data();
+        RVec* pmeCpuForceBufferData = pmeCpuForceBuffer_->data();
         MPI_Send(&pmeCpuForceBufferData, sizeof(RVec*), MPI_BYTE, pmeRank_, 0, comm_);
     }
 
@@ -221,7 +221,7 @@ GpuEventSynchronizer* PmePpCommGpu::Impl::getForcesReadySynchronizer()
 
 PmePpCommGpu::PmePpCommGpu(MPI_Comm                comm,
                            int                     pmeRank,
-                           std::vector<gmx::RVec>& pmeCpuForceBuffer,
+                           std::vector<gmx::RVec>* pmeCpuForceBuffer,
                            const DeviceContext&    deviceContext,
                            const DeviceStream&     deviceStream) :
     impl_(new Impl(comm, pmeRank, pmeCpuForceBuffer, deviceContext, deviceStream))
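
Aside from the '.'-to-'->' switch that the pointer member forces in reinit(),
the hunk above shows the pattern this class is built on: PP and PME ranks
exchange raw buffer addresses as bytes over MPI, and each side later uses the
peer's address for direct copies. A self-contained sketch of just the address
exchange, assuming exactly two ranks; it does not set up peer-accessible
(e.g. CUDA) memory, so the received address is only printed:

    #include <cstdio>
    #include <mpi.h>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        float  local      = 0.0F;
        float* localAddr  = &local;
        float* remoteAddr = nullptr;
        int    peer       = 1 - rank; // assumes exactly two ranks
        // Send the pointer *value* (an address) as raw bytes, as the
        // reinit() code above does for the host-side force buffer.
        MPI_Sendrecv(&localAddr, sizeof(float*), MPI_BYTE, peer, 0,
                     &remoteAddr, sizeof(float*), MPI_BYTE, peer, 0,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        std::printf("rank %d holds peer address %p\n", rank, (void*)remoteAddr);
        MPI_Finalize();
        return 0;
    }
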
diff --git a/src/gromacs/ewald/pme_pp_comm_gpu_impl.h b/src/gromacs/ewald/pme_pp_comm_gpu_impl.h
index f62faea93374de33c2501d83d868d2fe8bd06f88..c9ac0d6dd2c548113f0414e351ac6a9acafedf7e 100644
--- a/src/gromacs/ewald/pme_pp_comm_gpu_impl.h
+++ b/src/gromacs/ewald/pme_pp_comm_gpu_impl.h
@@ -66,7 +66,7 @@ public:
      */
     Impl(MPI_Comm                comm,
          int                     pmeRank,
-         std::vector<gmx::RVec>& pmeCpuForceBuffer,
+         std::vector<gmx::RVec>* pmeCpuForceBuffer,
          const DeviceContext&    deviceContext,
          const DeviceStream&     deviceStream);
     ~Impl();
@@ -165,7 +165,7 @@ private:
     //! Rank of PME task
     int pmeRank_ = -1;
     //! Buffer for PME force on CPU
-    std::vector<gmx::RVec>& pmeCpuForceBuffer_;
+    std::vector<gmx::RVec>* pmeCpuForceBuffer_;
     //! Buffer for staging PME force on GPU
     DeviceBuffer<gmx::RVec> d_pmeForces_;
     //! number of atoms in PME force staging array
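
Storing the buffer as a pointer rather than a reference also lifts a language
restriction: a reference member implicitly deletes copy assignment and can
never be reseated, while a pointer member keeps the class assignable. A
minimal sketch with hypothetical types:

    #include <vector>

    struct RefHolder
    {
        std::vector<float>& buf_; // copy assignment implicitly deleted
    };

    struct PtrHolder
    {
        std::vector<float>* buf_ = nullptr; // assignable and reseatable
    };

    int main()
    {
        std::vector<float> a, b;
        PtrHolder p{ &a };
        p.buf_ = &b; // fine: a pointer member can be redirected
        // RefHolder r{ a }; r = RefHolder{ b }; // would not compile
        return 0;
    }
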
diff --git a/src/gromacs/mdrun/runner.cpp b/src/gromacs/mdrun/runner.cpp
index 42bab51cb8f428c557e2b3ff2c6e306fd0106037..f7d2bc24f16abee6947b2c24e2f1d31135970515 100644
--- a/src/gromacs/mdrun/runner.cpp
+++ b/src/gromacs/mdrun/runner.cpp
@@ -1649,7 +1649,7 @@ int Mdrunner::mdrunner()
             fr->pmePpCommGpu = std::make_unique<gmx::PmePpCommGpu>(
                     cr->mpi_comm_mysim,
                     cr->dd->pme_nodeid,
-                    cr->dd->pmeForceReceiveBuffer,
+                    &cr->dd->pmeForceReceiveBuffer,
                     deviceStreamManager->context(),
                     deviceStreamManager->stream(DeviceStreamType::PmePpTransfer));
         }