Fix build error without thread or regular MPI
author     Paul Bauer <paul.bauer.q@gmail.com>
           Fri, 29 Nov 2019 10:32:17 +0000 (11:32 +0100)
committer  Mark Abraham <mark.j.abraham@gmail.com>
           Fri, 29 Nov 2019 15:30:55 +0000 (16:30 +0100)
Guard MPI-dependent calls with GMX_MPI and mark the arguments that become
unused with GMX_UNUSED_VALUE, so that the build succeeds when neither
thread-MPI nor regular MPI is available. Some more unused-variable warnings
remain that are not fixed here yet.

Fixes #3218

Change-Id: Id47718bb0af97d3a494afa133af8b54ba71b9d5d

src/gromacs/domdec/gpuhaloexchange_impl.cu
src/gromacs/ewald/pme_coordinate_receiver_gpu_impl.cu
src/gromacs/ewald/pme_force_sender_gpu_impl.cu
src/gromacs/ewald/pme_pp_comm_gpu_impl.cu

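All four files apply the same guard pattern, so a self-contained sketch of it
is given below before the hunks. GMX_UNUSED_VALUE is the actual GROMACS macro
used in this change; the function name, parameter names, and the local macro
definition in the sketch are illustrative stand-ins, not code taken from the
repository.

    // Minimal, self-contained sketch of the guard pattern this commit applies.
    // The MPI call is compiled only when GMX_MPI is set; otherwise the
    // arguments are explicitly marked as used so that builds without
    // thread-MPI or regular MPI stay free of unused-variable warnings.
    #if GMX_MPI
    #    include <mpi.h>
    #endif

    // Stand-in for the GMX_UNUSED_VALUE macro from
    // gromacs/utility/basedefinitions.h.
    #define GMX_UNUSED_VALUE(value) (void)value

    void sendBufferAddressToPeer(void* buffer, int peerRank)
    {
    #if GMX_MPI
        // With MPI: publish the buffer address to the peer rank.
        MPI_Send(&buffer, sizeof(void*), MPI_BYTE, peerRank, 0, MPI_COMM_WORLD);
    #else
        // Without MPI this function is a no-op, but the parameters must still
        // be "used" to silence -Wunused-parameter under -Werror.
        GMX_UNUSED_VALUE(buffer);
        GMX_UNUSED_VALUE(peerRank);
    #endif
    }
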
diff --git a/src/gromacs/domdec/gpuhaloexchange_impl.cu b/src/gromacs/domdec/gpuhaloexchange_impl.cu
index b1e743db28e09d74af0a5e327f93142cf5cc8329..660566a9dd9117343497c5d6308c8c6e255f840e 100644
--- a/src/gromacs/domdec/gpuhaloexchange_impl.cu
+++ b/src/gromacs/domdec/gpuhaloexchange_impl.cu
@@ -58,6 +58,7 @@
 #include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
 #include "gromacs/gpu_utils/vectype_ops.cuh"
 #include "gromacs/pbcutil/ishift.h"
+#include "gromacs/utility/gmxmpi.h"
 
 #include "domdec_internal.h"
 
@@ -321,6 +322,8 @@ void GpuHaloExchange::Impl::communicateHaloData(float3*               d_ptr,
                      recvRank, 0, &remoteCoordinatesReadyOnDeviceEvent, sizeof(GpuEventSynchronizer*),
                      MPI_BYTE, sendRank, 0, mpi_comm_mysim_, MPI_STATUS_IGNORE);
         remoteCoordinatesReadyOnDeviceEvent->enqueueWaitEvent(nonLocalStream_);
+#else
+        GMX_UNUSED_VALUE(coordinatesReadyOnDeviceEvent);
 #endif
     }
     else
diff --git a/src/gromacs/ewald/pme_coordinate_receiver_gpu_impl.cu b/src/gromacs/ewald/pme_coordinate_receiver_gpu_impl.cu
index dbb0ace8b23ee4eec0cce5dec27cca6f1eb76f1b..3ca69e27a882225bba32590da8afb32f0d2f0501 100644
--- a/src/gromacs/ewald/pme_coordinate_receiver_gpu_impl.cu
+++ b/src/gromacs/ewald/pme_coordinate_receiver_gpu_impl.cu
@@ -87,6 +87,8 @@ void PmeCoordinateReceiverGpu::Impl::sendCoordinateBufferAddressToPpRanks(rvec*
 
 #if GMX_MPI
         MPI_Send(&sendBuf, sizeof(void**), MPI_BYTE, receiver.rankId, 0, comm_);
+#else
+        GMX_UNUSED_VALUE(sendBuf);
 #endif
     }
 }
@@ -101,6 +103,8 @@ void PmeCoordinateReceiverGpu::Impl::launchReceiveCoordinatesFromPpCudaDirect(in
     MPI_Irecv(&ppSync_[recvCount_], sizeof(GpuEventSynchronizer*), MPI_BYTE, ppRank, 0, comm_,
               &request_[recvCount_]);
     recvCount_++;
+#else
+    GMX_UNUSED_VALUE(ppRank);
 #endif
 }
 
diff --git a/src/gromacs/ewald/pme_force_sender_gpu_impl.cu b/src/gromacs/ewald/pme_force_sender_gpu_impl.cu
index 73a7c19c91c203b2839bbaf1cce28c31529b30f1..1268bac7921307034e4164d82c2279b538a8f043 100644
--- a/src/gromacs/ewald/pme_force_sender_gpu_impl.cu
+++ b/src/gromacs/ewald/pme_force_sender_gpu_impl.cu
@@ -82,6 +82,8 @@ void PmeForceSenderGpu::Impl::sendForceBufferAddressToPpRanks(rvec* d_f)
 
 #if GMX_MPI
         MPI_Send(&sendBuf, sizeof(void**), MPI_BYTE, receiver.rankId, 0, comm_);
+#else
+        GMX_UNUSED_VALUE(sendBuf);
 #endif
     }
 }
@@ -98,6 +100,9 @@ void PmeForceSenderGpu::Impl::sendFToPpCudaDirect(int ppRank)
     // TODO Using MPI_Isend would be more efficient, particularly when
     // sending to multiple PP ranks
     MPI_Send(&pmeSyncPtr, sizeof(GpuEventSynchronizer*), MPI_BYTE, ppRank, 0, comm_);
+#else
+    GMX_UNUSED_VALUE(pmeSyncPtr);
+    GMX_UNUSED_VALUE(ppRank);
 #endif
 }
 
diff --git a/src/gromacs/ewald/pme_pp_comm_gpu_impl.cu b/src/gromacs/ewald/pme_pp_comm_gpu_impl.cu
index 9a87e951e56425b87257b87f9c0836199f692ada..827a1bab343f956694a8b09435bed71d99d2195c 100644
--- a/src/gromacs/ewald/pme_pp_comm_gpu_impl.cu
+++ b/src/gromacs/ewald/pme_pp_comm_gpu_impl.cu
@@ -68,12 +68,15 @@ PmePpCommGpu::Impl::~Impl() = default;
 void PmePpCommGpu::Impl::reinit(int size)
 {
     // This rank will access PME rank memory directly, so needs to receive the remote PME buffer addresses.
+#if GMX_MPI
     MPI_Recv(&remotePmeXBuffer_, sizeof(void**), MPI_BYTE, pmeRank_, 0, comm_, MPI_STATUS_IGNORE);
     MPI_Recv(&remotePmeFBuffer_, sizeof(void**), MPI_BYTE, pmeRank_, 0, comm_, MPI_STATUS_IGNORE);
 
     // Reallocate buffer used for staging PME force on GPU
     reallocateDeviceBuffer(&d_pmeForces_, size, &d_pmeForcesSize_, &d_pmeForcesSizeAlloc_, nullptr);
-
+#else
+    GMX_UNUSED_VALUE(size);
+#endif
     return;
 }
 
@@ -81,7 +84,7 @@ void PmePpCommGpu::Impl::reinit(int size)
 // launchRecvForceFromPmeCudaDirect() and sycnRecvForceFromPmeCudaDirect()
 void PmePpCommGpu::Impl::receiveForceFromPmeCudaDirect(void* recvPtr, int recvSize, bool receivePmeForceToGpu)
 {
-
+#if GMX_MPI
     // Receive event from PME task and add to stream, to ensure pull of data doesn't
     // occur before PME force calc is completed
     GpuEventSynchronizer* pmeSync;
@@ -107,6 +110,11 @@ void PmePpCommGpu::Impl::receiveForceFromPmeCudaDirect(void* recvPtr, int recvSi
         // them with other forces on the CPU
         cudaStreamSynchronize(pmePpCommStream_);
     }
+#else
+    GMX_UNUSED_VALUE(recvPtr);
+    GMX_UNUSED_VALUE(recvSize);
+    GMX_UNUSED_VALUE(receivePmeForceToGpu);
+#endif
 }
 
 void PmePpCommGpu::Impl::sendCoordinatesToPmeCudaDirect(void* sendPtr,
@@ -114,7 +122,7 @@ void PmePpCommGpu::Impl::sendCoordinatesToPmeCudaDirect(void* sendPtr,
                                                         bool gmx_unused sendPmeCoordinatesFromGpu,
                                                         GpuEventSynchronizer* coordinatesReadyOnDeviceEvent)
 {
-
+#if GMX_MPI
     // ensure stream waits until coordinate data is available on device
     coordinatesReadyOnDeviceEvent->enqueueWaitEvent(pmePpCommStream_);
 
@@ -126,6 +134,12 @@ void PmePpCommGpu::Impl::sendCoordinatesToPmeCudaDirect(void* sendPtr,
     pmeCoordinatesSynchronizer_.markEvent(pmePpCommStream_);
     GpuEventSynchronizer* pmeSync = &pmeCoordinatesSynchronizer_;
     MPI_Send(&pmeSync, sizeof(GpuEventSynchronizer*), MPI_BYTE, pmeRank_, 0, comm_);
+#else
+    GMX_UNUSED_VALUE(sendPtr);
+    GMX_UNUSED_VALUE(sendSize);
+    GMX_UNUSED_VALUE(sendPmeCoordinatesFromGpu);
+    GMX_UNUSED_VALUE(coordinatesReadyOnDeviceEvent);
+#endif
 }
 void* PmePpCommGpu::Impl::getGpuForceStagingPtr()
 {