Fix unused variable warnings in ewald code
authorPaul Bauer <paul.bauer.q@gmail.com>
Tue, 26 Nov 2019 10:29:04 +0000 (11:29 +0100)
committerPaul Bauer <paul.bauer.q@gmail.com>
Tue, 26 Nov 2019 10:54:14 +0000 (11:54 +0100)
Fixes #3193

Change-Id: Id3f8422ec95e6864553a6e9f03a144fbe4ed8674

src/gromacs/ewald/pme_only.cpp
src/gromacs/ewald/pme_pp.cpp

index 0610a2dc67724e61e86898b75fe0478d618567bb..be7a977daf1313fbf6f982727bd2de86dde75a8a 100644 (file)
@@ -483,6 +483,7 @@ static int gmx_pme_recv_coeffs_coords(struct gmx_pme_t*            pme,
         messages = 0;
     } while (status == -1);
 #else
+    GMX_UNUSED_VALUE(pme);
     GMX_UNUSED_VALUE(pme_pp);
     GMX_UNUSED_VALUE(box);
     GMX_UNUSED_VALUE(maxshift_x);
@@ -494,6 +495,8 @@ static int gmx_pme_recv_coeffs_coords(struct gmx_pme_t*            pme,
     GMX_UNUSED_VALUE(grid_size);
     GMX_UNUSED_VALUE(ewaldcoeff_q);
     GMX_UNUSED_VALUE(ewaldcoeff_lj);
+    GMX_UNUSED_VALUE(useGpuForPme);
+    GMX_UNUSED_VALUE(stateGpu);
 
     status = pmerecvqxX;
 #endif
@@ -506,6 +509,7 @@ static int gmx_pme_recv_coeffs_coords(struct gmx_pme_t*            pme,
     return status;
 }
 
+#if GMX_MPI
 /*! \brief Send force data to PP ranks */
 static void sendFToPP(void* sendbuf, PpRanks receiver, gmx_pme_pp* pme_pp, int* messages)
 {
@@ -520,14 +524,13 @@ static void sendFToPP(void* sendbuf, PpRanks receiver, gmx_pme_pp* pme_pp, int*
     }
     else
     {
-#if GMX_MPI
         // Send using MPI
         MPI_Isend(sendbuf, receiver.numAtoms * sizeof(rvec), MPI_BYTE, receiver.rankId, 0,
                   pme_pp->mpi_comm_mysim, &pme_pp->req[*messages]);
         *messages = *messages + 1;
-#endif
     }
 }
+#endif
 
 /*! \brief Send the PME mesh force, virial and energy to the PP-only ranks. */
 static void gmx_pme_send_force_vir_ener(const gmx_pme_t& pme,
@@ -582,6 +585,7 @@ static void gmx_pme_send_force_vir_ener(const gmx_pme_t& pme,
     MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
 #else
     gmx_call("MPI not enabled");
+    GMX_UNUSED_VALUE(pme);
     GMX_UNUSED_VALUE(pme_pp);
     GMX_UNUSED_VALUE(output);
     GMX_UNUSED_VALUE(dvdlambda_q);
index fd9de7ed5fd74ab883ec7f8cdc01ac5191449d79..acd0a25ef74b5b7c75d4943c0248988a7965b1bd 100644 (file)
@@ -230,7 +230,11 @@ static void gmx_pme_send_coeffs_coords(t_forcerec*      fr,
             }
         }
     }
-
+#else
+    GMX_UNUSED_VALUE(fr);
+    GMX_UNUSED_VALUE(reinitGpuPmePpComms);
+    GMX_UNUSED_VALUE(sendCoordinatesFromGpu);
+    GMX_UNUSED_VALUE(coordinatesReadyOnDeviceEvent);
 #endif
     if (!c_useDelayedWait)
     {
@@ -420,6 +424,8 @@ static void recvFFromPme(gmx::PmePpCommGpu* pmePpCommGpu,
 #if GMX_MPI
         MPI_Recv(recvptr, n * sizeof(rvec), MPI_BYTE, cr->dd->pme_nodeid, 0, cr->mpi_comm_mysim,
                  MPI_STATUS_IGNORE);
+#else
+        GMX_UNUSED_VALUE(cr);
 #endif
     }
 }