Apply clang-format to source tree
[alexxy/gromacs.git] / src / gromacs / ewald / pme_pp_comm_gpu.h
index b58125513e87caa3593aa30c1e5553bad5e706ab..e9d8c4ff697c921ba82a704d3171ba9ae71a49c9 100644 (file)
@@ -57,51 +57,53 @@ namespace gmx
 class PmePpCommGpu
 {
 
-    public:
-        /*! \brief Creates PME-PP GPU communication object
-         * \param[in] comm            Communicator used for simulation
-         * \param[in] pmeRank         Rank of PME task
-         */
-        PmePpCommGpu(MPI_Comm comm, int pmeRank);
-        ~PmePpCommGpu();
+public:
+    /*! \brief Creates PME-PP GPU communication object
+     * \param[in] comm            Communicator used for simulation
+     * \param[in] pmeRank         Rank of PME task
+     */
+    PmePpCommGpu(MPI_Comm comm, int pmeRank);
+    ~PmePpCommGpu();
 
-        /*! \brief Perform steps required when buffer size changes
-         * \param[in]  size   Number of elements in buffer
-         */
-        void reinit(int size);
+    /*! \brief Perform steps required when buffer size changes
+     * \param[in]  size   Number of elements in buffer
+     */
+    void reinit(int size);
 
-        /*! \brief
-         * Pull data from PME GPU directly using CUDA Memory copy.
-         * \param[out] recvPtr  Buffer to receive PME force data
-         * \param[in]  recvSize Number of elements to receive
-         * \param[in] recvPmeForceToGpu Whether receive is to GPU, otherwise CPU
-         */
-        void receiveForceFromPmeCudaDirect(void *recvPtr, int recvSize, bool recvPmeForceToGpu);
+    /*! \brief
+     * Pull data from PME GPU directly using CUDA Memory copy.
+     * \param[out] recvPtr  Buffer to receive PME force data
+     * \param[in]  recvSize Number of elements to receive
+     * \param[in] recvPmeForceToGpu Whether receive is to GPU, otherwise CPU
+     */
+    void receiveForceFromPmeCudaDirect(void* recvPtr, int recvSize, bool recvPmeForceToGpu);
 
-        /*! \brief Push coordinates buffer directly to GPU memory on PME task
-         * \param[in] sendPtr Buffer with coordinate data
-         * \param[in] sendSize Number of elements to send
-         * \param[in] sendPmeCoordinatesFromGpu Whether send is from GPU, otherwise CPU
-         * \param[in] coordinatesReadyOnDeviceEvent Event recorded when coordinates are available on device
-         */
-        void sendCoordinatesToPmeCudaDirect(void *sendPtr, int sendSize, bool sendPmeCoordinatesFromGpu, GpuEventSynchronizer* coordinatesReadyOnDeviceEvent);
+    /*! \brief Push coordinates buffer directly to GPU memory on PME task
+     * \param[in] sendPtr Buffer with coordinate data
+     * \param[in] sendSize Number of elements to send
+     * \param[in] sendPmeCoordinatesFromGpu Whether send is from GPU, otherwise CPU
+     * \param[in] coordinatesReadyOnDeviceEvent Event recorded when coordinates are available on device
+     */
+    void sendCoordinatesToPmeCudaDirect(void*                 sendPtr,
+                                        int                   sendSize,
+                                        bool                  sendPmeCoordinatesFromGpu,
+                                        GpuEventSynchronizer* coordinatesReadyOnDeviceEvent);
 
-        /*! \brief
-         * Return pointer to buffer used for staging PME force on GPU
-         */
-        void* getGpuForceStagingPtr();
+    /*! \brief
+     * Return pointer to buffer used for staging PME force on GPU
+     */
+    void* getGpuForceStagingPtr();
 
-        /*! \brief
-         * Return pointer to event recorded when forces are ready
-         */
-        void* getForcesReadySynchronizer();
-
-    private:
-        class Impl;
-        gmx::PrivateImplPointer<Impl> impl_;
+    /*! \brief
+     * Return pointer to event recorded when forces are ready
+     */
+    void* getForcesReadySynchronizer();
 
+private:
+    class Impl;
+    gmx::PrivateImplPointer<Impl> impl_;
 };
 
-} //namespace gmx
+} // namespace gmx
 
 #endif
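
Below is a minimal usage sketch of the class whose declarations are reformatted above, based only on what this header exposes. The communicator, PME rank, device buffers (d_coordinates, d_forces) and the coordinate-ready event are placeholder names the caller is assumed to provide; this is not the actual GROMACS call site.

#include "gromacs/ewald/pme_pp_comm_gpu.h"

// Sketch only: the call sequence follows the declarations in this header.
// Buffer and event names are hypothetical; real usage lives inside the
// GROMACS force/PP code, not in a free function like this.
void examplePpStep(MPI_Comm              simComm,        // communicator shared with the PME task
                   int                   pmeRank,        // rank of the PME task
                   void*                 d_coordinates,  // hypothetical device coordinate buffer
                   void*                 d_forces,       // hypothetical device force buffer
                   int                   numAtoms,
                   GpuEventSynchronizer* xReadyOnDevice) // recorded when coordinates are on the device
{
    // Create the communication object once, then re-initialize whenever the
    // number of locally owned atoms changes.
    gmx::PmePpCommGpu pmePpComm(simComm, pmeRank);
    pmePpComm.reinit(numAtoms);

    // Push coordinates directly from PP GPU memory into PME GPU memory.
    pmePpComm.sendCoordinatesToPmeCudaDirect(
            d_coordinates, numAtoms, /*sendPmeCoordinatesFromGpu=*/true, xReadyOnDevice);

    // ... the PME task computes the long-range forces ...

    // Pull the PME forces back, keeping them on the GPU for the force reduction.
    pmePpComm.receiveForceFromPmeCudaDirect(d_forces, numAtoms, /*recvPmeForceToGpu=*/true);

    // Downstream force consumers can use the staging buffer and wait on the
    // forces-ready event exposed by the object.
    void* stagingPtr       = pmePpComm.getGpuForceStagingPtr();
    void* forcesReadyEvent = pmePpComm.getForcesReadySynchronizer();
    (void)stagingPtr;
    (void)forcesReadyEvent;
}

The private Impl class held through gmx::PrivateImplPointer is the pimpl idiom: the CUDA-specific implementation stays in the corresponding source file, so translation units that only include this header do not need a CUDA toolchain.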