/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements class which receives coordinates to GPU memory on PME task using CUDA
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pme_coordinate_receiver_gpu_impl.h"

#include "config.h"

#include "gromacs/ewald/pme.h"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"

namespace gmx
{
PmeCoordinateReceiverGpu::Impl::Impl(void* pmeStream, MPI_Comm comm, gmx::ArrayRef<PpRanks> ppRanks) :
    pmeStream_(*static_cast<cudaStream_t*>(pmeStream)),
    comm_(comm),
    ppRanks_(ppRanks)
{
    GMX_RELEASE_ASSERT(
            GMX_THREAD_MPI,
            "PME-PP GPU Communication is currently only supported with thread-MPI enabled");
}

PmeCoordinateReceiverGpu::Impl::~Impl() = default;
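/*! \brief Send the address of the PME task's device coordinate buffer to each PP rank,
 * so that PP tasks can push their coordinate data directly into PME GPU memory. */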
void PmeCoordinateReceiverGpu::Impl::sendCoordinateBufferAddressToPpRanks(rvec* d_x)
{
    int ind_start = 0;
    int ind_end   = 0;
    for (const auto& receiver : ppRanks_)
    {
        // Each PP rank receives the address of the sub-range of d_x holding its atoms
        ind_start = ind_end;
        ind_end   = ind_start + receiver.numAtoms;

        // Data will be transferred directly from GPU.
        void* sendBuf = reinterpret_cast<void*>(&d_x[ind_start]);

#if GMX_MPI
        MPI_Send(&sendBuf, sizeof(void**), MPI_BYTE, receiver.rankId, 0, comm_);
#endif
    }
}
/*! \brief Receive coordinate data directly using CUDA memory copy */
void PmeCoordinateReceiverGpu::Impl::receiveCoordinatesFromPpCudaDirect(int ppRank)
{
    // Data will be pushed directly from the PP task

#if GMX_MPI
    // Receive event from PP task and add to PME stream, to ensure PME calculation doesn't
    // commence until coordinate data has been transferred
    GpuEventSynchronizer* ppSync;
    MPI_Recv(&ppSync, sizeof(GpuEventSynchronizer*), MPI_BYTE, ppRank, 0, comm_, MPI_STATUS_IGNORE);
    ppSync->enqueueWaitEvent(pmeStream_);
#endif
}
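// Public API below: thin wrappers forwarding to the PIMPL implementation above.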
PmeCoordinateReceiverGpu::PmeCoordinateReceiverGpu(void*                  pmeStream,
                                                   MPI_Comm               comm,
                                                   gmx::ArrayRef<PpRanks> ppRanks) :
    impl_(new Impl(pmeStream, comm, ppRanks))
{
}

PmeCoordinateReceiverGpu::~PmeCoordinateReceiverGpu() = default;
void PmeCoordinateReceiverGpu::sendCoordinateBufferAddressToPpRanks(rvec* d_x)
{
    impl_->sendCoordinateBufferAddressToPpRanks(d_x);
}
void PmeCoordinateReceiverGpu::receiveCoordinatesFromPpCudaDirect(int ppRank)
{
    impl_->receiveCoordinatesFromPpCudaDirect(ppRank);
}

} // namespace gmx