*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
+ * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
+ * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include "config.h"
-#include <assert.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
+#include <cassert>
#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
#include <algorithm>
+#include <list>
-#include "gromacs/ewald/ewald-utils.h"
+#include "gromacs/domdec/domdec.h"
+#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
+#include "gromacs/hardware/hw_info.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
+#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
+#include "gromacs/topology/topology.h"
#include "gromacs/utility/basedefinitions.h"
+#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/stringutil.h"
#include "gromacs/utility/unique_cptr.h"
-#include "calculate-spline-moduli.h"
-#include "pme-gather.h"
-#include "pme-gpu-internal.h"
-#include "pme-grid.h"
-#include "pme-internal.h"
-#include "pme-redistribute.h"
-#include "pme-solve.h"
-#include "pme-spline-work.h"
-#include "pme-spread.h"
+#include "calculate_spline_moduli.h"
+#include "pme_gather.h"
+#include "pme_gpu_internal.h"
+#include "pme_grid.h"
+#include "pme_internal.h"
+#include "pme_redistribute.h"
+#include "pme_solve.h"
+#include "pme_spline_work.h"
+#include "pme_spread.h"
+
+/*! \brief Help build a descriptive message in \c error if there are
+ * \c errorReasons why PME on GPU is not supported.
+ *
+ * \returns Whether the lack of errorReasons indicates there is support. */
+static bool addMessageIfNotSupported(const std::list<std::string>& errorReasons, std::string* error)
+{
+ bool isSupported = errorReasons.empty();
+ if (!isSupported && error)
+ {
+ std::string regressionTestMarker = "PME GPU does not support";
+ // this prefix is tested for in the regression-test script gmxtest.pl
+ *error = regressionTestMarker;
+ if (errorReasons.size() == 1)
+ {
+ *error += " " + errorReasons.back();
+ }
+ else
+ {
+ *error += ": " + gmx::joinStrings(errorReasons, "; ");
+ }
+ *error += ".";
+ }
+ return isSupported;
+}
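+// With a single reason the resulting message reads, e.g.,
+//   "PME GPU does not support a double-precision build."
+// With several reasons they are joined:
+//   "PME GPU does not support: a double-precision build; a non-GPU build."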
+
+bool pme_gpu_supports_build(std::string* error)
+{
+ std::list<std::string> errorReasons;
+ if (GMX_DOUBLE)
+ {
+ errorReasons.emplace_back("a double-precision build");
+ }
+ if (!GMX_GPU)
+ {
+ errorReasons.emplace_back("a non-GPU build");
+ }
+ if (GMX_GPU_SYCL)
+ {
+ errorReasons.emplace_back("SYCL build"); // SYCL-TODO
+ }
+ return addMessageIfNotSupported(errorReasons, error);
+}
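+// Illustrative caller pattern (not from this file):
+//   std::string error;
+//   if (!pme_gpu_supports_build(&error)) { /* fall back to the CPU path, reporting error */ }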
+
+bool pme_gpu_supports_hardware(const gmx_hw_info_t gmx_unused& hwinfo, std::string* error)
+{
+ std::list<std::string> errorReasons;
+
+ if (GMX_GPU_OPENCL)
+ {
+#ifdef __APPLE__
+ errorReasons.emplace_back("Apple OS X operating system");
+#endif
+ }
+ return addMessageIfNotSupported(errorReasons, error);
+}
+
+bool pme_gpu_supports_input(const t_inputrec& ir, std::string* error)
+{
+ std::list<std::string> errorReasons;
+ if (!EEL_PME(ir.coulombtype))
+ {
+ errorReasons.emplace_back("systems that do not use PME for electrostatics");
+ }
+ if (ir.pme_order != 4)
+ {
+ errorReasons.emplace_back("interpolation orders other than 4");
+ }
+ if (EVDW_PME(ir.vdwtype))
+ {
+ errorReasons.emplace_back("Lennard-Jones PME");
+ }
+ if (!EI_DYNAMICS(ir.eI))
+ {
+ errorReasons.emplace_back(
+ "non-dynamical integrators (PME GPU requires a dynamical integrator: md, sd, "
+ "etc.)");
+ }
+ return addMessageIfNotSupported(errorReasons, error);
+}
+
+bool pme_gpu_mixed_mode_supports_input(const t_inputrec& ir, std::string* error)
+{
+ std::list<std::string> errorReasons;
+ if (ir.efep != efepNO)
+ {
+ errorReasons.emplace_back("Free Energy Perturbation (in PME GPU mixed mode)");
+ }
+ return addMessageIfNotSupported(errorReasons, error);
+}
+
+/*! \brief \libinternal
+ * Finds out whether PME with the given inputs can run on a GPU.
+ * This function is an internal final check, validating the whole PME structure on creation.
+ * It still duplicates the preliminary checks of the (externally exposed) pme_gpu_supports_input() above, just in case.
+ *
+ * \param[in] pme The PME structure.
+ * \param[out] error The error message if the input is not supported on GPU.
+ * \returns True if this PME input is possible to run on GPU, false otherwise.
+ */
+static bool pme_gpu_check_restrictions(const gmx_pme_t* pme, std::string* error)
+{
+ std::list<std::string> errorReasons;
+ if (pme->nnodes != 1)
+ {
+ errorReasons.emplace_back("PME decomposition");
+ }
+ if (pme->pme_order != 4)
+ {
+ errorReasons.emplace_back("interpolation orders other than 4");
+ }
+ if (pme->doLJ)
+ {
+ errorReasons.emplace_back("Lennard-Jones PME");
+ }
+ if (GMX_DOUBLE)
+ {
+ errorReasons.emplace_back("double precision");
+ }
+ if (!GMX_GPU)
+ {
+ errorReasons.emplace_back("non-GPU build of GROMACS");
+ }
+ if (GMX_GPU_SYCL)
+ {
+ errorReasons.emplace_back("SYCL build of GROMACS"); // SYCL-TODO
+ }
+ return addMessageIfNotSupported(errorReasons, error);
+}
+
+PmeRunMode pme_run_mode(const gmx_pme_t* pme)
+{
+ GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
+ return pme->runMode;
+}
+
+gmx::PinningPolicy pme_get_pinning_policy()
+{
+ return gmx::PinningPolicy::PinnedIfSupported;
+}
/*! \brief Number of bytes in a cache line. */
const int gmxCacheLineSize = 64;
//! Set up coordinate communication
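+//!
+//! Destinations and sources are interleaved; e.g. with nslab = 4 and
+//! nodeid = 0 this fills slabCommSetup[].node_dest = {1, 3, 2} and
+//! slabCommSetup[].node_src = {3, 1, 2}.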
-static void setup_coordinate_communication(pme_atomcomm_t *atc)
+static void setup_coordinate_communication(PmeAtomComm* atc)
{
int nslab, n, i;
int fw, bw;
nslab = atc->nslab;
n = 0;
- for (i = 1; i <= nslab/2; i++)
+ for (i = 1; i <= nslab / 2; i++)
{
fw = (atc->nodeid + i) % nslab;
bw = (atc->nodeid - i + nslab) % nslab;
if (n < nslab - 1)
{
- atc->node_dest[n] = fw;
- atc->node_src[n] = bw;
+ atc->slabCommSetup[n].node_dest = fw;
+ atc->slabCommSetup[n].node_src = bw;
n++;
}
if (n < nslab - 1)
{
- atc->node_dest[n] = bw;
- atc->node_src[n] = fw;
+ atc->slabCommSetup[n].node_dest = bw;
+ atc->slabCommSetup[n].node_src = fw;
n++;
}
}
/*! \brief Round \p n up to the next multiple of \p f */
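+/* e.g. mult_up(10, 4) == 12 and mult_up(8, 4) == 8 */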
static int mult_up(int n, int f)
{
- return ((n + f - 1)/f)*f;
+ return ((n + f - 1) / f) * f;
}
/*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
-static double estimate_pme_load_imbalance(struct gmx_pme_t *pme)
+static double estimate_pme_load_imbalance(struct gmx_pme_t* pme)
{
int nma, nmi;
double n1, n2, n3;
nma = pme->nnodes_major;
nmi = pme->nnodes_minor;
- n1 = mult_up(pme->nkx, nma)*mult_up(pme->nky, nmi)*pme->nkz;
- n2 = mult_up(pme->nkx, nma)*mult_up(pme->nkz, nmi)*pme->nky;
- n3 = mult_up(pme->nky, nma)*mult_up(pme->nkz, nmi)*pme->nkx;
+ n1 = mult_up(pme->nkx, nma) * mult_up(pme->nky, nmi) * pme->nkz;
+ n2 = mult_up(pme->nkx, nma) * mult_up(pme->nkz, nmi) * pme->nky;
+ n3 = mult_up(pme->nky, nma) * mult_up(pme->nkz, nmi) * pme->nkx;
/* pme_solve is roughly double the cost of an fft */
- return (n1 + n2 + 3*n3)/(double)(6*pme->nkx*pme->nky*pme->nkz);
+ return (n1 + n2 + 3 * n3) / static_cast<double>(6 * pme->nkx * pme->nky * pme->nkz);
}
-/*! \brief Initialize atom communication data structure */
-static void init_atomcomm(struct gmx_pme_t *pme, pme_atomcomm_t *atc,
- int dimind, gmx_bool bSpread)
+#ifndef DOXYGEN
+
+PmeAtomComm::PmeAtomComm(MPI_Comm PmeMpiCommunicator,
+ const int numThreads,
+ const int pmeOrder,
+ const int dimIndex,
+ const bool doSpread) :
+ dimind(dimIndex),
+ bSpread(doSpread),
+ pme_order(pmeOrder),
+ nthread(numThreads),
+ spline(nthread)
{
- int thread;
-
- atc->dimind = dimind;
- atc->nslab = 1;
- atc->nodeid = 0;
- atc->pd_nalloc = 0;
-#if GMX_MPI
- if (pme->nnodes > 1)
+ if (PmeMpiCommunicator != MPI_COMM_NULL)
{
- atc->mpi_comm = pme->mpi_comm_d[dimind];
- MPI_Comm_size(atc->mpi_comm, &atc->nslab);
- MPI_Comm_rank(atc->mpi_comm, &atc->nodeid);
+ mpi_comm = PmeMpiCommunicator;
+# if GMX_MPI
+ MPI_Comm_size(mpi_comm, &nslab);
+ MPI_Comm_rank(mpi_comm, &nodeid);
+# endif
}
if (debug)
{
- fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", atc->dimind, atc->nslab, atc->nodeid);
+ fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", dimind, nslab, nodeid);
}
-#endif
- atc->bSpread = bSpread;
- atc->pme_order = pme->pme_order;
-
- if (atc->nslab > 1)
+ if (nslab > 1)
{
- snew(atc->node_dest, atc->nslab);
- snew(atc->node_src, atc->nslab);
- setup_coordinate_communication(atc);
+ slabCommSetup.resize(nslab);
+ setup_coordinate_communication(this);
- snew(atc->count_thread, pme->nthread);
- for (thread = 0; thread < pme->nthread; thread++)
- {
- snew(atc->count_thread[thread], atc->nslab);
- }
- atc->count = atc->count_thread[0];
- snew(atc->rcount, atc->nslab);
- snew(atc->buf_index, atc->nslab);
- }
-
- atc->nthread = pme->nthread;
- if (atc->nthread > 1)
- {
- snew(atc->thread_plist, atc->nthread);
- }
- snew(atc->spline, atc->nthread);
- for (thread = 0; thread < atc->nthread; thread++)
- {
- if (atc->nthread > 1)
+ count_thread.resize(nthread);
+ for (auto& countThread : count_thread)
{
- snew(atc->thread_plist[thread].n, atc->nthread+2*gmxCacheLineSize);
- atc->thread_plist[thread].n += gmxCacheLineSize;
+ countThread.resize(nslab);
}
}
-}
-/*! \brief Destroy an atom communication data structure and its child structs */
-static void destroy_atomcomm(pme_atomcomm_t *atc)
-{
- sfree(atc->pd);
- if (atc->nslab > 1)
+ if (nthread > 1)
{
- sfree(atc->node_dest);
- sfree(atc->node_src);
- for (int i = 0; i < atc->nthread; i++)
- {
- sfree(atc->count_thread[i]);
- }
- sfree(atc->count_thread);
- sfree(atc->rcount);
- sfree(atc->buf_index);
-
- sfree(atc->x);
- sfree(atc->coefficient);
- sfree(atc->f);
- }
- sfree(atc->idx);
- sfree(atc->fractx);
+ threadMap.resize(nthread);
- sfree(atc->thread_idx);
- for (int i = 0; i < atc->nthread; i++)
- {
- if (atc->nthread > 1)
- {
- int *n_ptr = atc->thread_plist[i].n - gmxCacheLineSize;
- sfree(n_ptr);
- sfree(atc->thread_plist[i].i);
- }
- sfree(atc->spline[i].ind);
- for (int d = 0; d < ZZ; d++)
+# pragma omp parallel for num_threads(nthread) schedule(static)
+ for (int thread = 0; thread < nthread; thread++)
{
- sfree(atc->spline[i].theta[d]);
- sfree(atc->spline[i].dtheta[d]);
+ try
+ {
+ /* Allocate buffer with padding to avoid cache pollution */
+ threadMap[thread].nBuffer.resize(nthread + 2 * gmxCacheLineSize);
+ threadMap[thread].n = threadMap[thread].nBuffer.data() + gmxCacheLineSize;
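+ /* With gmxCacheLineSize ints of padding on both sides, the
+ * counters this thread writes cannot share a cache line with
+ * neighbouring allocations, avoiding false sharing. */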
+ }
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
- sfree_aligned(atc->spline[i].ptr_dtheta_z);
- sfree_aligned(atc->spline[i].ptr_theta_z);
- }
- if (atc->nthread > 1)
- {
- sfree(atc->thread_plist);
}
- sfree(atc->spline);
}
+#endif // !DOXYGEN
+
/*! \brief Initialize data structure for communication */
-static void
-init_overlap_comm(pme_overlap_t * ol,
- int norder,
-#if GMX_MPI
- MPI_Comm comm,
-#endif
- int nnodes,
- int nodeid,
- int ndata,
- int commplainsize)
+static void init_overlap_comm(pme_overlap_t* ol, int norder, MPI_Comm comm, int nnodes, int nodeid, int ndata, int commplainsize)
{
- int b, i;
- pme_grid_comm_t *pgc;
- gmx_bool bCont;
- int fft_start, fft_end, send_index1, recv_index1;
-#if GMX_MPI
- MPI_Status stat;
+ gmx_bool bCont;
ol->mpi_comm = comm;
-#endif
-
- ol->nnodes = nnodes;
- ol->nodeid = nodeid;
+ ol->nnodes = nnodes;
+ ol->nodeid = nodeid;
/* Linear translation of the PME grid won't affect reciprocal space
* calculations, so to optimize we only interpolate "upwards": each rank
* only sends grid overlap to slabs that belong to higher nodes (modulo nnodes)
*/
- snew(ol->s2g0, ol->nnodes+1);
- snew(ol->s2g1, ol->nnodes);
+ ol->s2g0.resize(ol->nnodes + 1);
+ ol->s2g1.resize(ol->nnodes);
if (debug)
{
fprintf(debug, "PME slab boundaries:");
}
- for (i = 0; i < nnodes; i++)
+ for (int i = 0; i < nnodes; i++)
{
/* s2g0 the local interpolation grid start.
* s2g1 the local interpolation grid end.
* Since in calc_pidx we divide particles, and not grid lines,
* spatially uniform along dimension x or y, we need to round
* s2g0 down and s2g1 up.
*/
- ol->s2g0[i] = ( i *ndata + 0 )/nnodes;
- ol->s2g1[i] = ((i+1)*ndata + nnodes-1)/nnodes + norder - 1;
+ ol->s2g0[i] = (i * ndata + 0) / nnodes;
+ ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;
if (debug)
{
}
/* Determine how many nodes we need to communicate the grid overlap with */
- b = 0;
+ int testRankCount = 0;
do
{
- b++;
+ testRankCount++;
bCont = FALSE;
- for (i = 0; i < nnodes; i++)
+ for (int i = 0; i < nnodes; i++)
{
- if ((i+b < nnodes && ol->s2g1[i] > ol->s2g0[i+b]) ||
- (i+b >= nnodes && ol->s2g1[i] > ol->s2g0[i+b-nnodes] + ndata))
+ if ((i + testRankCount < nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount])
+ || (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
{
bCont = TRUE;
}
}
- }
- while (bCont && b < nnodes);
- ol->noverlap_nodes = b - 1;
-
- snew(ol->send_id, ol->noverlap_nodes);
- snew(ol->recv_id, ol->noverlap_nodes);
- for (b = 0; b < ol->noverlap_nodes; b++)
- {
- ol->send_id[b] = (ol->nodeid + (b + 1)) % ol->nnodes;
- ol->recv_id[b] = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
- }
- snew(ol->comm_data, ol->noverlap_nodes);
+ } while (bCont && testRankCount < nnodes);
+ ol->comm_data.resize(testRankCount - 1);
ol->send_size = 0;
- for (b = 0; b < ol->noverlap_nodes; b++)
+
+ for (size_t b = 0; b < ol->comm_data.size(); b++)
{
- pgc = &ol->comm_data[b];
+ pme_grid_comm_t* pgc = &ol->comm_data[b];
+
/* Send */
- fft_start = ol->s2g0[ol->send_id[b]];
- fft_end = ol->s2g0[ol->send_id[b]+1];
- if (ol->send_id[b] < nodeid)
+ pgc->send_id = (ol->nodeid + (b + 1)) % ol->nnodes;
+ int fft_start = ol->s2g0[pgc->send_id];
+ int fft_end = ol->s2g0[pgc->send_id + 1];
+ if (pgc->send_id < nodeid)
{
fft_start += ndata;
- fft_end += ndata;
+ fft_end += ndata;
}
- send_index1 = ol->s2g1[nodeid];
- send_index1 = std::min(send_index1, fft_end);
- pgc->send_index0 = fft_start;
- pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
- ol->send_size += pgc->send_nindex;
+ int send_index1 = ol->s2g1[nodeid];
+ send_index1 = std::min(send_index1, fft_end);
+ pgc->send_index0 = fft_start;
+ pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
+ ol->send_size += pgc->send_nindex;
/* We always start receiving to the first index of our slab */
- fft_start = ol->s2g0[ol->nodeid];
- fft_end = ol->s2g0[ol->nodeid+1];
- recv_index1 = ol->s2g1[ol->recv_id[b]];
- if (ol->recv_id[b] > nodeid)
+ pgc->recv_id = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
+ fft_start = ol->s2g0[ol->nodeid];
+ fft_end = ol->s2g0[ol->nodeid + 1];
+ int recv_index1 = ol->s2g1[pgc->recv_id];
+ if (pgc->recv_id > nodeid)
{
recv_index1 -= ndata;
}
#if GMX_MPI
/* Communicate the buffer sizes to receive */
- for (b = 0; b < ol->noverlap_nodes; b++)
+ MPI_Status stat;
+ for (size_t b = 0; b < ol->comm_data.size(); b++)
{
- MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->send_id[b], b,
- &ol->comm_data[b].recv_size, 1, MPI_INT, ol->recv_id[b], b,
- ol->mpi_comm, &stat);
+ MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->comm_data[b].send_id, b, &ol->comm_data[b].recv_size,
+ 1, MPI_INT, ol->comm_data[b].recv_id, b, ol->mpi_comm, &stat);
}
#endif
/* For a non-divisible grid we need pme_order instead of pme_order-1 */
- snew(ol->sendbuf, norder*commplainsize);
- snew(ol->recvbuf, norder*commplainsize);
-}
-
-/*! \brief Destroy data structure for communication */
-static void
-destroy_overlap_comm(const pme_overlap_t *ol)
-{
- sfree(ol->s2g0);
- sfree(ol->s2g1);
- sfree(ol->send_id);
- sfree(ol->recv_id);
- sfree(ol->comm_data);
- sfree(ol->sendbuf);
- sfree(ol->recvbuf);
+ ol->sendbuf.resize(norder * commplainsize);
+ ol->recvbuf.resize(norder * commplainsize);
}
int minimalPmeGridSize(int pmeOrder)
* But we use the maximum for simplicity since in practice there is not
* much performance difference between pme_order and 2*(pme_order -1).
*/
- int minimalSize = 2*(pmeOrder - 1);
+ int minimalSize = 2 * (pmeOrder - 1);
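+ // e.g. the default pmeOrder = 4 gives minimalSize = 6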
GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");
return minimalSize;
}
-bool gmx_pme_check_restrictions(int pme_order,
- int nkx, int nky, int nkz,
- int nnodes_major,
- bool useThreads,
- bool errorsAreFatal)
+bool gmx_pme_check_restrictions(int pme_order, int nkx, int nky, int nkz, int numPmeDomainsAlongX, bool useThreads, bool errorsAreFatal)
{
if (pme_order > PME_ORDER_MAX)
{
}
std::string message = gmx::formatString(
- "pme_order (%d) is larger than the maximum allowed value (%d). Modify and recompile the code if you really need such a high order.",
- pme_order, PME_ORDER_MAX);
- GMX_THROW(InconsistentInputError(message));
+ "pme_order (%d) is larger than the maximum allowed value (%d). Modify and "
+ "recompile the code if you really need such a high order.",
+ pme_order, PME_ORDER_MAX);
+ GMX_THROW(gmx::InconsistentInputError(message));
}
const int minGridSize = minimalPmeGridSize(pme_order);
- if (nkx < minGridSize ||
- nky < minGridSize ||
- nkz < minGridSize)
+ if (nkx < minGridSize || nky < minGridSize || nkz < minGridSize)
{
if (!errorsAreFatal)
{
return false;
}
- std::string message = gmx::formatString(
- "The PME grid sizes need to be >= 2*(pme_order-1) (%d)",
- minGridSize);
- GMX_THROW(InconsistentInputError(message));
+ std::string message =
+ gmx::formatString("The PME grid sizes need to be >= 2*(pme_order-1) (%d)", minGridSize);
+ GMX_THROW(gmx::InconsistentInputError(message));
}
/* Check for a limitation of the (current) sum_fftgrid_dd code.
* We only allow multiple communication pulses in dim 1, not in dim 0.
*/
- if (useThreads && (nkx < nnodes_major*pme_order &&
- nkx != nnodes_major*(pme_order - 1)))
+ if (useThreads
+ && (nkx < numPmeDomainsAlongX * pme_order && nkx != numPmeDomainsAlongX * (pme_order - 1)))
{
if (!errorsAreFatal)
{
return false;
}
- gmx_fatal(FARGS, "The number of PME grid lines per rank along x is %g. But when using OpenMP threads, the number of grid lines per rank along x should be >= pme_order (%d) or = pmeorder-1. To resolve this issue, use fewer ranks along x (and possibly more along y and/or z) by specifying -dd manually.",
- nkx/(double)nnodes_major, pme_order);
+ gmx_fatal(FARGS,
+ "The number of PME grid lines per rank along x is %g. But when using OpenMP "
+ "threads, the number of grid lines per rank along x should be >= pme_order (%d) "
+ "or = pmeorder-1. To resolve this issue, use fewer ranks along x (and possibly "
+ "more along y and/or z) by specifying -dd manually.",
+ nkx / static_cast<double>(numPmeDomainsAlongX), pme_order);
}
return true;
/*! \brief Return \p enumerator divided by \p denominator, rounded up */
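+/* e.g. div_round_up(10, 4) == 3 */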
static int div_round_up(int enumerator, int denominator)
{
- return (enumerator + denominator - 1)/denominator;
+ return (enumerator + denominator - 1) / denominator;
}
-gmx_pme_t *gmx_pme_init(const t_commrec *cr,
- int nnodes_major,
- int nnodes_minor,
- const t_inputrec *ir,
- int homenr,
+gmx_pme_t* gmx_pme_init(const t_commrec* cr,
+ const NumPmeDomains& numPmeDomains,
+ const t_inputrec* ir,
gmx_bool bFreeEnergy_q,
gmx_bool bFreeEnergy_lj,
gmx_bool bReproducible,
real ewaldcoeff_lj,
int nthread,
PmeRunMode runMode,
- PmeGpu *pmeGpu,
- gmx_device_info_t *gpuInfo,
- const gmx::MDLogger & /*mdlog*/)
+ PmeGpu* pmeGpu,
+ const DeviceContext* deviceContext,
+ const DeviceStream* deviceStream,
+ const PmeGpuProgram* pmeGpuProgram,
+ const gmx::MDLogger& mdlog)
{
- int use_threads, sum_use_threads, i;
- ivec ndata;
+ int use_threads, sum_use_threads, i;
+ ivec ndata;
if (debug)
{
fprintf(debug, "Creating PME data structures.\n");
}
- unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());
+ gmx::unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());
- pme->sum_qgrid_tmp = nullptr;
- pme->sum_qgrid_dd_tmp = nullptr;
+ pme->sum_qgrid_tmp = nullptr;
+ pme->sum_qgrid_dd_tmp = nullptr;
- pme->buf_nalloc = 0;
+ pme->buf_nalloc = 0;
- pme->nnodes = 1;
- pme->bPPnode = TRUE;
+ pme->nnodes = 1;
+ pme->bPPnode = TRUE;
- pme->nnodes_major = nnodes_major;
- pme->nnodes_minor = nnodes_minor;
+ pme->nnodes_major = numPmeDomains.x;
+ pme->nnodes_minor = numPmeDomains.y;
-#if GMX_MPI
- if (nnodes_major*nnodes_minor > 1)
+ if (numPmeDomains.x * numPmeDomains.y > 1)
{
pme->mpi_comm = cr->mpi_comm_mygroup;
+#if GMX_MPI
MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
- if (pme->nnodes != nnodes_major*nnodes_minor)
+#endif
+ if (pme->nnodes != numPmeDomains.x * numPmeDomains.y)
{
gmx_incons("PME rank count mismatch");
}
{
pme->mpi_comm = MPI_COMM_NULL;
}
-#endif
if (pme->nnodes == 1)
{
-#if GMX_MPI
pme->mpi_comm_d[0] = MPI_COMM_NULL;
pme->mpi_comm_d[1] = MPI_COMM_NULL;
-#endif
- pme->ndecompdim = 0;
- pme->nodeid_major = 0;
- pme->nodeid_minor = 0;
-#if GMX_MPI
- pme->mpi_comm_d[0] = pme->mpi_comm_d[1] = MPI_COMM_NULL;
-#endif
+ pme->ndecompdim = 0;
+ pme->nodeid_major = 0;
+ pme->nodeid_minor = 0;
}
else
{
- if (nnodes_minor == 1)
+ if (numPmeDomains.y == 1)
{
-#if GMX_MPI
pme->mpi_comm_d[0] = pme->mpi_comm;
pme->mpi_comm_d[1] = MPI_COMM_NULL;
-#endif
- pme->ndecompdim = 1;
- pme->nodeid_major = pme->nodeid;
- pme->nodeid_minor = 0;
-
+ pme->ndecompdim = 1;
+ pme->nodeid_major = pme->nodeid;
+ pme->nodeid_minor = 0;
}
- else if (nnodes_major == 1)
+ else if (numPmeDomains.x == 1)
{
-#if GMX_MPI
pme->mpi_comm_d[0] = MPI_COMM_NULL;
pme->mpi_comm_d[1] = pme->mpi_comm;
-#endif
- pme->ndecompdim = 1;
- pme->nodeid_major = 0;
- pme->nodeid_minor = pme->nodeid;
+ pme->ndecompdim = 1;
+ pme->nodeid_major = 0;
+ pme->nodeid_minor = pme->nodeid;
}
else
{
- if (pme->nnodes % nnodes_major != 0)
+ if (pme->nnodes % numPmeDomains.x != 0)
{
- gmx_incons("For 2D PME decomposition, #PME ranks must be divisible by the number of ranks in the major dimension");
+ gmx_incons(
+ "For 2D PME decomposition, #PME ranks must be divisible by the number of "
+ "domains along x");
}
pme->ndecompdim = 2;
#if GMX_MPI
- MPI_Comm_split(pme->mpi_comm, pme->nodeid % nnodes_minor,
- pme->nodeid, &pme->mpi_comm_d[0]); /* My communicator along major dimension */
- MPI_Comm_split(pme->mpi_comm, pme->nodeid/nnodes_minor,
- pme->nodeid, &pme->mpi_comm_d[1]); /* My communicator along minor dimension */
+ MPI_Comm_split(pme->mpi_comm, pme->nodeid % numPmeDomains.y, pme->nodeid,
+ &pme->mpi_comm_d[0]); /* My communicator along major dimension */
+ MPI_Comm_split(pme->mpi_comm, pme->nodeid / numPmeDomains.y, pme->nodeid,
+ &pme->mpi_comm_d[1]); /* My communicator along minor dimension */
MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
#endif
}
- pme->bPPnode = thisRankHasDuty(cr, DUTY_PP);
}
+ // cr is always initialized if there is a PP rank, so we can safely assume
+ // that when it is not, as in the Ewald tests, we are not on a PP rank.
+ pme->bPPnode = ((cr != nullptr && cr->duty != 0) && thisRankHasDuty(cr, DUTY_PP));
pme->nthread = nthread;
#if GMX_MPI
if (pme->nnodes > 1)
{
- MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT,
- MPI_SUM, pme->mpi_comm);
+ MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT, MPI_SUM, pme->mpi_comm);
}
else
#endif
}
pme->bUseThreads = (sum_use_threads > 0);
- if (ir->ePBC == epbcSCREW)
+ if (ir->pbcType == PbcType::Screw)
{
gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
}
pme->ewaldcoeff_lj = ewaldcoeff_lj;
/* Always constant electrostatics coefficients */
- pme->epsilon_r = ir->epsilon_r;
+ pme->epsilon_r = ir->epsilon_r;
/* Always constant LJ coefficients */
pme->ljpme_combination_rule = ir->ljpme_combination_rule;
pme->boxScaler = new EwaldBoxZScaler(*ir);
/* If we violate restrictions, generate a fatal error here */
- gmx_pme_check_restrictions(pme->pme_order,
- pme->nkx, pme->nky, pme->nkz,
- pme->nnodes_major,
- pme->bUseThreads,
- true);
+ gmx_pme_check_restrictions(pme->pme_order, pme->nkx, pme->nky, pme->nkz, pme->nnodes_major,
+ pme->bUseThreads, true);
if (pme->nnodes > 1)
{
imbal = estimate_pme_load_imbalance(pme.get());
if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
{
- fprintf(stderr,
- "\n"
- "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
- " For optimal PME load balancing\n"
- " PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x (%d)\n"
- " and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y (%d)\n"
- "\n",
- (int)((imbal-1)*100 + 0.5),
- pme->nkx, pme->nky, pme->nnodes_major,
- pme->nky, pme->nkz, pme->nnodes_minor);
+ GMX_LOG(mdlog.warning)
+ .asParagraph()
+ .appendTextFormatted(
+ "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
+ " For optimal PME load balancing\n"
+ " PME grid_x (%d) and grid_y (%d) should be divisible by "
+ "#PME_ranks_x "
+ "(%d)\n"
+ " and PME grid_y (%d) and grid_z (%d) should be divisible by "
+ "#PME_ranks_y "
+ "(%d)",
+ gmx::roundToInt((imbal - 1) * 100), pme->nkx, pme->nky,
+ pme->nnodes_major, pme->nky, pme->nkz, pme->nnodes_minor);
}
}
* y is always copied through a buffer: we don't need padding in z,
* but we do need the overlap in x because of the communication order.
*/
- init_overlap_comm(&pme->overlap[0], pme->pme_order,
-#if GMX_MPI
- pme->mpi_comm_d[0],
-#endif
- pme->nnodes_major, pme->nodeid_major,
- pme->nkx,
- (div_round_up(pme->nky, pme->nnodes_minor)+pme->pme_order)*(pme->nkz+pme->pme_order-1));
+ init_overlap_comm(&pme->overlap[0], pme->pme_order, pme->mpi_comm_d[0], pme->nnodes_major,
+ pme->nodeid_major, pme->nkx,
+ (div_round_up(pme->nky, pme->nnodes_minor) + pme->pme_order)
+ * (pme->nkz + pme->pme_order - 1));
/* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
* We do this with an offset buffer of equal size, so we need to allocate
* extra for the offset. That's what the (+1)*pme->nkz is for.
*/
- init_overlap_comm(&pme->overlap[1], pme->pme_order,
-#if GMX_MPI
- pme->mpi_comm_d[1],
-#endif
- pme->nnodes_minor, pme->nodeid_minor,
- pme->nky,
- (div_round_up(pme->nkx, pme->nnodes_major)+pme->pme_order+1)*pme->nkz);
+ init_overlap_comm(&pme->overlap[1], pme->pme_order, pme->mpi_comm_d[1], pme->nnodes_minor,
+ pme->nodeid_minor, pme->nky,
+ (div_round_up(pme->nkx, pme->nnodes_major) + pme->pme_order + 1) * pme->nkz);
/* Double-check for a limitation of the (current) sum_fftgrid_dd code.
* Note that gmx_pme_check_restrictions checked for this already.
*/
- if (pme->bUseThreads && pme->overlap[0].noverlap_nodes > 1)
+ if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
{
- gmx_incons("More than one communication pulse required for grid overlap communication along the major dimension while using threads");
+ gmx_incons(
+ "More than one communication pulse required for grid overlap communication along "
+ "the major dimension while using threads");
}
snew(pme->bsp_mod[XX], pme->nkx);
/* The required size of the interpolation grid, including overlap.
* The allocated size (pmegrid_n?) might be slightly larger.
*/
- pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] -
- pme->overlap[0].s2g0[pme->nodeid_major];
- pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] -
- pme->overlap[1].s2g0[pme->nodeid_minor];
+ pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] - pme->overlap[0].s2g0[pme->nodeid_major];
+ pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] - pme->overlap[1].s2g0[pme->nodeid_minor];
pme->pmegrid_nz_base = pme->nkz;
pme->pmegrid_nz = pme->pmegrid_nz_base + pme->pme_order - 1;
set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
pme->pmegrid_start_iz = 0;
- make_gridindex_to_localindex(pme->nkx,
- pme->pmegrid_start_ix,
- pme->pmegrid_nx - (pme->pme_order-1),
- &pme->nnx, &pme->fshx);
- make_gridindex_to_localindex(pme->nky,
- pme->pmegrid_start_iy,
- pme->pmegrid_ny - (pme->pme_order-1),
- &pme->nny, &pme->fshy);
- make_gridindex_to_localindex(pme->nkz,
- pme->pmegrid_start_iz,
- pme->pmegrid_nz_base,
- &pme->nnz, &pme->fshz);
+ make_gridindex_to_localindex(pme->nkx, pme->pmegrid_start_ix,
+ pme->pmegrid_nx - (pme->pme_order - 1), &pme->nnx, &pme->fshx);
+ make_gridindex_to_localindex(pme->nky, pme->pmegrid_start_iy,
+ pme->pmegrid_ny - (pme->pme_order - 1), &pme->nny, &pme->fshy);
+ make_gridindex_to_localindex(pme->nkz, pme->pmegrid_start_iz, pme->pmegrid_nz_base, &pme->nnz,
+ &pme->fshz);
pme->spline_work = make_pme_spline_work(pme->pme_order);
- ndata[0] = pme->nkx;
- ndata[1] = pme->nky;
- ndata[2] = pme->nkz;
+ ndata[0] = pme->nkx;
+ ndata[1] = pme->nky;
+ ndata[2] = pme->nkz;
/* It doesn't matter if we allocate too many grids here,
* we only allocate and use the ones we need.
*/
for (i = 0; i < pme->ngrids; ++i)
{
- if ((i < DO_Q && pme->doCoulomb && (i == 0 ||
- bFreeEnergy_q)) ||
- (i >= DO_Q && pme->doLJ && (i == 2 ||
- bFreeEnergy_lj ||
- ir->ljpme_combination_rule == eljpmeLB)))
+ if ((i < DO_Q && pme->doCoulomb && (i == 0 || bFreeEnergy_q))
+ || (i >= DO_Q && pme->doLJ
+ && (i == 2 || bFreeEnergy_lj || ir->ljpme_combination_rule == eljpmeLB)))
{
- pmegrids_init(&pme->pmegrid[i],
- pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
- pme->pmegrid_nz_base,
- pme->pme_order,
- pme->bUseThreads,
- pme->nthread,
- pme->overlap[0].s2g1[pme->nodeid_major]-pme->overlap[0].s2g0[pme->nodeid_major+1],
- pme->overlap[1].s2g1[pme->nodeid_minor]-pme->overlap[1].s2g0[pme->nodeid_minor+1]);
+ pmegrids_init(&pme->pmegrid[i], pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
+ pme->pmegrid_nz_base, pme->pme_order, pme->bUseThreads, pme->nthread,
+ pme->overlap[0].s2g1[pme->nodeid_major]
+ - pme->overlap[0].s2g0[pme->nodeid_major + 1],
+ pme->overlap[1].s2g1[pme->nodeid_minor]
+ - pme->overlap[1].s2g0[pme->nodeid_minor + 1]);
/* This routine will allocate the grid data to fit the FFTs */
- const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed) ? gmx::PinningPolicy::CanBePinned : gmx::PinningPolicy::CannotBePinned;
- gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata,
- &pme->fftgrid[i], &pme->cfftgrid[i],
- pme->mpi_comm_d,
- bReproducible, pme->nthread, allocateRealGridForGpu);
-
+ const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed)
+ ? gmx::PinningPolicy::PinnedIfSupported
+ : gmx::PinningPolicy::CannotBePinned;
+ gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata, &pme->fftgrid[i], &pme->cfftgrid[i],
+ pme->mpi_comm_d, bReproducible, pme->nthread, allocateRealGridForGpu);
}
}
}
/* Use atc[0] for spreading */
- init_atomcomm(pme.get(), &pme->atc[0], nnodes_major > 1 ? 0 : 1, TRUE);
+ const int firstDimIndex = (numPmeDomains.x > 1 ? 0 : 1);
+ MPI_Comm mpiCommFirstDim = (pme->nnodes > 1 ? pme->mpi_comm_d[firstDimIndex] : MPI_COMM_NULL);
+ bool doSpread = true;
+ pme->atc.emplace_back(mpiCommFirstDim, pme->nthread, pme->pme_order, firstDimIndex, doSpread);
if (pme->ndecompdim >= 2)
{
- init_atomcomm(pme.get(), &pme->atc[1], 1, FALSE);
+ const int secondDimIndex = 1;
+ doSpread = false;
+ pme->atc.emplace_back(pme->mpi_comm_d[1], pme->nthread, pme->pme_order, secondDimIndex, doSpread);
}
- if (pme->nnodes == 1)
+ // Initial check of validity of the input for running on the GPU
+ if (pme->runMode != PmeRunMode::CPU)
{
- pme->atc[0].n = homenr;
- pme_realloc_atomcomm_things(&pme->atc[0]);
+ std::string errorString;
+ bool canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
+ if (!canRunOnGpu)
+ {
+ GMX_THROW(gmx::NotImplementedError(errorString));
+ }
+ pme_gpu_reinit(pme.get(), deviceContext, deviceStream, pmeGpuProgram);
+ }
+ else
+ {
+ GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object when PME is on a CPU.");
}
- pme->lb_buf1 = nullptr;
- pme->lb_buf2 = nullptr;
- pme->lb_buf_nalloc = 0;
-
- pme_gpu_reinit(pme.get(), gpuInfo);
pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);
return pme.release();
}
-void gmx_pme_reinit(struct gmx_pme_t **pmedata,
- t_commrec * cr,
- struct gmx_pme_t * pme_src,
- const t_inputrec * ir,
+void gmx_pme_reinit(struct gmx_pme_t** pmedata,
+ const t_commrec* cr,
+ struct gmx_pme_t* pme_src,
+ const t_inputrec* ir,
const ivec grid_size,
real ewaldcoeff_q,
real ewaldcoeff_lj)
{
- int homenr;
-
// Create a copy of t_inputrec fields that are used in gmx_pme_init().
// TODO: This would be better as just copying a sub-structure that contains
// all the PME parameters and nothing else.
t_inputrec irc;
- irc.ePBC = ir->ePBC;
+ irc.pbcType = ir->pbcType;
irc.coulombtype = ir->coulombtype;
irc.vdwtype = ir->vdwtype;
irc.efep = ir->efep;
irc.nky = grid_size[YY];
irc.nkz = grid_size[ZZ];
- if (pme_src->nnodes == 1)
- {
- homenr = pme_src->atc[0].n;
- }
- else
- {
- homenr = -1;
- }
-
try
{
+ // This is reinit. Any logging should have been done at first init.
+ // Here we should avoid writing notes for settings the user did not
+ // set directly.
const gmx::MDLogger dummyLogger;
- // This is reinit which is currently only changing grid size/coefficients,
- // so we don't expect the actual logging.
- // TODO: when PME is an object, it should take reference to mdlog on construction and save it.
GMX_ASSERT(pmedata, "Invalid PME pointer");
- *pmedata = gmx_pme_init(cr, pme_src->nnodes_major, pme_src->nnodes_minor,
- &irc, homenr, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE, ewaldcoeff_q, ewaldcoeff_lj,
- pme_src->nthread, pme_src->runMode, pme_src->gpu, nullptr, dummyLogger);
- //TODO this is mostly passing around current values
+ NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
+ *pmedata = gmx_pme_init(cr, numPmeDomains, &irc, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE,
+ ewaldcoeff_q, ewaldcoeff_lj, pme_src->nthread, pme_src->runMode,
+ pme_src->gpu, nullptr, nullptr, nullptr, dummyLogger);
+ /* When running PME on the CPU not using domain decomposition,
+ * the atom data is allocated once only in gmx_pme_(re)init().
+ */
+ if (!pme_src->gpu && pme_src->nnodes == 1)
+ {
+ gmx_pme_reinit_atoms(*pmedata, pme_src->atc[0].numAtoms(), nullptr, nullptr);
+ }
+ // TODO this is mostly passing around current values
}
- GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
/* We can easily reuse the allocated pme grids in pme_src */
reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
/* We would like to reuse the fft grids, but that's harder */
}
-void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V)
+void gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q, real* V)
{
- pme_atomcomm_t *atc;
- pmegrids_t *grid;
+ pmegrids_t* grid;
if (pme->nnodes > 1)
{
gmx_incons("gmx_pme_calc_energy called in parallel");
}
- if (pme->bFEP_q > 1)
+ if (pme->bFEP_q)
{
gmx_incons("gmx_pme_calc_energy with free energy");
}
- atc = &pme->atc_energy;
- atc->nthread = 1;
- if (atc->spline == nullptr)
+ if (!pme->atc_energy)
{
- snew(atc->spline, atc->nthread);
+ pme->atc_energy = std::make_unique<PmeAtomComm>(MPI_COMM_NULL, 1, pme->pme_order, 0, true);
}
- atc->nslab = 1;
- atc->bSpread = TRUE;
- atc->pme_order = pme->pme_order;
- atc->n = n;
- pme_realloc_atomcomm_things(atc);
+ PmeAtomComm* atc = pme->atc_energy.get();
+ atc->setNumAtoms(x.ssize());
atc->x = x;
atc->coefficient = q;
}
/*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
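+/* The coefficient buffer starts at c6/sigma^4; each subsequent
+ * calc_next_lb_coeffs() call multiplies by sigma, yielding the per-atom
+ * factor for each of the seven LB grids (grid_index 2..8) in turn. */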
-static void
-calc_initial_lb_coeffs(struct gmx_pme_t *pme, real *local_c6, real *local_sigma)
+static void calc_initial_lb_coeffs(gmx::ArrayRef<real> coefficient, const real* local_c6, const real* local_sigma)
{
- int i;
- for (i = 0; i < pme->atc[0].n; ++i)
+ for (gmx::index i = 0; i < coefficient.ssize(); ++i)
{
- real sigma4;
- sigma4 = local_sigma[i];
- sigma4 = sigma4*sigma4;
- sigma4 = sigma4*sigma4;
- pme->atc[0].coefficient[i] = local_c6[i] / sigma4;
+ real sigma4 = local_sigma[i];
+ sigma4 = sigma4 * sigma4;
+ sigma4 = sigma4 * sigma4;
+ coefficient[i] = local_c6[i] / sigma4;
}
}
/*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
-static void
-calc_next_lb_coeffs(struct gmx_pme_t *pme, real *local_sigma)
+static void calc_next_lb_coeffs(gmx::ArrayRef<real> coefficient, const real* local_sigma)
{
- int i;
-
- for (i = 0; i < pme->atc[0].n; ++i)
+ for (gmx::index i = 0; i < coefficient.ssize(); ++i)
{
- pme->atc[0].coefficient[i] *= local_sigma[i];
+ coefficient[i] *= local_sigma[i];
}
}
-int gmx_pme_do(struct gmx_pme_t *pme,
- int start, int homenr,
- rvec x[], rvec f[],
- real chargeA[], real chargeB[],
- real c6A[], real c6B[],
- real sigmaA[], real sigmaB[],
- matrix box, t_commrec *cr,
- int maxshift_x, int maxshift_y,
- t_nrnb *nrnb, gmx_wallcycle_t wcycle,
- matrix vir_q, matrix vir_lj,
- real *energy_q, real *energy_lj,
- real lambda_q, real lambda_lj,
- real *dvdlambda_q, real *dvdlambda_lj,
- int flags)
+int gmx_pme_do(struct gmx_pme_t* pme,
+ gmx::ArrayRef<const gmx::RVec> coordinates,
+ gmx::ArrayRef<gmx::RVec> forces,
+ real chargeA[],
+ real chargeB[],
+ real c6A[],
+ real c6B[],
+ real sigmaA[],
+ real sigmaB[],
+ const matrix box,
+ const t_commrec* cr,
+ int maxshift_x,
+ int maxshift_y,
+ t_nrnb* nrnb,
+ gmx_wallcycle* wcycle,
+ matrix vir_q,
+ matrix vir_lj,
+ real* energy_q,
+ real* energy_lj,
+ real lambda_q,
+ real lambda_lj,
+ real* dvdlambda_q,
+ real* dvdlambda_lj,
+ const gmx::StepWorkload& stepWork)
{
- GMX_ASSERT(pme->runMode == PmeRunMode::CPU, "gmx_pme_do should not be called on the GPU PME run.");
-
- int d, i, j, npme, grid_index, max_grid_index;
- int n_d;
- pme_atomcomm_t *atc = nullptr;
- pmegrids_t *pmegrid = nullptr;
- real *grid = nullptr;
- rvec *f_d;
- real *coefficient = nullptr;
- real energy_AB[4];
- matrix vir_AB[4];
+ GMX_ASSERT(pme->runMode == PmeRunMode::CPU,
+ "gmx_pme_do should not be called on the GPU PME run.");
+
+ int d, npme, grid_index, max_grid_index;
+ PmeAtomComm& atc = pme->atc[0];
+ pmegrids_t* pmegrid = nullptr;
+ real* grid = nullptr;
+ real* coefficient = nullptr;
+ PmeOutput output[2]; // The second is used for the B state with FEP
real scale, lambda;
gmx_bool bClearF;
gmx_parallel_3dfft_t pfft_setup;
- real * fftgrid;
- t_complex * cfftgrid;
+ real* fftgrid;
+ t_complex* cfftgrid;
int thread;
gmx_bool bFirst, bDoSplines;
int fep_state;
- int fep_states_lj = pme->bFEP_lj ? 2 : 1;
- const gmx_bool bCalcEnerVir = flags & GMX_PME_CALC_ENER_VIR;
- const gmx_bool bBackFFT = flags & (GMX_PME_CALC_F | GMX_PME_CALC_POT);
- const gmx_bool bCalcF = flags & GMX_PME_CALC_F;
+ int fep_states_lj = pme->bFEP_lj ? 2 : 1;
+ // There's no support for computing energy without virial, or vice versa
+ const bool computeEnergyAndVirial = (stepWork.computeEnergy || stepWork.computeVirial);
- /* We could be passing lambda!=1 while no q or LJ is actually perturbed */
+ /* We could be passing lambda!=0 while no q or LJ is actually perturbed */
if (!pme->bFEP_q)
{
- lambda_q = 1;
+ lambda_q = 0;
}
if (!pme->bFEP_lj)
{
- lambda_lj = 1;
+ lambda_lj = 0;
}
assert(pme->nnodes > 0);
if (pme->nnodes > 1)
{
- atc = &pme->atc[0];
- atc->npd = homenr;
- if (atc->npd > atc->pd_nalloc)
+ atc.pd.resize(coordinates.ssize());
+ for (int d = pme->ndecompdim - 1; d >= 0; d--)
{
- atc->pd_nalloc = over_alloc_dd(atc->npd);
- srenew(atc->pd, atc->pd_nalloc);
- }
- for (d = pme->ndecompdim-1; d >= 0; d--)
- {
- atc = &pme->atc[d];
- atc->maxshift = (atc->dimind == 0 ? maxshift_x : maxshift_y);
+ PmeAtomComm& atc = pme->atc[d];
+ atc.maxshift = (atc.dimind == 0 ? maxshift_x : maxshift_y);
}
}
else
{
- atc = &pme->atc[0];
- /* This could be necessary for TPI */
- pme->atc[0].n = homenr;
- if (DOMAINDECOMP(cr))
- {
- pme_realloc_atomcomm_things(atc);
- }
- atc->x = x;
- atc->f = f;
+ GMX_ASSERT(coordinates.ssize() == atc.numAtoms(), "We expect atc.numAtoms() coordinates");
+ GMX_ASSERT(forces.ssize() >= atc.numAtoms(),
+ "We need a force buffer with at least atc.numAtoms() elements");
+
+ atc.x = coordinates;
+ atc.f = forces;
}
matrix scaledBox;
* If grid_index < 2 we should be doing electrostatic PME
* If grid_index >= 2 we should be doing LJ-PME
*/
- if ((grid_index < DO_Q && (!pme->doCoulomb ||
- (grid_index == 1 && !pme->bFEP_q))) ||
- (grid_index >= DO_Q && (!pme->doLJ ||
- (grid_index == 3 && !pme->bFEP_lj))))
+ if ((grid_index < DO_Q && (!pme->doCoulomb || (grid_index == 1 && !pme->bFEP_q)))
+ || (grid_index >= DO_Q && (!pme->doLJ || (grid_index == 3 && !pme->bFEP_lj))))
{
continue;
}
pfft_setup = pme->pfft_setup[grid_index];
switch (grid_index)
{
- case 0: coefficient = chargeA + start; break;
- case 1: coefficient = chargeB + start; break;
- case 2: coefficient = c6A + start; break;
- case 3: coefficient = c6B + start; break;
+ case 0: coefficient = chargeA; break;
+ case 1: coefficient = chargeB; break;
+ case 2: coefficient = c6A; break;
+ case 3: coefficient = c6B; break;
}
grid = pmegrid->grid.grid;
if (debug)
{
- fprintf(debug, "PME: number of ranks = %d, rank = %d\n",
- cr->nnodes, cr->nodeid);
- fprintf(debug, "Grid = %p\n", (void*)grid);
+ fprintf(debug, "PME: number of ranks = %d, rank = %d\n", cr->nnodes, cr->nodeid);
+ fprintf(debug, "Grid = %p\n", static_cast<void*>(grid));
if (grid == nullptr)
{
gmx_fatal(FARGS, "No grid!");
}
}
- where();
if (pme->nnodes == 1)
{
- atc->coefficient = coefficient;
+ atc.coefficient = gmx::arrayRefFromArray(coefficient, coordinates.size());
}
else
{
wallcycle_start(wcycle, ewcPME_REDISTXF);
- do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, coefficient);
- where();
+ do_redist_pos_coeffs(pme, cr, bFirst, coordinates, coefficient);
wallcycle_stop(wcycle, ewcPME_REDISTXF);
}
if (debug)
{
- fprintf(debug, "Rank= %6d, pme local particles=%6d\n",
- cr->nodeid, atc->n);
+ fprintf(debug, "Rank= %6d, pme local particles=%6d\n", cr->nodeid, atc.numAtoms());
}
- if (flags & GMX_PME_SPREAD)
+ wallcycle_start(wcycle, ewcPME_SPREAD);
+
+ /* Spread the coefficients on a grid */
+ spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
+
+ if (bFirst)
{
- wallcycle_start(wcycle, ewcPME_SPREAD);
+ inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
+ }
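+ // each atom contributes to pme_order^3 grid points (64 for the default order 4)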
+ inc_nrnb(nrnb, eNR_SPREADBSP, pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
- /* Spread the coefficients on a grid */
- spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
+ if (!pme->bUseThreads)
+ {
+ wrap_periodic_pmegrid(pme, grid);
- if (bFirst)
+ /* sum contributions to local grid from other nodes */
+ if (pme->nnodes > 1)
{
- inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
+ gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
}
- inc_nrnb(nrnb, eNR_SPREADBSP,
- pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
-
- if (!pme->bUseThreads)
- {
- wrap_periodic_pmegrid(pme, grid);
-
- /* sum contributions to local grid from other nodes */
-#if GMX_MPI
- if (pme->nnodes > 1)
- {
- gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
- where();
- }
-#endif
- copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
- }
+ copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
+ }
- wallcycle_stop(wcycle, ewcPME_SPREAD);
+ wallcycle_stop(wcycle, ewcPME_SPREAD);
- /* TODO If the OpenMP and single-threaded implementations
- converge, then spread_on_grid() and
- copy_pmegrid_to_fftgrid() will perhaps live in the same
- source file.
- */
- }
+ /* TODO If the OpenMP and single-threaded implementations
+ converge, then spread_on_grid() and
+ copy_pmegrid_to_fftgrid() will perhaps live in the same
+ source file.
+ */
/* Here we start a large thread parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
try
{
thread = gmx_omp_get_thread_num();
- if (flags & GMX_PME_SOLVE)
- {
- int loop_count;
+ int loop_count;
- /* do 3d-fft */
- if (thread == 0)
- {
- wallcycle_start(wcycle, ewcPME_FFT);
- }
- gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
- thread, wcycle);
- if (thread == 0)
- {
- wallcycle_stop(wcycle, ewcPME_FFT);
- }
- where();
+ /* do 3d-fft */
+ if (thread == 0)
+ {
+ wallcycle_start(wcycle, ewcPME_FFT);
+ }
+ gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
+ if (thread == 0)
+ {
+ wallcycle_stop(wcycle, ewcPME_FFT);
+ }
- /* solve in k-space for our local cells */
- if (thread == 0)
- {
- wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
- }
- if (grid_index < DO_Q)
- {
- loop_count =
- solve_pme_yzx(pme, cfftgrid,
- scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
- bCalcEnerVir,
- pme->nthread, thread);
- }
- else
- {
- loop_count =
+ /* solve in k-space for our local cells */
+ if (thread == 0)
+ {
+ wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
+ }
+ if (grid_index < DO_Q)
+ {
+ loop_count = solve_pme_yzx(
+ pme, cfftgrid, scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
+ computeEnergyAndVirial, pme->nthread, thread);
+ }
+ else
+ {
+ loop_count =
solve_pme_lj_yzx(pme, &cfftgrid, FALSE,
- scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
- bCalcEnerVir,
- pme->nthread, thread);
- }
-
- if (thread == 0)
- {
- wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
- where();
- inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
- }
+ scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
+ computeEnergyAndVirial, pme->nthread, thread);
}
- if (bBackFFT)
+ if (thread == 0)
{
- /* do 3d-invfft */
- if (thread == 0)
- {
- where();
- wallcycle_start(wcycle, ewcPME_FFT);
- }
- gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
- thread, wcycle);
- if (thread == 0)
- {
- wallcycle_stop(wcycle, ewcPME_FFT);
+ wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
+ inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
+ }
- where();
+ /* do 3d-invfft */
+ if (thread == 0)
+ {
+ wallcycle_start(wcycle, ewcPME_FFT);
+ }
+ gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
+ if (thread == 0)
+ {
+ wallcycle_stop(wcycle, ewcPME_FFT);
- if (pme->nodeid == 0)
- {
- real ntot = pme->nkx*pme->nky*pme->nkz;
- npme = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
- inc_nrnb(nrnb, eNR_FFT, 2*npme);
- }
- /* Note: this wallcycle region is closed below
- outside an OpenMP region, so take care if
- refactoring code here. */
- wallcycle_start(wcycle, ewcPME_GATHER);
+ if (pme->nodeid == 0)
+ {
+ real ntot = pme->nkx * pme->nky * pme->nkz;
+ npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
+ inc_nrnb(nrnb, eNR_FFT, 2 * npme);
}
- copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
+ /* Note: this wallcycle region is closed below
+ outside an OpenMP region, so take care if
+ refactoring code here. */
+ wallcycle_start(wcycle, ewcPME_GATHER);
}
- } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+
+ copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
+ }
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
/* End of thread parallel section.
* With MPI we have to synchronize here before gmx_sum_qgrid_dd.
*/
- if (bBackFFT)
+ /* distribute local grid to all nodes */
+ if (pme->nnodes > 1)
{
- /* distribute local grid to all nodes */
-#if GMX_MPI
- if (pme->nnodes > 1)
- {
- gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
- }
-#endif
- where();
-
- unwrap_periodic_pmegrid(pme, grid);
+ gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
}
- if (bCalcF)
+ unwrap_periodic_pmegrid(pme, grid);
+
+ if (stepWork.computeForces)
{
/* interpolate forces for our local atoms */
- where();
/* If we are running without parallelization,
* atc->f is the actual force array, not a buffer,
{
try
{
- gather_f_bsplines(pme, grid, bClearF, atc,
- &atc->spline[thread],
- pme->bFEP ? (grid_index % 2 == 0 ? 1.0-lambda : lambda) : 1.0);
+ gather_f_bsplines(pme, grid, bClearF, &atc, &atc.spline[thread],
+ pme->bFEP ? (grid_index % 2 == 0 ? 1.0 - lambda : lambda) : 1.0);
}
- GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
- where();
inc_nrnb(nrnb, eNR_GATHERFBSP,
- pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
+ pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
/* Note: this wallcycle region is opened above inside an OpenMP
region, so take care if refactoring code here. */
wallcycle_stop(wcycle, ewcPME_GATHER);
}
- if (bCalcEnerVir)
+ if (computeEnergyAndVirial)
{
/* This should only be called on the master thread
* and after the threads have synchronized.
*/
if (grid_index < 2)
{
- get_pme_ener_vir_q(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
+ get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output[grid_index % 2]);
}
else
{
- get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
+ get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[grid_index % 2]);
}
}
bFirst = FALSE;
for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
{
real *local_c6 = nullptr, *local_sigma = nullptr, *RedistC6 = nullptr, *RedistSigma = nullptr;
+ gmx::ArrayRef<real> coefficientBuffer;
if (pme->nnodes == 1)
{
- if (pme->lb_buf1 == nullptr)
- {
- pme->lb_buf_nalloc = pme->atc[0].n;
- snew(pme->lb_buf1, pme->lb_buf_nalloc);
- }
- pme->atc[0].coefficient = pme->lb_buf1;
+ pme->lb_buf1.resize(atc.numAtoms());
+ coefficientBuffer = pme->lb_buf1;
switch (fep_state)
{
case 0:
- local_c6 = c6A;
- local_sigma = sigmaA;
+ local_c6 = c6A;
+ local_sigma = sigmaA;
break;
case 1:
- local_c6 = c6B;
- local_sigma = sigmaB;
+ local_c6 = c6B;
+ local_sigma = sigmaB;
break;
- default:
- gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
+ default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
}
}
else
{
- atc = &pme->atc[0];
+ coefficientBuffer = atc.coefficientBuffer;
switch (fep_state)
{
case 0:
- RedistC6 = c6A;
- RedistSigma = sigmaA;
+ RedistC6 = c6A;
+ RedistSigma = sigmaA;
break;
case 1:
- RedistC6 = c6B;
- RedistSigma = sigmaB;
+ RedistC6 = c6B;
+ RedistSigma = sigmaB;
break;
- default:
- gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
+ default: gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
}
wallcycle_start(wcycle, ewcPME_REDISTXF);
- do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, RedistC6);
- if (pme->lb_buf_nalloc < atc->n)
- {
- pme->lb_buf_nalloc = atc->nalloc;
- srenew(pme->lb_buf1, pme->lb_buf_nalloc);
- srenew(pme->lb_buf2, pme->lb_buf_nalloc);
- }
- local_c6 = pme->lb_buf1;
- for (i = 0; i < atc->n; ++i)
+ do_redist_pos_coeffs(pme, cr, bFirst, coordinates, RedistC6);
+ pme->lb_buf1.resize(atc.numAtoms());
+ pme->lb_buf2.resize(atc.numAtoms());
+ local_c6 = pme->lb_buf1.data();
+ for (int i = 0; i < atc.numAtoms(); ++i)
{
- local_c6[i] = atc->coefficient[i];
+ local_c6[i] = atc.coefficient[i];
}
- where();
- do_redist_pos_coeffs(pme, cr, start, homenr, FALSE, x, RedistSigma);
- local_sigma = pme->lb_buf2;
- for (i = 0; i < atc->n; ++i)
+ do_redist_pos_coeffs(pme, cr, FALSE, coordinates, RedistSigma);
+ local_sigma = pme->lb_buf2.data();
+ for (int i = 0; i < atc.numAtoms(); ++i)
{
- local_sigma[i] = atc->coefficient[i];
+ local_sigma[i] = atc.coefficient[i];
}
- where();
wallcycle_stop(wcycle, ewcPME_REDISTXF);
}
- calc_initial_lb_coeffs(pme, local_c6, local_sigma);
+ atc.coefficient = coefficientBuffer;
+ calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
/* Seven terms in LJ-PME with LB; grid_index < 2 is reserved for electrostatics */
for (grid_index = 2; grid_index < 9; ++grid_index)
pmegrid = &pme->pmegrid[grid_index];
fftgrid = pme->fftgrid[grid_index];
pfft_setup = pme->pfft_setup[grid_index];
- calc_next_lb_coeffs(pme, local_sigma);
+ calc_next_lb_coeffs(coefficientBuffer, local_sigma);
grid = pmegrid->grid.grid;
- where();
- if (flags & GMX_PME_SPREAD)
- {
- wallcycle_start(wcycle, ewcPME_SPREAD);
- /* Spread the c6 on a grid */
- spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
+ wallcycle_start(wcycle, ewcPME_SPREAD);
+ /* Spread the c6 on a grid */
+ spread_on_grid(pme, &atc, pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
- if (bFirst)
- {
- inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
- }
+ if (bFirst)
+ {
+ inc_nrnb(nrnb, eNR_WEIGHTS, DIM * atc.numAtoms());
+ }
- inc_nrnb(nrnb, eNR_SPREADBSP,
- pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
- if (pme->nthread == 1)
+ inc_nrnb(nrnb, eNR_SPREADBSP,
+ pme->pme_order * pme->pme_order * pme->pme_order * atc.numAtoms());
+ if (pme->nthread == 1)
+ {
+ wrap_periodic_pmegrid(pme, grid);
+ /* sum contributions to local grid from other nodes */
+ if (pme->nnodes > 1)
{
- wrap_periodic_pmegrid(pme, grid);
- /* sum contributions to local grid from other nodes */
-#if GMX_MPI
- if (pme->nnodes > 1)
- {
- gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
- where();
- }
-#endif
- copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
+ gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
}
- wallcycle_stop(wcycle, ewcPME_SPREAD);
+ copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
}
+ wallcycle_stop(wcycle, ewcPME_SPREAD);
+
/* Here we start a large thread parallel region */
#pragma omp parallel num_threads(pme->nthread) private(thread)
{
try
{
thread = gmx_omp_get_thread_num();
- if (flags & GMX_PME_SOLVE)
+ /* do 3d-fft */
+ if (thread == 0)
{
- /* do 3d-fft */
- if (thread == 0)
- {
- wallcycle_start(wcycle, ewcPME_FFT);
- }
+ wallcycle_start(wcycle, ewcPME_FFT);
+ }
- gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
- thread, wcycle);
- if (thread == 0)
- {
- wallcycle_stop(wcycle, ewcPME_FFT);
- }
- where();
+ gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX, thread, wcycle);
+ if (thread == 0)
+ {
+ wallcycle_stop(wcycle, ewcPME_FFT);
}
}
- GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
bFirst = FALSE;
}
- if (flags & GMX_PME_SOLVE)
- {
- /* solve in k-space for our local cells */
+ /* solve in k-space for our local cells */
#pragma omp parallel num_threads(pme->nthread) private(thread)
+ {
+ try
{
- try
+ int loop_count;
+ thread = gmx_omp_get_thread_num();
+ if (thread == 0)
{
- int loop_count;
- thread = gmx_omp_get_thread_num();
- if (thread == 0)
- {
- wallcycle_start(wcycle, ewcLJPME);
- }
+ wallcycle_start(wcycle, ewcLJPME);
+ }
- loop_count =
+ loop_count =
solve_pme_lj_yzx(pme, &pme->cfftgrid[2], TRUE,
- scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
- bCalcEnerVir,
- pme->nthread, thread);
- if (thread == 0)
- {
- wallcycle_stop(wcycle, ewcLJPME);
- where();
- inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
- }
+ scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ],
+ computeEnergyAndVirial, pme->nthread, thread);
+ if (thread == 0)
+ {
+ wallcycle_stop(wcycle, ewcLJPME);
+ inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
}
- GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
- if (bCalcEnerVir)
+ if (computeEnergyAndVirial)
{
/* This should only be called on the master thread and
* after the threads have synchronized.
*/
- get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[2+fep_state], vir_AB[2+fep_state]);
+ get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output[fep_state]);
}
- if (bBackFFT)
+ bFirst = !pme->doCoulomb;
+ calc_initial_lb_coeffs(coefficientBuffer, local_c6, local_sigma);
+ for (grid_index = 8; grid_index >= 2; --grid_index)
{
- bFirst = !pme->doCoulomb;
- calc_initial_lb_coeffs(pme, local_c6, local_sigma);
- for (grid_index = 8; grid_index >= 2; --grid_index)
- {
- /* Unpack structure */
- pmegrid = &pme->pmegrid[grid_index];
- fftgrid = pme->fftgrid[grid_index];
- pfft_setup = pme->pfft_setup[grid_index];
- grid = pmegrid->grid.grid;
- calc_next_lb_coeffs(pme, local_sigma);
- where();
+ /* Unpack structure */
+ pmegrid = &pme->pmegrid[grid_index];
+ fftgrid = pme->fftgrid[grid_index];
+ pfft_setup = pme->pfft_setup[grid_index];
+ grid = pmegrid->grid.grid;
+ calc_next_lb_coeffs(coefficientBuffer, local_sigma);
#pragma omp parallel num_threads(pme->nthread) private(thread)
+ {
+ try
{
- try
+ thread = gmx_omp_get_thread_num();
+ /* do 3d-invfft */
+ if (thread == 0)
{
- thread = gmx_omp_get_thread_num();
- /* do 3d-invfft */
- if (thread == 0)
- {
- where();
- wallcycle_start(wcycle, ewcPME_FFT);
- }
+ wallcycle_start(wcycle, ewcPME_FFT);
+ }
- gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
- thread, wcycle);
- if (thread == 0)
- {
- wallcycle_stop(wcycle, ewcPME_FFT);
+ gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL, thread, wcycle);
+ if (thread == 0)
+ {
+ wallcycle_stop(wcycle, ewcPME_FFT);
- where();
- if (pme->nodeid == 0)
- {
- real ntot = pme->nkx*pme->nky*pme->nkz;
- npme = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
- inc_nrnb(nrnb, eNR_FFT, 2*npme);
- }
- wallcycle_start(wcycle, ewcPME_GATHER);
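+ /* Estimate the FFT flop count as ntot*log2(ntot), counted twice for the
+  * forward/backward transform pair of this grid. */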
+ if (pme->nodeid == 0)
+ {
+ real ntot = pme->nkx * pme->nky * pme->nkz;
+ npme = static_cast<int>(ntot * std::log(ntot) / std::log(2.0));
+ inc_nrnb(nrnb, eNR_FFT, 2 * npme);
}
-
- copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
+ wallcycle_start(wcycle, ewcPME_GATHER);
}
- GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
- } /*#pragma omp parallel*/
- /* distribute local grid to all nodes */
-#if GMX_MPI
- if (pme->nnodes > 1)
- {
- gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
+ copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
}
-#endif
- where();
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
+ } /* #pragma omp parallel */
- unwrap_periodic_pmegrid(pme, grid);
+ /* distribute local grid to all nodes */
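+ /* GMX_SUM_GRID_BACKWARD is the inverse of the forward sum: each rank sends
+  * its local grid values back to the neighboring ranks whose overlap regions
+  * need them for force interpolation. */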
+ if (pme->nnodes > 1)
+ {
+ gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
+ }
- if (bCalcF)
- {
- /* interpolate forces for our local atoms */
- where();
- bClearF = (bFirst && PAR(cr));
- scale = pme->bFEP ? (fep_state < 1 ? 1.0-lambda_lj : lambda_lj) : 1.0;
- scale *= lb_scale_factor[grid_index-2];
+ unwrap_periodic_pmegrid(pme, grid);
+
+ if (stepWork.computeForces)
+ {
+ /* interpolate forces for our local atoms */
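+ /* The gather scale combines the FEP lambda weight of this state with the
+  * binomial weight of the current LB term (lb_scale_factor[grid_index - 2]);
+  * bClearF requests overwriting rather than accumulating forces on the first
+  * gathered grid in parallel runs. */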
+ bClearF = (bFirst && PAR(cr));
+ scale = pme->bFEP ? (fep_state < 1 ? 1.0 - lambda_lj : lambda_lj) : 1.0;
+ scale *= lb_scale_factor[grid_index - 2];
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
- for (thread = 0; thread < pme->nthread; thread++)
+ for (thread = 0; thread < pme->nthread; thread++)
+ {
+ try
{
- try
- {
- gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
- &pme->atc[0].spline[thread],
- scale);
- }
- GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+ gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
+ &pme->atc[0].spline[thread], scale);
}
+ GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
+ }
- where();
- inc_nrnb(nrnb, eNR_GATHERFBSP,
- pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
- }
- wallcycle_stop(wcycle, ewcPME_GATHER);
+ inc_nrnb(nrnb, eNR_GATHERFBSP,
+ pme->pme_order * pme->pme_order * pme->pme_order * pme->atc[0].numAtoms());
+ }
+ wallcycle_stop(wcycle, ewcPME_GATHER);
- bFirst = FALSE;
- } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
- } /* if (bCalcF) */
- } /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
- } /* if ((flags & GMX_PME_DO_LJ) && pme->ljpme_combination_rule == eljpmeLB) */
+ bFirst = FALSE;
+ } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
+ } /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
+ } /* if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB) */
- if (bCalcF && pme->nnodes > 1)
+ if (stepWork.computeForces && pme->nnodes > 1)
{
wallcycle_start(wcycle, ewcPME_REDISTXF);
for (d = 0; d < pme->ndecompdim; d++)
{
- atc = &pme->atc[d];
+ gmx::ArrayRef<gmx::RVec> forcesRef;
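+ /* For the last decomposition dimension the redistributed forces go straight
+  * into the caller's buffer; earlier dimensions stage them in the next
+  * dimension's atom-communication struct. */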
if (d == pme->ndecompdim - 1)
{
- n_d = homenr;
- f_d = f + start;
+ const size_t numAtoms = coordinates.size();
+ GMX_ASSERT(forces.size() >= numAtoms, "Need at least numAtoms forces");
+ forcesRef = forces.subArray(0, numAtoms);
}
else
{
- n_d = pme->atc[d+1].n;
- f_d = pme->atc[d+1].f;
+ forcesRef = pme->atc[d + 1].f;
}
if (DOMAINDECOMP(cr))
{
- dd_pmeredist_f(pme, atc, n_d, f_d,
- d == pme->ndecompdim-1 && pme->bPPnode);
+ dd_pmeredist_f(pme, &pme->atc[d], forcesRef, d == pme->ndecompdim - 1 && pme->bPPnode);
}
}
wallcycle_stop(wcycle, ewcPME_REDISTXF);
}
- where();
- if (bCalcEnerVir)
+ if (computeEnergyAndVirial)
{
if (pme->doCoulomb)
{
if (!pme->bFEP_q)
{
- *energy_q = energy_AB[0];
- m_add(vir_q, vir_AB[0], vir_q);
+ *energy_q = output[0].coulombEnergy_;
+ m_add(vir_q, output[0].coulombVirial_, vir_q);
}
else
{
- *energy_q = (1.0-lambda_q)*energy_AB[0] + lambda_q*energy_AB[1];
- *dvdlambda_q += energy_AB[1] - energy_AB[0];
- for (i = 0; i < DIM; i++)
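+ /* Linear interpolation in lambda: E = (1 - lambda)*E_A + lambda*E_B, hence
+  * dE/dlambda = E_B - E_A; the virial is interpolated the same way below. */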
+ *energy_q = (1.0 - lambda_q) * output[0].coulombEnergy_ + lambda_q * output[1].coulombEnergy_;
+ *dvdlambda_q += output[1].coulombEnergy_ - output[0].coulombEnergy_;
+ for (int i = 0; i < DIM; i++)
{
- for (j = 0; j < DIM; j++)
+ for (int j = 0; j < DIM; j++)
{
- vir_q[i][j] += (1.0-lambda_q)*vir_AB[0][i][j] +
- lambda_q*vir_AB[1][i][j];
+ vir_q[i][j] += (1.0 - lambda_q) * output[0].coulombVirial_[i][j]
+ + lambda_q * output[1].coulombVirial_[i][j];
}
}
}
if (pme->doLJ)
{
if (!pme->bFEP_lj)
{
- *energy_lj = energy_AB[2];
- m_add(vir_lj, vir_AB[2], vir_lj);
+ *energy_lj = output[0].lennardJonesEnergy_;
+ m_add(vir_lj, output[0].lennardJonesVirial_, vir_lj);
}
else
{
- *energy_lj = (1.0-lambda_lj)*energy_AB[2] + lambda_lj*energy_AB[3];
- *dvdlambda_lj += energy_AB[3] - energy_AB[2];
- for (i = 0; i < DIM; i++)
+ *energy_lj = (1.0 - lambda_lj) * output[0].lennardJonesEnergy_
+ + lambda_lj * output[1].lennardJonesEnergy_;
+ *dvdlambda_lj += output[1].lennardJonesEnergy_ - output[0].lennardJonesEnergy_;
+ for (int i = 0; i < DIM; i++)
{
- for (j = 0; j < DIM; j++)
+ for (int j = 0; j < DIM; j++)
{
- vir_lj[i][j] += (1.0-lambda_lj)*vir_AB[2][i][j] + lambda_lj*vir_AB[3][i][j];
+ vir_lj[i][j] += (1.0 - lambda_lj) * output[0].lennardJonesVirial_[i][j]
+ + lambda_lj * output[1].lennardJonesVirial_[i][j];
}
}
}
return 0;
}
-void gmx_pme_destroy(gmx_pme_t *pme)
+void gmx_pme_destroy(gmx_pme_t* pme)
{
if (!pme)
{
    return;
}
sfree(pme->cfftgrid);
sfree(pme->pfft_setup);
- for (int i = 0; i < std::max(1, pme->ndecompdim); i++) //pme->atc[0] is always allocated
- {
- destroy_atomcomm(&pme->atc[i]);
- }
-
for (int i = 0; i < DIM; i++)
{
sfree(pme->bsp_mod[i]);
}
- destroy_overlap_comm(&pme->overlap[0]);
- destroy_overlap_comm(&pme->overlap[1]);
-
- sfree(pme->lb_buf1);
- sfree(pme->lb_buf2);
-
sfree(pme->bufv);
sfree(pme->bufr);
sfree(pme->sum_qgrid_tmp);
sfree(pme->sum_qgrid_dd_tmp);
- if (pme_gpu_active(pme) && pme->gpu)
+ destroy_pme_spline_work(pme->spline_work);
+
+ if (pme->gpu != nullptr)
{
pme_gpu_destroy(pme->gpu);
}
delete pme;
}
-void gmx_pme_reinit_atoms(const gmx_pme_t *pme, const int nAtoms, const real *charges)
+void gmx_pme_reinit_atoms(gmx_pme_t* pme, const int numAtoms, const real* chargesA, const real* chargesB)
{
- if (pme_gpu_active(pme))
+ if (pme->gpu != nullptr)
{
- pme_gpu_reinit_atoms(pme->gpu, nAtoms, charges);
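+ /* The GPU path takes both charge sets at (re)initialization; the B-state
+  * set is only required, and only used, when doing Coulomb FEP. */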
+ GMX_ASSERT(!(pme->bFEP_q && chargesB == nullptr),
+ "B state charges must be specified if running Coulomb FEP on the GPU");
+ pme_gpu_reinit_atoms(pme->gpu, numAtoms, chargesA, pme->bFEP_q ? chargesB : nullptr);
}
- // TODO: handle the CPU case here; handle the whole t_mdatoms
+ else
+ {
+ pme->atc[0].setNumAtoms(numAtoms);
+ // TODO: set the charges here as well
+ }
+}
+
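+/* Returns whether the PME grids match the given dimensions, e.g. so callers
+ * can check whether an existing PME structure can be reused for a new grid. */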
+bool gmx_pme_grid_matches(const gmx_pme_t& pme, const ivec grid_size)
+{
+ return (pme.nkx == grid_size[XX] && pme.nky == grid_size[YY] && pme.nkz == grid_size[ZZ]);
}