Apply clang-format to source tree
[alexxy/gromacs.git] / src/gromacs/ewald/pme_redistribute.cpp
index 11258fd0ad32cefd798f2643c34b0f9a2ecd0b04..d06e44921d7dcbb185a17ba1a263b5e7b0423f96 100644
 #include "pme_internal.h"
 
 //! Calculate the slab indices and store in \p atc, store counts in \p count
-static void pme_calc_pidx(int start, int end,
-                          const matrix recipbox, const rvec x[],
-                          PmeAtomComm *atc, int *count)
+static void pme_calc_pidx(int start, int end, const matrix recipbox, const rvec x[], PmeAtomComm* atc, int* count)
 {
     int         nslab, i;
     int         si;
-    const real *xptr;
+    const real* xptr;
     real        s;
     real        rxx, ryx, rzx, ryy, rzy;
-    int        *pd;
+    int*        pd;
 
     /* Calculate PME task index (pidx) for each grid index.
      * Here we always assign equally sized slabs to each node
@@ -95,10 +93,10 @@ static void pme_calc_pidx(int start, int end,
         /* Calculate the node index in x-dimension */
         for (i = start; i < end; i++)
         {
-            xptr   = x[i];
+            xptr = x[i];
             /* Fractional coordinates along box vectors */
-            s     = nslab*(xptr[XX]*rxx + xptr[YY]*ryx + xptr[ZZ]*rzx);
-            si    = static_cast<int>(s + 2*nslab) % nslab;
+            s     = nslab * (xptr[XX] * rxx + xptr[YY] * ryx + xptr[ZZ] * rzx);
+            si    = static_cast<int>(s + 2 * nslab) % nslab;
             pd[i] = si;
             count[si]++;
         }
@@ -110,10 +108,10 @@ static void pme_calc_pidx(int start, int end,
         /* Calculate the node index in y-dimension */
         for (i = start; i < end; i++)
         {
-            xptr   = x[i];
+            xptr = x[i];
             /* Fractional coordinates along box vectors */
-            s     = nslab*(xptr[YY]*ryy + xptr[ZZ]*rzy);
-            si    = static_cast<int>(s + 2*nslab) % nslab;
+            s     = nslab * (xptr[YY] * ryy + xptr[ZZ] * rzy);
+            si    = static_cast<int>(s + 2 * nslab) % nslab;
             pd[i] = si;
             count[si]++;
         }
@@ -121,9 +119,7 @@ static void pme_calc_pidx(int start, int end,
 }
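
The index math itself is untouched by the reformat: s is the atom's fractional coordinate along the first box vector, scaled by nslab, and adding 2*nslab before the integer truncation and modulo keeps atoms sitting slightly outside the box inside [0, nslab). A minimal standalone sketch (not GROMACS code), assuming a rectangular 5 nm box so that only the XX component of recipbox contributes:

    #include <cstdio>

    int main()
    {
        const int    nslab      = 4;                 // number of PME slabs along x
        const double recipboxXX = 1.0 / 5.0;         // assumed rectangular 5 nm box
        const double x[3]       = { 3.7, 1.2, 0.8 }; // one atom position (nm)

        // Fractional coordinate along the first box vector, scaled by nslab.
        double s = nslab * (x[0] * recipboxXX); // = 2.96
        // Adding 2*nslab keeps slightly out-of-box coordinates in [0, nslab).
        int si = static_cast<int>(s + 2 * nslab) % nslab;

        std::printf("atom maps to slab %d\n", si); // prints: atom maps to slab 2
        return 0;
    }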
 
 //! Wrapper function for calculating slab indices, stored in \p atc
-static void pme_calc_pidx_wrapper(gmx::ArrayRef<const gmx::RVec>  x,
-                                  const matrix                    recipbox,
-                                  PmeAtomComm                    *atc)
+static void pme_calc_pidx_wrapper(gmx::ArrayRef<const gmx::RVec> x, const matrix recipbox, PmeAtomComm* atc)
 {
     int nthread = atc->nthread;
 
@@ -133,12 +129,10 @@ static void pme_calc_pidx_wrapper(gmx::ArrayRef<const gmx::RVec>  x,
         try
         {
             const int natoms = x.ssize();
-            pme_calc_pidx(natoms* thread   /nthread,
-                          natoms*(thread+1)/nthread,
-                          recipbox, as_rvec_array(x.data()),
-                          atc, atc->count_thread[thread].data());
+            pme_calc_pidx(natoms * thread / nthread, natoms * (thread + 1) / nthread, recipbox,
+                          as_rvec_array(x.data()), atc, atc->count_thread[thread].data());
         }
-        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
+        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
     }
     /* Non-parallel reduction, since nslab is small */
 
@@ -162,18 +156,16 @@ void SplineCoefficients::realloc(const int nalloc)
     bufferY_.resize(nalloc);
     coefficients[YY] = bufferY_.data();
     /* In z we add padding, this is only required for the aligned 4-wide SIMD code */
-    bufferZ_.resize(nalloc + 2*padding);
+    bufferZ_.resize(nalloc + 2 * padding);
     coefficients[ZZ] = bufferZ_.data() + padding;
 }
 
 #endif // !DOXYGEN
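
The 2 * padding over-allocation in z exists so that the aligned 4-wide SIMD kernels can read a full vector just before or just after the logical range without leaving the allocation; publishing the pointer offset by padding keeps index 0 on the first real element. A sketch of the idea with a hypothetical stand-in type:

    #include <vector>

    // Hypothetical stand-in for SplineCoefficients' z buffer (not GROMACS code).
    struct PaddedZBuffer
    {
        static constexpr int padding = 4; // matches the 4-wide aligned SIMD loads
        std::vector<float>   storage;
        float*               data = nullptr;

        void realloc(int nalloc)
        {
            // Accesses in [-padding, nalloc + padding) stay inside the allocation.
            storage.resize(nalloc + 2 * padding);
            data = storage.data() + padding;
        }
    };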
 
 //! Reallocates all buffers in \p spline to fit atoms in \p atc
-static void pme_realloc_splinedata(splinedata_t         *spline,
-                                   const PmeAtomComm    *atc)
+static void pme_realloc_splinedata(splinedata_t* spline, const PmeAtomComm* atc)
 {
-    if (spline->nalloc >= atc->x.ssize() &&
-        spline->nalloc >= atc->numAtoms())
+    if (spline->nalloc >= atc->x.ssize() && spline->nalloc >= atc->numAtoms())
     {
         return;
     }
@@ -186,8 +178,8 @@ static void pme_realloc_splinedata(splinedata_t         *spline,
         spline->ind[i] = i;
     }
 
-    spline->theta.realloc(atc->pme_order*spline->nalloc);
-    spline->dtheta.realloc(atc->pme_order*spline->nalloc);
+    spline->theta.realloc(atc->pme_order * spline->nalloc);
+    spline->dtheta.realloc(atc->pme_order * spline->nalloc);
 }
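
theta and dtheta are sized pme_order * spline->nalloc because each atom carries pme_order B-spline coefficients (and their derivatives). One plausible accessor for that layout, shown only to make the sizing concrete (hypothetical, not a GROMACS API):

    // Coefficient k of atom `atom`, assuming pme_order consecutive values per atom.
    inline float& splineCoeff(float* theta, int pme_order, int atom, int k)
    {
        return theta[atom * pme_order + k];
    }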
 
 #ifndef DOXYGEN
@@ -212,7 +204,7 @@ void PmeAtomComm::setNumAtoms(const int numAtoms)
         {
             coefficientBuffer.reserve(1);
         }
-        coefficient = coefficientBuffer;
+        coefficient          = coefficientBuffer;
         const int nalloc_old = fBuffer.size();
         fBuffer.resize(numAtoms_);
         for (int i = nalloc_old; i < numAtoms_; i++)
@@ -241,10 +233,13 @@ void PmeAtomComm::setNumAtoms(const int numAtoms)
 #endif // !DOXYGEN
 
 //! Communicates buffers between ranks separated by \p shift slabs
-static void pme_dd_sendrecv(PmeAtomComm gmx_unused *atc,
-                            gmx_bool gmx_unused bBackward, int gmx_unused shift,
-                            void gmx_unused *buf_s, int gmx_unused nbyte_s,
-                            void gmx_unused *buf_r, int gmx_unused nbyte_r)
+static void pme_dd_sendrecv(PmeAtomComm gmx_unused* atc,
+                            gmx_bool gmx_unused bBackward,
+                            int gmx_unused shift,
+                            void gmx_unused* buf_s,
+                            int gmx_unused nbyte_s,
+                            void gmx_unused* buf_r,
+                            int gmx_unused nbyte_r)
 {
 #if GMX_MPI
     int        dest, src;
@@ -263,54 +258,49 @@ static void pme_dd_sendrecv(PmeAtomComm gmx_unused *atc,
 
     if (nbyte_s > 0 && nbyte_r > 0)
     {
-        MPI_Sendrecv(buf_s, nbyte_s, MPI_BYTE,
-                     dest, shift,
-                     buf_r, nbyte_r, MPI_BYTE,
-                     src, shift,
+        MPI_Sendrecv(buf_s, nbyte_s, MPI_BYTE, dest, shift, buf_r, nbyte_r, MPI_BYTE, src, shift,
                      atc->mpi_comm, &stat);
     }
     else if (nbyte_s > 0)
     {
-        MPI_Send(buf_s, nbyte_s, MPI_BYTE,
-                 dest, shift,
-                 atc->mpi_comm);
+        MPI_Send(buf_s, nbyte_s, MPI_BYTE, dest, shift, atc->mpi_comm);
     }
     else if (nbyte_r > 0)
     {
-        MPI_Recv(buf_r, nbyte_r, MPI_BYTE,
-                 src, shift,
-                 atc->mpi_comm, &stat);
+        MPI_Recv(buf_r, nbyte_r, MPI_BYTE, src, shift, atc->mpi_comm, &stat);
     }
 #endif
 }
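
The partner ranks dest and src are computed in the elided body from atc->slabCommSetup; for a ring of nslab slabs they amount to the modular neighbours shift slabs away, with bBackward swapping the two roles for the force path. A hedged sketch of that addressing, under the assumption of a plain modular ring:

    #include <cstdio>
    #include <utility>

    int main()
    {
        const int  nslab = 4, nodeid = 0, shift = 1;
        const bool bBackward = false;

        int dest = (nodeid + shift) % nslab;         // send `shift` slabs forward
        int src  = (nodeid - shift + nslab) % nslab; // receive from `shift` slabs back
        if (bBackward)
        {
            std::swap(dest, src); // the force path reverses the flow
        }
        std::printf("rank %d sends to %d, receives from %d\n", nodeid, dest, src);
        return 0;
    }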
 
 //! Redistributes \p data and optionally coordinates between MPI ranks
-static void dd_pmeredist_pos_coeffs(gmx_pme_t                      *pme,
-                                    const gmx_bool                  bX,
-                                    gmx::ArrayRef<const gmx::RVec>  x,
-                                    const real                     *data,
-                                    PmeAtomComm                    *atc)
+static void dd_pmeredist_pos_coeffs(gmx_pme_t*                     pme,
+                                    const gmx_bool                 bX,
+                                    gmx::ArrayRef<const gmx::RVec> x,
+                                    const real*                    data,
+                                    PmeAtomComm*                   atc)
 {
-    int  nnodes_comm, i, local_pos, buf_pos, node;
+    int nnodes_comm, i, local_pos, buf_pos, node;
 
-    nnodes_comm = std::min(2*atc->maxshift, atc->nslab-1);
+    nnodes_comm = std::min(2 * atc->maxshift, atc->nslab - 1);
 
     auto sendCount = atc->sendCount();
     int  nsend     = 0;
     for (i = 0; i < nnodes_comm; i++)
     {
-        const int commnode                 = atc->slabCommSetup[i].node_dest;
-        atc->slabCommSetup[commnode].buf_index  = nsend;
-        nsend                                  += sendCount[commnode];
+        const int commnode                     = atc->slabCommSetup[i].node_dest;
+        atc->slabCommSetup[commnode].buf_index = nsend;
+        nsend += sendCount[commnode];
     }
     if (bX)
     {
         if (sendCount[atc->nodeid] + nsend != x.ssize())
         {
-            gmx_fatal(FARGS, "%zd particles communicated to PME rank %d are more than 2/3 times the cut-off out of the domain decomposition cell of their charge group in dimension %c.\n"
-                      "This usually means that your system is not well equilibrated.",
-                      x.ssize() - (sendCount[atc->nodeid] + nsend),
-                      pme->nodeid, 'x'+atc->dimind);
+            gmx_fatal(
+                    FARGS,
+                    "%zd particles communicated to PME rank %d are more than 2/3 times the cut-off "
+                    "out of the domain decomposition cell of their charge group in dimension %c.\n"
+                    "This usually means that your system is not well equilibrated.",
+                    x.ssize() - (sendCount[atc->nodeid] + nsend), pme->nodeid, 'x' + atc->dimind);
         }
 
         if (nsend > pme->buf_nalloc)
@@ -328,12 +318,11 @@ static void dd_pmeredist_pos_coeffs(gmx_pme_t                      *pme,
             /* Communicate the count */
             if (debug)
             {
-                fprintf(debug, "dimind %d PME rank %d send to rank %d: %d\n",
-                        atc->dimind, atc->nodeid, commnode, scount);
+                fprintf(debug, "dimind %d PME rank %d send to rank %d: %d\n", atc->dimind,
+                        atc->nodeid, commnode, scount);
             }
-            pme_dd_sendrecv(atc, FALSE, i,
-                            &scount, sizeof(int),
-                            &atc->slabCommSetup[i].rcount, sizeof(int));
+            pme_dd_sendrecv(atc, FALSE, i, &scount, sizeof(int), &atc->slabCommSetup[i].rcount,
+                            sizeof(int));
             numAtoms += atc->slabCommSetup[i].rcount;
         }
 
@@ -357,7 +346,7 @@ static void dd_pmeredist_pos_coeffs(gmx_pme_t                      *pme,
         else
         {
             /* Copy to the send buffer */
-            int &buf_index = atc->slabCommSetup[node].buf_index;
+            int& buf_index = atc->slabCommSetup[node].buf_index;
             if (bX)
             {
                 copy_rvec(x[i], pme->bufv[buf_index]);
@@ -377,27 +366,23 @@ static void dd_pmeredist_pos_coeffs(gmx_pme_t                      *pme,
             if (bX)
             {
                 /* Communicate the coordinates */
-                pme_dd_sendrecv(atc, FALSE, i,
-                                pme->bufv[buf_pos], scount*sizeof(rvec),
-                                atc->xBuffer[local_pos], rcount*sizeof(rvec));
+                pme_dd_sendrecv(atc, FALSE, i, pme->bufv[buf_pos], scount * sizeof(rvec),
+                                atc->xBuffer[local_pos], rcount * sizeof(rvec));
             }
             /* Communicate the coefficients */
-            pme_dd_sendrecv(atc, FALSE, i,
-                            pme->bufr+buf_pos, scount*sizeof(real),
-                            atc->coefficientBuffer.data() + local_pos, rcount*sizeof(real));
-            buf_pos   += scount;
+            pme_dd_sendrecv(atc, FALSE, i, pme->bufr + buf_pos, scount * sizeof(real),
+                            atc->coefficientBuffer.data() + local_pos, rcount * sizeof(real));
+            buf_pos += scount;
             local_pos += atc->slabCommSetup[i].rcount;
         }
     }
 }
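
Overall the function is a two-phase counting-sort scatter: the first loop exchanges per-partner counts so every rank knows its receive sizes, and the second pass copies local atoms (node == atc->nodeid) straight into the local arrays while staging the rest at the pre-summed buf_index offsets for pme_dd_sendrecv. A simplified sketch of the staging pass, with hypothetical flat buffers and coefficients only:

    #include <cstddef>
    #include <vector>

    void scatterBySlab(const std::vector<int>&    pd,       // slab index per atom
                       int                        mySlab,   // this rank's slab
                       const std::vector<double>& data,
                       std::vector<double>&       local,    // atoms that stay here
                       std::vector<double>&       sendBuf,  // staged for communication
                       std::vector<int>&          bufIndex) // prefix sums of send counts
    {
        for (std::size_t i = 0; i < pd.size(); i++)
        {
            if (pd[i] == mySlab)
            {
                local.push_back(data[i]); // no communication needed
            }
            else
            {
                sendBuf[bufIndex[pd[i]]++] = data[i]; // counting-sort placement
            }
        }
    }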
 
-void dd_pmeredist_f(struct gmx_pme_t *pme, PmeAtomComm *atc,
-                    gmx::ArrayRef<gmx::RVec> f,
-                    gmx_bool bAddF)
+void dd_pmeredist_f(struct gmx_pme_t* pme, PmeAtomComm* atc, gmx::ArrayRef<gmx::RVec> f, gmx_bool bAddF)
 {
-    int  nnodes_comm, local_pos, buf_pos, i, node;
+    int nnodes_comm, local_pos, buf_pos, i, node;
 
-    nnodes_comm = std::min(2*atc->maxshift, atc->nslab-1);
+    nnodes_comm = std::min(2 * atc->maxshift, atc->nslab - 1);
 
     local_pos = atc->sendCount()[atc->nodeid];
     buf_pos   = 0;
@@ -409,13 +394,12 @@ void dd_pmeredist_f(struct gmx_pme_t *pme, PmeAtomComm *atc,
         if (scount > 0 || rcount > 0)
         {
             /* Communicate the forces */
-            pme_dd_sendrecv(atc, TRUE, i,
-                            atc->f[local_pos], scount*sizeof(rvec),
-                            pme->bufv[buf_pos], rcount*sizeof(rvec));
+            pme_dd_sendrecv(atc, TRUE, i, atc->f[local_pos], scount * sizeof(rvec),
+                            pme->bufv[buf_pos], rcount * sizeof(rvec));
             local_pos += scount;
         }
-        atc->slabCommSetup[commnode].buf_index  = buf_pos;
-        buf_pos                                += rcount;
+        atc->slabCommSetup[commnode].buf_index = buf_pos;
+        buf_pos += rcount;
     }
 
     local_pos = 0;
@@ -459,14 +443,16 @@ void dd_pmeredist_f(struct gmx_pme_t *pme, PmeAtomComm *atc,
     }
 }
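
On the return path the forces travel backwards through the same slab ring (bBackward = TRUE), and the elided unpack loop either overwrites or accumulates into the caller's force array depending on bAddF. A sketch of that unpack step, with a hypothetical origIndex map in place of the pd-driven bookkeeping:

    #include <array>
    #include <cstddef>
    #include <vector>

    using Rvec = std::array<double, 3>;

    void unpackForces(const std::vector<Rvec>& recvBuf,
                      const std::vector<int>&  origIndex, // receive slot -> local atom
                      std::vector<Rvec>&       f,
                      bool                     bAddF)
    {
        for (std::size_t j = 0; j < recvBuf.size(); j++)
        {
            Rvec& fi = f[origIndex[j]];
            for (int d = 0; d < 3; d++)
            {
                fi[d] = bAddF ? fi[d] + recvBuf[j][d] : recvBuf[j][d];
            }
        }
    }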
 
-void
-do_redist_pos_coeffs(struct gmx_pme_t *pme, const t_commrec *cr,
-                     gmx_bool bFirst, gmx::ArrayRef<const gmx::RVec> x, const real *data)
+void do_redist_pos_coeffs(struct gmx_pme_t*              pme,
+                          const t_commrec*               cr,
+                          gmx_bool                       bFirst,
+                          gmx::ArrayRef<const gmx::RVec> x,
+                          const real*                    data)
 {
     for (int d = pme->ndecompdim - 1; d >= 0; d--)
     {
-        gmx::ArrayRef<const gmx::RVec>  xRef;
-        const real                     *param_d;
+        gmx::ArrayRef<const gmx::RVec> xRef;
+        const real*                    param_d;
         if (d == pme->ndecompdim - 1)
         {
             /* Start out with the local coordinates and charges */
@@ -476,11 +462,11 @@ do_redist_pos_coeffs(struct gmx_pme_t *pme, const t_commrec *cr,
         else
         {
             /* Redistribute the data collected along the previous dimension */
-            const PmeAtomComm &atc = pme->atc[d + 1];
+            const PmeAtomComm& atc = pme->atc[d + 1];
             xRef                   = atc.x;
             param_d                = atc.coefficient.data();
         }
-        PmeAtomComm &atc = pme->atc[d];
+        PmeAtomComm& atc = pme->atc[d];
         atc.pd.resize(xRef.size());
         pme_calc_pidx_wrapper(xRef, pme->recipbox, &atc);
         /* Redistribute x (only once) and qA/c6A or qB/c6B */