Code beautification with uncrustify
[alexxy/gromacs.git] / src / gromacs / mdlib / pme_pp.c
index 491655c8884a2bf089e886f086598a913c65b797..1baf577d06b1f956e9fe68db7278ce9a0af2d073 100644 (file)
@@ -1,12 +1,12 @@
 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
  *
- * 
+ *
  *                This source code is part of
- * 
+ *
  *                 G   R   O   M   A   C   S
- * 
+ *
  *          GROningen MAchine for Chemical Simulations
- * 
+ *
  *                        VERSION 3.2.0
  * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * of the License, or (at your option) any later version.
- * 
+ *
  * If you want to redistribute modifications, please consider that
  * scientific software is very special. Version control is crucial -
  * bugs must be traceable. We will be happy to consider code for
  * inclusion in the official distribution, but derived work must not
  * be called official GROMACS. Details are found in the README & COPYING
  * files - if they are missing, get the official version at www.gromacs.org.
- * 
+ *
  * To help us fund GROMACS development, we humbly ask that you cite
  * the papers on the package - you can find them in the top README file.
- * 
+ *
  * For more info, check our website at http://www.gromacs.org
- * 
+ *
  * And Hey:
  * GROwing Monsters And Cloning Shrimps
  */
 
 typedef struct gmx_pme_pp {
 #ifdef GMX_MPI
-  MPI_Comm mpi_comm_mysim;
+    MPI_Comm     mpi_comm_mysim;
 #endif
-  int  nnode;        /* The number of PP node to communicate with  */
-  int  *node;        /* The PP node ranks                          */
-  int  node_peer;    /* The peer PP node rank                      */
-  int  *nat;         /* The number of atom for each PP node        */
-  int  flags_charge; /* The flags sent along with the last charges */
-  real *chargeA;
-  real *chargeB;
-  rvec *x;
-  rvec *f;
-  int  nalloc;
+    int          nnode;        /* The number of PP nodes to communicate with */
+    int         *node;         /* The PP node ranks                          */
+    int          node_peer;    /* The peer PP node rank                      */
+    int         *nat;          /* The number of atoms for each PP node       */
+    int          flags_charge; /* The flags sent along with the last charges */
+    real        *chargeA;
+    real        *chargeB;
+    rvec        *x;
+    rvec        *f;
+    int          nalloc;
 #ifdef GMX_MPI
-  MPI_Request *req;
-  MPI_Status  *stat;
+    MPI_Request *req;
+    MPI_Status  *stat;
 #endif
 } t_gmx_pme_pp;
 
 typedef struct gmx_pme_comm_n_box {
-    int    natoms;
-    matrix box;
-    int    maxshift_x;
-    int    maxshift_y;
-    real   lambda;
-    int    flags;
+    int             natoms;
+    matrix          box;
+    int             maxshift_x;
+    int             maxshift_y;
+    real            lambda;
+    int             flags;
     gmx_large_int_t step;
-    ivec   grid_size;     /* For PME grid tuning */
-    real   ewaldcoeff;    /* For PME grid tuning */
+    ivec            grid_size;  /* For PME grid tuning */
+    real            ewaldcoeff; /* For PME grid tuning */
 } gmx_pme_comm_n_box_t;
 
 typedef struct {
-  matrix vir;
-  real   energy;
-  real   dvdlambda;
-  float  cycles;
-  gmx_stop_cond_t stop_cond;
+    matrix          vir;
+    real            energy;
+    real            dvdlambda;
+    float           cycles;
+    gmx_stop_cond_t stop_cond;
 } gmx_pme_comm_vir_ene_t;
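
Both message structs above cross the PP-PME boundary as raw MPI_BYTE buffers (see the MPI_Isend/MPI_Recv calls further down), which relies on every rank in mpi_comm_mysim compiling the structs to the same size and layout. A minimal sketch of that pattern with a toy struct, assuming only the standard MPI C API; nothing below is part of this commit:

    #include <mpi.h>

    typedef struct {
        int    natoms;
        double lambda;
    } toy_header_t;

    /* Rank 0 fills the header and sends it as raw bytes with tag 0;
     * rank 1 receives it in place, mirroring the cnb exchange below. */
    static void exchange_header(MPI_Comm comm, int rank)
    {
        toy_header_t hdr;

        if (rank == 0)
        {
            hdr.natoms = 128;
            hdr.lambda = 0.5;
            MPI_Send(&hdr, sizeof(hdr), MPI_BYTE, 1, 0, comm);
        }
        else if (rank == 1)
        {
            MPI_Recv(&hdr, sizeof(hdr), MPI_BYTE, 0, 0, comm,
                     MPI_STATUS_IGNORE);
        }
    }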
 
 
@@ -114,23 +114,23 @@ typedef struct {
 
 gmx_pme_pp_t gmx_pme_pp_init(t_commrec *cr)
 {
-  struct gmx_pme_pp *pme_pp;
-  int rank;
+    struct gmx_pme_pp *pme_pp;
+    int                rank;
 
-  snew(pme_pp,1);
+    snew(pme_pp, 1);
 
 #ifdef GMX_MPI
-  pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
-  MPI_Comm_rank(cr->mpi_comm_mygroup,&rank);
-  get_pme_ddnodes(cr,rank,&pme_pp->nnode,&pme_pp->node,&pme_pp->node_peer);
-  snew(pme_pp->nat,pme_pp->nnode);
-  snew(pme_pp->req,2*pme_pp->nnode);
-  snew(pme_pp->stat,2*pme_pp->nnode);
-  pme_pp->nalloc = 0;
-  pme_pp->flags_charge = 0;
+    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
+    MPI_Comm_rank(cr->mpi_comm_mygroup, &rank);
+    get_pme_ddnodes(cr, rank, &pme_pp->nnode, &pme_pp->node, &pme_pp->node_peer);
+    snew(pme_pp->nat, pme_pp->nnode);
+    snew(pme_pp->req, 2*pme_pp->nnode);
+    snew(pme_pp->stat, 2*pme_pp->nnode);
+    pme_pp->nalloc       = 0;
+    pme_pp->flags_charge = 0;
 #endif
 
-  return pme_pp;
+    return pme_pp;
 }
 
 /* This should be faster with a real non-blocking MPI implementation */
@@ -139,132 +139,152 @@ gmx_pme_pp_t gmx_pme_pp_init(t_commrec *cr)
 static void gmx_pme_send_q_x_wait(gmx_domdec_t *dd)
 {
 #ifdef GMX_MPI
-  if (dd->nreq_pme) {
-    MPI_Waitall(dd->nreq_pme,dd->req_pme,MPI_STATUSES_IGNORE);
-    dd->nreq_pme = 0;
-  }
+    if (dd->nreq_pme)
+    {
+        MPI_Waitall(dd->nreq_pme, dd->req_pme, MPI_STATUSES_IGNORE);
+        dd->nreq_pme = 0;
+    }
 #endif
 }
 
 static void gmx_pme_send_q_x(t_commrec *cr, int flags,
-                            real *chargeA, real *chargeB,
-                            matrix box, rvec *x,
-                            real lambda,
-                            int maxshift_x, int maxshift_y,
-                            gmx_large_int_t step)
+                             real *chargeA, real *chargeB,
+                             matrix box, rvec *x,
+                             real lambda,
+                             int maxshift_x, int maxshift_y,
+                             gmx_large_int_t step)
 {
-  gmx_domdec_t *dd;
-  gmx_pme_comm_n_box_t *cnb;
-  int  n;
+    gmx_domdec_t         *dd;
+    gmx_pme_comm_n_box_t *cnb;
+    int                   n;
 
-  dd = cr->dd;
-  n = dd->nat_home;
+    dd = cr->dd;
+    n  = dd->nat_home;
 
-  if (debug)
-    fprintf(debug,"PP node %d sending to PME node %d: %d%s%s\n",
-           cr->sim_nodeid,dd->pme_nodeid,n,
-           flags & PP_PME_CHARGE ? " charges" : "",
-           flags & PP_PME_COORD  ? " coordinates" : "");
+    if (debug)
+    {
+        fprintf(debug, "PP node %d sending to PME node %d: %d%s%s\n",
+                cr->sim_nodeid, dd->pme_nodeid, n,
+                flags & PP_PME_CHARGE ? " charges" : "",
+                flags & PP_PME_COORD  ? " coordinates" : "");
+    }
 
 #ifdef GMX_PME_DELAYED_WAIT
-  /* When can not use cnb until pending communication has finished */
-  gmx_pme_send_x_q_wait(dd);
+    /* We cannot use cnb until the pending communication has finished */
+    gmx_pme_send_x_q_wait(dd);
 #endif
 
-  if (dd->pme_receive_vir_ener) {
-    /* Peer PP node: communicate all data */
-    if (dd->cnb == NULL)
-      snew(dd->cnb,1);
-    cnb = dd->cnb;
-
-    cnb->flags      = flags;
-    cnb->natoms     = n;
-    cnb->maxshift_x = maxshift_x;
-    cnb->maxshift_y = maxshift_y;
-    cnb->lambda     = lambda;
-    cnb->step       = step;
-    if (flags & PP_PME_COORD)
-      copy_mat(box,cnb->box);
+    if (dd->pme_receive_vir_ener)
+    {
+        /* Peer PP node: communicate all data */
+        if (dd->cnb == NULL)
+        {
+            snew(dd->cnb, 1);
+        }
+        cnb = dd->cnb;
+
+        cnb->flags      = flags;
+        cnb->natoms     = n;
+        cnb->maxshift_x = maxshift_x;
+        cnb->maxshift_y = maxshift_y;
+        cnb->lambda     = lambda;
+        cnb->step       = step;
+        if (flags & PP_PME_COORD)
+        {
+            copy_mat(box, cnb->box);
+        }
 #ifdef GMX_MPI
-    MPI_Isend(cnb,sizeof(*cnb),MPI_BYTE,
-             dd->pme_nodeid,0,cr->mpi_comm_mysim,
-             &dd->req_pme[dd->nreq_pme++]);
+        MPI_Isend(cnb, sizeof(*cnb), MPI_BYTE,
+                  dd->pme_nodeid, 0, cr->mpi_comm_mysim,
+                  &dd->req_pme[dd->nreq_pme++]);
 #endif
-  } else if (flags & PP_PME_CHARGE) {
+    }
+    else if (flags & PP_PME_CHARGE)
+    {
 #ifdef GMX_MPI
-    /* Communicate only the number of atoms */
-    MPI_Isend(&n,sizeof(n),MPI_BYTE,
-             dd->pme_nodeid,0,cr->mpi_comm_mysim,
-             &dd->req_pme[dd->nreq_pme++]);
+        /* Communicate only the number of atoms */
+        MPI_Isend(&n, sizeof(n), MPI_BYTE,
+                  dd->pme_nodeid, 0, cr->mpi_comm_mysim,
+                  &dd->req_pme[dd->nreq_pme++]);
 #endif
-  }
+    }
 
 #ifdef GMX_MPI
-  if (n > 0) {
-    if (flags & PP_PME_CHARGE) {
-      MPI_Isend(chargeA,n*sizeof(real),MPI_BYTE,
-               dd->pme_nodeid,1,cr->mpi_comm_mysim,
-               &dd->req_pme[dd->nreq_pme++]);
-    }
-    if (flags & PP_PME_CHARGEB) {
-      MPI_Isend(chargeB,n*sizeof(real),MPI_BYTE,
-               dd->pme_nodeid,2,cr->mpi_comm_mysim,
-               &dd->req_pme[dd->nreq_pme++]);
-    }
-    if (flags & PP_PME_COORD) {
-      MPI_Isend(x[0],n*sizeof(rvec),MPI_BYTE,
-               dd->pme_nodeid,3,cr->mpi_comm_mysim,
-               &dd->req_pme[dd->nreq_pme++]);
+    if (n > 0)
+    {
+        if (flags & PP_PME_CHARGE)
+        {
+            MPI_Isend(chargeA, n*sizeof(real), MPI_BYTE,
+                      dd->pme_nodeid, 1, cr->mpi_comm_mysim,
+                      &dd->req_pme[dd->nreq_pme++]);
+        }
+        if (flags & PP_PME_CHARGEB)
+        {
+            MPI_Isend(chargeB, n*sizeof(real), MPI_BYTE,
+                      dd->pme_nodeid, 2, cr->mpi_comm_mysim,
+                      &dd->req_pme[dd->nreq_pme++]);
+        }
+        if (flags & PP_PME_COORD)
+        {
+            MPI_Isend(x[0], n*sizeof(rvec), MPI_BYTE,
+                      dd->pme_nodeid, 3, cr->mpi_comm_mysim,
+                      &dd->req_pme[dd->nreq_pme++]);
+        }
     }
-  }
 
 #ifndef GMX_PME_DELAYED_WAIT
-  /* Wait for the data to arrive */
-  /* We can skip this wait as we are sure x and q will not be modified
-   * before the next call to gmx_pme_send_x_q or gmx_pme_receive_f.
-   */
-  gmx_pme_send_q_x_wait(dd);
+    /* Wait for the data to arrive */
+    /* We could skip this wait, as we are sure x and q will not be modified
+     * before the next call to gmx_pme_send_x_q or gmx_pme_receive_f.
+     */
+    gmx_pme_send_q_x_wait(dd);
 #endif
 #endif
 }
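
The function above fixes the MPI tag convention used throughout this file: tag 0 carries the gmx_pme_comm_n_box_t header (or the bare atom count), tags 1 and 2 the A- and B-state charges, and tag 3 the coordinates; the receive side below matches the charge messages with 1+q. A compile-only sketch naming those tags (illustrative identifiers, they do not exist in GROMACS):

    /* Illustrative only -- the code above and below uses the bare
     * literals 0..3 directly. */
    enum pp_pme_tag {
        PP_PME_TAG_CNB     = 0, /* gmx_pme_comm_n_box_t header or atom count */
        PP_PME_TAG_CHARGEA = 1, /* A-state charges                           */
        PP_PME_TAG_CHARGEB = 2, /* B-state charges                           */
        PP_PME_TAG_COORD   = 3  /* coordinates                               */
    };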
 
 void gmx_pme_send_q(t_commrec *cr,
-                   gmx_bool bFreeEnergy, real *chargeA, real *chargeB,
-                   int maxshift_x, int maxshift_y)
+                    gmx_bool bFreeEnergy, real *chargeA, real *chargeB,
+                    int maxshift_x, int maxshift_y)
 {
-  int flags;
+    int flags;
 
-  flags = PP_PME_CHARGE;
-  if (bFreeEnergy)
-    flags |= PP_PME_CHARGEB;
+    flags = PP_PME_CHARGE;
+    if (bFreeEnergy)
+    {
+        flags |= PP_PME_CHARGEB;
+    }
 
-  gmx_pme_send_q_x(cr,flags,
-                  chargeA,chargeB,NULL,NULL,0,maxshift_x,maxshift_y,-1);
+    gmx_pme_send_q_x(cr, flags,
+                     chargeA, chargeB, NULL, NULL, 0, maxshift_x, maxshift_y, -1);
 }
 
 void gmx_pme_send_x(t_commrec *cr, matrix box, rvec *x,
-                   gmx_bool bFreeEnergy, real lambda,
-                   gmx_bool bEnerVir,
-                   gmx_large_int_t step)
+                    gmx_bool bFreeEnergy, real lambda,
+                    gmx_bool bEnerVir,
+                    gmx_large_int_t step)
 {
-  int flags;
-  
-  flags = PP_PME_COORD;
-  if (bFreeEnergy)
-    flags |= PP_PME_FEP;
-  if (bEnerVir)
-    flags |= PP_PME_ENER_VIR;
-
-  gmx_pme_send_q_x(cr,flags,NULL,NULL,box,x,lambda,0,0,step);
+    int flags;
+
+    flags = PP_PME_COORD;
+    if (bFreeEnergy)
+    {
+        flags |= PP_PME_FEP;
+    }
+    if (bEnerVir)
+    {
+        flags |= PP_PME_ENER_VIR;
+    }
+
+    gmx_pme_send_q_x(cr, flags, NULL, NULL, box, x, lambda, 0, 0, step);
 }
 
 void gmx_pme_send_finish(t_commrec *cr)
 {
-  int flags;
+    int flags;
 
-  flags = PP_PME_FINISH;
+    flags = PP_PME_FINISH;
 
-  gmx_pme_send_q_x(cr,flags,NULL,NULL,NULL,NULL,0,0,0,-1);
+    gmx_pme_send_q_x(cr, flags, NULL, NULL, NULL, NULL, 0, 0, 0, -1);
 }
 
 void gmx_pme_send_switch(t_commrec *cr, ivec grid_size, real ewaldcoeff)
@@ -275,43 +295,44 @@ void gmx_pme_send_switch(t_commrec *cr, ivec grid_size, real ewaldcoeff)
     if (cr->dd->pme_receive_vir_ener)
     {
         cnb.flags = PP_PME_SWITCH;
-        copy_ivec(grid_size,cnb.grid_size);
+        copy_ivec(grid_size, cnb.grid_size);
         cnb.ewaldcoeff = ewaldcoeff;
 
         /* We send this, uncommon, message blocking to simplify the code */
-        MPI_Send(&cnb,sizeof(cnb),MPI_BYTE,
-                 cr->dd->pme_nodeid,0,cr->mpi_comm_mysim);
+        MPI_Send(&cnb, sizeof(cnb), MPI_BYTE,
+                 cr->dd->pme_nodeid, 0, cr->mpi_comm_mysim);
     }
 #endif
 }
 
 int gmx_pme_recv_q_x(struct gmx_pme_pp *pme_pp,
                      real **chargeA, real **chargeB,
-                     matrix box, rvec **x,rvec **f,
+                     matrix box, rvec **x, rvec **f,
                      int *maxshift_x, int *maxshift_y,
-                     gmx_bool *bFreeEnergy,real *lambda,
+                     gmx_bool *bFreeEnergy, real *lambda,
                      gmx_bool *bEnerVir,
                      gmx_large_int_t *step,
                      ivec grid_size, real *ewaldcoeff)
 {
     gmx_pme_comm_n_box_t cnb;
-    int  nat=0,q,messages,sender;
-    real *charge_pp;
+    int                  nat = 0, q, messages, sender;
+    real                *charge_pp;
 
     messages = 0;
 
     /* avoid compiler warning about unused variable without MPI support */
-    cnb.flags = 0;     
+    cnb.flags = 0;
 #ifdef GMX_MPI
-    do {
+    do
+    {
         /* Receive the send count, box and time step from the peer PP node */
-        MPI_Recv(&cnb,sizeof(cnb),MPI_BYTE,
-                 pme_pp->node_peer,0,
-                 pme_pp->mpi_comm_mysim,MPI_STATUS_IGNORE);
+        MPI_Recv(&cnb, sizeof(cnb), MPI_BYTE,
+                 pme_pp->node_peer, 0,
+                 pme_pp->mpi_comm_mysim, MPI_STATUS_IGNORE);
 
         if (debug)
         {
-            fprintf(debug,"PME only node receiving:%s%s%s%s\n",
+            fprintf(debug, "PME-only node receiving:%s%s%s%s\n",
                     (cnb.flags & PP_PME_CHARGE) ? " charges" : "",
                     (cnb.flags & PP_PME_COORD ) ? " coordinates" : "",
                     (cnb.flags & PP_PME_FINISH) ? " finish" : "",
@@ -321,38 +342,48 @@ int gmx_pme_recv_q_x(struct gmx_pme_pp *pme_pp,
         if (cnb.flags & PP_PME_SWITCH)
         {
             /* Special case, receive the new parameters and return */
-            copy_ivec(cnb.grid_size,grid_size);
+            copy_ivec(cnb.grid_size, grid_size);
             *ewaldcoeff = cnb.ewaldcoeff;
 
             return -2;
         }
 
-        if (cnb.flags & PP_PME_CHARGE) {
+        if (cnb.flags & PP_PME_CHARGE)
+        {
             /* Receive the send counts from the other PP nodes */
-            for(sender=0; sender<pme_pp->nnode; sender++) {
-                if (pme_pp->node[sender] == pme_pp->node_peer) {
+            for (sender = 0; sender < pme_pp->nnode; sender++)
+            {
+                if (pme_pp->node[sender] == pme_pp->node_peer)
+                {
                     pme_pp->nat[sender] = cnb.natoms;
-                } else {
-                    MPI_Irecv(&(pme_pp->nat[sender]),sizeof(pme_pp->nat[0]),
+                }
+                else
+                {
+                    MPI_Irecv(&(pme_pp->nat[sender]), sizeof(pme_pp->nat[0]),
                               MPI_BYTE,
-                              pme_pp->node[sender],0,
-                              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
+                              pme_pp->node[sender], 0,
+                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                 }
             }
             MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
             messages = 0;
 
             nat = 0;
-            for(sender=0; sender<pme_pp->nnode; sender++)
+            for (sender = 0; sender < pme_pp->nnode; sender++)
+            {
                 nat += pme_pp->nat[sender];
+            }
 
-            if (nat > pme_pp->nalloc) {
+            if (nat > pme_pp->nalloc)
+            {
                 pme_pp->nalloc = over_alloc_dd(nat);
-                srenew(pme_pp->chargeA,pme_pp->nalloc);
+                srenew(pme_pp->chargeA, pme_pp->nalloc);
                 if (cnb.flags & PP_PME_CHARGEB)
-                    srenew(pme_pp->chargeB,pme_pp->nalloc);
-                srenew(pme_pp->x,pme_pp->nalloc);
-                srenew(pme_pp->f,pme_pp->nalloc);
+                {
+                    srenew(pme_pp->chargeB, pme_pp->nalloc);
+                }
+                srenew(pme_pp->x, pme_pp->nalloc);
+                srenew(pme_pp->f, pme_pp->nalloc);
             }
 
             /* maxshift is sent when the charges are sent */
@@ -360,25 +391,34 @@ int gmx_pme_recv_q_x(struct gmx_pme_pp *pme_pp,
             *maxshift_y = cnb.maxshift_y;
 
             /* Receive the charges in place */
-            for(q=0; q<((cnb.flags & PP_PME_CHARGEB) ? 2 : 1); q++) {
+            for (q = 0; q < ((cnb.flags & PP_PME_CHARGEB) ? 2 : 1); q++)
+            {
                 if (q == 0)
+                {
                     charge_pp = pme_pp->chargeA;
+                }
                 else
+                {
                     charge_pp = pme_pp->chargeB;
+                }
                 nat = 0;
-                for(sender=0; sender<pme_pp->nnode; sender++) {
-                    if (pme_pp->nat[sender] > 0) {
+                for (sender = 0; sender < pme_pp->nnode; sender++)
+                {
+                    if (pme_pp->nat[sender] > 0)
+                    {
                         MPI_Irecv(charge_pp+nat,
                                   pme_pp->nat[sender]*sizeof(real),
                                   MPI_BYTE,
-                                  pme_pp->node[sender],1+q,
+                                  pme_pp->node[sender], 1+q,
                                   pme_pp->mpi_comm_mysim,
                                   &pme_pp->req[messages++]);
                         nat += pme_pp->nat[sender];
                         if (debug)
-                            fprintf(debug,"Received from PP node %d: %d "
-                                "charges\n",
-                                    pme_pp->node[sender],pme_pp->nat[sender]);
+                        {
+                            fprintf(debug, "Received from PP node %d: %d "
+                                    "charges\n",
+                                    pme_pp->node[sender], pme_pp->nat[sender]);
+                        }
                     }
                 }
             }
@@ -386,35 +426,44 @@ int gmx_pme_recv_q_x(struct gmx_pme_pp *pme_pp,
             pme_pp->flags_charge = cnb.flags;
         }
 
-        if (cnb.flags & PP_PME_COORD) {
+        if (cnb.flags & PP_PME_COORD)
+        {
             if (!(pme_pp->flags_charge & PP_PME_CHARGE))
+            {
                 gmx_incons("PME-only node received coordinates before charges"
-                    );
+                           );
+            }
 
             /* The box, FE flag and lambda are sent along with the coordinates
              *  */
-            copy_mat(cnb.box,box);
+            copy_mat(cnb.box, box);
             *bFreeEnergy = (cnb.flags & PP_PME_FEP);
             *lambda      = cnb.lambda;
-           *bEnerVir    = (cnb.flags & PP_PME_ENER_VIR);
+            *bEnerVir    = (cnb.flags & PP_PME_ENER_VIR);
 
             if (*bFreeEnergy && !(pme_pp->flags_charge & PP_PME_CHARGEB))
+            {
                 gmx_incons("PME-only node received free energy request, but "
-                    "did not receive B-state charges");
+                           "did not receive B-state charges");
+            }
 
             /* Receive the coordinates in place */
             nat = 0;
-            for(sender=0; sender<pme_pp->nnode; sender++) {
-                if (pme_pp->nat[sender] > 0) {
-                    MPI_Irecv(pme_pp->x[nat],pme_pp->nat[sender]*sizeof(rvec),
+            for (sender = 0; sender < pme_pp->nnode; sender++)
+            {
+                if (pme_pp->nat[sender] > 0)
+                {
+                    MPI_Irecv(pme_pp->x[nat], pme_pp->nat[sender]*sizeof(rvec),
                               MPI_BYTE,
-                              pme_pp->node[sender],3,
-                              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
+                              pme_pp->node[sender], 3,
+                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                     nat += pme_pp->nat[sender];
                     if (debug)
-                        fprintf(debug,"Received from PP node %d: %d "
-                            "coordinates\n",
-                                pme_pp->node[sender],pme_pp->nat[sender]);
+                    {
+                        fprintf(debug, "Received from PP node %d: %d "
+                                "coordinates\n",
+                                pme_pp->node[sender], pme_pp->nat[sender]);
+                    }
                 }
             }
         }
@@ -422,7 +471,8 @@ int gmx_pme_recv_q_x(struct gmx_pme_pp *pme_pp,
         /* Wait for the coordinates and/or charges to arrive */
         MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
         messages = 0;
-    } while (!(cnb.flags & (PP_PME_COORD | PP_PME_FINISH)));
+    }
+    while (!(cnb.flags & (PP_PME_COORD | PP_PME_FINISH)));
 
     *step = cnb.step;
 #endif
@@ -437,114 +487,126 @@ int gmx_pme_recv_q_x(struct gmx_pme_pp *pme_pp,
 }
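
gmx_pme_recv_q_x is the receive loop a PME-only rank spins on: it blocks until it has coordinates or a finish/switch request. The -2 return on PP_PME_SWITCH is visible above; the remaining return conventions are outside this hunk, so the caller sketch below treats a negative value as finish and a non-negative one as the received atom count, as an assumption:

    /* Hypothetical driver sketch, not from this commit; the -2 case matches
     * the PP_PME_SWITCH path above, while the finish/natoms conventions are
     * assumptions about code outside this hunk. */
    static void pme_only_loop_sketch(struct gmx_pme_pp *pme_pp)
    {
        real           *chargeA = NULL, *chargeB = NULL;
        rvec           *x = NULL, *f = NULL;
        matrix          box;
        ivec            grid_size;
        real            lambda, ewaldcoeff;
        gmx_bool        bFreeEnergy, bEnerVir;
        gmx_large_int_t step;
        int             maxshift_x, maxshift_y, natoms;

        for (;;)
        {
            natoms = gmx_pme_recv_q_x(pme_pp, &chargeA, &chargeB, box, &x, &f,
                                      &maxshift_x, &maxshift_y,
                                      &bFreeEnergy, &lambda, &bEnerVir,
                                      &step, grid_size, &ewaldcoeff);
            if (natoms == -2)
            {
                continue; /* PP_PME_SWITCH: retune with grid_size, ewaldcoeff */
            }
            if (natoms < 0)
            {
                break;    /* assumed PP_PME_FINISH convention */
            }
            /* ... evaluate the reciprocal-space part for natoms atoms ... */
        }
    }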
 
 static void receive_virial_energy(t_commrec *cr,
-                                 matrix vir,real *energy,real *dvdlambda,
-                                 float *pme_cycles) 
+                                  matrix vir, real *energy, real *dvdlambda,
+                                  float *pme_cycles)
 {
-  gmx_pme_comm_vir_ene_t cve;
+    gmx_pme_comm_vir_ene_t cve;
 
-  if (cr->dd->pme_receive_vir_ener) {
-    if (debug)
-      fprintf(debug,
-             "PP node %d receiving from PME node %d: virial and energy\n",
-             cr->sim_nodeid,cr->dd->pme_nodeid);
+    if (cr->dd->pme_receive_vir_ener)
+    {
+        if (debug)
+        {
+            fprintf(debug,
+                    "PP node %d receiving from PME node %d: virial and energy\n",
+                    cr->sim_nodeid, cr->dd->pme_nodeid);
+        }
 #ifdef GMX_MPI
-    MPI_Recv(&cve,sizeof(cve),MPI_BYTE,cr->dd->pme_nodeid,1,cr->mpi_comm_mysim,
-            MPI_STATUS_IGNORE);
+        MPI_Recv(&cve, sizeof(cve), MPI_BYTE, cr->dd->pme_nodeid, 1, cr->mpi_comm_mysim,
+                 MPI_STATUS_IGNORE);
 #else
-    memset(&cve,0,sizeof(cve));
+        memset(&cve, 0, sizeof(cve));
 #endif
-       
-    m_add(vir,cve.vir,vir);
-    *energy = cve.energy;
-    *dvdlambda += cve.dvdlambda;
-    *pme_cycles = cve.cycles;
 
-    if ( cve.stop_cond != gmx_stop_cond_none )
+        m_add(vir, cve.vir, vir);
+        *energy     = cve.energy;
+        *dvdlambda += cve.dvdlambda;
+        *pme_cycles = cve.cycles;
+
+        if (cve.stop_cond != gmx_stop_cond_none)
+        {
+            gmx_set_stop_condition(cve.stop_cond);
+        }
+    }
+    else
     {
-        gmx_set_stop_condition(cve.stop_cond);
+        *energy     = 0;
+        *pme_cycles = 0;
     }
-  } else {
-    *energy = 0;
-    *pme_cycles = 0;
-  }
 }
 
 void gmx_pme_receive_f(t_commrec *cr,
-                      rvec f[], matrix vir, 
-                      real *energy, real *dvdlambda,
-                      float *pme_cycles)
+                       rvec f[], matrix vir,
+                       real *energy, real *dvdlambda,
+                       float *pme_cycles)
 {
-  int natoms,i;
+    int natoms, i;
 
 #ifdef GMX_PME_DELAYED_WAIT
-  /* Wait for the x request to finish */
-  gmx_pme_send_q_x_wait(cr->dd);
+    /* Wait for the x request to finish */
+    gmx_pme_send_q_x_wait(cr->dd);
 #endif
 
-  natoms = cr->dd->nat_home;
+    natoms = cr->dd->nat_home;
 
-  if (natoms > cr->dd->pme_recv_f_alloc)
-  {
-      cr->dd->pme_recv_f_alloc = over_alloc_dd(natoms);
-      srenew(cr->dd->pme_recv_f_buf, cr->dd->pme_recv_f_alloc);
-  }
+    if (natoms > cr->dd->pme_recv_f_alloc)
+    {
+        cr->dd->pme_recv_f_alloc = over_alloc_dd(natoms);
+        srenew(cr->dd->pme_recv_f_buf, cr->dd->pme_recv_f_alloc);
+    }
 
-#ifdef GMX_MPI  
-  MPI_Recv(cr->dd->pme_recv_f_buf[0], 
-           natoms*sizeof(rvec),MPI_BYTE,
-          cr->dd->pme_nodeid,0,cr->mpi_comm_mysim,
-          MPI_STATUS_IGNORE);
+#ifdef GMX_MPI
+    MPI_Recv(cr->dd->pme_recv_f_buf[0],
+             natoms*sizeof(rvec), MPI_BYTE,
+             cr->dd->pme_nodeid, 0, cr->mpi_comm_mysim,
+             MPI_STATUS_IGNORE);
 #endif
 
-  for(i=0; i<natoms; i++)
-      rvec_inc(f[i],cr->dd->pme_recv_f_buf[i]);
+    for (i = 0; i < natoms; i++)
+    {
+        rvec_inc(f[i], cr->dd->pme_recv_f_buf[i]);
+    }
+
 
-  
-  receive_virial_energy(cr,vir,energy,dvdlambda,pme_cycles);
+    receive_virial_energy(cr, vir, energy, dvdlambda, pme_cycles);
 }
 
 void gmx_pme_send_force_vir_ener(struct gmx_pme_pp *pme_pp,
-                                rvec *f, matrix vir,
-                                real energy, real dvdlambda,
-                                float cycles)
+                                 rvec *f, matrix vir,
+                                 real energy, real dvdlambda,
+                                 float cycles)
 {
-  gmx_pme_comm_vir_ene_t cve; 
-  int messages,ind_start,ind_end,receiver;
+    gmx_pme_comm_vir_ene_t cve;
+    int                    messages, ind_start, ind_end, receiver;
 
-  cve.cycles = cycles;
+    cve.cycles = cycles;
 
-  /* Now the evaluated forces have to be transferred to the PP nodes */
-  messages = 0;
-  ind_end = 0;
-  for (receiver=0; receiver<pme_pp->nnode; receiver++) {
-    ind_start = ind_end;
-    ind_end   = ind_start + pme_pp->nat[receiver];
+    /* Now the evaluated forces have to be transferred to the PP nodes */
+    messages = 0;
+    ind_end  = 0;
+    for (receiver = 0; receiver < pme_pp->nnode; receiver++)
+    {
+        ind_start = ind_end;
+        ind_end   = ind_start + pme_pp->nat[receiver];
 #ifdef GMX_MPI
-    if (MPI_Isend(f[ind_start],(ind_end-ind_start)*sizeof(rvec),MPI_BYTE,
-                 pme_pp->node[receiver],0,
-                 pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]) != 0)
-      gmx_comm("MPI_Isend failed in do_pmeonly");
+        if (MPI_Isend(f[ind_start], (ind_end-ind_start)*sizeof(rvec), MPI_BYTE,
+                      pme_pp->node[receiver], 0,
+                      pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]) != 0)
+        {
+            gmx_comm("MPI_Isend failed in do_pmeonly");
+        }
 #endif
     }
-  
-  /* send virial and energy to our last PP node */
-  copy_mat(vir,cve.vir);
-  cve.energy    = energy;
-  cve.dvdlambda = dvdlambda;
-  /* check for the signals to send back to a PP node */
-  cve.stop_cond = gmx_get_stop_condition();
-  cve.cycles = cycles;
-  
-  if (debug)
-    fprintf(debug,"PME node sending to PP node %d: virial and energy\n",
-           pme_pp->node_peer);
+
+    /* send virial and energy to our last PP node */
+    copy_mat(vir, cve.vir);
+    cve.energy    = energy;
+    cve.dvdlambda = dvdlambda;
+    /* check for the signals to send back to a PP node */
+    cve.stop_cond = gmx_get_stop_condition();
+
+    cve.cycles = cycles;
+
+    if (debug)
+    {
+        fprintf(debug, "PME node sending to PP node %d: virial and energy\n",
+                pme_pp->node_peer);
+    }
 #ifdef GMX_MPI
-  MPI_Isend(&cve,sizeof(cve),MPI_BYTE,
-           pme_pp->node_peer,1,
-           pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
-  
-  /* Wait for the forces to arrive */
-  MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
+    MPI_Isend(&cve, sizeof(cve), MPI_BYTE,
+              pme_pp->node_peer, 1,
+              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
+
+    /* Wait for the forces to arrive */
+    MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
 #endif
 }
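
These last two functions form the return leg of the handshake: gmx_pme_send_force_vir_ener on the PME rank sends the forces with tag 0 to every PP node and the gmx_pme_comm_vir_ene_t struct with tag 1 to the peer, matched by gmx_pme_receive_f and receive_virial_energy above. A hedged sketch of one step from the PP side, using only calls shown in this file, with the surrounding setup assumed:

    /* One MD step on a PP rank, as a sketch; cr, box, x, f, lambda and step
     * are assumed to be set up as elsewhere in mdlib. */
    static void pp_step_sketch(t_commrec *cr, matrix box, rvec *x, rvec *f,
                               real lambda, gmx_large_int_t step)
    {
        matrix vir        = {{0}};
        real   energy     = 0;
        real   dvdlambda  = 0;
        float  pme_cycles = 0;

        /* Non-blocking send of coordinates, box and lambda to the PME rank */
        gmx_pme_send_x(cr, box, x, FALSE, lambda, TRUE, step);

        /* ... short-range work here overlaps with the remote PME mesh ... */

        /* Blocking receive of the mesh forces, virial and energy */
        gmx_pme_receive_f(cr, f, vir, &energy, &dvdlambda, &pme_cycles);
    }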