Apply re-formatting to C++ in src/ tree.
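
This pass (shown here for src/gromacs/ewald/pme_grid.cpp from alexxy/gromacs.git)
breaks long call expressions so that each argument sits on its own line once the
call exceeds the column limit, as the hunks below show for fprintf, MPI_Sendrecv,
gmx_fprintf_pdb_atomline, pmegrid_init, and snew_aligned. There are no functional
changes. The style matches what clang-format produces when argument bin-packing
is disabled; a minimal illustrative .clang-format excerpt (assumed for this
sketch, not necessarily the project's verbatim configuration):

    # Illustrative excerpt only: these options reproduce the
    # one-argument-per-line breaking seen in this diff.
    ColumnLimit: 120              # break calls longer than this
    BinPackArguments: false       # one argument per line when breaking a call
    BinPackParameters: false      # same rule for parameter lists in declarations
    AlignAfterOpenBracket: Align  # align wrapped arguments under the first one
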
diff --git a/src/gromacs/ewald/pme_grid.cpp b/src/gromacs/ewald/pme_grid.cpp
index 3d277d314666eb1d123aa07be7455d5a8b69fca0..3f3eb48c238518c9280bef29762cef993f179365 100644
--- a/src/gromacs/ewald/pme_grid.cpp
+++ b/src/gromacs/ewald/pme_grid.cpp
@@ -108,8 +108,12 @@ void gmx_sum_qgrid_dd(gmx_pme_t* pme, real* grid, const int direction)
         /* Copy data to contiguous send buffer */
         if (debug)
         {
-            fprintf(debug, "PME send rank %d %d -> %d grid start %d Communicating %d to %d\n",
-                    pme->nodeid, overlap->nodeid, send_id, pme->pmegrid_start_iy,
+            fprintf(debug,
+                    "PME send rank %d %d -> %d grid start %d Communicating %d to %d\n",
+                    pme->nodeid,
+                    overlap->nodeid,
+                    send_id,
+                    pme->pmegrid_start_iy,
                     send_index0 - pme->pmegrid_start_iy,
                     send_index0 - pme->pmegrid_start_iy + send_nindex);
         }
@@ -131,15 +135,28 @@ void gmx_sum_qgrid_dd(gmx_pme_t* pme, real* grid, const int direction)
 
         datasize = pme->pmegrid_nx * pme->nkz;
 
-        MPI_Sendrecv(overlap->sendbuf.data(), send_nindex * datasize, GMX_MPI_REAL, send_id, ipulse,
-                     overlap->recvbuf.data(), recv_nindex * datasize, GMX_MPI_REAL, recv_id, ipulse,
-                     overlap->mpi_comm, &stat);
+        MPI_Sendrecv(overlap->sendbuf.data(),
+                     send_nindex * datasize,
+                     GMX_MPI_REAL,
+                     send_id,
+                     ipulse,
+                     overlap->recvbuf.data(),
+                     recv_nindex * datasize,
+                     GMX_MPI_REAL,
+                     recv_id,
+                     ipulse,
+                     overlap->mpi_comm,
+                     &stat);
 
         /* Get data from contiguous recv buffer */
         if (debug)
         {
-            fprintf(debug, "PME recv rank %d %d <- %d grid start %d Communicating %d to %d\n",
-                    pme->nodeid, overlap->nodeid, recv_id, pme->pmegrid_start_iy,
+            fprintf(debug,
+                    "PME recv rank %d %d <- %d grid start %d Communicating %d to %d\n",
+                    pme->nodeid,
+                    overlap->nodeid,
+                    recv_id,
+                    pme->pmegrid_start_iy,
                     recv_index0 - pme->pmegrid_start_iy,
                     recv_index0 - pme->pmegrid_start_iy + recv_nindex);
         }
@@ -203,18 +220,36 @@ void gmx_sum_qgrid_dd(gmx_pme_t* pme, real* grid, const int direction)
 
         if (debug)
         {
-            fprintf(debug, "PME send rank %d %d -> %d grid start %d Communicating %d to %d\n",
-                    pme->nodeid, overlap->nodeid, send_id, pme->pmegrid_start_ix,
+            fprintf(debug,
+                    "PME send rank %d %d -> %d grid start %d Communicating %d to %d\n",
+                    pme->nodeid,
+                    overlap->nodeid,
+                    send_id,
+                    pme->pmegrid_start_ix,
                     send_index0 - pme->pmegrid_start_ix,
                     send_index0 - pme->pmegrid_start_ix + send_nindex);
-            fprintf(debug, "PME recv rank %d %d <- %d grid start %d Communicating %d to %d\n",
-                    pme->nodeid, overlap->nodeid, recv_id, pme->pmegrid_start_ix,
+            fprintf(debug,
+                    "PME recv rank %d %d <- %d grid start %d Communicating %d to %d\n",
+                    pme->nodeid,
+                    overlap->nodeid,
+                    recv_id,
+                    pme->pmegrid_start_ix,
                     recv_index0 - pme->pmegrid_start_ix,
                     recv_index0 - pme->pmegrid_start_ix + recv_nindex);
         }
 
-        MPI_Sendrecv(sendptr, send_nindex * datasize, GMX_MPI_REAL, send_id, ipulse, recvptr,
-                     recv_nindex * datasize, GMX_MPI_REAL, recv_id, ipulse, overlap->mpi_comm, &stat);
+        MPI_Sendrecv(sendptr,
+                     send_nindex * datasize,
+                     GMX_MPI_REAL,
+                     send_id,
+                     ipulse,
+                     recvptr,
+                     recv_nindex * datasize,
+                     GMX_MPI_REAL,
+                     recv_id,
+                     ipulse,
+                     overlap->mpi_comm,
+                     &stat);
 
         /* ADD data from contiguous recv buffer */
         if (direction == GMX_SUM_GRID_FORWARD)
@@ -244,8 +279,8 @@ int copy_pmegrid_to_fftgrid(const gmx_pme_t* pme, const real* pmegrid, real* fft
     int  pmeidx, fftidx;
 
     /* Dimensions should be identical for A/B grid, so we just use A here */
-    gmx_parallel_3dfft_real_limits(pme->pfft_setup[grid_index], local_fft_ndata, local_fft_offset,
-                                   local_fft_size);
+    gmx_parallel_3dfft_real_limits(
+            pme->pfft_setup[grid_index], local_fft_ndata, local_fft_offset, local_fft_size);
 
     local_pme_size[0] = pme->pmegrid_nx;
     local_pme_size[1] = pme->pmegrid_ny;
@@ -280,14 +315,31 @@ int copy_pmegrid_to_fftgrid(const gmx_pme_t* pme, const real* pmegrid, real* fft
                     val = 100 * pmegrid[pmeidx];
                     if (pmegrid[pmeidx] != 0)
                     {
-                        gmx_fprintf_pdb_atomline(fp, epdbATOM, pmeidx, "CA", ' ', "GLY", ' ', pmeidx,
-                                                 ' ', 5.0 * ix, 5.0 * iy, 5.0 * iz, 1.0, val, "");
+                        gmx_fprintf_pdb_atomline(fp,
+                                                 epdbATOM,
+                                                 pmeidx,
+                                                 "CA",
+                                                 ' ',
+                                                 "GLY",
+                                                 ' ',
+                                                 pmeidx,
+                                                 ' ',
+                                                 5.0 * ix,
+                                                 5.0 * iy,
+                                                 5.0 * iz,
+                                                 1.0,
+                                                 val,
+                                                 "");
                     }
                     if (pmegrid[pmeidx] != 0)
                     {
-                        fprintf(fp2, "%-12s  %5d  %5d  %5d  %12.5e\n", "qgrid",
-                                pme->pmegrid_start_ix + ix, pme->pmegrid_start_iy + iy,
-                                pme->pmegrid_start_iz + iz, pmegrid[pmeidx]);
+                        fprintf(fp2,
+                                "%-12s  %5d  %5d  %5d  %12.5e\n",
+                                "qgrid",
+                                pme->pmegrid_start_ix + ix,
+                                pme->pmegrid_start_iy + iy,
+                                pme->pmegrid_start_iz + iz,
+                                pmegrid[pmeidx]);
                     }
 #endif
                 }
@@ -331,8 +383,8 @@ int copy_fftgrid_to_pmegrid(struct gmx_pme_t* pme, const real* fftgrid, real* pm
     c1 = omp_cyc_start();
 #endif
     /* Dimensions should be identical for A/B grid, so we just use A here */
-    gmx_parallel_3dfft_real_limits(pme->pfft_setup[grid_index], local_fft_ndata, local_fft_offset,
-                                   local_fft_size);
+    gmx_parallel_3dfft_real_limits(
+            pme->pfft_setup[grid_index], local_fft_ndata, local_fft_offset, local_fft_size);
 
     local_pme_size[0] = pme->pmegrid_nx;
     local_pme_size[1] = pme->pmegrid_ny;
@@ -630,7 +682,10 @@ static void make_subgrid_division(const ivec n, int ovl, int nthread, ivec nsub)
         gmx_fatal(FARGS,
                   "PME grid thread division (%d x %d x %d) does not match the total number of "
                   "threads (%d)",
-                  nsub[XX], nsub[YY], nsub[ZZ], nthread);
+                  nsub[XX],
+                  nsub[YY],
+                  nsub[ZZ],
+                  nthread);
     }
 }
 
@@ -675,17 +730,20 @@ void pmegrids_init(pmegrids_t* grids,
 
         if (debug)
         {
-            fprintf(debug, "pmegrid thread local division: %d x %d x %d\n", grids->nc[XX],
-                    grids->nc[YY], grids->nc[ZZ]);
-            fprintf(debug, "pmegrid %d %d %d max thread pmegrid %d %d %d\n", nx, ny, nz, nst[XX],
-                    nst[YY], nst[ZZ]);
+            fprintf(debug,
+                    "pmegrid thread local division: %d x %d x %d\n",
+                    grids->nc[XX],
+                    grids->nc[YY],
+                    grids->nc[ZZ]);
+            fprintf(debug, "pmegrid %d %d %d max thread pmegrid %d %d %d\n", nx, ny, nz, nst[XX], nst[YY], nst[ZZ]);
         }
 
         snew(grids->grid_th, grids->nthread);
         t        = 0;
         gridsize = nst[XX] * nst[YY] * nst[ZZ];
         set_gridsize_alignment(&gridsize, pme_order);
-        snew_aligned(grids->grid_all, grids->nthread * gridsize + (grids->nthread + 1) * GMX_CACHE_SEP,
+        snew_aligned(grids->grid_all,
+                     grids->nthread * gridsize + (grids->nthread + 1) * GMX_CACHE_SEP,
                      SIMD4_ALIGNMENT);
 
         for (x = 0; x < grids->nc[XX]; x++)
@@ -694,10 +752,18 @@ void pmegrids_init(pmegrids_t* grids,
             {
                 for (z = 0; z < grids->nc[ZZ]; z++)
                 {
-                    pmegrid_init(&grids->grid_th[t], x, y, z, (n[XX] * (x)) / grids->nc[XX],
-                                 (n[YY] * (y)) / grids->nc[YY], (n[ZZ] * (z)) / grids->nc[ZZ],
-                                 (n[XX] * (x + 1)) / grids->nc[XX], (n[YY] * (y + 1)) / grids->nc[YY],
-                                 (n[ZZ] * (z + 1)) / grids->nc[ZZ], TRUE, pme_order,
+                    pmegrid_init(&grids->grid_th[t],
+                                 x,
+                                 y,
+                                 z,
+                                 (n[XX] * (x)) / grids->nc[XX],
+                                 (n[YY] * (y)) / grids->nc[YY],
+                                 (n[ZZ] * (z)) / grids->nc[ZZ],
+                                 (n[XX] * (x + 1)) / grids->nc[XX],
+                                 (n[YY] * (y + 1)) / grids->nc[YY],
+                                 (n[ZZ] * (z + 1)) / grids->nc[ZZ],
+                                 TRUE,
+                                 pme_order,
                                  grids->grid_all + GMX_CACHE_SEP + t * (gridsize + GMX_CACHE_SEP));
                     t++;
                 }
@@ -742,7 +808,9 @@ void pmegrids_init(pmegrids_t* grids,
         }
         if (debug != nullptr)
         {
-            fprintf(debug, "pmegrid thread grid communication range in %c: %d\n", 'x' + d,
+            fprintf(debug,
+                    "pmegrid thread grid communication range in %c: %d\n",
+                    'x' + d,
                     grids->nthread_comm[d]);
         }
         /* It should be possible to make grids->nthread_comm[d]==grids->nc[d]
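
With such a configuration in place at the repository root, a file can be
reformatted in place with, for example:

    clang-format -i src/gromacs/ewald/pme_grid.cpp

(assuming clang-format is on PATH; GROMACS drives reformatting through its own
developer scripts, so this exact invocation is illustrative, not the project's
documented workflow).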