* the research papers on the package. Check out http://www.gromacs.org.
*/
-#ifdef HAVE_CONFIG_H
-#include <config.h>
-#endif
+#include "gmxpre.h"
+
+#include "config.h"
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <string.h>
#include <stdlib.h>
-#include "typedefs.h"
-#include "smalloc.h"
-#include "gmx_fatal.h"
-#include "gmx_fatal_collective.h"
-#include "vec.h"
-#include "domdec.h"
-#include "domdec_network.h"
-#include "nrnb.h"
-#include "pbc.h"
-#include "chargegroup.h"
-#include "constr.h"
-#include "mdatoms.h"
-#include "names.h"
-#include "force.h"
-#include "pme.h"
-#include "mdrun.h"
-#include "nsgrid.h"
-#include "shellfc.h"
-#include "mtop_util.h"
-#include "gmx_ga2la.h"
-#include "macros.h"
+#include <assert.h>
+
+#include "gromacs/bonded/bonded.h"
+#include "gromacs/legacyheaders/typedefs.h"
+#include "gromacs/legacyheaders/network.h"
+#include "gromacs/math/vec.h"
+#include "gromacs/legacyheaders/domdec.h"
+#include "gromacs/legacyheaders/domdec_network.h"
+#include "gromacs/legacyheaders/nrnb.h"
+#include "gromacs/legacyheaders/chargegroup.h"
+#include "gromacs/legacyheaders/constr.h"
+#include "gromacs/legacyheaders/mdatoms.h"
+#include "gromacs/legacyheaders/names.h"
+#include "gromacs/legacyheaders/force.h"
+#include "gromacs/legacyheaders/pme.h"
+#include "gromacs/legacyheaders/mdrun.h"
+#include "gromacs/legacyheaders/nsgrid.h"
+#include "gromacs/legacyheaders/shellfc.h"
+#include "gromacs/topology/mtop_util.h"
+#include "gromacs/legacyheaders/gmx_ga2la.h"
+#include "gromacs/legacyheaders/macros.h"
#include "nbnxn_search.h"
-#include "bondf.h"
-#include "gmx_omp_nthreads.h"
-#include "gpu_utils.h"
+#include "gromacs/legacyheaders/bonded-threading.h"
+#include "gromacs/legacyheaders/gmx_omp_nthreads.h"
+#include "gromacs/legacyheaders/gpu_utils.h"
-#include "gromacs/fileio/futil.h"
+#include "gromacs/utility/futil.h"
#include "gromacs/fileio/gmxfio.h"
#include "gromacs/fileio/pdbio.h"
+#include "gromacs/imd/imd.h"
+#include "gromacs/mdlib/nb_verlet.h"
+#include "gromacs/pbcutil/ishift.h"
+#include "gromacs/pbcutil/pbc.h"
+#include "gromacs/pulling/pull.h"
+#include "gromacs/pulling/pull_rotation.h"
+#include "gromacs/swap/swapcoords.h"
#include "gromacs/timing/wallcycle.h"
+#include "gromacs/utility/basenetwork.h"
+#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
-#include "gromacs/swap/swapcoords.h"
#include "gromacs/utility/qsort_threadsafe.h"
-#include "gromacs/pulling/pull.h"
-#include "gromacs/pulling/pull_rotation.h"
+#include "gromacs/utility/smalloc.h"
#define DDRANK(dd, rank) (rank)
#define DDMASTERRANK(dd) (dd->masterrank)
/* Factor to account for pressure scaling during nstlist steps */
#define DD_PRES_SCALE_MARGIN 1.02
-/* Allowed performance loss before we DLB or warn */
-#define DD_PERF_LOSS 0.05
+/* Turn on DLB when the load imbalance causes this amount of total loss.
+ * There is a bit of overhead with DLB, and even with DLB enabled it is
+ * difficult to push the load imbalance below 2%.
+ */
+#define DD_PERF_LOSS_DLB_ON 0.02
+
+/* Warn when PP or PP/PME load imbalance causes at least this performance loss */
+#define DD_PERF_LOSS_WARN 0.05
#define DD_CELL_F_SIZE(dd, di) ((dd)->nc[(dd)->dim[(di)]]+1+(di)*2+1+(di))
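A minimal sketch of how the two new thresholds divide responsibilities, using the dd_force_imb_perf_loss() helper that appears further down in this file; the wrapper function and its message text are hypothetical:

    /* Hypothetical wrapper: DLB can engage at a 2% measured loss, while the
     * user-facing note still waits for a 5% loss. */
    static void check_imbalance_sketch(gmx_domdec_t *dd, FILE *fplog)
    {
        float loss = dd_force_imb_perf_loss(dd); /* fraction of time lost */

        if (loss >= DD_PERF_LOSS_DLB_ON)
        {
            /* candidate point for turning on dynamic load balancing */
        }
        if (loss >= DD_PERF_LOSS_WARN && fplog != NULL)
        {
            fprintf(fplog, "NOTE: %.1f %% of CPU time lost to load imbalance\n",
                    loss*100);
        }
    }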
{
gmx_domdec_master_t *ma = NULL;
int buf2[2], *ibuf, i, ncg_home = 0, *cg = NULL, nat_home = 0;
- t_block *cgs_gl;
if (state_local->ddp_count == dd->comm->master_cg_ddp_count)
{
if (state_local->ddp_count == dd->ddp_count)
{
+ /* The local state and DD are in sync, use the DD indices */
ncg_home = dd->ncg_home;
cg = dd->index_gl;
nat_home = dd->nat_home;
}
else if (state_local->ddp_count_cg_gl == state_local->ddp_count)
{
+ /* The DD is out of sync with the local state, but we have stored
+ * the cg indices with the local state, so we can use those.
+ */
+ t_block *cgs_gl;
+
cgs_gl = &dd->comm->cgs_gl;
ncg_home = state_local->ncg_gl;
gmx_incons("Attempted to collect a vector for a state for which the charge group distribution is unknown");
}
- buf2[0] = dd->ncg_home;
- buf2[1] = dd->nat_home;
+ buf2[0] = ncg_home;
+ buf2[1] = nat_home;
if (DDMASTER(dd))
{
ma = dd->ma;
/* Collect the charge group indices on the master */
dd_gatherv(dd,
- dd->ncg_home*sizeof(int), dd->index_gl,
+ ncg_home*sizeof(int), cg,
DDMASTER(dd) ? ma->ibuf : NULL,
DDMASTER(dd) ? ma->ibuf+dd->nnodes : NULL,
DDMASTER(dd) ? ma->cg : NULL);
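The replacement above matters for correctness: the buffer handed to the gather must be the same index set selected in the branch at the top of this function, and dd->index_gl is stale when the cg indices come from the local state. For orientation, a hedged plain-MPI sketch of what dd_gatherv amounts to here, assuming it wraps MPI_Gatherv on byte buffers over dd->mpi_comm_all; rcounts and displs are hypothetical names for the per-rank byte counts and offsets the master builds in ma->ibuf:

    /* Sketch only: rcounts/displs are significant on the master rank only. */
    MPI_Gatherv(cg, ncg_home*sizeof(int), MPI_BYTE,
                DDMASTER(dd) ? ma->cg : NULL,
                rcounts, displs, MPI_BYTE,
                DDMASTERRANK(dd), dd->mpi_comm_all);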
gmx_domdec_t *dd, matrix box, gmx_ddbox_t *ddbox)
{
rvec grid_s[2], *grid_r = NULL, cx, r;
- char fname[STRLEN], format[STRLEN], buf[22];
+ char fname[STRLEN], buf[22];
FILE *out;
int a, i, d, z, y, x;
matrix tric;
snew(grid_r, 2*dd->nnodes);
}
- dd_gather(dd, 2*sizeof(rvec), grid_s[0], DDMASTER(dd) ? grid_r[0] : NULL);
+ dd_gather(dd, 2*sizeof(rvec), grid_s, DDMASTER(dd) ? grid_r : NULL);
if (DDMASTER(dd))
{
}
}
sprintf(fname, "%s_%s.pdb", fn, gmx_step_str(step, buf));
- sprintf(format, "%s%s\n", get_pdbformat(), "%6.2f%6.2f");
out = gmx_fio_fopen(fname, "w");
gmx_write_pdb_box(out, dd->bScrewPBC ? epbcSCREW : epbcXYZ, box);
a = 1;
cx[YY] = grid_r[i*2+y][YY];
cx[ZZ] = grid_r[i*2+z][ZZ];
mvmul(tric, cx, r);
- fprintf(out, format, "ATOM", a++, "CA", "GLY", ' ', 1+i,
- ' ', 10*r[XX], 10*r[YY], 10*r[ZZ], 1.0, vol);
+ gmx_fprintf_pdb_atomline(out, epdbATOM, a++, "CA", ' ', "GLY", ' ', i+1, ' ',
+ 10*r[XX], 10*r[YY], 10*r[ZZ], 1.0, vol, "");
}
}
}
gmx_mtop_t *mtop, t_commrec *cr,
int natoms, rvec x[], matrix box)
{
- char fname[STRLEN], format[STRLEN], format4[STRLEN], buf[22];
+ char fname[STRLEN], buf[22];
FILE *out;
int i, ii, resnr, c;
char *atomname, *resname;
sprintf(fname, "%s_%s_n%d.pdb", fn, gmx_step_str(step, buf), cr->sim_nodeid);
- sprintf(format, "%s%s\n", get_pdbformat(), "%6.2f%6.2f");
- sprintf(format4, "%s%s\n", get_pdbformat4(), "%6.2f%6.2f");
-
out = gmx_fio_fopen(fname, "w");
fprintf(out, "TITLE %s\n", title);
{
b = dd->comm->zones.n + 1;
}
- fprintf(out, strlen(atomname) < 4 ? format : format4,
- "ATOM", (ii+1)%100000,
- atomname, resname, ' ', resnr%10000, ' ',
- 10*x[i][XX], 10*x[i][YY], 10*x[i][ZZ], 1.0, b);
+ gmx_fprintf_pdb_atomline(out, epdbATOM, ii+1, atomname, ' ', resname, ' ', resnr, ' ',
+ 10*x[i][XX], 10*x[i][YY], 10*x[i][ZZ], 1.0, b, "");
}
fprintf(out, "TER\n");
if (debug)
{
- fprintf(debug, "Receive coordinates from PP nodes:");
+ fprintf(debug, "Receive coordinates from PP ranks:");
for (x = 0; x < *nmy_ddnodes; x++)
{
fprintf(debug, " %d", (*my_ddnodes)[x]);
if (!bLocalCG[dd->index_gl[i]])
{
fprintf(stderr,
- "DD node %d, %s: cg %d, global cg %d is not marked in bLocalCG (ncg_home %d)\n", dd->rank, where, i+1, dd->index_gl[i]+1, dd->ncg_home);
+ "DD rank %d, %s: cg %d, global cg %d is not marked in bLocalCG (ncg_home %d)\n", dd->rank, where, i+1, dd->index_gl[i]+1, dd->ncg_home);
nerr++;
}
}
}
if (ngl != dd->ncg_tot)
{
- fprintf(stderr, "DD node %d, %s: In bLocalCG %d cgs are marked as local, whereas there are %d\n", dd->rank, where, ngl, dd->ncg_tot);
+ fprintf(stderr, "DD rank %d, %s: In bLocalCG %d cgs are marked as local, whereas there are %d\n", dd->rank, where, ngl, dd->ncg_tot);
nerr++;
}
{
if (have[dd->gatindex[a]] > 0)
{
- fprintf(stderr, "DD node %d: global atom %d occurs twice: index %d and %d\n", dd->rank, dd->gatindex[a]+1, have[dd->gatindex[a]], a+1);
+ fprintf(stderr, "DD rank %d: global atom %d occurs twice: index %d and %d\n", dd->rank, dd->gatindex[a]+1, have[dd->gatindex[a]], a+1);
}
else
{
{
if (a >= dd->nat_tot)
{
- fprintf(stderr, "DD node %d: global atom %d marked as local atom %d, which is larger than nat_tot (%d)\n", dd->rank, i+1, a+1, dd->nat_tot);
+ fprintf(stderr, "DD rank %d: global atom %d marked as local atom %d, which is larger than nat_tot (%d)\n", dd->rank, i+1, a+1, dd->nat_tot);
nerr++;
}
else
have[a] = 1;
if (dd->gatindex[a] != i)
{
- fprintf(stderr, "DD node %d: global atom %d marked as local atom %d, which has global atom index %d\n", dd->rank, i+1, a+1, dd->gatindex[a]+1);
+ fprintf(stderr, "DD rank %d: global atom %d marked as local atom %d, which has global atom index %d\n", dd->rank, i+1, a+1, dd->gatindex[a]+1);
nerr++;
}
}
if (ngl != dd->nat_tot)
{
fprintf(stderr,
- "DD node %d, %s: %d global atom indices, %d local atoms\n",
+ "DD rank %d, %s: %d global atom indices, %d local atoms\n",
dd->rank, where, ngl, dd->nat_tot);
}
for (a = 0; a < dd->nat_tot; a++)
if (have[a] == 0)
{
fprintf(stderr,
- "DD node %d, %s: local atom %d, global %d has no global index\n",
+ "DD rank %d, %s: local atom %d, global %d has no global index\n",
dd->rank, where, a+1, dd->gatindex[a]+1);
}
}
if (nerr > 0)
{
- gmx_fatal(FARGS, "DD node %d, %s: %d atom/cg index inconsistencies",
+ gmx_fatal(FARGS, "DD rank %d, %s: %d atom/cg index inconsistencies",
dd->rank, where, nerr);
}
}
/* This error should never be triggered under normal
* circumstances, but you never know ...
*/
- gmx_fatal(FARGS, "Step %s: The domain decomposition grid has shifted too much in the %c-direction around cell %d %d %d. This should not have happened. Running with less nodes might avoid this issue.",
+ gmx_fatal(FARGS, "Step %s: The domain decomposition grid has shifted too much in the %c-direction around cell %d %d %d. This should not have happened. Running with fewer ranks might avoid this issue.",
gmx_step_str(step, buf),
dim2char(dim), dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
}
}
}
+enum {
+ setcellsizeslbLOCAL, setcellsizeslbMASTER, setcellsizeslbPULSE_ONLY
+};
+
+/* Set the domain boundaries. Use for static (or no) load balancing,
+ * and also for the starting state for dynamic load balancing.
+ * setmode determines if and where the boundaries are stored; use the enum above.
+ * Returns the number of communication pulses in npulse.
+ */
static void set_dd_cell_sizes_slb(gmx_domdec_t *dd, gmx_ddbox_t *ddbox,
- gmx_bool bMaster, ivec npulse)
+ int setmode, ivec npulse)
{
gmx_domdec_comm_t *comm;
int d, j;
{
/* Uniform grid */
cell_dx = ddbox->box_size[d]/dd->nc[d];
- if (bMaster)
+ switch (setmode)
{
- for (j = 0; j < dd->nc[d]+1; j++)
- {
- dd->ma->cell_x[d][j] = ddbox->box0[d] + j*cell_dx;
- }
- }
- else
- {
- comm->cell_x0[d] = ddbox->box0[d] + (dd->ci[d] )*cell_dx;
- comm->cell_x1[d] = ddbox->box0[d] + (dd->ci[d]+1)*cell_dx;
+ case setcellsizeslbMASTER:
+ for (j = 0; j < dd->nc[d]+1; j++)
+ {
+ dd->ma->cell_x[d][j] = ddbox->box0[d] + j*cell_dx;
+ }
+ break;
+ case setcellsizeslbLOCAL:
+ comm->cell_x0[d] = ddbox->box0[d] + (dd->ci[d] )*cell_dx;
+ comm->cell_x1[d] = ddbox->box0[d] + (dd->ci[d]+1)*cell_dx;
+ break;
+ default:
+ break;
}
cellsize = cell_dx*ddbox->skew_fac[d];
- while (cellsize*npulse[d] < comm->cutoff && npulse[d] < dd->nc[d]-1)
+ while (cellsize*npulse[d] < comm->cutoff)
{
npulse[d]++;
}
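Dropping the npulse[d] < dd->nc[d]-1 cap lets the loop compute the true pulse count, so the too-small-box case is now caught by the explicit fatal-error check below instead of being silently clamped. The loop is equivalent to a ceiling division; e.g. a 1.2 nm cut-off with a 0.5 nm skewed cell size gives npulse[d] = 3:

    /* equivalent closed form (sketch; the loop form avoids float edge cases) */
    npulse[d] = max(npulse[d], (int)ceil(comm->cutoff/cellsize));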
* all cell borders in a loop to obtain identical values
* to the master distribution case and to determine npulse.
*/
- if (bMaster)
+ if (setmode == setcellsizeslbMASTER)
{
cell_x = dd->ma->cell_x[d];
}
}
cellsize_min[d] = min(cellsize_min[d], cellsize);
}
- if (!bMaster)
+ if (setmode == setcellsizeslbLOCAL)
{
comm->cell_x0[d] = cell_x[dd->ci[d]];
comm->cell_x1[d] = cell_x[dd->ci[d]+1];
+ }
+ if (setmode != setcellsizeslbMASTER)
+ {
sfree(cell_x);
}
}
if (d < ddbox->npbcdim &&
dd->nc[d] > 1 && npulse[d] >= dd->nc[d])
{
- gmx_fatal_collective(FARGS, NULL, dd,
- "The box size in direction %c (%f) times the triclinic skew factor (%f) is too small for a cut-off of %f with %d domain decomposition cells, use 1 or more than %d %s or increase the box size in this direction",
- dim2char(d), ddbox->box_size[d], ddbox->skew_fac[d],
- comm->cutoff,
- dd->nc[d], dd->nc[d],
- dd->nnodes > dd->nc[d] ? "cells" : "processors");
+ char error_string[STRLEN];
+
+ sprintf(error_string,
+ "The box size in direction %c (%f) times the triclinic skew factor (%f) is too small for a cut-off of %f with %d domain decomposition cells, use 1 or more than %d %s or increase the box size in this direction",
+ dim2char(d), ddbox->box_size[d], ddbox->skew_fac[d],
+ comm->cutoff,
+ dd->nc[d], dd->nc[d],
+ dd->nnodes > dd->nc[d] ? "cells" : "ranks");
+
+ if (setmode == setcellsizeslbLOCAL)
+ {
+            gmx_fatal_collective(FARGS, NULL, dd, "%s", error_string);
+ }
+ else
+ {
+            gmx_fatal(FARGS, "%s", error_string);
+ }
}
}
}
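With the boolean argument replaced by setmode, the three call sites in this patch exercise each enum value; collected here as a reference sketch:

    /* local boundaries for this rank (was bMaster == FALSE) */
    set_dd_cell_sizes_slb(dd, ddbox, setcellsizeslbLOCAL, npulse);
    /* master stores the boundaries of all cells (was bMaster == TRUE) */
    set_dd_cell_sizes_slb(dd, ddbox, setcellsizeslbMASTER, npulse);
    /* compute only the pulse counts, store no boundaries (new mode) */
    set_dd_cell_sizes_slb(dd, ddbox, setcellsizeslbPULSE_ONLY, npulse);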
else
{
- set_dd_cell_sizes_slb(dd, ddbox, FALSE, npulse);
+ set_dd_cell_sizes_slb(dd, ddbox, setcellsizeslbLOCAL, npulse);
realloc_comm_ind(dd, npulse);
}
int i, cg_gl;
int *ibuf, buf2[2] = { 0, 0 };
gmx_bool bMaster = DDMASTER(dd);
+
if (bMaster)
{
ma = dd->ma;
check_screw_box(box);
}
- set_dd_cell_sizes_slb(dd, ddbox, TRUE, npulse);
+ set_dd_cell_sizes_slb(dd, ddbox, setcellsizeslbMASTER, npulse);
distribute_cg(fplog, step, box, ddbox->tric_dir, cgs, pos, dd);
for (i = 0; i < dd->nnodes; i++)
fprintf(fplog, "\n");
fprintf(stderr, "\n");
- if (lossf >= DD_PERF_LOSS)
+ if (lossf >= DD_PERF_LOSS_WARN)
{
sprintf(buf,
"NOTE: %.1f %% of the available CPU time was lost due to load imbalance\n"
fprintf(fplog, "%s\n", buf);
fprintf(stderr, "%s\n", buf);
}
- if (npme > 0 && fabs(lossp) >= DD_PERF_LOSS)
+ if (npme > 0 && fabs(lossp) >= DD_PERF_LOSS_WARN)
{
sprintf(buf,
- "NOTE: %.1f %% performance was lost because the PME nodes\n"
- " had %s work to do than the PP nodes.\n"
- " You might want to %s the number of PME nodes\n"
+ "NOTE: %.1f %% performance was lost because the PME ranks\n"
+ " had %s work to do than the PP ranks.\n"
+ " You might want to %s the number of PME ranks\n"
" or %s the cut-off and the grid spacing.\n",
fabs(lossp*100),
(lossp < 0) ? "less" : "more",
if (fplog)
{
fprintf(fplog,
- "Domain decomposition nodeid %d, coordinates %d %d %d\n\n",
+ "Domain decomposition rank %d, coordinates %d %d %d\n\n",
dd->rank, dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
}
if (debug)
{
fprintf(debug,
- "Domain decomposition nodeid %d, coordinates %d %d %d\n\n",
+ "Domain decomposition rank %d, coordinates %d %d %d\n\n",
dd->rank, dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
}
}
}
else if (fplog)
{
- fprintf(fplog, "#pmenodes (%d) is not a multiple of nx*ny (%d*%d) or nx*nz (%d*%d)\n", cr->npmenodes, dd->nc[XX], dd->nc[YY], dd->nc[XX], dd->nc[ZZ]);
+ fprintf(fplog, "Number of PME-only ranks (%d) is not a multiple of nx*ny (%d*%d) or nx*nz (%d*%d)\n", cr->npmenodes, dd->nc[XX], dd->nc[YY], dd->nc[XX], dd->nc[ZZ]);
fprintf(fplog,
"Will not use a Cartesian communicator for PP <-> PME\n\n");
}
if (fplog)
{
- fprintf(fplog, "Cartesian nodeid %d, coordinates %d %d %d\n\n",
+ fprintf(fplog, "Cartesian rank %d, coordinates %d %d %d\n\n",
cr->sim_nodeid, dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
}
case ddnoPP_PME:
if (fplog)
{
- fprintf(fplog, "Order of the nodes: PP first, PME last\n");
+ fprintf(fplog, "Order of the ranks: PP first, PME last\n");
}
break;
case ddnoINTERLEAVE:
*/
if (fplog)
{
- fprintf(fplog, "Interleaving PP and PME nodes\n");
+ fprintf(fplog, "Interleaving PP and PME ranks\n");
}
comm->pmenodes = dd_pmenodes(cr);
break;
if (fplog)
{
- fprintf(fplog, "This is a %s only node\n\n",
+ fprintf(fplog, "This rank does only %s work.\n\n",
(cr->duty & DUTY_PP) ? "particle-particle" : "PME-mesh");
}
}
if (fplog)
{
fprintf(fplog,
- "\nInitializing Domain Decomposition on %d nodes\n", cr->nnodes);
+ "\nInitializing Domain Decomposition on %d ranks\n", cr->nnodes);
}
snew(dd, 1);
if (dd->nc[XX] == 0)
{
bC = (dd->bInterCGcons && rconstr > r_bonded_limit);
- sprintf(buf, "Change the number of nodes or mdrun option %s%s%s",
+ sprintf(buf, "Change the number of ranks or mdrun option %s%s%s",
!bC ? "-rdd" : "-rcon",
comm->eDLB != edlbNO ? " or -dds" : "",
bC ? " or your LINCS settings" : "");
gmx_fatal_collective(FARGS, cr, NULL,
- "There is no domain decomposition for %d nodes that is compatible with the given box and a minimum cell size of %g nm\n"
+ "There is no domain decomposition for %d ranks that is compatible with the given box and a minimum cell size of %g nm\n"
"%s\n"
"Look in the log file for details on the domain decomposition",
cr->nnodes-cr->npmenodes, limit, buf);
if (fplog)
{
fprintf(fplog,
- "Domain decomposition grid %d x %d x %d, separate PME nodes %d\n",
+ "Domain decomposition grid %d x %d x %d, separate PME ranks %d\n",
dd->nc[XX], dd->nc[YY], dd->nc[ZZ], cr->npmenodes);
}
if (cr->nnodes - dd->nnodes != cr->npmenodes)
{
gmx_fatal_collective(FARGS, cr, NULL,
- "The size of the domain decomposition grid (%d) does not match the number of nodes (%d). The total number of nodes is %d",
+ "The size of the domain decomposition grid (%d) does not match the number of ranks (%d). The total number of ranks is %d",
dd->nnodes, cr->nnodes - cr->npmenodes, cr->nnodes);
}
if (cr->npmenodes > dd->nnodes)
{
gmx_fatal_collective(FARGS, cr, NULL,
- "The number of separate PME nodes (%d) is larger than the number of PP nodes (%d), this is not supported.", cr->npmenodes, dd->nnodes);
+ "The number of separate PME ranks (%d) is larger than the number of PP ranks (%d), this is not supported.", cr->npmenodes, dd->nnodes);
}
if (cr->npmenodes > 0)
{
}
else
{
- set_dd_cell_sizes_slb(dd, ddbox, FALSE, np);
+ set_dd_cell_sizes_slb(dd, ddbox, setcellsizeslbPULSE_ONLY, np);
fprintf(fplog, "The initial number of communication pulses is:");
for (d = 0; d < dd->ndim; d++)
{
if (dd->pme_nodeid >= 0)
{
gmx_fatal_collective(FARGS, NULL, dd,
- "Can not have separate PME nodes without PME electrostatics");
+                             "Cannot have separate PME ranks without PME electrostatics");
}
}
corner[YY] -= corner[ZZ]*box[ZZ][YY]/box[ZZ][ZZ];
}
/* Apply the triclinic couplings */
+ assert(ddbox->npbcdim <= DIM);
for (i = YY; i < ddbox->npbcdim; i++)
{
for (j = XX; j < i; j++)
if (DDMASTER(dd))
{
bTurnOnDLB =
- (dd_force_imb_perf_loss(dd) >= DD_PERF_LOSS);
+ (dd_force_imb_perf_loss(dd) >= DD_PERF_LOSS_DLB_ON);
if (debug)
{
fprintf(debug, "step %s, imb loss %f\n",
if (vsite != NULL)
{
/* Now we have updated mdatoms, we can do the last vsite bookkeeping */
- split_vsites_over_threads(top_local->idef.il, mdatoms, FALSE, vsite);
+ split_vsites_over_threads(top_local->idef.il, top_local->idef.iparams,
+ mdatoms, FALSE, vsite);
}
if (shellfc)
if (!(cr->duty & DUTY_PME))
{
/* Send the charges and/or c6/sigmas to our PME only node */
- gmx_pme_send_parameters(cr, mdatoms->nChargePerturbed, mdatoms->nTypePerturbed,
+ gmx_pme_send_parameters(cr,
+ fr->ic,
+ mdatoms->nChargePerturbed, mdatoms->nTypePerturbed,
mdatoms->chargeA, mdatoms->chargeB,
mdatoms->sqrt_c6A, mdatoms->sqrt_c6B,
mdatoms->sigmaA, mdatoms->sigmaB,
dd_make_local_swap_groups(dd, ir->swap);
}
+ /* Update the local atoms to be communicated via the IMD protocol if bIMD is TRUE. */
+ dd_make_local_IMD_atoms(ir->bIMD, dd, ir->imd);
+
add_dd_statistics(dd);
/* Make sure we only count the cycles for this DD partitioning */