</dl></dd>
<dt><b>nstcalclr: (-1) [steps]</b></dt>
-<dd><dl compact>
-Controls the period between calculations of long-range forces. Useful
-only with <b>cutoff-scheme</b>=<b>group</b>.
+<dd>
+Controls the period between calculations of long-range forces when
+using the group cut-off scheme (<b>cutoff-scheme</b>=<b>group</b>).
+<dl compact>
<dt><b>1</b></dt>
<dd>Calculate the long-range forces every single step. This is useful
to have separate neighbor lists with buffers for electrostatics and Van
<dd>Calculate long-range forces on steps where neighbor searching is
performed. While this is the default value, you might want to consider
updating the long-range forces more frequently.</dd>
+</dl>
Note that twin-range force evaluation might be enabled automatically
by PP-PME load balancing. This is done in order to maintain the chosen
Van der Waals interaction radius even if the load balancing is
interactions with a longer cutoff than the PME electrostatics every
2-3 steps), the load balancing will have also a small effect on
Lennard-Jones, since the short-range cutoff (inside which forces are
-evaluated every step) is changed.</dl>
+evaluated every step) is changed.
</dd>
<h3>Bonds</h3>
<dl>
-<dt><b>constraints<!--QuietIdx-->constraint algorithms<!--QuietEIdx-->:</b></dt>
+<dt><b>constraints<!--QuietIdx-->constraint algorithms<!--EQuietIdx-->:</b></dt>
<dd><dl compact>
<dt><b>none</b></dt>
<dd>No constraints except for those defined explicitly in the topology,
/* Use OpenMP multithreading */
#cmakedefine GMX_OPENMP
-/* Use old threading (domain decomp force calc) code */
-#cmakedefine GMX_THREAD_SHM_FDECOMP
-
/* Ignore calls to nice(3) */
#cmakedefine GMX_NO_NICE
static gmx_bool bSort=FALSE;
static int ewald_geometry=eewg3D;
static int nnodes=1;
- static int nthreads=1;
static int pme_order=0;
static rvec grid = { -1, -1, -1 };
static real rc = 0.0;
static t_pargs pa[] = {
{ "-np", FALSE, etINT, {&nnodes},
"Number of nodes, must be the same as used for [TT]grompp[tt]" },
- { "-nt", FALSE, etINT, {&nthreads},
- "Number of threads to start on each node" },
{ "-v", FALSE, etBOOL,{&bVerbose},
"Be loud and noisy" },
{ "-sort", FALSE, etBOOL,{&bSort},
if (nnodes > 1)
gmx_fatal(FARGS,"GROMACS compiled without MPI support - can't do parallel runs");
#endif
-#ifndef GMX_THREAD_SHM_FDECOMP
- if(nthreads > 1)
- gmx_fatal(FARGS,"GROMACS compiled without threads support - can only use one thread");
-#endif
/* Open log files on all processors */
open_log(ftp2fn(efLOG,NFILE,fnm),cr);
} gmx_nodecomm_t;
-typedef struct {
- int dummy;
-} gmx_commrec_thread_t;
-
typedef struct {
/* The nodeids in one sim are numbered sequentially from 0.
* All communication within some simulation should happen
int nnodes_intra; /* total number of intra nodes */
int nnodes_pp_intra; /* total number of PP intra nodes */
-#ifdef GMX_THREAD_SHM_FDECOMP
- gmx_commrec_thread_t thread;
-#endif
-
gmx_nodecomm_t nc;
/* For domain decomposition */
#include <config.h>
#endif
-#ifdef GMX_THREAD_SHM_FDECOMP
-#include <pthread.h>
-#endif
-
#include <math.h>
#include <string.h>
#include "sysstuff.h"
fprintf(debug,"Initiating neighbourlist (ielec=%d, ivdw=%d, free=%d) for %s interactions,\nwith %d SR, %d LR atoms.\n",
nl->ielec,nl->ivdw,nl->free_energy,gmx_nblist_geometry_names[nl->igeometry],maxsr,maxlr);
}
-
-#ifdef GMX_THREAD_SHM_FDECOMP
- nl->counter = 0;
- snew(nl->mtx,1);
- pthread_mutex_init(nl->mtx,NULL);
-#endif
}
}