# all tokens are separated by any mix of ',' commas, '=' equal signs
# and whitespace (space, tab)
#
+
+# Teach uncrustify about the GROMACS attribute aliases that we use
+# to hide compiler differences. This means that declarations like
+#
+# int i, j;
+# int nthreads gmx_unused;
+#
+# do not align i with gmx_unused.
+set ATTRIBUTE gmx_unused gmx_inline gmx_restrict
int nthreads, f_thread_t *f_t)
{
int t, i;
+ int nthreads_loop gmx_unused;
/* This reduction can run over any number of threads */
-#pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntBonded)) private(t) schedule(static)
+ nthreads_loop = gmx_omp_nthreads_get(emntBonded);
+#pragma omp parallel for num_threads(nthreads_loop) private(t) schedule(static)
for (i = 0; i < n; i++)
{
for (t = 1; t < nthreads; t++)
t_grpopts *opts;
gmx_groups_t *groups;
gmx_molblock_t *molblock;
+ int nthreads gmx_unused;
bLJPME = EVDW_PME(ir->vdwtype);
alook = gmx_mtop_atomlookup_init(mtop);
-#pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntDefault)) schedule(static)
+ nthreads = gmx_omp_nthreads_get(emntDefault);
+#pragma omp parallel for num_threads(nthreads) schedule(static)
for (i = 0; i < md->nr; i++)
{
int g, ag, molb;
int start, end;
rvec *x1, *x2;
real dvdl_constr;
+ int nthreads gmx_unused;
s1 = &ems1->s;
s2 = &ems2->s;
x1 = s1->x;
x2 = s2->x;
-#pragma omp parallel num_threads(gmx_omp_nthreads_get(emntUpdate))
+ nthreads = gmx_omp_nthreads_get(emntUpdate);
+#pragma omp parallel num_threads(nthreads)
{
int gf, i, m;
nbnxn_pairlist_t **nbl;
int coulkt, vdwkt = 0;
int nb;
+ int nthreads gmx_unused;
nnbl = nbl_list->nnbl;
nbl = nbl_list->nbl;
gmx_incons("Unsupported VdW interaction type");
}}
-#pragma omp parallel for schedule(static) num_threads(gmx_omp_nthreads_get(emntNonbonded))
+ nthreads = gmx_omp_nthreads_get(emntNonbonded);
+#pragma omp parallel for schedule(static) num_threads(nthreads)
for (nb = 0; nb < nnbl; nb++)
{{
nbnxn_atomdata_output_t *out;
int coult;
int vdwt;
int nb;
+ int nthreads gmx_unused;
nnbl = nbl_list->nnbl;
nbl = nbl_list->nbl;
gmx_incons("Unsupported vdwtype in nbnxn reference kernel");
}
-#pragma omp parallel for schedule(static) num_threads(gmx_omp_nthreads_get(emntNonbonded))
+ nthreads = gmx_omp_nthreads_get(emntNonbonded);
+#pragma omp parallel for schedule(static) num_threads(nthreads)
for (nb = 0; nb < nnbl; nb++)
{
nbnxn_atomdata_output_t *out;
nbnxn_pairlist_t **nbl;
int coulkt, vdwkt = 0;
int nb;
+ int nthreads gmx_unused;
nnbl = nbl_list->nnbl;
nbl = nbl_list->nbl;
gmx_incons("Unsupported VdW interaction type");
}
-#pragma omp parallel for schedule(static) num_threads(gmx_omp_nthreads_get(emntNonbonded))
+ nthreads = gmx_omp_nthreads_get(emntNonbonded);
+#pragma omp parallel for schedule(static) num_threads(nthreads)
for (nb = 0; nb < nnbl; nb++)
{
nbnxn_atomdata_output_t *out;
nbnxn_pairlist_t **nbl;
int coulkt, vdwkt = 0;
int nb;
+ int nthreads gmx_unused;
nnbl = nbl_list->nnbl;
nbl = nbl_list->nbl;
gmx_incons("Unsupported VdW interaction type");
}
-#pragma omp parallel for schedule(static) num_threads(gmx_omp_nthreads_get(emntNonbonded))
+ nthreads = gmx_omp_nthreads_get(emntNonbonded);
+#pragma omp parallel for schedule(static) num_threads(nthreads)
for (nb = 0; nb < nnbl; nb++)
{
nbnxn_atomdata_output_t *out;
float *bbcz;
nbnxn_bb_t *bb;
int ncd, sc;
+ int nthreads gmx_unused;
grid = &nbs->grid[0];
bbcz = grid->bbcz_simple;
bb = grid->bb_simple;
-#pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntPairsearch)) schedule(static)
+ nthreads = gmx_omp_nthreads_get(emntPairsearch);
+#pragma omp parallel for num_threads(nthreads) schedule(static)
for (sc = 0; sc < grid->nc; sc++)
{
int c, tx, na;
{
int nsci, ncj4, nexcl;
int n, i;
+ int nthreads gmx_unused;
if (nblc->bSimple)
{
/* Each thread should copy its own data to the combined arrays,
* as otherwise data will go back and forth between different caches.
*/
-#pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntPairsearch)) schedule(static)
+ nthreads = gmx_omp_nthreads_get(emntPairsearch);
+#pragma omp parallel for num_threads(nthreads) schedule(static)
for (n = 0; n < nnbl; n++)
{
int sci_offset;
}
else
{
-#pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntUpdate)) schedule(static)
+ nth = gmx_omp_nthreads_get(emntUpdate);
+
+#pragma omp parallel for num_threads(nth) schedule(static)
for (i = start; i < nrend; i++)
{
copy_rvec(upd->xp[i], state->x[i]);