src/gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.c: warning: includes "config.h" unnecessarily
src/gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn_common.h: warning: should include "config.h"
src/gromacs/mdlib/nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.c: warning: includes "config.h" unnecessarily
-src/gromacs/mdlib/nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn_inner.h: warning: should include "config.h"
# These are specific to Folding@Home, and easiest to suppress here
*: warning: includes non-local file as "corewrap.h"
-C N
[ dihedrals ]
N CA CB CG2 torsion_ILE_N_CA_CB_CG2_mult1
- B CA CB CG2 torsion_ILE_N_CA_CB_CG2_mult2
+ N CA CB CG2 torsion_ILE_N_CA_CB_CG2_mult2
[ impropers ]
- -C CA N H
- CA OC1 C OC2
-
+ -C CA N H
+ CA OC1 C OC2
+
[ CVAL ]
[ atoms ]
N N -0.38210 1
C +N
[ dihedrals ]
N CA CB CG2 torsion_ILE_N_CA_CB_CG2_mult1
- B CA CB CG2 torsion_ILE_N_CA_CB_CG2_mult2
+ N CA CB CG2 torsion_ILE_N_CA_CB_CG2_mult2
[ impropers ]
CA +N C O
-
+
[ NVAL ]
[ atoms ]
N N3 0.05770 1
{
corner[ZZ] = zones->size[z].x1[ZZ];
}
- if (dd->ndim == 1 && box[ZZ][YY] != 0)
- {
- /* With 1D domain decomposition the cg's are not in
- * the triclinic box, but triclinic x-y and rectangular y-z.
- * Shift y back, so it will later end up at 0.
- */
- corner[YY] -= corner[ZZ]*box[ZZ][YY]/box[ZZ][ZZ];
- }
/* Apply the triclinic couplings */
assert(ddbox->npbcdim <= DIM);
for (i = YY; i < ddbox->npbcdim; i++)
{
for (j = XX; j < i; j++)
{
- corner[j] += corner[i]*box[i][j]/box[i][i];
+ /* With 1D domain decomposition the cg's are not in
+ * a triclinic box, but triclinic x-y and rectangular y/x-z.
+ * So we should ignore the coupling for the non
+ * domain-decomposed dimension of the pair x and y.
+ */
+ if (!(dd->ndim == 1 && ((dd->dim[0] == XX && j == YY) ||
+ (dd->dim[0] == YY && j == XX))))
+ {
+ corner[j] += corner[i]*box[i][j]/box[i][i];
+ }
}
}
if (c == 0)
int i, ret, cpu_count, cpu_set;
gmx_bool bAllSet;
#endif
+#ifdef GMX_LIB_MPI
+ gmx_bool bAllSet_All;
+#endif
assert(hw_opt);
if (!bAfterOpenmpInit)
bAllSet = bAllSet && (CPU_ISSET(i, &mask_current) != 0);
}
+#ifdef GMX_LIB_MPI
+ MPI_Allreduce(&bAllSet, &bAllSet_All, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
+ bAllSet = bAllSet_All;
+#endif
+
if (!bAllSet)
{
if (hw_opt->thread_affinity == threadaffAUTO)
warning_error(wi, "nstcalclr must be a positive number (divisor of nstcalclr), or -1 to follow nstlist.");
}
- if (EEL_PME(ir->coulombtype) && ir->rcoulomb > ir->rvdw && ir->nstcalclr > 1)
+ if (EEL_PME(ir->coulombtype) && ir->rcoulomb > ir->rlist && ir->nstcalclr > 1)
{
warning_error(wi, "When used with PME, the long-range component of twin-range interactions must be updated every step (nstcalclr)");
}
sprintf(err_buf, "wall-ewald-zfac should be >= 2");
CHECK(ir->wall_ewald_zfac < 2);
}
+ if ((ir->ewald_geometry == eewg3DC) && (ir->ePBC != epbcXY) &&
+ EEL_FULL(ir->coulombtype))
+ {
+ sprintf(warn_buf, "With %s and ewald_geometry = %s you should use pbc = %s",
+ eel_names[ir->coulombtype], eewg_names[eewg3DC], epbc_names[epbcXY]);
+ warning(wi, warn_buf);
+ }
+ if ((ir->epsilon_surface != 0) && EEL_FULL(ir->coulombtype))
+ {
+ if (ir->cutoff_scheme == ecutsVERLET)
+ {
+ sprintf(warn_buf, "Since molecules/charge groups are broken using the Verlet scheme, you can not use a dipole correction to the %s electrostatics.",
+ eel_names[ir->coulombtype]);
+ warning(wi, warn_buf);
+ }
+ else
+ {
+ sprintf(warn_buf, "Dipole corrections to %s electrostatics only work if all charge groups that can cross PBC boundaries are dipoles. If this is not the case set epsilon_surface to 0",
+ eel_names[ir->coulombtype]);
+ warning_note(wi, warn_buf);
+ }
+ }
if (ir_vdw_switched(ir))
{
{
do_simtemp_params(ir);
}
+
+ /* Because sc-coul (=FALSE by default) only acts on the lambda state
+ * setup and not on the old way of specifying the free-energy setup,
+ * we should check for using soft-core when not needed, since that
+ * can complicate the sampling significantly.
+ * Note that we only check for the automated coupling setup.
+ * If the (advanced) user does FEP through manual topology changes,
+ * this check will not be triggered.
+ */
+ if (ir->efep != efepNO && ir->fepvals->n_lambda == 0 &&
+ ir->fepvals->sc_alpha != 0 &&
+ ((opts->couple_lam0 == ecouplamVDW && opts->couple_lam1 == ecouplamVDWQ) ||
+ (opts->couple_lam1 == ecouplamVDW && opts->couple_lam0 == ecouplamVDWQ)))
+ {
+ warning(wi, "You are using soft-core interactions while the Van der Waals interactions are not decoupled (note that the sc-coul option is only active when using lambda states). Although this will not lead to errors, you will need much more sampling than without soft-core interactions. Consider using sc-alpha=0.");
+ }
}
else
{
nscan = sscanf(groups, "%d %d %d", &pcrd->group[0], &pcrd->group[1], &idum);
if (nscan != 2)
{
- fprintf(stderr, "ERROR: %s should have %d components\n", buf, 2);
+ fprintf(stderr, "ERROR: %s should contain %d pull group indices\n",
+ buf, 2);
nerror++;
}
sprintf(buf, "pull-coord%d-origin", i);
if (strcmp(pgnames[g], "") == 0)
{
- gmx_fatal(FARGS, "Group pull_group%d required by grompp was undefined.", g);
+ gmx_fatal(FARGS, "Pull option pull_group%d required by grompp has not been set.", g);
}
ig = search_string(pgnames[g], grps->nr, gnames);
int *ind_r; /* constraint index for updating atom data */
int ind_nalloc; /* allocation size of ind and ind_r */
tensor vir_r_m_dr; /* temporary variable for virial calculation */
+ real dhdlambda; /* temporary variable for lambda derivative */
} lincs_thread_t;
typedef struct gmx_lincsdata {
static void do_lincsp(rvec *x, rvec *f, rvec *fp, t_pbc *pbc,
struct gmx_lincsdata *lincsd, int th,
real *invmass,
- int econq, real *dvdlambda,
+ int econq, gmx_bool bCalcDHDL,
gmx_bool bCalcVir, tensor rmdf)
{
int b0, b1, b, i, j, k, n;
lincs_update_atoms(lincsd, th, 1.0, sol, r,
(econq != econqForce) ? invmass : NULL, fp);
- if (dvdlambda != NULL)
+ if (bCalcDHDL)
{
-#pragma omp barrier
+ real dhdlambda;
+
+ dhdlambda = 0;
for (b = b0; b < b1; b++)
{
- *dvdlambda -= sol[b]*lincsd->ddist[b];
+ dhdlambda -= sol[b]*lincsd->ddist[b];
}
- /* 10 ncons flops */
+
+ lincsd->th[th].dhdlambda = dhdlambda;
}
if (bCalcVir)
struct gmx_lincsdata *lincsd, int th,
real *invmass,
t_commrec *cr,
- gmx_bool bCalcLambda,
+ gmx_bool bCalcDHDL,
real wangle, int *warn,
real invdt, rvec *v,
gmx_bool bCalcVir, tensor vir_r_m_dr)
/* 16 ncons flops */
}
- if (nlocat != NULL && bCalcLambda)
+ if (nlocat != NULL && (bCalcDHDL || bCalcVir))
{
- /* In lincs_update_atoms thread might cross-read mlambda */
+ /* In lincs_update_atoms threads might cross-read mlambda */
#pragma omp barrier
/* Only account for local atoms */
}
}
+ if (bCalcDHDL)
+ {
+ real dhdl;
+
+ dhdl = 0;
+ for (b = b0; b < b1; b++)
+ {
+ /* Note that this is dhdl*dt^2, the dt^2 factor is corrected
+ * later after the contributions are reduced over the threads.
+ */
+ dhdl -= lincsd->mlambda[b]*lincsd->ddist[b];
+ }
+ lincsd->th[th].dhdlambda = dhdl;
+ }
+
if (bCalcVir)
{
/* Constraint virial */
t_nrnb *nrnb,
int maxwarn, int *warncount)
{
+ gmx_bool bCalcDHDL;
char buf[STRLEN], buf2[22], buf3[STRLEN];
int i, warn, p_imax;
real ncons_loc, p_ssd, p_max = 0;
bOK = TRUE;
+ /* This boolean should be set by a flag passed to this routine.
+ * We can also easily check if any constraint length is changed,
+ * if not dH/dlambda=0 and we can also set the boolean to FALSE.
+ */
+ bCalcDHDL = (ir->efep != efepNO && dvdlambda != NULL);
+
if (lincsd->nc == 0 && cr->dd == NULL)
{
if (bLog || bEner)
if (econq == econqCoord)
{
+ /* We can't use bCalcDHDL here, since NULL can be passed for dvdlambda
+ * also with efep!=fepNO.
+ */
if (ir->efep != efepNO)
{
if (md->nMassPerturbed && lincsd->matlam != md->lambda)
do_lincs(x, xprime, box, pbc, lincsd, th,
md->invmass, cr,
- bCalcVir || (ir->efep != efepNO),
+ bCalcDHDL,
ir->LincsWarnAngle, &warn,
invdt, v, bCalcVir,
th == 0 ? vir_r_m_dr : lincsd->th[th].vir_r_m_dr);
}
- if (ir->efep != efepNO)
- {
- real dt_2, dvdl = 0;
-
- /* TODO This should probably use invdt, so that sd integrator scaling works properly */
- dt_2 = 1.0/(ir->delta_t*ir->delta_t);
- for (i = 0; (i < lincsd->nc); i++)
- {
- dvdl -= lincsd->mlambda[i]*dt_2*lincsd->ddist[i];
- }
- *dvdlambda += dvdl;
- }
-
if (bLog && fplog && lincsd->nc > 0)
{
fprintf(fplog, " Rel. Constraint Deviation: RMS MAX between atoms\n");
int th = gmx_omp_get_thread_num();
do_lincsp(x, xprime, min_proj, pbc, lincsd, th,
- md->invmass, econq, ir->efep != efepNO ? dvdlambda : NULL,
+ md->invmass, econq, bCalcDHDL,
bCalcVir, th == 0 ? vir_r_m_dr : lincsd->th[th].vir_r_m_dr);
}
}
+ if (bCalcDHDL)
+ {
+ /* Reduce the dH/dlambda contributions over the threads */
+ real dhdlambda;
+ int th;
+
+ dhdlambda = 0;
+ for (th = 0; th < lincsd->nth; th++)
+ {
+ dhdlambda += lincsd->th[th].dhdlambda;
+ }
+ if (econq == econqCoord)
+ {
+ /* dhdlambda contains dH/dlambda*dt^2, correct for this */
+ /* TODO This should probably use invdt, so that sd integrator scaling works properly */
+ dhdlambda /= ir->delta_t*ir->delta_t;
+ }
+ *dvdlambda += dhdlambda;
+ }
+
if (bCalcVir && lincsd->nth > 1)
{
for (i = 1; i < lincsd->nth; i++)
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_simd_utils.h"
static gmx_inline void gmx_simdcall
-gmx_load_simd_4xn_interactions(int gmx_unused excl,
+gmx_load_simd_4xn_interactions(int excl,
gmx_exclfilter gmx_unused filter_S0,
gmx_exclfilter gmx_unused filter_S1,
gmx_exclfilter gmx_unused filter_S2,
gmx_exclfilter gmx_unused filter_S3,
- const char gmx_unused *interaction_mask_indices,
real gmx_unused *simd_interaction_array,
gmx_simd_bool_t *interact_S0,
gmx_simd_bool_t *interact_S1,
*interact_S1 = gmx_checkbitmask_pb(mask_pr_S, filter_S1);
*interact_S2 = gmx_checkbitmask_pb(mask_pr_S, filter_S2);
*interact_S3 = gmx_checkbitmask_pb(mask_pr_S, filter_S3);
-#endif
-#ifdef GMX_SIMD_IBM_QPX
+#elif defined GMX_SIMD_IBM_QPX
const int size = GMX_SIMD_REAL_WIDTH * sizeof(real);
- *interact_S0 = gmx_load_interaction_mask_pb(size*interaction_mask_indices[0], simd_interaction_array);
- *interact_S1 = gmx_load_interaction_mask_pb(size*interaction_mask_indices[1], simd_interaction_array);
- *interact_S2 = gmx_load_interaction_mask_pb(size*interaction_mask_indices[2], simd_interaction_array);
- *interact_S3 = gmx_load_interaction_mask_pb(size*interaction_mask_indices[3], simd_interaction_array);
+ *interact_S0 = gmx_load_interaction_mask_pb(size*((excl >> (0 * UNROLLJ)) & 0xF), simd_interaction_array);
+ *interact_S1 = gmx_load_interaction_mask_pb(size*((excl >> (1 * UNROLLJ)) & 0xF), simd_interaction_array);
+ *interact_S2 = gmx_load_interaction_mask_pb(size*((excl >> (2 * UNROLLJ)) & 0xF), simd_interaction_array);
+ *interact_S3 = gmx_load_interaction_mask_pb(size*((excl >> (3 * UNROLLJ)) & 0xF), simd_interaction_array);
+#else
+#error "Need implementation of gmx_load_simd_4xn_interactions"
#endif
}
gmx_load_simd_4xn_interactions(l_cj[cjind].excl,
filter_S0, filter_S1,
filter_S2, filter_S3,
-#ifdef GMX_SIMD_IBM_QPX
- l_cj[cjind].interaction_mask_indices,
nbat->simd_interaction_array,
-#else
- /* The struct fields do not exist
- except on BlueGene/Q */
- NULL,
- NULL,
-#endif
&interact_S0, &interact_S1,
&interact_S2, &interact_S3);
#endif /* CHECK_EXCLS */
typedef struct {
int cj; /* The j-cluster */
unsigned int excl; /* The exclusion (interaction) bits */
- /* Indices into the arrays of SIMD interaction masks. */
- char interaction_mask_indices[4];
} nbnxn_cj_t;
/* In nbnxn_ci_t the integer shift contains the shift in the lower 7 bits.
#include "nbnxn_search.h"
-#include "config.h"
-
#include <assert.h>
#include <math.h>
#include <string.h>
inner_e = ge - (se << na_cj_2log);
nbl->cj[found].excl &= ~(1U<<((inner_i<<na_cj_2log) + inner_e));
-/* The next code line is usually not needed. We do not want to version
- * away the above line, because there is logic that relies on being
- * able to detect easily whether any exclusions exist. */
-#if (defined GMX_SIMD_IBM_QPX)
- nbl->cj[found].interaction_mask_indices[inner_i] &= ~(1U << inner_e);
-#endif
}
}
}
* the research papers on the package. Check out http://www.gromacs.org.
*/
-#include "config.h"
-
#if GMX_SIMD_REAL_WIDTH >= NBNXN_CPU_CLUSTER_I_SIZE
#define STRIDE_S (GMX_SIMD_REAL_WIDTH)
#else
/* Store cj and the interaction mask */
nbl->cj[nbl->ncj].cj = CI_TO_CJ_SIMD_4XN(gridj->cell0) + cj;
nbl->cj[nbl->ncj].excl = get_imask_simd_4xn(remove_sub_diag, ci, cj);
-#ifdef GMX_SIMD_IBM_QPX
- nbl->cj[nbl->ncj].interaction_mask_indices[0] = (nbl->cj[nbl->ncj].excl & 0x000F) >> (0 * 4);
- nbl->cj[nbl->ncj].interaction_mask_indices[1] = (nbl->cj[nbl->ncj].excl & 0x00F0) >> (1 * 4);
- nbl->cj[nbl->ncj].interaction_mask_indices[2] = (nbl->cj[nbl->ncj].excl & 0x0F00) >> (2 * 4);
- nbl->cj[nbl->ncj].interaction_mask_indices[3] = (nbl->cj[nbl->ncj].excl & 0xF000) >> (3 * 4);
-#endif
nbl->ncj++;
}
/* Increase the closing index in i super-cell list */