* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
-#include "config.h"
+#include "gmxpre.h"
-#include "types/commrec.h"
-#include "network.h"
-#include "calcgrid.h"
-#include "pme.h"
-#include "domdec.h"
-#include "nbnxn_cuda_data_mgmt.h"
-#include "force.h"
-#include "macros.h"
-#include "md_logging.h"
#include "pme_loadbal.h"
+#include "config.h"
+
+#include "gromacs/legacyheaders/calcgrid.h"
+#include "gromacs/legacyheaders/domdec.h"
+#include "gromacs/legacyheaders/force.h"
+#include "gromacs/legacyheaders/macros.h"
+#include "gromacs/legacyheaders/md_logging.h"
+#include "gromacs/legacyheaders/network.h"
+#include "gromacs/legacyheaders/pme.h"
+#include "gromacs/legacyheaders/sim_util.h"
+#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/math/vec.h"
+#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_data_mgmt.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/smalloc.h"
while (sp <= 1.001*pme_lb->setup[pme_lb->cur].spacing || !grid_ok);
set->rcut_coulomb = pme_lb->cut_spacing*sp;
+ if (set->rcut_coulomb < pme_lb->rcut_coulomb_start)
+ {
+ /* This is unlikely, but can happen when e.g. continuing from
+ * a checkpoint after equilibration where the box shrank a lot.
+ * We want to avoid rcoulomb getting smaller than rvdw,
+ * and decreasing rcoulomb further could cause other issues.
+ */
+ set->rcut_coulomb = pme_lb->rcut_coulomb_start;
+ }
if (pme_lb->cutoff_scheme == ecutsVERLET)
{
pme_lb->cur = pme_lb->start - 1;
}
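The rcut_coulomb guard added above is a lower-bound clamp: when the load balancer derives a Coulomb cutoff from the PME grid spacing, the result must never fall below the cutoff the run started with, which already satisfies rcoulomb >= rvdw. A minimal standalone sketch of that invariant, with hypothetical values:

```c
#include <stdio.h>

/* Hypothetical standalone version of the clamp above: a
 * spacing-derived cutoff must not undercut the starting Coulomb
 * cutoff, which is known to be >= rvdw. */
static double clamp_rcut_coulomb(double rcut_from_spacing,
                                 double rcut_coulomb_start)
{
    if (rcut_from_spacing < rcut_coulomb_start)
    {
        return rcut_coulomb_start;
    }
    return rcut_from_spacing;
}

int main(void)
{
    /* E.g. continuing from a checkpoint after the box shrank a lot:
     * the spacing suggests 0.85 nm, but the run started at 1.0 nm. */
    printf("rcut = %.2f nm\n", clamp_rcut_coulomb(0.85, 1.0));
    return 0;
}
```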
-gmx_bool pme_load_balance(pme_load_balancing_t pme_lb,
- t_commrec *cr,
- FILE *fp_err,
- FILE *fp_log,
- t_inputrec *ir,
- t_state *state,
- double cycles,
- interaction_const_t *ic,
- nonbonded_verlet_t *nbv,
- gmx_pme_t *pmedata,
- gmx_int64_t step)
+gmx_bool pme_load_balance(pme_load_balancing_t pme_lb,
+ t_commrec *cr,
+ FILE *fp_err,
+ FILE *fp_log,
+ t_inputrec *ir,
+ t_state *state,
+ double cycles,
+ interaction_const_t *ic,
+ struct nonbonded_verlet_t *nbv,
+ gmx_pme_t *pmedata,
+ gmx_int64_t step)
{
gmx_bool OK;
pme_setup_t *set;
}
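The only substantive change in the signature is `nonbonded_verlet_t *nbv` becoming `struct nonbonded_verlet_t *nbv`. Spelling out the `struct` keyword lets the declaring header compile against a forward declaration alone, so it no longer has to pull in the full nbnxn type definitions. A minimal sketch of the pattern; everything except `nonbonded_verlet_t` (the function and member names) is hypothetical:

```c
/* In a header: a forward declaration is enough to declare
 * functions that only pass pointers to the struct around. */
struct nonbonded_verlet_t;

void do_balance(struct nonbonded_verlet_t *nbv);

/* In one .c file: the full definition is needed only where
 * members are actually accessed. */
struct nonbonded_verlet_t
{
    int kernel_type; /* hypothetical stand-in member */
};

void do_balance(struct nonbonded_verlet_t *nbv)
{
    nbv->kernel_type = 0; /* dereferencing requires the definition */
}
```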
bUsesSimpleTables = uses_simple_tables(ir->cutoff_scheme, nbv, 0);
- if (pme_lb->cutoff_scheme == ecutsVERLET &&
- nbv->grp[0].kernel_type == nbnxnk8x8x8_CUDA)
- {
- nbnxn_cuda_pme_loadbal_update_param(nbv->cu_nbv, ic);
-
- /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
- * also sharing texture references. To keep the code simple, we don't
- * treat texture references as shared resources, but this means that
- * the coulomb_tab texture ref will get updated by multiple threads.
- * Hence, to ensure that the non-bonded kernels don't start before all
- * texture binding operations are finished, we need to wait for all ranks
- * to arrive here before continuing.
- *
- * Note that we could omit this barrier if GPUs are not shared (or
- * texture objects are used), but as this is initialization code, there
- * is not point in complicating things.
- */
+ nbnxn_cuda_pme_loadbal_update_param(nbv, ic);
+
+ /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
+ * also sharing texture references. To keep the code simple, we don't
+ * treat texture references as shared resources, but this means that
+ * the coulomb_tab texture ref will get updated by multiple threads.
+ * Hence, to ensure that the non-bonded kernels don't start before all
+ * texture binding operations are finished, we need to wait for all ranks
+ * to arrive here before continuing.
+ *
+ * Note that we could omit this barrier if GPUs are not shared (or
+ * texture objects are used), but as this is initialization code, there
+ * is no point in complicating things.
+ */
#ifdef GMX_THREAD_MPI
- if (PAR(cr))
- {
- gmx_barrier(cr);
- }
-#endif /* GMX_THREAD_MPI */
+ if (PAR(cr) && use_GPU(nbv))
+ {
+ gmx_barrier(cr);
}
+#endif /* GMX_THREAD_MPI */
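The comment block above describes a classic initialize-then-synchronize pattern: every rank updates a resource that may be shared (the coulomb_tab texture reference), so no rank may launch kernels that read it until all ranks have finished the update. The new `use_GPU(nbv)` condition simply skips the barrier when no GPU, and hence no texture, is involved. A hedged MPI sketch of the same pattern; `update_shared_param` is a hypothetical stand-in for the texture rebinding:

```c
#include <mpi.h>
#include <stdio.h>

/* Hypothetical per-rank update of a resource the ranks may share,
 * standing in for rebinding the coulomb_tab texture reference. */
static void update_shared_param(int rank)
{
    printf("rank %d: parameters updated\n", rank);
}

int main(int argc, char **argv)
{
    int rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    update_shared_param(rank);

    /* No rank may proceed to use the shared resource until every
     * rank has finished its update; with unshared resources (or
     * texture objects) this barrier could be omitted. */
    MPI_Barrier(MPI_COMM_WORLD);

    MPI_Finalize();
    return 0;
}
```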
/* Usually we won't need the simple tables with GPUs.
* But we do with hybrid acceleration and with free energy.