From: Mark Abraham
Date: Tue, 19 Dec 2017 13:23:59 +0000 (+1100)
Subject: Merge branch release-2016 into release-2018
X-Git-Url: http://biod.pnpi.spb.ru/gitweb/?a=commitdiff_plain;h=bb1ff722eb582c04653dde39e0df9d4c80619096;p=alexxy%2Fgromacs.git

Merge branch release-2016 into release-2018

Change-Id: I2c3aa754de1b8ff971854740da9815fff8a41f0d
---

bb1ff722eb582c04653dde39e0df9d4c80619096
diff --cc src/gromacs/mdlib/sim_util.cpp
index b746326ac9,7c71829401..ee757e4181
--- a/src/gromacs/mdlib/sim_util.cpp
+++ b/src/gromacs/mdlib/sim_util.cpp
@@@ -2724,12 -2582,17 +2724,16 @@@ void finish_run(FILE *fplog, const gmx:
         communication deadlocks, we always do the communication for the
         report, even if we've decided not to write the report, because
         how long it takes to finish the run is not important when we've
-        decided not to report on the simulation performance. */
-     bool printReport = SIMMASTER(cr);
+        decided not to report on the simulation performance.
-     if (!walltime_accounting_get_valid_finish(walltime_accounting))
+        Further, we only report performance for dynamical integrators,
+        because those are the only ones for which we plan to
+        consider doing any optimizations. */
+     bool printReport = EI_DYNAMICS(inputrec->eI) && SIMMASTER(cr);
+
+     if (printReport && !walltime_accounting_get_valid_finish(walltime_accounting))
      {
-         md_print_warn(cr, fplog,
-                       "Simulation ended prematurely, no performance report will be written.");
+         GMX_LOG(mdlog.warning).asParagraph().appendText("Simulation ended prematurely, no performance report will be written.");
          printReport = false;
      }

diff --cc src/gromacs/taskassignment/resourcedivision.cpp
index a195d60501,89fe730f80..015c8b6857
--- a/src/gromacs/taskassignment/resourcedivision.cpp
+++ b/src/gromacs/taskassignment/resourcedivision.cpp
@@@ -216,6 -193,14 +216,14 @@@ gmx_unused static int get_tmpi_omp_thre
       */
      if (ngpu > 0)
      {
-         if (hw_opt->nthreads_omp > 0)
++        if (hw_opt.nthreads_omp > 0)
+         {
+             /* In this case it is unclear if we should use 1 rank per GPU
+              * or more or less, so we require also setting the number of ranks.
+              */
+             gmx_fatal(FARGS, "When using GPUs, setting the number of OpenMP threads without specifying the number of ranks can lead to conflicting demands. Please specify the number of thread-MPI ranks as well (option -ntmpi).");
+         }
+
          nrank = ngpu;

          /* When the user sets nthreads_omp, we can end up oversubscribing CPU cores
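
Note on the sim_util.cpp hunk: the merged code first decides whether this rank
should report performance at all, then disables the report with a warning if
the run ended prematurely. The following is a minimal standalone C++ sketch of
that control flow, not the GROMACS source; the parameter names
isDynamicalIntegrator, isSimulationMaster and finishedCleanly are hypothetical
stand-ins for EI_DYNAMICS(inputrec->eI), SIMMASTER(cr) and
walltime_accounting_get_valid_finish(walltime_accounting).

    #include <cstdio>

    void reportPerformance(bool isDynamicalIntegrator,
                           bool isSimulationMaster,
                           bool finishedCleanly)
    {
        // Only the master rank of a dynamical-integrator run reports
        // performance; other integrators are not candidates for the
        // optimizations the report is meant to inform.
        bool printReport = isDynamicalIntegrator && isSimulationMaster;

        // A premature end leaves the timing data invalid, so warn and skip
        // the report rather than print misleading numbers.
        if (printReport && !finishedCleanly)
        {
            std::fprintf(stderr,
                         "Simulation ended prematurely, no performance report "
                         "will be written.\n");
            printReport = false;
        }

        if (printReport)
        {
            // ... compute and print cycle/timing statistics here ...
        }
    }

As the comment in the hunk notes, in the real code the timing communication
itself still happens unconditionally, so that ranks which skip the report
cannot deadlock the ones that write it.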
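Note on the resourcedivision.cpp hunk: with GPUs present, a user-fixed OpenMP
thread count without a fixed rank count is rejected, and the default becomes
one thread-MPI rank per GPU. A minimal sketch of that rule, again not the
GROMACS implementation (the function name and plain-int parameters here are
illustrative only):

    #include <stdexcept>

    int chooseThreadMpiRankCount(int numGpus, int userNumOmpThreads)
    {
        if (numGpus > 0)
        {
            if (userNumOmpThreads > 0)
            {
                // Mirrors the gmx_fatal() call in the hunk: the two settings
                // can conflict, so the rank count must be given explicitly.
                throw std::runtime_error(
                        "When using GPUs, setting the number of OpenMP threads "
                        "without specifying the number of ranks can lead to "
                        "conflicting demands. Please specify the number of "
                        "thread-MPI ranks as well (option -ntmpi).");
            }
            return numGpus; // default: one rank per GPU
        }
        return 0; // no GPUs: the rank count is chosen elsewhere (not in this hunk)
    }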