 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed-forces/bonded.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/genborn.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/pleasecite.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/sysinfo.h"

#include "nbnxn_gpu.h"
void print_time(FILE                     *out,
                gmx_walltime_accounting_t walltime_accounting,
                t_commrec gmx_unused     *cr)
    char   timebuf[STRLEN];
    double dt, elapsed_seconds, time_per_step;

    fprintf(out, "step %s", gmx_step_str(step, buf));
    if (step >= ir->nstlist)
        double seconds_since_epoch = gmx_gettime();
        elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
        time_per_step   = elapsed_seconds/(step - ir->init_step + 1);
        dt              = (ir->nsteps + ir->init_step - step)*time_per_step;

        finish = (time_t) (seconds_since_epoch + dt);
        gmx_ctime_r(&finish, timebuf, STRLEN);
        sprintf(buf, "%s", timebuf);
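        /* gmx_ctime_r, like ctime, terminates the time string with a
         * newline; strip it so the message stays on one status line. */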
        buf[strlen(buf)-1] = '\0';
        fprintf(out, ", will finish %s", buf);
        fprintf(out, ", remaining wall clock time: %5d s ", (int)dt);
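    /* delta_t is in ps, so delta_t/1000 is ns per step; multiplying by
     * 24*60*60 (seconds per day) and dividing by the measured seconds
     * per step gives the performance in ns/day. */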
    fprintf(out, " performance: %.1f ns/day ",
            ir->delta_t/1000*24*60*60/time_per_step);

void print_date_and_time(FILE *fplog, int nodeid, const char *title,
    char time_string[STRLEN];

    char   timebuf[STRLEN];
    time_t temp_time = (time_t) the_time;

    gmx_ctime_r(&temp_time, timebuf, STRLEN);
    for (i = 0; timebuf[i] >= ' '; i++)
        time_string[i] = timebuf[i];
    time_string[i] = '\0';

    fprintf(fplog, "%s on rank %d %s\n", title, nodeid, time_string);

void print_start(FILE *fplog, t_commrec *cr,
                 gmx_walltime_accounting_t walltime_accounting,
    sprintf(buf, "Started %s", name);
    print_date_and_time(fplog, cr->nodeid, buf,
                        walltime_accounting_get_start_time_stamp(walltime_accounting));

static void sum_forces(rvec f[], const PaddedRVecVector *forceToAdd)
    /* TODO: remove this - 1 when padding is properly implemented */
    int end = forceToAdd->size() - 1;
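    /* The padded vector is allocated with one element extra so that
     * (unaligned) 4-wide SIMD loads of rvec entries stay in bounds (see
     * the asserts in do_force); that last element carries no force and
     * is excluded from the reduction. */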
    const rvec *fAdd = as_rvec_array(forceToAdd->data());

    // cppcheck-suppress unreadVariable
    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
        rvec_inc(f[i], fAdd[i]);
static void calc_virial(int start, int homenr, rvec x[], rvec f[],
                        tensor vir_part, t_graph *graph, matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
    /* The short-range virial from surrounding boxes */
    calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md.
     */
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    /* Add position restraint contribution */
    for (i = 0; i < DIM; i++)
        vir_part[i][i] += fr->vir_diag_posres[i];

    /* Add wall contribution */
    for (i = 0; i < DIM; i++)
        vir_part[i][ZZ] += fr->vir_wall_z[i];

    pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
static void pull_potential_wrapper(t_commrec *cr,
                                   matrix box, rvec x[],
                                   gmx_enerdata_t *enerd,
                                   gmx_wallcycle_t wcycle)
    /* Calculate the center of mass forces; this requires communication,
     * which is why pull_potential is called close to other communication.
     * The virial contribution is calculated directly,
     * which is why we call pull_potential after calc_virial.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->ePBC, box);
    enerd->term[F_COM_PULL] +=
        pull_potential(ir->pull_work, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
static void pme_receive_force_ener(t_commrec      *cr,
                                   gmx_wallcycle_t wcycle,
                                   gmx_enerdata_t *enerd,
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    gmx_pme_receive_f(cr, as_rvec_array(fr->f_novirsum->data()), fr->vir_el_recip, &e_q,
                      fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);

    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
                               gmx_int64_t step, real forceTolerance,
                               const rvec *x, const rvec *f)
    real           force2Tolerance = gmx::square(forceTolerance);
    std::uintmax_t numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
            fprintf(fp, "step %" GMX_PRId64 " atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
                    step,
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
    if (numNonFinite > 0)
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" GMX_PRId64 " detected non-finite forces on %ju atoms", step, numNonFinite);
static void post_process_forces(t_commrec *cr,
                                t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                                matrix box, rvec x[],
                                t_forcerec *fr, gmx_vsite_t *vsite,
    /* Spread the mesh force on virtual sites to the other particles...
     * This is parallelized. MPI communication is performed
     * if the constructing atoms aren't local.
     */
    wallcycle_start(wcycle, ewcVSITESPREAD);
    spread_vsite_f(vsite, x, as_rvec_array(fr->f_novirsum->data()), nullptr,
                   (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
                   &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
    wallcycle_stop(wcycle, ewcVSITESPREAD);

    if (flags & GMX_FORCE_VIRIAL)
        /* Now add the forces, this is local */
        sum_forces(f, fr->f_novirsum);

    if (EEL_FULL(fr->eeltype))
        /* Add the mesh contribution to the virial */
        m_add(vir_force, fr->vir_el_recip, vir_force);
    if (EVDW_PME(fr->vdwtype))
        /* Add the mesh contribution to the virial */
        m_add(vir_force, fr->vir_lj_recip, vir_force);

    pr_rvecs(debug, 0, "vir_force", vir_force, DIM);

    if (fr->print_force >= 0)
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
static void do_nb_verlet(t_forcerec *fr,
                         interaction_const_t *ic,
                         gmx_enerdata_t *enerd,
                         int flags, int ilocality,
                         gmx_wallcycle_t wcycle)
    int                       enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
    nonbonded_verlet_group_t *nbvg;
    gmx_bool                  bUsingGpuKernels;

    if (!(flags & GMX_FORCE_NONBONDED))
        /* skip non-bonded calculation */

    nbvg = &fr->nbv->grp[ilocality];

    /* GPU kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
        gmx_incons("Invalid cut-off scheme passed!");

    bUsingGpuKernels = (nbvg->kernel_type == nbnxnk8x8x8_GPU);

    if (!bUsingGpuKernels)
        wallcycle_sub_start(wcycle, ewcsNONBONDED);
    switch (nbvg->kernel_type)
        case nbnxnk4x4_PlainC:
            nbnxn_kernel_ref(&nbvg->nbl_lists,
                             enerd->grpp.ener[egCOULSR],
                             enerd->grpp.ener[egBHAMSR] :
                             enerd->grpp.ener[egLJSR]);

        case nbnxnk4xN_SIMD_4xN:
            nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
                                  enerd->grpp.ener[egCOULSR],
                                  enerd->grpp.ener[egBHAMSR] :
                                  enerd->grpp.ener[egLJSR]);
        case nbnxnk4xN_SIMD_2xNN:
            nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
                                   enerd->grpp.ener[egCOULSR],
                                   enerd->grpp.ener[egBHAMSR] :
                                   enerd->grpp.ener[egLJSR]);

        case nbnxnk8x8x8_GPU:
            nbnxn_gpu_launch_kernel(fr->nbv->gpu_nbv, nbvg->nbat, flags, ilocality);

        case nbnxnk8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
                                 nbvg->nbat->out[0].f,
                                 enerd->grpp.ener[egCOULSR],
                                 enerd->grpp.ener[egBHAMSR] :
                                 enerd->grpp.ener[egLJSR]);

            gmx_incons("Invalid nonbonded kernel type passed!");

    if (!bUsingGpuKernels)
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);

    if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    else if ((!bUsingGpuKernels && nbvg->ewald_excl == ewaldexclAnalytical) ||
             (bUsingGpuKernels && nbnxn_gpu_is_kernel_ewald_analytical(fr->nbv->gpu_nbv)))
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (flags & GMX_FORCE_ENERGY)
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj  += 1;

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
             nbvg->nbl_lists.natpair_ljq);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
             nbvg->nbl_lists.natpair_lj);
    /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
             nbvg->nbl_lists.natpair_q);

    if (ic->vdw_modifier == eintmodFORCESWITCH)
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    if (ic->vdw_modifier == eintmodPOTSWITCH)
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    if (ic->vdwtype == evdwPME)
        /* We add up the LJ Ewald cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
static void do_nb_verlet_fep(nbnxn_pairlist_set_t *nbl_lists,
                             gmx_enerdata_t *enerd,
                             gmx_wallcycle_t wcycle)
    nb_kernel_data_t kernel_data;
    real             dvdl_nb[efptNR];

    /* Add short-range interactions */
    donb_flags |= GMX_NONBONDED_DO_SR;

    /* Currently all group scheme kernels always calculate (shift-)forces */
    if (flags & GMX_FORCE_FORCES)
        donb_flags |= GMX_NONBONDED_DO_FORCE;
    if (flags & GMX_FORCE_VIRIAL)
        donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
    if (flags & GMX_FORCE_ENERGY)
        donb_flags |= GMX_NONBONDED_DO_POTENTIAL;

    kernel_data.flags  = donb_flags;
    kernel_data.lambda = lambda;
    kernel_data.dvdl   = dvdl_nb;

    kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR];
    kernel_data.energygrp_vdw  = enerd->grpp.ener[egLJSR];

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)

    assert(gmx_omp_nthreads_get(emntNonbonded) == nbl_lists->nnbl);

    wallcycle_sub_start(wcycle, ewcsNONBONDED);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
    for (th = 0; th < nbl_lists->nnbl; th++)
        gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                  x, f, fr, mdatoms, &kernel_data, nrnb);
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

    if (fepvals->sc_alpha != 0)
        enerd->dvdl_nonlin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
        enerd->dvdl_lin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];

    /* If we do foreign lambda and we have soft-core interactions
     * we have to recalculate the (non-linear) energy contributions.
     */
    if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
        kernel_data.flags          = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE)) | GMX_NONBONDED_DO_FOREIGNLAMBDA;
        kernel_data.lambda         = lam_i;
        kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR];
        kernel_data.energygrp_vdw  = enerd->foreign_grpp.ener[egLJSR];
        /* Note that we add to kernel_data.dvdl, but ignore the result */

        for (i = 0; i < enerd->n_lambda; i++)
            for (j = 0; j < efptNR; j++)
                lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
            reset_foreign_enerdata(enerd);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
            for (th = 0; th < nbl_lists->nnbl; th++)
                gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                          x, f, fr, mdatoms, &kernel_data, nrnb);
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

            sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
            enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];

    wallcycle_sub_stop(wcycle, ewcsNONBONDED);
gmx_bool use_GPU(const nonbonded_verlet_t *nbv)
    return nbv != nullptr && nbv->bUseGPU;

static gmx_inline void clear_rvecs_omp(int n, rvec v[])
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
        for (int i = 0; i < n; i++)
#pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < n; i++)

/*! \brief This routine checks if the potential energy is finite.
 *
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetic. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] enerd The energy data; the non-bonded group energies need to be added in here before calling this routine
 */
static void checkPotentialEnergyValidity(const gmx_enerdata_t *enerd)
    if (!std::isfinite(enerd->term[F_EPOT]))
        gmx_fatal(FARGS, "The total potential energy is %g, which is not finite. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A non-finite potential energy can be caused by overlapping interactions in bonded interactions or very large or NaN coordinate values. Usually this is caused by a badly or non-equilibrated initial configuration or incorrect interactions or parameters in the topology.",
                  enerd->term[F_EPOT], enerd->term[F_LJ],
                  enerd->term[F_COUL_SR]);
void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
                         t_inputrec *inputrec,
                         gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                         gmx_groups_t gmx_unused *groups,
                         matrix box, rvec x[], history_t *hist,
                         PaddedRVecVector *force,
                         gmx_enerdata_t *enerd, t_fcdata *fcd,
                         real *lambda, t_graph *graph,
                         t_forcerec *fr, interaction_const_t *ic,
                         gmx_vsite_t *vsite, rvec mu_tot,
                         double t, gmx_edsam_t ed,
    gmx_bool            bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool            bDoForces, bUseGPU, bUseOrEmulGPU;
    gmx_bool            bDiffKernels = FALSE;
    rvec                vzero, box_diag;
    float               cycles_pme, cycles_force, cycles_wait_gpu;
    /* TODO To avoid loss of precision, float can't be used for a
     * cycle count. Build an object that can do this right and perhaps
     * also be used by gmx_wallcycle_t */
    gmx_cycles_t        cycleCountBeforeLocalWorkCompletes = 0;
    nonbonded_verlet_t *nbv;

    const int           homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (DOMAINDECOMP(cr))
        cg1 = cr->dd->ncg_tot;

    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces     = (flags & GMX_FORCE_FORCES);
    bUseGPU       = fr->nbv->bUseGPU;
    bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);

    update_forcerec(fr, box);

    if (inputrecNeedMutot(inputrec))
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        calc_mu(start, homenr,
                x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,

    if (fr->ePBC != epbcNONE)
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
            calc_shifts(box, fr->shift_vec);

            put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
            unshift_self(graph, box, x);

    nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
                                 fr->shift_vec, nbv->grp[0].nbat);

    if (!(cr->duty & DUTY_PME))
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),

        wallcycle_stop(wcycle, ewcPP_PMESENDX);

    /* do gridding for pair search */
        if (graph && bStateChanged)
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);

        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
                              0, mdatoms->homenr, -1, fr->cginfo, x,
                              nbv->grp[eintLocal].kernel_type,
                              nbv->grp[eintLocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);

            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
                                       nbv->grp[eintNonlocal].kernel_type,
                                       nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);

        if (nbv->ngrp == 1 ||
            nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
                               nbv->nbs, mdatoms, fr->cginfo);
            nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        wallcycle_stop(wcycle, ewcNS);

    /* initialize the GPU atom data and copy shift vector */
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_gpu_init_atomdata(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);

        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        nbnxn_gpu_upload_shiftvec(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);

    /* do local pair search */
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
                            nbv->min_ci_balanced,
                            &nbv->grp[eintLocal].nbl_lists,
                            nbv->grp[eintLocal].kernel_type,
        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);

            /* initialize local pair-list on the GPU */
            nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
                                    nbv->grp[eintLocal].nbl_lists.nbl[0],
        wallcycle_stop(wcycle, ewcNS);

        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
                                        nbv->grp[eintLocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);

        wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
        /* launch local nonbonded F on GPU */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);

    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (DOMAINDECOMP(cr))
        bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
                        nbv->grp[eintLocal].kernel_type);

            /* With GPU+CPU non-bonded calculations we need to copy
             * the local coordinates to the non-local nbat struct
             * (in CPU format) as the non-local kernel call also
             * calculates the local - non-local interactions.
             */
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);

            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
                nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);

            nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
                                nbv->min_ci_balanced,
                                &nbv->grp[eintNonlocal].nbl_lists,
                                nbv->grp[eintNonlocal].kernel_type,
            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_GPU)
                /* initialize non-local pair-list on the GPU */
                nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
                                        nbv->grp[eintNonlocal].nbl_lists.nbl[0],
            wallcycle_stop(wcycle, ewcNS);

            wallcycle_start(wcycle, ewcMOVEX);
            dd_move_x(cr->dd, box, x);
            wallcycle_stop(wcycle, ewcMOVEX);

            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);

        if (bUseGPU && !bDiffKernels)
            wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
            /* launch non-local nonbonded F on GPU */
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
            cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);

        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        if (DOMAINDECOMP(cr) && !bDiffKernels)
            nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintNonlocal].nbat,
                                     flags, eatNonlocal);
        nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintLocal].nbat,
        cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);

    if (bStateChanged && inputrecNeedMutot(inputrec))
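            /* mu holds the state A and state B dipole components back to
             * back, so one 2*DIM-wide reduction replaces two separate
             * ones; the components are unpacked into fr->mu_tot below. */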
            gmx_sumd(2*DIM, mu, cr);

        for (i = 0; i < 2; i++)
            for (j = 0; j < DIM; j++)
                fr->mu_tot[i][j] = mu[i*DIM + j];

        if (fr->efep == efepNO)
            copy_rvec(fr->mu_tot[0], mu_tot);
            for (j = 0; j < DIM; j++)
                mu_tot[j] =
                    (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                    lambda[efptCOUL]*fr->mu_tot[1][j];

    /* Reset energies */
    reset_enerdata(enerd);
    clear_rvecs(SHIFTS, fr->fshift);

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);

        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);

    /* Temporary solution until all routines take PaddedRVecVector */
    rvec *f = as_rvec_array(force->data());

    /* Start the force cycle counter.
     * This counter is stopped after do_force_lowlevel.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
            if (flags & GMX_FORCE_VIRIAL)
                fr->f_novirsum = fr->forceBufferNoVirialSummation;
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the virial.
                 */
                fr->f_novirsum = force;

    if (fr->bF_NoVirSum)
        if (flags & GMX_FORCE_VIRIAL)
            /* TODO: remove this - 1 when padding is properly implemented */
            clear_rvecs_omp(fr->f_novirsum->size() - 1,
                            as_rvec_array(fr->f_novirsum->data()));
    /* Clear the short- and long-range forces */
    clear_rvecs_omp(fr->natoms_force_constr, f);

    clear_rvec(fr->vir_diag_posres);

    if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
        clear_pull_forces(inputrec->pull_work);

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, because in that
     * function, the listed forces are calculated before PME, which
     * does communication. With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */
        /* Maybe we should move this into do_force_lowlevel */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,

    if (fr->efep != efepNO)
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
            do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);

        if (DOMAINDECOMP(cr) &&
            fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
            do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);

    if (!bUseOrEmulGPU || bDiffKernels)
        if (DOMAINDECOMP(cr))
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
                         bDiffKernels ? enbvClearFYes : enbvClearFNo,

            aloc = eintNonlocal;

        /* Add all the non-bonded force to the normal force array.
         * This can be split into a local and a non-local part when overlapping
         * communication with calculation with domain decomposition.
         */
        cycles_force += wallcycle_stop(wcycle, ewcFORCE);
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_start_nocount(wcycle, ewcFORCE);

        /* If there are multiple fshift output buffers, reduce them */
        if ((flags & GMX_FORCE_VIRIAL) &&
            nbv->grp[aloc].nbl_lists.nnbl > 1)
            /* This is not in a subcounter because it takes a
               negligible and constant-sized amount of time */
            nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,

    /* update QMMMrec, if necessary */
        update_QMMMrec(cr, fr, x, mdatoms, box, top);

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, enerd, fcd, top, fr->born,
                      inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
                      flags, &cycles_pme);

    cycles_force += wallcycle_stop(wcycle, ewcFORCE);

        do_flood(cr, inputrec, x, f, ed, box, step, bNS);

    if (bUseOrEmulGPU && !bDiffKernels)
        /* wait for non-local forces (or calculate in emulation mode) */
        if (DOMAINDECOMP(cr))
                wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
                nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
                                       enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                cycles_tmp       = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
                cycles_wait_gpu += cycles_tmp;
                cycles_force    += cycles_tmp;

                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
                cycles_force += wallcycle_stop(wcycle, ewcFORCE);

            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
            /* skip the reduction if there was no non-local work to do */
            if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
                nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
                                               nbv->grp[eintNonlocal].nbat, f);
            wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);

    if (bDoForces && DOMAINDECOMP(cr))
            /* We are done with the CPU compute, but the GPU local non-bonded
             * kernel can still be running while we communicate the forces.
             * We start a counter here, so we can, hopefully, time the rest
             * of the GPU kernel execution and data transfer.
             */
            cycleCountBeforeLocalWorkCompletes = gmx_cycles_read();

        /* Communicate the forces */
        wallcycle_start(wcycle, ewcMOVEF);
        dd_move_f(cr->dd, f, fr->fshift);
        wallcycle_stop(wcycle, ewcMOVEF);

    /* wait for local forces (or calculate in emulation mode) */
            float       cycles_tmp, cycles_wait_est;
            /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
             * is between 0.5 and 1.5 Mcycles. So 2 Mcycles is an overestimate,
             * but even with a step of 0.1 ms the difference is less than 1%
             */
            const float gpuWaitApiOverheadMargin = 2e6f; /* cycles */

            wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
            nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
                                   enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
            cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

            if (bDoForces && DOMAINDECOMP(cr))
                cycles_wait_est = gmx_cycles_read() - cycleCountBeforeLocalWorkCompletes;

                if (cycles_tmp < gpuWaitApiOverheadMargin)
                    /* We measured few cycles, it could be that the kernel
                     * and transfer finished earlier and there was no actual
                     * wait time, only API call overhead.
                     * Then the actual time could be anywhere between 0 and
                     * cycles_wait_est. As a compromise, we use half the time.
                     */
                    cycles_wait_est *= 0.5f;
                /* No force communication so we actually timed the wait */
                cycles_wait_est = cycles_tmp;
            /* Even though this is after dd_move_f, the actual task we are
             * waiting for runs asynchronously with dd_move_f and we usually
             * have nothing to balance it with, so we can and should add
             * the time to the force time for load balancing.
             */
            cycles_force    += cycles_wait_est;
            cycles_wait_gpu += cycles_wait_est;

            /* now clear the GPU outputs while we finish the step on the CPU */
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_gpu_clear_outputs(nbv->gpu_nbv, flags);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);

            wallcycle_start_nocount(wcycle, ewcFORCE);
            do_nb_verlet(fr, ic, enerd, flags, eintLocal,
                         DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
            wallcycle_stop(wcycle, ewcFORCE);
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
                                       nbv->grp[eintLocal].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);

    if (DOMAINDECOMP(cr))
        dd_force_flop_stop(cr->dd, nrnb);
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
                dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);

        /* Compute forces due to electric field */
        if (fr->efield != nullptr)
            fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, nullptr, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);

        if (flags & GMX_FORCE_VIRIAL)
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);

    if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
        /* Since the COM pulling is always done mass-weighted, no forces are
         * applied to vsites and this call can be done after vsite spreading.
         */
        pull_potential_wrapper(cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t,

    /* Add the forces from enforced rotation potentials (if any) */
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);

    if (PAR(cr) && !(cr->duty & DUTY_PME))
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, wcycle, enerd, fr);

        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,

    if (flags & GMX_FORCE_ENERGY)
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
            checkPotentialEnergyValidity(enerd);
void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
                        t_inputrec *inputrec,
                        gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                        gmx_localtop_t *top,
                        gmx_groups_t *groups,
                        matrix box, rvec x[], history_t *hist,
                        PaddedRVecVector *force,
                        gmx_enerdata_t *enerd, t_fcdata *fcd,
                        real *lambda, t_graph *graph,
                        t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
                        double t, gmx_edsam_t ed,
                        gmx_bool bBornRadii,
    gmx_bool  bStateChanged, bNS, bFillGrid, bCalcCGCM;
    float     cycles_pme, cycles_force;

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (DOMAINDECOMP(cr))
        cg1 = cr->dd->ncg_tot;

    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    /* Should we perform the long-range nonbonded evaluation inside the neighbor searching? */
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces     = (flags & GMX_FORCE_FORCES);

    update_forcerec(fr, box);

    if (inputrecNeedMutot(inputrec))
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        calc_mu(start, homenr,
                x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,

    if (fr->ePBC != epbcNONE)
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
            calc_shifts(box, fr->shift_vec);

            put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
                                     &(top->cgs), x, fr->cg_cm);
            inc_nrnb(nrnb, eNR_CGCM, homenr);
            inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
            unshift_self(graph, box, x);

        calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
        inc_nrnb(nrnb, eNR_CGCM, homenr);

    if (bCalcCGCM && gmx_debug_at)
        pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);

    if (!(cr->duty & DUTY_PME))
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
#endif /* GMX_MPI */

    /* Communicate coordinates and sum dipole if necessary */
    if (DOMAINDECOMP(cr))
        wallcycle_start(wcycle, ewcMOVEX);
        dd_move_x(cr->dd, box, x);
        wallcycle_stop(wcycle, ewcMOVEX);

    if (inputrecNeedMutot(inputrec))
            gmx_sumd(2*DIM, mu, cr);

        for (i = 0; i < 2; i++)
            for (j = 0; j < DIM; j++)
                fr->mu_tot[i][j] = mu[i*DIM + j];

        if (fr->efep == efepNO)
            copy_rvec(fr->mu_tot[0], mu_tot);
            for (j = 0; j < DIM; j++)
                mu_tot[j] =
                    (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];

    /* Reset energies */
    reset_enerdata(enerd);
    clear_rvecs(SHIFTS, fr->fshift);

        wallcycle_start(wcycle, ewcNS);

        if (graph && bStateChanged)
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);

        /* Do the actual neighbour searching */
                   groups, top, mdatoms,
                   cr, nrnb, bFillGrid);

        wallcycle_stop(wcycle, ewcNS);

    if (inputrec->implicit_solvent && bNS)
        make_gb_nblist(cr, inputrec->gb_algorithm,
                       x, box, fr, &top->idef, graph, fr->born);

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);

        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);

    /* Temporary solution until all routines take PaddedRVecVector */
    rvec *f = as_rvec_array(force->data());

    /* Start the force cycle counter.
     * This counter is stopped after do_force_lowlevel.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
            if (flags & GMX_FORCE_VIRIAL)
                fr->f_novirsum = fr->forceBufferNoVirialSummation;
                /* TODO: remove this - 1 when padding is properly implemented */
                clear_rvecs(fr->f_novirsum->size() - 1,
                            as_rvec_array(fr->f_novirsum->data()));
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the virial.
                 */
                fr->f_novirsum = force;

        /* Clear the short- and long-range forces */
        clear_rvecs(fr->natoms_force_constr, f);

        clear_rvec(fr->vir_diag_posres);

    if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
        clear_pull_forces(inputrec->pull_work);

    /* update QMMMrec, if necessary */
        update_QMMMrec(cr, fr, x, mdatoms, box, top);

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, enerd, fcd, top, fr->born,
                      inputrec->fepvals, lambda,
                      graph, &(top->excls), fr->mu_tot,

    cycles_force = wallcycle_stop(wcycle, ewcFORCE);

        do_flood(cr, inputrec, x, f, ed, box, step, bNS);

    if (DOMAINDECOMP(cr))
        dd_force_flop_stop(cr->dd, nrnb);
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);

        /* Compute forces due to electric field */
        if (fr->efield != nullptr)
            fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);

    /* Communicate the forces */
    if (DOMAINDECOMP(cr))
        wallcycle_start(wcycle, ewcMOVEF);
        dd_move_f(cr->dd, f, fr->fshift);
        /* Do we need to communicate the separate force array
         * for terms that do not contribute to the single sum virial?
         * Position restraints and electric fields do not introduce
         * inter-cg forces, only full electrostatics methods do.
         * When we do not calculate the virial, fr->f_novirsum = f,
         * so we have already communicated these forces.
         */
        if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
            (flags & GMX_FORCE_VIRIAL))
            dd_move_f(cr->dd, as_rvec_array(fr->f_novirsum->data()), nullptr);
        wallcycle_stop(wcycle, ewcMOVEF);

    /* If we have NoVirSum forces, but we do not calculate the virial,
     * we sum fr->f_novirsum=f later.
     */
    if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        wallcycle_start(wcycle, ewcVSITESPREAD);
        spread_vsite_f(vsite, x, f, fr->fshift, FALSE, nullptr, nrnb,
                       &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
        wallcycle_stop(wcycle, ewcVSITESPREAD);

    if (flags & GMX_FORCE_VIRIAL)
        /* Calculation of the virial must be done after vsites! */
        calc_virial(0, mdatoms->homenr, x, f,
                    vir_force, graph, box, nrnb, fr, inputrec->ePBC);

    if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
        pull_potential_wrapper(cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t,

    /* Add the forces from enforced rotation potentials (if any) */
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);

    if (PAR(cr) && !(cr->duty & DUTY_PME))
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, wcycle, enerd, fr);

        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,

    if (flags & GMX_FORCE_ENERGY)
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
            checkPotentialEnergyValidity(enerd);
void do_force(FILE *fplog, t_commrec *cr,
              t_inputrec *inputrec,
              gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
              gmx_localtop_t *top,
              gmx_groups_t *groups,
              matrix box, PaddedRVecVector *coordinates, history_t *hist,
              PaddedRVecVector *force,
              gmx_enerdata_t *enerd, t_fcdata *fcd,
              gmx::ArrayRef<real> lambda, t_graph *graph,
              gmx_vsite_t *vsite, rvec mu_tot,
              double t, gmx_edsam_t ed,
              gmx_bool bBornRadii,
    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
        flags &= ~GMX_FORCE_NONBONDED;

    GMX_ASSERT(coordinates->size() >= static_cast<unsigned int>(fr->natoms_force + 1) ||
               fr->natoms_force == 0, "We might need 1 element extra for SIMD");
    GMX_ASSERT(force->size() >= static_cast<unsigned int>(fr->natoms_force + 1) ||
               fr->natoms_force == 0, "We might need 1 element extra for SIMD");

    rvec *x = as_rvec_array(coordinates->data());

    switch (inputrec->cutoff_scheme)
            do_force_cutsVERLET(fplog, cr, inputrec,
                                lambda.data(), graph,
            do_force_cutsGROUP(fplog, cr, inputrec,
                               lambda.data(), graph,
            gmx_incons("Invalid cut-off scheme passed!");

void do_constrain_first(FILE *fplog, gmx_constr_t constr,
                        t_inputrec *ir, t_mdatoms *md,
                        t_state *state, t_commrec *cr, t_nrnb *nrnb,
                        t_forcerec *fr, gmx_localtop_t *top)
    int     i, m, start, end;
    real    dt = ir->delta_t;

    /* We need to allocate one element extra, since we might use
     * (unaligned) 4-wide SIMD loads to access rvec entries.
     */
    snew(savex, state->natoms + 1);

        fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
                start, md->homenr, end);
    /* Do a first constraint pass to reset particles... */
    step = ir->init_step;
        char buf[STEPSTRSIZE];
        fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
                gmx_step_str(step, buf));

    /* constrain the current position */
    constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
              ir, cr, step, 0, 1.0, md,
              as_rvec_array(state->x.data()), as_rvec_array(state->x.data()), nullptr,
              fr->bMolPBC, state->box,
              state->lambda[efptBONDED], &dvdl_dum,
              nullptr, nullptr, nrnb, econqCoord);

    /* constrain the initial velocity, and save it */
    /* also may be useful if we need the ekin from the halfstep for velocity verlet */
    constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
              ir, cr, step, 0, 1.0, md,
              as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), as_rvec_array(state->v.data()),
              fr->bMolPBC, state->box,
              state->lambda[efptBONDED], &dvdl_dum,
              nullptr, nullptr, nrnb, econqVeloc);

    /* constrain the initial velocities at t-dt/2 */
    if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
        for (i = start; (i < end); i++)
            for (m = 0; (m < DIM); m++)
                /* Reverse the velocity */
                state->v[i][m] = -state->v[i][m];
                /* Store the position at t-dt in buf */
                savex[i][m] = state->x[i][m] + dt*state->v[i][m];

        /* Shake the positions at t=-dt with the positions at t=0
         * as reference coordinates.
         */
            char buf[STEPSTRSIZE];
            fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
                    gmx_step_str(step, buf));

        constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
                  ir, cr, step, -1, 1.0, md,
                  as_rvec_array(state->x.data()), savex, nullptr,
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  as_rvec_array(state->v.data()), nullptr, nrnb, econqCoord);

        for (i = start; i < end; i++)
            for (m = 0; m < DIM; m++)
                /* Re-reverse the velocities */
                state->v[i][m] = -state->v[i][m];
integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
                double *enerout, double *virout)
    double enersum, virsum;
    double invscale, invscale2, invscale3;
    double r, ea, eb, ec, pa, pb, pc, pd;

    invscale  = 1.0/scale;
    invscale2 = invscale*invscale;
    invscale3 = invscale*invscale2;

    /* Following summation derived from cubic spline definition,
     * Numerical Recipes in C, second edition, p. 113-116. Exact for
     * the cubic spline. We first calculate the negative of the
     * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
     * add the more standard, abrupt cutoff correction to that result,
     * yielding the long-range correction for a switched function. We
     * perform both the pressure and energy loops at the same time for
     * simplicity, as the computational cost is low. */
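    /* A sketch of the bookkeeping below, assuming the usual GROMACS table
     * layout of cubic-spline coefficients (Y, F, G, H) per point: within
     * interval ri the potential is V(eps) = Y + F*eps + G*eps^2 + H*eps^3,
     * with eps in [0,1) the fractional distance into the interval. Writing
     * r' = (ri + eps)*invscale and expanding r'^2 dr' (energy) and
     * r'^3 dV (virial) in powers of eps produces the ea/eb/ec and
     * pa/pb/pc/pd prefactors and the 1/3...1/6 weights in the sums over
     * the spline coefficients below. */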
    /* Since the dispersion table has been scaled down a factor
     * 6.0 and the repulsion a factor 12.0 to compensate for the
     * c6/c12 parameters inside nbfp[] being scaled up (to save
     * flops in kernels), we need to correct for this.
     */

    for (ri = rstart; ri < rend; ++ri)
        eb = 2.0*invscale2*r;
        pb = 3.0*invscale2*r;
        pc = 3.0*invscale*r*r;

        /* this "8" is from the packing in the vdwtab array - perhaps
           should be defined? */
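        /* Assumed layout, consistent with the offstart argument passed by
         * the caller: each point stores four spline coefficients for
         * dispersion followed by four for repulsion,
         *   vdwtab[8*ri + 0..3] = Y, F, G, H  (dispersion, offstart 0)
         *   vdwtab[8*ri + 4..7] = Y, F, G, H  (repulsion,  offstart 4)
         * hence the stride of 8. */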
        offset = 8*ri + offstart;
        y0     = vdwtab[offset];
        f      = vdwtab[offset+1];
        g      = vdwtab[offset+2];
        h      = vdwtab[offset+3];

        enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
        virsum  += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
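    /* The factor 4*M_PI below is the spherical volume element 4 pi r^2 dr;
     * tabfactor undoes the 6/12 down-scaling of the stored dispersion and
     * repulsion tables noted above. */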
    *enerout = 4.0*M_PI*enersum*tabfactor;
    *virout  = 4.0*M_PI*virsum*tabfactor;

void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
    double eners[2], virs[2], enersum, virsum;
    double r0, rc3, rc9;
    real   scale, *vdwtab;

    fr->enershiftsix    = 0;
    fr->enershifttwelve = 0;
    fr->enerdiffsix     = 0;
    fr->enerdifftwelve  = 0;
    fr->virdifftwelve   = 0;

    if (eDispCorr != edispcNO)
        for (i = 0; i < 2; i++)

        if ((fr->vdw_modifier == eintmodPOTSHIFT) ||
            (fr->vdw_modifier == eintmodPOTSWITCH) ||
            (fr->vdw_modifier == eintmodFORCESWITCH) ||
            (fr->vdwtype == evdwSHIFT) ||
            (fr->vdwtype == evdwSWITCH))
            if (((fr->vdw_modifier == eintmodPOTSWITCH) ||
                 (fr->vdw_modifier == eintmodFORCESWITCH) ||
                 (fr->vdwtype == evdwSWITCH)) && fr->rvdw_switch == 0)
                          "With dispersion correction rvdw-switch cannot be zero "
                          "for vdw-type = %s", evdw_names[fr->vdwtype]);

            /* TODO This code depends on the logic in tables.c that
               constructs the table layout, which should be made
               explicit in future cleanup. */
            GMX_ASSERT(fr->dispersionCorrectionTable->interaction == GMX_TABLE_INTERACTION_VDWREP_VDWDISP,
                       "Dispersion-correction code needs a table with both repulsion and dispersion terms");
            scale  = fr->dispersionCorrectionTable->scale;
            vdwtab = fr->dispersionCorrectionTable->data;

            /* Round the cut-offs to exact table values for precision */
            ri0 = static_cast<int>(floor(fr->rvdw_switch*scale));
            ri1 = static_cast<int>(ceil(fr->rvdw*scale));

            /* The code below has some support for handling force-switching, i.e.
             * when the force (instead of potential) is switched over a limited
             * region. This leads to a constant shift in the potential inside the
             * switching region, which we can handle by adding a constant energy
             * term in the force-switch case just like when we do potential-shift.
             *
             * For now this is not enabled, but to keep the functionality in the
             * code we check separately for switch and shift. When we do force-switch
             * the shifting point is rvdw_switch, while it is the cutoff when we
             * have a classical potential-shift.
             *
             * For a pure potential-shift the potential has a constant shift
             * all the way out to the cutoff, and that is it. For other forms
             * we need to calculate the constant shift up to the point where we
             * start modifying the potential.
             */
            ri0 = (fr->vdw_modifier == eintmodPOTSHIFT) ? ri1 : ri0;

            if ((fr->vdw_modifier == eintmodFORCESWITCH) ||
                (fr->vdwtype == evdwSHIFT))
                /* Determine the constant energy shift below rvdw_switch.
                 * Table has a scale factor since we have scaled it down to compensate
                 * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
                 */
                fr->enershiftsix    = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
                fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
            else if (fr->vdw_modifier == eintmodPOTSHIFT)
                fr->enershiftsix    = (real)(-1.0/(rc3*rc3));
                fr->enershifttwelve = (real)( 1.0/(rc9*rc3));

            /* Add the constant part from 0 to rvdw_switch.
             * This integration from 0 to rvdw_switch overcounts the number
             * of interactions by 1, as it also counts the self interaction.
             * We will correct for this later.
             */
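            /* Integrating the constant shift over the sphere r < r0 gives
             * shift * int_0^r0 4 pi r^2 dr = shift * (4 pi/3) r0^3; rc3
             * holds r0^3 (the shift radius cubed) at this point. */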
            eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
            eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;

            /* Calculate the contribution in the range [r0,r1] where we
             * modify the potential. For a pure potential-shift modifier we will
             * have ri0==ri1, and there will not be any contribution here.
             */
            for (i = 0; i < 2; i++)
                integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
                eners[i] -= enersum;

            /* Alright: Above we compensated by REMOVING the parts outside r0
             * corresponding to the ideal VdW 1/r6 and 1/r12 potentials.
             *
             * Regardless of whether r0 is the point where we start switching,
             * or the cutoff where we calculated the constant shift, we include
             * all the parts we are missing out to infinity from r0 by
             * calculating the analytical dispersion correction.
             */
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            virs[0]  +=  8.0*M_PI/rc3;
            virs[1]  += -16.0*M_PI/(3.0*rc9);
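            /* For reference, these are the standard analytical tails:
             *   int_r0^inf 4 pi r^2 (-1/r^6)  dr = -4 pi/(3 r0^3)
             *   int_r0^inf 4 pi r^2 ( 1/r^12) dr =  4 pi/(9 r0^9)
             * and for the virial, int_r0^inf 4 pi r^3 (dU/dr) dr gives
             * 8 pi/r0^3 for U = -1/r^6 and -16 pi/(3 r0^9) for U = 1/r^12. */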
2191 else if (fr->vdwtype == evdwCUT ||
2192 EVDW_PME(fr->vdwtype) ||
2193 fr->vdwtype == evdwUSER)
2195 if (fr->vdwtype == evdwUSER && fplog)
2198 "WARNING: using dispersion correction with user tables\n");
2201 /* Note that with LJ-PME, the dispersion correction is multiplied
2202 * by the difference between the actual C6 and the value of C6
2203 * that would produce the combination rule.
2204 * This means the normal energy and virial difference formulas
2208 rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
2210 /* Contribution beyond the cut-off */
2211 eners[0] += -4.0*M_PI/(3.0*rc3);
2212 eners[1] += 4.0*M_PI/(9.0*rc9);
2213 if (fr->vdw_modifier == eintmodPOTSHIFT)
2215 /* Contribution within the cut-off */
2216 eners[0] += -4.0*M_PI/(3.0*rc3);
2217 eners[1] += 4.0*M_PI/(3.0*rc9);
2219 /* Contribution beyond the cut-off */
2220 virs[0] += 8.0*M_PI/rc3;
2221 virs[1] += -16.0*M_PI/(3.0*rc9);
2226 "Dispersion correction is not implemented for vdw-type = %s",
2227 evdw_names[fr->vdwtype]);
2230 /* When we deprecate the group kernels the code below can go too */
2231 if (fr->vdwtype == evdwPME && fr->cutoff_scheme == ecutsGROUP)
2233 /* Calculate self-interaction coefficient (assuming that
2234 * the reciprocal-space contribution is constant in the
2235 * region that contributes to the self-interaction).
2237 fr->enershiftsix = gmx::power6(fr->ewaldcoeff_lj) / 6.0;
2239 eners[0] += -gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj)/3.0;
2240 virs[0] += gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj);
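        /* Background: the reciprocal-space part of the LJ-PME 1/r^6 kernel
         * tends to beta^6/6 for r -> 0 (with beta = fr->ewaldcoeff_lj), which
         * is the self-interaction constant above; its integral over all space
         * evaluates to (sqrt(pi)*beta)^3/3, giving the Ewald contributions
         * added to eners[0] and virs[0].
         */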
        fr->enerdiffsix    = eners[0];
        fr->enerdifftwelve = eners[1];
        /* The 0.5 is due to the Gromacs definition of the virial */
        fr->virdiffsix     = 0.5*virs[0];
        fr->virdifftwelve  = 0.5*virs[1];
    }
}
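/* Usage sketch: calc_dispcorr() below turns these per-pair differences into
 * system-wide corrections, in effect
 *   Edisp_corr = <C6> * ninter * (dens * enerdiffsix - enershiftsix)
 * with ninter the number of interacting pairs and dens the number density.
 */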
void calc_dispcorr(t_inputrec *ir, t_forcerec *fr,
                   matrix box, real lambda, tensor pres, tensor virial,
                   real *prescorr, real *enercorr, real *dvdlcorr)
{
    gmx_bool bCorrAll, bCorrPres;
    real     dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
    int      m;

    *prescorr = 0;
    *enercorr = 0;
    *dvdlcorr = 0;

    clear_mat(virial);
    clear_mat(pres);
    if (ir->eDispCorr != edispcNO)
    {
        bCorrAll  = (ir->eDispCorr == edispcAllEner ||
                     ir->eDispCorr == edispcAllEnerPres);
        bCorrPres = (ir->eDispCorr == edispcEnerPres ||
                     ir->eDispCorr == edispcAllEnerPres);

        invvol = 1/det(box);
        if (fr->n_tpi)
        {
            /* Only correct for the interactions with the inserted molecule */
            dens   = (fr->numAtomsForDispersionCorrection - fr->n_tpi)*invvol;
            ninter = fr->n_tpi;
        }
        else
        {
            dens   = fr->numAtomsForDispersionCorrection*invvol;
            ninter = 0.5*fr->numAtomsForDispersionCorrection;
        }
        if (ir->efep == efepNO)
        {
            avcsix    = fr->avcsix[0];
            avctwelve = fr->avctwelve[0];
        }
        else
        {
            avcsix    = (1 - lambda)*fr->avcsix[0]    + lambda*fr->avcsix[1];
            avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
        }
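        /* With free energy the average C6 and C12 are interpolated linearly
         * between the A state (lambda = 0) and the B state (lambda = 1), so
         * d<C6>/dlambda = avcsix[1] - avcsix[0]; that end-state difference is
         * exactly the factor entering dvdlambda below, since enerdiff itself
         * does not depend on lambda.
         */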
        enerdiff   = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
        *enercorr += avcsix*enerdiff;
        dvdlambda  = 0.0;
        if (ir->efep != efepNO)
        {
            dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
        }
        if (bCorrAll)
        {
            enerdiff   = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
            *enercorr += avctwelve*enerdiff;
            if (fr->efep != efepNO)
            {
                dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
            }
        }
        if (bCorrPres)
        {
            svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
            if (ir->eDispCorr == edispcAllEnerPres)
            {
                svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
            }
            /* The factor 2 is because of the Gromacs virial definition */
            spres = -2.0*invvol*svir*PRESFAC;
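            /* In GROMACS the pressure is P = (2/V)*(Ekin - Xi) with Xi the
             * virial, so a per-diagonal-component virial correction svir
             * (note the division by 3 above) maps to a pressure correction
             * of -2*svir/V; PRESFAC converts from internal units
             * (kJ mol^-1 nm^-3) to bar.
             */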
            for (m = 0; m < DIM; m++)
            {
                virial[m][m] += svir;
                pres[m][m]   += spres;
            }
            *prescorr += spres;
        }
        /* Can't currently control when it prints; for now, just print when debugging */
        if (debug)
        {
            if (bCorrAll)
            {
                fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
                        avcsix, avctwelve);
            }
            if (bCorrPres)
            {
                fprintf(debug,
                        "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
                        *enercorr, spres, svir);
            }
            else
            {
                fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
            }
        }
        if (fr->efep != efepNO)
        {
            *dvdlcorr += dvdlambda;
        }
    }
}
void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
                  t_graph *graph, rvec x[])
{
    if (fplog)
    {
        fprintf(fplog, "Removing pbc first time\n");
    }
    calc_shifts(box, fr->shift_vec);
    if (graph)
    {
        mk_mshift(fplog, graph, fr->ePBC, box, x);
        if (gmx_debug_at)
        {
            p_graph(debug, "do_pbc_first 1", graph);
        }
        shift_self(graph, box, x);
        /* By doing an extra mk_mshift the molecules that are broken
         * because they were e.g. imported from another software
         * will be made whole again. Such are the healing powers
         * of GROMACS.
         */
        mk_mshift(fplog, graph, fr->ePBC, box, x);
        if (gmx_debug_at)
        {
            p_graph(debug, "do_pbc_first 2", graph);
        }
    }
    if (fplog)
    {
        fprintf(fplog, "Done rmpbc\n");
    }
}
static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                            const gmx_mtop_t *mtop, rvec x[],
                            gmx_bool bFirst)
{
    t_graph        *graph;
    int             mb, as, mol;
    gmx_molblock_t *molb;

    if (bFirst && fplog)
    {
        fprintf(fplog, "Removing pbc first time\n");
    }

    snew(graph, 1);
    as = 0;
    for (mb = 0; mb < mtop->nmolblock; mb++)
    {
        molb = &mtop->molblock[mb];
        if (molb->natoms_mol == 1 ||
            (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
        {
            /* Just one atom or charge group in the molecule, no PBC required */
            as += molb->nmol*molb->natoms_mol;
        }
        else
        {
            /* Pass nullptr instead of fplog to avoid graph prints for each molecule type */
            mk_graph_ilist(nullptr, mtop->moltype[molb->type].ilist,
                           0, molb->natoms_mol, FALSE, FALSE, graph);
            for (mol = 0; mol < molb->nmol; mol++)
            {
                mk_mshift(fplog, graph, ePBC, box, x+as);

                shift_self(graph, box, x+as);
                /* The molecule is whole now.
                 * We don't need the second mk_mshift call as in do_pbc_first,
                 * since we no longer need this graph.
                 */

                as += molb->natoms_mol;
            }
            done_graph(graph);
        }
    }
    sfree(graph);
}
void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
                       const gmx_mtop_t *mtop, rvec x[])
{
    low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
}

void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                 gmx_mtop_t *mtop, rvec x[])
{
    low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
}
void put_atoms_in_box_omp(int ePBC, const matrix box, int natoms, rvec x[])
{
    int t, nth;
    nth = gmx_omp_nthreads_get(emntDefault);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (t = 0; t < nth; t++)
    {
        try
        {
            int offset, len;
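            /* Static partition of the atom range: thread t handles indices
             * [natoms*t/nth, natoms*(t+1)/nth). E.g. natoms = 10, nth = 4
             * gives chunks of length 2, 3, 2, 3 at offsets 0, 2, 5, 7, so
             * every atom is covered exactly once with balanced chunk sizes.
             */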
            offset = (natoms*t    )/nth;
            len    = (natoms*(t + 1))/nth - offset;
            put_atoms_in_box(ePBC, box, len, x + offset);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
}
// TODO This can be cleaned up a lot, and move back to runner.cpp
void finish_run(FILE *fplog, const gmx::MDLogger &mdlog, t_commrec *cr,
                t_inputrec *inputrec,
                t_nrnb nrnb[], gmx_wallcycle_t wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                nonbonded_verlet_t *nbv,
                gmx_bool bWriteStat)
{
    t_nrnb *nrnb_tot = nullptr;
    double  delta_t  = 0;
    double  nbfs     = 0, mflop = 0;
    double  elapsed_time,
            elapsed_time_over_all_ranks,
            elapsed_time_over_all_threads,
            elapsed_time_over_all_threads_over_all_ranks;
    if (!walltime_accounting_get_valid_finish(walltime_accounting))
    {
        GMX_LOG(mdlog.warning).asParagraph().appendText("Simulation ended prematurely, no performance report will be written.");
        return;
    }

    if (cr->nnodes > 1)
    {
        snew(nrnb_tot, 1);
#if GMX_MPI
        MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
#endif
    }
    else
    {
        nrnb_tot = nrnb;
    }
    elapsed_time                                 = walltime_accounting_get_elapsed_time(walltime_accounting);
    elapsed_time_over_all_ranks                  = elapsed_time;
    elapsed_time_over_all_threads                = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
    elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
#if GMX_MPI
    if (cr->nnodes > 1)
    {
        /* reduce elapsed_time over all MPI ranks in the current simulation */
        MPI_Allreduce(&elapsed_time,
                      &elapsed_time_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
        elapsed_time_over_all_ranks /= cr->nnodes;
        /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
         * current simulation. */
        MPI_Allreduce(&elapsed_time_over_all_threads,
                      &elapsed_time_over_all_threads_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
    }
#endif
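    /* Note the asymmetry: the per-rank wall times are summed and divided by
     * the rank count (an average, since the ranks run concurrently), while
     * the over-all-threads time is kept as a plain sum because it represents
     * total consumed CPU time.
     */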
    if (SIMMASTER(cr))
    {
        print_flop(fplog, nrnb_tot, &nbfs, &mflop);
    }
    if (cr->nnodes > 1)
    {
        sfree(nrnb_tot);
    }

    if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
    {
        print_dd_statistics(cr, inputrec, fplog);
    }
    /* TODO Move the responsibility for any scaling by thread counts
     * to the code that handled the thread region, so that there's a
     * mechanism to keep cycle counting working during the transition
     * to task parallelism. */
    int nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
    int nthreads_pme = gmx_omp_nthreads_get(emntPME);
    wallcycle_scale_by_num_threads(wcycle, cr->duty == DUTY_PME, nthreads_pp, nthreads_pme);
    auto cycle_sum(wallcycle_sum(cr, wcycle));
    if (SIMMASTER(cr))
    {
        struct gmx_wallclock_gpu_t* gputimes = use_GPU(nbv) ? nbnxn_gpu_get_timings(nbv->gpu_nbv) : nullptr;

        wallcycle_print(fplog, mdlog, cr->nnodes, cr->npmenodes, nthreads_pp, nthreads_pme,
                        elapsed_time_over_all_ranks,
                        wcycle, cycle_sum, gputimes);

        if (EI_DYNAMICS(inputrec->eI))
        {
            delta_t = inputrec->delta_t;
        }

        if (fplog)
        {
            print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
        if (bWriteStat)
        {
            print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
    }
}
extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, gmx::ArrayRef<real> lambda, double *lam0)
{
    /* This function works, but could probably use a logic rewrite to keep all the different
       types of efep straight. */

    if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
    {
        return;
    }

    t_lambda *fep = ir->fepvals;
    *fep_state    = fep->init_fep_state; /* this might overwrite the checkpoint
                                            if checkpoint is set -- a kludge is in for now
                                            to prevent this. */

    for (int i = 0; i < efptNR; i++)
    {
        /* overwrite lambda state with init_lambda for now for backwards compatibility */
        if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
        {
            lambda[i] = fep->init_lambda;
            if (lam0)
            {
                lam0[i] = lambda[i];
            }
        }
        else
        {
            lambda[i] = fep->all_lambda[i][*fep_state];
            if (lam0)
            {
                lam0[i] = lambda[i];
            }
        }
    }
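    /* In short: a non-negative init-lambda applies a single scalar to every
     * lambda component (the legacy behavior), while init-lambda-state picks
     * the per-component values all_lambda[i][fep_state] from the lambda
     * vector table.
     */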
    if (ir->bSimTemp)
    {
        /* need to rescale control temperatures to match current state */
        for (int i = 0; i < ir->opts.ngtc; i++)
        {
            if (ir->opts.ref_t[i] > 0)
            {
                ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
            }
        }
    }
    /* Send to the log the information on the current lambdas */
    if (fplog != nullptr)
    {
        fprintf(fplog, "Initial vector of lambda components:[ ");
        for (const auto &l : lambda)
        {
            fprintf(fplog, "%10.4f ", l);
        }
        fprintf(fplog, "]\n");
    }
}
void init_md(FILE *fplog,
             t_commrec *cr, gmx::IMDOutputProvider *outputProvider,
             t_inputrec *ir, const gmx_output_env_t *oenv,
             double *t, double *t0,
             gmx::ArrayRef<real> lambda, int *fep_state, double *lam0,
             t_nrnb *nrnb, gmx_mtop_t *mtop,
             gmx_update_t **upd,
             int nfile, const t_filenm fnm[],
             gmx_mdoutf_t *outf, t_mdebin **mdebin,
             tensor force_vir, tensor shake_vir, rvec mu_tot,
             gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags,
             gmx_wallcycle_t wcycle)
{
    int i;
    /* Initial values */
    *t = *t0 = ir->init_t;

    *bSimAnn = FALSE;
    for (i = 0; i < ir->opts.ngtc; i++)
    {
        /* set bSimAnn if any group is being annealed */
        if (ir->opts.annealing[i] != eannNO)
        {
            *bSimAnn = TRUE;
        }
    }

    /* Initialize lambda variables */
    initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
    // TODO upd is never NULL in practice, but the analysers don't know that
    if (upd)
    {
        *upd = init_update(ir);
    }
    if (*bSimAnn)
    {
        update_annealing_target_temp(ir, ir->init_t, upd ? *upd : nullptr);
    }

    if (vcm != nullptr)
    {
        *vcm = init_vcm(fplog, &mtop->groups, ir);
    }
    if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
    {
        if (ir->etc == etcBERENDSEN)
        {
            please_cite(fplog, "Berendsen84a");
        }
        if (ir->etc == etcVRESCALE)
        {
            please_cite(fplog, "Bussi2007a");
        }
        if (ir->eI == eiSD1)
        {
            please_cite(fplog, "Goga2012");
        }
    }
    init_nrnb(nrnb);

    if (nfile != -1)
    {
        *outf = init_mdoutf(fplog, nfile, fnm, Flags, cr, outputProvider, ir, mtop, oenv, wcycle);
        *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? nullptr : mdoutf_get_fp_ene(*outf),
                              mtop, ir, mdoutf_get_fp_dhdl(*outf));
    }

    /* Initiate variables */
    clear_mat(force_vir);
    clear_mat(shake_vir);