/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed-forces/bonded.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/genborn.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/pleasecite.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/sysinfo.h"

#include "nbnxn_gpu.h"
void print_time(FILE                     *out,
                gmx_walltime_accounting_t walltime_accounting,
                gmx_int64_t               step,
                t_inputrec               *ir,
                t_commrec gmx_unused     *cr)
{
    time_t finish;
    char   timebuf[STRLEN];
    char   buf[48];
    double dt, elapsed_seconds, time_per_step;

    fprintf(out, "step %s", gmx_step_str(step, buf));

    if (step >= ir->nstlist)
    {
        double seconds_since_epoch = gmx_gettime();
        elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
        time_per_step   = elapsed_seconds/(step - ir->init_step + 1);
        dt              = (ir->nsteps + ir->init_step - step)*time_per_step;

        finish = (time_t) (seconds_since_epoch + dt);
        gmx_ctime_r(&finish, timebuf, STRLEN);
        sprintf(buf, "%s", timebuf);
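        /* gmx_ctime_r produces a ctime-style string that ends in a
         * newline; the next line chops it off so the estimated finish
         * time stays on the same output line. */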
        buf[strlen(buf)-1] = '\0';
        fprintf(out, ", will finish %s", buf);

        fprintf(out, ", remaining wall clock time: %5d s          ", (int)dt);
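        /* delta_t is in ps, so delta_t/1000 is ns per MD step; multiplying
         * by 24*60*60 s/day and dividing by the measured seconds per step
         * yields the ns/day rate printed below. */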
        fprintf(out, " performance: %.1f ns/day    ",
                ir->delta_t/1000*24*60*60/time_per_step);
void print_date_and_time(FILE *fplog, int nodeid, const char *title,
                         double the_time)
{
    char time_string[STRLEN];

    char   timebuf[STRLEN];
    time_t temp_time = (time_t) the_time;

    gmx_ctime_r(&temp_time, timebuf, STRLEN);
    for (i = 0; timebuf[i] >= ' '; i++)
    {
        time_string[i] = timebuf[i];
    }
    time_string[i] = '\0';

    fprintf(fplog, "%s on rank %d %s\n", title, nodeid, time_string);
void print_start(FILE *fplog, t_commrec *cr,
                 gmx_walltime_accounting_t walltime_accounting,
                 const char *name)
{
    char buf[STRLEN];

    sprintf(buf, "Started %s", name);
    print_date_and_time(fplog, cr->nodeid, buf,
                        walltime_accounting_get_start_time_stamp(walltime_accounting));
}
static void sum_forces(rvec f[], const PaddedRVecVector *forceToAdd)
{
    /* TODO: remove this - 1 when padding is properly implemented */
    int         end  = forceToAdd->size() - 1;
    const rvec *fAdd = as_rvec_array(forceToAdd->data());

    // cppcheck-suppress unreadVariable
    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        rvec_inc(f[i], fAdd[i]);
    }
}
static void calc_virial(int start, int homenr, rvec x[], rvec f[],
                        tensor vir_part, t_graph *graph, matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    int i;

    /* The short-range virial from surrounding boxes */
    calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md.
     */
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    /* Add position restraint contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][i] += fr->vir_diag_posres[i];
    }

    /* Add wall contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][ZZ] += fr->vir_wall_z[i];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}
static void pull_potential_wrapper(t_commrec *cr,
                                   t_inputrec *ir,
                                   matrix box, rvec x[],
                                   rvec f[],
                                   tensor vir_force,
                                   t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   real *lambda,
                                   double t,
                                   gmx_wallcycle_t wcycle)
{
    t_pbc pbc;
    real  dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     * The virial contribution is calculated directly,
     * which is why we call pull_potential after calc_virial.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->ePBC, box);
    dvdl = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(ir->pull_work, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}
static void pme_receive_force_ener(t_commrec      *cr,
                                   gmx_wallcycle_t wcycle,
                                   gmx_enerdata_t *enerd,
                                   t_forcerec     *fr)
{
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    gmx_pme_receive_f(cr, as_rvec_array(fr->f_novirsum->data()), fr->vir_el_recip, &e_q,
                      fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);

    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}
static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
                               gmx_int64_t step, real forceTolerance,
                               const rvec *x, const rvec *f)
{
    real           force2Tolerance = gmx::square(forceTolerance);
    std::uintmax_t numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
    {
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
        {
            fprintf(fp, "step %" GMX_PRId64 " atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    step,
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
        }
        if (nonFinite)
        {
            numNonFinite++;
        }
    }
    if (numNonFinite > 0)
    {
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" GMX_PRId64 " detected non-finite forces on %ju atoms", step, numNonFinite);
    }
}
static void post_process_forces(t_commrec *cr,
                                gmx_int64_t step,
                                t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                                gmx_localtop_t *top,
                                matrix box, rvec x[],
                                rvec f[],
                                tensor vir_force,
                                t_mdatoms *mdatoms,
                                t_graph *graph,
                                t_forcerec *fr, gmx_vsite_t *vsite,
                                int flags)
{
    if (fr->bF_NoVirSum)
    {
        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, as_rvec_array(fr->f_novirsum->data()), nullptr,
                           (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Now add the forces, this is local */
            sum_forces(f, fr->f_novirsum);

            if (EEL_FULL(fr->eeltype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_el_recip, vir_force);
            }
            if (EVDW_PME(fr->vdwtype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_lj_recip, vir_force);
            }
            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}
static void do_nb_verlet(t_forcerec *fr,
                         interaction_const_t *ic,
                         gmx_enerdata_t *enerd,
                         int flags, int ilocality,
                         int clearF,
                         t_nrnb *nrnb,
                         gmx_wallcycle_t wcycle)
{
    int                       enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
    nonbonded_verlet_group_t *nbvg;
    gmx_bool                  bUsingGpuKernels;

    if (!(flags & GMX_FORCE_NONBONDED))
    {
        /* skip non-bonded calculation */
        return;
    }

    nbvg = &fr->nbv->grp[ilocality];

    /* GPU kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    bUsingGpuKernels = (nbvg->kernel_type == nbnxnk8x8x8_GPU);

    if (!bUsingGpuKernels)
    {
        wallcycle_sub_start(wcycle, ewcsNONBONDED);
    }
    switch (nbvg->kernel_type)
    {
        case nbnxnk4x4_PlainC:
            nbnxn_kernel_ref(&nbvg->nbl_lists,
                             enerd->grpp.ener[egCOULSR],
                             fr->bBHAM ?
                             enerd->grpp.ener[egBHAMSR] :
                             enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_4xN:
            nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
                                  enerd->grpp.ener[egCOULSR],
                                  fr->bBHAM ?
                                  enerd->grpp.ener[egBHAMSR] :
                                  enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_2xNN:
            nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
                                   enerd->grpp.ener[egCOULSR],
                                   fr->bBHAM ?
                                   enerd->grpp.ener[egBHAMSR] :
                                   enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk8x8x8_GPU:
            nbnxn_gpu_launch_kernel(fr->nbv->gpu_nbv, nbvg->nbat, flags, ilocality);
            break;

        case nbnxnk8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
                                 nbvg->nbat->out[0].f,
                                 enerd->grpp.ener[egCOULSR],
                                 fr->bBHAM ?
                                 enerd->grpp.ener[egBHAMSR] :
                                 enerd->grpp.ener[egLJSR]);
            break;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
    }
    if (!bUsingGpuKernels)
    {
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

    if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!bUsingGpuKernels && nbvg->ewald_excl == ewaldexclAnalytical) ||
             (bUsingGpuKernels && nbnxn_gpu_is_kernel_ewald_analytical(fr->nbv->gpu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (flags & GMX_FORCE_ENERGY)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj  += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
             nbvg->nbl_lists.natpair_ljq);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
             nbvg->nbl_lists.natpair_lj);
    /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
             nbvg->nbl_lists.natpair_q);

    if (ic->vdw_modifier == eintmodFORCESWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdw_modifier == eintmodPOTSWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdwtype == evdwPME)
    {
        /* We add up the LJ Ewald cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
}
static void do_nb_verlet_fep(nbnxn_pairlist_set_t *nbl_lists,
                             t_forcerec *fr,
                             rvec x[],
                             rvec f[],
                             t_mdatoms *mdatoms,
                             t_lambda *fepvals,
                             real *lambda,
                             gmx_enerdata_t *enerd,
                             int flags,
                             t_nrnb *nrnb,
                             gmx_wallcycle_t wcycle)
{
    int              donb_flags;
    nb_kernel_data_t kernel_data;
    real             lam_i[efptNR];
    real             dvdl_nb[efptNR];
    int              th;
    int              i, j;

    donb_flags = 0;
    /* Add short-range interactions */
    donb_flags |= GMX_NONBONDED_DO_SR;

    /* Currently all group scheme kernels always calculate (shift-)forces */
    if (flags & GMX_FORCE_FORCES)
    {
        donb_flags |= GMX_NONBONDED_DO_FORCE;
    }
    if (flags & GMX_FORCE_VIRIAL)
    {
        donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
    }
    if (flags & GMX_FORCE_ENERGY)
    {
        donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
    }

    kernel_data.flags  = donb_flags;
    kernel_data.lambda = lambda;
    kernel_data.dvdl   = dvdl_nb;

    kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR];
    kernel_data.energygrp_vdw  = enerd->grpp.ener[egLJSR];

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i] = 0;
    }

    assert(gmx_omp_nthreads_get(emntNonbonded) == nbl_lists->nnbl);
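    /* The free-energy pair lists are set up with exactly one list per
     * non-bonded OpenMP thread (asserted above), so each iteration of the
     * parallel loop below works on its own list. */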
    wallcycle_sub_start(wcycle, ewcsNONBONDED);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
    for (th = 0; th < nbl_lists->nnbl; th++)
    {
        try
        {
            gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                      x, f, fr, mdatoms, &kernel_data, nrnb);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    /* If we do foreign lambda and we have soft-core interactions
     * we have to recalculate the (non-linear) energies contributions.
     */
    if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
    {
        kernel_data.flags          = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE)) | GMX_NONBONDED_DO_FOREIGNLAMBDA;
        kernel_data.lambda         = lam_i;
        kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR];
        kernel_data.energygrp_vdw  = enerd->foreign_grpp.ener[egLJSR];
        /* Note that we add to kernel_data.dvdl, but ignore the result */

        for (i = 0; i < enerd->n_lambda; i++)
        {
            for (j = 0; j < efptNR; j++)
            {
                lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
            }
            reset_foreign_enerdata(enerd);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
            for (th = 0; th < nbl_lists->nnbl; th++)
            {
                try
                {
                    gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                              x, f, fr, mdatoms, &kernel_data, nrnb);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }

            sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
            enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
        }
    }

    wallcycle_sub_stop(wcycle, ewcsNONBONDED);
}
gmx_bool use_GPU(const nonbonded_verlet_t *nbv)
{
    return nbv != nullptr && nbv->bUseGPU;
}
static gmx_inline void clear_rvecs_omp(int n, rvec v[])
{
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (nth == 1)
    {
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
}
/*! \brief This routine checks if the potential energy is finite.
 *
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetics. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] enerd The energy data; the non-bonded group energies need to be added in here before calling this routine
 */
static void checkPotentialEnergyValidity(const gmx_enerdata_t *enerd)
{
    if (!std::isfinite(enerd->term[F_EPOT]))
    {
        gmx_fatal(FARGS, "The total potential energy is %g, which is not finite. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A non-finite potential energy can be caused by overlapping interactions in bonded interactions or very large or NaN coordinate values. Usually this is caused by a badly or non-equilibrated initial configuration or incorrect interactions or parameters in the topology.",
                  enerd->term[F_EPOT],
                  enerd->term[F_LJ],
                  enerd->term[F_COUL_SR]);
    }
}
void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
                         t_inputrec *inputrec,
                         gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                         gmx_localtop_t *top,
                         gmx_groups_t gmx_unused *groups,
                         matrix box, rvec x[], history_t *hist,
                         PaddedRVecVector *force,
                         tensor vir_force,
                         t_mdatoms *mdatoms,
                         gmx_enerdata_t *enerd, t_fcdata *fcd,
                         real *lambda, t_graph *graph,
                         t_forcerec *fr, interaction_const_t *ic,
                         gmx_vsite_t *vsite, rvec mu_tot,
                         double t, gmx_edsam_t ed,
                         gmx_bool bBornRadii,
                         int flags)
{
    int                 cg1, i, j;
    double              mu[2*DIM];
    gmx_bool            bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool            bDoForces, bUseGPU, bUseOrEmulGPU;
    gmx_bool            bDiffKernels = FALSE;
    rvec                vzero, box_diag;
    float               cycles_pme, cycles_force = 0, cycles_wait_gpu = 0;
    /* TODO To avoid loss of precision, float can't be used for a
     * cycle count. Build an object that can do this right and perhaps
     * also be used by gmx_wallcycle_t */
    gmx_cycles_t        cycleCountBeforeLocalWorkCompletes = 0;
    nonbonded_verlet_t *nbv;

    const int           start  = 0;
    const int           homenr = mdatoms->homenr;

    nbv = fr->nbv;

    clear_mat(vir_force);

    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }
    else
    {
        cg1 = top->cgs.nr;
    }

    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces     = (flags & GMX_FORCE_FORCES);
    bUseGPU       = fr->nbv->bUseGPU;
    bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);

    update_forcerec(fr, box);

    if (inputrecNeedMutot(inputrec))
    {
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
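        /* Assumption on the layout (consistent with the gmx_sumd(2*DIM, mu, cr)
         * call and the mu[i*DIM + j] unpacking further down): mu[0..DIM-1]
         * holds the state A dipole and mu[DIM..2*DIM-1] the state B dipole,
         * so a single reduction sums both at once. */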
        calc_mu(start, homenr,
                x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                mu, mu+DIM);
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }

    nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
                                 fr->shift_vec, nbv->grp[0].nbat);

#if GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        gmx_bool bBS;
        matrix   boxs;

        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */

        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */

    /* do gridding for pair search */
    if (bNS)
    {
        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        clear_rvec(vzero);
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!fr->bDomDec)
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
                              0, vzero, box_diag,
                              0, mdatoms->homenr, -1, fr->cginfo, x,
                              0, nullptr,
                              nbv->grp[eintLocal].kernel_type,
                              nbv->grp[eintLocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
                                       fr->cginfo, x,
                                       nbv->grp[eintNonlocal].kernel_type,
                                       nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        if (nbv->ngrp == 1 ||
            nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        else
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
                               nbv->nbs, mdatoms, fr->cginfo);
            nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        wallcycle_stop(wcycle, ewcNS);
    }

    /* initialize the GPU atom data and copy shift vector */
    if (bUseGPU)
    {
        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_gpu_init_atomdata(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }

        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        nbnxn_gpu_upload_shiftvec(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* do local pair search */
    if (bNS)
    {
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
                            &top->excls,
                            ic->rlist,
                            nbv->min_ci_balanced,
                            &nbv->grp[eintLocal].nbl_lists,
                            eintLocal,
                            nbv->grp[eintLocal].kernel_type,
                            nrnb);
        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);

        if (bUseGPU)
        {
            /* initialize local pair-list on the GPU */
            nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
                                    nbv->grp[eintLocal].nbl_lists.nbl[0],
                                    eintLocal);
        }
        wallcycle_stop(wcycle, ewcNS);
    }
    else
    {
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
                                        nbv->grp[eintLocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (bUseGPU)
    {
        wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
        /* launch local nonbonded F on GPU */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
                     nrnb, wcycle);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (DOMAINDECOMP(cr))
    {
        bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
                        nbv->grp[eintLocal].kernel_type);

        if (bDiffKernels)
        {
            /* With GPU+CPU non-bonded calculations we need to copy
             * the local coordinates to the non-local nbat struct
             * (in CPU format) as the non-local kernel call also
             * calculates the local - non-local interactions.
             */
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (bDiffKernels)
            {
                nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
            }

            nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
                                &top->excls,
                                ic->rlist,
                                nbv->min_ci_balanced,
                                &nbv->grp[eintNonlocal].nbl_lists,
                                eintNonlocal,
                                nbv->grp[eintNonlocal].kernel_type,
                                nrnb);

            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_GPU)
            {
                /* initialize non-local pair-list on the GPU */
                nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
                                        nbv->grp[eintNonlocal].nbl_lists.nbl[0],
                                        eintNonlocal);
            }
            wallcycle_stop(wcycle, ewcNS);
        }
        else
        {
            wallcycle_start(wcycle, ewcMOVEX);
            dd_move_x(cr->dd, box, x);
            wallcycle_stop(wcycle, ewcMOVEX);

            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bUseGPU && !bDiffKernels)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
            /* launch non-local nonbonded F on GPU */
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
                         nrnb, wcycle);
            cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
    }

    if (bUseGPU)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        if (DOMAINDECOMP(cr) && !bDiffKernels)
        {
            nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintNonlocal].nbat,
                                     flags, eatNonlocal);
        }
        nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintLocal].nbat,
                                 flags, eatLocal);
        cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    if (bStateChanged && inputrecNeedMutot(inputrec))
    {
        if (PAR(cr))
        {
            gmx_sumd(2*DIM, mu, cr);
        }

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }

        if (fr->efep == efepNO)
        {
            copy_rvec(fr->mu_tot[0], mu_tot);
        }
        else
        {
            for (j = 0; j < DIM; j++)
            {
                mu_tot[j] =
                    (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                    lambda[efptCOUL]*fr->mu_tot[1][j];
            }
        }
    }
    /* Reset energies */
    reset_enerdata(enerd);
    clear_rvecs(SHIFTS, fr->fshift);

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Temporary solution until all routines take PaddedRVecVector */
    rvec *f = as_rvec_array(force->data());

    /* Start the force cycle counter.
     * This counter is stopped after do_force_lowlevel.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);
    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->forceBufferNoVirialSummation;
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the virial sum.
                 */
                fr->f_novirsum = force;
            }
        }

        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                /* TODO: remove this - 1 when padding is properly implemented */
                clear_rvecs_omp(fr->f_novirsum->size() - 1,
                                as_rvec_array(fr->f_novirsum->data()));
            }
        }
        /* Clear the short- and long-range forces */
        clear_rvecs_omp(fr->natoms_force_constr, f);

        clear_rvec(fr->vir_diag_posres);
    }

    if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
    {
        clear_pull_forces(inputrec->pull_work);
    }

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, because in that
     * function, the listed forces are calculated before PME, which
     * does communication. With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */

    if (!bUseOrEmulGPU)
    {
        /* Maybe we should move this into do_force_lowlevel */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
                     nrnb, wcycle);
    }

    if (fr->efep != efepNO)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }

        if (DOMAINDECOMP(cr) &&
            fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }
    }

    if (!bUseOrEmulGPU || bDiffKernels)
    {
        int aloc;

        if (DOMAINDECOMP(cr))
        {
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
                         bDiffKernels ? enbvClearFYes : enbvClearFNo,
                         nrnb, wcycle);
        }

        if (!bUseOrEmulGPU)
        {
            aloc = eintLocal;
        }
        else
        {
            aloc = eintNonlocal;
        }

        /* Add all the non-bonded force to the normal force array.
         * This can be split into a local and a non-local part when overlapping
         * communication with calculation with domain decomposition.
         */
        cycles_force += wallcycle_stop(wcycle, ewcFORCE);
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_start_nocount(wcycle, ewcFORCE);

        /* if there are multiple fshift output buffers reduce them */
        if ((flags & GMX_FORCE_VIRIAL) &&
            nbv->grp[aloc].nbl_lists.nnbl > 1)
        {
            /* This is not in a subcounter because it takes a
               negligible and constant-sized amount of time */
            nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
                                                     fr->fshift);
        }
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, enerd, fcd, top, fr->born,
                      bBornRadii, box,
                      inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
                      flags, &cycles_pme);

    cycles_force += wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }

    if (bUseOrEmulGPU && !bDiffKernels)
    {
        /* wait for non-local forces (or calculate in emulation mode) */
        if (DOMAINDECOMP(cr))
        {
            if (bUseGPU)
            {
                float cycles_tmp;

                wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
                nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
                                       flags, eatNonlocal,
                                       enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                       fr->fshift);
                cycles_tmp       = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
                cycles_wait_gpu += cycles_tmp;
                cycles_force    += cycles_tmp;
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
                             nrnb, wcycle);
                cycles_force += wallcycle_stop(wcycle, ewcFORCE);
            }
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
            /* skip the reduction if there was no non-local work to do */
            if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
            {
                nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
                                               nbv->grp[eintNonlocal].nbat, f);
            }
            wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }
    }

    if (bDoForces && DOMAINDECOMP(cr))
    {
        if (bUseGPU)
        {
            /* We are done with the CPU compute, but the GPU local non-bonded
             * kernel can still be running while we communicate the forces.
             * We start a counter here, so we can, hopefully, time the rest
             * of the GPU kernel execution and data transfer.
             */
            cycleCountBeforeLocalWorkCompletes = gmx_cycles_read();
        }

        /* Communicate the forces */
        wallcycle_start(wcycle, ewcMOVEF);
        dd_move_f(cr->dd, f, fr->fshift);
        wallcycle_stop(wcycle, ewcMOVEF);
    }

    if (bUseOrEmulGPU)
    {
        /* wait for local forces (or calculate in emulation mode) */
        if (bUseGPU)
        {
            float       cycles_tmp, cycles_wait_est;
            /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
             * is between 0.5 and 1.5 Mcycles. So 2 MCycles is an overestimate,
             * but even with a step of 0.1 ms the difference is less than 1%
             * of the step time.
             */
            const float gpuWaitApiOverheadMargin = 2e6f; /* cycles */

            wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
            nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
                                   flags, eatLocal,
                                   enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                   fr->fshift);
            cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

            if (bDoForces && DOMAINDECOMP(cr))
            {
                cycles_wait_est = gmx_cycles_read() - cycleCountBeforeLocalWorkCompletes;

                if (cycles_tmp < gpuWaitApiOverheadMargin)
                {
                    /* We measured few cycles, it could be that the kernel
                     * and transfer finished earlier and there was no actual
                     * wait time, only API call overhead.
                     * Then the actual time could be anywhere between 0 and
                     * cycles_wait_est. As a compromise, we use half the time.
                     */
                    cycles_wait_est *= 0.5f;
                }
            }
            else
            {
                /* No force communication so we actually timed the wait */
                cycles_wait_est = cycles_tmp;
            }
            /* Even though this is after dd_move_f, the actual task we are
             * waiting for runs asynchronously with dd_move_f and we usually
             * have nothing to balance it with, so we can and should add
             * the time to the force time for load balancing.
             */
            cycles_force    += cycles_wait_est;
            cycles_wait_gpu += cycles_wait_est;

            /* now clear the GPU outputs while we finish the step on the CPU */
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_gpu_clear_outputs(nbv->gpu_nbv, flags);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
        else
        {
            wallcycle_start_nocount(wcycle, ewcFORCE);
            do_nb_verlet(fr, ic, enerd, flags, eintLocal,
                         DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                         nrnb, wcycle);
            wallcycle_stop(wcycle, ewcFORCE);
        }
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
                                       nbv->grp[eintLocal].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
            if (bUseGPU)
            {
                dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
            }
        }
    }

    if (bDoForces)
    {
        /* Compute forces due to electric field */
        if (fr->efield != nullptr)
        {
            fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);
        }

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        {
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, nullptr, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }

    if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
    {
        /* Since the COM pulling is always done mass-weighted, no forces are
         * applied to vsites and this call can be done after vsite spreading.
         */
        pull_potential_wrapper(cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t,
                               wcycle);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);

    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, wcycle, enerd, fr);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    if (flags & GMX_FORCE_ENERGY)
    {
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
        {
            checkPotentialEnergyValidity(enerd);
        }
    }
}
void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
                        t_inputrec *inputrec,
                        gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                        gmx_localtop_t *top,
                        gmx_groups_t *groups,
                        matrix box, rvec x[], history_t *hist,
                        PaddedRVecVector *force,
                        tensor vir_force,
                        t_mdatoms *mdatoms,
                        gmx_enerdata_t *enerd, t_fcdata *fcd,
                        real *lambda, t_graph *graph,
                        t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
                        double t, gmx_edsam_t ed,
                        gmx_bool bBornRadii,
                        int flags)
{
    int       cg0, cg1, i, j;
    double    mu[2*DIM];
    gmx_bool  bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool  bDoForces;
    float     cycles_pme, cycles_force;

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    cg0 = 0;
    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }
    else
    {
        cg1 = top->cgs.nr;
    }
    if (fr->n_tpi > 0)
    {
        cg1--;
    }

    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    /* Should we perform the long-range nonbonded evaluation inside the neighbor searching? */
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces     = (flags & GMX_FORCE_FORCES);

    update_forcerec(fr, box);

    if (inputrecNeedMutot(inputrec))
    {
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        calc_mu(start, homenr,
                x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                mu, mu+DIM);
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
                                     &(top->cgs), x, fr->cg_cm);
            inc_nrnb(nrnb, eNR_CGCM, homenr);
            inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }
    else if (bCalcCGCM)
    {
        calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
        inc_nrnb(nrnb, eNR_CGCM, homenr);
    }

    if (bCalcCGCM && gmx_debug_at)
    {
        pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
    }

#if GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        gmx_bool bBS;
        matrix   boxs;

        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */

        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */

    /* Communicate coordinates and sum dipole if necessary */
    if (DOMAINDECOMP(cr))
    {
        wallcycle_start(wcycle, ewcMOVEX);
        dd_move_x(cr->dd, box, x);
        wallcycle_stop(wcycle, ewcMOVEX);
    }

    if (inputrecNeedMutot(inputrec))
    {
        if (bStateChanged)
        {
            if (PAR(cr))
            {
                gmx_sumd(2*DIM, mu, cr);
            }
            for (i = 0; i < 2; i++)
            {
                for (j = 0; j < DIM; j++)
                {
                    fr->mu_tot[i][j] = mu[i*DIM + j];
                }
            }
        }
        if (fr->efep == efepNO)
        {
            copy_rvec(fr->mu_tot[0], mu_tot);
        }
        else
        {
            for (j = 0; j < DIM; j++)
            {
                mu_tot[j] =
                    (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
            }
        }
    }

    /* Reset energies */
    reset_enerdata(enerd);
    clear_rvecs(SHIFTS, fr->fshift);

    if (bNS)
    {
        wallcycle_start(wcycle, ewcNS);

        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        /* Do the actual neighbour searching */
        ns(fplog, fr, box,
           groups, top, mdatoms,
           cr, nrnb, bFillGrid);

        wallcycle_stop(wcycle, ewcNS);
    }

    if (inputrec->implicit_solvent && bNS)
    {
        make_gb_nblist(cr, inputrec->gb_algorithm,
                       x, box, fr, &top->idef, graph, fr->born);
    }

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Temporary solution until all routines take PaddedRVecVector */
    rvec *f = as_rvec_array(force->data());

    /* Start the force cycle counter.
     * This counter is stopped after do_force_lowlevel.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->forceBufferNoVirialSummation;
                /* TODO: remove this - 1 when padding is properly implemented */
                clear_rvecs(fr->f_novirsum->size() - 1,
                            as_rvec_array(fr->f_novirsum->data()));
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the virial sum.
                 */
                fr->f_novirsum = force;
            }
        }

        /* Clear the short- and long-range forces */
        clear_rvecs(fr->natoms_force_constr, f);

        clear_rvec(fr->vir_diag_posres);
    }
    if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
    {
        clear_pull_forces(inputrec->pull_work);
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, enerd, fcd, top, fr->born,
                      bBornRadii, box,
                      inputrec->fepvals, lambda,
                      graph, &(top->excls), fr->mu_tot,
                      flags,
                      &cycles_pme);

    cycles_force = wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
        }
    }

    if (bDoForces)
    {
        /* Compute forces due to electric field */
        if (fr->efield != nullptr)
        {
            fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);
        }

        /* Communicate the forces */
        if (DOMAINDECOMP(cr))
        {
            wallcycle_start(wcycle, ewcMOVEF);
            dd_move_f(cr->dd, f, fr->fshift);
            /* Do we need to communicate the separate force array
             * for terms that do not contribute to the single sum virial?
             * Position restraints and electric fields do not introduce
             * inter-cg forces, only full electrostatics methods do.
             * When we do not calculate the virial, fr->f_novirsum = f,
             * so we have already communicated these forces.
             */
            if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
                (flags & GMX_FORCE_VIRIAL))
            {
                dd_move_f(cr->dd, as_rvec_array(fr->f_novirsum->data()), nullptr);
            }
            wallcycle_stop(wcycle, ewcMOVEF);
        }

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        {
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, nullptr, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }

    if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
    {
        pull_potential_wrapper(cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t,
                               wcycle);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);

    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, wcycle, enerd, fr);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    if (flags & GMX_FORCE_ENERGY)
    {
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
        {
            checkPotentialEnergyValidity(enerd);
        }
    }
}
void do_force(FILE *fplog, t_commrec *cr,
              t_inputrec *inputrec,
              gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
              gmx_localtop_t *top,
              gmx_groups_t *groups,
              matrix box, PaddedRVecVector *coordinates, history_t *hist,
              PaddedRVecVector *force,
              tensor vir_force,
              t_mdatoms *mdatoms,
              gmx_enerdata_t *enerd, t_fcdata *fcd,
              gmx::ArrayRef<real> lambda, t_graph *graph,
              t_forcerec *fr,
              gmx_vsite_t *vsite, rvec mu_tot,
              double t, gmx_edsam_t ed,
              gmx_bool bBornRadii,
              int flags)
{
    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
    {
        flags &= ~GMX_FORCE_NONBONDED;
    }

    GMX_ASSERT(coordinates->size() >= static_cast<unsigned int>(fr->natoms_force + 1), "We might need 1 element extra for SIMD");
    GMX_ASSERT(force->size()       >= static_cast<unsigned int>(fr->natoms_force + 1), "We might need 1 element extra for SIMD");
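    /* The +1 elements asserted above cover possible (unaligned) 4-wide SIMD
     * loads touching the last rvec entry, as explained for the savex buffer
     * in do_constrain_first below. */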
    rvec *x = as_rvec_array(coordinates->data());

    switch (inputrec->cutoff_scheme)
    {
        case ecutsVERLET:
            do_force_cutsVERLET(fplog, cr, inputrec,
                                step, nrnb, wcycle,
                                top, groups,
                                box, x, hist,
                                force, vir_force,
                                mdatoms,
                                enerd, fcd,
                                lambda.data(), graph,
                                fr, fr->ic,
                                vsite, mu_tot,
                                t, ed,
                                bBornRadii,
                                flags);
            break;
        case ecutsGROUP:
            do_force_cutsGROUP(fplog, cr, inputrec,
                               step, nrnb, wcycle,
                               top, groups,
                               box, x, hist,
                               force, vir_force,
                               mdatoms,
                               enerd, fcd,
                               lambda.data(), graph,
                               fr, vsite, mu_tot,
                               t, ed,
                               bBornRadii,
                               flags);
            break;
        default:
            gmx_incons("Invalid cut-off scheme passed!");
    }
}
void do_constrain_first(FILE *fplog, gmx_constr_t constr,
                        t_inputrec *ir, t_mdatoms *md,
                        t_state *state, t_commrec *cr, t_nrnb *nrnb,
                        t_forcerec *fr, gmx_localtop_t *top)
{
    int         i, m, start, end;
    gmx_int64_t step;
    real        dt = ir->delta_t;
    real        dvdl_dum;
    rvec       *savex;

    /* We need to allocate one element extra, since we might use
     * (unaligned) 4-wide SIMD loads to access rvec entries.
     */
    snew(savex, state->natoms + 1);

    start = 0;
    end   = md->homenr;

    if (debug)
    {
        fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
                start, md->homenr, end);
    }

    /* Do a first constrain to reset particles... */
    step = ir->init_step;
    if (fplog)
    {
        char buf[STEPSTRSIZE];
        fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
                gmx_step_str(step, buf));
    }
    dvdl_dum = 0;

    /* constrain the current position */
    constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
              ir, cr, step, 0, 1.0, md,
              as_rvec_array(state->x.data()), as_rvec_array(state->x.data()), nullptr,
              fr->bMolPBC, state->box,
              state->lambda[efptBONDED], &dvdl_dum,
              nullptr, nullptr, nrnb, econqCoord);
    if (EI_VV(ir->eI))
    {
        /* constrain the initial velocity, and save it */
        /* also may be useful if we need the ekin from the halfstep for velocity verlet */
        constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
                  ir, cr, step, 0, 1.0, md,
                  as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), as_rvec_array(state->v.data()),
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  nullptr, nullptr, nrnb, econqVeloc);
    }
    /* constrain the initial velocities at t-dt/2 */
    if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
    {
        for (i = start; (i < end); i++)
        {
            for (m = 0; (m < DIM); m++)
            {
                /* Reverse the velocity */
                state->v[i][m] = -state->v[i][m];
                /* Store the position at t-dt in buf */
                savex[i][m] = state->x[i][m] + dt*state->v[i][m];
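                /* Note: v was negated just above, so x + dt*v steps
                 * backwards in time; savex receives the position at t = -dt. */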
            }
        }
        /* Shake the positions at t=-dt with the positions at t=0
         * as reference coordinates.
         */
        if (fplog)
        {
            char buf[STEPSTRSIZE];
            fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
                    gmx_step_str(step, buf));
        }
        dvdl_dum = 0;
        constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
                  ir, cr, step, -1, 1.0, md,
                  as_rvec_array(state->x.data()), savex, nullptr,
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  as_rvec_array(state->v.data()), nullptr, nrnb, econqCoord);

        for (i = start; i < end; i++)
        {
            for (m = 0; m < DIM; m++)
            {
                /* Re-reverse the velocities */
                state->v[i][m] = -state->v[i][m];
            }
        }
    }
    sfree(savex);
}
static void
integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
                double *enerout, double *virout)
{
    double enersum, virsum;
    double invscale, invscale2, invscale3;
    double r, ea, eb, ec, pa, pb, pc, pd;
    double y0, f, g, h;
    int    ri, offset;
    double tabfactor;

    invscale  = 1.0/scale;
    invscale2 = invscale*invscale;
    invscale3 = invscale*invscale2;

    /* Following summation derived from cubic spline definition,
     * Numerical Recipes in C, second edition, p. 113-116. Exact for
     * the cubic spline. We first calculate the negative of the
     * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
     * add the more standard, abrupt cutoff correction to that result,
     * yielding the long-range correction for a switched function. We
     * perform both the pressure and energy loops at the same time for
     * simplicity, as the computational cost is low. */

    if (offstart == 0)
    {
        /* Since the dispersion table has been scaled down a factor
         * 6.0 and the repulsion a factor 12.0 to compensate for the
         * c6/c12 parameters inside nbfp[] being scaled up (to save
         * flops in kernels), we need to correct for this.
         */
        tabfactor = 6.0;
    }
    else
    {
        tabfactor = 12.0;
    }

    enersum = 0.0;
    virsum  = 0.0;
    for (ri = rstart; ri < rend; ++ri)
    {
        r  = ri*invscale;
        ea = invscale3;
        eb = 2.0*invscale2*r;
        ec = invscale*r*r;

        pa = invscale3;
        pb = 3.0*invscale2*r;
        pc = 3.0*invscale*r*r;
        pd = r*r*r;

        /* this "8" is from the packing in the vdwtab array - perhaps
           should be defined? */
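        /* Layout assumed here (see the GMX_TABLE_INTERACTION_VDWREP_VDWDISP
         * assertion in calc_enervirdiff below): each table point stores 4
         * cubic-spline coefficients (Y, F, G, H) for dispersion followed by
         * 4 for repulsion, giving the stride of 8 with offstart 0 or 4. */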
        offset = 8*ri + offstart;
        y0     = vdwtab[offset];
        f      = vdwtab[offset+1];
        g      = vdwtab[offset+2];
        h      = vdwtab[offset+3];

        enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
        virsum  += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
    }
    *enerout = 4.0*M_PI*enersum*tabfactor;
    *virout  = 4.0*M_PI*virsum*tabfactor;
}
void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
{
    double eners[2], virs[2], enersum, virsum;
    double r0, rc3, rc9;
    int    ri0, ri1, i;
    real   scale, *vdwtab;

    fr->enershiftsix    = 0;
    fr->enershifttwelve = 0;
    fr->enerdiffsix     = 0;
    fr->enerdifftwelve  = 0;
    fr->virdiffsix      = 0;
    fr->virdifftwelve   = 0;

    if (eDispCorr != edispcNO)
    {
        for (i = 0; i < 2; i++)
        {
            eners[i] = 0;
            virs[i]  = 0;
        }
        if ((fr->vdw_modifier == eintmodPOTSHIFT) ||
            (fr->vdw_modifier == eintmodPOTSWITCH) ||
            (fr->vdw_modifier == eintmodFORCESWITCH) ||
            (fr->vdwtype == evdwSHIFT) ||
            (fr->vdwtype == evdwSWITCH))
        {
            if (((fr->vdw_modifier == eintmodPOTSWITCH) ||
                 (fr->vdw_modifier == eintmodFORCESWITCH) ||
                 (fr->vdwtype == evdwSWITCH)) && fr->rvdw_switch == 0)
            {
                gmx_fatal(FARGS,
                          "With dispersion correction rvdw-switch can not be zero "
                          "for vdw-type = %s", evdw_names[fr->vdwtype]);
            }

            /* TODO This code depends on the logic in tables.c that
               constructs the table layout, which should be made
               explicit in future cleanup. */
            GMX_ASSERT(fr->dispersionCorrectionTable->interaction == GMX_TABLE_INTERACTION_VDWREP_VDWDISP,
                       "Dispersion-correction code needs a table with both repulsion and dispersion terms");
            scale  = fr->dispersionCorrectionTable->scale;
            vdwtab = fr->dispersionCorrectionTable->data;

            /* Round the cut-offs to exact table values for precision */
            ri0 = static_cast<int>(floor(fr->rvdw_switch*scale));
            ri1 = static_cast<int>(ceil(fr->rvdw*scale));

            /* The code below has some support for handling force-switching, i.e.
             * when the force (instead of potential) is switched over a limited
             * region. This leads to a constant shift in the potential inside the
             * switching region, which we can handle by adding a constant energy
             * term in the force-switch case just like when we do potential-shift.
             *
             * For now this is not enabled, but to keep the functionality in the
             * code we check separately for switch and shift. When we do force-switch
             * the shifting point is rvdw_switch, while it is the cutoff when we
             * have a classical potential-shift.
             *
             * For a pure potential-shift the potential has a constant shift
             * all the way out to the cutoff, and that is it. For other forms
             * we need to calculate the constant shift up to the point where we
             * start modifying the potential.
             */
            ri0 = (fr->vdw_modifier == eintmodPOTSHIFT) ? ri1 : ri0;

            r0  = ri0/scale;
            rc3 = r0*r0*r0;
            rc9 = rc3*rc3*rc3;

            if ((fr->vdw_modifier == eintmodFORCESWITCH) ||
                (fr->vdwtype == evdwSHIFT))
            {
                /* Determine the constant energy shift below rvdw_switch.
                 * Table has a scale factor since we have scaled it down to compensate
                 * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
                 */
                fr->enershiftsix    = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
                fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
            }
            else if (fr->vdw_modifier == eintmodPOTSHIFT)
            {
                fr->enershiftsix    = (real)(-1.0/(rc3*rc3));
                fr->enershifttwelve = (real)( 1.0/(rc9*rc3));
            }
            /* Add the constant part from 0 to rvdw_switch.
             * This integration from 0 to rvdw_switch overcounts the number
             * of interactions by 1, as it also counts the self interaction.
             * We will correct for this later.
             */
            eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
            eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;

            /* Calculate the contribution in the range [r0,r1] where we
             * modify the potential. For a pure potential-shift modifier we will
             * have ri0==ri1, and there will not be any contribution here.
             */
            for (i = 0; i < 2; i++)
            {
                enersum = 0;
                virsum  = 0;
                integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
                eners[i] -= enersum;
                virs[i]  -= virsum;
            }

            /* Alright: Above we compensated by REMOVING the parts outside r0
             * corresponding to the ideal VdW 1/r^6 and 1/r^12 potentials.
             *
             * Regardless of whether r0 is the point where we start switching,
             * or the cutoff where we calculated the constant shift, we include
             * all the parts we are missing out to infinity from r0 by
             * calculating the analytical dispersion correction.
             */
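            /* For g(r) = 1 these are the analytic tail integrals:
             *   dispersion: -int_{r0}^{inf} 4 pi r^2 r^-6  dr = -4 pi/(3 r0^3)
             *   repulsion:   int_{r0}^{inf} 4 pi r^2 r^-12 dr =  4 pi/(9 r0^9)
             * which are exactly the energy terms added below.
             */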
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            virs[0]  +=  8.0*M_PI/rc3;
            virs[1]  += -16.0*M_PI/(3.0*rc9);
        }
        else if (fr->vdwtype == evdwCUT ||
                 EVDW_PME(fr->vdwtype) ||
                 fr->vdwtype == evdwUSER)
        {
            if (fr->vdwtype == evdwUSER && fplog)
            {
                fprintf(fplog,
                        "WARNING: using dispersion correction with user tables\n");
            }

            /* Note that with LJ-PME, the dispersion correction is multiplied
             * by the difference between the actual C6 and the value of C6
             * that would produce the combination rule.
             * This means the normal energy and virial difference formulas
             * can be used here.
             */

            rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
            rc9 = rc3*rc3*rc3;

            /* Contribution beyond the cut-off */
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            if (fr->vdw_modifier == eintmodPOTSHIFT)
            {
                /* Contribution within the cut-off */
                eners[0] += -4.0*M_PI/(3.0*rc3);
                eners[1] +=  4.0*M_PI/(3.0*rc9);
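                /* (These follow from integrating the constant potential shift
                 * over the cut-off sphere: (4 pi rc^3/3)*(1/rc^6) = 4 pi/(3 rc^3)
                 * for dispersion and (4 pi rc^3/3)*(1/rc^12) = 4 pi/(3 rc^9)
                 * for repulsion.) */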
            }
            /* Contribution beyond the cut-off */
            virs[0] +=  8.0*M_PI/rc3;
            virs[1] += -16.0*M_PI/(3.0*rc9);
        }
        else
        {
            gmx_fatal(FARGS,
                      "Dispersion correction is not implemented for vdw-type = %s",
                      evdw_names[fr->vdwtype]);
        }

        /* When we deprecate the group kernels the code below can go too */
        if (fr->vdwtype == evdwPME && fr->cutoff_scheme == ecutsGROUP)
        {
            /* Calculate self-interaction coefficient (assuming that
             * the reciprocal-space contribution is constant in the
             * region that contributes to the self-interaction).
             */
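            /* Assuming the standard LJ-PME splitting function
             * g(x) = 1 - (1 + x^2 + x^4/2)*exp(-x^2) with x = beta*r,
             * g(x)/x^6 tends to 1/6 as x tends to 0, so the reciprocal-space
             * term at zero distance approaches beta^6/6, the value used below. */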
2233 fr->enershiftsix = gmx::power6(fr->ewaldcoeff_lj) / 6.0;
2235 eners[0] += -gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj)/3.0;
2236 virs[0] += gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj);
        fr->enerdiffsix    = eners[0];
        fr->enerdifftwelve = eners[1];
        /* The 0.5 is due to the Gromacs definition of the virial */
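        /* (GROMACS defines the virial as Xi = -1/2 sum_(i<j) r_ij (x) F_ij,
         * which is where the factor 0.5 comes from.)
         */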
        fr->virdiffsix     = 0.5*virs[0];
        fr->virdifftwelve  = 0.5*virs[1];
    }
}
void calc_dispcorr(t_inputrec *ir, t_forcerec *fr,
                   matrix box, real lambda, tensor pres, tensor virial,
                   real *prescorr, real *enercorr, real *dvdlcorr)
{
    gmx_bool bCorrAll, bCorrPres;
    real     dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
    int      m;

    *prescorr = 0;
    *enercorr = 0;
    *dvdlcorr = 0;

    clear_mat(virial);
    clear_mat(pres);
    if (ir->eDispCorr != edispcNO)
    {
        bCorrAll  = (ir->eDispCorr == edispcAllEner ||
                     ir->eDispCorr == edispcAllEnerPres);
        bCorrPres = (ir->eDispCorr == edispcEnerPres ||
                     ir->eDispCorr == edispcAllEnerPres);

        invvol = 1/det(box);
        if (fr->n_tpi)
        {
            /* Only correct for the interactions with the inserted molecule */
            dens   = (fr->numAtomsForDispersionCorrection - fr->n_tpi)*invvol;
            ninter = fr->n_tpi;
        }
        else
        {
            dens   = fr->numAtomsForDispersionCorrection*invvol;
            ninter = 0.5*fr->numAtomsForDispersionCorrection;
        }
        if (ir->efep == efepNO)
        {
            avcsix    = fr->avcsix[0];
            avctwelve = fr->avctwelve[0];
        }
        else
        {
            avcsix    = (1 - lambda)*fr->avcsix[0]    + lambda*fr->avcsix[1];
            avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
        }
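        /* The A/B-state averages are interpolated linearly in lambda, so
         * d<C>/dlambda = C_B - C_A; that difference is the factor used in
         * the dvdlambda accumulation below.
         */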
        enerdiff   = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
        *enercorr += avcsix*enerdiff;
        dvdlambda  = 0.0;
        if (ir->efep != efepNO)
        {
            dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
        }
        if (bCorrAll)
        {
            enerdiff   = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
            *enercorr += avctwelve*enerdiff;
            if (fr->efep != efepNO)
            {
                dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
            }
        }
        if (bCorrPres)
        {
            svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
            if (ir->eDispCorr == edispcAllEnerPres)
            {
                svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
            }
            /* The factor 2 is because of the Gromacs virial definition */
            spres = -2.0*invvol*svir*PRESFAC;

            for (m = 0; m < DIM; m++)
            {
                virial[m][m] += svir;
                pres[m][m]   += spres;
            }
            *prescorr += spres;
        }
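        /* The pressure shift follows from the GROMACS pressure expression
         * P = 2/V (Ekin - Xi): adding svir to each diagonal virial element
         * changes the pressure by -2*svir/V, with PRESFAC converting from
         * GROMACS units to bar.
         */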
        /* Can't currently control when it prints; for now, just print when debugging */
        if (debug)
        {
            if (bCorrAll)
            {
                fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
                        avcsix, avctwelve);
            }
            if (bCorrPres)
            {
                fprintf(debug,
                        "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
                        *enercorr, spres, svir);
            }
            else
            {
                fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
            }
        }
        if (fr->efep != efepNO)
        {
            *dvdlcorr += dvdlambda;
        }
    }
}
void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
                  t_graph *graph, rvec x[])
{
    if (fplog)
    {
        fprintf(fplog, "Removing pbc first time\n");
    }
    calc_shifts(box, fr->shift_vec);
    if (graph)
    {
        mk_mshift(fplog, graph, fr->ePBC, box, x);
        if (gmx_debug_at)
        {
            p_graph(debug, "do_pbc_first 1", graph);
        }
        shift_self(graph, box, x);
        /* By doing an extra mk_mshift the molecules that are broken
         * because they were e.g. imported from another software package
         * will be made whole again. Such are the healing powers
         * of GROMACS.
         */
        mk_mshift(fplog, graph, fr->ePBC, box, x);
        if (gmx_debug_at)
        {
            p_graph(debug, "do_pbc_first 2", graph);
        }
    }
    if (fplog)
    {
        fprintf(fplog, "Done rmpbc\n");
    }
}
static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                            const gmx_mtop_t *mtop, rvec x[],
                            gmx_bool bFirst)
{
    t_graph        *graph;
    int             mb, as, mol;
    gmx_molblock_t *molb;

    if (bFirst && fplog)
    {
        fprintf(fplog, "Removing pbc first time\n");
    }
    snew(graph, 1);
    as = 0;
    for (mb = 0; mb < mtop->nmolblock; mb++)
    {
        molb = &mtop->molblock[mb];
        if (molb->natoms_mol == 1 ||
            (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
        {
            /* Just one atom or charge group in the molecule, no PBC required */
            as += molb->nmol*molb->natoms_mol;
        }
        else
        {
            /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
            mk_graph_ilist(nullptr, mtop->moltype[molb->type].ilist,
                           0, molb->natoms_mol, FALSE, FALSE, graph);

            for (mol = 0; mol < molb->nmol; mol++)
            {
                mk_mshift(fplog, graph, ePBC, box, x+as);

                shift_self(graph, box, x+as);
                /* The molecule is whole now.
                 * We don't need the second mk_mshift call as in do_pbc_first,
                 * since we no longer need this graph.
                 */

                as += molb->natoms_mol;
            }
            done_graph(graph);
        }
    }
    sfree(graph);
}
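/* The bFirst flag selects the more thorough first-time treatment: later calls
 * may skip molecules that consist of a single charge group, since a charge
 * group is kept whole during the run and can never be broken over a periodic
 * boundary.
 */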
void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
                       const gmx_mtop_t *mtop, rvec x[])
{
    low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
}

void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                 gmx_mtop_t *mtop, rvec x[])
{
    low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
}
void put_atoms_in_box_omp(int ePBC, const matrix box, int natoms, rvec x[])
{
    int t, nth;

    nth = gmx_omp_nthreads_get(emntDefault);

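    /* Thread t handles the contiguous slice [natoms*t/nth, natoms*(t+1)/nth);
     * the integer divisions make the slices tile the whole range exactly,
     * with sizes differing by at most one atom.
     */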
#pragma omp parallel for num_threads(nth) schedule(static)
    for (t = 0; t < nth; t++)
    {
        try
        {
            int offset, len;

            offset = (natoms*t      )/nth;
            len    = (natoms*(t + 1))/nth - offset;
            put_atoms_in_box(ePBC, box, len, x + offset);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
}
// TODO This can be cleaned up a lot, and moved back to runner.cpp
void finish_run(FILE *fplog, const gmx::MDLogger &mdlog, t_commrec *cr,
                t_inputrec *inputrec,
                t_nrnb nrnb[], gmx_wallcycle_t wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                nonbonded_verlet_t *nbv,
                gmx_bool bWriteStat)
{
    t_nrnb *nrnb_tot = nullptr;
    double  delta_t  = 0;
    double  nbfs     = 0, mflop = 0;
    double  elapsed_time,
            elapsed_time_over_all_ranks,
            elapsed_time_over_all_threads,
            elapsed_time_over_all_threads_over_all_ranks;
    if (cr->nnodes > 1)
    {
        snew(nrnb_tot, 1);
#if GMX_MPI
        MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
#endif
    }
    else
    {
        nrnb_tot = nrnb;
    }
    elapsed_time                                 = walltime_accounting_get_elapsed_time(walltime_accounting);
    elapsed_time_over_all_ranks                  = elapsed_time;
    elapsed_time_over_all_threads                = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
    elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
#if GMX_MPI
    if (cr->nnodes > 1)
    {
        /* Reduce elapsed_time over all MPI ranks in the current simulation */
        MPI_Allreduce(&elapsed_time,
                      &elapsed_time_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
        elapsed_time_over_all_ranks /= cr->nnodes;
        /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
         * current simulation. */
        MPI_Allreduce(&elapsed_time_over_all_threads,
                      &elapsed_time_over_all_threads_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
    }
#endif
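    /* After these reductions elapsed_time_over_all_ranks holds the average
     * wall time per rank (the Allreduce sum divided by cr->nnodes), while
     * the over-all-threads variant keeps the plain sum, i.e. the aggregate
     * core time used for the performance report.
     */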
    if (fplog)
    {
        print_flop(fplog, nrnb_tot, &nbfs, &mflop);
    }
    if (cr->nnodes > 1)
    {
        sfree(nrnb_tot);
    }

    if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
    {
        print_dd_statistics(cr, inputrec, fplog);
    }
    /* TODO Move the responsibility for any scaling by thread counts
     * to the code that handled the thread region, so that there's a
     * mechanism to keep cycle counting working during the transition
     * to task parallelism. */
    int  nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
    int  nthreads_pme = gmx_omp_nthreads_get(emntPME);
    wallcycle_scale_by_num_threads(wcycle, cr->duty == DUTY_PME, nthreads_pp, nthreads_pme);
    auto cycle_sum(wallcycle_sum(cr, wcycle));
    if (SIMMASTER(cr))
    {
        struct gmx_wallclock_gpu_t* gputimes = use_GPU(nbv) ? nbnxn_gpu_get_timings(nbv->gpu_nbv) : nullptr;

        wallcycle_print(fplog, mdlog, cr->nnodes, cr->npmenodes, nthreads_pp, nthreads_pme,
                        elapsed_time_over_all_ranks,
                        wcycle, cycle_sum, gputimes);
        if (EI_DYNAMICS(inputrec->eI))
        {
            delta_t = inputrec->delta_t;
        }

        if (fplog)
        {
            print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
        if (bWriteStat)
        {
            print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
    }
}
extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, gmx::ArrayRef<real> lambda, double *lam0)
{
    /* This function works, but could probably use a logic rewrite to keep all the different
       types of efep straight. */

    if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
    {
        return;
    }

    t_lambda *fep = ir->fepvals;
    *fep_state = fep->init_fep_state; /* this might overwrite the checkpoint
                                         if checkpoint is set -- a kludge is in for now
                                         to prevent this. */
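    /* Two initialization paths below: a non-negative scalar init_lambda
     * applies the same value to every lambda component, while otherwise each
     * component is taken from the all_lambda column of the chosen fep_state.
     */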
    for (int i = 0; i < efptNR; i++)
    {
        /* Overwrite lambda state with init_lambda for now for backwards compatibility */
        if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
        {
            lambda[i] = fep->init_lambda;
            if (lam0)
            {
                lam0[i] = lambda[i];
            }
        }
        else
        {
            lambda[i] = fep->all_lambda[i][*fep_state];
            if (lam0)
            {
                lam0[i] = lambda[i];
            }
        }
    }
    if (ir->bSimTemp)
    {
        /* Need to rescale the control temperatures to match the current state */
        for (int i = 0; i < ir->opts.ngtc; i++)
        {
            if (ir->opts.ref_t[i] > 0)
            {
                ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
            }
        }
    }
    /* Write the current lambda values to the log */
    if (fplog != nullptr)
    {
        fprintf(fplog, "Initial vector of lambda components:[ ");
        for (const auto &l : lambda)
        {
            fprintf(fplog, "%10.4f ", l);
        }
        fprintf(fplog, "]\n");
    }
}
void init_md(FILE *fplog,
             t_commrec *cr, t_inputrec *ir, const gmx_output_env_t *oenv,
             double *t, double *t0,
             gmx::ArrayRef<real> lambda, int *fep_state, double *lam0,
             t_nrnb *nrnb, gmx_mtop_t *mtop,
             gmx_update_t **upd,
             int nfile, const t_filenm fnm[],
             gmx_mdoutf_t *outf, t_mdebin **mdebin,
             tensor force_vir, tensor shake_vir, rvec mu_tot,
             gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags,
             gmx_wallcycle_t wcycle)
{
    int i;

    /* Initial values */
    *t = *t0 = ir->init_t;

    *bSimAnn = FALSE;
    for (i = 0; i < ir->opts.ngtc; i++)
    {
        /* Set bSimAnn if any group is being annealed */
        if (ir->opts.annealing[i] != eannNO)
        {
            *bSimAnn = TRUE;
        }
    }

    /* Initialize lambda variables */
    initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
    // TODO upd is never NULL in practice, but the analysers don't know that
    if (upd)
    {
        *upd = init_update(ir);
    }
    if (*bSimAnn)
    {
        update_annealing_target_temp(ir, ir->init_t, upd ? *upd : nullptr);
    }

    if (vcm != nullptr)
    {
        *vcm = init_vcm(fplog, &mtop->groups, ir);
    }
    if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
    {
        if (ir->etc == etcBERENDSEN)
        {
            please_cite(fplog, "Berendsen84a");
        }
        if (ir->etc == etcVRESCALE)
        {
            please_cite(fplog, "Bussi2007a");
        }
        if (ir->eI == eiSD1)
        {
            please_cite(fplog, "Goga2012");
        }
    }
    init_nrnb(nrnb);

    if (nfile != -1)
    {
        *outf = init_mdoutf(fplog, nfile, fnm, Flags, cr, ir, mtop, oenv, wcycle);
        *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? nullptr : mdoutf_get_fp_ene(*outf),
                              mtop, ir, mdoutf_get_fp_dhdl(*outf));
    }

    /* Initiate variables */
    clear_mat(force_vir);
    clear_mat(shake_vir);
    clear_rvec(mu_tot);
}