/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed-forces/bonded.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/genborn.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/pleasecite.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/sysinfo.h"

#include "nbnxn_gpu.h"
void print_time(FILE                     *out,
                gmx_walltime_accounting_t walltime_accounting,
                gmx_int64_t               step,
                t_inputrec               *ir,
                t_commrec gmx_unused     *cr)
{
    time_t finish;
    char   timebuf[STRLEN];
    double dt, elapsed_seconds, time_per_step;
    char   buf[STRLEN];

    fprintf(out, "step %s", gmx_step_str(step, buf));

    if ((step >= ir->nstlist))
    {
        double seconds_since_epoch = gmx_gettime();

        elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
        time_per_step   = elapsed_seconds/(step - ir->init_step + 1);
        dt              = (ir->nsteps + ir->init_step - step) * time_per_step;

        finish = (time_t) (seconds_since_epoch + dt);
        gmx_ctime_r(&finish, timebuf, STRLEN);
        sprintf(buf, "%s", timebuf);
        buf[strlen(buf)-1] = '\0';
        fprintf(out, ", will finish %s", buf);

        fprintf(out, ", remaining wall clock time: %5d s ", (int)dt);

        fprintf(out, " performance: %.1f ns/day ",
                ir->delta_t/1000*24*60*60/time_per_step);
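
        /* Worked example (illustrative note, not in the original source):
         * delta_t is in ps, so one step advances delta_t/1000 ns. With
         * delta_t = 0.002 ps and time_per_step = 0.01 s, this gives
         * 0.002/1000 * 86400 / 0.01, i.e. about 17.3 ns/day. */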
    }
}

void print_date_and_time(FILE *fplog, int nodeid, const char *title,
                         double the_time)
{
    char time_string[STRLEN];

    if (fplog)
    {
        int    i;
        char   timebuf[STRLEN];
        time_t temp_time = (time_t) the_time;

        gmx_ctime_r(&temp_time, timebuf, STRLEN);
        for (i = 0; timebuf[i] >= ' '; i++)
        {
            time_string[i] = timebuf[i];
        }
        time_string[i] = '\0';

        fprintf(fplog, "%s on rank %d %s\n", title, nodeid, time_string);
    }
}
void print_start(FILE *fplog, t_commrec *cr,
                 gmx_walltime_accounting_t walltime_accounting,
                 const char *name)
{
    char buf[STRLEN];

    sprintf(buf, "Started %s", name);
    print_date_and_time(fplog, cr->nodeid, buf,
                        walltime_accounting_get_start_time_stamp(walltime_accounting));
}
static void sum_forces(rvec f[], const PaddedRVecVector *forceToAdd)
{
    /* TODO: remove this - 1 when padding is properly implemented */
    int         end  = forceToAdd->size() - 1;
    const rvec *fAdd = as_rvec_array(forceToAdd->data());

    // cppcheck-suppress unreadVariable
    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        rvec_inc(f[i], fAdd[i]);
    }
}
static void calc_virial(int start, int homenr, rvec x[], rvec f[],
                        tensor vir_part, t_graph *graph, matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    int i;

    /* The short-range virial from surrounding boxes */
    calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md.
     */
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    /* Add position restraint contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][i] += fr->vir_diag_posres[i];
    }

    /* Add wall contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][ZZ] += fr->vir_wall_z[i];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}
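
/* Background note (not in the original source): GROMACS defines the virial
 * as Xi = -(1/2) sum_i x_i (outer product) f_i; f_calc_vir above accumulates
 * this for the local atoms, while the shift-vector term accounts for
 * contributions from periodic images. The same -1/2 convention is why the
 * dispersion-correction code further down multiplies its virial by 0.5. */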
static void pull_potential_wrapper(t_commrec *cr,
                                   t_inputrec *ir,
                                   matrix box, rvec x[],
                                   rvec f[],
                                   tensor vir_force,
                                   t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   real *lambda,
                                   double t,
                                   gmx_wallcycle_t wcycle)
{
    t_pbc pbc;
    real  dvdl;

    /* Calculate the center-of-mass forces; this requires communication,
     * which is why pull_potential is called close to other communication.
     * The virial contribution is calculated directly,
     * which is why we call pull_potential after calc_virial.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->ePBC, box);
    dvdl = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(ir->pull_work, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}
static void pme_receive_force_ener(t_commrec *cr,
                                   gmx_wallcycle_t wcycle,
                                   gmx_enerdata_t *enerd,
                                   t_forcerec *fr)
{
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(cr, as_rvec_array(fr->f_novirsum->data()), fr->vir_el_recip, &e_q,
                      fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);

    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}
static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
                               gmx_int64_t step, real forceTolerance,
                               const rvec *x, const rvec *f)
{
    real           force2Tolerance = gmx::square(forceTolerance);
    std::uintmax_t numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
    {
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
        {
            fprintf(fp, "step %" GMX_PRId64 " atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
                    step,
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
        }
        if (nonFinite)
        {
            numNonFinite++;
        }
    }
    if (numNonFinite > 0)
    {
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" GMX_PRId64 " detected non-finite forces on %ju atoms", step, numNonFinite);
    }
}
static void post_process_forces(t_commrec *cr,
                                gmx_int64_t step,
                                t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                                gmx_localtop_t *top,
                                matrix box, rvec x[],
                                rvec f[],
                                tensor vir_force,
                                t_mdatoms *mdatoms,
                                t_graph *graph,
                                t_forcerec *fr, gmx_vsite_t *vsite,
                                int flags)
{
    if (fr->bF_NoVirSum)
    {
        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, as_rvec_array(fr->f_novirsum->data()), nullptr,
                           (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Now add the forces, this is local */
            sum_forces(f, fr->f_novirsum);

            if (EEL_FULL(fr->eeltype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_el_recip, vir_force);
            }
            if (EVDW_PME(fr->vdwtype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_lj_recip, vir_force);
            }
            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}
static void do_nb_verlet(t_forcerec *fr,
                         interaction_const_t *ic,
                         gmx_enerdata_t *enerd,
                         int flags, int ilocality,
                         int clearF,
                         t_nrnb *nrnb,
                         gmx_wallcycle_t wcycle)
{
    int                       enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
    nonbonded_verlet_group_t *nbvg;
    gmx_bool                  bUsingGpuKernels;

    if (!(flags & GMX_FORCE_NONBONDED))
    {
        /* skip non-bonded calculation */
        return;
    }

    nbvg = &fr->nbv->grp[ilocality];

    /* GPU kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    bUsingGpuKernels = (nbvg->kernel_type == nbnxnk8x8x8_GPU);

    if (!bUsingGpuKernels)
    {
        wallcycle_sub_start(wcycle, ewcsNONBONDED);
    }
    switch (nbvg->kernel_type)
    {
        case nbnxnk4x4_PlainC:
            nbnxn_kernel_ref(&nbvg->nbl_lists,
                             enerd->grpp.ener[egCOULSR],
                             fr->bBHAM ?
                             enerd->grpp.ener[egBHAMSR] :
                             enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_4xN:
            nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
                                  enerd->grpp.ener[egCOULSR],
                                  fr->bBHAM ?
                                  enerd->grpp.ener[egBHAMSR] :
                                  enerd->grpp.ener[egLJSR]);
            break;
        case nbnxnk4xN_SIMD_2xNN:
            nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
                                   enerd->grpp.ener[egCOULSR],
                                   fr->bBHAM ?
                                   enerd->grpp.ener[egBHAMSR] :
                                   enerd->grpp.ener[egLJSR]);
            break;
        case nbnxnk8x8x8_GPU:
            nbnxn_gpu_launch_kernel(fr->nbv->gpu_nbv, nbvg->nbat, flags, ilocality);
            break;
        case nbnxnk8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
                                 nbvg->nbat->out[0].f,
                                 enerd->grpp.ener[egCOULSR],
                                 fr->bBHAM ?
                                 enerd->grpp.ener[egBHAMSR] :
                                 enerd->grpp.ener[egLJSR]);
            break;
        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
    }
    if (!bUsingGpuKernels)
    {
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

    if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!bUsingGpuKernels && nbvg->ewald_excl == ewaldexclAnalytical) ||
             (bUsingGpuKernels && nbnxn_gpu_is_kernel_ewald_analytical(fr->nbv->gpu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (flags & GMX_FORCE_ENERGY)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj  += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
             nbvg->nbl_lists.natpair_ljq);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
             nbvg->nbl_lists.natpair_lj);
    /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
             nbvg->nbl_lists.natpair_q);
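
    /* Illustrative note (not in the original source): the eNR counter enums
     * are laid out so that simple arithmetic selects the right counter: the
     * F+E flavor of a kernel is the F counter + 1 (used above), and shifting
     * by -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF maps an LJ+Coulomb counter onto the
     * Coulomb-only counter of the same kernel flavor. */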
    if (ic->vdw_modifier == eintmodFORCESWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdw_modifier == eintmodPOTSWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdwtype == evdwPME)
    {
        /* We add up the LJ Ewald cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
}
static void do_nb_verlet_fep(nbnxn_pairlist_set_t *nbl_lists,
                             t_forcerec *fr,
                             rvec x[],
                             rvec f[],
                             t_mdatoms *mdatoms,
                             t_lambda *fepvals,
                             real *lambda,
                             gmx_enerdata_t *enerd,
                             int flags,
                             t_nrnb *nrnb,
                             gmx_wallcycle_t wcycle)
{
    int              donb_flags;
    nb_kernel_data_t kernel_data;
    real             lam_i[efptNR];
    real             dvdl_nb[efptNR];
    int              th;
    int              i, j;

    donb_flags = 0;
    /* Add short-range interactions */
    donb_flags |= GMX_NONBONDED_DO_SR;

    /* Currently all group scheme kernels always calculate (shift-)forces */
    if (flags & GMX_FORCE_FORCES)
    {
        donb_flags |= GMX_NONBONDED_DO_FORCE;
    }
    if (flags & GMX_FORCE_VIRIAL)
    {
        donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
    }
    if (flags & GMX_FORCE_ENERGY)
    {
        donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
    }

    kernel_data.flags  = donb_flags;
    kernel_data.lambda = lambda;
    kernel_data.dvdl   = dvdl_nb;

    kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR];
    kernel_data.energygrp_vdw  = enerd->grpp.ener[egLJSR];

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i] = 0;
    }

    assert(gmx_omp_nthreads_get(emntNonbonded) == nbl_lists->nnbl);

    wallcycle_sub_start(wcycle, ewcsNONBONDED);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
    for (th = 0; th < nbl_lists->nnbl; th++)
    {
        try
        {
            gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                      x, f, fr, mdatoms, &kernel_data, nrnb);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    /* If we do foreign lambda and we have soft-core interactions
     * we have to recalculate the (non-linear) energies contributions.
     */
    if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
    {
        kernel_data.flags          = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE)) | GMX_NONBONDED_DO_FOREIGNLAMBDA;
        kernel_data.lambda         = lam_i;
        kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR];
        kernel_data.energygrp_vdw  = enerd->foreign_grpp.ener[egLJSR];
        /* Note that we add to kernel_data.dvdl, but ignore the result */

        for (i = 0; i < enerd->n_lambda; i++)
        {
            for (j = 0; j < efptNR; j++)
            {
                lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
            }
            reset_foreign_enerdata(enerd);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
            for (th = 0; th < nbl_lists->nnbl; th++)
            {
                try
                {
                    gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                              x, f, fr, mdatoms, &kernel_data, nrnb);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }

            sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
            enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
        }
    }

    wallcycle_sub_stop(wcycle, ewcsNONBONDED);
}
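
/* Worked example (illustrative note, not in the original source): in the
 * foreign-lambda loop of do_nb_verlet_fep above, i == 0 re-evaluates at the
 * current lambda[] while i > 0 uses fepvals->all_lambda[j][i-1], and each
 * total ends up in enerd->enerpart_lambda[i]; free-energy estimators such
 * as BAR can then use these energies at neighboring lambda points. */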
gmx_bool use_GPU(const nonbonded_verlet_t *nbv)
{
    return nbv != nullptr && nbv->bUseGPU;
}
static gmx_inline void clear_rvecs_omp(int n, rvec v[])
{
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (nth == 1)
    {
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
}
/*! \brief This routine checks if the potential energy is finite.
 *
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetics. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] enerd The energy data; the non-bonded group energies need to be added in here before calling this routine
 */
static void checkPotentialEnergyValidity(const gmx_enerdata_t *enerd)
{
    if (!std::isfinite(enerd->term[F_EPOT]))
    {
        gmx_fatal(FARGS, "The total potential energy is %g, which is not finite. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A non-finite potential energy can be caused by overlapping interactions in bonded interactions or very large or NaN coordinate values. Usually this is caused by a badly or non-equilibrated initial configuration or incorrect interactions or parameters in the topology.",
                  enerd->term[F_EPOT],
                  enerd->term[F_LJ],
                  enerd->term[F_COUL_SR]);
    }
}
void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
                         t_inputrec *inputrec,
                         gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                         gmx_localtop_t *top,
                         gmx_groups_t gmx_unused *groups,
                         matrix box, rvec x[], history_t *hist,
                         PaddedRVecVector *force,
                         tensor vir_force,
                         t_mdatoms *mdatoms,
                         gmx_enerdata_t *enerd, t_fcdata *fcd,
                         real *lambda, t_graph *graph,
                         t_forcerec *fr, interaction_const_t *ic,
                         gmx_vsite_t *vsite, rvec mu_tot,
                         double t, gmx_edsam_t ed,
                         gmx_bool bBornRadii,
                         int flags)
{
    int                 cg1, i, j;
    double              mu[2*DIM];
    gmx_bool            bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool            bDoForces, bUseGPU, bUseOrEmulGPU;
    gmx_bool            bDiffKernels = FALSE;
    rvec                vzero, box_diag;
    float               cycles_pme, cycles_force, cycles_wait_gpu;
    /* TODO To avoid loss of precision, float can't be used for a
     * cycle count. Build an object that can do this right and perhaps
     * also be used by gmx_wallcycle_t */
    gmx_cycles_t        cycleCountBeforeLocalWorkCompletes = 0;
    nonbonded_verlet_t *nbv;

    nbv = fr->nbv;

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }

    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces     = (flags & GMX_FORCE_FORCES);
    bUseGPU       = fr->nbv->bUseGPU;
    bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);

    update_forcerec(fr, box);

    if (inputrecNeedMutot(inputrec))
    {
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        calc_mu(start, homenr,
                x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                mu, mu+DIM);
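
        /* Background note (not in the original source): for point charges
         * the (local) dipole is mu = sum_i q_i x_i, accumulated separately
         * for the A and B topology states so that a single reduction can
         * sum both over ranks. */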
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }

    nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
                                 fr->shift_vec, nbv->grp[0].nbat);

    if (!(cr->duty & DUTY_PME))
    {
        gmx_bool bBS;
        matrix   boxs;

        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
    /* do gridding for pair search */
    if (bNS)
    {
        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);

        wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
        nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
                          0, mdatoms->homenr, -1, fr->cginfo, x,
                          nbv->grp[eintLocal].kernel_type,
                          nbv->grp[eintLocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);

        wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
        nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
                                   nbv->grp[eintNonlocal].kernel_type,
                                   nbv->grp[eintNonlocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);

        if (nbv->ngrp == 1 ||
            nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        else
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
                               nbv->nbs, mdatoms, fr->cginfo);
            nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        wallcycle_stop(wcycle, ewcNS);
    }

    /* initialize the GPU atom data and copy shift vector */
    if (bUseGPU)
    {
        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_gpu_init_atomdata(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }

        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        nbnxn_gpu_upload_shiftvec(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* do local pair search */
    if (bNS)
    {
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
                            nbv->min_ci_balanced,
                            &nbv->grp[eintLocal].nbl_lists,
                            nbv->grp[eintLocal].kernel_type,
                            nrnb);
        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);

        if (bUseGPU)
        {
            /* initialize local pair-list on the GPU */
            nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
                                    nbv->grp[eintLocal].nbl_lists.nbl[0],
                                    eintLocal);
        }
        wallcycle_stop(wcycle, ewcNS);
    }
    else
    {
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
                                        nbv->grp[eintLocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (bUseGPU)
    {
        wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
        /* launch local nonbonded F on GPU */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
                     nrnb, wcycle);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }
    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (DOMAINDECOMP(cr))
    {
        bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
                        nbv->grp[eintLocal].kernel_type);

        if (bDiffKernels)
        {
            /* With GPU+CPU non-bonded calculations we need to copy
             * the local coordinates to the non-local nbat struct
             * (in CPU format) as the non-local kernel call also
             * calculates the local - non-local interactions.
             */
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (bDiffKernels)
            {
                nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
            }

            nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
                                nbv->min_ci_balanced,
                                &nbv->grp[eintNonlocal].nbl_lists,
                                nbv->grp[eintNonlocal].kernel_type,
                                nrnb);

            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_GPU)
            {
                /* initialize non-local pair-list on the GPU */
                nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
                                        nbv->grp[eintNonlocal].nbl_lists.nbl[0],
                                        eintNonlocal);
            }
            wallcycle_stop(wcycle, ewcNS);
        }
        else
        {
            wallcycle_start(wcycle, ewcMOVEX);
            dd_move_x(cr->dd, box, x);
            wallcycle_stop(wcycle, ewcMOVEX);

            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bUseGPU && !bDiffKernels)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
            /* launch non-local nonbonded F on GPU */
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
                         nrnb, wcycle);
            cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
    }

    if (bUseGPU)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        if (DOMAINDECOMP(cr) && !bDiffKernels)
        {
            nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintNonlocal].nbat,
                                     flags, eatNonlocal);
        }
        nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintLocal].nbat,
                                 flags, eatLocal);
        cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }
    if (bStateChanged && inputrecNeedMutot(inputrec))
    {
        if (PAR(cr))
        {
            gmx_sumd(2*DIM, mu, cr);
        }

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }
    }
    if (fr->efep == efepNO)
    {
        copy_rvec(fr->mu_tot[0], mu_tot);
    }
    else
    {
        for (j = 0; j < DIM; j++)
        {
            mu_tot[j] =
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                lambda[efptCOUL]*fr->mu_tot[1][j];
        }
    }

    /* Reset energies */
    reset_enerdata(enerd);
    clear_rvecs(SHIFTS, fr->fshift);

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }
    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Temporary solution until all routines take PaddedRVecVector */
    rvec *f = as_rvec_array(force->data());

    /* Start the force cycle counter.
     * This counter is stopped after do_force_lowlevel.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);
    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->forceBufferNoVirialSummation;
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the virial.
                 */
                fr->f_novirsum = force;
            }
        }

        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                /* TODO: remove this - 1 when padding is properly implemented */
                clear_rvecs_omp(fr->f_novirsum->size() - 1,
                                as_rvec_array(fr->f_novirsum->data()));
            }
        }
        /* Clear the short- and long-range forces */
        clear_rvecs_omp(fr->natoms_force_constr, f);

        clear_rvec(fr->vir_diag_posres);
    }

    if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
    {
        clear_pull_forces(inputrec->pull_work);
    }
    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, because in that
     * function, the listed forces are calculated before PME, which
     * does communication. With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */
    if (!bUseOrEmulGPU)
    {
        /* Maybe we should move this into do_force_lowlevel */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
                     nrnb, wcycle);
    }

    if (fr->efep != efepNO)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }

        if (DOMAINDECOMP(cr) &&
            fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }
    }
    if (!bUseOrEmulGPU || bDiffKernels)
    {
        int aloc;

        if (DOMAINDECOMP(cr))
        {
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
                         bDiffKernels ? enbvClearFYes : enbvClearFNo,
                         nrnb, wcycle);
        }

        if (!bUseOrEmulGPU)
        {
            aloc = eintLocal;
        }
        else
        {
            aloc = eintNonlocal;
        }

        /* Add all the non-bonded force to the normal force array.
         * This can be split into a local and a non-local part when overlapping
         * communication with computation under domain decomposition.
         */
        cycles_force += wallcycle_stop(wcycle, ewcFORCE);
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_start_nocount(wcycle, ewcFORCE);

        /* if there are multiple fshift output buffers reduce them */
        if ((flags & GMX_FORCE_VIRIAL) &&
            nbv->grp[aloc].nbl_lists.nnbl > 1)
        {
            /* This is not in a subcounter because it takes a
               negligible and constant-sized amount of time */
            nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
                                                     fr->fshift);
        }
    }
    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, enerd, fcd, top, fr->born,
                      bBornRadii, box,
                      inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
                      flags, &cycles_pme);

    cycles_force += wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }
    if (bUseOrEmulGPU && !bDiffKernels)
    {
        /* wait for non-local forces (or calculate in emulation mode) */
        if (DOMAINDECOMP(cr))
        {
            if (bUseGPU)
            {
                float cycles_tmp;

                wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
                nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
                                       enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                       fr->fshift);
                cycles_tmp       = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
                cycles_wait_gpu += cycles_tmp;
                cycles_force    += cycles_tmp;
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
                             nrnb, wcycle);
                cycles_force += wallcycle_stop(wcycle, ewcFORCE);
            }
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
            /* skip the reduction if there was no non-local work to do */
            if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
            {
                nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
                                               nbv->grp[eintNonlocal].nbat, f);
            }
            wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }
    }
    if (bDoForces && DOMAINDECOMP(cr))
    {
        if (bUseGPU)
        {
            /* We are done with the CPU compute, but the GPU local non-bonded
             * kernel can still be running while we communicate the forces.
             * We start a counter here, so we can, hopefully, time the rest
             * of the GPU kernel execution and data transfer.
             */
            cycleCountBeforeLocalWorkCompletes = gmx_cycles_read();
        }

        /* Communicate the forces */
        wallcycle_start(wcycle, ewcMOVEF);
        dd_move_f(cr->dd, f, fr->fshift);
        wallcycle_stop(wcycle, ewcMOVEF);
    }

    if (bUseOrEmulGPU)
    {
        /* wait for local forces (or calculate in emulation mode) */
        if (bUseGPU)
        {
            float       cycles_tmp, cycles_wait_est;
            /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
             * is between 0.5 and 1.5 Mcycles. So 2 Mcycles is an overestimate,
             * but even with a step of 0.1 ms the difference is less than 1%.
             */
            const float gpuWaitApiOverheadMargin = 2e6f; /* cycles */
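
            /* Illustrative arithmetic (not in the original source; assumes a
             * 2.5 GHz core): 2e6 cycles is roughly 0.8 ms. Waits shorter than
             * this margin are treated below as API-call overhead rather than
             * as real GPU wait time. */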
            wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
            nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
                                   enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                   fr->fshift);
            cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

            if (bDoForces && DOMAINDECOMP(cr))
            {
                cycles_wait_est = gmx_cycles_read() - cycleCountBeforeLocalWorkCompletes;

                if (cycles_tmp < gpuWaitApiOverheadMargin)
                {
                    /* We measured few cycles, it could be that the kernel
                     * and transfer finished earlier and there was no actual
                     * wait time, only API call overhead.
                     * Then the actual time could be anywhere between 0 and
                     * cycles_wait_est. As a compromise, we use half the time.
                     */
                    cycles_wait_est *= 0.5f;
                }
            }
            else
            {
                /* No force communication so we actually timed the wait */
                cycles_wait_est = cycles_tmp;
            }
            /* Even though this is after dd_move_f, the actual task we are
             * waiting for runs asynchronously with dd_move_f and we usually
             * have nothing to balance it with, so we can and should add
             * the time to the force time for load balancing.
             */
            cycles_force    += cycles_wait_est;
            cycles_wait_gpu += cycles_wait_est;

            /* now clear the GPU outputs while we finish the step on the CPU */
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_gpu_clear_outputs(nbv->gpu_nbv, flags);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
        else
        {
            wallcycle_start_nocount(wcycle, ewcFORCE);
            do_nb_verlet(fr, ic, enerd, flags, eintLocal,
                         DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                         nrnb, wcycle);
            wallcycle_stop(wcycle, ewcFORCE);
        }
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
                                       nbv->grp[eintLocal].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }
    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
            if (bUseGPU)
            {
                dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
            }
        }
    }

    if (bDoForces)
    {
        /* Compute forces due to electric field */
        if (fr->efield != nullptr)
        {
            fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);
        }

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        {
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, nullptr, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }

    if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
    {
        /* Since the COM pulling is always done mass-weighted, no forces are
         * applied to vsites and this call can be done after vsite spreading.
         */
        pull_potential_wrapper(cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t,
                               wcycle);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);

    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, wcycle, enerd, fr);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    if (flags & GMX_FORCE_ENERGY)
    {
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
        {
            checkPotentialEnergyValidity(enerd);
        }
    }
}
void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
                        t_inputrec *inputrec,
                        gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                        gmx_localtop_t *top,
                        gmx_groups_t *groups,
                        matrix box, rvec x[], history_t *hist,
                        PaddedRVecVector *force,
                        tensor vir_force,
                        t_mdatoms *mdatoms,
                        gmx_enerdata_t *enerd, t_fcdata *fcd,
                        real *lambda, t_graph *graph,
                        t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
                        double t, gmx_edsam_t ed,
                        gmx_bool bBornRadii,
                        int flags)
{
    int       cg0, cg1, i, j;
    double    mu[2*DIM];
    gmx_bool  bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool  bDoForces;
    float     cycles_pme, cycles_force;

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    cg0 = 0;
    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }
    else
    {
        cg1 = top->cgs.nr;
    }

    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    /* Should we perform the long-range nonbonded evaluation inside the neighbor searching? */
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces     = (flags & GMX_FORCE_FORCES);
    update_forcerec(fr, box);

    if (inputrecNeedMutot(inputrec))
    {
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        calc_mu(start, homenr,
                x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                mu, mu+DIM);
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
                                     &(top->cgs), x, fr->cg_cm);
            inc_nrnb(nrnb, eNR_CGCM, homenr);
            inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }
    else if (bCalcCGCM)
    {
        calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
        inc_nrnb(nrnb, eNR_CGCM, homenr);
    }

    if (bCalcCGCM && gmx_debug_at)
    {
        pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
    }

#if GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        gmx_bool bBS;
        matrix   boxs;

        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */
    /* Communicate coordinates and sum dipole if necessary */
    if (DOMAINDECOMP(cr))
    {
        wallcycle_start(wcycle, ewcMOVEX);
        dd_move_x(cr->dd, box, x);
        wallcycle_stop(wcycle, ewcMOVEX);
    }

    if (inputrecNeedMutot(inputrec))
    {
        if (bStateChanged)
        {
            if (PAR(cr))
            {
                gmx_sumd(2*DIM, mu, cr);
            }
            for (i = 0; i < 2; i++)
            {
                for (j = 0; j < DIM; j++)
                {
                    fr->mu_tot[i][j] = mu[i*DIM + j];
                }
            }
        }
        if (fr->efep == efepNO)
        {
            copy_rvec(fr->mu_tot[0], mu_tot);
        }
        else
        {
            for (j = 0; j < DIM; j++)
            {
                mu_tot[j] =
                    (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
            }
        }
    }

    /* Reset energies */
    reset_enerdata(enerd);
    clear_rvecs(SHIFTS, fr->fshift);
    if (bNS)
    {
        wallcycle_start(wcycle, ewcNS);

        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        /* Do the actual neighbour searching */
        ns(fplog, fr, box,
           groups, top, mdatoms,
           cr, nrnb, bFillGrid);

        wallcycle_stop(wcycle, ewcNS);
    }

    if (inputrec->implicit_solvent && bNS)
    {
        make_gb_nblist(cr, inputrec->gb_algorithm,
                       x, box, fr, &top->idef, graph, fr->born);
    }

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }
    /* Temporary solution until all routines take PaddedRVecVector */
    rvec *f = as_rvec_array(force->data());

    /* Start the force cycle counter.
     * This counter is stopped after do_force_lowlevel.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->forceBufferNoVirialSummation;
                /* TODO: remove this - 1 when padding is properly implemented */
                clear_rvecs(fr->f_novirsum->size() - 1,
                            as_rvec_array(fr->f_novirsum->data()));
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the virial.
                 */
                fr->f_novirsum = force;
            }
        }

        /* Clear the short- and long-range forces */
        clear_rvecs(fr->natoms_force_constr, f);

        clear_rvec(fr->vir_diag_posres);
    }
    if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
    {
        clear_pull_forces(inputrec->pull_work);
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }
    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, enerd, fcd, top, fr->born,
                      bBornRadii, box,
                      inputrec->fepvals, lambda,
                      graph, &(top->excls), fr->mu_tot,
                      flags,
                      &cycles_pme);

    cycles_force = wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
        }
    }
    if (bDoForces)
    {
        /* Compute forces due to electric field */
        if (fr->efield != nullptr)
        {
            fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);
        }

        /* Communicate the forces */
        if (DOMAINDECOMP(cr))
        {
            wallcycle_start(wcycle, ewcMOVEF);
            dd_move_f(cr->dd, f, fr->fshift);
            /* Do we need to communicate the separate force array
             * for terms that do not contribute to the single sum virial?
             * Position restraints and electric fields do not introduce
             * inter-cg forces, only full electrostatics methods do.
             * When we do not calculate the virial, fr->f_novirsum = f,
             * so we have already communicated these forces.
             */
            if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
                (flags & GMX_FORCE_VIRIAL))
            {
                dd_move_f(cr->dd, as_rvec_array(fr->f_novirsum->data()), nullptr);
            }
            wallcycle_stop(wcycle, ewcMOVEF);
        }

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        {
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, nullptr, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }
    if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
    {
        pull_potential_wrapper(cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t,
                               wcycle);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);

    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, wcycle, enerd, fr);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    if (flags & GMX_FORCE_ENERGY)
    {
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
        {
            checkPotentialEnergyValidity(enerd);
        }
    }
}
void do_force(FILE *fplog, t_commrec *cr,
              t_inputrec *inputrec,
              gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
              gmx_localtop_t *top,
              gmx_groups_t *groups,
              matrix box, PaddedRVecVector *coordinates, history_t *hist,
              PaddedRVecVector *force,
              tensor vir_force,
              t_mdatoms *mdatoms,
              gmx_enerdata_t *enerd, t_fcdata *fcd,
              std::vector<real> *lambda, t_graph *graph,
              t_forcerec *fr,
              gmx_vsite_t *vsite, rvec mu_tot,
              double t, gmx_edsam_t ed,
              gmx_bool bBornRadii,
              int flags)
{
    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
    {
        flags &= ~GMX_FORCE_NONBONDED;
    }

    GMX_ASSERT(coordinates->size() >= static_cast<unsigned int>(fr->natoms_force + 1), "We might need 1 element extra for SIMD");
    GMX_ASSERT(force->size() >= static_cast<unsigned int>(fr->natoms_force + 1), "We might need 1 element extra for SIMD");

    rvec *x = as_rvec_array(coordinates->data());

    switch (inputrec->cutoff_scheme)
    {
        case ecutsVERLET:
            do_force_cutsVERLET(fplog, cr, inputrec,
                                step, nrnb, wcycle,
                                top, groups,
                                box, x, hist,
                                force, vir_force,
                                mdatoms,
                                enerd, fcd,
                                lambda->data(), graph,
                                fr, fr->ic,
                                vsite, mu_tot,
                                t, ed,
                                bBornRadii,
                                flags);
            break;
        case ecutsGROUP:
            do_force_cutsGROUP(fplog, cr, inputrec,
                               step, nrnb, wcycle,
                               top, groups,
                               box, x, hist,
                               force, vir_force,
                               mdatoms,
                               enerd, fcd,
                               lambda->data(), graph,
                               fr, vsite, mu_tot,
                               t, ed,
                               bBornRadii,
                               flags);
            break;
        default:
            gmx_incons("Invalid cut-off scheme passed!");
    }
}
void do_constrain_first(FILE *fplog, gmx_constr_t constr,
                        t_inputrec *ir, t_mdatoms *md,
                        t_state *state, t_commrec *cr, t_nrnb *nrnb,
                        t_forcerec *fr, gmx_localtop_t *top)
{
    int         i, m, start, end;
    gmx_int64_t step;
    real        dt = ir->delta_t;
    real        dvdl_dum = 0;
    rvec       *savex;

    /* We need to allocate one element extra, since we might use
     * (unaligned) 4-wide SIMD loads to access rvec entries.
     */
    snew(savex, state->natoms + 1);
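
    /* Background note (not in the original source): an rvec is real[3], so
     * an unaligned 4-wide SIMD load that starts at the last rvec reads one
     * real past the end of the array; the single extra element allocated
     * above keeps such loads inside the allocation. */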
    start = 0;
    end   = md->homenr;

    if (debug)
    {
        fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
                start, md->homenr, end);
    }
    /* Do a first constrain to reset particles... */
    step = ir->init_step;
    if (fplog)
    {
        char buf[STEPSTRSIZE];
        fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
                gmx_step_str(step, buf));
    }

    /* constrain the current position */
    constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
              ir, cr, step, 0, 1.0, md,
              as_rvec_array(state->x.data()), as_rvec_array(state->x.data()), nullptr,
              fr->bMolPBC, state->box,
              state->lambda[efptBONDED], &dvdl_dum,
              nullptr, nullptr, nrnb, econqCoord);
    if (EI_VV(ir->eI))
    {
        /* constrain the initial velocity, and save it */
        /* also may be useful if we need the ekin from the halfstep for velocity verlet */
        constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
                  ir, cr, step, 0, 1.0, md,
                  as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), as_rvec_array(state->v.data()),
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  nullptr, nullptr, nrnb, econqVeloc);
    }
    /* constrain the initial velocities at t-dt/2 */
    if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
    {
        for (i = start; (i < end); i++)
        {
            for (m = 0; (m < DIM); m++)
            {
                /* Reverse the velocity */
                state->v[i][m] = -state->v[i][m];
                /* Store the position at t-dt in buf */
                savex[i][m] = state->x[i][m] + dt*state->v[i][m];
            }
        }
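
        /* Reasoning sketch (not in the original source): after the sign flip
         * v -> -v, savex = x(0) + dt*v equals x(0) - dt*v_original, i.e. the
         * position at t = -dt for constant velocity. Constraining savex with
         * x(0) as reference (below) then yields constraint-consistent
         * coordinates and velocities for the half step before t = 0. */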
        /* Shake the positions at t=-dt with the positions at t=0
         * as reference coordinates.
         */
        if (fplog)
        {
            char buf[STEPSTRSIZE];
            fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
                    gmx_step_str(step, buf));
        }
        constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
                  ir, cr, step, -1, 1.0, md,
                  as_rvec_array(state->x.data()), savex, nullptr,
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  as_rvec_array(state->v.data()), nullptr, nrnb, econqCoord);

        for (i = start; i < end; i++)
        {
            for (m = 0; m < DIM; m++)
            {
                /* Re-reverse the velocities */
                state->v[i][m] = -state->v[i][m];
            }
        }
    }
    sfree(savex);
}
static void
integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
                double *enerout, double *virout)
{
    double enersum, virsum;
    double invscale, invscale2, invscale3;
    double r, ea, eb, ec, pa, pb, pc, pd;
    double y0, f, g, h;
    int    ri, offset;
    double tabfactor;

    invscale  = 1.0/scale;
    invscale2 = invscale*invscale;
    invscale3 = invscale*invscale2;

    /* Following summation derived from cubic spline definition,
     * Numerical Recipes in C, second edition, p. 113-116. Exact for
     * the cubic spline. We first calculate the negative of the
     * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
     * add the more standard, abrupt cutoff correction to that result,
     * yielding the long-range correction for a switched function. We
     * perform both the pressure and energy loops at the same time for
     * simplicity, as the computational cost is low. */

    /* Since the dispersion table has been scaled down a factor
     * 6.0 and the repulsion a factor 12.0 to compensate for the
     * c6/c12 parameters inside nbfp[] being scaled up (to save
     * flops in kernels), we need to correct for this.
     */
    tabfactor = (offstart == 0 ? 6.0 : 12.0);

    enersum = 0.0;
    virsum  = 0.0;
    for (ri = rstart; ri < rend; ++ri)
    {
        r  = ri*invscale;
        ea = invscale3;
        eb = 2.0*invscale2*r;
        ec = invscale*r*r;

        pa = invscale3;
        pb = 3.0*invscale2*r;
        pc = 3.0*invscale*r*r;
        pd = r*r*r;

        /* this "8" is from the packing in the vdwtab array - perhaps
           should be defined? */
        offset = 8*ri + offstart;
        y0     = vdwtab[offset];
        f      = vdwtab[offset+1];
        g      = vdwtab[offset+2];
        h      = vdwtab[offset+3];

        enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
        virsum  += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
    }
    *enerout = 4.0*M_PI*enersum*tabfactor;
    *virout  = 4.0*M_PI*virsum*tabfactor;
}
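
/* Derivation sketch (not in the original source): within table bin ri the
 * cubic spline is Y(t) = y0 + f*t + g*t^2 + h*t^3 with t in [0,1] and
 * r = (ri + t)/scale, so r^2 dr = (ea*t^2 + eb*t + ec) dt. Integrating
 * t^n*(ea*t^2 + eb*t + ec) over [0,1] gives the ea/(n+3) + eb/(n+2) +
 * ec/(n+1) pattern accumulated into enersum above; the pa..pd terms play
 * the same role for the virial integrand r^3 * dY/dr. */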
void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
{
    double eners[2], virs[2], enersum, virsum;
    double r0, rc3, rc9;
    int    ri0, ri1, i;
    real   scale, *vdwtab;

    fr->enershiftsix    = 0;
    fr->enershifttwelve = 0;
    fr->enerdiffsix     = 0;
    fr->enerdifftwelve  = 0;
    fr->virdiffsix      = 0;
    fr->virdifftwelve   = 0;

    if (eDispCorr != edispcNO)
    {
        for (i = 0; i < 2; i++)
        {
            eners[i] = 0;
            virs[i]  = 0;
        }
        if ((fr->vdw_modifier == eintmodPOTSHIFT) ||
            (fr->vdw_modifier == eintmodPOTSWITCH) ||
            (fr->vdw_modifier == eintmodFORCESWITCH) ||
            (fr->vdwtype == evdwSHIFT) ||
            (fr->vdwtype == evdwSWITCH))
        {
            if (((fr->vdw_modifier == eintmodPOTSWITCH) ||
                 (fr->vdw_modifier == eintmodFORCESWITCH) ||
                 (fr->vdwtype == evdwSWITCH)) && fr->rvdw_switch == 0)
            {
                gmx_fatal(FARGS,
                          "With dispersion correction rvdw-switch can not be zero "
                          "for vdw-type = %s", evdw_names[fr->vdwtype]);
            }

            /* TODO This code depends on the logic in tables.c that
               constructs the table layout, which should be made
               explicit in future cleanup. */
            GMX_ASSERT(fr->dispersionCorrectionTable->interaction == GMX_TABLE_INTERACTION_VDWREP_VDWDISP,
                       "Dispersion-correction code needs a table with both repulsion and dispersion terms");
            scale  = fr->dispersionCorrectionTable->scale;
            vdwtab = fr->dispersionCorrectionTable->data;

            /* Round the cut-offs to exact table values for precision */
            ri0 = static_cast<int>(floor(fr->rvdw_switch*scale));
            ri1 = static_cast<int>(ceil(fr->rvdw*scale));
            /* The code below has some support for handling force-switching, i.e.
             * when the force (instead of potential) is switched over a limited
             * region. This leads to a constant shift in the potential inside the
             * switching region, which we can handle by adding a constant energy
             * term in the force-switch case just like when we do potential-shift.
             *
             * For now this is not enabled, but to keep the functionality in the
             * code we check separately for switch and shift. When we do force-switch
             * the shifting point is rvdw_switch, while it is the cutoff when we
             * have a classical potential-shift.
             *
             * For a pure potential-shift the potential has a constant shift
             * all the way out to the cutoff, and that is it. For other forms
             * we need to calculate the constant shift up to the point where we
             * start modifying the potential.
             */
            ri0 = (fr->vdw_modifier == eintmodPOTSHIFT) ? ri1 : ri0;

            r0  = ri0/scale;
            rc3 = r0*r0*r0;
            rc9 = rc3*rc3*rc3;

            if ((fr->vdw_modifier == eintmodFORCESWITCH) ||
                (fr->vdwtype == evdwSHIFT))
            {
                /* Determine the constant energy shift below rvdw_switch.
                 * Table has a scale factor since we have scaled it down to compensate
                 * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
                 */
                fr->enershiftsix    = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
                fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
            }
            else if (fr->vdw_modifier == eintmodPOTSHIFT)
            {
                fr->enershiftsix    = (real)(-1.0/(rc3*rc3));
                fr->enershifttwelve = (real)( 1.0/(rc9*rc3));
            }

            /* Add the constant part from 0 to rvdw_switch.
             * This integration from 0 to rvdw_switch overcounts the number
             * of interactions by 1, as it also counts the self interaction.
             * We will correct for this later.
             */
            eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
            eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
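
            /* Reasoning sketch (not in the original source): a constant
             * shift V_sh in the potential contributes
             * int_0^rc 4*pi*r^2*V_sh dr = (4*pi/3)*rc^3*V_sh
             * to the correction integral, which is exactly the term added
             * above for the r^-6 and r^-12 components. */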
            /* Calculate the contribution in the range [r0,r1] where we
             * modify the potential. For a pure potential-shift modifier we will
             * have ri0==ri1, and there will not be any contribution here.
             */
            for (i = 0; i < 2; i++)
            {
                enersum = 0;
                virsum  = 0;
                integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
                eners[i] -= enersum;
                virs[i]  -= virsum;
            }

            /* Alright: Above we compensated by REMOVING the parts outside r0
             * corresponding to the ideal VdW 1/r6 and 1/r12 potentials.
             *
             * Regardless of whether r0 is the point where we start switching,
             * or the cutoff where we calculated the constant shift, we include
             * all the parts we are missing out to infinity from r0 by
             * calculating the analytical dispersion correction.
             */
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            virs[0]  +=  8.0*M_PI/rc3;
            virs[1]  += -16.0*M_PI/(3.0*rc9);
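
            /* Derivation sketch (not in the original source): with g(r) = 1
             * the dispersion tail is
             * int_rc^inf 4*pi*r^2*(-r^-6) dr = -4*pi/(3*rc^3), and the
             * repulsion tail int_rc^inf 4*pi*r^2*r^-12 dr = 4*pi/(9*rc^9);
             * the virial terms come from weighting the same integrals with
             * r*dV/dr instead of V. */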
        }
        else if (fr->vdwtype == evdwCUT ||
                 EVDW_PME(fr->vdwtype) ||
                 fr->vdwtype == evdwUSER)
        {
            if (fr->vdwtype == evdwUSER && fplog)
            {
                fprintf(fplog,
                        "WARNING: using dispersion correction with user tables\n");
            }

            /* Note that with LJ-PME, the dispersion correction is multiplied
             * by the difference between the actual C6 and the value of C6
             * that would produce the combination rule.
             * This means the normal energy and virial difference formulas
             * can be used here.
             */

            rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
            rc9 = rc3*rc3*rc3;

            /* Contribution beyond the cut-off */
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            if (fr->vdw_modifier == eintmodPOTSHIFT)
            {
                /* Contribution within the cut-off */
                eners[0] += -4.0*M_PI/(3.0*rc3);
                eners[1] +=  4.0*M_PI/(3.0*rc9);
            }
            /* Contribution beyond the cut-off */
            virs[0] +=  8.0*M_PI/rc3;
            virs[1] += -16.0*M_PI/(3.0*rc9);
        }
        else
        {
            gmx_fatal(FARGS,
                      "Dispersion correction is not implemented for vdw-type = %s",
                      evdw_names[fr->vdwtype]);
        }

        /* When we deprecate the group kernels the code below can go too */
        if (fr->vdwtype == evdwPME && fr->cutoff_scheme == ecutsGROUP)
        {
            /* Calculate self-interaction coefficient (assuming that
             * the reciprocal-space contribution is constant in the
             * region that contributes to the self-interaction).
             */
            fr->enershiftsix = gmx::power6(fr->ewaldcoeff_lj) / 6.0;

            eners[0] += -gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj)/3.0;
            virs[0]  +=  gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj);
        }

        fr->enerdiffsix    = eners[0];
        fr->enerdifftwelve = eners[1];
        /* The 0.5 is due to the Gromacs definition of the virial */
        fr->virdiffsix     = 0.5*virs[0];
        fr->virdifftwelve  = 0.5*virs[1];
    }
}
void calc_dispcorr(t_inputrec *ir, t_forcerec *fr,
                   matrix box, real lambda, tensor pres, tensor virial,
                   real *prescorr, real *enercorr, real *dvdlcorr)
gmx_bool bCorrAll, bCorrPres;
real     dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
if (ir->eDispCorr != edispcNO)
bCorrAll  = (ir->eDispCorr == edispcAllEner ||
             ir->eDispCorr == edispcAllEnerPres);
bCorrPres = (ir->eDispCorr == edispcEnerPres ||
             ir->eDispCorr == edispcAllEnerPres);
invvol = 1/det(box);
if (fr->n_tpi)
/* Only correct for the interactions with the inserted molecule */
dens = (fr->numAtomsForDispersionCorrection - fr->n_tpi)*invvol;
else
dens   = fr->numAtomsForDispersionCorrection*invvol;
ninter = 0.5*fr->numAtomsForDispersionCorrection;
if (ir->efep == efepNO)
avcsix    = fr->avcsix[0];
avctwelve = fr->avctwelve[0];
else
avcsix    = (1 - lambda)*fr->avcsix[0]    + lambda*fr->avcsix[1];
avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
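/* Note: with free energy on, the average coefficients are interpolated
 * linearly between the A and B states,
 *   <C6>(lambda) = (1 - lambda)*<C6>_A + lambda*<C6>_B,
 * so d<C6>/dlambda = <C6>_B - <C6>_A, which is exactly the factor used
 * for the dvdlambda contributions below.
 */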
enerdiff   = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
*enercorr += avcsix*enerdiff;
if (ir->efep != efepNO)
dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
enerdiff   = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
*enercorr += avctwelve*enerdiff;
if (fr->efep != efepNO)
dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
if (ir->eDispCorr == edispcAllEnerPres)
svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
/* The factor 2 is because of the Gromacs virial definition */
spres = -2.0*invvol*svir*PRESFAC;
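/* Note: svir is in energy units (kJ mol^-1); multiplying by invvol and
 * PRESFAC (approximately 16.6054 bar per kJ mol^-1 nm^-3) converts the
 * correction to bar, matching the pressure tensor it is added to.
 */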
for (m = 0; m < DIM; m++)
virial[m][m] += svir;
pres[m][m]   += spres;
/* Can't currently control when it prints; for now, just print when debugging */
fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
        avcsix, avctwelve);
fprintf(debug,
        "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
        *enercorr, spres, svir);
fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
if (fr->efep != efepNO)
*dvdlcorr += dvdlambda;
void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
                  t_graph *graph, rvec x[])
fprintf(fplog, "Removing pbc first time\n");
calc_shifts(box, fr->shift_vec);
mk_mshift(fplog, graph, fr->ePBC, box, x);
p_graph(debug, "do_pbc_first 1", graph);
shift_self(graph, box, x);
/* By doing an extra mk_mshift the molecules that are broken
 * because they were e.g. imported from another software
 * will be made whole again. Such are the healing powers
 * of GROMACS.
 */
mk_mshift(fplog, graph, fr->ePBC, box, x);
p_graph(debug, "do_pbc_first 2", graph);
fprintf(fplog, "Done rmpbc\n");
static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                            const gmx_mtop_t *mtop, rvec x[],
                            gmx_bool bFirst)
gmx_molblock_t *molb;
if (bFirst && fplog)
fprintf(fplog, "Removing pbc first time\n");
for (mb = 0; mb < mtop->nmolblock; mb++)
molb = &mtop->molblock[mb];
if (molb->natoms_mol == 1 ||
    (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
/* Just one atom or charge group in the molecule, no PBC required */
as += molb->nmol*molb->natoms_mol;
/* Pass nullptr instead of fplog to avoid graph prints for each molecule type */
mk_graph_ilist(nullptr, mtop->moltype[molb->type].ilist,
               0, molb->natoms_mol, FALSE, FALSE, graph);
for (mol = 0; mol < molb->nmol; mol++)
mk_mshift(fplog, graph, ePBC, box, x+as);
shift_self(graph, box, x+as);
/* The molecule is whole now.
 * We don't need the second mk_mshift call as in do_pbc_first,
 * since we no longer need this graph.
 */
as += molb->natoms_mol;
void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
                       const gmx_mtop_t *mtop, rvec x[])
low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                 gmx_mtop_t *mtop, rvec x[])
low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
void put_atoms_in_box_omp(int ePBC, const matrix box, int natoms, rvec x[])
nth = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nth) schedule(static)
for (t = 0; t < nth; t++)
offset = (natoms*t)/nth;
len    = (natoms*(t + 1))/nth - offset;
put_atoms_in_box(ePBC, box, len, x + offset);
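/* Note: this integer arithmetic splits the atom range into nth
 * contiguous chunks with no gaps or overlap. E.g. natoms = 10, nth = 4
 * gives (offset, len) pairs (0,2), (2,3), (5,2) and (7,3), covering
 * exactly atoms 0..9.
 */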
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
// TODO This can be cleaned up a lot, and moved back to runner.cpp
void finish_run(FILE *fplog, const gmx::MDLogger &mdlog, t_commrec *cr,
                t_inputrec *inputrec,
                t_nrnb nrnb[], gmx_wallcycle_t wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                nonbonded_verlet_t *nbv,
                gmx_bool bWriteStat)
t_nrnb *nrnb_tot = nullptr;
double  nbfs = 0, mflop = 0;
double  elapsed_time,
        elapsed_time_over_all_ranks,
        elapsed_time_over_all_threads,
        elapsed_time_over_all_threads_over_all_ranks;
MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
              cr->mpi_comm_mysim);
elapsed_time                  = walltime_accounting_get_elapsed_time(walltime_accounting);
elapsed_time_over_all_ranks   = elapsed_time;
elapsed_time_over_all_threads = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
/* reduce elapsed_time over all MPI ranks in the current simulation */
MPI_Allreduce(&elapsed_time,
              &elapsed_time_over_all_ranks,
              1, MPI_DOUBLE, MPI_SUM,
              cr->mpi_comm_mysim);
elapsed_time_over_all_ranks /= cr->nnodes;
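/* Note: summing with MPI_Allreduce and then dividing by cr->nnodes
 * leaves every rank holding the arithmetic mean of the per-rank
 * elapsed times.
 */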
/* Reduce elapsed_time_over_all_threads over all MPI ranks in the
 * current simulation. */
MPI_Allreduce(&elapsed_time_over_all_threads,
              &elapsed_time_over_all_threads_over_all_ranks,
              1, MPI_DOUBLE, MPI_SUM,
              cr->mpi_comm_mysim);
print_flop(fplog, nrnb_tot, &nbfs, &mflop);
if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
print_dd_statistics(cr, inputrec, fplog);
/* TODO Move the responsibility for any scaling by thread counts
 * to the code that handled the thread region, so that there's a
 * mechanism to keep cycle counting working during the transition
 * to task parallelism. */
int nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
int nthreads_pme = gmx_omp_nthreads_get(emntPME);
wallcycle_scale_by_num_threads(wcycle, cr->duty == DUTY_PME, nthreads_pp, nthreads_pme);
auto cycle_sum(wallcycle_sum(cr, wcycle));
struct gmx_wallclock_gpu_t* gputimes = use_GPU(nbv) ? nbnxn_gpu_get_timings(nbv->gpu_nbv) : nullptr;
wallcycle_print(fplog, mdlog, cr->nnodes, cr->npmenodes, nthreads_pp, nthreads_pme,
                elapsed_time_over_all_ranks,
                wcycle, cycle_sum, gputimes);
if (EI_DYNAMICS(inputrec->eI))
delta_t = inputrec->delta_t;
print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
           elapsed_time_over_all_ranks,
           walltime_accounting_get_nsteps_done(walltime_accounting),
           delta_t, nbfs, mflop);
print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
           elapsed_time_over_all_ranks,
           walltime_accounting_get_nsteps_done(walltime_accounting),
           delta_t, nbfs, mflop);
extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, std::vector<real> *lambda, double *lam0)
/* this function works, but could probably use a logic rewrite to keep all the different
   types of efep straight. */
if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
t_lambda *fep = ir->fepvals;
*fep_state = fep->init_fep_state; /* this might overwrite the checkpoint
                                     if checkpoint is set -- a kludge is in for now */
lambda->resize(efptNR);
for (int i = 0; i < efptNR; i++)
/* overwrite lambda state with init_lambda for now for backwards compatibility */
if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
(*lambda)[i] = fep->init_lambda;
lam0[i] = (*lambda)[i];
else
(*lambda)[i] = fep->all_lambda[i][*fep_state];
lam0[i] = (*lambda)[i];
/* need to rescale control temperatures to match current state */
for (int i = 0; i < ir->opts.ngtc; i++)
if (ir->opts.ref_t[i] > 0)
ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
/* Send to the log the information on the current lambdas */
if (fplog != nullptr)
fprintf(fplog, "Initial vector of lambda components:[ ");
for (int i = 0; i < efptNR; i++)
fprintf(fplog, "%10.4f ", (*lambda)[i]);
fprintf(fplog, "]\n");
void init_md(FILE *fplog,
             t_commrec *cr, t_inputrec *ir, const gmx_output_env_t *oenv,
             double *t, double *t0,
             std::vector<real> *lambda, int *fep_state, double *lam0,
             t_nrnb *nrnb, gmx_mtop_t *mtop,
             int nfile, const t_filenm fnm[],
             gmx_mdoutf_t *outf, t_mdebin **mdebin,
             tensor force_vir, tensor shake_vir, rvec mu_tot,
             gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags,
             gmx_wallcycle_t wcycle)
/* Initial values */
*t = *t0 = ir->init_t;
for (i = 0; i < ir->opts.ngtc; i++)
/* set bSimAnn if any group is being annealed */
if (ir->opts.annealing[i] != eannNO)
/* Initialize lambda variables */
initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
// TODO upd is never NULL in practice, but the analysers don't know that
*upd = init_update(ir);
update_annealing_target_temp(ir, ir->init_t, upd ? *upd : nullptr);
*vcm = init_vcm(fplog, &mtop->groups, ir);
if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
if (ir->etc == etcBERENDSEN)
please_cite(fplog, "Berendsen84a");
if (ir->etc == etcVRESCALE)
please_cite(fplog, "Bussi2007a");
if (ir->eI == eiSD1)
please_cite(fplog, "Goga2012");
*outf = init_mdoutf(fplog, nfile, fnm, Flags, cr, ir, mtop, oenv, wcycle);
*mdebin = init_mdebin((Flags & MD_APPENDFILES) ? nullptr : mdoutf_get_fp_ene(*outf),
                      mtop, ir, mdoutf_get_fp_dhdl(*outf));
/* Initialize variables */
clear_mat(force_vir);
clear_mat(shake_vir);