/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed-forces/bonded.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/genborn.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/pleasecite.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/sysinfo.h"

#include "nbnxn_gpu.h"
void print_time(FILE                     *out,
                gmx_walltime_accounting_t walltime_accounting,
                gmx_int64_t               step,
                t_inputrec               *ir,
                t_commrec gmx_unused     *cr)
{
    time_t finish;
    char   timebuf[STRLEN];
    char   buf[48];
    double dt, elapsed_seconds, time_per_step;

    fprintf(out, "step %s", gmx_step_str(step, buf));

    if (step >= ir->nstlist)
    {
        double seconds_since_epoch = gmx_gettime();
        elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
        time_per_step   = elapsed_seconds/(step - ir->init_step + 1);
        dt              = (ir->nsteps + ir->init_step - step)*time_per_step;

        finish = (time_t) (seconds_since_epoch + dt);
        gmx_ctime_r(&finish, timebuf, STRLEN);
        sprintf(buf, "%s", timebuf);
        buf[strlen(buf)-1] = '\0'; /* strip the trailing newline from ctime */
        fprintf(out, ", will finish %s", buf);
        fprintf(out, ", remaining wall clock time: %5d s          ", (int)dt);
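        /* The expression below converts the measured wall-clock seconds per
         * step into simulated nanoseconds per day: delta_t is in ps, so
         * delta_t/1000 is ns per step, and 24*60*60 is seconds per day. */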
        fprintf(out, " performance: %.1f ns/day    ",
                ir->delta_t/1000*24*60*60/time_per_step);
    }
}
void print_date_and_time(FILE *fplog, int nodeid, const char *title,
                         double the_time)
{
    int  i;
    char time_string[STRLEN];
    char timebuf[STRLEN];

    time_t temp_time = (time_t) the_time;

    gmx_ctime_r(&temp_time, timebuf, STRLEN);
    /* Copy the date/time string up to (but excluding) the first control
     * character, which drops the trailing newline from ctime. */
    for (i = 0; timebuf[i] >= ' '; i++)
    {
        time_string[i] = timebuf[i];
    }
    time_string[i] = '\0';

    fprintf(fplog, "%s on rank %d %s\n", title, nodeid, time_string);
}
void print_start(FILE *fplog, t_commrec *cr,
                 gmx_walltime_accounting_t walltime_accounting,
                 const char *name)
{
    char buf[STRLEN];

    sprintf(buf, "Started %s", name);
    print_date_and_time(fplog, cr->nodeid, buf,
                        walltime_accounting_get_start_time_stamp(walltime_accounting));
}
static void sum_forces(rvec f[], const PaddedRVecVector *forceToAdd)
{
    /* TODO: remove this - 1 when padding is properly implemented */
    int         end  = forceToAdd->size() - 1;
    const rvec *fAdd = as_rvec_array(forceToAdd->data());

    // cppcheck-suppress unreadVariable
    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        rvec_inc(f[i], fAdd[i]);
    }
}
static void calc_virial(int start, int homenr, rvec x[], rvec f[],
                        tensor vir_part, t_graph *graph, matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    int i;

    /* The short-range virial from surrounding boxes */
    calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    /* Add position restraint contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][i] += fr->vir_diag_posres[i];
    }

    /* Add wall contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][ZZ] += fr->vir_wall_z[i];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}
static void pull_potential_wrapper(t_commrec *cr,
                                   t_inputrec *ir,
                                   matrix box, rvec x[],
                                   rvec f[],
                                   tensor vir_force,
                                   t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   real *lambda,
                                   double t,
                                   gmx_wallcycle_t wcycle)
{
    t_pbc pbc;
    real  dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     * The virial contribution is calculated directly,
     * which is why we call pull_potential after calc_virial.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->ePBC, box);
    dvdl                     = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(ir->pull_work, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}
static void pme_receive_force_ener(t_commrec      *cr,
                                   gmx_wallcycle_t wcycle,
                                   gmx_enerdata_t *enerd,
                                   t_forcerec     *fr)
{
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(cr, as_rvec_array(fr->f_novirsum->data()), fr->vir_el_recip, &e_q,
                      fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);

    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}
static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
                               gmx_int64_t step, real pforce, rvec *x, rvec *f)
{
    int  i;
    real pf2, fn2;
    char buf[STEPSTRSIZE];

    pf2 = gmx::square(pforce);
    for (i = 0; i < md->homenr; i++)
    {
        fn2 = norm2(f[i]);
        /* We also catch NAN, if the compiler does not optimize this away. */
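        /* fn2 != fn2 is the IEEE-754 self-comparison trick: it is true
         * only when fn2 is NaN, so non-finite forces are reported too,
         * unless fast-math options let the compiler remove the test. */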
        if (fn2 >= pf2 || fn2 != fn2)
        {
            fprintf(fp, "step %s atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
                    gmx_step_str(step, buf),
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(fn2));
        }
    }
}
static void post_process_forces(t_commrec *cr,
                                gmx_int64_t step,
                                t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                                gmx_localtop_t *top,
                                matrix box, rvec x[],
                                rvec f[],
                                tensor vir_force,
                                t_mdatoms *mdatoms,
                                t_graph *graph,
                                t_forcerec *fr, gmx_vsite_t *vsite,
                                int flags)
{
    if (fr->bF_NoVirSum)
    {
        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, as_rvec_array(fr->f_novirsum->data()), NULL,
                           (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Now add the forces, this is local */
            sum_forces(f, fr->f_novirsum);

            if (EEL_FULL(fr->eeltype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_el_recip, vir_force);
            }
            if (EVDW_PME(fr->vdwtype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_lj_recip, vir_force);
            }
            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}
static void do_nb_verlet(t_forcerec *fr,
                         interaction_const_t *ic,
                         gmx_enerdata_t *enerd,
                         int flags, int ilocality,
                         int clearF,
                         t_nrnb *nrnb,
                         gmx_wallcycle_t wcycle)
{
    int                       enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
    nonbonded_verlet_group_t *nbvg;
    gmx_bool                  bUsingGpuKernels;

    if (!(flags & GMX_FORCE_NONBONDED))
    {
        /* skip non-bonded calculation */
        return;
    }

    nbvg = &fr->nbv->grp[ilocality];

    /* GPU kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    bUsingGpuKernels = (nbvg->kernel_type == nbnxnk8x8x8_GPU);
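    /* The kernel-type names encode the cluster-pair geometry: the 4x4
     * plain-C and 4xN/2xNN SIMD kernels pair i-clusters of 4 atoms with
     * j-clusters sized to the SIMD width, while the 8x8x8 layout targets
     * GPUs (its PlainC twin emulates the GPU kernel on the CPU). */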
    if (!bUsingGpuKernels)
    {
        wallcycle_sub_start(wcycle, ewcsNONBONDED);
    }
    switch (nbvg->kernel_type)
    {
        case nbnxnk4x4_PlainC:
            nbnxn_kernel_ref(&nbvg->nbl_lists,
                             nbvg->nbat, ic,
                             fr->shift_vec,
                             flags,
                             clearF,
                             fr->fshift[0],
                             enerd->grpp.ener[egCOULSR],
                             fr->bBHAM ?
                             enerd->grpp.ener[egBHAMSR] :
                             enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_4xN:
            nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
                                  nbvg->nbat, ic,
                                  nbvg->ewald_excl,
                                  fr->shift_vec,
                                  flags,
                                  clearF,
                                  fr->fshift[0],
                                  enerd->grpp.ener[egCOULSR],
                                  fr->bBHAM ?
                                  enerd->grpp.ener[egBHAMSR] :
                                  enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_2xNN:
            nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
                                   nbvg->nbat, ic,
                                   nbvg->ewald_excl,
                                   fr->shift_vec,
                                   flags,
                                   clearF,
                                   fr->fshift[0],
                                   enerd->grpp.ener[egCOULSR],
                                   fr->bBHAM ?
                                   enerd->grpp.ener[egBHAMSR] :
                                   enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk8x8x8_GPU:
            nbnxn_gpu_launch_kernel(fr->nbv->gpu_nbv, nbvg->nbat, flags, ilocality);
            break;

        case nbnxnk8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
                                 nbvg->nbat, ic,
                                 fr->shift_vec,
                                 flags,
                                 clearF,
                                 nbvg->nbat->out[0].f,
                                 fr->fshift[0],
                                 enerd->grpp.ener[egCOULSR],
                                 fr->bBHAM ?
                                 enerd->grpp.ener[egBHAMSR] :
                                 enerd->grpp.ener[egLJSR]);
            break;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
    }
    if (!bUsingGpuKernels)
    {
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

    if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!bUsingGpuKernels && nbvg->ewald_excl == ewaldexclAnalytical) ||
             (bUsingGpuKernels && nbnxn_gpu_is_kernel_ewald_analytical(fr->nbv->gpu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (flags & GMX_FORCE_ENERGY)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj  += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
             nbvg->nbl_lists.natpair_ljq);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
             nbvg->nbl_lists.natpair_lj);
    /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
             nbvg->nbl_lists.natpair_q);

    if (ic->vdw_modifier == eintmodFORCESWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdw_modifier == eintmodPOTSWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdwtype == evdwPME)
    {
        /* We add up the LJ Ewald cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
}
static void do_nb_verlet_fep(nbnxn_pairlist_set_t *nbl_lists,
                             t_forcerec           *fr,
                             rvec                  x[],
                             rvec                  f[],
                             t_mdatoms            *mdatoms,
                             t_lambda             *fepvals,
                             real                 *lambda,
                             gmx_enerdata_t       *enerd,
                             int                   flags,
                             t_nrnb               *nrnb,
                             gmx_wallcycle_t       wcycle)
{
    int              donb_flags;
    nb_kernel_data_t kernel_data;
    real             lam_i[efptNR];
    real             dvdl_nb[efptNR];
    int              th;
    int              i, j;

    donb_flags = 0;
    /* Add short-range interactions */
    donb_flags |= GMX_NONBONDED_DO_SR;

    /* Currently all group scheme kernels always calculate (shift-)forces */
    if (flags & GMX_FORCE_FORCES)
    {
        donb_flags |= GMX_NONBONDED_DO_FORCE;
    }
    if (flags & GMX_FORCE_VIRIAL)
    {
        donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
    }
    if (flags & GMX_FORCE_ENERGY)
    {
        donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
    }

    kernel_data.flags  = donb_flags;
    kernel_data.lambda = lambda;
    kernel_data.dvdl   = dvdl_nb;

    kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR];
    kernel_data.energygrp_vdw  = enerd->grpp.ener[egLJSR];

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i] = 0;
    }

    assert(gmx_omp_nthreads_get(emntNonbonded) == nbl_lists->nnbl);

    wallcycle_sub_start(wcycle, ewcsNONBONDED);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
    for (th = 0; th < nbl_lists->nnbl; th++)
    {
        try
        {
            gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                      x, f, fr, mdatoms, &kernel_data, nrnb);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    /* If we do foreign lambda and we have soft-core interactions
     * we have to recalculate the (non-linear) energy contributions.
     */
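    /* With soft-core (sc_alpha != 0) the pair potential depends on lambda
     * non-linearly, so foreign-lambda energies cannot be obtained by
     * interpolating the end-state energies; the kernel is re-run once
     * per foreign lambda value below. */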
    if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
    {
        kernel_data.flags          = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE)) | GMX_NONBONDED_DO_FOREIGNLAMBDA;
        kernel_data.lambda         = lam_i;
        kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR];
        kernel_data.energygrp_vdw  = enerd->foreign_grpp.ener[egLJSR];
        /* Note that we add to kernel_data.dvdl, but ignore the result */

        for (i = 0; i < enerd->n_lambda; i++)
        {
            for (j = 0; j < efptNR; j++)
            {
                lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
            }
            reset_foreign_enerdata(enerd);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
            for (th = 0; th < nbl_lists->nnbl; th++)
            {
                try
                {
                    gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                              x, f, fr, mdatoms, &kernel_data, nrnb);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }

            sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
            enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
        }
    }

    wallcycle_sub_stop(wcycle, ewcsNONBONDED);
}
gmx_bool use_GPU(const nonbonded_verlet_t *nbv)
{
    return nbv != NULL && nbv->bUseGPU;
}
static gmx_inline void clear_rvecs_omp(int n, rvec v[])
{
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (nth == 1)
    {
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
}
/*! \brief This routine checks if the potential energy is finite.
 *
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetics. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] enerd The energy data; the non-bonded group energies need to be added in here before calling this routine
 */
static void checkPotentialEnergyValidity(const gmx_enerdata_t *enerd)
{
    if (!std::isfinite(enerd->term[F_EPOT]))
    {
        gmx_fatal(FARGS, "The total potential energy is %g, which is not finite. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A non-finite potential energy can be caused by overlapping interactions in bonded interactions or very large or NaN coordinate values. Usually this is caused by a badly or non-equilibrated initial configuration or incorrect interactions or parameters in the topology.",
                  enerd->term[F_EPOT],
                  enerd->term[F_LJ],
                  enerd->term[F_COUL_SR]);
    }
}
void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
                         t_inputrec *inputrec,
                         gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                         gmx_localtop_t *top,
                         gmx_groups_t gmx_unused *groups,
                         matrix box, rvec x[], history_t *hist,
                         PaddedRVecVector *force,
                         tensor vir_force,
                         t_mdatoms *mdatoms,
                         gmx_enerdata_t *enerd, t_fcdata *fcd,
                         real *lambda, t_graph *graph,
                         t_forcerec *fr, interaction_const_t *ic,
                         gmx_vsite_t *vsite, rvec mu_tot,
                         double t, gmx_edsam_t ed,
                         gmx_bool bBornRadii,
                         int flags)
{
    int                 cg1, i, j;
    double              mu[2*DIM];
    gmx_bool            bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool            bDoForces, bUseGPU, bUseOrEmulGPU;
    gmx_bool            bDiffKernels = FALSE;
    rvec                vzero, box_diag;
    float               cycles_pme, cycles_force = 0, cycles_wait_gpu = 0;
    /* TODO To avoid loss of precision, float can't be used for a
     * cycle count. Build an object that can do this right and perhaps
     * also be used by gmx_wallcycle_t */
    gmx_cycles_t        cycleCountBeforeLocalWorkCompletes = 0;
    nonbonded_verlet_t *nbv = fr->nbv;

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }
    else
    {
        cg1 = top->cgs.nr;
    }
    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces     = (flags & GMX_FORCE_FORCES);
    bUseGPU       = fr->nbv->bUseGPU;
    bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);

    if (bStateChanged)
    {
        update_forcerec(fr, box);

        if (inputrecNeedMutot(inputrec))
        {
            /* Calculate total (local) dipole moment in a temporary common array.
             * This makes it possible to sum them over nodes faster.
             */
            calc_mu(start, homenr,
                    x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                    mu, mu+DIM);
        }
    }
    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }

    nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
                                 fr->shift_vec, nbv->grp[0].nbat);
#if GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        gmx_bool bBS;
        matrix   boxs;

        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */
    /* do gridding for pair search */
    if (bNS)
    {
        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        clear_rvec(vzero);
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!DOMAINDECOMP(cr))
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
                              0, vzero, box_diag,
                              0, mdatoms->homenr, -1, fr->cginfo, x,
                              0, NULL,
                              nbv->grp[eintLocal].kernel_type,
                              nbv->grp[eintLocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
                                       fr->cginfo, x,
                                       nbv->grp[eintNonlocal].kernel_type,
                                       nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        if (nbv->ngrp == 1 ||
            nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        else
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
                               nbv->nbs, mdatoms, fr->cginfo);
            nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }

        wallcycle_stop(wcycle, ewcNS);
    }
    /* initialize the GPU atom data and copy shift vector */
    if (bUseGPU)
    {
        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_gpu_init_atomdata(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }

        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        nbnxn_gpu_upload_shiftvec(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }
    /* do local pair search */
    if (bNS)
    {
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
                            &top->excls,
                            ic->rlist,
                            nbv->min_ci_balanced,
                            &nbv->grp[eintLocal].nbl_lists,
                            eintLocal,
                            nbv->grp[eintLocal].kernel_type,
                            nrnb);
        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);

        if (bUseGPU)
        {
            /* initialize local pair-list on the GPU */
            nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
                                    nbv->grp[eintLocal].nbl_lists.nbl[0],
                                    eintLocal);
        }
        wallcycle_stop(wcycle, ewcNS);
    }
    else
    {
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
                                        nbv->grp[eintLocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (bUseGPU)
    {
        wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
        /* launch local nonbonded F on GPU */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
                     nrnb, wcycle);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }
    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (DOMAINDECOMP(cr))
    {
        bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
                        nbv->grp[eintLocal].kernel_type);

        if (bDiffKernels)
        {
            /* With GPU+CPU non-bonded calculations we need to copy
             * the local coordinates to the non-local nbat struct
             * (in CPU format) as the non-local kernel call also
             * calculates the local - non-local interactions.
             */
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (bDiffKernels)
            {
                nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
            }

            nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
                                &top->excls,
                                ic->rlist,
                                nbv->min_ci_balanced,
                                &nbv->grp[eintNonlocal].nbl_lists,
                                eintNonlocal,
                                nbv->grp[eintNonlocal].kernel_type,
                                nrnb);

            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_GPU)
            {
                /* initialize non-local pair-list on the GPU */
                nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
                                        nbv->grp[eintNonlocal].nbl_lists.nbl[0],
                                        eintNonlocal);
            }
            wallcycle_stop(wcycle, ewcNS);
        }
        else
        {
            wallcycle_start(wcycle, ewcMOVEX);
            dd_move_x(cr->dd, box, x);
            wallcycle_stop(wcycle, ewcMOVEX);

            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bUseGPU && !bDiffKernels)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
            /* launch non-local nonbonded F on GPU */
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
                         nrnb, wcycle);
            cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
    }

    if (bUseGPU)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        if (DOMAINDECOMP(cr) && !bDiffKernels)
        {
            nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintNonlocal].nbat,
                                     flags, eatNonlocal);
        }
        nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintLocal].nbat,
                                 flags, eatLocal);
        cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }
    if (bStateChanged && inputrecNeedMutot(inputrec))
    {
        if (PAR(cr))
        {
            gmx_sumd(2*DIM, mu, cr);
        }

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }
    }
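    /* With free-energy perturbation the charges depend linearly on
     * lambda_coul, so the total dipole of the system is likewise the
     * linear interpolation between the A-state and B-state dipoles
     * computed below. */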
    if (fr->efep == efepNO)
    {
        copy_rvec(fr->mu_tot[0], mu_tot);
    }
    else
    {
        for (j = 0; j < DIM; j++)
        {
            mu_tot[j] =
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                lambda[efptCOUL]*fr->mu_tot[1][j];
        }
    }
    /* Reset energies */
    reset_enerdata(enerd);
    clear_rvecs(SHIFTS, fr->fshift);

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }
    /* Temporary solution until all routines take PaddedRVecVector */
    rvec *f = as_rvec_array(force->data());

    /* Start the force cycle counter.
     * This counter is stopped after do_force_lowlevel.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);
    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->forceBufferNoVirialSummation;
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the pressure.
                 */
                fr->f_novirsum = force;
            }
        }

        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                /* TODO: remove this - 1 when padding is properly implemented */
                clear_rvecs_omp(fr->f_novirsum->size() - 1,
                                as_rvec_array(fr->f_novirsum->data()));
            }
        }

        /* Clear the short- and long-range forces */
        clear_rvecs_omp(fr->natoms_force_constr, f);

        clear_rvec(fr->vir_diag_posres);
    }

    if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
    {
        clear_pull_forces(inputrec->pull_work);
    }
    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, because in that
     * function, the listed forces are calculated before PME, which
     * does communication. With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */

    if (!bUseOrEmulGPU)
    {
        /* Maybe we should move this into do_force_lowlevel */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
                     nrnb, wcycle);
    }

    if (fr->efep != efepNO)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }

        if (DOMAINDECOMP(cr) &&
            fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }
    }
    if (!bUseOrEmulGPU || bDiffKernels)
    {
        int aloc;

        if (DOMAINDECOMP(cr))
        {
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
                         bDiffKernels ? enbvClearFYes : enbvClearFNo,
                         nrnb, wcycle);
        }

        if (!bUseOrEmulGPU)
        {
            aloc = eintLocal;
        }
        else
        {
            aloc = eintNonlocal;
        }

        /* Add all the non-bonded force to the normal force array.
         * This can be split into a local and a non-local part when overlapping
         * communication with calculation with domain decomposition.
         */
        cycles_force += wallcycle_stop(wcycle, ewcFORCE);
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_start_nocount(wcycle, ewcFORCE);

        /* if there are multiple fshift output buffers reduce them */
        if ((flags & GMX_FORCE_VIRIAL) &&
            nbv->grp[aloc].nbl_lists.nnbl > 1)
        {
            /* This is not in a subcounter because it takes a
               negligible and constant-sized amount of time */
            nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
                                                     fr->fshift);
        }
    }
    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, enerd, fcd, top, fr->born,
                      bBornRadii, box,
                      inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
                      flags, &cycles_pme);

    cycles_force += wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }
    if (bUseOrEmulGPU && !bDiffKernels)
    {
        /* wait for non-local forces (or calculate in emulation mode) */
        if (DOMAINDECOMP(cr))
        {
            if (bUseGPU)
            {
                float cycles_tmp;

                wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
                nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
                                       flags, eatNonlocal,
                                       enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                       fr->fshift);
                cycles_tmp       = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
                cycles_wait_gpu += cycles_tmp;
                cycles_force    += cycles_tmp;
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
                             nrnb, wcycle);
                cycles_force += wallcycle_stop(wcycle, ewcFORCE);
            }
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
            /* skip the reduction if there was no non-local work to do */
            if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
            {
                nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
                                               nbv->grp[eintNonlocal].nbat, f);
            }
            wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }
    }
    if (bDoForces && DOMAINDECOMP(cr))
    {
        if (bUseGPU)
        {
            /* We are done with the CPU compute, but the GPU local non-bonded
             * kernel can still be running while we communicate the forces.
             * We start a counter here, so we can, hopefully, time the rest
             * of the GPU kernel execution and data transfer.
             */
            cycleCountBeforeLocalWorkCompletes = gmx_cycles_read();
        }

        /* Communicate the forces */
        wallcycle_start(wcycle, ewcMOVEF);
        dd_move_f(cr->dd, f, fr->fshift);
        wallcycle_stop(wcycle, ewcMOVEF);
    }
    /* wait for local forces (or calculate in emulation mode) */
    if (bUseOrEmulGPU)
    {
        if (bUseGPU)
        {
            float       cycles_tmp, cycles_wait_est;
            /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
             * is between 0.5 and 1.5 Mcycles. So 2 MCycles is an overestimate,
             * but even with a step of 0.1 ms the difference is less than 1%
             * of the step time.
             */
            const float gpuWaitApiOverheadMargin = 2e6f; /* cycles */

            wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
            nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
                                   flags, eatLocal,
                                   enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                   fr->fshift);
            cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

            if (bDoForces && DOMAINDECOMP(cr))
            {
                cycles_wait_est = gmx_cycles_read() - cycleCountBeforeLocalWorkCompletes;

                if (cycles_tmp < gpuWaitApiOverheadMargin)
                {
                    /* We measured few cycles, it could be that the kernel
                     * and transfer finished earlier and there was no actual
                     * wait time, only API call overhead.
                     * Then the actual time could be anywhere between 0 and
                     * cycles_wait_est. As a compromise, we use half the time.
                     */
                    cycles_wait_est *= 0.5f;
                }
            }
            else
            {
                /* No force communication so we actually timed the wait */
                cycles_wait_est = cycles_tmp;
            }
            /* Even though this is after dd_move_f, the actual task we are
             * waiting for runs asynchronously with dd_move_f and we usually
             * have nothing to balance it with, so we can and should add
             * the time to the force time for load balancing.
             */
            cycles_force    += cycles_wait_est;
            cycles_wait_gpu += cycles_wait_est;

            /* now clear the GPU outputs while we finish the step on the CPU */
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_gpu_clear_outputs(nbv->gpu_nbv, flags);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
        else
        {
            wallcycle_start_nocount(wcycle, ewcFORCE);
            do_nb_verlet(fr, ic, enerd, flags, eintLocal,
                         DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                         nrnb, wcycle);
            wallcycle_stop(wcycle, ewcFORCE);
        }
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
                                       nbv->grp[eintLocal].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }
    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
            if (bUseGPU)
            {
                dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
            }
        }
    }
    if (bDoForces)
    {
        /* Compute forces due to electric field */
        if (fr->efield != nullptr)
        {
            fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);
        }

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        {
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }
    if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
    {
        /* Since the COM pulling is always done mass-weighted, no forces are
         * applied to vsites and this call can be done after vsite spreading.
         */
        pull_potential_wrapper(cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t,
                               wcycle);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, wcycle, enerd, fr);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    if (flags & GMX_FORCE_ENERGY)
    {
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
        {
            checkPotentialEnergyValidity(enerd);
        }
    }
}
void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
                        t_inputrec *inputrec,
                        gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                        gmx_localtop_t *top,
                        gmx_groups_t *groups,
                        matrix box, rvec x[], history_t *hist,
                        PaddedRVecVector *force,
                        tensor vir_force,
                        t_mdatoms *mdatoms,
                        gmx_enerdata_t *enerd, t_fcdata *fcd,
                        real *lambda, t_graph *graph,
                        t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
                        double t, gmx_edsam_t ed,
                        gmx_bool bBornRadii,
                        int flags)
{
    int      cg0, cg1, i, j;
    double   mu[2*DIM];
    gmx_bool bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool bDoForces;
    float    cycles_pme, cycles_force;

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    cg0 = 0;
    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }
    else
    {
        cg1 = top->cgs.nr;
    }
    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    /* Should we perform the long-range nonbonded evaluation inside the neighborsearching? */
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces     = (flags & GMX_FORCE_FORCES);
    if (bStateChanged)
    {
        update_forcerec(fr, box);

        if (inputrecNeedMutot(inputrec))
        {
            /* Calculate total (local) dipole moment in a temporary common array.
             * This makes it possible to sum them over nodes faster.
             */
            calc_mu(start, homenr,
                    x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                    mu, mu+DIM);
        }
    }
    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
                                     &(top->cgs), x, fr->cg_cm);
            inc_nrnb(nrnb, eNR_CGCM, homenr);
            inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }
    else if (bCalcCGCM)
    {
        calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
        inc_nrnb(nrnb, eNR_CGCM, homenr);
    }

    if (bCalcCGCM && gmx_debug_at)
    {
        pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
    }
#if GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        gmx_bool bBS;
        matrix   boxs;

        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */
    /* Communicate coordinates and sum dipole if necessary */
    if (DOMAINDECOMP(cr))
    {
        wallcycle_start(wcycle, ewcMOVEX);
        dd_move_x(cr->dd, box, x);
        wallcycle_stop(wcycle, ewcMOVEX);
    }

    if (inputrecNeedMutot(inputrec))
    {
        if (bStateChanged)
        {
            if (PAR(cr))
            {
                gmx_sumd(2*DIM, mu, cr);
            }
            for (i = 0; i < 2; i++)
            {
                for (j = 0; j < DIM; j++)
                {
                    fr->mu_tot[i][j] = mu[i*DIM + j];
                }
            }
        }
        if (fr->efep == efepNO)
        {
            copy_rvec(fr->mu_tot[0], mu_tot);
        }
        else
        {
            for (j = 0; j < DIM; j++)
            {
                mu_tot[j] =
                    (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
            }
        }
    }
    /* Reset energies */
    reset_enerdata(enerd);
    clear_rvecs(SHIFTS, fr->fshift);

    if (bNS)
    {
        wallcycle_start(wcycle, ewcNS);

        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        /* Do the actual neighbour searching */
        ns(fplog, fr, box,
           groups, top, mdatoms,
           cr, nrnb, bFillGrid);

        wallcycle_stop(wcycle, ewcNS);
    }

    if (inputrec->implicit_solvent && bNS)
    {
        make_gb_nblist(cr, inputrec->gb_algorithm,
                       x, box, fr, &top->idef, graph, fr->born);
    }
    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }
    /* Temporary solution until all routines take PaddedRVecVector */
    rvec *f = as_rvec_array(force->data());

    /* Start the force cycle counter.
     * This counter is stopped after do_force_lowlevel.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);
    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->forceBufferNoVirialSummation;
                /* TODO: remove this - 1 when padding is properly implemented */
                clear_rvecs(fr->f_novirsum->size() - 1,
                            as_rvec_array(fr->f_novirsum->data()));
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the pressure.
                 */
                fr->f_novirsum = force;
            }
        }

        /* Clear the short- and long-range forces */
        clear_rvecs(fr->natoms_force_constr, f);

        clear_rvec(fr->vir_diag_posres);
    }
    if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
    {
        clear_pull_forces(inputrec->pull_work);
    }
    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, enerd, fcd, top, fr->born,
                      bBornRadii, box,
                      inputrec->fepvals, lambda,
                      graph, &(top->excls), fr->mu_tot,
                      flags,
                      &cycles_pme);

    cycles_force = wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }
    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
        }
    }
    if (bDoForces)
    {
        /* Compute forces due to electric field */
        if (fr->efield != nullptr)
        {
            fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);
        }

        /* Communicate the forces */
        if (DOMAINDECOMP(cr))
        {
            wallcycle_start(wcycle, ewcMOVEF);
            dd_move_f(cr->dd, f, fr->fshift);
            /* Do we need to communicate the separate force array
             * for terms that do not contribute to the single sum virial?
             * Position restraints and electric fields do not introduce
             * inter-cg forces, only full electrostatics methods do.
             * When we do not calculate the virial, fr->f_novirsum = f,
             * so we have already communicated these forces.
             */
            if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
                (flags & GMX_FORCE_VIRIAL))
            {
                dd_move_f(cr->dd, as_rvec_array(fr->f_novirsum->data()), NULL);
            }
            wallcycle_stop(wcycle, ewcMOVEF);
        }
        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        {
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }
    if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
    {
        pull_potential_wrapper(cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t,
                               wcycle);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, wcycle, enerd, fr);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    if (flags & GMX_FORCE_ENERGY)
    {
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
        {
            checkPotentialEnergyValidity(enerd);
        }
    }
}
void do_force(FILE *fplog, t_commrec *cr,
              t_inputrec *inputrec,
              gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
              gmx_localtop_t *top,
              gmx_groups_t *groups,
              matrix box, PaddedRVecVector *coordinates, history_t *hist,
              PaddedRVecVector *force,
              tensor vir_force,
              t_mdatoms *mdatoms,
              gmx_enerdata_t *enerd, t_fcdata *fcd,
              std::vector<real> *lambda, t_graph *graph,
              t_forcerec *fr,
              gmx_vsite_t *vsite, rvec mu_tot,
              double t, gmx_edsam_t ed,
              gmx_bool bBornRadii,
              int flags)
{
    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
    {
        flags &= ~GMX_FORCE_NONBONDED;
    }

    GMX_ASSERT(coordinates->size() >= static_cast<unsigned int>(fr->natoms_force + 1), "We might need 1 element extra for SIMD");
    GMX_ASSERT(force->size() >= static_cast<unsigned int>(fr->natoms_force + 1), "We might need 1 element extra for SIMD");

    rvec *x = as_rvec_array(coordinates->data());

    switch (inputrec->cutoff_scheme)
    {
        case ecutsVERLET:
            do_force_cutsVERLET(fplog, cr, inputrec,
                                step, nrnb, wcycle,
                                top, groups,
                                box, x, hist,
                                force, vir_force,
                                mdatoms,
                                enerd, fcd,
                                lambda->data(), graph,
                                fr, fr->ic,
                                vsite, mu_tot,
                                t, ed,
                                bBornRadii,
                                flags);
            break;
        case ecutsGROUP:
            do_force_cutsGROUP(fplog, cr, inputrec,
                               step, nrnb, wcycle,
                               top, groups,
                               box, x, hist,
                               force, vir_force,
                               mdatoms,
                               enerd, fcd,
                               lambda->data(), graph,
                               fr, vsite, mu_tot,
                               t, ed,
                               bBornRadii,
                               flags);
            break;
        default:
            gmx_incons("Invalid cut-off scheme passed!");
    }
}
void do_constrain_first(FILE *fplog, gmx_constr_t constr,
                        t_inputrec *ir, t_mdatoms *md,
                        t_state *state, t_commrec *cr, t_nrnb *nrnb,
                        t_forcerec *fr, gmx_localtop_t *top)
{
    int         i, m, start, end;
    gmx_int64_t step;
    real        dt = ir->delta_t;
    real        dvdl_dum;
    rvec       *savex;

    /* We need to allocate one element extra, since we might use
     * (unaligned) 4-wide SIMD loads to access rvec entries.
     */
    snew(savex, state->natoms + 1);

    start = 0;
    end   = md->homenr;

    if (debug)
    {
        fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
                start, md->homenr, end);
    }

    /* Do a first constrain to reset particles... */
    step = ir->init_step;
    if (fplog)
    {
        char buf[STEPSTRSIZE];
        fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
                gmx_step_str(step, buf));
    }
    dvdl_dum = 0;

    /* constrain the current position */
    constrain(NULL, TRUE, FALSE, constr, &(top->idef),
              ir, cr, step, 0, 1.0, md,
              as_rvec_array(state->x.data()), as_rvec_array(state->x.data()), NULL,
              fr->bMolPBC, state->box,
              state->lambda[efptBONDED], &dvdl_dum,
              NULL, NULL, nrnb, econqCoord);
    if (EI_VV(ir->eI))
    {
        /* constrain the initial velocity, and save it */
        /* also may be useful if we need the ekin from the halfstep for velocity verlet */
        constrain(NULL, TRUE, FALSE, constr, &(top->idef),
                  ir, cr, step, 0, 1.0, md,
                  as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), as_rvec_array(state->v.data()),
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  NULL, NULL, nrnb, econqVeloc);
    }
    /* constrain the initial velocities at t-dt/2 */
    if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
    {
        for (i = start; (i < end); i++)
        {
            for (m = 0; (m < DIM); m++)
            {
                /* Reverse the velocity */
                state->v[i][m] = -state->v[i][m];
                /* Store the position at t-dt in buf */
                savex[i][m] = state->x[i][m] + dt*state->v[i][m];
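                /* Since v was negated just above, x + dt*v here equals
                 * x(0) - dt*v(0), i.e. a backward Euler estimate of the
                 * position at t = -dt. */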
            }
        }
        /* Shake the positions at t=-dt with the positions at t=0
         * as reference coordinates.
         */
        if (fplog)
        {
            char buf[STEPSTRSIZE];
            fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
                    gmx_step_str(step, buf));
        }
        dvdl_dum = 0;
        constrain(NULL, TRUE, FALSE, constr, &(top->idef),
                  ir, cr, step, -1, 1.0, md,
                  as_rvec_array(state->x.data()), savex, NULL,
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  as_rvec_array(state->v.data()), NULL, nrnb, econqCoord);

        for (i = start; i < end; i++)
        {
            for (m = 0; m < DIM; m++)
            {
                /* Re-reverse the velocities */
                state->v[i][m] = -state->v[i][m];
            }
        }
    }
    sfree(savex);
}
static void
integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
                double *enerout, double *virout)
{
    double enersum, virsum;
    double invscale, invscale2, invscale3;
    double r, ea, eb, ec, pa, pb, pc, pd;
    double y0, f, g, h;
    int    ri, offset;
    double tabfactor;

    invscale  = 1.0/scale;
    invscale2 = invscale*invscale;
    invscale3 = invscale*invscale2;

    /* Following summation derived from cubic spline definition,
     * Numerical Recipes in C, second edition, p. 113-116. Exact for
     * the cubic spline. We first calculate the negative of the
     * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
     * add the more standard, abrupt cutoff correction to that result,
     * yielding the long-range correction for a switched function. We
     * perform both the pressure and energy loops at the same time for
     * simplicity, as the computational cost is low. */
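    /* In scaled units each table interval ri covers r in
     * [ri/scale, (ri+1)/scale], where the spline is
     *     V(t) = y0 + f*t + g*t^2 + h*t^3,  t in [0,1].
     * Substituting r(t) = (ri + t)*invscale, the energy integrand
     * V*r^2*dr becomes V(t)*(ea*t^2 + eb*t + ec)*dt, and the virial
     * integrand uses (pa*t^3 + pb*t^2 + pc*t + pd); integrating each
     * power of t over [0,1] yields the 1/3, 1/2, 1/4, ... weights
     * accumulated in the loop below. */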
    if (offstart == 0)
    {
        /* Since the dispersion table has been scaled down a factor
         * 6.0 and the repulsion a factor 12.0 to compensate for the
         * c6/c12 parameters inside nbfp[] being scaled up (to save
         * flops in kernels), we need to correct for this.
         */
        tabfactor = 6.0;
    }
    else
    {
        tabfactor = 12.0;
    }

    enersum = 0.0;
    virsum  = 0.0;
    for (ri = rstart; ri < rend; ++ri)
    {
        r  = ri*invscale;
        ea = invscale3;
        eb = 2.0*invscale2*r;
        ec = invscale*r*r;

        pa = invscale3;
        pb = 3.0*invscale2*r;
        pc = 3.0*invscale*r*r;
        pd = r*r*r;

        /* this "8" is from the packing in the vdwtab array - perhaps
           should be defined? */
        offset = 8*ri + offstart;
        y0     = vdwtab[offset];
        f      = vdwtab[offset+1];
        g      = vdwtab[offset+2];
        h      = vdwtab[offset+3];

        enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
        virsum  += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
    }
    *enerout = 4.0*M_PI*enersum*tabfactor;
    *virout  = 4.0*M_PI*virsum*tabfactor;
}
void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
{
    double eners[2], virs[2], enersum, virsum;
    double r0, rc3, rc9;
    int    ri0, ri1, i;
    real   scale, *vdwtab;

    fr->enershiftsix    = 0;
    fr->enershifttwelve = 0;
    fr->enerdiffsix     = 0;
    fr->enerdifftwelve  = 0;
    fr->virdiffsix      = 0;
    fr->virdifftwelve   = 0;

    if (eDispCorr != edispcNO)
    {
        for (i = 0; i < 2; i++)
        {
            eners[i] = 0;
            virs[i]  = 0;
        }
        if ((fr->vdw_modifier == eintmodPOTSHIFT) ||
            (fr->vdw_modifier == eintmodPOTSWITCH) ||
            (fr->vdw_modifier == eintmodFORCESWITCH) ||
            (fr->vdwtype == evdwSHIFT) ||
            (fr->vdwtype == evdwSWITCH))
        {
            if (((fr->vdw_modifier == eintmodPOTSWITCH) ||
                 (fr->vdw_modifier == eintmodFORCESWITCH) ||
                 (fr->vdwtype == evdwSWITCH)) && fr->rvdw_switch == 0)
            {
                gmx_fatal(FARGS,
                          "With dispersion correction rvdw-switch can not be zero "
                          "for vdw-type = %s", evdw_names[fr->vdwtype]);
            }

            /* TODO This code depends on the logic in tables.c that
               constructs the table layout, which should be made
               explicit in future cleanup. */
            GMX_ASSERT(fr->dispersionCorrectionTable->interaction == GMX_TABLE_INTERACTION_VDWREP_VDWDISP,
                       "Dispersion-correction code needs a table with both repulsion and dispersion terms");
            scale  = fr->dispersionCorrectionTable->scale;
            vdwtab = fr->dispersionCorrectionTable->data;

            /* Round the cut-offs to exact table values for precision */
            ri0 = static_cast<int>(floor(fr->rvdw_switch*scale));
            ri1 = static_cast<int>(ceil(fr->rvdw*scale));

            /* The code below has some support for handling force-switching, i.e.
             * when the force (instead of potential) is switched over a limited
             * region. This leads to a constant shift in the potential inside the
             * switching region, which we can handle by adding a constant energy
             * term in the force-switch case just like when we do potential-shift.
             *
             * For now this is not enabled, but to keep the functionality in the
             * code we check separately for switch and shift. When we do force-switch
             * the shifting point is rvdw_switch, while it is the cutoff when we
             * have a classical potential-shift.
             *
             * For a pure potential-shift the potential has a constant shift
             * all the way out to the cutoff, and that is it. For other forms
             * we need to calculate the constant shift up to the point where we
             * start modifying the potential.
             */
            ri0 = (fr->vdw_modifier == eintmodPOTSHIFT) ? ri1 : ri0;
            r0  = ri0/scale;
            rc3 = r0*r0*r0;
            rc9 = rc3*rc3*rc3;

            if ((fr->vdw_modifier == eintmodFORCESWITCH) ||
                (fr->vdwtype == evdwSHIFT))
            {
                /* Determine the constant energy shift below rvdw_switch.
                 * Table has a scale factor since we have scaled it down to compensate
                 * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
                 */
                fr->enershiftsix    = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
                fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
            }
            else if (fr->vdw_modifier == eintmodPOTSHIFT)
            {
                fr->enershiftsix    = (real)(-1.0/(rc3*rc3));
                fr->enershifttwelve = (real)( 1.0/(rc9*rc3));
            }

            /* Add the constant part from 0 to rvdw_switch.
             * This integration from 0 to rvdw_switch overcounts the number
             * of interactions by 1, as it also counts the self interaction.
             * We will correct for this later.
             */
            eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
            eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;

            /* Calculate the contribution in the range [r0,r1] where we
             * modify the potential. For a pure potential-shift modifier we will
             * have ri0==ri1, and there will not be any contribution here.
             */
            for (i = 0; i < 2; i++)
            {
                enersum = 0;
                virsum  = 0;
                integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
                eners[i] -= enersum;
                virs[i]  -= virsum;
            }

            /* Alright: Above we compensated by REMOVING the parts outside r0
             * corresponding to the ideal VdW 1/r6 and 1/r12 potentials.
             *
             * Regardless of whether r0 is the point where we start switching,
             * or the cutoff where we calculated the constant shift, we include
             * all the parts we are missing out to infinity from r0 by
             * calculating the analytical dispersion correction.
             */
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            virs[0]  +=  8.0*M_PI/rc3;
            virs[1]  += -16.0*M_PI/(3.0*rc9);
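            /* These four terms are the analytic tail integrals for g(r)=1:
             *   energy: 4*pi*Int_r0^inf r^2*(-1/r^6)  dr = -4*pi/(3*r0^3)
             *           4*pi*Int_r0^inf r^2*( 1/r^12) dr =  4*pi/(9*r0^9)
             *   virial: 4*pi*Int_r0^inf r^3*dV/dr dr, which gives
             *           8*pi/r0^3 for the dispersion term and
             *           -16*pi/(3*r0^9) for the repulsion term. */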
        }
        else if (fr->vdwtype == evdwCUT ||
                 EVDW_PME(fr->vdwtype) ||
                 fr->vdwtype == evdwUSER)
        {
            if (fr->vdwtype == evdwUSER && fplog)
            {
                fprintf(fplog,
                        "WARNING: using dispersion correction with user tables\n");
            }

            /* Note that with LJ-PME, the dispersion correction is multiplied
             * by the difference between the actual C6 and the value of C6
             * that would produce the combination rule.
             * This means the normal energy and virial difference formulas
             * can be used here.
             */

            rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
            rc9 = rc3*rc3*rc3;

            /* Contribution beyond the cut-off */
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            if (fr->vdw_modifier == eintmodPOTSHIFT)
            {
                /* Contribution within the cut-off */
                eners[0] += -4.0*M_PI/(3.0*rc3);
                eners[1] +=  4.0*M_PI/(3.0*rc9);
            }
            /* Contribution beyond the cut-off */
            virs[0] +=  8.0*M_PI/rc3;
            virs[1] += -16.0*M_PI/(3.0*rc9);
        }
        else
        {
            gmx_fatal(FARGS,
                      "Dispersion correction is not implemented for vdw-type = %s",
                      evdw_names[fr->vdwtype]);
        }
        /* When we deprecate the group kernels the code below can go too */
        if (fr->vdwtype == evdwPME && fr->cutoff_scheme == ecutsGROUP)
        {
            /* Calculate self-interaction coefficient (assuming that
             * the reciprocal-space contribution is constant in the
             * region that contributes to the self-interaction).
             */
            fr->enershiftsix = gmx::power6(fr->ewaldcoeff_lj) / 6.0;

            eners[0] += -gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj)/3.0;
            virs[0]  +=  gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj);
        }

        fr->enerdiffsix    = eners[0];
        fr->enerdifftwelve = eners[1];
        /* The 0.5 is due to the Gromacs definition of the virial */
        fr->virdiffsix     = 0.5*virs[0];
        fr->virdifftwelve  = 0.5*virs[1];
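        /* GROMACS defines the virial as Xi = -(1/2) sum_i r_i (x) F_i,
         * hence the extra factor 1/2 relative to the plain r^3*dV/dr
         * integrals accumulated in virs[] above. */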
    }
}

void calc_dispcorr(t_inputrec *ir, t_forcerec *fr,
                   matrix box, real lambda, tensor pres, tensor virial,
                   real *prescorr, real *enercorr, real *dvdlcorr)
{
    gmx_bool bCorrAll, bCorrPres;
    real     dvdlambda = 0, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
    int      m;

    *prescorr = 0;
    *enercorr = 0;
    *dvdlcorr = 0;

    if (ir->eDispCorr != edispcNO)
    {
        bCorrAll  = (ir->eDispCorr == edispcAllEner ||
                     ir->eDispCorr == edispcAllEnerPres);
        bCorrPres = (ir->eDispCorr == edispcEnerPres ||
                     ir->eDispCorr == edispcAllEnerPres);

        invvol = 1/det(box);
        if (fr->n_tpi)
        {
            /* Only correct for the interactions with the inserted molecule */
            dens   = (fr->numAtomsForDispersionCorrection - fr->n_tpi)*invvol;
            ninter = fr->n_tpi;
        }
        else
        {
            dens   = fr->numAtomsForDispersionCorrection*invvol;
            ninter = 0.5*fr->numAtomsForDispersionCorrection;
        }
2268 if (ir->efep == efepNO)
2270 avcsix = fr->avcsix[0];
2271 avctwelve = fr->avctwelve[0];
2275 avcsix = (1 - lambda)*fr->avcsix[0] + lambda*fr->avcsix[1];
2276 avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
2279 enerdiff = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
2280 *enercorr += avcsix*enerdiff;
2282 if (ir->efep != efepNO)
2284 dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
2288 enerdiff = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
2289 *enercorr += avctwelve*enerdiff;
2290 if (fr->efep != efepNO)
2292 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
2298 svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
2299 if (ir->eDispCorr == edispcAllEnerPres)
2301 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
2303 /* The factor 2 is because of the Gromacs virial definition */
2304 spres = -2.0*invvol*svir*PRESFAC;
2306 for (m = 0; m < DIM; m++)
2308 virial[m][m] += svir;
2309 pres[m][m] += spres;
2314 /* Can't currently control when it prints, for now, just print when degugging */
2319 fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
2325 "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
2326 *enercorr, spres, svir);
2330 fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
2334 if (fr->efep != efepNO)
2336 *dvdlcorr += dvdlambda;
2341 void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
2342 t_graph *graph, rvec x[])
2346 fprintf(fplog, "Removing pbc first time\n");
2348 calc_shifts(box, fr->shift_vec);
2351 mk_mshift(fplog, graph, fr->ePBC, box, x);
2354 p_graph(debug, "do_pbc_first 1", graph);
2356 shift_self(graph, box, x);
2357 /* By doing an extra mk_mshift the molecules that are broken
2358 * because they were e.g. imported from another software
2359 * will be made whole again. Such are the healing powers
2362 mk_mshift(fplog, graph, fr->ePBC, box, x);
2365 p_graph(debug, "do_pbc_first 2", graph);
2370 fprintf(fplog, "Done rmpbc\n");
2374 static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2375 const gmx_mtop_t *mtop, rvec x[],
2380 gmx_molblock_t *molb;
2382 if (bFirst && fplog)
2384 fprintf(fplog, "Removing pbc first time\n");
2389 for (mb = 0; mb < mtop->nmolblock; mb++)
2391 molb = &mtop->molblock[mb];
2392 if (molb->natoms_mol == 1 ||
2393 (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
2395 /* Just one atom or charge group in the molecule, no PBC required */
2396 as += molb->nmol*molb->natoms_mol;
2400 /* Pass NULL iso fplog to avoid graph prints for each molecule type */
2401 mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
2402 0, molb->natoms_mol, FALSE, FALSE, graph);
2404 for (mol = 0; mol < molb->nmol; mol++)
2406 mk_mshift(fplog, graph, ePBC, box, x+as);
2408 shift_self(graph, box, x+as);
2409 /* The molecule is whole now.
2410 * We don't need the second mk_mshift call as in do_pbc_first,
2411 * since we no longer need this graph.
2414 as += molb->natoms_mol;
2422 void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
2423 const gmx_mtop_t *mtop, rvec x[])
2425 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
2428 void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2429 gmx_mtop_t *mtop, rvec x[])
2431 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
2434 void put_atoms_in_box_omp(int ePBC, const matrix box, int natoms, rvec x[])
2437 nth = gmx_omp_nthreads_get(emntDefault);
2439 #pragma omp parallel for num_threads(nth) schedule(static)
2440 for (t = 0; t < nth; t++)
2446 offset = (natoms*t )/nth;
2447 len = (natoms*(t + 1))/nth - offset;
2448 put_atoms_in_box(ePBC, box, len, x + offset);
2450 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2454 // TODO This can be cleaned up a lot, and move back to runner.cpp
2455 void finish_run(FILE *fplog, const gmx::MDLogger &mdlog, t_commrec *cr,
2456 t_inputrec *inputrec,
2457 t_nrnb nrnb[], gmx_wallcycle_t wcycle,
2458 gmx_walltime_accounting_t walltime_accounting,
2459 nonbonded_verlet_t *nbv,
2460 gmx_bool bWriteStat)
2462 t_nrnb *nrnb_tot = NULL;
2464 double nbfs = 0, mflop = 0;
2465 double elapsed_time,
2466 elapsed_time_over_all_ranks,
2467 elapsed_time_over_all_threads,
2468 elapsed_time_over_all_threads_over_all_ranks;
2474 MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
2475 cr->mpi_comm_mysim);
2483 elapsed_time = walltime_accounting_get_elapsed_time(walltime_accounting);
2484 elapsed_time_over_all_ranks = elapsed_time;
2485 elapsed_time_over_all_threads = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
2486 elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
2490 /* reduce elapsed_time over all MPI ranks in the current simulation */
2491 MPI_Allreduce(&elapsed_time,
2492 &elapsed_time_over_all_ranks,
2493 1, MPI_DOUBLE, MPI_SUM,
2494 cr->mpi_comm_mysim);
2495 elapsed_time_over_all_ranks /= cr->nnodes;
2496 /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
2497 * current simulation. */
2498 MPI_Allreduce(&elapsed_time_over_all_threads,
2499 &elapsed_time_over_all_threads_over_all_ranks,
2500 1, MPI_DOUBLE, MPI_SUM,
2501 cr->mpi_comm_mysim);
2507 print_flop(fplog, nrnb_tot, &nbfs, &mflop);
2514 if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
2516 print_dd_statistics(cr, inputrec, fplog);
2519 /* TODO Move the responsibility for any scaling by thread counts
2520 * to the code that handled the thread region, so that there's a
2521 * mechanism to keep cycle counting working during the transition
2522 * to task parallelism. */
2523 int nthreads_pp = gmx_omp_nthreads_get(emntNonbonded);
2524 int nthreads_pme = gmx_omp_nthreads_get(emntPME);
2525 wallcycle_scale_by_num_threads(wcycle, cr->duty == DUTY_PME, nthreads_pp, nthreads_pme);
2526 auto cycle_sum(wallcycle_sum(cr, wcycle));
2530 struct gmx_wallclock_gpu_t* gputimes = use_GPU(nbv) ? nbnxn_gpu_get_timings(nbv->gpu_nbv) : NULL;
2532 wallcycle_print(fplog, mdlog, cr->nnodes, cr->npmenodes, nthreads_pp, nthreads_pme,
2533 elapsed_time_over_all_ranks,
2534 wcycle, cycle_sum, gputimes);
2536 if (EI_DYNAMICS(inputrec->eI))
2538 delta_t = inputrec->delta_t;
2543 print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
2544 elapsed_time_over_all_ranks,
2545 walltime_accounting_get_nsteps_done(walltime_accounting),
2546 delta_t, nbfs, mflop);
2550 print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
2551 elapsed_time_over_all_ranks,
2552 walltime_accounting_get_nsteps_done(walltime_accounting),
2553 delta_t, nbfs, mflop);
2558 extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, std::vector<real> *lambda, double *lam0)
2560 /* this function works, but could probably use a logic rewrite to keep all the different
2561 types of efep straight. */
2563 if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
2568 t_lambda *fep = ir->fepvals;
2569 *fep_state = fep->init_fep_state; /* this might overwrite the checkpoint
2570 if checkpoint is set -- a kludge is in for now
2573 lambda->resize(efptNR);
2575 for (int i = 0; i < efptNR; i++)
2577 /* overwrite lambda state with init_lambda for now for backwards compatibility */
2578 if (fep->init_lambda >= 0) /* if it's -1, it was never initializd */
2580 (*lambda)[i] = fep->init_lambda;
2583 lam0[i] = (*lambda)[i];
2588 (*lambda)[i] = fep->all_lambda[i][*fep_state];
2591 lam0[i] = (*lambda)[i];
2597 /* need to rescale control temperatures to match current state */
2598 for (int i = 0; i < ir->opts.ngtc; i++)
2600 if (ir->opts.ref_t[i] > 0)
2602 ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
2607 /* Send to the log the information on the current lambdas */
2610 fprintf(fplog, "Initial vector of lambda components:[ ");
2611 for (int i = 0; i < efptNR; i++)
2613 fprintf(fplog, "%10.4f ", (*lambda)[i]);
2615 fprintf(fplog, "]\n");
2621 void init_md(FILE *fplog,
2622 t_commrec *cr, t_inputrec *ir, const gmx_output_env_t *oenv,
2623 double *t, double *t0,
2624 std::vector<real> *lambda, int *fep_state, double *lam0,
2625 t_nrnb *nrnb, gmx_mtop_t *mtop,
2627 int nfile, const t_filenm fnm[],
2628 gmx_mdoutf_t *outf, t_mdebin **mdebin,
2629 tensor force_vir, tensor shake_vir, rvec mu_tot,
2630 gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags,
2631 gmx_wallcycle_t wcycle)
2635 /* Initial values */
2636 *t = *t0 = ir->init_t;
2639 for (i = 0; i < ir->opts.ngtc; i++)
2641 /* set bSimAnn if any group is being annealed */
2642 if (ir->opts.annealing[i] != eannNO)
2648 /* Initialize lambda variables */
2649 initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
2651 // TODO upd is never NULL in practice, but the analysers don't know that
2654 *upd = init_update(ir);
2658 update_annealing_target_temp(ir, ir->init_t, upd ? *upd : NULL);
2663 *vcm = init_vcm(fplog, &mtop->groups, ir);
2666 if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
2668 if (ir->etc == etcBERENDSEN)
2670 please_cite(fplog, "Berendsen84a");
2672 if (ir->etc == etcVRESCALE)
2674 please_cite(fplog, "Bussi2007a");
2676 if (ir->eI == eiSD1)
2678 please_cite(fplog, "Goga2012");
2685 *outf = init_mdoutf(fplog, nfile, fnm, Flags, cr, ir, mtop, oenv, wcycle);
2687 *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : mdoutf_get_fp_ene(*outf),
2688 mtop, ir, mdoutf_get_fp_dhdl(*outf));
2691 /* Initiate variables */
2692 clear_mat(force_vir);
2693 clear_mat(shake_vir);