2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
42 #ifdef HAVE_SYS_TIME_H
53 #include "chargegroup.h"
73 #include "gmx_random.h"
77 #include "nbnxn_atomdata.h"
78 #include "nbnxn_search.h"
79 #include "nbnxn_kernels/nbnxn_kernel_ref.h"
80 #include "nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
81 #include "nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
82 #include "nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
84 #include "gromacs/timing/wallcycle.h"
85 #include "gromacs/timing/walltime_accounting.h"
86 #include "gromacs/utility/gmxmpi.h"
87 #include "gromacs/essentialdynamics/edsam.h"
88 #include "gromacs/pulling/pull.h"
89 #include "gromacs/pulling/pull_rotation.h"
94 #include "nbnxn_cuda_data_mgmt.h"
95 #include "nbnxn_cuda/nbnxn_cuda.h"
97 void print_time(FILE *out,
98 gmx_walltime_accounting_t walltime_accounting,
101 t_commrec gmx_unused *cr)
104 char timebuf[STRLEN];
105 double dt, elapsed_seconds, time_per_step;
108 #ifndef GMX_THREAD_MPI
114 fprintf(out, "step %s", gmx_step_str(step, buf));
115 if (step >= ir->nstlist)
117 double seconds_since_epoch = gmx_gettime();
118 elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
119 time_per_step = elapsed_seconds/(step - ir->init_step + 1);
120 dt = (ir->nsteps + ir->init_step - step) * time_per_step;
126 finish = (time_t) (seconds_since_epoch + dt);
127 gmx_ctime_r(&finish, timebuf, STRLEN);
128 sprintf(buf, "%s", timebuf);
129 buf[strlen(buf)-1] = '\0';
130 fprintf(out, ", will finish %s", buf);
134 fprintf(out, ", remaining wall clock time: %5d s ", (int)dt);
139 fprintf(out, " performance: %.1f ns/day ",
140 ir->delta_t/1000*24*60*60/time_per_step);
143 #ifndef GMX_THREAD_MPI
153 void print_date_and_time(FILE *fplog, int nodeid, const char *title,
154 const gmx_walltime_accounting_t walltime_accounting)
157 char timebuf[STRLEN];
158 char time_string[STRLEN];
163 if (walltime_accounting != NULL)
165 tmptime = (time_t) walltime_accounting_get_start_time_stamp(walltime_accounting);
166 gmx_ctime_r(&tmptime, timebuf, STRLEN);
170 tmptime = (time_t) gmx_gettime();
171 gmx_ctime_r(&tmptime, timebuf, STRLEN);
173 for (i = 0; timebuf[i] >= ' '; i++)
175 time_string[i] = timebuf[i];
177 time_string[i] = '\0';
179 fprintf(fplog, "%s on node %d %s\n", title, nodeid, time_string);
183 static void sum_forces(int start, int end, rvec f[], rvec flr[])
189 pr_rvecs(debug, 0, "fsr", f+start, end-start);
190 pr_rvecs(debug, 0, "flr", flr+start, end-start);
192 for (i = start; (i < end); i++)
194 rvec_inc(f[i], flr[i]);
199 * calc_f_el calculates forces due to an electric field.
201 * force is kJ mol^-1 nm^-1 = e * kJ mol^-1 nm^-1 / e
203 * Et[] contains the parameters for the time dependent
204 * part of the field (not yet used).
205 * Ex[] contains the parameters for
206 * the spatial dependent part of the field. You can have cool periodic
207 * fields in principle, but only a constant field is supported
209 * The function should return the energy due to the electric field
210 * (if any) but for now returns 0.
213 * There can be problems with the virial.
214 * Since the field is not self-consistent this is unavoidable.
215 * For neutral molecules the virial is correct within this approximation.
216 * For neutral systems with many charged molecules the error is small.
217 * But for systems with a net charge or a few charged molecules
218 * the error can be significant when the field is high.
219 * Solution: implement a self-consistent electric field into PME.
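 *
 * As a sketch of what the code below evaluates: for a pulsed field the force
 * on atom i along dimension m is roughly
 *   f[i][m] += q_i * Ex[m].a[0] * FIELDFAC
 *              * cos(Et[m].a[0]*(t-t0)) * exp(-(t-t0)^2/(2*sigma^2))
 * where t0 and sigma are the pulse centre and width taken from Et[], and
 * FIELDFAC converts the amplitude from V/nm to MD force units per unit charge.
 * For a static field only the q_i * Ex[m].a[0] * FIELDFAC term remains.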
221 static void calc_f_el(FILE *fp, int start, int homenr,
222 real charge[], rvec f[],
223 t_cosines Ex[], t_cosines Et[], double t)
229 for (m = 0; (m < DIM); m++)
236 Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));
240 Ext[m] = cos(Et[m].a[0]*t);
249 /* Convert the field strength from V/nm to MD-units */
250 Ext[m] *= Ex[m].a[0]*FIELDFAC;
251 for (i = start; (i < start+homenr); i++)
253 f[i][m] += charge[i]*Ext[m];
263 fprintf(fp, "%10g %10g %10g %10g #FIELD\n", t,
264 Ext[XX]/FIELDFAC, Ext[YY]/FIELDFAC, Ext[ZZ]/FIELDFAC);
268 static void calc_virial(int start, int homenr, rvec x[], rvec f[],
269 tensor vir_part, t_graph *graph, matrix box,
270 t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
275 /* The short-range virial from surrounding boxes */
277 calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
278 inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
280 /* Calculate partial virial, for local atoms only, based on short range.
281 * Total virial is computed in global_stat, called from do_md
283 f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
284 inc_nrnb(nrnb, eNR_VIRIAL, homenr);
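/* Both contributions follow the GROMACS virial convention,
 * Xi = -1/2 sum_i r_i (x) f_i: the shift-vector term above accounts for
 * interactions with periodic images, the per-atom term for the local atoms.
 */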
286 /* Add position restraint contribution */
287 for (i = 0; i < DIM; i++)
289 vir_part[i][i] += fr->vir_diag_posres[i];
292 /* Add wall contribution */
293 for (i = 0; i < DIM; i++)
295 vir_part[i][ZZ] += fr->vir_wall_z[i];
300 pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
304 static void posres_wrapper(FILE *fplog,
310 matrix box, rvec x[],
311 gmx_enerdata_t *enerd,
319 /* Position restraints always require full pbc */
320 set_pbc(&pbc, ir->ePBC, box);
322 v = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
323 top->idef.iparams_posres,
324 (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
325 ir->ePBC == epbcNONE ? NULL : &pbc,
326 lambda[efptRESTRAINT], &dvdl,
327 fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
330 gmx_print_sepdvdl(fplog, interaction_function[F_POSRES].longname, v, dvdl);
332 enerd->term[F_POSRES] += v;
333 /* If just the force constant changes, the FEP term is linear,
334 * but if the reference position changes, it is not.
336 enerd->dvdl_nonlin[efptRESTRAINT] += dvdl;
337 inc_nrnb(nrnb, eNR_POSRES, top->idef.il[F_POSRES].nr/2);
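/* When free-energy output is requested (GMX_FORCE_DHDL with n_lambda > 0),
 * the loop below re-evaluates the restraint energy at every foreign lambda
 * so that the energy differences needed for e.g. BAR can be written out.
 */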
339 if ((ir->fepvals->n_lambda > 0) && (flags & GMX_FORCE_DHDL))
341 for (i = 0; i < enerd->n_lambda; i++)
343 real dvdl_dum, lambda_dum;
345 lambda_dum = (i == 0 ? lambda[efptRESTRAINT] : ir->fepvals->all_lambda[efptRESTRAINT][i-1]);
346 v = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
347 top->idef.iparams_posres,
348 (const rvec*)x, NULL, NULL,
349 ir->ePBC == epbcNONE ? NULL : &pbc, lambda_dum, &dvdl,
350 fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
351 enerd->enerpart_lambda[i] += v;
356 static void fbposres_wrapper(t_inputrec *ir,
359 matrix box, rvec x[],
360 gmx_enerdata_t *enerd,
366 /* Flat-bottomed position restraints always require full pbc */
367 set_pbc(&pbc, ir->ePBC, box);
368 v = fbposres(top->idef.il[F_FBPOSRES].nr, top->idef.il[F_FBPOSRES].iatoms,
369 top->idef.iparams_fbposres,
370 (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
371 ir->ePBC == epbcNONE ? NULL : &pbc,
372 fr->rc_scaling, fr->ePBC, fr->posres_com);
373 enerd->term[F_FBPOSRES] += v;
374 inc_nrnb(nrnb, eNR_FBPOSRES, top->idef.il[F_FBPOSRES].nr/2);
377 static void pull_potential_wrapper(FILE *fplog,
381 matrix box, rvec x[],
385 gmx_enerdata_t *enerd,
392 /* Calculate the center of mass forces, this requires communication,
393 * which is why pull_potential is called close to other communication.
394 * The virial contribution is calculated directly,
395 * which is why we call pull_potential after calc_virial.
397 set_pbc(&pbc, ir->ePBC, box);
399 enerd->term[F_COM_PULL] +=
400 pull_potential(ir->ePull, ir->pull, mdatoms, &pbc,
401 cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
404 gmx_print_sepdvdl(fplog, "Com pull", enerd->term[F_COM_PULL], dvdl);
406 enerd->dvdl_lin[efptRESTRAINT] += dvdl;
409 static void pme_receive_force_ener(FILE *fplog,
412 gmx_wallcycle_t wcycle,
413 gmx_enerdata_t *enerd,
416 real e_q, e_lj, v, dvdl_q, dvdl_lj;
417 float cycles_ppdpme, cycles_seppme;
419 cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
420 dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
422 /* In case of node-splitting, the PP nodes receive the long-range
423 * forces, virial and energy from the PME nodes here.
425 wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
428 gmx_pme_receive_f(cr, fr->f_novirsum, fr->vir_el_recip, &e_q,
429 fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
433 gmx_print_sepdvdl(fplog, "Electrostatic PME mesh", e_q, dvdl_q);
434 gmx_print_sepdvdl(fplog, "Lennard-Jones PME mesh", e_lj, dvdl_lj);
436 enerd->term[F_COUL_RECIP] += e_q;
437 enerd->term[F_LJ_RECIP] += e_lj;
438 enerd->dvdl_lin[efptCOUL] += dvdl_q;
439 enerd->dvdl_lin[efptVDW] += dvdl_lj;
443 dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
445 wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
448 static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
449 gmx_int64_t step, real pforce, rvec *x, rvec *f)
453 char buf[STEPSTRSIZE];
456 for (i = md->start; i < md->start+md->homenr; i++)
459 /* We also catch NAN, if the compiler does not optimize this away. */
460 if (fn2 >= pf2 || fn2 != fn2)
462 fprintf(fp, "step %s atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
463 gmx_step_str(step, buf),
464 ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], sqrt(fn2));
469 static void post_process_forces(t_commrec *cr,
471 t_nrnb *nrnb, gmx_wallcycle_t wcycle,
473 matrix box, rvec x[],
478 t_forcerec *fr, gmx_vsite_t *vsite,
485 /* Spread the mesh force on virtual sites to the other particles...
486 * This is parallellized. MPI communication is performed
487 * if the constructing atoms aren't local.
489 wallcycle_start(wcycle, ewcVSITESPREAD);
490 spread_vsite_f(vsite, x, fr->f_novirsum, NULL,
491 (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
493 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
494 wallcycle_stop(wcycle, ewcVSITESPREAD);
496 if (flags & GMX_FORCE_VIRIAL)
498 /* Now add the forces, this is local */
501 sum_forces(0, fr->f_novirsum_n, f, fr->f_novirsum);
505 sum_forces(mdatoms->start, mdatoms->start+mdatoms->homenr,
508 if (EEL_FULL(fr->eeltype))
510 /* Add the mesh contribution to the virial */
511 m_add(vir_force, fr->vir_el_recip, vir_force);
513 if (EVDW_PME(fr->vdwtype))
515 /* Add the mesh contribution to the virial */
516 m_add(vir_force, fr->vir_lj_recip, vir_force);
520 pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
525 if (fr->print_force >= 0)
527 print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
531 static void do_nb_verlet(t_forcerec *fr,
532 interaction_const_t *ic,
533 gmx_enerdata_t *enerd,
534 int flags, int ilocality,
537 gmx_wallcycle_t wcycle)
539 int nnbl, kernel_type, enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
541 nonbonded_verlet_group_t *nbvg;
544 if (!(flags & GMX_FORCE_NONBONDED))
546 /* skip non-bonded calculation */
550 nbvg = &fr->nbv->grp[ilocality];
552 /* CUDA kernel launch overhead is already timed separately */
553 if (fr->cutoff_scheme != ecutsVERLET)
555 gmx_incons("Invalid cut-off scheme passed!");
558 bCUDA = (nbvg->kernel_type == nbnxnk8x8x8_CUDA);
562 wallcycle_sub_start(wcycle, ewcsNONBONDED);
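/* Dispatch to the requested pair-interaction kernel flavour: the plain-C
 * 4x4 reference kernel, the SIMD 4xN / 2xNN kernels, the CUDA 8x8x8 kernel
 * (launched asynchronously) or the plain-C GPU-emulation kernel. All of them
 * consume the same pair lists and accumulate forces into nbvg->nbat and
 * energies into enerd->grpp.
 */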
564 switch (nbvg->kernel_type)
566 case nbnxnk4x4_PlainC:
567 nbnxn_kernel_ref(&nbvg->nbl_lists,
573 enerd->grpp.ener[egCOULSR],
575 enerd->grpp.ener[egBHAMSR] :
576 enerd->grpp.ener[egLJSR]);
579 case nbnxnk4xN_SIMD_4xN:
580 nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
587 enerd->grpp.ener[egCOULSR],
589 enerd->grpp.ener[egBHAMSR] :
590 enerd->grpp.ener[egLJSR]);
592 case nbnxnk4xN_SIMD_2xNN:
593 nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
600 enerd->grpp.ener[egCOULSR],
602 enerd->grpp.ener[egBHAMSR] :
603 enerd->grpp.ener[egLJSR]);
606 case nbnxnk8x8x8_CUDA:
607 nbnxn_cuda_launch_kernel(fr->nbv->cu_nbv, nbvg->nbat, flags, ilocality);
610 case nbnxnk8x8x8_PlainC:
611 nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
616 nbvg->nbat->out[0].f,
618 enerd->grpp.ener[egCOULSR],
620 enerd->grpp.ener[egBHAMSR] :
621 enerd->grpp.ener[egLJSR]);
625 gmx_incons("Invalid nonbonded kernel type passed!");
630 wallcycle_sub_stop(wcycle, ewcsNONBONDED);
633 if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
635 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
637 else if ((!bCUDA && nbvg->ewald_excl == ewaldexclAnalytical) ||
638 (bCUDA && nbnxn_cuda_is_kernel_ewald_analytical(fr->nbv->cu_nbv)))
640 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
644 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
646 enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
647 if (flags & GMX_FORCE_ENERGY)
649 /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
650 enr_nbnxn_kernel_ljc += 1;
651 enr_nbnxn_kernel_lj += 1;
654 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
655 nbvg->nbl_lists.natpair_ljq);
656 inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
657 nbvg->nbl_lists.natpair_lj);
658 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
659 nbvg->nbl_lists.natpair_q);
662 void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
663 t_inputrec *inputrec,
664 gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
666 gmx_groups_t gmx_unused *groups,
667 matrix box, rvec x[], history_t *hist,
671 gmx_enerdata_t *enerd, t_fcdata *fcd,
672 real *lambda, t_graph *graph,
673 t_forcerec *fr, interaction_const_t *ic,
674 gmx_vsite_t *vsite, rvec mu_tot,
675 double t, FILE *field, gmx_edsam_t ed,
683 gmx_bool bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
684 gmx_bool bDoLongRange, bDoForces, bSepLRF, bUseGPU, bUseOrEmulGPU;
685 gmx_bool bDiffKernels = FALSE;
687 rvec vzero, box_diag;
689 float cycles_pme, cycles_force, cycles_wait_gpu;
690 nonbonded_verlet_t *nbv;
695 nb_kernel_type = fr->nbv->grp[0].kernel_type;
697 start = mdatoms->start;
698 homenr = mdatoms->homenr;
700 bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));
702 clear_mat(vir_force);
705 if (DOMAINDECOMP(cr))
707 cg1 = cr->dd->ncg_tot;
718 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
719 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
720 bFillGrid = (bNS && bStateChanged);
721 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
722 bDoLongRange = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DO_LR));
723 bDoForces = (flags & GMX_FORCE_FORCES);
724 bSepLRF = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
725 bUseGPU = fr->nbv->bUseGPU;
726 bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);
730 update_forcerec(fr, box);
732 if (NEED_MUTOT(*inputrec))
734 /* Calculate total (local) dipole moment in a temporary common array.
735 * This makes it possible to sum them over nodes faster.
737 calc_mu(start, homenr,
738 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
743 if (fr->ePBC != epbcNONE)
745 /* Compute shift vectors every step,
746 * because of pressure coupling or box deformation!
748 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
750 calc_shifts(box, fr->shift_vec);
755 put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
756 inc_nrnb(nrnb, eNR_SHIFTX, homenr);
758 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
760 unshift_self(graph, box, x);
764 nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
765 fr->shift_vec, nbv->grp[0].nbat);
768 if (!(cr->duty & DUTY_PME))
770 /* Send particle coordinates to the pme nodes.
771 * Since this is only implemented for domain decomposition
772 * and domain decomposition does not use the graph,
773 * we do not need to worry about shifting.
778 wallcycle_start(wcycle, ewcPP_PMESENDX);
780 bBS = (inputrec->nwall == 2);
784 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
787 if (EEL_PME(fr->eeltype))
789 pme_flags |= GMX_PME_DO_COULOMB;
792 if (EVDW_PME(fr->vdwtype))
794 pme_flags |= GMX_PME_DO_LJ;
795 if (fr->ljpme_combination_rule == eljpmeLB)
797 pme_flags |= GMX_PME_LJ_LB;
801 gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
802 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
803 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
806 wallcycle_stop(wcycle, ewcPP_PMESENDX);
810 /* do gridding for pair search */
813 if (graph && bStateChanged)
815 /* Calculate intramolecular shift vectors to make molecules whole */
816 mk_mshift(fplog, graph, fr->ePBC, box, x);
820 box_diag[XX] = box[XX][XX];
821 box_diag[YY] = box[YY][YY];
822 box_diag[ZZ] = box[ZZ][ZZ];
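/* vzero and box_diag (the rectangular extent of the box) serve as the
 * corners of the zone whose atoms nbnxn_put_on_grid bins below for the
 * pair search; without domain decomposition this is simply the full box.
 */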
824 wallcycle_start(wcycle, ewcNS);
827 wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
828 nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
830 0, mdatoms->homenr, -1, fr->cginfo, x,
832 nbv->grp[eintLocal].kernel_type,
833 nbv->grp[eintLocal].nbat);
834 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
838 wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
839 nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
841 nbv->grp[eintNonlocal].kernel_type,
842 nbv->grp[eintNonlocal].nbat);
843 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
846 if (nbv->ngrp == 1 ||
847 nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
849 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
850 nbv->nbs, mdatoms, fr->cginfo);
854 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
855 nbv->nbs, mdatoms, fr->cginfo);
856 nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
857 nbv->nbs, mdatoms, fr->cginfo);
859 wallcycle_stop(wcycle, ewcNS);
862 /* initialize the GPU atom data and copy shift vector */
867 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
868 nbnxn_cuda_init_atomdata(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
869 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
872 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
873 nbnxn_cuda_upload_shiftvec(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
874 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
877 /* do local pair search */
880 wallcycle_start_nocount(wcycle, ewcNS);
881 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
882 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
885 nbv->min_ci_balanced,
886 &nbv->grp[eintLocal].nbl_lists,
888 nbv->grp[eintLocal].kernel_type,
890 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
894 /* initialize local pair-list on the GPU */
895 nbnxn_cuda_init_pairlist(nbv->cu_nbv,
896 nbv->grp[eintLocal].nbl_lists.nbl[0],
899 wallcycle_stop(wcycle, ewcNS);
903 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
904 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
905 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
906 nbv->grp[eintLocal].nbat);
907 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
908 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
913 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
914 /* launch local nonbonded F on GPU */
915 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
917 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
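/* The CUDA kernel above is launched asynchronously so that it overlaps with
 * the CPU work that follows (coordinate communication, bonded forces, PME);
 * its results are only waited for further down, after do_force_lowlevel.
 */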
920 /* Communicate coordinates and sum dipole if necessary +
921 do non-local pair search */
922 if (DOMAINDECOMP(cr))
924 bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
925 nbv->grp[eintLocal].kernel_type);
929 /* With GPU+CPU non-bonded calculations we need to copy
930 * the local coordinates to the non-local nbat struct
931 * (in CPU format) as the non-local kernel call also
932 * calculates the local - non-local interactions.
934 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
935 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
936 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
937 nbv->grp[eintNonlocal].nbat);
938 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
939 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
944 wallcycle_start_nocount(wcycle, ewcNS);
945 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
949 nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
952 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
955 nbv->min_ci_balanced,
956 &nbv->grp[eintNonlocal].nbl_lists,
958 nbv->grp[eintNonlocal].kernel_type,
961 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
963 if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_CUDA)
965 /* initialize non-local pair-list on the GPU */
966 nbnxn_cuda_init_pairlist(nbv->cu_nbv,
967 nbv->grp[eintNonlocal].nbl_lists.nbl[0],
970 wallcycle_stop(wcycle, ewcNS);
974 wallcycle_start(wcycle, ewcMOVEX);
975 dd_move_x(cr->dd, box, x);
977 /* When we don't need the total dipole we sum it in global_stat */
978 if (bStateChanged && NEED_MUTOT(*inputrec))
980 gmx_sumd(2*DIM, mu, cr);
982 wallcycle_stop(wcycle, ewcMOVEX);
984 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
985 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
986 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
987 nbv->grp[eintNonlocal].nbat);
988 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
989 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
992 if (bUseGPU && !bDiffKernels)
994 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
995 /* launch non-local nonbonded F on GPU */
996 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
998 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1004 /* launch D2H copy-back F */
1005 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1006 if (DOMAINDECOMP(cr) && !bDiffKernels)
1008 nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintNonlocal].nbat,
1009 flags, eatNonlocal);
1011 nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintLocal].nbat,
1013 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1016 if (bStateChanged && NEED_MUTOT(*inputrec))
1020 gmx_sumd(2*DIM, mu, cr);
1023 for (i = 0; i < 2; i++)
1025 for (j = 0; j < DIM; j++)
1027 fr->mu_tot[i][j] = mu[i*DIM + j];
1031 if (fr->efep == efepNO)
1033 copy_rvec(fr->mu_tot[0], mu_tot);
1037 for (j = 0; j < DIM; j++)
1040 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
1041 lambda[efptCOUL]*fr->mu_tot[1][j];
1045 /* Reset energies */
1046 reset_enerdata(fr, bNS, enerd, MASTER(cr));
1047 clear_rvecs(SHIFTS, fr->fshift);
1049 if (DOMAINDECOMP(cr))
1051 if (!(cr->duty & DUTY_PME))
1053 wallcycle_start(wcycle, ewcPPDURINGPME);
1054 dd_force_flop_start(cr->dd, nrnb);
1060 /* Enforced rotation has its own cycle counter that starts after the collective
1061 * coordinates have been communicated. It is added to ddCyclF to allow
1062 * for proper load-balancing */
1063 wallcycle_start(wcycle, ewcROT);
1064 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1065 wallcycle_stop(wcycle, ewcROT);
1068 /* Start the force cycle counter.
1069 * This counter is stopped in do_forcelow_level.
1070 * No parallel communication should occur while this counter is running,
1071 * since that will interfere with the dynamic load balancing.
1073 wallcycle_start(wcycle, ewcFORCE);
1076 /* Reset forces for which the virial is calculated separately:
1077 * PME/Ewald forces if necessary */
1078 if (fr->bF_NoVirSum)
1080 if (flags & GMX_FORCE_VIRIAL)
1082 fr->f_novirsum = fr->f_novirsum_alloc;
1085 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1089 clear_rvecs(homenr, fr->f_novirsum+start);
1094 /* We are not calculating the pressure so we do not need
1095 * a separate array for forces that do not contribute
1102 /* Clear the short- and long-range forces */
1103 clear_rvecs(fr->natoms_force_constr, f);
1104 if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1106 clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1109 clear_rvec(fr->vir_diag_posres);
1112 if (inputrec->ePull == epullCONSTRAINT)
1114 clear_pull_forces(inputrec->pull);
1117 /* We calculate the non-bonded forces, when done on the CPU, here.
1118 * We do this before calling do_force_lowlevel, since there the bonded
1119 * forces are calculated before PME, which does communication.
1120 * With this order, non-bonded and bonded force calculation imbalance
1121 * can be balanced out by the domain decomposition load balancing.
1126 /* Maybe we should move this into do_force_lowlevel */
1127 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
1131 if (!bUseOrEmulGPU || bDiffKernels)
1135 if (DOMAINDECOMP(cr))
1137 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
1138 bDiffKernels ? enbvClearFYes : enbvClearFNo,
1148 aloc = eintNonlocal;
1151 /* Add all the non-bonded force to the normal force array.
1152 * This can be split into a local and a non-local part when overlapping
1153 * communication with calculation with domain decomposition.
1155 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1156 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1157 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1158 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
1159 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1160 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1161 wallcycle_start_nocount(wcycle, ewcFORCE);
1163 /* if there are multiple fshift output buffers reduce them */
1164 if ((flags & GMX_FORCE_VIRIAL) &&
1165 nbv->grp[aloc].nbl_lists.nnbl > 1)
1167 nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
1172 /* update QMMMrec, if necessary */
1175 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1178 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
1180 posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
1184 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_FBPOSRES].nr > 0)
1186 fbposres_wrapper(inputrec, nrnb, top, box, x, enerd, fr);
1189 /* Compute the bonded and non-bonded energies and optionally forces */
1190 do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
1191 cr, nrnb, wcycle, mdatoms,
1192 x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
1193 &(top->atomtypes), bBornRadii, box,
1194 inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
1195 flags, &cycles_pme);
1199 if (do_per_step(step, inputrec->nstcalclr))
1201 /* Add the long range forces to the short range forces */
1202 for (i = 0; i < fr->natoms_force_constr; i++)
1204 rvec_add(fr->f_twin[i], f[i], f[i]);
1209 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1213 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1216 if (bUseOrEmulGPU && !bDiffKernels)
1218 /* wait for non-local forces (or calculate in emulation mode) */
1219 if (DOMAINDECOMP(cr))
1225 wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
1226 nbnxn_cuda_wait_gpu(nbv->cu_nbv,
1227 nbv->grp[eintNonlocal].nbat,
1229 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1231 cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
1232 cycles_wait_gpu += cycles_tmp;
1233 cycles_force += cycles_tmp;
1237 wallcycle_start_nocount(wcycle, ewcFORCE);
1238 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
1240 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1242 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1243 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1244 /* skip the reduction if there was no non-local work to do */
1245 if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
1247 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
1248 nbv->grp[eintNonlocal].nbat, f);
1250 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1251 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1257 /* Communicate the forces */
1260 wallcycle_start(wcycle, ewcMOVEF);
1261 if (DOMAINDECOMP(cr))
1263 dd_move_f(cr->dd, f, fr->fshift);
1264 /* Do we need to communicate the separate force array
1265 * for terms that do not contribute to the single sum virial?
1266 * Position restraints and electric fields do not introduce
1267 * inter-cg forces, only full electrostatics methods do.
1268 * When we do not calculate the virial, fr->f_novirsum = f,
1269 * so we have already communicated these forces.
1271 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1272 (flags & GMX_FORCE_VIRIAL))
1274 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1278 /* We should not update the shift forces here,
1279 * since f_twin is already included in f.
1281 dd_move_f(cr->dd, fr->f_twin, NULL);
1284 wallcycle_stop(wcycle, ewcMOVEF);
1290 /* wait for local forces (or calculate in emulation mode) */
1293 wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
1294 nbnxn_cuda_wait_gpu(nbv->cu_nbv,
1295 nbv->grp[eintLocal].nbat,
1297 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1299 cycles_wait_gpu += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
1301 /* now clear the GPU outputs while we finish the step on the CPU */
1303 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1304 nbnxn_cuda_clear_outputs(nbv->cu_nbv, flags);
1305 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1309 wallcycle_start_nocount(wcycle, ewcFORCE);
1310 do_nb_verlet(fr, ic, enerd, flags, eintLocal,
1311 DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
1313 wallcycle_stop(wcycle, ewcFORCE);
1315 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1316 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1317 if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
1319 /* skip the reduction if there was no local work to do */
1320 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
1321 nbv->grp[eintLocal].nbat, f);
1323 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1324 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1327 if (DOMAINDECOMP(cr))
1329 dd_force_flop_stop(cr->dd, nrnb);
1332 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1335 dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
1342 if (IR_ELEC_FIELD(*inputrec))
1344 /* Compute forces due to electric field */
1345 calc_f_el(MASTER(cr) ? field : NULL,
1346 start, homenr, mdatoms->chargeA, fr->f_novirsum,
1347 inputrec->ex, inputrec->et, t);
1350 /* If we have NoVirSum forces, but we do not calculate the virial,
1351 * we sum fr->f_novirsum = f later.
1353 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1355 wallcycle_start(wcycle, ewcVSITESPREAD);
1356 spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1357 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1358 wallcycle_stop(wcycle, ewcVSITESPREAD);
1362 wallcycle_start(wcycle, ewcVSITESPREAD);
1363 spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
1365 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1366 wallcycle_stop(wcycle, ewcVSITESPREAD);
1370 if (flags & GMX_FORCE_VIRIAL)
1372 /* Calculation of the virial must be done after vsites! */
1373 calc_virial(mdatoms->start, mdatoms->homenr, x, f,
1374 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1378 if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1380 pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
1381 f, vir_force, mdatoms, enerd, lambda, t);
1384 /* Add the forces from enforced rotation potentials (if any) */
1387 wallcycle_start(wcycle, ewcROTadd);
1388 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1389 wallcycle_stop(wcycle, ewcROTadd);
1392 if (PAR(cr) && !(cr->duty & DUTY_PME))
1394 /* In case of node-splitting, the PP nodes receive the long-range
1395 * forces, virial and energy from the PME nodes here.
1397 pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
1402 post_process_forces(cr, step, nrnb, wcycle,
1403 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1407 /* Sum the potential energy terms from group contributions */
1408 sum_epot(&(enerd->grpp), enerd->term);
1411 void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
1412 t_inputrec *inputrec,
1413 gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1414 gmx_localtop_t *top,
1415 gmx_groups_t *groups,
1416 matrix box, rvec x[], history_t *hist,
1420 gmx_enerdata_t *enerd, t_fcdata *fcd,
1421 real *lambda, t_graph *graph,
1422 t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
1423 double t, FILE *field, gmx_edsam_t ed,
1424 gmx_bool bBornRadii,
1430 gmx_bool bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
1431 gmx_bool bDoLongRangeNS, bDoForces, bDoPotential, bSepLRF;
1432 gmx_bool bDoAdressWF;
1434 rvec vzero, box_diag;
1435 real e, v, dvdlambda[efptNR];
1437 float cycles_pme, cycles_force;
1439 start = mdatoms->start;
1440 homenr = mdatoms->homenr;
1442 bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));
1444 clear_mat(vir_force);
1448 pd_cg_range(cr, &cg0, &cg1);
1453 if (DOMAINDECOMP(cr))
1455 cg1 = cr->dd->ncg_tot;
1467 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
1468 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
1469 /* Should we update the long-range neighborlists at this step? */
1470 bDoLongRangeNS = fr->bTwinRange && bNS;
1471 /* Should we perform the long-range nonbonded evaluation inside the neighborsearching? */
1472 bFillGrid = (bNS && bStateChanged);
1473 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
1474 bDoForces = (flags & GMX_FORCE_FORCES);
1475 bDoPotential = (flags & GMX_FORCE_ENERGY);
1476 bSepLRF = ((inputrec->nstcalclr > 1) && bDoForces &&
1477 (flags & GMX_FORCE_SEPLRF) && (flags & GMX_FORCE_DO_LR));
1479 /* should probably move this to the forcerec since it doesn't change */
1480 bDoAdressWF = ((fr->adress_type != eAdressOff));
1484 update_forcerec(fr, box);
1486 if (NEED_MUTOT(*inputrec))
1488 /* Calculate total (local) dipole moment in a temporary common array.
1489 * This makes it possible to sum them over nodes faster.
1491 calc_mu(start, homenr,
1492 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
1497 if (fr->ePBC != epbcNONE)
1499 /* Compute shift vectors every step,
1500 * because of pressure coupling or box deformation!
1502 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
1504 calc_shifts(box, fr->shift_vec);
1509 put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
1510 &(top->cgs), x, fr->cg_cm);
1511 inc_nrnb(nrnb, eNR_CGCM, homenr);
1512 inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
1514 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1516 unshift_self(graph, box, x);
1521 calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
1522 inc_nrnb(nrnb, eNR_CGCM, homenr);
1529 move_cgcm(fplog, cr, fr->cg_cm);
1533 pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
1538 if (!(cr->duty & DUTY_PME))
1540 /* Send particle coordinates to the pme nodes.
1541 * Since this is only implemented for domain decomposition
1542 * and domain decomposition does not use the graph,
1543 * we do not need to worry about shifting.
1548 wallcycle_start(wcycle, ewcPP_PMESENDX);
1550 bBS = (inputrec->nwall == 2);
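/* With two walls the system is periodic only in x and y; for the
 * reciprocal-space part the box sent to the PME nodes is therefore
 * stretched in z by wall-ewald-zfac, which pushes the periodic images
 * along z far enough apart to decouple them.
 */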
1553 copy_mat(box, boxs);
1554 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
1557 if (EEL_PME(fr->eeltype))
1559 pme_flags |= GMX_PME_DO_COULOMB;
1562 if (EVDW_PME(fr->vdwtype))
1564 pme_flags |= GMX_PME_DO_LJ;
1565 if (fr->ljpme_combination_rule == eljpmeLB)
1567 pme_flags |= GMX_PME_LJ_LB;
1571 gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
1572 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
1573 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
1576 wallcycle_stop(wcycle, ewcPP_PMESENDX);
1578 #endif /* GMX_MPI */
1580 /* Communicate coordinates and sum dipole if necessary */
1583 wallcycle_start(wcycle, ewcMOVEX);
1584 if (DOMAINDECOMP(cr))
1586 dd_move_x(cr->dd, box, x);
1590 move_x(cr, x, nrnb);
1592 wallcycle_stop(wcycle, ewcMOVEX);
1595 /* update adress weight beforehand */
1596 if (bStateChanged && bDoAdressWF)
1598 /* need pbc for adress weight calculation with pbc_dx */
1599 set_pbc(&pbc, inputrec->ePBC, box);
1600 if (fr->adress_site == eAdressSITEcog)
1602 update_adress_weights_cog(top->idef.iparams, top->idef.il, x, fr, mdatoms,
1603 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1605 else if (fr->adress_site == eAdressSITEcom)
1607 update_adress_weights_com(fplog, cg0, cg1, &(top->cgs), x, fr, mdatoms,
1608 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1610 else if (fr->adress_site == eAdressSITEatomatom)
1612 update_adress_weights_atom_per_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1613 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1617 update_adress_weights_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1618 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1622 if (NEED_MUTOT(*inputrec))
1629 gmx_sumd(2*DIM, mu, cr);
1631 for (i = 0; i < 2; i++)
1633 for (j = 0; j < DIM; j++)
1635 fr->mu_tot[i][j] = mu[i*DIM + j];
1639 if (fr->efep == efepNO)
1641 copy_rvec(fr->mu_tot[0], mu_tot);
1645 for (j = 0; j < DIM; j++)
1648 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
1653 /* Reset energies */
1654 reset_enerdata(fr, bNS, enerd, MASTER(cr));
1655 clear_rvecs(SHIFTS, fr->fshift);
1659 wallcycle_start(wcycle, ewcNS);
1661 if (graph && bStateChanged)
1663 /* Calculate intramolecular shift vectors to make molecules whole */
1664 mk_mshift(fplog, graph, fr->ePBC, box, x);
1667 /* Do the actual neighbour searching */
1669 groups, top, mdatoms,
1670 cr, nrnb, bFillGrid,
1673 wallcycle_stop(wcycle, ewcNS);
1676 if (inputrec->implicit_solvent && bNS)
1678 make_gb_nblist(cr, inputrec->gb_algorithm,
1679 x, box, fr, &top->idef, graph, fr->born);
1682 if (DOMAINDECOMP(cr))
1684 if (!(cr->duty & DUTY_PME))
1686 wallcycle_start(wcycle, ewcPPDURINGPME);
1687 dd_force_flop_start(cr->dd, nrnb);
1693 /* Enforced rotation has its own cycle counter that starts after the collective
1694 * coordinates have been communicated. It is added to ddCyclF to allow
1695 * for proper load-balancing */
1696 wallcycle_start(wcycle, ewcROT);
1697 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1698 wallcycle_stop(wcycle, ewcROT);
1701 /* Start the force cycle counter.
1702 * This counter is stopped in do_forcelow_level.
1703 * No parallel communication should occur while this counter is running,
1704 * since that will interfere with the dynamic load balancing.
1706 wallcycle_start(wcycle, ewcFORCE);
1710 /* Reset forces for which the virial is calculated separately:
1711 * PME/Ewald forces if necessary */
1712 if (fr->bF_NoVirSum)
1714 if (flags & GMX_FORCE_VIRIAL)
1716 fr->f_novirsum = fr->f_novirsum_alloc;
1719 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1723 clear_rvecs(homenr, fr->f_novirsum+start);
1728 /* We are not calculating the pressure so we do not need
1729 * a separate array for forces that do not contribute
1736 /* Clear the short- and long-range forces */
1737 clear_rvecs(fr->natoms_force_constr, f);
1738 if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1740 clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1743 clear_rvec(fr->vir_diag_posres);
1745 if (inputrec->ePull == epullCONSTRAINT)
1747 clear_pull_forces(inputrec->pull);
1750 /* update QMMMrec, if necessary */
1753 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1756 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
1758 posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
1762 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_FBPOSRES].nr > 0)
1764 fbposres_wrapper(inputrec, nrnb, top, box, x, enerd, fr);
1767 /* Compute the bonded and non-bonded energies and optionally forces */
1768 do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
1769 cr, nrnb, wcycle, mdatoms,
1770 x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
1771 &(top->atomtypes), bBornRadii, box,
1772 inputrec->fepvals, lambda,
1773 graph, &(top->excls), fr->mu_tot,
1779 if (do_per_step(step, inputrec->nstcalclr))
1781 /* Add the long range forces to the short range forces */
1782 for (i = 0; i < fr->natoms_force_constr; i++)
1784 rvec_add(fr->f_twin[i], f[i], f[i]);
1789 cycles_force = wallcycle_stop(wcycle, ewcFORCE);
1793 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1796 if (DOMAINDECOMP(cr))
1798 dd_force_flop_stop(cr->dd, nrnb);
1801 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1807 if (IR_ELEC_FIELD(*inputrec))
1809 /* Compute forces due to electric field */
1810 calc_f_el(MASTER(cr) ? field : NULL,
1811 start, homenr, mdatoms->chargeA, fr->f_novirsum,
1812 inputrec->ex, inputrec->et, t);
1815 if (bDoAdressWF && fr->adress_icor == eAdressICThermoForce)
1817 /* Compute thermodynamic force in hybrid AdResS region */
1818 adress_thermo_force(start, homenr, &(top->cgs), x, fr->f_novirsum, fr, mdatoms,
1819 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1822 /* Communicate the forces */
1825 wallcycle_start(wcycle, ewcMOVEF);
1826 if (DOMAINDECOMP(cr))
1828 dd_move_f(cr->dd, f, fr->fshift);
1829 /* Do we need to communicate the separate force array
1830 * for terms that do not contribute to the single sum virial?
1831 * Position restraints and electric fields do not introduce
1832 * inter-cg forces, only full electrostatics methods do.
1833 * When we do not calculate the virial, fr->f_novirsum = f,
1834 * so we have already communicated these forces.
1836 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1837 (flags & GMX_FORCE_VIRIAL))
1839 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1843 /* We should not update the shift forces here,
1844 * since f_twin is already included in f.
1846 dd_move_f(cr->dd, fr->f_twin, NULL);
1851 pd_move_f(cr, f, nrnb);
1854 pd_move_f(cr, fr->f_twin, nrnb);
1857 wallcycle_stop(wcycle, ewcMOVEF);
1860 /* If we have NoVirSum forces, but we do not calculate the virial,
1861 * we sum fr->f_novirsum = f later.
1863 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1865 wallcycle_start(wcycle, ewcVSITESPREAD);
1866 spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1867 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1868 wallcycle_stop(wcycle, ewcVSITESPREAD);
1872 wallcycle_start(wcycle, ewcVSITESPREAD);
1873 spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
1875 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1876 wallcycle_stop(wcycle, ewcVSITESPREAD);
1880 if (flags & GMX_FORCE_VIRIAL)
1882 /* Calculation of the virial must be done after vsites! */
1883 calc_virial(mdatoms->start, mdatoms->homenr, x, f,
1884 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1888 if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1890 pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
1891 f, vir_force, mdatoms, enerd, lambda, t);
1894 /* Add the forces from enforced rotation potentials (if any) */
1897 wallcycle_start(wcycle, ewcROTadd);
1898 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1899 wallcycle_stop(wcycle, ewcROTadd);
1902 if (PAR(cr) && !(cr->duty & DUTY_PME))
1904 /* In case of node-splitting, the PP nodes receive the long-range
1905 * forces, virial and energy from the PME nodes here.
1907 pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
1912 post_process_forces(cr, step, nrnb, wcycle,
1913 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1917 /* Sum the potential energy terms from group contributions */
1918 sum_epot(&(enerd->grpp), enerd->term);
1921 void do_force(FILE *fplog, t_commrec *cr,
1922 t_inputrec *inputrec,
1923 gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1924 gmx_localtop_t *top,
1925 gmx_groups_t *groups,
1926 matrix box, rvec x[], history_t *hist,
1930 gmx_enerdata_t *enerd, t_fcdata *fcd,
1931 real *lambda, t_graph *graph,
1933 gmx_vsite_t *vsite, rvec mu_tot,
1934 double t, FILE *field, gmx_edsam_t ed,
1935 gmx_bool bBornRadii,
1938 /* modify force flag if not doing nonbonded */
1939 if (!fr->bNonbonded)
1941 flags &= ~GMX_FORCE_NONBONDED;
1944 switch (inputrec->cutoff_scheme)
1947 do_force_cutsVERLET(fplog, cr, inputrec,
1963 do_force_cutsGROUP(fplog, cr, inputrec,
1978 gmx_incons("Invalid cut-off scheme passed!");
1983 void do_constrain_first(FILE *fplog, gmx_constr_t constr,
1984 t_inputrec *ir, t_mdatoms *md,
1985 t_state *state, t_commrec *cr, t_nrnb *nrnb,
1986 t_forcerec *fr, gmx_localtop_t *top)
1988 int i, m, start, end;
1990 real dt = ir->delta_t;
1994 snew(savex, state->natoms);
1997 end = md->homenr + start;
2001 fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
2002 start, md->homenr, end);
2004 /* Do a first constrain to reset particles... */
2005 step = ir->init_step;
2008 char buf[STEPSTRSIZE];
2009 fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
2010 gmx_step_str(step, buf));
2014 /* constrain the current position */
2015 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2016 ir, NULL, cr, step, 0, md,
2017 state->x, state->x, NULL,
2018 fr->bMolPBC, state->box,
2019 state->lambda[efptBONDED], &dvdl_dum,
2020 NULL, NULL, nrnb, econqCoord,
2021 ir->epc == epcMTTK, state->veta, state->veta);
2024 /* constrain the initial velocity, and save it */
2025 /* also may be useful if we need the ekin from the halfstep for velocity verlet */
2026 /* might not yet treat veta correctly */
2027 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2028 ir, NULL, cr, step, 0, md,
2029 state->x, state->v, state->v,
2030 fr->bMolPBC, state->box,
2031 state->lambda[efptBONDED], &dvdl_dum,
2032 NULL, NULL, nrnb, econqVeloc,
2033 ir->epc == epcMTTK, state->veta, state->veta);
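/* For integrators that keep velocities at the half step (leap-frog style),
 * the loop below also produces constrained positions at t = -dt: it
 * temporarily reverses the velocities, takes a backward Euler step into
 * savex, constrains that against the t = 0 coordinates, and finally
 * restores the velocity signs.
 */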
2035 /* constrain the initial velocities at t-dt/2 */
2036 if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
2038 for (i = start; (i < end); i++)
2040 for (m = 0; (m < DIM); m++)
2042 /* Reverse the velocity */
2043 state->v[i][m] = -state->v[i][m];
2044 /* Store the position at t-dt in savex */
2045 savex[i][m] = state->x[i][m] + dt*state->v[i][m];
2048 /* Shake the positions at t=-dt with the positions at t=0
2049 * as reference coordinates.
2053 char buf[STEPSTRSIZE];
2054 fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
2055 gmx_step_str(step, buf));
2058 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2059 ir, NULL, cr, step, -1, md,
2060 state->x, savex, NULL,
2061 fr->bMolPBC, state->box,
2062 state->lambda[efptBONDED], &dvdl_dum,
2063 state->v, NULL, nrnb, econqCoord,
2064 ir->epc == epcMTTK, state->veta, state->veta);
2066 for (i = start; i < end; i++)
2068 for (m = 0; m < DIM; m++)
2070 /* Re-reverse the velocities */
2071 state->v[i][m] = -state->v[i][m];
2080 integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
2081 double *enerout, double *virout)
2083 double enersum, virsum;
2084 double invscale, invscale2, invscale3;
2085 double r, ea, eb, ec, pa, pb, pc, pd;
2087 int ri, offset, tabfactor;
2089 invscale = 1.0/scale;
2090 invscale2 = invscale*invscale;
2091 invscale3 = invscale*invscale2;
2093 /* Following summation derived from cubic spline definition,
2094 * Numerical Recipes in C, second edition, p. 113-116. Exact for
2095 * the cubic spline. We first calculate the negative of the
2096 * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
2097 * add the more standard, abrupt cutoff correction to that result,
2098 * yielding the long-range correction for a switched function. We
2099 * perform both the pressure and energy loops at the same time for
2100 * simplicity, as the computational cost is low. */
2104 /* Since the dispersion table has been scaled down a factor
2105 * 6.0 and the repulsion a factor 12.0 to compensate for the
2106 * c6/c12 parameters inside nbfp[] being scaled up (to save
2107 * flops in kernels), we need to correct for this.
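/* Over each table bin of width 1/scale the potential is the cubic
 * y0 + f*t + g*t^2 + h*t^3 (t the fractional position within the bin), so
 * the loop below accumulates the analytical bin integrals of r^2*V(r) into
 * enersum and of r^3*dV/dr into virsum; the final multiplication by 4*pi
 * turns these into the radial long-range integrals.
 */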
2118 for (ri = rstart; ri < rend; ++ri)
2122 eb = 2.0*invscale2*r;
2126 pb = 3.0*invscale2*r;
2127 pc = 3.0*invscale*r*r;
2130 /* this "8" is the stride of the vdwtab array: 4 spline entries each for
2131 dispersion and repulsion - perhaps it should be a named constant? */
2133 offset = 8*ri + offstart;
2134 y0 = vdwtab[offset];
2135 f = vdwtab[offset+1];
2136 g = vdwtab[offset+2];
2137 h = vdwtab[offset+3];
2139 enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
2140 virsum += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
2142 *enerout = 4.0*M_PI*enersum*tabfactor;
2143 *virout = 4.0*M_PI*virsum*tabfactor;
2146 void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
2148 double eners[2], virs[2], enersum, virsum, y0, f, g, h;
2149 double r0, r1, r, rc3, rc9, ea, eb, ec, pa, pb, pc, pd;
2150 double invscale, invscale2, invscale3;
2151 int ri0, ri1, ri, i, offstart, offset;
2152 real scale, *vdwtab, tabfactor, tmp;
2154 fr->enershiftsix = 0;
2155 fr->enershifttwelve = 0;
2156 fr->enerdiffsix = 0;
2157 fr->enerdifftwelve = 0;
2159 fr->virdifftwelve = 0;
2161 if (eDispCorr != edispcNO)
2163 for (i = 0; i < 2; i++)
2168 if ((fr->vdwtype == evdwSWITCH) || (fr->vdwtype == evdwSHIFT))
2170 if (fr->rvdw_switch == 0)
2173 "With dispersion correction rvdw-switch can not be zero "
2174 "for vdw-type = %s", evdw_names[fr->vdwtype]);
2177 scale = fr->nblists[0].table_elec_vdw.scale;
2178 vdwtab = fr->nblists[0].table_vdw.data;
2180 /* Round the cut-offs to exact table values for precision */
2181 ri0 = floor(fr->rvdw_switch*scale);
2182 ri1 = ceil(fr->rvdw*scale);
2188 if (fr->vdwtype == evdwSHIFT)
2190 /* Determine the constant energy shift below rvdw_switch.
2191 * Table has a scale factor since we have scaled it down to compensate
2192 * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
2194 fr->enershiftsix = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
2195 fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
2197 /* Add the constant part from 0 to rvdw_switch.
2198 * This integration from 0 to rvdw_switch overcounts the number
2199 * of interactions by 1, as it also counts the self interaction.
2200 * We will correct for this later.
2202 eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
2203 eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
2204 for (i = 0; i < 2; i++)
2208 integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
2209 eners[i] -= enersum;
2213 /* now add the correction for rvdw_switch to infinity */
2214 eners[0] += -4.0*M_PI/(3.0*rc3);
2215 eners[1] += 4.0*M_PI/(9.0*rc9);
2216 virs[0] += 8.0*M_PI/rc3;
2217 virs[1] += -16.0*M_PI/(3.0*rc9);
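/* These four terms are the standard analytical tail integrals for g(r) = 1:
 *   int_rc^inf 4*pi*r^2 * (-r^-6)  dr = -4*pi/(3*rc^3)
 *   int_rc^inf 4*pi*r^2 * ( r^-12) dr =  4*pi/(9*rc^9)
 * and, for the virial, int_rc^inf 4*pi*r^3 * dV/dr dr, giving
 *   8*pi/rc^3 for dispersion and -16*pi/(3*rc^9) for repulsion.
 */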
2219 else if (EVDW_PME(fr->vdwtype))
2221 if (EVDW_SWITCHED(fr->vdwtype) && fr->rvdw_switch == 0)
2224 "With dispersion correction rvdw-switch can not be zero "
2225 "for vdw-type = %s", evdw_names[fr->vdwtype]);
2228 scale = fr->nblists[0].table_vdw.scale;
2229 vdwtab = fr->nblists[0].table_vdw.data;
2231 ri0 = floor(fr->rvdw_switch*scale);
2232 ri1 = ceil(fr->rvdw*scale);
2238 /* Calculate self-interaction coefficient (assuming that
2239 * the reciprocal-space contribution is constant in the
2240 * region that contributes to the self-interaction).
2242 fr->enershiftsix = pow(fr->ewaldcoeff_lj, 6) / 6.0;
2244 /* Calculate C12 values as without PME. */
2245 if (EVDW_SWITCHED(fr->vdwtype))
2249 integrate_table(vdwtab, scale, 4, ri0, ri1, &enersum, &virsum);
2250 eners[1] -= enersum;
2253 /* Add analytical corrections, C6 for the whole range, C12
2254 * from rvdw_switch to infinity.
2257 eners[0] += -pow(sqrt(M_PI)*fr->ewaldcoeff_lj, 3)/3.0;
2258 eners[1] += 4.0*M_PI/(9.0*rc9);
2259 virs[0] += pow(sqrt(M_PI)*fr->ewaldcoeff_lj, 3);
2260 virs[1] += -16.0*M_PI/(3.0*rc9);
2262 else if ((fr->vdwtype == evdwCUT) || (fr->vdwtype == evdwUSER))
2264 if (fr->vdwtype == evdwUSER && fplog)
2267 "WARNING: using dispersion correction with user tables\n");
2269 rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
2271 /* Contribution beyond the cut-off */
2272 eners[0] += -4.0*M_PI/(3.0*rc3);
2273 eners[1] += 4.0*M_PI/(9.0*rc9);
2274 if (fr->vdw_modifier == eintmodPOTSHIFT)
2276 /* Contribution within the cut-off */
2277 eners[0] += -4.0*M_PI/(3.0*rc3);
2278 eners[1] += 4.0*M_PI/(3.0*rc9);
2280 /* Contribution beyond the cut-off */
2281 virs[0] += 8.0*M_PI/rc3;
2282 virs[1] += -16.0*M_PI/(3.0*rc9);
2287 "Dispersion correction is not implemented for vdw-type = %s",
2288 evdw_names[fr->vdwtype]);
2290 fr->enerdiffsix = eners[0];
2291 fr->enerdifftwelve = eners[1];
2292 /* The 0.5 is due to the Gromacs definition of the virial */
2293 fr->virdiffsix = 0.5*virs[0];
2294 fr->virdifftwelve = 0.5*virs[1];
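/* These per-pair integrals are combined later in calc_dispcorr() with the
 * number of interactions, the particle density and the average C6/C12
 * parameters to give the actual energy, pressure and dH/dlambda corrections.
 */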
2298 void calc_dispcorr(FILE *fplog, t_inputrec *ir, t_forcerec *fr,
2299 gmx_int64_t step, int natoms,
2300 matrix box, real lambda, tensor pres, tensor virial,
2301 real *prescorr, real *enercorr, real *dvdlcorr)
2303 gmx_bool bCorrAll, bCorrPres;
2304 real dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
2314 if (ir->eDispCorr != edispcNO)
2316 bCorrAll = (ir->eDispCorr == edispcAllEner ||
2317 ir->eDispCorr == edispcAllEnerPres);
2318 bCorrPres = (ir->eDispCorr == edispcEnerPres ||
2319 ir->eDispCorr == edispcAllEnerPres);
2321 invvol = 1/det(box);
2324 /* Only correct for the interactions with the inserted molecule */
2325 dens = (natoms - fr->n_tpi)*invvol;
2330 dens = natoms*invvol;
2331 ninter = 0.5*natoms;
2334 if (ir->efep == efepNO)
2336 avcsix = fr->avcsix[0];
2337 avctwelve = fr->avctwelve[0];
2341 avcsix = (1 - lambda)*fr->avcsix[0] + lambda*fr->avcsix[1];
2342 avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
2345 enerdiff = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
2346 *enercorr += avcsix*enerdiff;
2348 if (ir->efep != efepNO)
2350 dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
2354 enerdiff = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
2355 *enercorr += avctwelve*enerdiff;
2356 if (fr->efep != efepNO)
2358 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
2364 svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
2365 if (ir->eDispCorr == edispcAllEnerPres)
2367 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
2369 /* The factor 2 is because of the Gromacs virial definition */
2370 spres = -2.0*invvol*svir*PRESFAC;
2372 for (m = 0; m < DIM; m++)
2374 virial[m][m] += svir;
2375 pres[m][m] += spres;
2380 /* Can't currently control when it prints; for now, just print when debugging */
2385 fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
2391 "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
2392 *enercorr, spres, svir);
2396 fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
2400 if (fr->bSepDVDL && do_per_step(step, ir->nstlog))
2402 gmx_print_sepdvdl(fplog, "Dispersion correction", *enercorr, dvdlambda);
2404 if (fr->efep != efepNO)
2406 *dvdlcorr += dvdlambda;
2411 void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
2412 t_graph *graph, rvec x[])
2416 fprintf(fplog, "Removing pbc first time\n");
2418 calc_shifts(box, fr->shift_vec);
2421 mk_mshift(fplog, graph, fr->ePBC, box, x);
2424 p_graph(debug, "do_pbc_first 1", graph);
2426 shift_self(graph, box, x);
2427 /* By doing an extra mk_mshift the molecules that are broken
2428 * because they were e.g. imported from another software
2429 * will be made whole again. Such are the healing powers
2432 mk_mshift(fplog, graph, fr->ePBC, box, x);
2435 p_graph(debug, "do_pbc_first 2", graph);
2440 fprintf(fplog, "Done rmpbc\n");
2444 static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2445 gmx_mtop_t *mtop, rvec x[],
2450 gmx_molblock_t *molb;
2452 if (bFirst && fplog)
2454 fprintf(fplog, "Removing pbc first time\n");
2459 for (mb = 0; mb < mtop->nmolblock; mb++)
2461 molb = &mtop->molblock[mb];
2462 if (molb->natoms_mol == 1 ||
2463 (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
2465 /* Just one atom or charge group in the molecule, no PBC required */
2466 as += molb->nmol*molb->natoms_mol;
2470 /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
2471 mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
2472 0, molb->natoms_mol, FALSE, FALSE, graph);
2474 for (mol = 0; mol < molb->nmol; mol++)
2476 mk_mshift(fplog, graph, ePBC, box, x+as);
2478 shift_self(graph, box, x+as);
2479 /* The molecule is whole now.
2480 * We don't need the second mk_mshift call as in do_pbc_first,
2481 * since we no longer need this graph.
2484 as += molb->natoms_mol;
2492 void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
2493 gmx_mtop_t *mtop, rvec x[])
2495 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
2498 void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2499 gmx_mtop_t *mtop, rvec x[])
2501 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
2504 void finish_run(FILE *fplog, t_commrec *cr,
2505 t_inputrec *inputrec,
2506 t_nrnb nrnb[], gmx_wallcycle_t wcycle,
2507 gmx_walltime_accounting_t walltime_accounting,
2508 wallclock_gpu_t *gputimes,
2509 gmx_bool bWriteStat)
2512 t_nrnb *nrnb_tot = NULL;
2515 double elapsed_time,
2516 elapsed_time_over_all_ranks,
2517 elapsed_time_over_all_threads,
2518 elapsed_time_over_all_threads_over_all_ranks;
2519 wallcycle_sum(cr, wcycle);
2525 MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
2526 cr->mpi_comm_mysim);
2534 elapsed_time = walltime_accounting_get_elapsed_time(walltime_accounting);
2535 elapsed_time_over_all_ranks = elapsed_time;
2536 elapsed_time_over_all_threads = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
2537 elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
2541 /* reduce elapsed_time over all MPI ranks in the current simulation */
2542 MPI_Allreduce(&elapsed_time,
2543 &elapsed_time_over_all_ranks,
2544 1, MPI_DOUBLE, MPI_SUM,
2545 cr->mpi_comm_mysim);
2546 elapsed_time_over_all_ranks /= cr->nnodes;
2547 /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
2548 * current simulation. */
2549 MPI_Allreduce(&elapsed_time_over_all_threads,
2550 &elapsed_time_over_all_threads_over_all_ranks,
2551 1, MPI_DOUBLE, MPI_SUM,
2552 cr->mpi_comm_mysim);
2558 print_flop(fplog, nrnb_tot, &nbfs, &mflop);
2565 if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
2567 print_dd_statistics(cr, inputrec, fplog);
2579 snew(nrnb_all, cr->nnodes);
2580 nrnb_all[0] = *nrnb;
2581 for (s = 1; s < cr->nnodes; s++)
2583 MPI_Recv(nrnb_all[s].n, eNRNB, MPI_DOUBLE, s, 0,
2584 cr->mpi_comm_mysim, &stat);
2586 pr_load(fplog, cr, nrnb_all);
2591 MPI_Send(nrnb->n, eNRNB, MPI_DOUBLE, MASTERRANK(cr), 0,
2592 cr->mpi_comm_mysim);
2599 wallcycle_print(fplog, cr->nnodes, cr->npmenodes,
2600 elapsed_time_over_all_ranks,
2603 if (EI_DYNAMICS(inputrec->eI))
2605 delta_t = inputrec->delta_t;
2614 print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
2615 elapsed_time_over_all_ranks,
2616 walltime_accounting_get_nsteps_done(walltime_accounting),
2617 delta_t, nbfs, mflop);
2621 print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
2622 elapsed_time_over_all_ranks,
2623 walltime_accounting_get_nsteps_done(walltime_accounting),
2624 delta_t, nbfs, mflop);
2629 extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, real *lambda, double *lam0)
2631 /* this function works, but could probably use a logic rewrite to keep all the different
2632 types of efep straight. */
2635 t_lambda *fep = ir->fepvals;
2637 if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
2639 for (i = 0; i < efptNR; i++)
2651 *fep_state = fep->init_fep_state; /* this might overwrite the checkpoint
2652 if checkpoint is set -- a kludge is in for now
2654 for (i = 0; i < efptNR; i++)
2656 /* overwrite lambda state with init_lambda for now for backwards compatibility */
2657 if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
2659 lambda[i] = fep->init_lambda;
2662 lam0[i] = lambda[i];
2667 lambda[i] = fep->all_lambda[i][*fep_state];
2670 lam0[i] = lambda[i];
2676 /* need to rescale control temperatures to match current state */
2677 for (i = 0; i < ir->opts.ngtc; i++)
2679 if (ir->opts.ref_t[i] > 0)
2681 ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
2687 /* Send to the log the information on the current lambdas */
2690 fprintf(fplog, "Initial vector of lambda components:[ ");
2691 for (i = 0; i < efptNR; i++)
2693 fprintf(fplog, "%10.4f ", lambda[i]);
2695 fprintf(fplog, "]\n");
2701 void init_md(FILE *fplog,
2702 t_commrec *cr, t_inputrec *ir, const output_env_t oenv,
2703 double *t, double *t0,
2704 real *lambda, int *fep_state, double *lam0,
2705 t_nrnb *nrnb, gmx_mtop_t *mtop,
2707 int nfile, const t_filenm fnm[],
2708 gmx_mdoutf_t **outf, t_mdebin **mdebin,
2709 tensor force_vir, tensor shake_vir, rvec mu_tot,
2710 gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags)
2715 /* Initial values */
2716 *t = *t0 = ir->init_t;
2719 for (i = 0; i < ir->opts.ngtc; i++)
2721 /* set bSimAnn if any group is being annealed */
2722 if (ir->opts.annealing[i] != eannNO)
2729 update_annealing_target_temp(&(ir->opts), ir->init_t);
2732 /* Initialize lambda variables */
2733 initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
2737 *upd = init_update(ir);
2743 *vcm = init_vcm(fplog, &mtop->groups, ir);
2746 if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
2748 if (ir->etc == etcBERENDSEN)
2750 please_cite(fplog, "Berendsen84a");
2752 if (ir->etc == etcVRESCALE)
2754 please_cite(fplog, "Bussi2007a");
2762 *outf = init_mdoutf(nfile, fnm, Flags, cr, ir, oenv);
2764 *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : (*outf)->fp_ene,
2765 mtop, ir, (*outf)->fp_dhdl);
2770 please_cite(fplog, "Fritsch12");
2771 please_cite(fplog, "Junghans10");
2773 /* Initialize variables */
2774 clear_mat(force_vir);
2775 clear_mat(shake_vir);