1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
4 * This source code is part of
8 * GROningen MAchine for Chemical Simulations
11 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
12 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
13 * Copyright (c) 2001-2004, The GROMACS development team,
14 * check out http://www.gromacs.org for more information.
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version 2
19 * of the License, or (at your option) any later version.
21 * If you want to redistribute modifications, please consider that
22 * scientific software is very special. Version control is crucial -
23 * bugs must be traceable. We will be happy to consider code for
24 * inclusion in the official distribution, but derived work must not
25 * be called official GROMACS. Details are found in the README & COPYING
26 * files - if they are missing, get the official version at www.gromacs.org.
28 * To help us fund GROMACS development, we humbly ask that you cite
29 * the papers on the package - you can find them in the top README file.
31 * For more info, check our website at http://www.gromacs.org
34 * GROwing Monsters And Cloning Shrimps
41 #include <catamount/dclock.h>
47 #ifdef HAVE_SYS_TIME_H
60 #include "chargegroup.h"
83 #include "pull_rotation.h"
84 #include "gmx_random.h"
87 #include "gmx_wallcycle.h"
89 #include "nbnxn_atomdata.h"
90 #include "nbnxn_search.h"
91 #include "nbnxn_kernels/nbnxn_kernel_ref.h"
92 #include "nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
93 #include "nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
94 #include "nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
96 #include "gromacs/utility/gmxmpi.h"
101 #include "nbnxn_cuda_data_mgmt.h"
102 #include "nbnxn_cuda/nbnxn_cuda.h"
107 #ifdef HAVE_GETTIMEOFDAY
111 gettimeofday(&t, NULL);
113 seconds = (double) t.tv_sec + 1e-6*(double)t.tv_usec;
119 seconds = time(NULL);
126 #define difftime(end, start) ((double)(end)-(double)(start))
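/* Editor's note (not in the original source): when gettimeofday() is not
 * available, the time(NULL) fallback above only has 1-second resolution,
 * so the per-step timings printed by print_time() become correspondingly
 * coarse on such platforms.
 */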
128 void print_time(FILE *out, gmx_runtime_t *runtime, gmx_large_int_t step,
129 t_inputrec *ir, t_commrec gmx_unused *cr)
132 char timebuf[STRLEN];
136 #ifndef GMX_THREAD_MPI
142 fprintf(out, "step %s", gmx_step_str(step, buf));
143 if ((step >= ir->nstlist))
145 runtime->last = gmx_gettime();
146 dt = difftime(runtime->last, runtime->real);
147 runtime->time_per_step = dt/(step - ir->init_step + 1);
149 dt = (ir->nsteps + ir->init_step - step)*runtime->time_per_step;
155 finish = (time_t) (runtime->last + dt);
156 gmx_ctime_r(&finish, timebuf, STRLEN);
157 sprintf(buf, "%s", timebuf);
158 buf[strlen(buf)-1] = '\0';
159 fprintf(out, ", will finish %s", buf);
163 fprintf(out, ", remaining runtime: %5d s ", (int)dt);
168 fprintf(out, " performance: %.1f ns/day ",
169 ir->delta_t/1000*24*60*60/runtime->time_per_step);
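/* Editor's note (added for clarity, not in the original source): delta_t is
 * in ps, so delta_t/1000 is ns per MD step; multiplying by 24*60*60 s/day and
 * dividing by the measured wall-clock seconds per step gives the ns/day
 * figure printed above.
 */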
172 #ifndef GMX_THREAD_MPI
186 static double set_proctime(gmx_runtime_t *runtime)
192 prev = runtime->proc;
193 runtime->proc = dclock();
195 diff = runtime->proc - prev;
199 prev = runtime->proc;
200 runtime->proc = clock();
202 diff = (double)(runtime->proc - prev)/(double)CLOCKS_PER_SEC;
206 /* The counter has probably looped, ignore this data */
213 void runtime_start(gmx_runtime_t *runtime)
215 runtime->real = gmx_gettime();
217 set_proctime(runtime);
218 runtime->realtime = 0;
219 runtime->proctime = 0;
221 runtime->time_per_step = 0;
224 void runtime_end(gmx_runtime_t *runtime)
230 runtime->proctime += set_proctime(runtime);
231 runtime->realtime = now - runtime->real;
235 void runtime_upd_proc(gmx_runtime_t *runtime)
237 runtime->proctime += set_proctime(runtime);
240 void print_date_and_time(FILE *fplog, int nodeid, const char *title,
241 const gmx_runtime_t *runtime)
244 char timebuf[STRLEN];
245 char time_string[STRLEN];
252 tmptime = (time_t) runtime->real;
253 gmx_ctime_r(&tmptime, timebuf, STRLEN);
257 tmptime = (time_t) gmx_gettime();
258 gmx_ctime_r(&tmptime, timebuf, STRLEN);
260 for (i = 0; timebuf[i] >= ' '; i++)
262 time_string[i] = timebuf[i];
264 time_string[i] = '\0';
266 fprintf(fplog, "%s on node %d %s\n", title, nodeid, time_string);
270 static void sum_forces(int start, int end, rvec f[], rvec flr[])
276 pr_rvecs(debug, 0, "fsr", f+start, end-start);
277 pr_rvecs(debug, 0, "flr", flr+start, end-start);
279 for (i = start; (i < end); i++)
281 rvec_inc(f[i], flr[i]);
286 * calc_f_el calculates forces due to an electric field.
288 * The resulting force is in kJ mol^-1 nm^-1: charge (in e) times field (in kJ mol^-1 nm^-1 e^-1).
290 * Et[] contains the parameters for the time dependent
291 * part of the field (not yet used).
292 * Ex[] contains the parameters for
293 * the spatial dependent part of the field. You can have cool periodic
294 * fields in principle, but only a constant field is supported now.
296 * The function should return the energy due to the electric field
297 * (if any) but for now returns 0.
300 * There can be problems with the virial.
301 * Since the field is not self-consistent this is unavoidable.
302 * For neutral molecules the virial is correct within this approximation.
303 * For neutral systems with many charged molecules the error is small.
304 * But for systems with a net charge or a few charged molecules
305 * the error can be significant when the field is high.
306 * Solution: implement a self-consistent electric field into PME.
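/* Editor's note (illustrative, not in the original source): FIELDFAC converts
 * a field given in V/nm to MD units of kJ mol^-1 nm^-1 e^-1; since
 * 1 eV ~= 96.485 kJ/mol, a 1 V/nm field exerts roughly 96.5 kJ mol^-1 nm^-1
 * on a unit charge. The #FIELD output written below divides by FIELDFAC
 * again, so the logged field is back in V/nm.
 */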
308 static void calc_f_el(FILE *fp, int start, int homenr,
309 real charge[], rvec f[],
310 t_cosines Ex[], t_cosines Et[], double t)
316 for (m = 0; (m < DIM); m++)
323 Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));
327 Ext[m] = cos(Et[m].a[0]*t);
336 /* Convert the field strength from V/nm to MD-units */
337 Ext[m] *= Ex[m].a[0]*FIELDFAC;
338 for (i = start; (i < start+homenr); i++)
340 f[i][m] += charge[i]*Ext[m];
350 fprintf(fp, "%10g %10g %10g %10g #FIELD\n", t,
351 Ext[XX]/FIELDFAC, Ext[YY]/FIELDFAC, Ext[ZZ]/FIELDFAC);
355 static void calc_virial(int start, int homenr, rvec x[], rvec f[],
356 tensor vir_part, t_graph *graph, matrix box,
357 t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
362 /* The short-range virial from surrounding boxes */
364 calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
365 inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
367 /* Calculate partial virial, for local atoms only, based on short range.
368 * Total virial is computed in global_stat, called from do_md
370 f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
371 inc_nrnb(nrnb, eNR_VIRIAL, homenr);
373 /* Add position restraint contribution */
374 for (i = 0; i < DIM; i++)
376 vir_part[i][i] += fr->vir_diag_posres[i];
379 /* Add wall contribution */
380 for (i = 0; i < DIM; i++)
382 vir_part[i][ZZ] += fr->vir_wall_z[i];
387 pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
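/* Editor's note (added for clarity, not in the original source): GROMACS
 * defines the virial as Xi = -1/2 sum_i r_i (x) f_i, so calc_vir() and
 * f_calc_vir() above accumulate -1/2 of the position/force outer products;
 * the same 1/2 convention appears again in calc_enervirdiff() below.
 */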
391 static void posres_wrapper(FILE *fplog,
397 matrix box, rvec x[],
398 gmx_enerdata_t *enerd,
406 /* Position restraints always require full pbc */
407 set_pbc(&pbc, ir->ePBC, box);
409 v = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
410 top->idef.iparams_posres,
411 (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
412 ir->ePBC == epbcNONE ? NULL : &pbc,
413 lambda[efptRESTRAINT], &dvdl,
414 fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
417 gmx_print_sepdvdl(fplog, interaction_function[F_POSRES].longname, v, dvdl);
419 enerd->term[F_POSRES] += v;
420 /* If just the reference positions change, the FEP term is linear,
421 * but if the force constant k changes, it is not.
423 enerd->dvdl_nonlin[efptRESTRAINT] += dvdl;
424 inc_nrnb(nrnb, eNR_POSRES, top->idef.il[F_POSRES].nr/2);
426 if ((ir->fepvals->n_lambda > 0) && (flags & GMX_FORCE_DHDL))
428 for (i = 0; i < enerd->n_lambda; i++)
430 real dvdl_dum, lambda_dum;
432 lambda_dum = (i == 0 ? lambda[efptRESTRAINT] : ir->fepvals->all_lambda[efptRESTRAINT][i-1]);
433 v = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
434 top->idef.iparams_posres,
435 (const rvec*)x, NULL, NULL,
436 ir->ePBC == epbcNONE ? NULL : &pbc, lambda_dum, &dvdl,
437 fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
438 enerd->enerpart_lambda[i] += v;
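/* Editor's note (added for clarity, not in the original source): the loop
 * above re-evaluates the position-restraint energy at every foreign lambda
 * value (index 0 being the current state) and accumulates it into
 * enerd->enerpart_lambda[], which is what free-energy estimators such as
 * BAR need; only the energies are kept, no forces are written (NULL is
 * passed for the force array).
 */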
443 static void pull_potential_wrapper(FILE *fplog,
447 matrix box, rvec x[],
451 gmx_enerdata_t *enerd,
458 /* Calculate the center of mass forces, this requires communication,
459 * which is why pull_potential is called close to other communication.
460 * The virial contribution is calculated directly,
461 * which is why we call pull_potential after calc_virial.
463 set_pbc(&pbc, ir->ePBC, box);
465 enerd->term[F_COM_PULL] +=
466 pull_potential(ir->ePull, ir->pull, mdatoms, &pbc,
467 cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
470 gmx_print_sepdvdl(fplog, "Com pull", enerd->term[F_COM_PULL], dvdl);
472 enerd->dvdl_lin[efptRESTRAINT] += dvdl;
475 static void pme_receive_force_ener(FILE *fplog,
478 gmx_wallcycle_t wcycle,
479 gmx_enerdata_t *enerd,
483 float cycles_ppdpme, cycles_seppme;
485 cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
486 dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
488 /* In case of node-splitting, the PP nodes receive the long-range
489 * forces, virial and energy from the PME nodes here.
491 wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
493 gmx_pme_receive_f(cr, fr->f_novirsum, fr->vir_el_recip, &e, &dvdl,
497 gmx_print_sepdvdl(fplog, "PME mesh", e, dvdl);
499 enerd->term[F_COUL_RECIP] += e;
500 enerd->dvdl_lin[efptCOUL] += dvdl;
503 dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
505 wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
508 static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
509 gmx_large_int_t step, real pforce, rvec *x, rvec *f)
513 char buf[STEPSTRSIZE];
516 for (i = md->start; i < md->start+md->homenr; i++)
519 /* We also catch NAN, if the compiler does not optimize this away. */
520 if (fn2 >= pf2 || fn2 != fn2)
522 fprintf(fp, "step %s atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
523 gmx_step_str(step, buf),
524 ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], sqrt(fn2));
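/* Editor's note (added for clarity, not in the original source): the squared
 * force norm is compared against the squared threshold so no sqrt is needed
 * per atom; the sqrt is only taken for the few forces that are printed, and
 * the self-comparison fn2 != fn2 is the usual idiom for detecting NaN.
 */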
529 static void post_process_forces(t_commrec *cr,
530 gmx_large_int_t step,
531 t_nrnb *nrnb, gmx_wallcycle_t wcycle,
533 matrix box, rvec x[],
538 t_forcerec *fr, gmx_vsite_t *vsite,
545 /* Spread the mesh force on virtual sites to the other particles...
546 * This is parallelized. MPI communication is performed
547 * if the constructing atoms aren't local.
549 wallcycle_start(wcycle, ewcVSITESPREAD);
550 spread_vsite_f(vsite, x, fr->f_novirsum, NULL,
551 (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
553 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
554 wallcycle_stop(wcycle, ewcVSITESPREAD);
556 if (flags & GMX_FORCE_VIRIAL)
558 /* Now add the forces, this is local */
561 sum_forces(0, fr->f_novirsum_n, f, fr->f_novirsum);
565 sum_forces(mdatoms->start, mdatoms->start+mdatoms->homenr,
568 if (EEL_FULL(fr->eeltype))
570 /* Add the mesh contribution to the virial */
571 m_add(vir_force, fr->vir_el_recip, vir_force);
575 pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
580 if (fr->print_force >= 0)
582 print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
586 static void do_nb_verlet(t_forcerec *fr,
587 interaction_const_t *ic,
588 gmx_enerdata_t *enerd,
589 int flags, int ilocality,
593 int nnbl, kernel_type, enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
595 nonbonded_verlet_group_t *nbvg;
598 if (!(flags & GMX_FORCE_NONBONDED))
600 /* skip non-bonded calculation */
604 nbvg = &fr->nbv->grp[ilocality];
606 /* CUDA kernel launch overhead is already timed separately */
607 if (fr->cutoff_scheme != ecutsVERLET)
609 gmx_incons("Invalid cut-off scheme passed!");
612 bCUDA = (nbvg->kernel_type == nbnxnk8x8x8_CUDA);
616 wallcycle_sub_start(wcycle, ewcsNONBONDED);
618 switch (nbvg->kernel_type)
620 case nbnxnk4x4_PlainC:
621 nbnxn_kernel_ref(&nbvg->nbl_lists,
627 enerd->grpp.ener[egCOULSR],
629 enerd->grpp.ener[egBHAMSR] :
630 enerd->grpp.ener[egLJSR]);
633 case nbnxnk4xN_SIMD_4xN:
634 nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
641 enerd->grpp.ener[egCOULSR],
643 enerd->grpp.ener[egBHAMSR] :
644 enerd->grpp.ener[egLJSR]);
646 case nbnxnk4xN_SIMD_2xNN:
647 nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
654 enerd->grpp.ener[egCOULSR],
656 enerd->grpp.ener[egBHAMSR] :
657 enerd->grpp.ener[egLJSR]);
660 case nbnxnk8x8x8_CUDA:
661 nbnxn_cuda_launch_kernel(fr->nbv->cu_nbv, nbvg->nbat, flags, ilocality);
664 case nbnxnk8x8x8_PlainC:
665 nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
670 nbvg->nbat->out[0].f,
672 enerd->grpp.ener[egCOULSR],
674 enerd->grpp.ener[egBHAMSR] :
675 enerd->grpp.ener[egLJSR]);
679 gmx_incons("Invalid nonbonded kernel type passed!");
684 wallcycle_sub_stop(wcycle, ewcsNONBONDED);
687 if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
689 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
691 else if ((!bCUDA && nbvg->ewald_excl == ewaldexclAnalytical) ||
692 (bCUDA && nbnxn_cuda_is_kernel_ewald_analytical(fr->nbv->cu_nbv)))
694 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
698 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
700 enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
701 if (flags & GMX_FORCE_ENERGY)
703 /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
704 enr_nbnxn_kernel_ljc += 1;
705 enr_nbnxn_kernel_lj += 1;
708 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
709 nbvg->nbl_lists.natpair_ljq);
710 inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
711 nbvg->nbl_lists.natpair_lj);
712 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
713 nbvg->nbl_lists.natpair_q);
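/* Editor's note (added for clarity, not in the original source): the flop
 * accounting above uses three pair counts from the pair lists: natpair_ljq
 * (pairs with both LJ and Coulomb), natpair_lj (LJ-only pairs) and natpair_q
 * (Coulomb-only pairs, booked via the offset onto the eNR_NBNXN_RF counters);
 * when energies are requested, the F+E kernel counter is simply the F
 * counter plus one, as noted above.
 */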
716 void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
717 t_inputrec *inputrec,
718 gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
720 gmx_groups_t gmx_unused *groups,
721 matrix box, rvec x[], history_t *hist,
725 gmx_enerdata_t *enerd, t_fcdata *fcd,
726 real *lambda, t_graph *graph,
727 t_forcerec *fr, interaction_const_t *ic,
728 gmx_vsite_t *vsite, rvec mu_tot,
729 double t, FILE *field, gmx_edsam_t ed,
737 gmx_bool bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
738 gmx_bool bDoLongRange, bDoForces, bSepLRF, bUseGPU, bUseOrEmulGPU;
739 gmx_bool bDiffKernels = FALSE;
741 rvec vzero, box_diag;
743 float cycles_pme, cycles_force;
744 nonbonded_verlet_t *nbv;
748 nb_kernel_type = fr->nbv->grp[0].kernel_type;
750 start = mdatoms->start;
751 homenr = mdatoms->homenr;
753 bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));
755 clear_mat(vir_force);
758 if (DOMAINDECOMP(cr))
760 cg1 = cr->dd->ncg_tot;
771 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
772 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
773 bFillGrid = (bNS && bStateChanged);
774 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
775 bDoLongRange = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DO_LR));
776 bDoForces = (flags & GMX_FORCE_FORCES);
777 bSepLRF = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
778 bUseGPU = fr->nbv->bUseGPU;
779 bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);
783 update_forcerec(fr, box);
785 if (NEED_MUTOT(*inputrec))
787 /* Calculate total (local) dipole moment in a temporary common array.
788 * This makes it possible to sum them over nodes faster.
790 calc_mu(start, homenr,
791 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
796 if (fr->ePBC != epbcNONE)
798 /* Compute shift vectors every step,
799 * because of pressure coupling or box deformation!
801 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
803 calc_shifts(box, fr->shift_vec);
808 put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
809 inc_nrnb(nrnb, eNR_SHIFTX, homenr);
811 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
813 unshift_self(graph, box, x);
817 nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
818 fr->shift_vec, nbv->grp[0].nbat);
821 if (!(cr->duty & DUTY_PME))
823 /* Send particle coordinates to the pme nodes.
824 * Since this is only implemented for domain decomposition
825 * and domain decomposition does not use the graph,
826 * we do not need to worry about shifting.
829 wallcycle_start(wcycle, ewcPP_PMESENDX);
831 bBS = (inputrec->nwall == 2);
835 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
838 gmx_pme_send_x(cr, bBS ? boxs : box, x,
839 mdatoms->nChargePerturbed, lambda[efptCOUL],
840 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)), step);
842 wallcycle_stop(wcycle, ewcPP_PMESENDX);
846 /* do gridding for pair search */
849 if (graph && bStateChanged)
851 /* Calculate intramolecular shift vectors to make molecules whole */
852 mk_mshift(fplog, graph, fr->ePBC, box, x);
856 box_diag[XX] = box[XX][XX];
857 box_diag[YY] = box[YY][YY];
858 box_diag[ZZ] = box[ZZ][ZZ];
860 wallcycle_start(wcycle, ewcNS);
863 wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
864 nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
866 0, mdatoms->homenr, -1, fr->cginfo, x,
868 nbv->grp[eintLocal].kernel_type,
869 nbv->grp[eintLocal].nbat);
870 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
874 wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
875 nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
877 nbv->grp[eintNonlocal].kernel_type,
878 nbv->grp[eintNonlocal].nbat);
879 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
882 if (nbv->ngrp == 1 ||
883 nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
885 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
886 nbv->nbs, mdatoms, fr->cginfo);
890 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
891 nbv->nbs, mdatoms, fr->cginfo);
892 nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
893 nbv->nbs, mdatoms, fr->cginfo);
895 wallcycle_stop(wcycle, ewcNS);
898 /* initialize the GPU atom data and copy shift vector */
903 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
904 nbnxn_cuda_init_atomdata(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
905 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
908 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
909 nbnxn_cuda_upload_shiftvec(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
910 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
913 /* do local pair search */
916 wallcycle_start_nocount(wcycle, ewcNS);
917 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
918 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
921 nbv->min_ci_balanced,
922 &nbv->grp[eintLocal].nbl_lists,
924 nbv->grp[eintLocal].kernel_type,
926 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
930 /* initialize local pair-list on the GPU */
931 nbnxn_cuda_init_pairlist(nbv->cu_nbv,
932 nbv->grp[eintLocal].nbl_lists.nbl[0],
935 wallcycle_stop(wcycle, ewcNS);
939 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
940 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
941 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
942 nbv->grp[eintLocal].nbat);
943 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
944 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
949 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
950 /* launch local nonbonded F on GPU */
951 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
953 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
956 /* Communicate coordinates and sum dipole if necessary +
957 do non-local pair search */
958 if (DOMAINDECOMP(cr))
960 bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
961 nbv->grp[eintLocal].kernel_type);
965 /* With GPU+CPU non-bonded calculations we need to copy
966 * the local coordinates to the non-local nbat struct
967 * (in CPU format) as the non-local kernel call also
968 * calculates the local - non-local interactions.
970 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
971 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
972 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
973 nbv->grp[eintNonlocal].nbat);
974 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
975 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
980 wallcycle_start_nocount(wcycle, ewcNS);
981 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
985 nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
988 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
991 nbv->min_ci_balanced,
992 &nbv->grp[eintNonlocal].nbl_lists,
994 nbv->grp[eintNonlocal].kernel_type,
997 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
999 if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_CUDA)
1001 /* initialize non-local pair-list on the GPU */
1002 nbnxn_cuda_init_pairlist(nbv->cu_nbv,
1003 nbv->grp[eintNonlocal].nbl_lists.nbl[0],
1006 wallcycle_stop(wcycle, ewcNS);
1010 wallcycle_start(wcycle, ewcMOVEX);
1011 dd_move_x(cr->dd, box, x);
1013 /* When we don't need the total dipole we sum it in global_stat */
1014 if (bStateChanged && NEED_MUTOT(*inputrec))
1016 gmx_sumd(2*DIM, mu, cr);
1018 wallcycle_stop(wcycle, ewcMOVEX);
1020 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1021 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
1022 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
1023 nbv->grp[eintNonlocal].nbat);
1024 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
1025 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1028 if (bUseGPU && !bDiffKernels)
1030 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
1031 /* launch non-local nonbonded F on GPU */
1032 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
1034 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1040 /* launch D2H copy-back F */
1041 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1042 if (DOMAINDECOMP(cr) && !bDiffKernels)
1044 nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintNonlocal].nbat,
1045 flags, eatNonlocal);
1047 nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintLocal].nbat,
1049 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1052 if (bStateChanged && NEED_MUTOT(*inputrec))
1056 gmx_sumd(2*DIM, mu, cr);
1059 for (i = 0; i < 2; i++)
1061 for (j = 0; j < DIM; j++)
1063 fr->mu_tot[i][j] = mu[i*DIM + j];
1067 if (fr->efep == efepNO)
1069 copy_rvec(fr->mu_tot[0], mu_tot);
1073 for (j = 0; j < DIM; j++)
1076 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
1077 lambda[efptCOUL]*fr->mu_tot[1][j];
1081 /* Reset energies */
1082 reset_enerdata(fr, bNS, enerd, MASTER(cr));
1083 clear_rvecs(SHIFTS, fr->fshift);
1085 if (DOMAINDECOMP(cr))
1087 if (!(cr->duty & DUTY_PME))
1089 wallcycle_start(wcycle, ewcPPDURINGPME);
1090 dd_force_flop_start(cr->dd, nrnb);
1096 /* Enforced rotation has its own cycle counter that starts after the collective
1097 * coordinates have been communicated. It is added to ddCyclF to allow
1098 * for proper load-balancing */
1099 wallcycle_start(wcycle, ewcROT);
1100 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1101 wallcycle_stop(wcycle, ewcROT);
1104 /* Start the force cycle counter.
1105 * This counter is stopped in do_forcelow_level.
1106 * No parallel communication should occur while this counter is running,
1107 * since that will interfere with the dynamic load balancing.
1109 wallcycle_start(wcycle, ewcFORCE);
1112 /* Reset forces for which the virial is calculated separately:
1113 * PME/Ewald forces if necessary */
1114 if (fr->bF_NoVirSum)
1116 if (flags & GMX_FORCE_VIRIAL)
1118 fr->f_novirsum = fr->f_novirsum_alloc;
1121 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1125 clear_rvecs(homenr, fr->f_novirsum+start);
1130 /* We are not calculating the pressure so we do not need
1131 * a separate array for forces that do not contribute
1138 /* Clear the short- and long-range forces */
1139 clear_rvecs(fr->natoms_force_constr, f);
1140 if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1142 clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1145 clear_rvec(fr->vir_diag_posres);
1148 if (inputrec->ePull == epullCONSTRAINT)
1150 clear_pull_forces(inputrec->pull);
1153 /* We calculate the non-bonded forces, when done on the CPU, here.
1154 * We do this before calling do_force_lowlevel, as the bonded
1155 * forces are calculated there before PME, which does communication.
1156 * With this order, non-bonded and bonded force calculation imbalance
1157 * can be balanced out by the domain decomposition load balancing.
1162 /* Maybe we should move this into do_force_lowlevel */
1163 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
1167 if (!bUseOrEmulGPU || bDiffKernels)
1171 if (DOMAINDECOMP(cr))
1173 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
1174 bDiffKernels ? enbvClearFYes : enbvClearFNo,
1184 aloc = eintNonlocal;
1187 /* Add all the non-bonded force to the normal force array.
1188 * This can be split into a local and a non-local part when overlapping
1189 * communication with calculation with domain decomposition.
1191 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1192 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1193 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1194 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
1195 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1196 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1197 wallcycle_start_nocount(wcycle, ewcFORCE);
1199 /* if there are multiple fshift output buffers reduce them */
1200 if ((flags & GMX_FORCE_VIRIAL) &&
1201 nbv->grp[aloc].nbl_lists.nnbl > 1)
1203 nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
1208 /* update QMMMrec, if necessary */
1211 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1214 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
1216 posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
1220 /* Compute the bonded and non-bonded energies and optionally forces */
1221 do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
1222 cr, nrnb, wcycle, mdatoms,
1223 x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
1224 &(top->atomtypes), bBornRadii, box,
1225 inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
1226 flags, &cycles_pme);
1230 if (do_per_step(step, inputrec->nstcalclr))
1232 /* Add the long range forces to the short range forces */
1233 for (i = 0; i < fr->natoms_force_constr; i++)
1235 rvec_add(fr->f_twin[i], f[i], f[i]);
1240 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1244 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1247 if (bUseOrEmulGPU && !bDiffKernels)
1249 /* wait for non-local forces (or calculate in emulation mode) */
1250 if (DOMAINDECOMP(cr))
1254 wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
1255 nbnxn_cuda_wait_gpu(nbv->cu_nbv,
1256 nbv->grp[eintNonlocal].nbat,
1258 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1260 cycles_force += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
1264 wallcycle_start_nocount(wcycle, ewcFORCE);
1265 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
1267 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1269 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1270 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1271 /* skip the reduction if there was no non-local work to do */
1272 if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
1274 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
1275 nbv->grp[eintNonlocal].nbat, f);
1277 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1278 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1284 /* Communicate the forces */
1287 wallcycle_start(wcycle, ewcMOVEF);
1288 if (DOMAINDECOMP(cr))
1290 dd_move_f(cr->dd, f, fr->fshift);
1291 /* Do we need to communicate the separate force array
1292 * for terms that do not contribute to the single sum virial?
1293 * Position restraints and electric fields do not introduce
1294 * inter-cg forces, only full electrostatics methods do.
1295 * When we do not calculate the virial, fr->f_novirsum = f,
1296 * so we have already communicated these forces.
1298 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1299 (flags & GMX_FORCE_VIRIAL))
1301 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1305 /* We should not update the shift forces here,
1306 * since f_twin is already included in f.
1308 dd_move_f(cr->dd, fr->f_twin, NULL);
1311 wallcycle_stop(wcycle, ewcMOVEF);
1317 /* wait for local forces (or calculate in emulation mode) */
1320 wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
1321 nbnxn_cuda_wait_gpu(nbv->cu_nbv,
1322 nbv->grp[eintLocal].nbat,
1324 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1326 wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
1328 /* now clear the GPU outputs while we finish the step on the CPU */
1330 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1331 nbnxn_cuda_clear_outputs(nbv->cu_nbv, flags);
1332 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1336 wallcycle_start_nocount(wcycle, ewcFORCE);
1337 do_nb_verlet(fr, ic, enerd, flags, eintLocal,
1338 DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
1340 wallcycle_stop(wcycle, ewcFORCE);
1342 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1343 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1344 if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
1346 /* skip the reduction if there was no local work to do */
1347 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
1348 nbv->grp[eintLocal].nbat, f);
1350 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1351 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1354 if (DOMAINDECOMP(cr))
1356 dd_force_flop_stop(cr->dd, nrnb);
1359 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1365 if (IR_ELEC_FIELD(*inputrec))
1367 /* Compute forces due to electric field */
1368 calc_f_el(MASTER(cr) ? field : NULL,
1369 start, homenr, mdatoms->chargeA, fr->f_novirsum,
1370 inputrec->ex, inputrec->et, t);
1373 /* If we have NoVirSum forces, but we do not calculate the virial,
1374 * we sum fr->f_novirsum = f later.
1376 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1378 wallcycle_start(wcycle, ewcVSITESPREAD);
1379 spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1380 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1381 wallcycle_stop(wcycle, ewcVSITESPREAD);
1385 wallcycle_start(wcycle, ewcVSITESPREAD);
1386 spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
1388 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1389 wallcycle_stop(wcycle, ewcVSITESPREAD);
1393 if (flags & GMX_FORCE_VIRIAL)
1395 /* Calculation of the virial must be done after vsites! */
1396 calc_virial(mdatoms->start, mdatoms->homenr, x, f,
1397 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1401 if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1403 pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
1404 f, vir_force, mdatoms, enerd, lambda, t);
1407 /* Add the forces from enforced rotation potentials (if any) */
1410 wallcycle_start(wcycle, ewcROTadd);
1411 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1412 wallcycle_stop(wcycle, ewcROTadd);
1415 if (PAR(cr) && !(cr->duty & DUTY_PME))
1417 /* In case of node-splitting, the PP nodes receive the long-range
1418 * forces, virial and energy from the PME nodes here.
1420 pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
1425 post_process_forces(cr, step, nrnb, wcycle,
1426 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1430 /* Sum the potential energy terms from group contributions */
1431 sum_epot(&(enerd->grpp), enerd->term);
1434 void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
1435 t_inputrec *inputrec,
1436 gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1437 gmx_localtop_t *top,
1438 gmx_groups_t *groups,
1439 matrix box, rvec x[], history_t *hist,
1443 gmx_enerdata_t *enerd, t_fcdata *fcd,
1444 real *lambda, t_graph *graph,
1445 t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
1446 double t, FILE *field, gmx_edsam_t ed,
1447 gmx_bool bBornRadii,
1453 gmx_bool bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
1454 gmx_bool bDoLongRangeNS, bDoForces, bDoPotential, bSepLRF;
1455 gmx_bool bDoAdressWF;
1457 rvec vzero, box_diag;
1458 real e, v, dvdlambda[efptNR];
1460 float cycles_pme, cycles_force;
1462 start = mdatoms->start;
1463 homenr = mdatoms->homenr;
1465 bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));
1467 clear_mat(vir_force);
1471 pd_cg_range(cr, &cg0, &cg1);
1476 if (DOMAINDECOMP(cr))
1478 cg1 = cr->dd->ncg_tot;
1490 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
1491 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
1492 /* Should we update the long-range neighborlists at this step? */
1493 bDoLongRangeNS = fr->bTwinRange && bNS;
1494 /* Should we perform the long-range nonbonded evaluation inside the neighborsearching? */
1495 bFillGrid = (bNS && bStateChanged);
1496 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
1497 bDoForces = (flags & GMX_FORCE_FORCES);
1498 bDoPotential = (flags & GMX_FORCE_ENERGY);
1499 bSepLRF = ((inputrec->nstcalclr > 1) && bDoForces &&
1500 (flags & GMX_FORCE_SEPLRF) && (flags & GMX_FORCE_DO_LR));
1502 /* should probably move this to the forcerec since it doesn't change */
1503 bDoAdressWF = ((fr->adress_type != eAdressOff));
1507 update_forcerec(fr, box);
1509 if (NEED_MUTOT(*inputrec))
1511 /* Calculate total (local) dipole moment in a temporary common array.
1512 * This makes it possible to sum them over nodes faster.
1514 calc_mu(start, homenr,
1515 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
1520 if (fr->ePBC != epbcNONE)
1522 /* Compute shift vectors every step,
1523 * because of pressure coupling or box deformation!
1525 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
1527 calc_shifts(box, fr->shift_vec);
1532 put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
1533 &(top->cgs), x, fr->cg_cm);
1534 inc_nrnb(nrnb, eNR_CGCM, homenr);
1535 inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
1537 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1539 unshift_self(graph, box, x);
1544 calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
1545 inc_nrnb(nrnb, eNR_CGCM, homenr);
1552 move_cgcm(fplog, cr, fr->cg_cm);
1556 pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
1561 if (!(cr->duty & DUTY_PME))
1563 /* Send particle coordinates to the pme nodes.
1564 * Since this is only implemented for domain decomposition
1565 * and domain decomposition does not use the graph,
1566 * we do not need to worry about shifting.
1569 wallcycle_start(wcycle, ewcPP_PMESENDX);
1571 bBS = (inputrec->nwall == 2);
1574 copy_mat(box, boxs);
1575 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
1578 gmx_pme_send_x(cr, bBS ? boxs : box, x,
1579 mdatoms->nChargePerturbed, lambda[efptCOUL],
1580 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)), step);
1582 wallcycle_stop(wcycle, ewcPP_PMESENDX);
1584 #endif /* GMX_MPI */
1586 /* Communicate coordinates and sum dipole if necessary */
1589 wallcycle_start(wcycle, ewcMOVEX);
1590 if (DOMAINDECOMP(cr))
1592 dd_move_x(cr->dd, box, x);
1596 move_x(cr, x, nrnb);
1598 wallcycle_stop(wcycle, ewcMOVEX);
1601 /* update adress weight beforehand */
1602 if (bStateChanged && bDoAdressWF)
1604 /* need pbc for adress weight calculation with pbc_dx */
1605 set_pbc(&pbc, inputrec->ePBC, box);
1606 if (fr->adress_site == eAdressSITEcog)
1608 update_adress_weights_cog(top->idef.iparams, top->idef.il, x, fr, mdatoms,
1609 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1611 else if (fr->adress_site == eAdressSITEcom)
1613 update_adress_weights_com(fplog, cg0, cg1, &(top->cgs), x, fr, mdatoms,
1614 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1616 else if (fr->adress_site == eAdressSITEatomatom)
1618 update_adress_weights_atom_per_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1619 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1623 update_adress_weights_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1624 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1628 if (NEED_MUTOT(*inputrec))
1635 gmx_sumd(2*DIM, mu, cr);
1637 for (i = 0; i < 2; i++)
1639 for (j = 0; j < DIM; j++)
1641 fr->mu_tot[i][j] = mu[i*DIM + j];
1645 if (fr->efep == efepNO)
1647 copy_rvec(fr->mu_tot[0], mu_tot);
1651 for (j = 0; j < DIM; j++)
1654 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
1659 /* Reset energies */
1660 reset_enerdata(fr, bNS, enerd, MASTER(cr));
1661 clear_rvecs(SHIFTS, fr->fshift);
1665 wallcycle_start(wcycle, ewcNS);
1667 if (graph && bStateChanged)
1669 /* Calculate intramolecular shift vectors to make molecules whole */
1670 mk_mshift(fplog, graph, fr->ePBC, box, x);
1673 /* Do the actual neighbour searching */
1675 groups, top, mdatoms,
1676 cr, nrnb, bFillGrid,
1679 wallcycle_stop(wcycle, ewcNS);
1682 if (inputrec->implicit_solvent && bNS)
1684 make_gb_nblist(cr, inputrec->gb_algorithm,
1685 x, box, fr, &top->idef, graph, fr->born);
1688 if (DOMAINDECOMP(cr))
1690 if (!(cr->duty & DUTY_PME))
1692 wallcycle_start(wcycle, ewcPPDURINGPME);
1693 dd_force_flop_start(cr->dd, nrnb);
1699 /* Enforced rotation has its own cycle counter that starts after the collective
1700 * coordinates have been communicated. It is added to ddCyclF to allow
1701 * for proper load-balancing */
1702 wallcycle_start(wcycle, ewcROT);
1703 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1704 wallcycle_stop(wcycle, ewcROT);
1707 /* Start the force cycle counter.
1708 * This counter is stopped in do_forcelow_level.
1709 * No parallel communication should occur while this counter is running,
1710 * since that will interfere with the dynamic load balancing.
1712 wallcycle_start(wcycle, ewcFORCE);
1716 /* Reset forces for which the virial is calculated separately:
1717 * PME/Ewald forces if necessary */
1718 if (fr->bF_NoVirSum)
1720 if (flags & GMX_FORCE_VIRIAL)
1722 fr->f_novirsum = fr->f_novirsum_alloc;
1725 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1729 clear_rvecs(homenr, fr->f_novirsum+start);
1734 /* We are not calculating the pressure so we do not need
1735 * a separate array for forces that do not contribute
1742 /* Clear the short- and long-range forces */
1743 clear_rvecs(fr->natoms_force_constr, f);
1744 if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1746 clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1749 clear_rvec(fr->vir_diag_posres);
1751 if (inputrec->ePull == epullCONSTRAINT)
1753 clear_pull_forces(inputrec->pull);
1756 /* update QMMMrec, if necessary */
1759 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1762 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
1764 posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
1768 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_FBPOSRES].nr > 0)
1770 /* Flat-bottomed position restraints always require full pbc */
1771 if (!(bStateChanged && bDoAdressWF))
1773 set_pbc(&pbc, inputrec->ePBC, box);
1775 v = fbposres(top->idef.il[F_FBPOSRES].nr, top->idef.il[F_FBPOSRES].iatoms,
1776 top->idef.iparams_fbposres,
1777 (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
1778 inputrec->ePBC == epbcNONE ? NULL : &pbc,
1779 fr->rc_scaling, fr->ePBC, fr->posres_com);
1780 enerd->term[F_FBPOSRES] += v;
1781 inc_nrnb(nrnb, eNR_FBPOSRES, top->idef.il[F_FBPOSRES].nr/2);
1784 /* Compute the bonded and non-bonded energies and optionally forces */
1785 do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
1786 cr, nrnb, wcycle, mdatoms,
1787 x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
1788 &(top->atomtypes), bBornRadii, box,
1789 inputrec->fepvals, lambda,
1790 graph, &(top->excls), fr->mu_tot,
1796 if (do_per_step(step, inputrec->nstcalclr))
1798 /* Add the long range forces to the short range forces */
1799 for (i = 0; i < fr->natoms_force_constr; i++)
1801 rvec_add(fr->f_twin[i], f[i], f[i]);
1806 cycles_force = wallcycle_stop(wcycle, ewcFORCE);
1810 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1813 if (DOMAINDECOMP(cr))
1815 dd_force_flop_stop(cr->dd, nrnb);
1818 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1824 if (IR_ELEC_FIELD(*inputrec))
1826 /* Compute forces due to electric field */
1827 calc_f_el(MASTER(cr) ? field : NULL,
1828 start, homenr, mdatoms->chargeA, fr->f_novirsum,
1829 inputrec->ex, inputrec->et, t);
1832 if (bDoAdressWF && fr->adress_icor == eAdressICThermoForce)
1834 /* Compute thermodynamic force in hybrid AdResS region */
1835 adress_thermo_force(start, homenr, &(top->cgs), x, fr->f_novirsum, fr, mdatoms,
1836 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1839 /* Communicate the forces */
1842 wallcycle_start(wcycle, ewcMOVEF);
1843 if (DOMAINDECOMP(cr))
1845 dd_move_f(cr->dd, f, fr->fshift);
1846 /* Do we need to communicate the separate force array
1847 * for terms that do not contribute to the single sum virial?
1848 * Position restraints and electric fields do not introduce
1849 * inter-cg forces, only full electrostatics methods do.
1850 * When we do not calculate the virial, fr->f_novirsum = f,
1851 * so we have already communicated these forces.
1853 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1854 (flags & GMX_FORCE_VIRIAL))
1856 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1860 /* We should not update the shift forces here,
1861 * since f_twin is already included in f.
1863 dd_move_f(cr->dd, fr->f_twin, NULL);
1868 pd_move_f(cr, f, nrnb);
1871 pd_move_f(cr, fr->f_twin, nrnb);
1874 wallcycle_stop(wcycle, ewcMOVEF);
1877 /* If we have NoVirSum forces, but we do not calculate the virial,
1878 * we sum fr->f_novirsum = f later.
1880 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1882 wallcycle_start(wcycle, ewcVSITESPREAD);
1883 spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1884 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1885 wallcycle_stop(wcycle, ewcVSITESPREAD);
1889 wallcycle_start(wcycle, ewcVSITESPREAD);
1890 spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
1892 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1893 wallcycle_stop(wcycle, ewcVSITESPREAD);
1897 if (flags & GMX_FORCE_VIRIAL)
1899 /* Calculation of the virial must be done after vsites! */
1900 calc_virial(mdatoms->start, mdatoms->homenr, x, f,
1901 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1905 if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1907 pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
1908 f, vir_force, mdatoms, enerd, lambda, t);
1911 /* Add the forces from enforced rotation potentials (if any) */
1914 wallcycle_start(wcycle, ewcROTadd);
1915 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1916 wallcycle_stop(wcycle, ewcROTadd);
1919 if (PAR(cr) && !(cr->duty & DUTY_PME))
1921 /* In case of node-splitting, the PP nodes receive the long-range
1922 * forces, virial and energy from the PME nodes here.
1924 pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
1929 post_process_forces(cr, step, nrnb, wcycle,
1930 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1934 /* Sum the potential energy terms from group contributions */
1935 sum_epot(&(enerd->grpp), enerd->term);
1938 void do_force(FILE *fplog, t_commrec *cr,
1939 t_inputrec *inputrec,
1940 gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1941 gmx_localtop_t *top,
1942 gmx_groups_t *groups,
1943 matrix box, rvec x[], history_t *hist,
1947 gmx_enerdata_t *enerd, t_fcdata *fcd,
1948 real *lambda, t_graph *graph,
1950 gmx_vsite_t *vsite, rvec mu_tot,
1951 double t, FILE *field, gmx_edsam_t ed,
1952 gmx_bool bBornRadii,
1955 /* modify force flag if not doing nonbonded */
1956 if (!fr->bNonbonded)
1958 flags &= ~GMX_FORCE_NONBONDED;
1961 switch (inputrec->cutoff_scheme)
1964 do_force_cutsVERLET(fplog, cr, inputrec,
1980 do_force_cutsGROUP(fplog, cr, inputrec,
1995 gmx_incons("Invalid cut-off scheme passed!");
2000 void do_constrain_first(FILE *fplog, gmx_constr_t constr,
2001 t_inputrec *ir, t_mdatoms *md,
2002 t_state *state, t_commrec *cr, t_nrnb *nrnb,
2003 t_forcerec *fr, gmx_localtop_t *top)
2005 int i, m, start, end;
2006 gmx_large_int_t step;
2007 real dt = ir->delta_t;
2011 snew(savex, state->natoms);
2014 end = md->homenr + start;
2018 fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
2019 start, md->homenr, end);
2021 /* Do a first constraint pass to reset the particles... */
2022 step = ir->init_step;
2025 char buf[STEPSTRSIZE];
2026 fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
2027 gmx_step_str(step, buf));
2031 /* constrain the current position */
2032 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2033 ir, NULL, cr, step, 0, md,
2034 state->x, state->x, NULL,
2035 fr->bMolPBC, state->box,
2036 state->lambda[efptBONDED], &dvdl_dum,
2037 NULL, NULL, nrnb, econqCoord,
2038 ir->epc == epcMTTK, state->veta, state->veta);
2041 /* constrain the initial velocity, and save it */
2042 /* also may be useful if we need the ekin from the halfstep for velocity verlet */
2043 /* might not yet treat veta correctly */
2044 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2045 ir, NULL, cr, step, 0, md,
2046 state->x, state->v, state->v,
2047 fr->bMolPBC, state->box,
2048 state->lambda[efptBONDED], &dvdl_dum,
2049 NULL, NULL, nrnb, econqVeloc,
2050 ir->epc == epcMTTK, state->veta, state->veta);
2052 /* constrain the initial velocities at t-dt/2 */
2053 if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
2055 for (i = start; (i < end); i++)
2057 for (m = 0; (m < DIM); m++)
2059 /* Reverse the velocity */
2060 state->v[i][m] = -state->v[i][m];
2061 /* Store the position at t-dt in savex */
2062 savex[i][m] = state->x[i][m] + dt*state->v[i][m];
2065 /* Shake the positions at t=-dt with the positions at t=0
2066 * as reference coordinates.
2070 char buf[STEPSTRSIZE];
2071 fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
2072 gmx_step_str(step, buf));
2075 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2076 ir, NULL, cr, step, -1, md,
2077 state->x, savex, NULL,
2078 fr->bMolPBC, state->box,
2079 state->lambda[efptBONDED], &dvdl_dum,
2080 state->v, NULL, nrnb, econqCoord,
2081 ir->epc == epcMTTK, state->veta, state->veta);
2083 for (i = start; i < end; i++)
2085 for (m = 0; m < DIM; m++)
2087 /* Re-reverse the velocities */
2088 state->v[i][m] = -state->v[i][m];
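/* Editor's note (added for clarity, not in the original source): the block
 * above generates constrained velocities at t = -dt/2 for leap-frog style
 * integrators: the velocities are negated, the positions are stepped
 * backwards into savex, a coordinate constraint with the t=0 positions as
 * reference yields velocities consistent with the constraints, and the
 * final loop flips the sign back.
 */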
2095 void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
2097 double eners[2], virs[2], enersum, virsum, y0, f, g, h;
2098 double r0, r1, r, rc3, rc9, ea, eb, ec, pa, pb, pc, pd;
2099 double invscale, invscale2, invscale3;
2100 int ri0, ri1, ri, i, offstart, offset;
2101 real scale, *vdwtab, tabfactor, tmp;
2103 fr->enershiftsix = 0;
2104 fr->enershifttwelve = 0;
2105 fr->enerdiffsix = 0;
2106 fr->enerdifftwelve = 0;
2108 fr->virdifftwelve = 0;
2110 if (eDispCorr != edispcNO)
2112 for (i = 0; i < 2; i++)
2117 if ((fr->vdwtype == evdwSWITCH) || (fr->vdwtype == evdwSHIFT))
2119 if (fr->rvdw_switch == 0)
2122 "With dispersion correction rvdw-switch can not be zero "
2123 "for vdw-type = %s", evdw_names[fr->vdwtype]);
2126 scale = fr->nblists[0].table_elec_vdw.scale;
2127 vdwtab = fr->nblists[0].table_vdw.data;
2129 /* Round the cut-offs to exact table values for precision */
2130 ri0 = floor(fr->rvdw_switch*scale);
2131 ri1 = ceil(fr->rvdw*scale);
2137 if (fr->vdwtype == evdwSHIFT)
2139 /* Determine the constant energy shift below rvdw_switch.
2140 * Table has a scale factor since we have scaled it down to compensate
2141 * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
2143 fr->enershiftsix = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
2144 fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
2146 /* Add the constant part from 0 to rvdw_switch.
2147 * This integration from 0 to rvdw_switch overcounts the number
2148 * of interactions by 1, as it also counts the self interaction.
2149 * We will correct for this later.
2151 eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
2152 eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
2154 invscale = 1.0/(scale);
2155 invscale2 = invscale*invscale;
2156 invscale3 = invscale*invscale2;
2158 /* following summation derived from cubic spline definition,
2159 Numerical Recipes in C, second edition, pp. 113-116. Exact
2160 for the cubic spline. We first calculate the negative of
2161 the energy from rvdw to rvdw_switch, assuming that g(r)=1,
2162 and then add the more standard, abrupt cutoff correction to
2163 that result, yielding the long-range correction for a
2164 switched function. We perform both the pressure and energy
2165 loops at the same time for simplicity, as the computational
2168 for (i = 0; i < 2; i++)
2170 enersum = 0.0; virsum = 0.0;
2174 /* Since the dispersion table has been scaled down a factor 6.0 and the repulsion
2175 * a factor 12.0 to compensate for the c6/c12 parameters inside nbfp[] being scaled
2176 * up (to save flops in kernels), we need to correct for this.
2185 for (ri = ri0; ri < ri1; ri++)
2189 eb = 2.0*invscale2*r;
2193 pb = 3.0*invscale2*r;
2194 pc = 3.0*invscale*r*r;
2197 /* this "8" is from the packing in the vdwtab array - perhaps should be #define'ed? */
2198 offset = 8*ri + offstart;
2199 y0 = vdwtab[offset];
2200 f = vdwtab[offset+1];
2201 g = vdwtab[offset+2];
2202 h = vdwtab[offset+3];
2204 enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
2205 virsum += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
2208 enersum *= 4.0*M_PI*tabfactor;
2209 virsum *= 4.0*M_PI*tabfactor;
2210 eners[i] -= enersum;
2214 /* now add the correction for rvdw_switch to infinity */
2215 eners[0] += -4.0*M_PI/(3.0*rc3);
2216 eners[1] += 4.0*M_PI/(9.0*rc9);
2217 virs[0] += 8.0*M_PI/rc3;
2218 virs[1] += -16.0*M_PI/(3.0*rc9);
2220 else if ((fr->vdwtype == evdwCUT) || (fr->vdwtype == evdwUSER))
2222 if (fr->vdwtype == evdwUSER && fplog)
2225 "WARNING: using dispersion correction with user tables\n");
2227 rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
2229 /* Contribution beyond the cut-off */
2230 eners[0] += -4.0*M_PI/(3.0*rc3);
2231 eners[1] += 4.0*M_PI/(9.0*rc9);
2232 if (fr->vdw_modifier == eintmodPOTSHIFT)
2234 /* Contribution within the cut-off */
2235 eners[0] += -4.0*M_PI/(3.0*rc3);
2236 eners[1] += 4.0*M_PI/(3.0*rc9);
2238 /* Contribution beyond the cut-off */
2239 virs[0] += 8.0*M_PI/rc3;
2240 virs[1] += -16.0*M_PI/(3.0*rc9);
2245 "Dispersion correction is not implemented for vdw-type = %s",
2246 evdw_names[fr->vdwtype]);
2248 fr->enerdiffsix = eners[0];
2249 fr->enerdifftwelve = eners[1];
2250 /* The 0.5 is due to the Gromacs definition of the virial */
2251 fr->virdiffsix = 0.5*virs[0];
2252 fr->virdifftwelve = 0.5*virs[1];
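/* Editor's note (illustrative check, not in the original source): the abrupt
 * cut-off tails used above follow from the analytical integrals
 *   int_rc^inf 4 pi r^2 (-r^-6)  dr = -4 pi / (3 rc^3)   (dispersion)
 *   int_rc^inf 4 pi r^2 ( r^-12) dr =  4 pi / (9 rc^9)   (repulsion)
 * and the corresponding virial integrals over r dU/dr give
 *   8 pi / rc^3  and  -16 pi / (3 rc^9),
 * matching the eners[] and virs[] increments; the average C6/C12 and the
 * density factors are applied later, in calc_dispcorr().
 */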
2256 void calc_dispcorr(FILE *fplog, t_inputrec *ir, t_forcerec *fr,
2257 gmx_large_int_t step, int natoms,
2258 matrix box, real lambda, tensor pres, tensor virial,
2259 real *prescorr, real *enercorr, real *dvdlcorr)
2261 gmx_bool bCorrAll, bCorrPres;
2262 real dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
2272 if (ir->eDispCorr != edispcNO)
2274 bCorrAll = (ir->eDispCorr == edispcAllEner ||
2275 ir->eDispCorr == edispcAllEnerPres);
2276 bCorrPres = (ir->eDispCorr == edispcEnerPres ||
2277 ir->eDispCorr == edispcAllEnerPres);
2279 invvol = 1/det(box);
2282 /* Only correct for the interactions with the inserted molecule */
2283 dens = (natoms - fr->n_tpi)*invvol;
2288 dens = natoms*invvol;
2289 ninter = 0.5*natoms;
2292 if (ir->efep == efepNO)
2294 avcsix = fr->avcsix[0];
2295 avctwelve = fr->avctwelve[0];
2299 avcsix = (1 - lambda)*fr->avcsix[0] + lambda*fr->avcsix[1];
2300 avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
2303 enerdiff = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
2304 *enercorr += avcsix*enerdiff;
2306 if (ir->efep != efepNO)
2308 dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
2312 enerdiff = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
2313 *enercorr += avctwelve*enerdiff;
2314 if (fr->efep != efepNO)
2316 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
2322 svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
2323 if (ir->eDispCorr == edispcAllEnerPres)
2325 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
2327 /* The factor 2 is because of the Gromacs virial definition */
2328 spres = -2.0*invvol*svir*PRESFAC;
2330 for (m = 0; m < DIM; m++)
2332 virial[m][m] += svir;
2333 pres[m][m] += spres;
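/* Editor's note (added for clarity, not in the original source): with the
 * GROMACS convention P = 2/V (Ekin - Xi), a virial correction of svir on
 * each diagonal element changes the pressure by -2*svir/V; PRESFAC converts
 * from internal units (kJ mol^-1 nm^-3) to bar, giving the spres term added
 * to pres[m][m] above.
 */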
2338 /* Can't currently control when it prints; for now, just print when debugging */
2343 fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
2349 "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
2350 *enercorr, spres, svir);
2354 fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
2358 if (fr->bSepDVDL && do_per_step(step, ir->nstlog))
2360 gmx_print_sepdvdl(fplog, "Dispersion correction", *enercorr, dvdlambda);
2362 if (fr->efep != efepNO)
2364 *dvdlcorr += dvdlambda;
2369 void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
2370 t_graph *graph, rvec x[])
2374 fprintf(fplog, "Removing pbc first time\n");
2376 calc_shifts(box, fr->shift_vec);
2379 mk_mshift(fplog, graph, fr->ePBC, box, x);
2382 p_graph(debug, "do_pbc_first 1", graph);
2384 shift_self(graph, box, x);
2385 /* By doing an extra mk_mshift the molecules that are broken
2386 * because they were e.g. imported from another software
2387 * will be made whole again. Such are the healing powers
2390 mk_mshift(fplog, graph, fr->ePBC, box, x);
2393 p_graph(debug, "do_pbc_first 2", graph);
2398 fprintf(fplog, "Done rmpbc\n");
2402 static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2403 gmx_mtop_t *mtop, rvec x[],
2408 gmx_molblock_t *molb;
2410 if (bFirst && fplog)
2412 fprintf(fplog, "Removing pbc first time\n");
2417 for (mb = 0; mb < mtop->nmolblock; mb++)
2419 molb = &mtop->molblock[mb];
2420 if (molb->natoms_mol == 1 ||
2421 (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
2423 /* Just one atom or charge group in the molecule, no PBC required */
2424 as += molb->nmol*molb->natoms_mol;
2428 /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
2429 mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
2430 0, molb->natoms_mol, FALSE, FALSE, graph);
2432 for (mol = 0; mol < molb->nmol; mol++)
2434 mk_mshift(fplog, graph, ePBC, box, x+as);
2436 shift_self(graph, box, x+as);
2437 /* The molecule is whole now.
2438 * We don't need the second mk_mshift call as in do_pbc_first,
2439 * since we no longer need this graph.
2442 as += molb->natoms_mol;
2450 void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
2451 gmx_mtop_t *mtop, rvec x[])
2453 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
2456 void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2457 gmx_mtop_t *mtop, rvec x[])
2459 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
2462 void finish_run(FILE *fplog, t_commrec *cr,
2463 t_inputrec *inputrec,
2464 t_nrnb nrnb[], gmx_wallcycle_t wcycle,
2465 gmx_runtime_t *runtime,
2466 wallclock_gpu_t *gputimes,
2467 gmx_bool bWriteStat)
2470 t_nrnb *nrnb_tot = NULL;
2474 wallcycle_sum(cr, wcycle);
2480 MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
2481 cr->mpi_comm_mysim);
2489 #if defined(GMX_MPI) && !defined(GMX_THREAD_MPI)
2492 /* reduce nodetime over all MPI processes in the current simulation */
2494 MPI_Allreduce(&runtime->proctime, &sum, 1, MPI_DOUBLE, MPI_SUM,
2495 cr->mpi_comm_mysim);
2496 runtime->proctime = sum;
2502 print_flop(fplog, nrnb_tot, &nbfs, &mflop);
2509 if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
2511 print_dd_statistics(cr, inputrec, fplog);
2523 snew(nrnb_all, cr->nnodes);
2524 nrnb_all[0] = *nrnb;
2525 for (s = 1; s < cr->nnodes; s++)
2527 MPI_Recv(nrnb_all[s].n, eNRNB, MPI_DOUBLE, s, 0,
2528 cr->mpi_comm_mysim, &stat);
2530 pr_load(fplog, cr, nrnb_all);
2535 MPI_Send(nrnb->n, eNRNB, MPI_DOUBLE, MASTERRANK(cr), 0,
2536 cr->mpi_comm_mysim);
2543 wallcycle_print(fplog, cr->nnodes, cr->npmenodes, runtime->realtime,
2546 if (EI_DYNAMICS(inputrec->eI))
2548 delta_t = inputrec->delta_t;
2557 print_perf(fplog, runtime->proctime, runtime->realtime,
2558 runtime->nsteps_done, delta_t, nbfs, mflop);
2562 print_perf(stderr, runtime->proctime, runtime->realtime,
2563 runtime->nsteps_done, delta_t, nbfs, mflop);
2568 extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, real *lambda, double *lam0)
2570 /* this function works, but could probably use a logic rewrite to keep all the different
2571 types of efep straight. */
2574 t_lambda *fep = ir->fepvals;
2576 if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
2578 for (i = 0; i < efptNR; i++)
2590 *fep_state = fep->init_fep_state; /* this might overwrite the checkpoint
2591 if checkpoint is set -- a kludge is in for now
2593 for (i = 0; i < efptNR; i++)
2595 /* overwrite lambda state with init_lambda for now for backwards compatibility */
2596 if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
2598 lambda[i] = fep->init_lambda;
2601 lam0[i] = lambda[i];
2606 lambda[i] = fep->all_lambda[i][*fep_state];
2609 lam0[i] = lambda[i];
2615 /* need to rescale control temperatures to match current state */
2616 for (i = 0; i < ir->opts.ngtc; i++)
2618 if (ir->opts.ref_t[i] > 0)
2620 ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
2626 /* Report the initial lambda components to the log */
2629 fprintf(fplog, "Initial vector of lambda components:[ ");
2630 for (i = 0; i < efptNR; i++)
2632 fprintf(fplog, "%10.4f ", lambda[i]);
2634 fprintf(fplog, "]\n");
2640 void init_md(FILE *fplog,
2641 t_commrec *cr, t_inputrec *ir, const output_env_t oenv,
2642 double *t, double *t0,
2643 real *lambda, int *fep_state, double *lam0,
2644 t_nrnb *nrnb, gmx_mtop_t *mtop,
2646 int nfile, const t_filenm fnm[],
2647 gmx_mdoutf_t **outf, t_mdebin **mdebin,
2648 tensor force_vir, tensor shake_vir, rvec mu_tot,
2649 gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags)
2654 /* Initial values */
2655 *t = *t0 = ir->init_t;
2658 for (i = 0; i < ir->opts.ngtc; i++)
2660 /* set bSimAnn if any group is being annealed */
2661 if (ir->opts.annealing[i] != eannNO)
2668 update_annealing_target_temp(&(ir->opts), ir->init_t);
2671 /* Initialize lambda variables */
2672 initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
2676 *upd = init_update(ir);
2682 *vcm = init_vcm(fplog, &mtop->groups, ir);
2685 if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
2687 if (ir->etc == etcBERENDSEN)
2689 please_cite(fplog, "Berendsen84a");
2691 if (ir->etc == etcVRESCALE)
2693 please_cite(fplog, "Bussi2007a");
2701 *outf = init_mdoutf(nfile, fnm, Flags, cr, ir, oenv);
2703 *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : (*outf)->fp_ene,
2704 mtop, ir, (*outf)->fp_dhdl);
2709 please_cite(fplog, "Fritsch12");
2710 please_cite(fplog, "Junghans10");
2712 /* Initialize variables */
2713 clear_mat(force_vir);
2714 clear_mat(shake_vir);