1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
4 * This source code is part of
8 * GROningen MAchine for Chemical Simulations
11 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
12 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
13 * Copyright (c) 2001-2004, The GROMACS development team,
14 * check out http://www.gromacs.org for more information.
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version 2
19 * of the License, or (at your option) any later version.
21 * If you want to redistribute modifications, please consider that
22 * scientific software is very special. Version control is crucial -
23 * bugs must be traceable. We will be happy to consider code for
24 * inclusion in the official distribution, but derived work must not
25 * be called official GROMACS. Details are found in the README & COPYING
26 * files - if they are missing, get the official version at www.gromacs.org.
28 * To help us fund GROMACS development, we humbly ask that you cite
29 * the papers on the package - you can find them in the top README file.
31 * For more info, check our website at http://www.gromacs.org
34 * GROwing Monsters And Cloning Shrimps
41 #include <catamount/dclock.h>
47 #ifdef HAVE_SYS_TIME_H
60 #include "chargegroup.h"
83 #include "pull_rotation.h"
84 #include "gmx_random.h"
87 #include "gmx_wallcycle.h"
89 #include "nbnxn_atomdata.h"
90 #include "nbnxn_search.h"
91 #include "nbnxn_kernels/nbnxn_kernel_ref.h"
92 #include "nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
93 #include "nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
94 #include "nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
96 #include "gromacs/utility/gmxmpi.h"
101 #include "nbnxn_cuda_data_mgmt.h"
102 #include "nbnxn_cuda/nbnxn_cuda.h"
107 #ifdef HAVE_GETTIMEOFDAY
111 // TODO later: gettimeofday() is deprecated by POSIX. We could use
112 // clock_gettime in POSIX (which also offers nanosecond resolution
113 // if the hardware supports it), but that requires linking with
114 // -lrt. Maybe a better option will come along before we have to
115 // really change from gettimeofday().
116 gettimeofday(&t, NULL);
118 seconds = (double) t.tv_sec + 1e-6*(double)t.tv_usec;
124 seconds = time(NULL);
130 // TODO Remove this. gmx_gettime returns double, so this is now useless
131 #define difftime(end, start) ((double)(end)-(double)(start))
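/* Note: this macro shadows libc difftime(3), whose prototype takes two
 * time_t arguments, so that existing difftime(end, start) calls keep
 * working on the double timestamps from gmx_gettime() without an
 * implicit (truncating) conversion to time_t.
 */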
133 void print_time(FILE *out, gmx_runtime_t *runtime, gmx_large_int_t step,
134 t_inputrec *ir, t_commrec gmx_unused *cr)
137 char timebuf[STRLEN];
141 #ifndef GMX_THREAD_MPI
147 fprintf(out, "step %s", gmx_step_str(step, buf));
if (step >= ir->nstlist)
150 runtime->last = gmx_gettime();
151 dt = difftime(runtime->last, runtime->real);
152 runtime->time_per_step = dt/(step - ir->init_step + 1);
154 dt = (ir->nsteps + ir->init_step - step)*runtime->time_per_step;
160 finish = (time_t) (runtime->last + dt);
161 gmx_ctime_r(&finish, timebuf, STRLEN);
162 sprintf(buf, "%s", timebuf);
163 buf[strlen(buf)-1] = '\0';
164 fprintf(out, ", will finish %s", buf);
168 fprintf(out, ", remaining runtime: %5d s ", (int)dt);
173 fprintf(out, " performance: %.1f ns/day ",
174 ir->delta_t/1000*24*60*60/runtime->time_per_step);
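/* Unit check for the expression above: delta_t is in ps, so delta_t/1000
 * is ns per step; multiplying by 24*60*60 s/day and dividing by
 * time_per_step (s/step) gives ns/day. E.g. 2 fs steps taking 0.002 s
 * of wall time each yield 0.002/1000*86400/0.002 = 86.4 ns/day.
 */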
177 #ifndef GMX_THREAD_MPI
187 // TODO eliminate this
192 static double set_proctime(gmx_runtime_t *runtime)
198 prev = runtime->proc;
199 runtime->proc = dclock();
201 diff = runtime->proc - prev;
205 prev = runtime->proc;
206 runtime->proc = clock();
208 diff = (double)(runtime->proc - prev)/(double)CLOCKS_PER_SEC;
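/* With a 32-bit clock_t and CLOCKS_PER_SEC == 1e6 the counter wraps
 * roughly every 72 minutes, so diff can come out negative; such
 * samples are discarded below.
 */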
212 /* The counter has probably looped, ignore this data */
219 void runtime_start(gmx_runtime_t *runtime)
221 runtime->real = gmx_gettime();
223 set_proctime(runtime);
224 runtime->realtime = 0;
225 runtime->proctime = 0;
227 runtime->time_per_step = 0;
230 void runtime_end(gmx_runtime_t *runtime)
236 runtime->proctime += set_proctime(runtime);
237 runtime->realtime = now - runtime->real;
241 void runtime_upd_proc(gmx_runtime_t *runtime)
243 runtime->proctime += set_proctime(runtime);
246 void print_date_and_time(FILE *fplog, int nodeid, const char *title,
247 const gmx_runtime_t *runtime)
250 char timebuf[STRLEN];
251 char time_string[STRLEN];
258 tmptime = (time_t) runtime->real;
259 gmx_ctime_r(&tmptime, timebuf, STRLEN);
263 tmptime = (time_t) gmx_gettime();
264 gmx_ctime_r(&tmptime, timebuf, STRLEN);
266 for (i = 0; timebuf[i] >= ' '; i++)
268 time_string[i] = timebuf[i];
270 time_string[i] = '\0';
272 fprintf(fplog, "%s on node %d %s\n", title, nodeid, time_string);
276 static void sum_forces(int start, int end, rvec f[], rvec flr[])
282 pr_rvecs(debug, 0, "fsr", f+start, end-start);
283 pr_rvecs(debug, 0, "flr", flr+start, end-start);
285 for (i = start; (i < end); i++)
287 rvec_inc(f[i], flr[i]);
292 * calc_f_el calculates forces due to an electric field.
* force is in kJ mol^-1 nm^-1: the field is converted (via FIELDFAC)
* to kJ mol^-1 nm^-1 e^-1, so multiplying by the charge in units of e
* gives the force.
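* As a unit example: FIELDFAC (= FARADAY/KILO, roughly 96.5 in MD units)
* converts a field of 1 V/nm acting on a charge of +1 e into a force of
* about 96.5 kJ mol^-1 nm^-1.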
* Et[] contains the parameters for the time-dependent
* part of the field (not yet used).
* Ex[] contains the parameters for
* the spatially dependent part of the field. You can have cool periodic
* fields in principle, but only a constant field is supported at the moment.
302 * The function should return the energy due to the electric field
303 * (if any) but for now returns 0.
306 * There can be problems with the virial.
307 * Since the field is not self-consistent this is unavoidable.
308 * For neutral molecules the virial is correct within this approximation.
309 * For neutral systems with many charged molecules the error is small.
310 * But for systems with a net charge or a few charged molecules
311 * the error can be significant when the field is high.
* Solution: implement a self-consistent electric field into PME.
314 static void calc_f_el(FILE *fp, int start, int homenr,
315 real charge[], rvec f[],
316 t_cosines Ex[], t_cosines Et[], double t)
322 for (m = 0; (m < DIM); m++)
329 Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));
333 Ext[m] = cos(Et[m].a[0]*t);
342 /* Convert the field strength from V/nm to MD-units */
343 Ext[m] *= Ex[m].a[0]*FIELDFAC;
344 for (i = start; (i < start+homenr); i++)
346 f[i][m] += charge[i]*Ext[m];
356 fprintf(fp, "%10g %10g %10g %10g #FIELD\n", t,
357 Ext[XX]/FIELDFAC, Ext[YY]/FIELDFAC, Ext[ZZ]/FIELDFAC);
361 static void calc_virial(int start, int homenr, rvec x[], rvec f[],
362 tensor vir_part, t_graph *graph, matrix box,
363 t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
368 /* The short-range virial from surrounding boxes */
370 calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
371 inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
373 /* Calculate partial virial, for local atoms only, based on short range.
374 * Total virial is computed in global_stat, called from do_md
376 f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
377 inc_nrnb(nrnb, eNR_VIRIAL, homenr);
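/* Both pieces use the GROMACS virial convention Xi = -1/2 sum_i r_i (x) F_i;
 * the same factor 1/2 shows up in calc_enervirdiff below.
 */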
379 /* Add position restraint contribution */
380 for (i = 0; i < DIM; i++)
382 vir_part[i][i] += fr->vir_diag_posres[i];
385 /* Add wall contribution */
386 for (i = 0; i < DIM; i++)
388 vir_part[i][ZZ] += fr->vir_wall_z[i];
393 pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
397 static void posres_wrapper(FILE *fplog,
403 matrix box, rvec x[],
404 gmx_enerdata_t *enerd,
412 /* Position restraints always require full pbc */
413 set_pbc(&pbc, ir->ePBC, box);
415 v = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
416 top->idef.iparams_posres,
417 (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
418 ir->ePBC == epbcNONE ? NULL : &pbc,
419 lambda[efptRESTRAINT], &dvdl,
420 fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
423 gmx_print_sepdvdl(fplog, interaction_function[F_POSRES].longname, v, dvdl);
425 enerd->term[F_POSRES] += v;
/* If just the force constant changes, the FEP term is linear,
* but if the reference position changes, it is not.
429 enerd->dvdl_nonlin[efptRESTRAINT] += dvdl;
430 inc_nrnb(nrnb, eNR_POSRES, top->idef.il[F_POSRES].nr/2);
432 if ((ir->fepvals->n_lambda > 0) && (flags & GMX_FORCE_DHDL))
434 for (i = 0; i < enerd->n_lambda; i++)
436 real dvdl_dum, lambda_dum;
438 lambda_dum = (i == 0 ? lambda[efptRESTRAINT] : ir->fepvals->all_lambda[efptRESTRAINT][i-1]);
439 v = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
440 top->idef.iparams_posres,
441 (const rvec*)x, NULL, NULL,
442 ir->ePBC == epbcNONE ? NULL : &pbc, lambda_dum, &dvdl,
443 fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
444 enerd->enerpart_lambda[i] += v;
449 static void pull_potential_wrapper(FILE *fplog,
453 matrix box, rvec x[],
457 gmx_enerdata_t *enerd,
464 /* Calculate the center of mass forces, this requires communication,
465 * which is why pull_potential is called close to other communication.
466 * The virial contribution is calculated directly,
467 * which is why we call pull_potential after calc_virial.
469 set_pbc(&pbc, ir->ePBC, box);
471 enerd->term[F_COM_PULL] +=
472 pull_potential(ir->ePull, ir->pull, mdatoms, &pbc,
473 cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
476 gmx_print_sepdvdl(fplog, "Com pull", enerd->term[F_COM_PULL], dvdl);
478 enerd->dvdl_lin[efptRESTRAINT] += dvdl;
481 static void pme_receive_force_ener(FILE *fplog,
484 gmx_wallcycle_t wcycle,
485 gmx_enerdata_t *enerd,
489 float cycles_ppdpme, cycles_seppme;
491 cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
492 dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
494 /* In case of node-splitting, the PP nodes receive the long-range
495 * forces, virial and energy from the PME nodes here.
497 wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
499 gmx_pme_receive_f(cr, fr->f_novirsum, fr->vir_el_recip, &e, &dvdl,
503 gmx_print_sepdvdl(fplog, "PME mesh", e, dvdl);
505 enerd->term[F_COUL_RECIP] += e;
506 enerd->dvdl_lin[efptCOUL] += dvdl;
509 dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
511 wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
514 static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
515 gmx_large_int_t step, real pforce, rvec *x, rvec *f)
519 char buf[STEPSTRSIZE];
522 for (i = md->start; i < md->start+md->homenr; i++)
525 /* We also catch NAN, if the compiler does not optimize this away. */
526 if (fn2 >= pf2 || fn2 != fn2)
528 fprintf(fp, "step %s atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
529 gmx_step_str(step, buf),
530 ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], sqrt(fn2));
535 static void post_process_forces(t_commrec *cr,
536 gmx_large_int_t step,
537 t_nrnb *nrnb, gmx_wallcycle_t wcycle,
539 matrix box, rvec x[],
544 t_forcerec *fr, gmx_vsite_t *vsite,
551 /* Spread the mesh force on virtual sites to the other particles...
* This is parallelized. MPI communication is performed
553 * if the constructing atoms aren't local.
555 wallcycle_start(wcycle, ewcVSITESPREAD);
556 spread_vsite_f(vsite, x, fr->f_novirsum, NULL,
557 (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
559 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
560 wallcycle_stop(wcycle, ewcVSITESPREAD);
562 if (flags & GMX_FORCE_VIRIAL)
564 /* Now add the forces, this is local */
567 sum_forces(0, fr->f_novirsum_n, f, fr->f_novirsum);
571 sum_forces(mdatoms->start, mdatoms->start+mdatoms->homenr,
574 if (EEL_FULL(fr->eeltype))
576 /* Add the mesh contribution to the virial */
577 m_add(vir_force, fr->vir_el_recip, vir_force);
581 pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
586 if (fr->print_force >= 0)
588 print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
592 static void do_nb_verlet(t_forcerec *fr,
593 interaction_const_t *ic,
594 gmx_enerdata_t *enerd,
595 int flags, int ilocality,
599 int nnbl, kernel_type, enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
601 nonbonded_verlet_group_t *nbvg;
604 if (!(flags & GMX_FORCE_NONBONDED))
606 /* skip non-bonded calculation */
610 nbvg = &fr->nbv->grp[ilocality];
612 /* CUDA kernel launch overhead is already timed separately */
613 if (fr->cutoff_scheme != ecutsVERLET)
615 gmx_incons("Invalid cut-off scheme passed!");
618 bCUDA = (nbvg->kernel_type == nbnxnk8x8x8_CUDA);
622 wallcycle_sub_start(wcycle, ewcsNONBONDED);
624 switch (nbvg->kernel_type)
626 case nbnxnk4x4_PlainC:
627 nbnxn_kernel_ref(&nbvg->nbl_lists,
633 enerd->grpp.ener[egCOULSR],
635 enerd->grpp.ener[egBHAMSR] :
636 enerd->grpp.ener[egLJSR]);
639 case nbnxnk4xN_SIMD_4xN:
640 nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
647 enerd->grpp.ener[egCOULSR],
649 enerd->grpp.ener[egBHAMSR] :
650 enerd->grpp.ener[egLJSR]);
652 case nbnxnk4xN_SIMD_2xNN:
653 nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
660 enerd->grpp.ener[egCOULSR],
662 enerd->grpp.ener[egBHAMSR] :
663 enerd->grpp.ener[egLJSR]);
666 case nbnxnk8x8x8_CUDA:
667 nbnxn_cuda_launch_kernel(fr->nbv->cu_nbv, nbvg->nbat, flags, ilocality);
670 case nbnxnk8x8x8_PlainC:
671 nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
676 nbvg->nbat->out[0].f,
678 enerd->grpp.ener[egCOULSR],
680 enerd->grpp.ener[egBHAMSR] :
681 enerd->grpp.ener[egLJSR]);
685 gmx_incons("Invalid nonbonded kernel type passed!");
690 wallcycle_sub_stop(wcycle, ewcsNONBONDED);
693 if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
695 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
697 else if ((!bCUDA && nbvg->ewald_excl == ewaldexclAnalytical) ||
698 (bCUDA && nbnxn_cuda_is_kernel_ewald_analytical(fr->nbv->cu_nbv)))
700 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
704 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
706 enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
707 if (flags & GMX_FORCE_ENERGY)
709 /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
710 enr_nbnxn_kernel_ljc += 1;
711 enr_nbnxn_kernel_lj += 1;
714 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
715 nbvg->nbl_lists.natpair_ljq);
716 inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
717 nbvg->nbl_lists.natpair_lj);
718 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
719 nbvg->nbl_lists.natpair_q);
722 void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
723 t_inputrec *inputrec,
724 gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
726 gmx_groups_t gmx_unused *groups,
727 matrix box, rvec x[], history_t *hist,
731 gmx_enerdata_t *enerd, t_fcdata *fcd,
732 real *lambda, t_graph *graph,
733 t_forcerec *fr, interaction_const_t *ic,
734 gmx_vsite_t *vsite, rvec mu_tot,
735 double t, FILE *field, gmx_edsam_t ed,
743 gmx_bool bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
744 gmx_bool bDoLongRange, bDoForces, bSepLRF, bUseGPU, bUseOrEmulGPU;
745 gmx_bool bDiffKernels = FALSE;
747 rvec vzero, box_diag;
749 float cycles_pme, cycles_force;
750 nonbonded_verlet_t *nbv;
754 nb_kernel_type = fr->nbv->grp[0].kernel_type;
756 start = mdatoms->start;
757 homenr = mdatoms->homenr;
759 bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));
761 clear_mat(vir_force);
764 if (DOMAINDECOMP(cr))
766 cg1 = cr->dd->ncg_tot;
777 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
778 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
779 bFillGrid = (bNS && bStateChanged);
780 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
781 bDoLongRange = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DO_LR));
782 bDoForces = (flags & GMX_FORCE_FORCES);
783 bSepLRF = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
784 bUseGPU = fr->nbv->bUseGPU;
785 bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);
789 update_forcerec(fr, box);
791 if (NEED_MUTOT(*inputrec))
793 /* Calculate total (local) dipole moment in a temporary common array.
794 * This makes it possible to sum them over nodes faster.
796 calc_mu(start, homenr,
797 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
802 if (fr->ePBC != epbcNONE)
804 /* Compute shift vectors every step,
805 * because of pressure coupling or box deformation!
807 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
809 calc_shifts(box, fr->shift_vec);
814 put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
815 inc_nrnb(nrnb, eNR_SHIFTX, homenr);
817 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
819 unshift_self(graph, box, x);
823 nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
824 fr->shift_vec, nbv->grp[0].nbat);
827 if (!(cr->duty & DUTY_PME))
829 /* Send particle coordinates to the pme nodes.
830 * Since this is only implemented for domain decomposition
831 * and domain decomposition does not use the graph,
832 * we do not need to worry about shifting.
835 wallcycle_start(wcycle, ewcPP_PMESENDX);
837 bBS = (inputrec->nwall == 2);
841 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
844 gmx_pme_send_x(cr, bBS ? boxs : box, x,
845 mdatoms->nChargePerturbed, lambda[efptCOUL],
846 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)), step);
848 wallcycle_stop(wcycle, ewcPP_PMESENDX);
852 /* do gridding for pair search */
855 if (graph && bStateChanged)
857 /* Calculate intramolecular shift vectors to make molecules whole */
858 mk_mshift(fplog, graph, fr->ePBC, box, x);
862 box_diag[XX] = box[XX][XX];
863 box_diag[YY] = box[YY][YY];
864 box_diag[ZZ] = box[ZZ][ZZ];
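/* Only the diagonal is needed for the gridding: GROMACS box matrices are
 * triangular, so box[XX][YY], box[XX][ZZ] and box[YY][ZZ] are all zero.
 */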
866 wallcycle_start(wcycle, ewcNS);
869 wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
870 nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
872 0, mdatoms->homenr, -1, fr->cginfo, x,
874 nbv->grp[eintLocal].kernel_type,
875 nbv->grp[eintLocal].nbat);
876 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
880 wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
881 nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
883 nbv->grp[eintNonlocal].kernel_type,
884 nbv->grp[eintNonlocal].nbat);
885 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
888 if (nbv->ngrp == 1 ||
889 nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
891 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
892 nbv->nbs, mdatoms, fr->cginfo);
896 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
897 nbv->nbs, mdatoms, fr->cginfo);
898 nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
899 nbv->nbs, mdatoms, fr->cginfo);
901 wallcycle_stop(wcycle, ewcNS);
904 /* initialize the GPU atom data and copy shift vector */
909 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
910 nbnxn_cuda_init_atomdata(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
911 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
914 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
915 nbnxn_cuda_upload_shiftvec(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
916 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
919 /* do local pair search */
922 wallcycle_start_nocount(wcycle, ewcNS);
923 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
924 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
927 nbv->min_ci_balanced,
928 &nbv->grp[eintLocal].nbl_lists,
930 nbv->grp[eintLocal].kernel_type,
932 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
936 /* initialize local pair-list on the GPU */
937 nbnxn_cuda_init_pairlist(nbv->cu_nbv,
938 nbv->grp[eintLocal].nbl_lists.nbl[0],
941 wallcycle_stop(wcycle, ewcNS);
945 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
946 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
947 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
948 nbv->grp[eintLocal].nbat);
949 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
950 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
955 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
956 /* launch local nonbonded F on GPU */
957 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
959 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
/* Communicate coordinates and sum the dipole if necessary,
then do the non-local pair search */
964 if (DOMAINDECOMP(cr))
966 bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
967 nbv->grp[eintLocal].kernel_type);
971 /* With GPU+CPU non-bonded calculations we need to copy
972 * the local coordinates to the non-local nbat struct
973 * (in CPU format) as the non-local kernel call also
974 * calculates the local - non-local interactions.
976 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
977 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
978 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
979 nbv->grp[eintNonlocal].nbat);
980 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
981 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
986 wallcycle_start_nocount(wcycle, ewcNS);
987 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
991 nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
994 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
997 nbv->min_ci_balanced,
998 &nbv->grp[eintNonlocal].nbl_lists,
1000 nbv->grp[eintNonlocal].kernel_type,
1003 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1005 if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_CUDA)
1007 /* initialize non-local pair-list on the GPU */
1008 nbnxn_cuda_init_pairlist(nbv->cu_nbv,
1009 nbv->grp[eintNonlocal].nbl_lists.nbl[0],
1012 wallcycle_stop(wcycle, ewcNS);
1016 wallcycle_start(wcycle, ewcMOVEX);
1017 dd_move_x(cr->dd, box, x);
1019 /* When we don't need the total dipole we sum it in global_stat */
1020 if (bStateChanged && NEED_MUTOT(*inputrec))
1022 gmx_sumd(2*DIM, mu, cr);
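/* mu stores the A- and B-state dipole components back to back, so a
 * single reduction of 2*DIM doubles covers both states; they are
 * unpacked into fr->mu_tot below.
 */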
1024 wallcycle_stop(wcycle, ewcMOVEX);
1026 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1027 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
1028 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
1029 nbv->grp[eintNonlocal].nbat);
1030 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
1031 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1034 if (bUseGPU && !bDiffKernels)
1036 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
1037 /* launch non-local nonbonded F on GPU */
1038 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
1040 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1046 /* launch D2H copy-back F */
1047 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1048 if (DOMAINDECOMP(cr) && !bDiffKernels)
1050 nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintNonlocal].nbat,
1051 flags, eatNonlocal);
1053 nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintLocal].nbat,
1055 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1058 if (bStateChanged && NEED_MUTOT(*inputrec))
1062 gmx_sumd(2*DIM, mu, cr);
1065 for (i = 0; i < 2; i++)
1067 for (j = 0; j < DIM; j++)
1069 fr->mu_tot[i][j] = mu[i*DIM + j];
1073 if (fr->efep == efepNO)
1075 copy_rvec(fr->mu_tot[0], mu_tot);
1079 for (j = 0; j < DIM; j++)
1082 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
1083 lambda[efptCOUL]*fr->mu_tot[1][j];
1087 /* Reset energies */
1088 reset_enerdata(fr, bNS, enerd, MASTER(cr));
1089 clear_rvecs(SHIFTS, fr->fshift);
1091 if (DOMAINDECOMP(cr))
1093 if (!(cr->duty & DUTY_PME))
1095 wallcycle_start(wcycle, ewcPPDURINGPME);
1096 dd_force_flop_start(cr->dd, nrnb);
1102 /* Enforced rotation has its own cycle counter that starts after the collective
1103 * coordinates have been communicated. It is added to ddCyclF to allow
1104 * for proper load-balancing */
1105 wallcycle_start(wcycle, ewcROT);
1106 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1107 wallcycle_stop(wcycle, ewcROT);
1110 /* Start the force cycle counter.
1111 * This counter is stopped in do_forcelow_level.
1112 * No parallel communication should occur while this counter is running,
1113 * since that will interfere with the dynamic load balancing.
1115 wallcycle_start(wcycle, ewcFORCE);
1118 /* Reset forces for which the virial is calculated separately:
1119 * PME/Ewald forces if necessary */
1120 if (fr->bF_NoVirSum)
1122 if (flags & GMX_FORCE_VIRIAL)
1124 fr->f_novirsum = fr->f_novirsum_alloc;
1127 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1131 clear_rvecs(homenr, fr->f_novirsum+start);
1136 /* We are not calculating the pressure so we do not need
1137 * a separate array for forces that do not contribute
1144 /* Clear the short- and long-range forces */
1145 clear_rvecs(fr->natoms_force_constr, f);
1146 if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1148 clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1151 clear_rvec(fr->vir_diag_posres);
1154 if (inputrec->ePull == epullCONSTRAINT)
1156 clear_pull_forces(inputrec->pull);
1159 /* We calculate the non-bonded forces, when done on the CPU, here.
* We do this before calling do_force_lowlevel, since there the bonded
* forces are calculated before PME, which does communication.
1162 * With this order, non-bonded and bonded force calculation imbalance
1163 * can be balanced out by the domain decomposition load balancing.
1168 /* Maybe we should move this into do_force_lowlevel */
1169 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
1173 if (!bUseOrEmulGPU || bDiffKernels)
1177 if (DOMAINDECOMP(cr))
1179 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
1180 bDiffKernels ? enbvClearFYes : enbvClearFNo,
1190 aloc = eintNonlocal;
1193 /* Add all the non-bonded force to the normal force array.
* This can be split into a local and a non-local part when overlapping
* communication with computation under domain decomposition.
1197 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1198 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1199 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1200 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
1201 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1202 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1203 wallcycle_start_nocount(wcycle, ewcFORCE);
1205 /* if there are multiple fshift output buffers reduce them */
1206 if ((flags & GMX_FORCE_VIRIAL) &&
1207 nbv->grp[aloc].nbl_lists.nnbl > 1)
1209 nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
1214 /* update QMMMrec, if necessary */
1217 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1220 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
1222 posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
1226 /* Compute the bonded and non-bonded energies and optionally forces */
1227 do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
1228 cr, nrnb, wcycle, mdatoms,
1229 x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
1230 &(top->atomtypes), bBornRadii, box,
1231 inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
1232 flags, &cycles_pme);
1236 if (do_per_step(step, inputrec->nstcalclr))
1238 /* Add the long range forces to the short range forces */
1239 for (i = 0; i < fr->natoms_force_constr; i++)
1241 rvec_add(fr->f_twin[i], f[i], f[i]);
1246 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1250 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1253 if (bUseOrEmulGPU && !bDiffKernels)
1255 /* wait for non-local forces (or calculate in emulation mode) */
1256 if (DOMAINDECOMP(cr))
1260 wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
1261 nbnxn_cuda_wait_gpu(nbv->cu_nbv,
1262 nbv->grp[eintNonlocal].nbat,
1264 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1266 cycles_force += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
1270 wallcycle_start_nocount(wcycle, ewcFORCE);
1271 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
1273 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1275 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1276 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1277 /* skip the reduction if there was no non-local work to do */
if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
1280 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
1281 nbv->grp[eintNonlocal].nbat, f);
1283 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1284 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1290 /* Communicate the forces */
1293 wallcycle_start(wcycle, ewcMOVEF);
1294 if (DOMAINDECOMP(cr))
1296 dd_move_f(cr->dd, f, fr->fshift);
1297 /* Do we need to communicate the separate force array
1298 * for terms that do not contribute to the single sum virial?
1299 * Position restraints and electric fields do not introduce
1300 * inter-cg forces, only full electrostatics methods do.
1301 * When we do not calculate the virial, fr->f_novirsum = f,
1302 * so we have already communicated these forces.
1304 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1305 (flags & GMX_FORCE_VIRIAL))
1307 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1311 /* We should not update the shift forces here,
1312 * since f_twin is already included in f.
1314 dd_move_f(cr->dd, fr->f_twin, NULL);
1317 wallcycle_stop(wcycle, ewcMOVEF);
1323 /* wait for local forces (or calculate in emulation mode) */
1326 wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
1327 nbnxn_cuda_wait_gpu(nbv->cu_nbv,
1328 nbv->grp[eintLocal].nbat,
1330 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1332 wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
1334 /* now clear the GPU outputs while we finish the step on the CPU */
1336 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1337 nbnxn_cuda_clear_outputs(nbv->cu_nbv, flags);
1338 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1342 wallcycle_start_nocount(wcycle, ewcFORCE);
1343 do_nb_verlet(fr, ic, enerd, flags, eintLocal,
1344 DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
1346 wallcycle_stop(wcycle, ewcFORCE);
1348 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1349 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1350 if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
/* skip the reduction if there was no local work to do */
1353 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
1354 nbv->grp[eintLocal].nbat, f);
1356 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1357 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1360 if (DOMAINDECOMP(cr))
1362 dd_force_flop_stop(cr->dd, nrnb);
1365 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1371 if (IR_ELEC_FIELD(*inputrec))
1373 /* Compute forces due to electric field */
1374 calc_f_el(MASTER(cr) ? field : NULL,
1375 start, homenr, mdatoms->chargeA, fr->f_novirsum,
1376 inputrec->ex, inputrec->et, t);
1379 /* If we have NoVirSum forces, but we do not calculate the virial,
* we sum fr->f_novirsum (== f) later.
1382 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1384 wallcycle_start(wcycle, ewcVSITESPREAD);
1385 spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1386 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1387 wallcycle_stop(wcycle, ewcVSITESPREAD);
1391 wallcycle_start(wcycle, ewcVSITESPREAD);
1392 spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
1394 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1395 wallcycle_stop(wcycle, ewcVSITESPREAD);
1399 if (flags & GMX_FORCE_VIRIAL)
1401 /* Calculation of the virial must be done after vsites! */
1402 calc_virial(mdatoms->start, mdatoms->homenr, x, f,
1403 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1407 if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1409 pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
1410 f, vir_force, mdatoms, enerd, lambda, t);
1413 /* Add the forces from enforced rotation potentials (if any) */
1416 wallcycle_start(wcycle, ewcROTadd);
1417 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1418 wallcycle_stop(wcycle, ewcROTadd);
1421 if (PAR(cr) && !(cr->duty & DUTY_PME))
1423 /* In case of node-splitting, the PP nodes receive the long-range
1424 * forces, virial and energy from the PME nodes here.
1426 pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
1431 post_process_forces(cr, step, nrnb, wcycle,
1432 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1436 /* Sum the potential energy terms from group contributions */
1437 sum_epot(&(enerd->grpp), enerd->term);
1440 void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
1441 t_inputrec *inputrec,
1442 gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1443 gmx_localtop_t *top,
1444 gmx_groups_t *groups,
1445 matrix box, rvec x[], history_t *hist,
1449 gmx_enerdata_t *enerd, t_fcdata *fcd,
1450 real *lambda, t_graph *graph,
1451 t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
1452 double t, FILE *field, gmx_edsam_t ed,
1453 gmx_bool bBornRadii,
1459 gmx_bool bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
1460 gmx_bool bDoLongRangeNS, bDoForces, bDoPotential, bSepLRF;
1461 gmx_bool bDoAdressWF;
1463 rvec vzero, box_diag;
1464 real e, v, dvdlambda[efptNR];
1466 float cycles_pme, cycles_force;
1468 start = mdatoms->start;
1469 homenr = mdatoms->homenr;
1471 bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));
1473 clear_mat(vir_force);
1477 pd_cg_range(cr, &cg0, &cg1);
1482 if (DOMAINDECOMP(cr))
1484 cg1 = cr->dd->ncg_tot;
1496 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
1497 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
1498 /* Should we update the long-range neighborlists at this step? */
1499 bDoLongRangeNS = fr->bTwinRange && bNS;
/* Should we fill the neighbor-search grid at this step? */
1501 bFillGrid = (bNS && bStateChanged);
1502 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
1503 bDoForces = (flags & GMX_FORCE_FORCES);
1504 bDoPotential = (flags & GMX_FORCE_ENERGY);
1505 bSepLRF = ((inputrec->nstcalclr > 1) && bDoForces &&
1506 (flags & GMX_FORCE_SEPLRF) && (flags & GMX_FORCE_DO_LR));
1508 /* should probably move this to the forcerec since it doesn't change */
bDoAdressWF = (fr->adress_type != eAdressOff);
1513 update_forcerec(fr, box);
1515 if (NEED_MUTOT(*inputrec))
1517 /* Calculate total (local) dipole moment in a temporary common array.
1518 * This makes it possible to sum them over nodes faster.
1520 calc_mu(start, homenr,
1521 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
1526 if (fr->ePBC != epbcNONE)
1528 /* Compute shift vectors every step,
1529 * because of pressure coupling or box deformation!
1531 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
1533 calc_shifts(box, fr->shift_vec);
1538 put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
1539 &(top->cgs), x, fr->cg_cm);
1540 inc_nrnb(nrnb, eNR_CGCM, homenr);
1541 inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
1543 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1545 unshift_self(graph, box, x);
1550 calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
1551 inc_nrnb(nrnb, eNR_CGCM, homenr);
1558 move_cgcm(fplog, cr, fr->cg_cm);
1562 pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
1567 if (!(cr->duty & DUTY_PME))
1569 /* Send particle coordinates to the pme nodes.
1570 * Since this is only implemented for domain decomposition
1571 * and domain decomposition does not use the graph,
1572 * we do not need to worry about shifting.
1575 wallcycle_start(wcycle, ewcPP_PMESENDX);
1577 bBS = (inputrec->nwall == 2);
1580 copy_mat(box, boxs);
1581 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
1584 gmx_pme_send_x(cr, bBS ? boxs : box, x,
1585 mdatoms->nChargePerturbed, lambda[efptCOUL],
1586 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)), step);
1588 wallcycle_stop(wcycle, ewcPP_PMESENDX);
1590 #endif /* GMX_MPI */
1592 /* Communicate coordinates and sum dipole if necessary */
1595 wallcycle_start(wcycle, ewcMOVEX);
1596 if (DOMAINDECOMP(cr))
1598 dd_move_x(cr->dd, box, x);
1602 move_x(cr, x, nrnb);
1604 wallcycle_stop(wcycle, ewcMOVEX);
1607 /* update adress weight beforehand */
1608 if (bStateChanged && bDoAdressWF)
1610 /* need pbc for adress weight calculation with pbc_dx */
1611 set_pbc(&pbc, inputrec->ePBC, box);
1612 if (fr->adress_site == eAdressSITEcog)
1614 update_adress_weights_cog(top->idef.iparams, top->idef.il, x, fr, mdatoms,
1615 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1617 else if (fr->adress_site == eAdressSITEcom)
1619 update_adress_weights_com(fplog, cg0, cg1, &(top->cgs), x, fr, mdatoms,
1620 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1622 else if (fr->adress_site == eAdressSITEatomatom)
1624 update_adress_weights_atom_per_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1625 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1629 update_adress_weights_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1630 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1634 if (NEED_MUTOT(*inputrec))
1641 gmx_sumd(2*DIM, mu, cr);
1643 for (i = 0; i < 2; i++)
1645 for (j = 0; j < DIM; j++)
1647 fr->mu_tot[i][j] = mu[i*DIM + j];
1651 if (fr->efep == efepNO)
1653 copy_rvec(fr->mu_tot[0], mu_tot);
1657 for (j = 0; j < DIM; j++)
1660 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
1665 /* Reset energies */
1666 reset_enerdata(fr, bNS, enerd, MASTER(cr));
1667 clear_rvecs(SHIFTS, fr->fshift);
1671 wallcycle_start(wcycle, ewcNS);
1673 if (graph && bStateChanged)
1675 /* Calculate intramolecular shift vectors to make molecules whole */
1676 mk_mshift(fplog, graph, fr->ePBC, box, x);
1679 /* Do the actual neighbour searching */
1681 groups, top, mdatoms,
1682 cr, nrnb, bFillGrid,
1685 wallcycle_stop(wcycle, ewcNS);
1688 if (inputrec->implicit_solvent && bNS)
1690 make_gb_nblist(cr, inputrec->gb_algorithm,
1691 x, box, fr, &top->idef, graph, fr->born);
1694 if (DOMAINDECOMP(cr))
1696 if (!(cr->duty & DUTY_PME))
1698 wallcycle_start(wcycle, ewcPPDURINGPME);
1699 dd_force_flop_start(cr->dd, nrnb);
1705 /* Enforced rotation has its own cycle counter that starts after the collective
1706 * coordinates have been communicated. It is added to ddCyclF to allow
1707 * for proper load-balancing */
1708 wallcycle_start(wcycle, ewcROT);
1709 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1710 wallcycle_stop(wcycle, ewcROT);
1713 /* Start the force cycle counter.
1714 * This counter is stopped in do_forcelow_level.
1715 * No parallel communication should occur while this counter is running,
1716 * since that will interfere with the dynamic load balancing.
1718 wallcycle_start(wcycle, ewcFORCE);
1722 /* Reset forces for which the virial is calculated separately:
1723 * PME/Ewald forces if necessary */
1724 if (fr->bF_NoVirSum)
1726 if (flags & GMX_FORCE_VIRIAL)
1728 fr->f_novirsum = fr->f_novirsum_alloc;
1731 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1735 clear_rvecs(homenr, fr->f_novirsum+start);
1740 /* We are not calculating the pressure so we do not need
1741 * a separate array for forces that do not contribute
1748 /* Clear the short- and long-range forces */
1749 clear_rvecs(fr->natoms_force_constr, f);
1750 if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1752 clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1755 clear_rvec(fr->vir_diag_posres);
1757 if (inputrec->ePull == epullCONSTRAINT)
1759 clear_pull_forces(inputrec->pull);
1762 /* update QMMMrec, if necessary */
1765 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1768 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
1770 posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
1774 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_FBPOSRES].nr > 0)
1776 /* Flat-bottomed position restraints always require full pbc */
1777 if (!(bStateChanged && bDoAdressWF))
1779 set_pbc(&pbc, inputrec->ePBC, box);
1781 v = fbposres(top->idef.il[F_FBPOSRES].nr, top->idef.il[F_FBPOSRES].iatoms,
1782 top->idef.iparams_fbposres,
1783 (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
1784 inputrec->ePBC == epbcNONE ? NULL : &pbc,
1785 fr->rc_scaling, fr->ePBC, fr->posres_com);
1786 enerd->term[F_FBPOSRES] += v;
1787 inc_nrnb(nrnb, eNR_FBPOSRES, top->idef.il[F_FBPOSRES].nr/2);
1790 /* Compute the bonded and non-bonded energies and optionally forces */
1791 do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
1792 cr, nrnb, wcycle, mdatoms,
1793 x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
1794 &(top->atomtypes), bBornRadii, box,
1795 inputrec->fepvals, lambda,
1796 graph, &(top->excls), fr->mu_tot,
1802 if (do_per_step(step, inputrec->nstcalclr))
1804 /* Add the long range forces to the short range forces */
1805 for (i = 0; i < fr->natoms_force_constr; i++)
1807 rvec_add(fr->f_twin[i], f[i], f[i]);
1812 cycles_force = wallcycle_stop(wcycle, ewcFORCE);
1816 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1819 if (DOMAINDECOMP(cr))
1821 dd_force_flop_stop(cr->dd, nrnb);
1824 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1830 if (IR_ELEC_FIELD(*inputrec))
1832 /* Compute forces due to electric field */
1833 calc_f_el(MASTER(cr) ? field : NULL,
1834 start, homenr, mdatoms->chargeA, fr->f_novirsum,
1835 inputrec->ex, inputrec->et, t);
1838 if (bDoAdressWF && fr->adress_icor == eAdressICThermoForce)
1840 /* Compute thermodynamic force in hybrid AdResS region */
1841 adress_thermo_force(start, homenr, &(top->cgs), x, fr->f_novirsum, fr, mdatoms,
1842 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1845 /* Communicate the forces */
1848 wallcycle_start(wcycle, ewcMOVEF);
1849 if (DOMAINDECOMP(cr))
1851 dd_move_f(cr->dd, f, fr->fshift);
1852 /* Do we need to communicate the separate force array
1853 * for terms that do not contribute to the single sum virial?
1854 * Position restraints and electric fields do not introduce
1855 * inter-cg forces, only full electrostatics methods do.
1856 * When we do not calculate the virial, fr->f_novirsum = f,
1857 * so we have already communicated these forces.
1859 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1860 (flags & GMX_FORCE_VIRIAL))
1862 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1866 /* We should not update the shift forces here,
1867 * since f_twin is already included in f.
1869 dd_move_f(cr->dd, fr->f_twin, NULL);
1874 pd_move_f(cr, f, nrnb);
1877 pd_move_f(cr, fr->f_twin, nrnb);
1880 wallcycle_stop(wcycle, ewcMOVEF);
1883 /* If we have NoVirSum forces, but we do not calculate the virial,
* we sum fr->f_novirsum (== f) later.
1886 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1888 wallcycle_start(wcycle, ewcVSITESPREAD);
1889 spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1890 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1891 wallcycle_stop(wcycle, ewcVSITESPREAD);
1895 wallcycle_start(wcycle, ewcVSITESPREAD);
1896 spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
1898 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1899 wallcycle_stop(wcycle, ewcVSITESPREAD);
1903 if (flags & GMX_FORCE_VIRIAL)
1905 /* Calculation of the virial must be done after vsites! */
1906 calc_virial(mdatoms->start, mdatoms->homenr, x, f,
1907 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1911 if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1913 pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
1914 f, vir_force, mdatoms, enerd, lambda, t);
1917 /* Add the forces from enforced rotation potentials (if any) */
1920 wallcycle_start(wcycle, ewcROTadd);
1921 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1922 wallcycle_stop(wcycle, ewcROTadd);
1925 if (PAR(cr) && !(cr->duty & DUTY_PME))
1927 /* In case of node-splitting, the PP nodes receive the long-range
1928 * forces, virial and energy from the PME nodes here.
1930 pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
1935 post_process_forces(cr, step, nrnb, wcycle,
1936 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1940 /* Sum the potential energy terms from group contributions */
1941 sum_epot(&(enerd->grpp), enerd->term);
1944 void do_force(FILE *fplog, t_commrec *cr,
1945 t_inputrec *inputrec,
1946 gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1947 gmx_localtop_t *top,
1948 gmx_groups_t *groups,
1949 matrix box, rvec x[], history_t *hist,
1953 gmx_enerdata_t *enerd, t_fcdata *fcd,
1954 real *lambda, t_graph *graph,
1956 gmx_vsite_t *vsite, rvec mu_tot,
1957 double t, FILE *field, gmx_edsam_t ed,
1958 gmx_bool bBornRadii,
1961 /* modify force flag if not doing nonbonded */
1962 if (!fr->bNonbonded)
1964 flags &= ~GMX_FORCE_NONBONDED;
1967 switch (inputrec->cutoff_scheme)
1970 do_force_cutsVERLET(fplog, cr, inputrec,
1986 do_force_cutsGROUP(fplog, cr, inputrec,
2001 gmx_incons("Invalid cut-off scheme passed!");
2006 void do_constrain_first(FILE *fplog, gmx_constr_t constr,
2007 t_inputrec *ir, t_mdatoms *md,
2008 t_state *state, t_commrec *cr, t_nrnb *nrnb,
2009 t_forcerec *fr, gmx_localtop_t *top)
2011 int i, m, start, end;
2012 gmx_large_int_t step;
2013 real dt = ir->delta_t;
2017 snew(savex, state->natoms);
2020 end = md->homenr + start;
2024 fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
2025 start, md->homenr, end);
2027 /* Do a first constrain to reset particles... */
2028 step = ir->init_step;
2031 char buf[STEPSTRSIZE];
2032 fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
2033 gmx_step_str(step, buf));
2037 /* constrain the current position */
2038 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2039 ir, NULL, cr, step, 0, md,
2040 state->x, state->x, NULL,
2041 fr->bMolPBC, state->box,
2042 state->lambda[efptBONDED], &dvdl_dum,
2043 NULL, NULL, nrnb, econqCoord,
2044 ir->epc == epcMTTK, state->veta, state->veta);
/* constrain the initial velocity, and save it */
2048 /* also may be useful if we need the ekin from the halfstep for velocity verlet */
2049 /* might not yet treat veta correctly */
2050 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2051 ir, NULL, cr, step, 0, md,
2052 state->x, state->v, state->v,
2053 fr->bMolPBC, state->box,
2054 state->lambda[efptBONDED], &dvdl_dum,
2055 NULL, NULL, nrnb, econqVeloc,
2056 ir->epc == epcMTTK, state->veta, state->veta);
/* constrain the initial velocities at t-dt/2 */
2059 if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
2061 for (i = start; (i < end); i++)
2063 for (m = 0; (m < DIM); m++)
2065 /* Reverse the velocity */
2066 state->v[i][m] = -state->v[i][m];
/* Store the position at t-dt in savex */
2068 savex[i][m] = state->x[i][m] + dt*state->v[i][m];
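/* With v already reversed, savex = x + dt*v equals x(0) - dt*v_original,
 * i.e. the unconstrained positions one time step back.
 */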
2071 /* Shake the positions at t=-dt with the positions at t=0
2072 * as reference coordinates.
2076 char buf[STEPSTRSIZE];
2077 fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
2078 gmx_step_str(step, buf));
2081 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2082 ir, NULL, cr, step, -1, md,
2083 state->x, savex, NULL,
2084 fr->bMolPBC, state->box,
2085 state->lambda[efptBONDED], &dvdl_dum,
2086 state->v, NULL, nrnb, econqCoord,
2087 ir->epc == epcMTTK, state->veta, state->veta);
2089 for (i = start; i < end; i++)
2091 for (m = 0; m < DIM; m++)
2093 /* Re-reverse the velocities */
2094 state->v[i][m] = -state->v[i][m];
2101 void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
2103 double eners[2], virs[2], enersum, virsum, y0, f, g, h;
2104 double r0, r1, r, rc3, rc9, ea, eb, ec, pa, pb, pc, pd;
2105 double invscale, invscale2, invscale3;
2106 int ri0, ri1, ri, i, offstart, offset;
2107 real scale, *vdwtab, tabfactor, tmp;
2109 fr->enershiftsix = 0;
2110 fr->enershifttwelve = 0;
2111 fr->enerdiffsix = 0;
2112 fr->enerdifftwelve = 0;
2114 fr->virdifftwelve = 0;
2116 if (eDispCorr != edispcNO)
2118 for (i = 0; i < 2; i++)
2123 if ((fr->vdwtype == evdwSWITCH) || (fr->vdwtype == evdwSHIFT))
2125 if (fr->rvdw_switch == 0)
2128 "With dispersion correction rvdw-switch can not be zero "
2129 "for vdw-type = %s", evdw_names[fr->vdwtype]);
2132 scale = fr->nblists[0].table_elec_vdw.scale;
2133 vdwtab = fr->nblists[0].table_vdw.data;
2135 /* Round the cut-offs to exact table values for precision */
2136 ri0 = floor(fr->rvdw_switch*scale);
2137 ri1 = ceil(fr->rvdw*scale);
2143 if (fr->vdwtype == evdwSHIFT)
2145 /* Determine the constant energy shift below rvdw_switch.
2146 * Table has a scale factor since we have scaled it down to compensate
2147 * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
2149 fr->enershiftsix = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
2150 fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
2152 /* Add the constant part from 0 to rvdw_switch.
2153 * This integration from 0 to rvdw_switch overcounts the number
2154 * of interactions by 1, as it also counts the self interaction.
2155 * We will correct for this later.
2157 eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
2158 eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
2160 invscale = 1.0/(scale);
2161 invscale2 = invscale*invscale;
2162 invscale3 = invscale*invscale2;
2164 /* following summation derived from cubic spline definition,
Numerical Recipes in C, second edition, pp. 113-116. Exact
2166 for the cubic spline. We first calculate the negative of
2167 the energy from rvdw to rvdw_switch, assuming that g(r)=1,
2168 and then add the more standard, abrupt cutoff correction to
2169 that result, yielding the long-range correction for a
2170 switched function. We perform both the pressure and energy
2171 loops at the same time for simplicity, as the computational
2174 for (i = 0; i < 2; i++)
2176 enersum = 0.0; virsum = 0.0;
/* Since the dispersion table has been scaled down by a factor of 6.0 and the
* repulsion by a factor of 12.0 to compensate for the c6/c12 parameters inside
* nbfp[] being scaled up (to save flops in the kernels), we need to correct for this.
2191 for (ri = ri0; ri < ri1; ri++)
2195 eb = 2.0*invscale2*r;
2199 pb = 3.0*invscale2*r;
2200 pc = 3.0*invscale*r*r;
2203 /* this "8" is from the packing in the vdwtab array - perhaps should be #define'ed? */
2204 offset = 8*ri + offstart;
2205 y0 = vdwtab[offset];
2206 f = vdwtab[offset+1];
2207 g = vdwtab[offset+2];
2208 h = vdwtab[offset+3];
2210 enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
2211 virsum += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
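/* These sums are the analytic integrals over one table interval t in [0,1]
 * of r^2*y(r) (enersum, with the 4 pi tabfactor applied below) and of the
 * matching r^3*dy/dr virial term (virsum): y is the cubic spline
 * y(t) = y0 + f*t + g*t^2 + h*t^3, and the ea..ec / pa..pd prefactors
 * above are the expansions of r(t)^2 dr and r(t)^3 in t.
 */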
2214 enersum *= 4.0*M_PI*tabfactor;
2215 virsum *= 4.0*M_PI*tabfactor;
2216 eners[i] -= enersum;
2220 /* now add the correction for rvdw_switch to infinity */
2221 eners[0] += -4.0*M_PI/(3.0*rc3);
2222 eners[1] += 4.0*M_PI/(9.0*rc9);
2223 virs[0] += 8.0*M_PI/rc3;
2224 virs[1] += -16.0*M_PI/(3.0*rc9);
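/* These are the standard analytic tail corrections:
 * int_rc^inf 4 pi r^2 (-1/r^6 ) dr = -4 pi/(3 rc^3) and
 * int_rc^inf 4 pi r^2 ( 1/r^12) dr =  4 pi/(9 rc^9);
 * the virial terms are the same integrals with the integrand
 * replaced by r*dV/dr.
 */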
2226 else if ((fr->vdwtype == evdwCUT) || (fr->vdwtype == evdwUSER))
2228 if (fr->vdwtype == evdwUSER && fplog)
2231 "WARNING: using dispersion correction with user tables\n");
2233 rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
2235 /* Contribution beyond the cut-off */
2236 eners[0] += -4.0*M_PI/(3.0*rc3);
2237 eners[1] += 4.0*M_PI/(9.0*rc9);
2238 if (fr->vdw_modifier == eintmodPOTSHIFT)
2240 /* Contribution within the cut-off */
2241 eners[0] += -4.0*M_PI/(3.0*rc3);
2242 eners[1] += 4.0*M_PI/(3.0*rc9);
2244 /* Contribution beyond the cut-off */
2245 virs[0] += 8.0*M_PI/rc3;
2246 virs[1] += -16.0*M_PI/(3.0*rc9);
2251 "Dispersion correction is not implemented for vdw-type = %s",
2252 evdw_names[fr->vdwtype]);
2254 fr->enerdiffsix = eners[0];
2255 fr->enerdifftwelve = eners[1];
2256 /* The 0.5 is due to the Gromacs definition of the virial */
2257 fr->virdiffsix = 0.5*virs[0];
2258 fr->virdifftwelve = 0.5*virs[1];
2262 void calc_dispcorr(FILE *fplog, t_inputrec *ir, t_forcerec *fr,
2263 gmx_large_int_t step, int natoms,
2264 matrix box, real lambda, tensor pres, tensor virial,
2265 real *prescorr, real *enercorr, real *dvdlcorr)
2267 gmx_bool bCorrAll, bCorrPres;
2268 real dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
2278 if (ir->eDispCorr != edispcNO)
2280 bCorrAll = (ir->eDispCorr == edispcAllEner ||
2281 ir->eDispCorr == edispcAllEnerPres);
2282 bCorrPres = (ir->eDispCorr == edispcEnerPres ||
2283 ir->eDispCorr == edispcAllEnerPres);
2285 invvol = 1/det(box);
2288 /* Only correct for the interactions with the inserted molecule */
2289 dens = (natoms - fr->n_tpi)*invvol;
2294 dens = natoms*invvol;
2295 ninter = 0.5*natoms;
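/* ninter*dens = natoms^2/(2V): each atom interacts with the average
 * density, and the factor 0.5 avoids double-counting pairs.
 */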
2298 if (ir->efep == efepNO)
2300 avcsix = fr->avcsix[0];
2301 avctwelve = fr->avctwelve[0];
2305 avcsix = (1 - lambda)*fr->avcsix[0] + lambda*fr->avcsix[1];
2306 avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
2309 enerdiff = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
2310 *enercorr += avcsix*enerdiff;
2312 if (ir->efep != efepNO)
2314 dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
2318 enerdiff = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
2319 *enercorr += avctwelve*enerdiff;
2320 if (fr->efep != efepNO)
2322 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
2328 svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
2329 if (ir->eDispCorr == edispcAllEnerPres)
2331 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
2333 /* The factor 2 is because of the Gromacs virial definition */
2334 spres = -2.0*invvol*svir*PRESFAC;
2336 for (m = 0; m < DIM; m++)
2338 virial[m][m] += svir;
2339 pres[m][m] += spres;
/* Can't currently control when this prints; for now, just print when debugging */
2349 fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
2355 "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
2356 *enercorr, spres, svir);
2360 fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
2364 if (fr->bSepDVDL && do_per_step(step, ir->nstlog))
2366 gmx_print_sepdvdl(fplog, "Dispersion correction", *enercorr, dvdlambda);
2368 if (fr->efep != efepNO)
2370 *dvdlcorr += dvdlambda;
2375 void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
2376 t_graph *graph, rvec x[])
2380 fprintf(fplog, "Removing pbc first time\n");
2382 calc_shifts(box, fr->shift_vec);
2385 mk_mshift(fplog, graph, fr->ePBC, box, x);
2388 p_graph(debug, "do_pbc_first 1", graph);
2390 shift_self(graph, box, x);
2391 /* By doing an extra mk_mshift the molecules that are broken
* because they were e.g. imported from other software
2393 * will be made whole again. Such are the healing powers
2396 mk_mshift(fplog, graph, fr->ePBC, box, x);
2399 p_graph(debug, "do_pbc_first 2", graph);
2404 fprintf(fplog, "Done rmpbc\n");
2408 static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2409 gmx_mtop_t *mtop, rvec x[],
2414 gmx_molblock_t *molb;
2416 if (bFirst && fplog)
2418 fprintf(fplog, "Removing pbc first time\n");
2423 for (mb = 0; mb < mtop->nmolblock; mb++)
2425 molb = &mtop->molblock[mb];
2426 if (molb->natoms_mol == 1 ||
2427 (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
2429 /* Just one atom or charge group in the molecule, no PBC required */
2430 as += molb->nmol*molb->natoms_mol;
/* Pass NULL instead of fplog to avoid graph prints for each molecule type */
2435 mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
2436 0, molb->natoms_mol, FALSE, FALSE, graph);
2438 for (mol = 0; mol < molb->nmol; mol++)
2440 mk_mshift(fplog, graph, ePBC, box, x+as);
2442 shift_self(graph, box, x+as);
2443 /* The molecule is whole now.
2444 * We don't need the second mk_mshift call as in do_pbc_first,
2445 * since we no longer need this graph.
2448 as += molb->natoms_mol;
2456 void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
2457 gmx_mtop_t *mtop, rvec x[])
2459 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
2462 void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2463 gmx_mtop_t *mtop, rvec x[])
2465 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
2468 void finish_run(FILE *fplog, t_commrec *cr,
2469 t_inputrec *inputrec,
2470 t_nrnb nrnb[], gmx_wallcycle_t wcycle,
2471 gmx_runtime_t *runtime,
2472 wallclock_gpu_t *gputimes,
2473 gmx_bool bWriteStat)
2476 t_nrnb *nrnb_tot = NULL;
2480 wallcycle_sum(cr, wcycle);
2486 MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
2487 cr->mpi_comm_mysim);
2495 #if defined(GMX_MPI) && !defined(GMX_THREAD_MPI)
2498 /* reduce nodetime over all MPI processes in the current simulation */
2500 MPI_Allreduce(&runtime->proctime, &sum, 1, MPI_DOUBLE, MPI_SUM,
2501 cr->mpi_comm_mysim);
2502 runtime->proctime = sum;
2508 print_flop(fplog, nrnb_tot, &nbfs, &mflop);
2515 if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
2517 print_dd_statistics(cr, inputrec, fplog);
2529 snew(nrnb_all, cr->nnodes);
2530 nrnb_all[0] = *nrnb;
2531 for (s = 1; s < cr->nnodes; s++)
2533 MPI_Recv(nrnb_all[s].n, eNRNB, MPI_DOUBLE, s, 0,
2534 cr->mpi_comm_mysim, &stat);
2536 pr_load(fplog, cr, nrnb_all);
2541 MPI_Send(nrnb->n, eNRNB, MPI_DOUBLE, MASTERRANK(cr), 0,
2542 cr->mpi_comm_mysim);
2549 wallcycle_print(fplog, cr->nnodes, cr->npmenodes, runtime->realtime,
2552 if (EI_DYNAMICS(inputrec->eI))
2554 delta_t = inputrec->delta_t;
2563 print_perf(fplog, runtime->proctime, runtime->realtime,
2564 runtime->nsteps_done, delta_t, nbfs, mflop);
2568 print_perf(stderr, runtime->proctime, runtime->realtime,
2569 runtime->nsteps_done, delta_t, nbfs, mflop);
2574 extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, real *lambda, double *lam0)
2576 /* this function works, but could probably use a logic rewrite to keep all the different
2577 types of efep straight. */
2580 t_lambda *fep = ir->fepvals;
2582 if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
2584 for (i = 0; i < efptNR; i++)
*fep_state = fep->init_fep_state; /* this might overwrite the checkpoint
if checkpoint is set -- a kludge is in for now to prevent this */
2599 for (i = 0; i < efptNR; i++)
2601 /* overwrite lambda state with init_lambda for now for backwards compatibility */
if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
2604 lambda[i] = fep->init_lambda;
2607 lam0[i] = lambda[i];
2612 lambda[i] = fep->all_lambda[i][*fep_state];
2615 lam0[i] = lambda[i];
2621 /* need to rescale control temperatures to match current state */
2622 for (i = 0; i < ir->opts.ngtc; i++)
2624 if (ir->opts.ref_t[i] > 0)
2626 ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
2632 /* Send to the log the information on the current lambdas */
2635 fprintf(fplog, "Initial vector of lambda components:[ ");
2636 for (i = 0; i < efptNR; i++)
2638 fprintf(fplog, "%10.4f ", lambda[i]);
2640 fprintf(fplog, "]\n");
2646 void init_md(FILE *fplog,
2647 t_commrec *cr, t_inputrec *ir, const output_env_t oenv,
2648 double *t, double *t0,
2649 real *lambda, int *fep_state, double *lam0,
2650 t_nrnb *nrnb, gmx_mtop_t *mtop,
2652 int nfile, const t_filenm fnm[],
2653 gmx_mdoutf_t **outf, t_mdebin **mdebin,
2654 tensor force_vir, tensor shake_vir, rvec mu_tot,
2655 gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags)
2660 /* Initial values */
2661 *t = *t0 = ir->init_t;
2664 for (i = 0; i < ir->opts.ngtc; i++)
2666 /* set bSimAnn if any group is being annealed */
2667 if (ir->opts.annealing[i] != eannNO)
2674 update_annealing_target_temp(&(ir->opts), ir->init_t);
2677 /* Initialize lambda variables */
2678 initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
2682 *upd = init_update(ir);
2688 *vcm = init_vcm(fplog, &mtop->groups, ir);
2691 if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
2693 if (ir->etc == etcBERENDSEN)
2695 please_cite(fplog, "Berendsen84a");
2697 if (ir->etc == etcVRESCALE)
2699 please_cite(fplog, "Bussi2007a");
2707 *outf = init_mdoutf(nfile, fnm, Flags, cr, ir, oenv);
2709 *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : (*outf)->fp_ene,
2710 mtop, ir, (*outf)->fp_dhdl);
2715 please_cite(fplog, "Fritsch12");
2716 please_cite(fplog, "Junghans10");
/* Initialize variables */
2719 clear_mat(force_vir);
2720 clear_mat(shake_vir);