2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team,
6 * check out http://www.gromacs.org for more information.
7 * Copyright (c) 2012,2013, by the GROMACS development team, led by
8 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
9 * others, as listed in the AUTHORS file in the top-level source
10 * directory and at http://www.gromacs.org.
12 * GROMACS is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public License
14 * as published by the Free Software Foundation; either version 2.1
15 * of the License, or (at your option) any later version.
17 * GROMACS is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with GROMACS; if not, see
24 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 * If you want to redistribute modifications to GROMACS, please
28 * consider that scientific software is very special. Version
29 * control is crucial - bugs must be traceable. We will be happy to
30 * consider code for inclusion in the official distribution, but
31 * derived work must not be called official GROMACS. Details are found
32 * in the README & COPYING files - if they are missing, get the
33 * official version at http://www.gromacs.org.
35 * To help us fund GROMACS development, we humbly ask that you cite
36 * the research papers on the package. Check out http://www.gromacs.org.
43 #include <catamount/dclock.h>
49 #ifdef HAVE_SYS_TIME_H
53 #include "visibility.h"
63 #include "chargegroup.h"
86 #include "pull_rotation.h"
87 #include "gmx_random.h"
88 #include "mpelogging.h"
91 #include "gmx_wallcycle.h"
93 #include "nbnxn_atomdata.h"
94 #include "nbnxn_search.h"
95 #include "nbnxn_kernels/nbnxn_kernel_ref.h"
96 #include "nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
97 #include "nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
98 #include "nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
103 #ifdef GMX_THREAD_MPI
110 #include "nbnxn_cuda_data_mgmt.h"
111 #include "nbnxn_cuda/nbnxn_cuda.h"
114 typedef struct gmx_timeprint {
119 /* Portable version of ctime_r implemented in src/gmxlib/string2.c, but we do not want it declared in public installed headers */
122 gmx_ctime_r(const time_t *clock, char *buf, int n);
128 #ifdef HAVE_GETTIMEOFDAY
132 gettimeofday(&t, NULL);
134 seconds = (double) t.tv_sec + 1e-6*(double)t.tv_usec;
140 seconds = time(NULL);
147 #define difftime(end, start) ((double)(end)-(double)(start))
149 void print_time(FILE *out, gmx_runtime_t *runtime, gmx_large_int_t step,
150 t_inputrec *ir, t_commrec *cr)
153 char timebuf[STRLEN];
157 #ifndef GMX_THREAD_MPI
163 fprintf(out, "step %s", gmx_step_str(step, buf));
164 if ((step >= ir->nstlist))
166 runtime->last = gmx_gettime();
167 dt = difftime(runtime->last, runtime->real);
168 runtime->time_per_step = dt/(step - ir->init_step + 1);
170 dt = (ir->nsteps + ir->init_step - step)*runtime->time_per_step;
176 finish = (time_t) (runtime->last + dt);
177 gmx_ctime_r(&finish, timebuf, STRLEN);
178 sprintf(buf, "%s", timebuf);
179 buf[strlen(buf)-1] = '\0';
180 fprintf(out, ", will finish %s", buf);
184 fprintf(out, ", remaining runtime: %5d s ", (int)dt);
189 fprintf(out, " performance: %.1f ns/day ",
190 ir->delta_t/1000*24*60*60/runtime->time_per_step);
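/* Sketch (not part of the original code): the ns/day figure printed above is
 * a plain unit conversion, since delta_t is in ps and time_per_step is the
 * wall-clock time in seconds per MD step:
 *
 *   ns_per_day = (ir->delta_t/1000.0) * 24*60*60 / runtime->time_per_step;
 *
 * e.g. delta_t = 0.002 ps at 0.01728 s of wall time per step gives 10 ns/day.
 */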
193 #ifndef GMX_THREAD_MPI
207 static double set_proctime(gmx_runtime_t *runtime)
213 prev = runtime->proc;
214 runtime->proc = dclock();
216 diff = runtime->proc - prev;
220 prev = runtime->proc;
221 runtime->proc = clock();
223 diff = (double)(runtime->proc - prev)/(double)CLOCKS_PER_SEC;
227 /* The counter has probably looped, ignore this data */
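/* Note (assumption, not from the original code): clock() returns a clock_t
 * that can wrap around; with a 32-bit clock_t and CLOCKS_PER_SEC = 1e6 this
 * happens roughly every 2^32/1e6 ~ 4295 s (half that if clock_t is signed),
 * which is presumably why a negative difference is discarded here rather
 * than trusted.
 */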
234 void runtime_start(gmx_runtime_t *runtime)
236 runtime->real = gmx_gettime();
238 set_proctime(runtime);
239 runtime->realtime = 0;
240 runtime->proctime = 0;
242 runtime->time_per_step = 0;
245 void runtime_end(gmx_runtime_t *runtime)
251 runtime->proctime += set_proctime(runtime);
252 runtime->realtime = now - runtime->real;
256 void runtime_upd_proc(gmx_runtime_t *runtime)
258 runtime->proctime += set_proctime(runtime);
261 void print_date_and_time(FILE *fplog, int nodeid, const char *title,
262 const gmx_runtime_t *runtime)
265 char timebuf[STRLEN];
266 char time_string[STRLEN];
273 tmptime = (time_t) runtime->real;
274 gmx_ctime_r(&tmptime, timebuf, STRLEN);
278 tmptime = (time_t) gmx_gettime();
279 gmx_ctime_r(&tmptime, timebuf, STRLEN);
281 for (i = 0; timebuf[i] >= ' '; i++)
283 time_string[i] = timebuf[i];
285 time_string[i] = '\0';
287 fprintf(fplog, "%s on node %d %s\n", title, nodeid, time_string);
291 void print_start(FILE *fplog, t_commrec *cr, gmx_runtime_t *runtime,
296 sprintf(buf, "Started %s", name);
297 print_date_and_time(fplog, cr->nodeid, buf, runtime);
300 static void sum_forces(int start, int end, rvec f[], rvec flr[])
306 pr_rvecs(debug, 0, "fsr", f+start, end-start);
307 pr_rvecs(debug, 0, "flr", flr+start, end-start);
309 for (i = start; (i < end); i++)
311 rvec_inc(f[i], flr[i]);
316 * calc_f_el calculates forces due to an electric field.
318 * force [kJ mol^-1 nm^-1] = charge [e] * field [kJ mol^-1 nm^-1 e^-1]
320 * Et[] contains the parameters for the time dependent
321 * part of the field (not yet used).
322 * Ex[] contains the parameters for
323 * the spatial dependent part of the field. You can have cool periodic
324 * fields in principle, but only a constant field is supported
326 * The function should return the energy due to the electric field
327 * (if any) but for now returns 0.
330 * There can be problems with the virial.
331 * Since the field is not self-consistent this is unavoidable.
332 * For neutral molecules the virial is correct within this approximation.
333 * For neutral systems with many charged molecules the error is small.
334 * But for systems with a net charge or a few charged molecules
335 * the error can be significant when the field is high.
336 * Solution: implement a self-consistent electric field into PME.
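/* Sketch of the applied field (not from the original code; t0 is assumed to
 * be Et[m].a[1]):
 *
 *   E_m(t) = Ex[m].a[0] * FIELDFAC * cos(omega*(t - t0))
 *            * exp(-(t - t0)^2 / (2*sigma^2))
 *
 * with omega = Et[m].a[0] and sigma = Et[m].a[2], i.e. a Gaussian-enveloped
 * cosine pulse; the cos(omega*t) branch below drops the envelope, and for a
 * purely static field only the amplitude Ex[m].a[0] remains.  FIELDFAC
 * converts V/nm to MD units, and each home atom then receives
 * f[i][m] += charge[i]*E_m(t).
 */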
338 static void calc_f_el(FILE *fp, int start, int homenr,
339 real charge[], rvec x[], rvec f[],
340 t_cosines Ex[], t_cosines Et[], double t)
346 for (m = 0; (m < DIM); m++)
353 Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));
357 Ext[m] = cos(Et[m].a[0]*t);
366 /* Convert the field strength from V/nm to MD-units */
367 Ext[m] *= Ex[m].a[0]*FIELDFAC;
368 for (i = start; (i < start+homenr); i++)
370 f[i][m] += charge[i]*Ext[m];
380 fprintf(fp, "%10g %10g %10g %10g #FIELD\n", t,
381 Ext[XX]/FIELDFAC, Ext[YY]/FIELDFAC, Ext[ZZ]/FIELDFAC);
385 static void calc_virial(FILE *fplog, int start, int homenr, rvec x[], rvec f[],
386 tensor vir_part, t_graph *graph, matrix box,
387 t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
392 /* The short-range virial from surrounding boxes */
394 calc_vir(fplog, SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
395 inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
397 /* Calculate partial virial, for local atoms only, based on short range.
398 * Total virial is computed in global_stat, called from do_md
400 f_calc_vir(fplog, start, start+homenr, x, f, vir_part, graph, box);
401 inc_nrnb(nrnb, eNR_VIRIAL, homenr);
403 /* Add position restraint contribution */
404 for (i = 0; i < DIM; i++)
406 vir_part[i][i] += fr->vir_diag_posres[i];
409 /* Add wall contribution */
410 for (i = 0; i < DIM; i++)
412 vir_part[i][ZZ] += fr->vir_wall_z[i];
417 pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
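/* For reference (not from the original code): GROMACS defines the virial as
 *
 *   Xi = -1/2 * sum_i r_i (x) F_i
 *
 * (an outer product summed over atoms and periodic shifts), which is what
 * calc_vir/f_calc_vir accumulate above and why factors of 0.5 appear where
 * virial differences are assembled elsewhere in this file (calc_enervirdiff).
 */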
421 static void posres_wrapper(FILE *fplog,
427 matrix box, rvec x[],
429 gmx_enerdata_t *enerd,
437 /* Position restraints always require full pbc */
438 set_pbc(&pbc, ir->ePBC, box);
440 v = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
441 top->idef.iparams_posres,
442 (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
443 ir->ePBC == epbcNONE ? NULL : &pbc,
444 lambda[efptRESTRAINT], &dvdl,
445 fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
448 fprintf(fplog, sepdvdlformat,
449 interaction_function[F_POSRES].longname, v, dvdl);
451 enerd->term[F_POSRES] += v;
452 /* If just the force constant changes, the FEP term is linear,
453 * but if the reference positions change, it is not.
455 enerd->dvdl_nonlin[efptRESTRAINT] += dvdl;
456 inc_nrnb(nrnb, eNR_POSRES, top->idef.il[F_POSRES].nr/2);
458 if ((ir->fepvals->n_lambda > 0) && (flags & GMX_FORCE_DHDL))
460 for (i = 0; i < enerd->n_lambda; i++)
462 real dvdl_dum, lambda_dum;
464 lambda_dum = (i == 0 ? lambda[efptRESTRAINT] : ir->fepvals->all_lambda[efptRESTRAINT][i-1]);
465 v = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
466 top->idef.iparams_posres,
467 (const rvec*)x, NULL, NULL,
468 ir->ePBC == epbcNONE ? NULL : &pbc, lambda_dum, &dvdl,
469 fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
470 enerd->enerpart_lambda[i] += v;
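/* Sketch of why the restraint dH/dlambda is treated as non-linear above (not
 * from the original code): with both end states restrained,
 *
 *   V(l) = 1/2 * k(l) * |x - x0(l)|^2,
 *   k(l) = (1-l)*kA + l*kB,   x0(l) = (1-l)*x0A + l*x0B,
 *
 * so V is quadratic in l as soon as the reference positions differ between
 * the A and B states; only when just the force constant is interpolated is
 * dV/dl independent of l, which is why the foreign-lambda loop above
 * re-evaluates the potential at every lambda point instead of extrapolating
 * linearly.
 */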
475 static void pull_potential_wrapper(FILE *fplog,
479 matrix box, rvec x[],
483 gmx_enerdata_t *enerd,
490 /* Calculate the center of mass forces, this requires communication,
491 * which is why pull_potential is called close to other communication.
492 * The virial contribution is calculated directly,
493 * which is why we call pull_potential after calc_virial.
495 set_pbc(&pbc, ir->ePBC, box);
497 enerd->term[F_COM_PULL] +=
498 pull_potential(ir->ePull, ir->pull, mdatoms, &pbc,
499 cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
502 fprintf(fplog, sepdvdlformat, "Com pull", enerd->term[F_COM_PULL], dvdl);
504 enerd->dvdl_lin[efptRESTRAINT] += dvdl;
507 static void pme_receive_force_ener(FILE *fplog,
510 gmx_wallcycle_t wcycle,
511 gmx_enerdata_t *enerd,
515 float cycles_ppdpme, cycles_seppme;
517 cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
518 dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
520 /* In case of node-splitting, the PP nodes receive the long-range
521 * forces, virial and energy from the PME nodes here.
523 wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
525 gmx_pme_receive_f(cr, fr->f_novirsum, fr->vir_el_recip, &e, &dvdl,
529 fprintf(fplog, sepdvdlformat, "PME mesh", e, dvdl);
531 enerd->term[F_COUL_RECIP] += e;
532 enerd->dvdl_lin[efptCOUL] += dvdl;
535 dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
537 wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
540 static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
541 gmx_large_int_t step, real pforce, rvec *x, rvec *f)
545 char buf[STEPSTRSIZE];
548 for (i = md->start; i < md->start+md->homenr; i++)
551 /* We also catch NaN: fn2 != fn2 is true only for NaN (IEEE 754), provided the compiler does not optimize the self-comparison away. */
552 if (fn2 >= pf2 || fn2 != fn2)
554 fprintf(fp, "step %s atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
555 gmx_step_str(step, buf),
556 ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], sqrt(fn2));
561 static void post_process_forces(FILE *fplog,
563 gmx_large_int_t step,
564 t_nrnb *nrnb, gmx_wallcycle_t wcycle,
566 matrix box, rvec x[],
571 t_forcerec *fr, gmx_vsite_t *vsite,
578 /* Spread the mesh force on virtual sites to the other particles...
579 * This is parallelized. MPI communication is performed
580 * if the constructing atoms aren't local.
582 wallcycle_start(wcycle, ewcVSITESPREAD);
583 spread_vsite_f(fplog, vsite, x, fr->f_novirsum, NULL,
584 (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
586 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
587 wallcycle_stop(wcycle, ewcVSITESPREAD);
589 if (flags & GMX_FORCE_VIRIAL)
591 /* Now add the forces, this is local */
594 sum_forces(0, fr->f_novirsum_n, f, fr->f_novirsum);
598 sum_forces(mdatoms->start, mdatoms->start+mdatoms->homenr,
601 if (EEL_FULL(fr->eeltype))
603 /* Add the mesh contribution to the virial */
604 m_add(vir_force, fr->vir_el_recip, vir_force);
608 pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
613 if (fr->print_force >= 0)
615 print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
619 static void do_nb_verlet(t_forcerec *fr,
620 interaction_const_t *ic,
621 gmx_enerdata_t *enerd,
622 int flags, int ilocality,
625 gmx_wallcycle_t wcycle)
627 int nnbl, kernel_type, enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
629 nonbonded_verlet_group_t *nbvg;
632 if (!(flags & GMX_FORCE_NONBONDED))
634 /* skip non-bonded calculation */
638 nbvg = &fr->nbv->grp[ilocality];
640 /* CUDA kernel launch overhead is already timed separately */
641 if (fr->cutoff_scheme != ecutsVERLET)
643 gmx_incons("Invalid cut-off scheme passed!");
646 bCUDA = (nbvg->kernel_type == nbnxnk8x8x8_CUDA);
650 wallcycle_sub_start(wcycle, ewcsNONBONDED);
652 switch (nbvg->kernel_type)
654 case nbnxnk4x4_PlainC:
655 nbnxn_kernel_ref(&nbvg->nbl_lists,
661 enerd->grpp.ener[egCOULSR],
663 enerd->grpp.ener[egBHAMSR] :
664 enerd->grpp.ener[egLJSR]);
667 case nbnxnk4xN_SIMD_4xN:
668 nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
675 enerd->grpp.ener[egCOULSR],
677 enerd->grpp.ener[egBHAMSR] :
678 enerd->grpp.ener[egLJSR]);
680 case nbnxnk4xN_SIMD_2xNN:
681 nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
688 enerd->grpp.ener[egCOULSR],
690 enerd->grpp.ener[egBHAMSR] :
691 enerd->grpp.ener[egLJSR]);
694 case nbnxnk8x8x8_CUDA:
695 nbnxn_cuda_launch_kernel(fr->nbv->cu_nbv, nbvg->nbat, flags, ilocality);
698 case nbnxnk8x8x8_PlainC:
699 nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
704 nbvg->nbat->out[0].f,
706 enerd->grpp.ener[egCOULSR],
708 enerd->grpp.ener[egBHAMSR] :
709 enerd->grpp.ener[egLJSR]);
713 gmx_incons("Invalid nonbonded kernel type passed!");
718 wallcycle_sub_stop(wcycle, ewcsNONBONDED);
721 if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
723 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
725 else if ((!bCUDA && nbvg->ewald_excl == ewaldexclAnalytical) ||
726 (bCUDA && nbnxn_cuda_is_kernel_ewald_analytical(fr->nbv->cu_nbv)))
728 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
732 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
734 enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
735 if (flags & GMX_FORCE_ENERGY)
737 /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
738 enr_nbnxn_kernel_ljc += 1;
739 enr_nbnxn_kernel_lj += 1;
742 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
743 nbvg->nbl_lists.natpair_ljq);
744 inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
745 nbvg->nbl_lists.natpair_lj);
746 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
747 nbvg->nbl_lists.natpair_q);
750 void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
751 t_inputrec *inputrec,
752 gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
755 gmx_groups_t *groups,
756 matrix box, rvec x[], history_t *hist,
760 gmx_enerdata_t *enerd, t_fcdata *fcd,
761 real *lambda, t_graph *graph,
762 t_forcerec *fr, interaction_const_t *ic,
763 gmx_vsite_t *vsite, rvec mu_tot,
764 double t, FILE *field, gmx_edsam_t ed,
772 gmx_bool bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
773 gmx_bool bDoLongRange, bDoForces, bSepLRF, bUseGPU, bUseOrEmulGPU;
774 gmx_bool bDiffKernels = FALSE;
776 rvec vzero, box_diag;
778 float cycles_pme, cycles_force, cycles_wait_gpu;
779 nonbonded_verlet_t *nbv;
784 nb_kernel_type = fr->nbv->grp[0].kernel_type;
786 start = mdatoms->start;
787 homenr = mdatoms->homenr;
789 bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));
791 clear_mat(vir_force);
794 if (DOMAINDECOMP(cr))
796 cg1 = cr->dd->ncg_tot;
807 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
808 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
809 bFillGrid = (bNS && bStateChanged);
810 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
811 bDoLongRange = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DO_LR));
812 bDoForces = (flags & GMX_FORCE_FORCES);
813 bSepLRF = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
814 bUseGPU = fr->nbv->bUseGPU;
815 bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);
819 update_forcerec(fplog, fr, box);
821 if (NEED_MUTOT(*inputrec))
823 /* Calculate total (local) dipole moment in a temporary common array.
824 * This makes it possible to sum them over nodes faster.
826 calc_mu(start, homenr,
827 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
832 if (fr->ePBC != epbcNONE)
834 /* Compute shift vectors every step,
835 * because of pressure coupling or box deformation!
837 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
839 calc_shifts(box, fr->shift_vec);
844 put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
845 inc_nrnb(nrnb, eNR_SHIFTX, homenr);
847 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
849 unshift_self(graph, box, x);
853 nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
854 fr->shift_vec, nbv->grp[0].nbat);
857 if (!(cr->duty & DUTY_PME))
859 /* Send particle coordinates to the pme nodes.
860 * Since this is only implemented for domain decomposition
861 * and domain decomposition does not use the graph,
862 * we do not need to worry about shifting.
865 wallcycle_start(wcycle, ewcPP_PMESENDX);
866 GMX_MPE_LOG(ev_send_coordinates_start);
868 bBS = (inputrec->nwall == 2);
872 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
875 gmx_pme_send_x(cr, bBS ? boxs : box, x,
876 mdatoms->nChargePerturbed, lambda[efptCOUL],
877 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)), step);
879 GMX_MPE_LOG(ev_send_coordinates_finish);
880 wallcycle_stop(wcycle, ewcPP_PMESENDX);
884 /* do gridding for pair search */
887 if (graph && bStateChanged)
889 /* Calculate intramolecular shift vectors to make molecules whole */
890 mk_mshift(fplog, graph, fr->ePBC, box, x);
894 box_diag[XX] = box[XX][XX];
895 box_diag[YY] = box[YY][YY];
896 box_diag[ZZ] = box[ZZ][ZZ];
898 wallcycle_start(wcycle, ewcNS);
901 wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
902 nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
904 0, mdatoms->homenr, -1, fr->cginfo, x,
906 nbv->grp[eintLocal].kernel_type,
907 nbv->grp[eintLocal].nbat);
908 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
912 wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
913 nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
915 nbv->grp[eintNonlocal].kernel_type,
916 nbv->grp[eintNonlocal].nbat);
917 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
920 if (nbv->ngrp == 1 ||
921 nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
923 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
924 nbv->nbs, mdatoms, fr->cginfo);
928 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
929 nbv->nbs, mdatoms, fr->cginfo);
930 nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
931 nbv->nbs, mdatoms, fr->cginfo);
933 wallcycle_stop(wcycle, ewcNS);
936 /* initialize the GPU atom data and copy shift vector */
941 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
942 nbnxn_cuda_init_atomdata(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
943 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
946 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
947 nbnxn_cuda_upload_shiftvec(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
948 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
951 /* do local pair search */
954 wallcycle_start_nocount(wcycle, ewcNS);
955 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
956 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
959 nbv->min_ci_balanced,
960 &nbv->grp[eintLocal].nbl_lists,
962 nbv->grp[eintLocal].kernel_type,
964 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
968 /* initialize local pair-list on the GPU */
969 nbnxn_cuda_init_pairlist(nbv->cu_nbv,
970 nbv->grp[eintLocal].nbl_lists.nbl[0],
973 wallcycle_stop(wcycle, ewcNS);
977 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
978 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
979 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
980 nbv->grp[eintLocal].nbat);
981 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
982 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
987 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
988 /* launch local nonbonded F on GPU */
989 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
991 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
994 /* Communicate coordinates and sum dipole if necessary +
995 do non-local pair search */
996 if (DOMAINDECOMP(cr))
998 bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
999 nbv->grp[eintLocal].kernel_type);
1003 /* With GPU+CPU non-bonded calculations we need to copy
1004 * the local coordinates to the non-local nbat struct
1005 * (in CPU format) as the non-local kernel call also
1006 * calculates the local - non-local interactions.
1008 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1009 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
1010 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
1011 nbv->grp[eintNonlocal].nbat);
1012 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
1013 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1018 wallcycle_start_nocount(wcycle, ewcNS);
1019 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1023 nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
1026 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
1029 nbv->min_ci_balanced,
1030 &nbv->grp[eintNonlocal].nbl_lists,
1032 nbv->grp[eintNonlocal].kernel_type,
1035 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1037 if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_CUDA)
1039 /* initialize non-local pair-list on the GPU */
1040 nbnxn_cuda_init_pairlist(nbv->cu_nbv,
1041 nbv->grp[eintNonlocal].nbl_lists.nbl[0],
1044 wallcycle_stop(wcycle, ewcNS);
1048 wallcycle_start(wcycle, ewcMOVEX);
1049 dd_move_x(cr->dd, box, x);
1051 /* When we don't need the total dipole we sum it in global_stat */
1052 if (bStateChanged && NEED_MUTOT(*inputrec))
1054 gmx_sumd(2*DIM, mu, cr);
1056 wallcycle_stop(wcycle, ewcMOVEX);
1058 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1059 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
1060 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
1061 nbv->grp[eintNonlocal].nbat);
1062 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
1063 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1066 if (bUseGPU && !bDiffKernels)
1068 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
1069 /* launch non-local nonbonded F on GPU */
1070 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
1072 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1078 /* launch D2H copy-back F */
1079 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1080 if (DOMAINDECOMP(cr) && !bDiffKernels)
1082 nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintNonlocal].nbat,
1083 flags, eatNonlocal);
1085 nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintLocal].nbat,
1087 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1090 if (bStateChanged && NEED_MUTOT(*inputrec))
1094 gmx_sumd(2*DIM, mu, cr);
1097 for (i = 0; i < 2; i++)
1099 for (j = 0; j < DIM; j++)
1101 fr->mu_tot[i][j] = mu[i*DIM + j];
1105 if (fr->efep == efepNO)
1107 copy_rvec(fr->mu_tot[0], mu_tot);
1111 for (j = 0; j < DIM; j++)
1114 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
1115 lambda[efptCOUL]*fr->mu_tot[1][j];
1119 /* Reset energies */
1120 reset_enerdata(&(inputrec->opts), fr, bNS, enerd, MASTER(cr));
1121 clear_rvecs(SHIFTS, fr->fshift);
1123 if (DOMAINDECOMP(cr))
1125 if (!(cr->duty & DUTY_PME))
1127 wallcycle_start(wcycle, ewcPPDURINGPME);
1128 dd_force_flop_start(cr->dd, nrnb);
1134 /* Enforced rotation has its own cycle counter that starts after the collective
1135 * coordinates have been communicated. It is added to ddCyclF to allow
1136 * for proper load-balancing */
1137 wallcycle_start(wcycle, ewcROT);
1138 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1139 wallcycle_stop(wcycle, ewcROT);
1142 /* Start the force cycle counter.
1143 * This counter is stopped in do_forcelow_level.
1144 * No parallel communication should occur while this counter is running,
1145 * since that will interfere with the dynamic load balancing.
1147 wallcycle_start(wcycle, ewcFORCE);
1150 /* Reset forces for which the virial is calculated separately:
1151 * PME/Ewald forces if necessary */
1152 if (fr->bF_NoVirSum)
1154 if (flags & GMX_FORCE_VIRIAL)
1156 fr->f_novirsum = fr->f_novirsum_alloc;
1157 GMX_BARRIER(cr->mpi_comm_mygroup);
1160 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1164 clear_rvecs(homenr, fr->f_novirsum+start);
1166 GMX_BARRIER(cr->mpi_comm_mygroup);
1170 /* We are not calculating the pressure so we do not need
1171 * a separate array for forces that do not contribute
1178 /* Clear the short- and long-range forces */
1179 clear_rvecs(fr->natoms_force_constr, f);
1180 if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1182 clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1185 clear_rvec(fr->vir_diag_posres);
1187 GMX_BARRIER(cr->mpi_comm_mygroup);
1190 if (inputrec->ePull == epullCONSTRAINT)
1192 clear_pull_forces(inputrec->pull);
1195 /* We calculate the non-bonded forces, when done on the CPU, here.
1196 * We do this before calling do_force_lowlevel, as there the bonded
1197 * forces are calculated before PME, which does communication.
1198 * With this order, non-bonded and bonded force calculation imbalance
1199 * can be balanced out by the domain decomposition load balancing.
1204 /* Maybe we should move this into do_force_lowlevel */
1205 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
1209 if (!bUseOrEmulGPU || bDiffKernels)
1213 if (DOMAINDECOMP(cr))
1215 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
1216 bDiffKernels ? enbvClearFYes : enbvClearFNo,
1226 aloc = eintNonlocal;
1229 /* Add all the non-bonded force to the normal force array.
1230 * This can be split into a local and a non-local part when overlapping
1231 * communication with calculation with domain decomposition.
1233 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1234 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1235 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1236 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
1237 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1238 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1239 wallcycle_start_nocount(wcycle, ewcFORCE);
1241 /* if there are multiple fshift output buffers reduce them */
1242 if ((flags & GMX_FORCE_VIRIAL) &&
1243 nbv->grp[aloc].nbl_lists.nnbl > 1)
1245 nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
1250 /* update QMMMrec, if necessary */
1253 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1256 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
1258 posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
1259 f, enerd, lambda, fr);
1262 /* Compute the bonded and non-bonded energies and optionally forces */
1263 do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
1264 cr, nrnb, wcycle, mdatoms, &(inputrec->opts),
1265 x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, mtop, top, fr->born,
1266 &(top->atomtypes), bBornRadii, box,
1267 inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
1268 flags, &cycles_pme);
1272 if (do_per_step(step, inputrec->nstcalclr))
1274 /* Add the long range forces to the short range forces */
1275 for (i = 0; i < fr->natoms_force_constr; i++)
1277 rvec_add(fr->f_twin[i], f[i], f[i]);
1282 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1283 GMX_BARRIER(cr->mpi_comm_mygroup);
1287 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1290 if (bUseOrEmulGPU && !bDiffKernels)
1292 /* wait for non-local forces (or calculate in emulation mode) */
1293 if (DOMAINDECOMP(cr))
1299 wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
1300 nbnxn_cuda_wait_gpu(nbv->cu_nbv,
1301 nbv->grp[eintNonlocal].nbat,
1303 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1305 cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
1306 cycles_wait_gpu += cycles_tmp;
1307 cycles_force += cycles_tmp;
1311 wallcycle_start_nocount(wcycle, ewcFORCE);
1312 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
1314 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1316 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1317 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1318 /* skip the reduction if there was no non-local work to do */
1319 if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
1321 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
1322 nbv->grp[eintNonlocal].nbat, f);
1324 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1325 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1331 /* Communicate the forces */
1334 wallcycle_start(wcycle, ewcMOVEF);
1335 if (DOMAINDECOMP(cr))
1337 dd_move_f(cr->dd, f, fr->fshift);
1338 /* Do we need to communicate the separate force array
1339 * for terms that do not contribute to the single sum virial?
1340 * Position restraints and electric fields do not introduce
1341 * inter-cg forces, only full electrostatics methods do.
1342 * When we do not calculate the virial, fr->f_novirsum = f,
1343 * so we have already communicated these forces.
1345 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1346 (flags & GMX_FORCE_VIRIAL))
1348 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1352 /* We should not update the shift forces here,
1353 * since f_twin is already included in f.
1355 dd_move_f(cr->dd, fr->f_twin, NULL);
1358 wallcycle_stop(wcycle, ewcMOVEF);
1364 /* wait for local forces (or calculate in emulation mode) */
1367 wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
1368 nbnxn_cuda_wait_gpu(nbv->cu_nbv,
1369 nbv->grp[eintLocal].nbat,
1371 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1373 cycles_wait_gpu += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
1375 /* now clear the GPU outputs while we finish the step on the CPU */
1377 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1378 nbnxn_cuda_clear_outputs(nbv->cu_nbv, flags);
1379 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1383 wallcycle_start_nocount(wcycle, ewcFORCE);
1384 do_nb_verlet(fr, ic, enerd, flags, eintLocal,
1385 DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
1387 wallcycle_stop(wcycle, ewcFORCE);
1389 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1390 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1391 if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
1393 /* skip the reduction if there was no local work to do */
1394 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
1395 nbv->grp[eintLocal].nbat, f);
1397 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1398 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1401 if (DOMAINDECOMP(cr))
1403 dd_force_flop_stop(cr->dd, nrnb);
1406 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1409 dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
1416 if (IR_ELEC_FIELD(*inputrec))
1418 /* Compute forces due to electric field */
1419 calc_f_el(MASTER(cr) ? field : NULL,
1420 start, homenr, mdatoms->chargeA, x, fr->f_novirsum,
1421 inputrec->ex, inputrec->et, t);
1424 /* If we have NoVirSum forces, but we do not calculate the virial,
1425 * we sum fr->f_novirsum = f later.
1427 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1429 wallcycle_start(wcycle, ewcVSITESPREAD);
1430 spread_vsite_f(fplog, vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1431 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1432 wallcycle_stop(wcycle, ewcVSITESPREAD);
1436 wallcycle_start(wcycle, ewcVSITESPREAD);
1437 spread_vsite_f(fplog, vsite, x, fr->f_twin, NULL, FALSE, NULL,
1439 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1440 wallcycle_stop(wcycle, ewcVSITESPREAD);
1444 if (flags & GMX_FORCE_VIRIAL)
1446 /* Calculation of the virial must be done after vsites! */
1447 calc_virial(fplog, mdatoms->start, mdatoms->homenr, x, f,
1448 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1452 if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1454 pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
1455 f, vir_force, mdatoms, enerd, lambda, t);
1458 /* Add the forces from enforced rotation potentials (if any) */
1461 wallcycle_start(wcycle, ewcROTadd);
1462 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1463 wallcycle_stop(wcycle, ewcROTadd);
1466 if (PAR(cr) && !(cr->duty & DUTY_PME))
1468 /* In case of node-splitting, the PP nodes receive the long-range
1469 * forces, virial and energy from the PME nodes here.
1471 pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
1476 post_process_forces(fplog, cr, step, nrnb, wcycle,
1477 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1481 /* Sum the potential energy terms from group contributions */
1482 sum_epot(&(inputrec->opts), &(enerd->grpp), enerd->term);
1485 void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
1486 t_inputrec *inputrec,
1487 gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1488 gmx_localtop_t *top,
1490 gmx_groups_t *groups,
1491 matrix box, rvec x[], history_t *hist,
1495 gmx_enerdata_t *enerd, t_fcdata *fcd,
1496 real *lambda, t_graph *graph,
1497 t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
1498 double t, FILE *field, gmx_edsam_t ed,
1499 gmx_bool bBornRadii,
1505 gmx_bool bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
1506 gmx_bool bDoLongRangeNS, bDoForces, bDoPotential, bSepLRF;
1507 gmx_bool bDoAdressWF;
1509 rvec vzero, box_diag;
1510 real e, v, dvdlambda[efptNR];
1512 float cycles_pme, cycles_force;
1514 start = mdatoms->start;
1515 homenr = mdatoms->homenr;
1517 bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));
1519 clear_mat(vir_force);
1523 pd_cg_range(cr, &cg0, &cg1);
1528 if (DOMAINDECOMP(cr))
1530 cg1 = cr->dd->ncg_tot;
1542 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
1543 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
1544 /* Should we update the long-range neighborlists at this step? */
1545 bDoLongRangeNS = fr->bTwinRange && bNS;
1546 /* Should we perform the long-range nonbonded evaluation inside the neighborsearching? */
1547 bFillGrid = (bNS && bStateChanged);
1548 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
1549 bDoForces = (flags & GMX_FORCE_FORCES);
1550 bDoPotential = (flags & GMX_FORCE_ENERGY);
1551 bSepLRF = ((inputrec->nstcalclr > 1) && bDoForces &&
1552 (flags & GMX_FORCE_SEPLRF) && (flags & GMX_FORCE_DO_LR));
1554 /* should probably move this to the forcerec since it doesn't change */
1555 bDoAdressWF = ((fr->adress_type != eAdressOff));
1559 update_forcerec(fplog, fr, box);
1561 if (NEED_MUTOT(*inputrec))
1563 /* Calculate total (local) dipole moment in a temporary common array.
1564 * This makes it possible to sum them over nodes faster.
1566 calc_mu(start, homenr,
1567 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
1572 if (fr->ePBC != epbcNONE)
1574 /* Compute shift vectors every step,
1575 * because of pressure coupling or box deformation!
1577 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
1579 calc_shifts(box, fr->shift_vec);
1584 put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
1585 &(top->cgs), x, fr->cg_cm);
1586 inc_nrnb(nrnb, eNR_CGCM, homenr);
1587 inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
1589 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1591 unshift_self(graph, box, x);
1596 calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
1597 inc_nrnb(nrnb, eNR_CGCM, homenr);
1604 move_cgcm(fplog, cr, fr->cg_cm);
1608 pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
1613 if (!(cr->duty & DUTY_PME))
1615 /* Send particle coordinates to the pme nodes.
1616 * Since this is only implemented for domain decomposition
1617 * and domain decomposition does not use the graph,
1618 * we do not need to worry about shifting.
1621 wallcycle_start(wcycle, ewcPP_PMESENDX);
1622 GMX_MPE_LOG(ev_send_coordinates_start);
1624 bBS = (inputrec->nwall == 2);
1627 copy_mat(box, boxs);
1628 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
1631 gmx_pme_send_x(cr, bBS ? boxs : box, x,
1632 mdatoms->nChargePerturbed, lambda[efptCOUL],
1633 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)), step);
1635 GMX_MPE_LOG(ev_send_coordinates_finish);
1636 wallcycle_stop(wcycle, ewcPP_PMESENDX);
1638 #endif /* GMX_MPI */
1640 /* Communicate coordinates and sum dipole if necessary */
1643 wallcycle_start(wcycle, ewcMOVEX);
1644 if (DOMAINDECOMP(cr))
1646 dd_move_x(cr->dd, box, x);
1650 move_x(fplog, cr, GMX_LEFT, GMX_RIGHT, x, nrnb);
1652 wallcycle_stop(wcycle, ewcMOVEX);
1655 /* update adress weight beforehand */
1656 if (bStateChanged && bDoAdressWF)
1658 /* need pbc for adress weight calculation with pbc_dx */
1659 set_pbc(&pbc, inputrec->ePBC, box);
1660 if (fr->adress_site == eAdressSITEcog)
1662 update_adress_weights_cog(top->idef.iparams, top->idef.il, x, fr, mdatoms,
1663 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1665 else if (fr->adress_site == eAdressSITEcom)
1667 update_adress_weights_com(fplog, cg0, cg1, &(top->cgs), x, fr, mdatoms,
1668 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1670 else if (fr->adress_site == eAdressSITEatomatom)
1672 update_adress_weights_atom_per_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1673 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1677 update_adress_weights_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1678 inputrec->ePBC == epbcNONE ? NULL : &pbc);
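/* Sketch (not from the original code): the calls above evaluate the AdResS
 * weighting function w in [0,1] for each charge group or atom, based on the
 * position of the chosen reference point (center of mass, center of
 * geometry, or the individual atom, depending on fr->adress_site) relative
 * to the explicit region; w = 1 selects the fully atomistic forces, w = 0
 * the coarse-grained ones, and intermediate values mix the two in the
 * hybrid zone.
 */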
1682 if (NEED_MUTOT(*inputrec))
1689 gmx_sumd(2*DIM, mu, cr);
1691 for (i = 0; i < 2; i++)
1693 for (j = 0; j < DIM; j++)
1695 fr->mu_tot[i][j] = mu[i*DIM + j];
1699 if (fr->efep == efepNO)
1701 copy_rvec(fr->mu_tot[0], mu_tot);
1705 for (j = 0; j < DIM; j++)
1708 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
1713 /* Reset energies */
1714 reset_enerdata(&(inputrec->opts), fr, bNS, enerd, MASTER(cr));
1715 clear_rvecs(SHIFTS, fr->fshift);
1719 wallcycle_start(wcycle, ewcNS);
1721 if (graph && bStateChanged)
1723 /* Calculate intramolecular shift vectors to make molecules whole */
1724 mk_mshift(fplog, graph, fr->ePBC, box, x);
1727 /* Do the actual neighbour searching */
1728 ns(fplog, fr, x, box,
1729 groups, &(inputrec->opts), top, mdatoms,
1730 cr, nrnb, lambda, dvdlambda, &enerd->grpp, bFillGrid,
1733 wallcycle_stop(wcycle, ewcNS);
1736 if (inputrec->implicit_solvent && bNS)
1738 make_gb_nblist(cr, inputrec->gb_algorithm, inputrec->rlist,
1739 x, box, fr, &top->idef, graph, fr->born);
1742 if (DOMAINDECOMP(cr))
1744 if (!(cr->duty & DUTY_PME))
1746 wallcycle_start(wcycle, ewcPPDURINGPME);
1747 dd_force_flop_start(cr->dd, nrnb);
1753 /* Enforced rotation has its own cycle counter that starts after the collective
1754 * coordinates have been communicated. It is added to ddCyclF to allow
1755 * for proper load-balancing */
1756 wallcycle_start(wcycle, ewcROT);
1757 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1758 wallcycle_stop(wcycle, ewcROT);
1761 /* Start the force cycle counter.
1762 * This counter is stopped in do_forcelow_level.
1763 * No parallel communication should occur while this counter is running,
1764 * since that will interfere with the dynamic load balancing.
1766 wallcycle_start(wcycle, ewcFORCE);
1770 /* Reset forces for which the virial is calculated separately:
1771 * PME/Ewald forces if necessary */
1772 if (fr->bF_NoVirSum)
1774 if (flags & GMX_FORCE_VIRIAL)
1776 fr->f_novirsum = fr->f_novirsum_alloc;
1777 GMX_BARRIER(cr->mpi_comm_mygroup);
1780 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1784 clear_rvecs(homenr, fr->f_novirsum+start);
1786 GMX_BARRIER(cr->mpi_comm_mygroup);
1790 /* We are not calculating the pressure so we do not need
1791 * a separate array for forces that do not contribute
1798 /* Clear the short- and long-range forces */
1799 clear_rvecs(fr->natoms_force_constr, f);
1800 if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1802 clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1805 clear_rvec(fr->vir_diag_posres);
1807 GMX_BARRIER(cr->mpi_comm_mygroup);
1809 if (inputrec->ePull == epullCONSTRAINT)
1811 clear_pull_forces(inputrec->pull);
1814 /* update QMMMrec, if necessary */
1817 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1820 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
1822 posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
1823 f, enerd, lambda, fr);
1826 /* Compute the bonded and non-bonded energies and optionally forces */
1827 do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
1828 cr, nrnb, wcycle, mdatoms, &(inputrec->opts),
1829 x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, mtop, top, fr->born,
1830 &(top->atomtypes), bBornRadii, box,
1831 inputrec->fepvals, lambda,
1832 graph, &(top->excls), fr->mu_tot,
1838 if (do_per_step(step, inputrec->nstcalclr))
1840 /* Add the long range forces to the short range forces */
1841 for (i = 0; i < fr->natoms_force_constr; i++)
1843 rvec_add(fr->f_twin[i], f[i], f[i]);
1848 cycles_force = wallcycle_stop(wcycle, ewcFORCE);
1849 GMX_BARRIER(cr->mpi_comm_mygroup);
1853 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1856 if (DOMAINDECOMP(cr))
1858 dd_force_flop_stop(cr->dd, nrnb);
1861 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1867 if (IR_ELEC_FIELD(*inputrec))
1869 /* Compute forces due to electric field */
1870 calc_f_el(MASTER(cr) ? field : NULL,
1871 start, homenr, mdatoms->chargeA, x, fr->f_novirsum,
1872 inputrec->ex, inputrec->et, t);
1875 if (bDoAdressWF && fr->adress_icor == eAdressICThermoForce)
1877 /* Compute thermodynamic force in hybrid AdResS region */
1878 adress_thermo_force(start, homenr, &(top->cgs), x, fr->f_novirsum, fr, mdatoms,
1879 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1882 /* Communicate the forces */
1885 wallcycle_start(wcycle, ewcMOVEF);
1886 if (DOMAINDECOMP(cr))
1888 dd_move_f(cr->dd, f, fr->fshift);
1889 /* Do we need to communicate the separate force array
1890 * for terms that do not contribute to the single sum virial?
1891 * Position restraints and electric fields do not introduce
1892 * inter-cg forces, only full electrostatics methods do.
1893 * When we do not calculate the virial, fr->f_novirsum = f,
1894 * so we have already communicated these forces.
1896 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1897 (flags & GMX_FORCE_VIRIAL))
1899 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1903 /* We should not update the shift forces here,
1904 * since f_twin is already included in f.
1906 dd_move_f(cr->dd, fr->f_twin, NULL);
1911 pd_move_f(cr, f, nrnb);
1914 pd_move_f(cr, fr->f_twin, nrnb);
1917 wallcycle_stop(wcycle, ewcMOVEF);
1920 /* If we have NoVirSum forces, but we do not calculate the virial,
1921 * we sum fr->f_novirsum = f later.
1923 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1925 wallcycle_start(wcycle, ewcVSITESPREAD);
1926 spread_vsite_f(fplog, vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1927 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1928 wallcycle_stop(wcycle, ewcVSITESPREAD);
1932 wallcycle_start(wcycle, ewcVSITESPREAD);
1933 spread_vsite_f(fplog, vsite, x, fr->f_twin, NULL, FALSE, NULL,
1935 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1936 wallcycle_stop(wcycle, ewcVSITESPREAD);
1940 if (flags & GMX_FORCE_VIRIAL)
1942 /* Calculation of the virial must be done after vsites! */
1943 calc_virial(fplog, mdatoms->start, mdatoms->homenr, x, f,
1944 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1948 if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1950 pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
1951 f, vir_force, mdatoms, enerd, lambda, t);
1954 /* Add the forces from enforced rotation potentials (if any) */
1957 wallcycle_start(wcycle, ewcROTadd);
1958 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1959 wallcycle_stop(wcycle, ewcROTadd);
1962 if (PAR(cr) && !(cr->duty & DUTY_PME))
1964 /* In case of node-splitting, the PP nodes receive the long-range
1965 * forces, virial and energy from the PME nodes here.
1967 pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
1972 post_process_forces(fplog, cr, step, nrnb, wcycle,
1973 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1977 /* Sum the potential energy terms from group contributions */
1978 sum_epot(&(inputrec->opts), &(enerd->grpp), enerd->term);
1981 void do_force(FILE *fplog, t_commrec *cr,
1982 t_inputrec *inputrec,
1983 gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1984 gmx_localtop_t *top,
1986 gmx_groups_t *groups,
1987 matrix box, rvec x[], history_t *hist,
1991 gmx_enerdata_t *enerd, t_fcdata *fcd,
1992 real *lambda, t_graph *graph,
1994 gmx_vsite_t *vsite, rvec mu_tot,
1995 double t, FILE *field, gmx_edsam_t ed,
1996 gmx_bool bBornRadii,
1999 /* modify force flag if not doing nonbonded */
2000 if (!fr->bNonbonded)
2002 flags &= ~GMX_FORCE_NONBONDED;
2005 switch (inputrec->cutoff_scheme)
2008 do_force_cutsVERLET(fplog, cr, inputrec,
2024 do_force_cutsGROUP(fplog, cr, inputrec,
2039 gmx_incons("Invalid cut-off scheme passed!");
2044 void do_constrain_first(FILE *fplog, gmx_constr_t constr,
2045 t_inputrec *ir, t_mdatoms *md,
2046 t_state *state, rvec *f,
2047 t_graph *graph, t_commrec *cr, t_nrnb *nrnb,
2048 t_forcerec *fr, gmx_localtop_t *top, tensor shake_vir)
2050 int i, m, start, end;
2051 gmx_large_int_t step;
2052 real dt = ir->delta_t;
2056 snew(savex, state->natoms);
2059 end = md->homenr + start;
2063 fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
2064 start, md->homenr, end);
2066 /* Do a first constraining pass to reset the particles... */
2067 step = ir->init_step;
2070 char buf[STEPSTRSIZE];
2071 fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
2072 gmx_step_str(step, buf));
2076 /* constrain the current position */
2077 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2078 ir, NULL, cr, step, 0, md,
2079 state->x, state->x, NULL,
2080 fr->bMolPBC, state->box,
2081 state->lambda[efptBONDED], &dvdl_dum,
2082 NULL, NULL, nrnb, econqCoord,
2083 ir->epc == epcMTTK, state->veta, state->veta);
2086 /* constrain the initial velocity, and save it */
2087 /* also may be useful if we need the ekin from the halfstep for velocity verlet */
2088 /* might not yet treat veta correctly */
2089 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2090 ir, NULL, cr, step, 0, md,
2091 state->x, state->v, state->v,
2092 fr->bMolPBC, state->box,
2093 state->lambda[efptBONDED], &dvdl_dum,
2094 NULL, NULL, nrnb, econqVeloc,
2095 ir->epc == epcMTTK, state->veta, state->veta);
2097 /* constrain the initial velocities at t-dt/2 */
2098 if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
2100 for (i = start; (i < end); i++)
2102 for (m = 0; (m < DIM); m++)
2104 /* Reverse the velocity */
2105 state->v[i][m] = -state->v[i][m];
2106 /* Store the position at t-dt in savex */
2107 savex[i][m] = state->x[i][m] + dt*state->v[i][m];
2110 /* Shake the positions at t=-dt with the positions at t=0
2111 * as reference coordinates.
2115 char buf[STEPSTRSIZE];
2116 fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
2117 gmx_step_str(step, buf));
2120 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2121 ir, NULL, cr, step, -1, md,
2122 state->x, savex, NULL,
2123 fr->bMolPBC, state->box,
2124 state->lambda[efptBONDED], &dvdl_dum,
2125 state->v, NULL, nrnb, econqCoord,
2126 ir->epc == epcMTTK, state->veta, state->veta);
2128 for (i = start; i < end; i++)
2130 for (m = 0; m < DIM; m++)
2132 /* Re-reverse the velocities */
2133 state->v[i][m] = -state->v[i][m];
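/* Summary of the start-up trick above (not from the original code): with v
 * temporarily reversed, x(t0) + dt*(-v) is a first guess for x(t0 - dt);
 * that guess is then constrained using x(t0) as the reference coordinates,
 * and finally the velocities are flipped back so the starting state is
 * consistent with the constraints at the leap-frog half step t0 - dt/2.
 */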
2140 void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
2142 double eners[2], virs[2], enersum, virsum, y0, f, g, h;
2143 double r0, r1, r, rc3, rc9, ea, eb, ec, pa, pb, pc, pd;
2144 double invscale, invscale2, invscale3;
2145 int ri0, ri1, ri, i, offstart, offset;
2146 real scale, *vdwtab, tabfactor, tmp;
2148 fr->enershiftsix = 0;
2149 fr->enershifttwelve = 0;
2150 fr->enerdiffsix = 0;
2151 fr->enerdifftwelve = 0;
2153 fr->virdifftwelve = 0;
2155 if (eDispCorr != edispcNO)
2157 for (i = 0; i < 2; i++)
2162 if ((fr->vdw_modifier == eintmodPOTSHIFT) ||
2163 (fr->vdw_modifier == eintmodPOTSWITCH) ||
2164 (fr->vdwtype == evdwSHIFT) ||
2165 (fr->vdwtype == evdwSWITCH))
2167 if (((fr->vdw_modifier == eintmodPOTSWITCH) ||
2168 (fr->vdwtype == evdwSWITCH)) && fr->rvdw_switch == 0)
2171 "With dispersion correction rvdw-switch can not be zero "
2172 "for vdw-type = %s", evdw_names[fr->vdwtype]);
2175 scale = fr->nblists[0].table_vdw.scale;
2176 vdwtab = fr->nblists[0].table_vdw.data;
2178 /* Round the cut-offs to exact table values for precision */
2179 ri0 = floor(fr->rvdw_switch*scale);
2180 ri1 = ceil(fr->rvdw*scale);
2182 /* The code below has some support for handling force-switching, i.e.
2183 * when the force (instead of potential) is switched over a limited
2184 * region. This leads to a constant shift in the potential inside the
2185 * switching region, which we can handle by adding a constant energy
2186 * term in the force-switch case just like when we do potential-shift.
2188 * For now this is not enabled, but to keep the functionality in the
2189 * code we check separately for switch and shift. When we do force-switch
2190 * the shifting point is rvdw_switch, while it is the cutoff when we
2191 * have a classical potential-shift.
2193 ri0 = (fr->vdw_modifier == eintmodPOTSHIFT) ? ri1 : ri0;
2200 if (fr->vdwtype == evdwSHIFT)
2202 /* Determine the constant energy shift below rvdw_switch.
2203 * Table has a scale factor since we have scaled it down to compensate
2204 * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
2206 fr->enershiftsix = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
2207 fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
2209 else if (fr->vdw_modifier == eintmodPOTSHIFT)
2211 fr->enershiftsix = (real)(-1.0/(rc3*rc3));
2212 fr->enershifttwelve = (real)( 1.0/(rc9*rc3));
2215 /* Add the constant part from 0 to rvdw_switch.
2216 * This integration from 0 to rvdw_switch overcounts the number
2217 * of interactions by 1, as it also counts the self interaction.
2218 * We will correct for this later.
2220 eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
2221 eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
2223 invscale = 1.0/(scale);
2224 invscale2 = invscale*invscale;
2225 invscale3 = invscale*invscale2;
2227 /* following summation derived from cubic spline definition,
2228 Numerical Recipes in C, second edition, p. 113-116. Exact
2229 for the cubic spline. We first calculate the negative of
2230 the energy from rvdw to rvdw_switch, assuming that g(r)=1,
2231 and then add the more standard, abrupt cutoff correction to
2232 that result, yielding the long-range correction for a
2233 switched function. We perform both the pressure and energy
2234 loops at the same time for simplicity, as the computational
2237 for (i = 0; i < 2; i++)
2239 enersum = 0.0; virsum = 0.0;
2243 /* Since the dispersion table has been scaled down a factor 6.0 and the repulsion
2244 * a factor 12.0 to compensate for the c6/c12 parameters inside nbfp[] being scaled
2245 * up (to save flops in kernels), we need to correct for this.
2254 for (ri = ri0; ri < ri1; ri++)
2258 eb = 2.0*invscale2*r;
2262 pb = 3.0*invscale2*r;
2263 pc = 3.0*invscale*r*r;
2266 /* this "8" is from the packing in the vdwtab array - perhaps should be #define'ed? */
2267 offset = 8*ri + offstart;
2268 y0 = vdwtab[offset];
2269 f = vdwtab[offset+1];
2270 g = vdwtab[offset+2];
2271 h = vdwtab[offset+3];
2273 enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
2274 virsum += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
2277 enersum *= 4.0*M_PI*tabfactor;
2278 virsum *= 4.0*M_PI*tabfactor;
2279 eners[i] -= enersum;
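/* Sketch of the loop above (not from the original code): assuming the usual
 * GROMACS table layout of four cubic-spline coefficients per point (Y, F, G,
 * H), interleaved for dispersion and repulsion (hence the stride of 8), the
 * loop analytically integrates 4*pi*r^2 times the tabulated potential (and
 * the matching r*dV/dr term for the virial) over [rvdw_switch, rvdw].  This
 * part is subtracted, and the plain tail from rvdw_switch to infinity is
 * added just below, so the net correction covers exactly the interactions
 * the simulation does not compute with the switched/shifted potential.
 */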
2283 /* now add the correction for rvdw_switch to infinity */
2284 eners[0] += -4.0*M_PI/(3.0*rc3);
2285 eners[1] += 4.0*M_PI/(9.0*rc9);
2286 virs[0] += 8.0*M_PI/rc3;
2287 virs[1] += -16.0*M_PI/(3.0*rc9);
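/* These constants are the analytic tail integrals per unit C6/C12 (sketch,
 * not from the original code):
 *
 *   integral_rc^inf 4*pi*r^2 * (-1/r^6)  dr = -4*pi/(3*rc^3)
 *   integral_rc^inf 4*pi*r^2 * ( 1/r^12) dr =  4*pi/(9*rc^9)
 *
 * and the corresponding integrals of 4*pi*r^2 * r*dV/dr give the virial
 * terms 8*pi/rc^3 and -16*pi/(3*rc^9); density and pair-count factors are
 * applied later in calc_dispcorr.
 */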
2289 else if ((fr->vdwtype == evdwCUT) || (fr->vdwtype == evdwUSER))
2291 if (fr->vdwtype == evdwUSER && fplog)
2294 "WARNING: using dispersion correction with user tables\n");
2296 rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
2298 /* Contribution beyond the cut-off */
2299 eners[0] += -4.0*M_PI/(3.0*rc3);
2300 eners[1] += 4.0*M_PI/(9.0*rc9);
2301 if (fr->vdw_modifier == eintmodPOTSHIFT)
2303 /* Contribution within the cut-off */
2304 eners[0] += -4.0*M_PI/(3.0*rc3);
2305 eners[1] += 4.0*M_PI/(3.0*rc9);
2307 /* Contribution beyond the cut-off */
2308 virs[0] += 8.0*M_PI/rc3;
2309 virs[1] += -16.0*M_PI/(3.0*rc9);
2314 "Dispersion correction is not implemented for vdw-type = %s",
2315 evdw_names[fr->vdwtype]);
2317 fr->enerdiffsix = eners[0];
2318 fr->enerdifftwelve = eners[1];
2319 /* The 0.5 is due to the Gromacs definition of the virial */
2320 fr->virdiffsix = 0.5*virs[0];
2321 fr->virdifftwelve = 0.5*virs[1];
2325 void calc_dispcorr(FILE *fplog, t_inputrec *ir, t_forcerec *fr,
2326 gmx_large_int_t step, int natoms,
2327 matrix box, real lambda, tensor pres, tensor virial,
2328 real *prescorr, real *enercorr, real *dvdlcorr)
2330 gmx_bool bCorrAll, bCorrPres;
2331 real dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
2341 if (ir->eDispCorr != edispcNO)
2343 bCorrAll = (ir->eDispCorr == edispcAllEner ||
2344 ir->eDispCorr == edispcAllEnerPres);
2345 bCorrPres = (ir->eDispCorr == edispcEnerPres ||
2346 ir->eDispCorr == edispcAllEnerPres);
2348 invvol = 1/det(box);
2351 /* Only correct for the interactions with the inserted molecule */
2352 dens = (natoms - fr->n_tpi)*invvol;
2357 dens = natoms*invvol;
2358 ninter = 0.5*natoms;
2361 if (ir->efep == efepNO)
2363 avcsix = fr->avcsix[0];
2364 avctwelve = fr->avctwelve[0];
2368 avcsix = (1 - lambda)*fr->avcsix[0] + lambda*fr->avcsix[1];
2369 avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
2372 enerdiff = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
2373 *enercorr += avcsix*enerdiff;
2375 if (ir->efep != efepNO)
2377 dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
2381 enerdiff = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
2382 *enercorr += avctwelve*enerdiff;
2383 if (fr->efep != efepNO)
2385 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
2391 svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
2392 if (ir->eDispCorr == edispcAllEnerPres)
2394 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
2396 /* The factor 2 is because of the Gromacs virial definition */
2397 spres = -2.0*invvol*svir*PRESFAC;
2399 for (m = 0; m < DIM; m++)
2401 virial[m][m] += svir;
2402 pres[m][m] += spres;
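/* Worked example (not from the original code): for a plain cut-off without a
 * potential shift the dispersion (C6) pieces above combine to the textbook
 * tail corrections
 *
 *   E_corr = -(2/3)*pi * (N^2/V)   * <C6> / rc^3
 *   P_corr = -(4/3)*pi * (N^2/V^2) * <C6> / rc^3 * PRESFAC
 *
 * since ninter = N/2, dens = N/V and enerdiffsix = -4*pi/(3*rc^3); the C12
 * repulsion terms are only added for the AllEner/AllEnerPres settings.
 */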
2407 /* Can't currently control when it prints; for now, just print when debugging */
2412 fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
2418 "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
2419 *enercorr, spres, svir);
2423 fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
2427 if (fr->bSepDVDL && do_per_step(step, ir->nstlog))
2429 fprintf(fplog, sepdvdlformat, "Dispersion correction",
2430 *enercorr, dvdlambda);
2432 if (fr->efep != efepNO)
2434 *dvdlcorr += dvdlambda;
2439 void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
2440 t_graph *graph, rvec x[])
2444 fprintf(fplog, "Removing pbc first time\n");
2446 calc_shifts(box, fr->shift_vec);
2449 mk_mshift(fplog, graph, fr->ePBC, box, x);
2452 p_graph(debug, "do_pbc_first 1", graph);
2454 shift_self(graph, box, x);
2455 /* By doing an extra mk_mshift the molecules that are broken
2456 * because they were e.g. imported from other software
2457 * will be made whole again. Such are the healing powers
2460 mk_mshift(fplog, graph, fr->ePBC, box, x);
2463 p_graph(debug, "do_pbc_first 2", graph);
2468 fprintf(fplog, "Done rmpbc\n");
2472 static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2473 gmx_mtop_t *mtop, rvec x[],
2478 gmx_molblock_t *molb;
2480 if (bFirst && fplog)
2482 fprintf(fplog, "Removing pbc first time\n");
2487 for (mb = 0; mb < mtop->nmolblock; mb++)
2489 molb = &mtop->molblock[mb];
2490 if (molb->natoms_mol == 1 ||
2491 (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
2493 /* Just one atom or charge group in the molecule, no PBC required */
2494 as += molb->nmol*molb->natoms_mol;
2498 /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
2499 mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
2500 0, molb->natoms_mol, FALSE, FALSE, graph);
2502 for (mol = 0; mol < molb->nmol; mol++)
2504 mk_mshift(fplog, graph, ePBC, box, x+as);
2506 shift_self(graph, box, x+as);
2507 /* The molecule is whole now.
2508 * We don't need the second mk_mshift call as in do_pbc_first,
2509 * since we no longer need this graph.
2512 as += molb->natoms_mol;
2520 void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
2521 gmx_mtop_t *mtop, rvec x[])
2523 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
2526 void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2527 gmx_mtop_t *mtop, rvec x[])
2529 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
2532 void finish_run(FILE *fplog, t_commrec *cr, const char *confout,
2533 t_inputrec *inputrec,
2534 t_nrnb nrnb[], gmx_wallcycle_t wcycle,
2535 gmx_runtime_t *runtime,
2536 wallclock_gpu_t *gputimes,
2538 gmx_bool bWriteStat)
2541 t_nrnb *nrnb_tot = NULL;
2545 wallcycle_sum(cr, wcycle);
2551 MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
2552 cr->mpi_comm_mysim);
2560 #if defined(GMX_MPI) && !defined(GMX_THREAD_MPI)
2563 /* reduce nodetime over all MPI processes in the current simulation */
2565 MPI_Allreduce(&runtime->proctime, &sum, 1, MPI_DOUBLE, MPI_SUM,
2566 cr->mpi_comm_mysim);
2567 runtime->proctime = sum;
2573 print_flop(fplog, nrnb_tot, &nbfs, &mflop);
2580 if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
2582 print_dd_statistics(cr, inputrec, fplog);
2594 snew(nrnb_all, cr->nnodes);
2595 nrnb_all[0] = *nrnb;
2596 for (s = 1; s < cr->nnodes; s++)
2598 MPI_Recv(nrnb_all[s].n, eNRNB, MPI_DOUBLE, s, 0,
2599 cr->mpi_comm_mysim, &stat);
2601 pr_load(fplog, cr, nrnb_all);
2606 MPI_Send(nrnb->n, eNRNB, MPI_DOUBLE, MASTERRANK(cr), 0,
2607 cr->mpi_comm_mysim);
2614 wallcycle_print(fplog, cr->nnodes, cr->npmenodes, runtime->realtime,
2617 if (EI_DYNAMICS(inputrec->eI))
2619 delta_t = inputrec->delta_t;
2628 print_perf(fplog, runtime->proctime, runtime->realtime,
2629 cr->nnodes-cr->npmenodes,
2630 runtime->nsteps_done, delta_t, nbfs, mflop,
2635 print_perf(stderr, runtime->proctime, runtime->realtime,
2636 cr->nnodes-cr->npmenodes,
2637 runtime->nsteps_done, delta_t, nbfs, mflop,
2643 extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, real *lambda, double *lam0)
2645 /* this function works, but could probably use a logic rewrite to keep all the different
2646 types of efep straight. */
2649 t_lambda *fep = ir->fepvals;
2651 if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
2653 for (i = 0; i < efptNR; i++)
2665 *fep_state = fep->init_fep_state; /* this might overwrite the checkpoint
2666 if checkpoint is set -- a kludge is in for now
2668 for (i = 0; i < efptNR; i++)
2670 /* overwrite lambda state with init_lambda for now for backwards compatibility */
2671 if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
2673 lambda[i] = fep->init_lambda;
2676 lam0[i] = lambda[i];
2681 lambda[i] = fep->all_lambda[i][*fep_state];
2684 lam0[i] = lambda[i];
2690 /* need to rescale control temperatures to match current state */
2691 for (i = 0; i < ir->opts.ngtc; i++)
2693 if (ir->opts.ref_t[i] > 0)
2695 ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
2701 /* Send to the log the information on the current lambdas */
2704 fprintf(fplog, "Initial vector of lambda components:[ ");
2705 for (i = 0; i < efptNR; i++)
2707 fprintf(fplog, "%10.4f ", lambda[i]);
2709 fprintf(fplog, "]\n");
2715 void init_md(FILE *fplog,
2716 t_commrec *cr, t_inputrec *ir, const output_env_t oenv,
2717 double *t, double *t0,
2718 real *lambda, int *fep_state, double *lam0,
2719 t_nrnb *nrnb, gmx_mtop_t *mtop,
2721 int nfile, const t_filenm fnm[],
2722 gmx_mdoutf_t **outf, t_mdebin **mdebin,
2723 tensor force_vir, tensor shake_vir, rvec mu_tot,
2724 gmx_bool *bSimAnn, t_vcm **vcm, t_state *state, unsigned long Flags)
2729 /* Initial values */
2730 *t = *t0 = ir->init_t;
2733 for (i = 0; i < ir->opts.ngtc; i++)
2735 /* set bSimAnn if any group is being annealed */
2736 if (ir->opts.annealing[i] != eannNO)
2743 update_annealing_target_temp(&(ir->opts), ir->init_t);
2746 /* Initialize lambda variables */
2747 initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
2751 *upd = init_update(fplog, ir);
2757 *vcm = init_vcm(fplog, &mtop->groups, ir);
2760 if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
2762 if (ir->etc == etcBERENDSEN)
2764 please_cite(fplog, "Berendsen84a");
2766 if (ir->etc == etcVRESCALE)
2768 please_cite(fplog, "Bussi2007a");
2776 *outf = init_mdoutf(nfile, fnm, Flags, cr, ir, oenv);
2778 *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : (*outf)->fp_ene,
2779 mtop, ir, (*outf)->fp_dhdl);
2784 please_cite(fplog, "Fritsch12");
2785 please_cite(fplog, "Junghans10");
2787 /* Initiate variables */
2788 clear_mat(force_vir);
2789 clear_mat(shake_vir);