2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team,
6 * check out http://www.gromacs.org for more information.
7 * Copyright (c) 2012,2013, by the GROMACS development team, led by
8 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
9 * others, as listed in the AUTHORS file in the top-level source
10 * directory and at http://www.gromacs.org.
12 * GROMACS is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public License
14 * as published by the Free Software Foundation; either version 2.1
15 * of the License, or (at your option) any later version.
17 * GROMACS is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with GROMACS; if not, see
24 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 * If you want to redistribute modifications to GROMACS, please
28 * consider that scientific software is very special. Version
29 * control is crucial - bugs must be traceable. We will be happy to
30 * consider code for inclusion in the official distribution, but
31 * derived work must not be called official GROMACS. Details are found
32 * in the README & COPYING files - if they are missing, get the
33 * official version at http://www.gromacs.org.
35 * To help us fund GROMACS development, we humbly ask that you cite
36 * the research papers on the package. Check out http://www.gromacs.org.
43 #include <catamount/dclock.h>
49 #ifdef HAVE_SYS_TIME_H
53 #include "visibility.h"
63 #include "chargegroup.h"
86 #include "pull_rotation.h"
87 #include "gmx_random.h"
88 #include "mpelogging.h"
91 #include "gmx_wallcycle.h"
93 #include "nbnxn_atomdata.h"
94 #include "nbnxn_search.h"
95 #include "nbnxn_kernels/nbnxn_kernel_ref.h"
96 #include "nbnxn_kernels/nbnxn_kernel_simd_4xn.h"
97 #include "nbnxn_kernels/nbnxn_kernel_simd_2xnn.h"
98 #include "nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
103 #ifdef GMX_THREAD_MPI
110 #include "nbnxn_cuda_data_mgmt.h"
111 #include "nbnxn_cuda/nbnxn_cuda.h"
114 typedef struct gmx_timeprint {
119 /* Portable version of ctime_r implemented in src/gmxlib/string2.c, but we do not want it declared in public installed headers */
122 gmx_ctime_r(const time_t *clock, char *buf, int n);
128 #ifdef HAVE_GETTIMEOFDAY
132 gettimeofday(&t, NULL);
134 seconds = (double) t.tv_sec + 1e-6*(double)t.tv_usec;
140 seconds = time(NULL);
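/* This fallback (used when gettimeofday() is unavailable) only has one-second
 * resolution, which is still sufficient for the run-time estimates below.
 */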
147 #define difftime(end, start) ((double)(end)-(double)(start))
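/* Presumably a replacement for the standard difftime(), needed because the
 * timers here can be plain doubles (e.g. from dclock() on Catamount) rather
 * than time_t values; it simply takes the difference of the two values.
 */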
149 void print_time(FILE *out, gmx_runtime_t *runtime, gmx_large_int_t step,
150 t_inputrec *ir, t_commrec *cr)
153 char timebuf[STRLEN];
157 #ifndef GMX_THREAD_MPI
163 fprintf(out, "step %s", gmx_step_str(step, buf));
164 if ((step >= ir->nstlist))
166 runtime->last = gmx_gettime();
167 dt = difftime(runtime->last, runtime->real);
168 runtime->time_per_step = dt/(step - ir->init_step + 1);
170 dt = (ir->nsteps + ir->init_step - step)*runtime->time_per_step;
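/* dt now holds the estimated wall-clock seconds remaining:
 * (steps left) * (measured seconds per step so far).
 */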
176 finish = (time_t) (runtime->last + dt);
177 gmx_ctime_r(&finish, timebuf, STRLEN);
178 sprintf(buf, "%s", timebuf);
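/* gmx_ctime_r() terminates the string with a newline; strip it below so the
 * "will finish" message stays on a single line.
 */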
179 buf[strlen(buf)-1] = '\0';
180 fprintf(out, ", will finish %s", buf);
184 fprintf(out, ", remaining runtime: %5d s ", (int)dt);
189 fprintf(out, " performance: %.1f ns/day ",
190 ir->delta_t/1000*24*60*60/runtime->time_per_step);
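/* delta_t is in ps per step: dividing by 1000 converts to ns per step, and
 * multiplying by 24*60*60 over the measured seconds per step gives ns/day.
 */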
193 #ifndef GMX_THREAD_MPI
207 static double set_proctime(gmx_runtime_t *runtime)
213 prev = runtime->proc;
214 runtime->proc = dclock();
216 diff = runtime->proc - prev;
220 prev = runtime->proc;
221 runtime->proc = clock();
223 diff = (double)(runtime->proc - prev)/(double)CLOCKS_PER_SEC;
227 /* The counter has probably looped, ignore this data */
234 void runtime_start(gmx_runtime_t *runtime)
236 runtime->real = gmx_gettime();
238 set_proctime(runtime);
239 runtime->realtime = 0;
240 runtime->proctime = 0;
242 runtime->time_per_step = 0;
245 void runtime_end(gmx_runtime_t *runtime)
251 runtime->proctime += set_proctime(runtime);
252 runtime->realtime = now - runtime->real;
256 void runtime_upd_proc(gmx_runtime_t *runtime)
258 runtime->proctime += set_proctime(runtime);
261 void print_date_and_time(FILE *fplog, int nodeid, const char *title,
262 const gmx_runtime_t *runtime)
265 char timebuf[STRLEN];
266 char time_string[STRLEN];
273 tmptime = (time_t) runtime->real;
274 gmx_ctime_r(&tmptime, timebuf, STRLEN);
278 tmptime = (time_t) gmx_gettime();
279 gmx_ctime_r(&tmptime, timebuf, STRLEN);
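/* Copy only the printable part of the ctime string; the loop below stops at
 * the trailing newline (the first character below ' ').
 */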
281 for (i = 0; timebuf[i] >= ' '; i++)
283 time_string[i] = timebuf[i];
285 time_string[i] = '\0';
287 fprintf(fplog, "%s on node %d %s\n", title, nodeid, time_string);
291 static void sum_forces(int start, int end, rvec f[], rvec flr[])
297 pr_rvecs(debug, 0, "fsr", f+start, end-start);
298 pr_rvecs(debug, 0, "flr", flr+start, end-start);
300 for (i = start; (i < end); i++)
302 rvec_inc(f[i], flr[i]);
307 * calc_f_el calculates forces due to an electric field.
309 * The force is in kJ mol^-1 nm^-1: charge (in e) times field (in kJ mol^-1 nm^-1 e^-1).
311 * Et[] contains the parameters for the time dependent
312 * part of the field (not yet used).
313 * Ex[] contains the parameters for
314 * the spatial dependent part of the field. You can have cool periodic
315 * fields in principle, but only a constant field is supported
317 * The function should return the energy due to the electric field
318 * (if any) but for now returns 0.
321 * There can be problems with the virial.
322 * Since the field is not self-consistent this is unavoidable.
323 * For neutral molecules the virial is correct within this approximation.
324 * For neutral systems with many charged molecules the error is small.
325 * But for systems with a net charge or a few charged molecules
326 * the error can be significant when the field is high.
327 * Solution: implement a self-consistent electric field into PME.
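 *
 * A sketch of the applied field, assuming Et[m].a holds {omega, t0, sigma}
 * (inferred from the expressions below):
 *   E_m(t) = Ex[m].a[0] * cos(omega*(t - t0)) * exp(-(t - t0)^2/(2*sigma^2))
 * for a pulsed field, or Ex[m].a[0] * cos(omega*t) otherwise; the force on
 * atom i is then f_i[m] += q_i * E_m(t) after conversion with FIELDFAC.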
329 static void calc_f_el(FILE *fp, int start, int homenr,
330 real charge[], rvec x[], rvec f[],
331 t_cosines Ex[], t_cosines Et[], double t)
337 for (m = 0; (m < DIM); m++)
344 Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));
348 Ext[m] = cos(Et[m].a[0]*t);
357 /* Convert the field strength from V/nm to MD-units */
358 Ext[m] *= Ex[m].a[0]*FIELDFAC;
359 for (i = start; (i < start+homenr); i++)
361 f[i][m] += charge[i]*Ext[m];
371 fprintf(fp, "%10g %10g %10g %10g #FIELD\n", t,
372 Ext[XX]/FIELDFAC, Ext[YY]/FIELDFAC, Ext[ZZ]/FIELDFAC);
376 static void calc_virial(FILE *fplog, int start, int homenr, rvec x[], rvec f[],
377 tensor vir_part, t_graph *graph, matrix box,
378 t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
383 /* The short-range virial from surrounding boxes */
385 calc_vir(fplog, SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
386 inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
388 /* Calculate partial virial, for local atoms only, based on short range.
389 * Total virial is computed in global_stat, called from do_md
391 f_calc_vir(fplog, start, start+homenr, x, f, vir_part, graph, box);
392 inc_nrnb(nrnb, eNR_VIRIAL, homenr);
394 /* Add position restraint contribution */
395 for (i = 0; i < DIM; i++)
397 vir_part[i][i] += fr->vir_diag_posres[i];
400 /* Add wall contribution */
401 for (i = 0; i < DIM; i++)
403 vir_part[i][ZZ] += fr->vir_wall_z[i];
408 pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
412 static void posres_wrapper(FILE *fplog,
418 matrix box, rvec x[],
420 gmx_enerdata_t *enerd,
428 /* Position restraints always require full pbc */
429 set_pbc(&pbc, ir->ePBC, box);
431 v = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
432 top->idef.iparams_posres,
433 (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
434 ir->ePBC == epbcNONE ? NULL : &pbc,
435 lambda[efptRESTRAINT], &dvdl,
436 fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
439 fprintf(fplog, sepdvdlformat,
440 interaction_function[F_POSRES].longname, v, dvdl);
442 enerd->term[F_POSRES] += v;
443 /* If just the force constant changes, the FEP term is linear,
444 * but if the reference position changes, it is not.
446 enerd->dvdl_nonlin[efptRESTRAINT] += dvdl;
447 inc_nrnb(nrnb, eNR_POSRES, top->idef.il[F_POSRES].nr/2);
449 if ((ir->fepvals->n_lambda > 0) && (flags & GMX_FORCE_DHDL))
451 for (i = 0; i < enerd->n_lambda; i++)
453 real dvdl_dum, lambda_dum;
455 lambda_dum = (i == 0 ? lambda[efptRESTRAINT] : ir->fepvals->all_lambda[efptRESTRAINT][i-1]);
456 v = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
457 top->idef.iparams_posres,
458 (const rvec*)x, NULL, NULL,
459 ir->ePBC == epbcNONE ? NULL : &pbc, lambda_dum, &dvdl,
460 fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
461 enerd->enerpart_lambda[i] += v;
466 static void pull_potential_wrapper(FILE *fplog,
470 matrix box, rvec x[],
474 gmx_enerdata_t *enerd,
481 /* Calculate the center of mass forces, this requires communication,
482 * which is why pull_potential is called close to other communication.
483 * The virial contribution is calculated directly,
484 * which is why we call pull_potential after calc_virial.
486 set_pbc(&pbc, ir->ePBC, box);
488 enerd->term[F_COM_PULL] +=
489 pull_potential(ir->ePull, ir->pull, mdatoms, &pbc,
490 cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
493 fprintf(fplog, sepdvdlformat, "Com pull", enerd->term[F_COM_PULL], dvdl);
495 enerd->dvdl_lin[efptRESTRAINT] += dvdl;
498 static void pme_receive_force_ener(FILE *fplog,
501 gmx_wallcycle_t wcycle,
502 gmx_enerdata_t *enerd,
506 float cycles_ppdpme, cycles_seppme;
508 cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
509 dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
511 /* In case of node-splitting, the PP nodes receive the long-range
512 * forces, virial and energy from the PME nodes here.
514 wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
516 gmx_pme_receive_f(cr, fr->f_novirsum, fr->vir_el_recip, &e, &dvdl,
520 fprintf(fplog, sepdvdlformat, "PME mesh", e, dvdl);
522 enerd->term[F_COUL_RECIP] += e;
523 enerd->dvdl_lin[efptCOUL] += dvdl;
526 dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
528 wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
531 static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
532 gmx_large_int_t step, real pforce, rvec *x, rvec *f)
536 char buf[STEPSTRSIZE];
539 for (i = md->start; i < md->start+md->homenr; i++)
542 /* We also catch NAN, if the compiler does not optimize this away. */
543 if (fn2 >= pf2 || fn2 != fn2)
545 fprintf(fp, "step %s atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
546 gmx_step_str(step, buf),
547 ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], sqrt(fn2));
552 static void post_process_forces(FILE *fplog,
554 gmx_large_int_t step,
555 t_nrnb *nrnb, gmx_wallcycle_t wcycle,
557 matrix box, rvec x[],
562 t_forcerec *fr, gmx_vsite_t *vsite,
569 /* Spread the mesh force on virtual sites to the other particles...
570 * This is parallelized. MPI communication is performed
571 * if the constructing atoms aren't local.
573 wallcycle_start(wcycle, ewcVSITESPREAD);
574 spread_vsite_f(fplog, vsite, x, fr->f_novirsum, NULL,
575 (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
577 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
578 wallcycle_stop(wcycle, ewcVSITESPREAD);
580 if (flags & GMX_FORCE_VIRIAL)
582 /* Now add the forces, this is local */
585 sum_forces(0, fr->f_novirsum_n, f, fr->f_novirsum);
589 sum_forces(mdatoms->start, mdatoms->start+mdatoms->homenr,
592 if (EEL_FULL(fr->eeltype))
594 /* Add the mesh contribution to the virial */
595 m_add(vir_force, fr->vir_el_recip, vir_force);
599 pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
604 if (fr->print_force >= 0)
606 print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
610 static void do_nb_verlet(t_forcerec *fr,
611 interaction_const_t *ic,
612 gmx_enerdata_t *enerd,
613 int flags, int ilocality,
616 gmx_wallcycle_t wcycle)
618 int nnbl, kernel_type, enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
620 nonbonded_verlet_group_t *nbvg;
622 if (!(flags & GMX_FORCE_NONBONDED))
624 /* skip non-bonded calculation */
628 nbvg = &fr->nbv->grp[ilocality];
630 /* CUDA kernel launch overhead is already timed separately */
631 if (fr->cutoff_scheme != ecutsVERLET)
633 gmx_incons("Invalid cut-off scheme passed!");
636 if (nbvg->kernel_type != nbnxnk8x8x8_CUDA)
638 wallcycle_sub_start(wcycle, ewcsNONBONDED);
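/* Dispatch on the pair-list/kernel layout: plain C 4x4, SIMD 4xN and 2xNN
 * kernels on the CPU, the CUDA kernel on the GPU, and the plain-C 8x8x8
 * "gpu_ref" kernel that mirrors the GPU layout for emulation and testing.
 */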
640 switch (nbvg->kernel_type)
642 case nbnxnk4x4_PlainC:
643 nbnxn_kernel_ref(&nbvg->nbl_lists,
649 enerd->grpp.ener[egCOULSR],
651 enerd->grpp.ener[egBHAMSR] :
652 enerd->grpp.ener[egLJSR]);
655 case nbnxnk4xN_SIMD_4xN:
656 nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
663 enerd->grpp.ener[egCOULSR],
665 enerd->grpp.ener[egBHAMSR] :
666 enerd->grpp.ener[egLJSR]);
668 case nbnxnk4xN_SIMD_2xNN:
669 nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
676 enerd->grpp.ener[egCOULSR],
678 enerd->grpp.ener[egBHAMSR] :
679 enerd->grpp.ener[egLJSR]);
682 case nbnxnk8x8x8_CUDA:
683 nbnxn_cuda_launch_kernel(fr->nbv->cu_nbv, nbvg->nbat, flags, ilocality);
686 case nbnxnk8x8x8_PlainC:
687 nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
692 nbvg->nbat->out[0].f,
694 enerd->grpp.ener[egCOULSR],
696 enerd->grpp.ener[egBHAMSR] :
697 enerd->grpp.ener[egLJSR]);
701 gmx_incons("Invalid nonbonded kernel type passed!");
704 if (nbvg->kernel_type != nbnxnk8x8x8_CUDA)
706 wallcycle_sub_stop(wcycle, ewcsNONBONDED);
709 if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
711 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
713 else if (nbvg->ewald_excl == ewaldexclTable)
715 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
719 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
721 enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
722 if (flags & GMX_FORCE_ENERGY)
724 /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
725 enr_nbnxn_kernel_ljc += 1;
726 enr_nbnxn_kernel_lj += 1;
729 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
730 nbvg->nbl_lists.natpair_ljq);
731 inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
732 nbvg->nbl_lists.natpair_lj);
733 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
734 nbvg->nbl_lists.natpair_q);
737 void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
738 t_inputrec *inputrec,
739 gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
742 gmx_groups_t *groups,
743 matrix box, rvec x[], history_t *hist,
747 gmx_enerdata_t *enerd, t_fcdata *fcd,
748 real *lambda, t_graph *graph,
749 t_forcerec *fr, interaction_const_t *ic,
750 gmx_vsite_t *vsite, rvec mu_tot,
751 double t, FILE *field, gmx_edsam_t ed,
759 gmx_bool bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
760 gmx_bool bDoLongRange, bDoForces, bSepLRF, bUseGPU, bUseOrEmulGPU;
761 gmx_bool bDiffKernels = FALSE;
763 rvec vzero, box_diag;
765 float cycles_pme, cycles_force;
766 nonbonded_verlet_t *nbv;
770 nb_kernel_type = fr->nbv->grp[0].kernel_type;
772 start = mdatoms->start;
773 homenr = mdatoms->homenr;
775 bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));
777 clear_mat(vir_force);
780 if (DOMAINDECOMP(cr))
782 cg1 = cr->dd->ncg_tot;
793 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
794 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
795 bFillGrid = (bNS && bStateChanged);
796 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
797 bDoLongRange = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DO_LR));
798 bDoForces = (flags & GMX_FORCE_FORCES);
799 bSepLRF = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
800 bUseGPU = fr->nbv->bUseGPU;
801 bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);
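/* nbnxnk8x8x8_PlainC is the plain-C reference version of the GPU kernel,
 * so bUseOrEmulGPU also covers GPU emulation on the CPU.
 */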
805 update_forcerec(fplog, fr, box);
807 if (NEED_MUTOT(*inputrec))
809 /* Calculate total (local) dipole moment in a temporary common array.
810 * This makes it possible to sum them over nodes faster.
812 calc_mu(start, homenr,
813 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
818 if (fr->ePBC != epbcNONE)
820 /* Compute shift vectors every step,
821 * because of pressure coupling or box deformation!
823 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
825 calc_shifts(box, fr->shift_vec);
830 put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
831 inc_nrnb(nrnb, eNR_SHIFTX, homenr);
833 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
835 unshift_self(graph, box, x);
839 nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
840 fr->shift_vec, nbv->grp[0].nbat);
843 if (!(cr->duty & DUTY_PME))
845 /* Send particle coordinates to the pme nodes.
846 * Since this is only implemented for domain decomposition
847 * and domain decomposition does not use the graph,
848 * we do not need to worry about shifting.
851 wallcycle_start(wcycle, ewcPP_PMESENDX);
852 GMX_MPE_LOG(ev_send_coordinates_start);
854 bBS = (inputrec->nwall == 2);
858 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
861 gmx_pme_send_x(cr, bBS ? boxs : box, x,
862 mdatoms->nChargePerturbed, lambda[efptCOUL],
863 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)), step);
865 GMX_MPE_LOG(ev_send_coordinates_finish);
866 wallcycle_stop(wcycle, ewcPP_PMESENDX);
870 /* do gridding for pair search */
873 if (graph && bStateChanged)
875 /* Calculate intramolecular shift vectors to make molecules whole */
876 mk_mshift(fplog, graph, fr->ePBC, box, x);
880 box_diag[XX] = box[XX][XX];
881 box_diag[YY] = box[YY][YY];
882 box_diag[ZZ] = box[ZZ][ZZ];
884 wallcycle_start(wcycle, ewcNS);
887 wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
888 nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
890 0, mdatoms->homenr, -1, fr->cginfo, x,
892 nbv->grp[eintLocal].kernel_type,
893 nbv->grp[eintLocal].nbat);
894 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
898 wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
899 nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
901 nbv->grp[eintNonlocal].kernel_type,
902 nbv->grp[eintNonlocal].nbat);
903 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
906 if (nbv->ngrp == 1 ||
907 nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
909 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
910 nbv->nbs, mdatoms, fr->cginfo);
914 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
915 nbv->nbs, mdatoms, fr->cginfo);
916 nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
917 nbv->nbs, mdatoms, fr->cginfo);
919 wallcycle_stop(wcycle, ewcNS);
922 /* initialize the GPU atom data and copy shift vector */
927 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
928 nbnxn_cuda_init_atomdata(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
929 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
932 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
933 nbnxn_cuda_upload_shiftvec(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
934 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
937 /* do local pair search */
940 wallcycle_start_nocount(wcycle, ewcNS);
941 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
942 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
945 nbv->min_ci_balanced,
946 &nbv->grp[eintLocal].nbl_lists,
948 nbv->grp[eintLocal].kernel_type,
950 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
954 /* initialize local pair-list on the GPU */
955 nbnxn_cuda_init_pairlist(nbv->cu_nbv,
956 nbv->grp[eintLocal].nbl_lists.nbl[0],
959 wallcycle_stop(wcycle, ewcNS);
963 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
964 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
965 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
966 nbv->grp[eintLocal].nbat);
967 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
968 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
973 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
974 /* launch local nonbonded F on GPU */
975 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
977 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
980 /* Communicate coordinates and sum dipole if necessary +
981 do non-local pair search */
982 if (DOMAINDECOMP(cr))
984 bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
985 nbv->grp[eintLocal].kernel_type);
989 /* With GPU+CPU non-bonded calculations we need to copy
990 * the local coordinates to the non-local nbat struct
991 * (in CPU format) as the non-local kernel call also
992 * calculates the local - non-local interactions.
994 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
995 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
996 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
997 nbv->grp[eintNonlocal].nbat);
998 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
999 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1004 wallcycle_start_nocount(wcycle, ewcNS);
1005 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1009 nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
1012 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
1015 nbv->min_ci_balanced,
1016 &nbv->grp[eintNonlocal].nbl_lists,
1018 nbv->grp[eintNonlocal].kernel_type,
1021 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1023 if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_CUDA)
1025 /* initialize non-local pair-list on the GPU */
1026 nbnxn_cuda_init_pairlist(nbv->cu_nbv,
1027 nbv->grp[eintNonlocal].nbl_lists.nbl[0],
1030 wallcycle_stop(wcycle, ewcNS);
1034 wallcycle_start(wcycle, ewcMOVEX);
1035 dd_move_x(cr->dd, box, x);
1037 /* When we don't need the total dipole we sum it in global_stat */
1038 if (bStateChanged && NEED_MUTOT(*inputrec))
1040 gmx_sumd(2*DIM, mu, cr);
1042 wallcycle_stop(wcycle, ewcMOVEX);
1044 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1045 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
1046 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
1047 nbv->grp[eintNonlocal].nbat);
1048 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
1049 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1052 if (bUseGPU && !bDiffKernels)
1054 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
1055 /* launch non-local nonbonded F on GPU */
1056 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
1058 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1064 /* launch D2H copy-back F */
1065 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1066 if (DOMAINDECOMP(cr) && !bDiffKernels)
1068 nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintNonlocal].nbat,
1069 flags, eatNonlocal);
1071 nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintLocal].nbat,
1073 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1076 if (bStateChanged && NEED_MUTOT(*inputrec))
1080 gmx_sumd(2*DIM, mu, cr);
1083 for (i = 0; i < 2; i++)
1085 for (j = 0; j < DIM; j++)
1087 fr->mu_tot[i][j] = mu[i*DIM + j];
1091 if (fr->efep == efepNO)
1093 copy_rvec(fr->mu_tot[0], mu_tot);
1097 for (j = 0; j < DIM; j++)
1100 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
1101 lambda[efptCOUL]*fr->mu_tot[1][j];
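/* With free-energy perturbation the total dipole is interpolated linearly
 * between the A and B states using the coulomb lambda.
 */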
1105 /* Reset energies */
1106 reset_enerdata(&(inputrec->opts), fr, bNS, enerd, MASTER(cr));
1107 clear_rvecs(SHIFTS, fr->fshift);
1109 if (DOMAINDECOMP(cr))
1111 if (!(cr->duty & DUTY_PME))
1113 wallcycle_start(wcycle, ewcPPDURINGPME);
1114 dd_force_flop_start(cr->dd, nrnb);
1120 /* Enforced rotation has its own cycle counter that starts after the collective
1121 * coordinates have been communicated. It is added to ddCyclF to allow
1122 * for proper load-balancing */
1123 wallcycle_start(wcycle, ewcROT);
1124 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1125 wallcycle_stop(wcycle, ewcROT);
1128 /* Start the force cycle counter.
1129 * This counter is stopped in do_forcelow_level.
1130 * No parallel communication should occur while this counter is running,
1131 * since that will interfere with the dynamic load balancing.
1133 wallcycle_start(wcycle, ewcFORCE);
1136 /* Reset forces for which the virial is calculated separately:
1137 * PME/Ewald forces if necessary */
1138 if (fr->bF_NoVirSum)
1140 if (flags & GMX_FORCE_VIRIAL)
1142 fr->f_novirsum = fr->f_novirsum_alloc;
1143 GMX_BARRIER(cr->mpi_comm_mygroup);
1146 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1150 clear_rvecs(homenr, fr->f_novirsum+start);
1152 GMX_BARRIER(cr->mpi_comm_mygroup);
1156 /* We are not calculating the pressure so we do not need
1157 * a separate array for forces that do not contribute
1164 /* Clear the short- and long-range forces */
1165 clear_rvecs(fr->natoms_force_constr, f);
1166 if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1168 clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1171 clear_rvec(fr->vir_diag_posres);
1173 GMX_BARRIER(cr->mpi_comm_mygroup);
1176 if (inputrec->ePull == epullCONSTRAINT)
1178 clear_pull_forces(inputrec->pull);
1181 /* We calculate the non-bonded forces, when done on the CPU, here.
1182 * We do this before calling do_force_lowlevel, since there the bonded
1183 * forces are calculated before PME, which does communication.
1184 * With this order, non-bonded and bonded force calculation imbalance
1185 * can be balanced out by the domain decomposition load balancing.
1190 /* Maybe we should move this into do_force_lowlevel */
1191 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
1195 if (!bUseOrEmulGPU || bDiffKernels)
1199 if (DOMAINDECOMP(cr))
1201 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
1202 bDiffKernels ? enbvClearFYes : enbvClearFNo,
1212 aloc = eintNonlocal;
1215 /* Add all the non-bonded force to the normal force array.
1216 * This can be split into a local and a non-local part when overlapping
1217 * communication with calculation when using domain decomposition.
1219 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1220 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1221 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1222 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
1223 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1224 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1225 wallcycle_start_nocount(wcycle, ewcFORCE);
1227 /* if there are multiple fshift output buffers reduce them */
1228 if ((flags & GMX_FORCE_VIRIAL) &&
1229 nbv->grp[aloc].nbl_lists.nnbl > 1)
1231 nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
1236 /* update QMMMrec, if necessary */
1239 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1242 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
1244 posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
1245 f, enerd, lambda, fr);
1248 /* Compute the bonded and non-bonded energies and optionally forces */
1249 do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
1250 cr, nrnb, wcycle, mdatoms, &(inputrec->opts),
1251 x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, mtop, top, fr->born,
1252 &(top->atomtypes), bBornRadii, box,
1253 inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
1254 flags, &cycles_pme);
1258 if (do_per_step(step, inputrec->nstcalclr))
1260 /* Add the long range forces to the short range forces */
1261 for (i = 0; i < fr->natoms_force_constr; i++)
1263 rvec_add(fr->f_twin[i], f[i], f[i]);
1268 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1269 GMX_BARRIER(cr->mpi_comm_mygroup);
1273 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1276 if (bUseOrEmulGPU && !bDiffKernels)
1278 /* wait for non-local forces (or calculate in emulation mode) */
1279 if (DOMAINDECOMP(cr))
1283 wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
1284 nbnxn_cuda_wait_gpu(nbv->cu_nbv,
1285 nbv->grp[eintNonlocal].nbat,
1287 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1289 cycles_force += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
1293 wallcycle_start_nocount(wcycle, ewcFORCE);
1294 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
1296 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1298 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1299 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1300 /* skip the reduction if there was no non-local work to do */
1301 if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
1303 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
1304 nbv->grp[eintNonlocal].nbat, f);
1306 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1307 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1313 /* Communicate the forces */
1316 wallcycle_start(wcycle, ewcMOVEF);
1317 if (DOMAINDECOMP(cr))
1319 dd_move_f(cr->dd, f, fr->fshift);
1320 /* Do we need to communicate the separate force array
1321 * for terms that do not contribute to the single sum virial?
1322 * Position restraints and electric fields do not introduce
1323 * inter-cg forces, only full electrostatics methods do.
1324 * When we do not calculate the virial, fr->f_novirsum = f,
1325 * so we have already communicated these forces.
1327 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1328 (flags & GMX_FORCE_VIRIAL))
1330 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1334 /* We should not update the shift forces here,
1335 * since f_twin is already included in f.
1337 dd_move_f(cr->dd, fr->f_twin, NULL);
1340 wallcycle_stop(wcycle, ewcMOVEF);
1346 /* wait for local forces (or calculate in emulation mode) */
1349 wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
1350 nbnxn_cuda_wait_gpu(nbv->cu_nbv,
1351 nbv->grp[eintLocal].nbat,
1353 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1355 wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
1357 /* now clear the GPU outputs while we finish the step on the CPU */
1359 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1360 nbnxn_cuda_clear_outputs(nbv->cu_nbv, flags);
1361 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1365 wallcycle_start_nocount(wcycle, ewcFORCE);
1366 do_nb_verlet(fr, ic, enerd, flags, eintLocal,
1367 DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
1369 wallcycle_stop(wcycle, ewcFORCE);
1371 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1372 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1373 if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
1375 /* skip the reduction if there was no local work to do */
1376 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
1377 nbv->grp[eintLocal].nbat, f);
1379 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1380 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1383 if (DOMAINDECOMP(cr))
1385 dd_force_flop_stop(cr->dd, nrnb);
1388 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1394 if (IR_ELEC_FIELD(*inputrec))
1396 /* Compute forces due to electric field */
1397 calc_f_el(MASTER(cr) ? field : NULL,
1398 start, homenr, mdatoms->chargeA, x, fr->f_novirsum,
1399 inputrec->ex, inputrec->et, t);
1402 /* If we have NoVirSum forces, but we do not calculate the virial,
1403 * we sum fr->f_novirsum = f later.
1405 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1407 wallcycle_start(wcycle, ewcVSITESPREAD);
1408 spread_vsite_f(fplog, vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1409 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1410 wallcycle_stop(wcycle, ewcVSITESPREAD);
1414 wallcycle_start(wcycle, ewcVSITESPREAD);
1415 spread_vsite_f(fplog, vsite, x, fr->f_twin, NULL, FALSE, NULL,
1417 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1418 wallcycle_stop(wcycle, ewcVSITESPREAD);
1422 if (flags & GMX_FORCE_VIRIAL)
1424 /* Calculation of the virial must be done after vsites! */
1425 calc_virial(fplog, mdatoms->start, mdatoms->homenr, x, f,
1426 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1430 if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1432 pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
1433 f, vir_force, mdatoms, enerd, lambda, t);
1436 /* Add the forces from enforced rotation potentials (if any) */
1439 wallcycle_start(wcycle, ewcROTadd);
1440 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1441 wallcycle_stop(wcycle, ewcROTadd);
1444 if (PAR(cr) && !(cr->duty & DUTY_PME))
1446 /* In case of node-splitting, the PP nodes receive the long-range
1447 * forces, virial and energy from the PME nodes here.
1449 pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
1454 post_process_forces(fplog, cr, step, nrnb, wcycle,
1455 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1459 /* Sum the potential energy terms from group contributions */
1460 sum_epot(&(inputrec->opts), &(enerd->grpp), enerd->term);
1463 void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
1464 t_inputrec *inputrec,
1465 gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1466 gmx_localtop_t *top,
1468 gmx_groups_t *groups,
1469 matrix box, rvec x[], history_t *hist,
1473 gmx_enerdata_t *enerd, t_fcdata *fcd,
1474 real *lambda, t_graph *graph,
1475 t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
1476 double t, FILE *field, gmx_edsam_t ed,
1477 gmx_bool bBornRadii,
1483 gmx_bool bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
1484 gmx_bool bDoLongRangeNS, bDoForces, bDoPotential, bSepLRF;
1485 gmx_bool bDoAdressWF;
1487 rvec vzero, box_diag;
1488 real e, v, dvdlambda[efptNR];
1490 float cycles_pme, cycles_force;
1492 start = mdatoms->start;
1493 homenr = mdatoms->homenr;
1495 bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));
1497 clear_mat(vir_force);
1501 pd_cg_range(cr, &cg0, &cg1);
1506 if (DOMAINDECOMP(cr))
1508 cg1 = cr->dd->ncg_tot;
1520 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
1521 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
1522 /* Should we update the long-range neighborlists at this step? */
1523 bDoLongRangeNS = fr->bTwinRange && bNS;
1524 /* Should we perform the long-range nonbonded evaluation inside the neighborsearching? */
1525 bFillGrid = (bNS && bStateChanged);
1526 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
1527 bDoForces = (flags & GMX_FORCE_FORCES);
1528 bDoPotential = (flags & GMX_FORCE_ENERGY);
1529 bSepLRF = ((inputrec->nstcalclr > 1) && bDoForces &&
1530 (flags & GMX_FORCE_SEPLRF) && (flags & GMX_FORCE_DO_LR));
1532 /* should probably move this to the forcerec since it doesn't change */
1533 bDoAdressWF = ((fr->adress_type != eAdressOff));
1537 update_forcerec(fplog, fr, box);
1539 if (NEED_MUTOT(*inputrec))
1541 /* Calculate total (local) dipole moment in a temporary common array.
1542 * This makes it possible to sum them over nodes faster.
1544 calc_mu(start, homenr,
1545 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
1550 if (fr->ePBC != epbcNONE)
1552 /* Compute shift vectors every step,
1553 * because of pressure coupling or box deformation!
1555 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
1557 calc_shifts(box, fr->shift_vec);
1562 put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
1563 &(top->cgs), x, fr->cg_cm);
1564 inc_nrnb(nrnb, eNR_CGCM, homenr);
1565 inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
1567 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1569 unshift_self(graph, box, x);
1574 calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
1575 inc_nrnb(nrnb, eNR_CGCM, homenr);
1582 move_cgcm(fplog, cr, fr->cg_cm);
1586 pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
1591 if (!(cr->duty & DUTY_PME))
1593 /* Send particle coordinates to the pme nodes.
1594 * Since this is only implemented for domain decomposition
1595 * and domain decomposition does not use the graph,
1596 * we do not need to worry about shifting.
1599 wallcycle_start(wcycle, ewcPP_PMESENDX);
1600 GMX_MPE_LOG(ev_send_coordinates_start);
1602 bBS = (inputrec->nwall == 2);
1605 copy_mat(box, boxs);
1606 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
1609 gmx_pme_send_x(cr, bBS ? boxs : box, x,
1610 mdatoms->nChargePerturbed, lambda[efptCOUL],
1611 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)), step);
1613 GMX_MPE_LOG(ev_send_coordinates_finish);
1614 wallcycle_stop(wcycle, ewcPP_PMESENDX);
1616 #endif /* GMX_MPI */
1618 /* Communicate coordinates and sum dipole if necessary */
1621 wallcycle_start(wcycle, ewcMOVEX);
1622 if (DOMAINDECOMP(cr))
1624 dd_move_x(cr->dd, box, x);
1628 move_x(fplog, cr, GMX_LEFT, GMX_RIGHT, x, nrnb);
1630 wallcycle_stop(wcycle, ewcMOVEX);
1633 /* update adress weight beforehand */
1634 if (bStateChanged && bDoAdressWF)
1636 /* need pbc for adress weight calculation with pbc_dx */
1637 set_pbc(&pbc, inputrec->ePBC, box);
1638 if (fr->adress_site == eAdressSITEcog)
1640 update_adress_weights_cog(top->idef.iparams, top->idef.il, x, fr, mdatoms,
1641 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1643 else if (fr->adress_site == eAdressSITEcom)
1645 update_adress_weights_com(fplog, cg0, cg1, &(top->cgs), x, fr, mdatoms,
1646 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1648 else if (fr->adress_site == eAdressSITEatomatom)
1650 update_adress_weights_atom_per_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1651 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1655 update_adress_weights_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1656 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1660 if (NEED_MUTOT(*inputrec))
1667 gmx_sumd(2*DIM, mu, cr);
1669 for (i = 0; i < 2; i++)
1671 for (j = 0; j < DIM; j++)
1673 fr->mu_tot[i][j] = mu[i*DIM + j];
1677 if (fr->efep == efepNO)
1679 copy_rvec(fr->mu_tot[0], mu_tot);
1683 for (j = 0; j < DIM; j++)
1686 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
1691 /* Reset energies */
1692 reset_enerdata(&(inputrec->opts), fr, bNS, enerd, MASTER(cr));
1693 clear_rvecs(SHIFTS, fr->fshift);
1697 wallcycle_start(wcycle, ewcNS);
1699 if (graph && bStateChanged)
1701 /* Calculate intramolecular shift vectors to make molecules whole */
1702 mk_mshift(fplog, graph, fr->ePBC, box, x);
1705 /* Do the actual neighbour searching and, if twin-range electrostatics
1706 * is used, also do the calculation of long-range forces and energies.
1708 for (i = 0; i < efptNR; i++)
1712 ns(fplog, fr, x, box,
1713 groups, &(inputrec->opts), top, mdatoms,
1714 cr, nrnb, lambda, dvdlambda, &enerd->grpp, bFillGrid,
1718 fprintf(fplog, sepdvdlformat, "LR non-bonded", 0.0, dvdlambda);
1720 enerd->dvdl_lin[efptVDW] += dvdlambda[efptVDW];
1721 enerd->dvdl_lin[efptCOUL] += dvdlambda[efptCOUL];
1723 wallcycle_stop(wcycle, ewcNS);
1726 if (inputrec->implicit_solvent && bNS)
1728 make_gb_nblist(cr, inputrec->gb_algorithm, inputrec->rlist,
1729 x, box, fr, &top->idef, graph, fr->born);
1732 if (DOMAINDECOMP(cr))
1734 if (!(cr->duty & DUTY_PME))
1736 wallcycle_start(wcycle, ewcPPDURINGPME);
1737 dd_force_flop_start(cr->dd, nrnb);
1743 /* Enforced rotation has its own cycle counter that starts after the collective
1744 * coordinates have been communicated. It is added to ddCyclF to allow
1745 * for proper load-balancing */
1746 wallcycle_start(wcycle, ewcROT);
1747 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1748 wallcycle_stop(wcycle, ewcROT);
1751 /* Start the force cycle counter.
1752 * This counter is stopped in do_forcelow_level.
1753 * No parallel communication should occur while this counter is running,
1754 * since that will interfere with the dynamic load balancing.
1756 wallcycle_start(wcycle, ewcFORCE);
1760 /* Reset forces for which the virial is calculated separately:
1761 * PME/Ewald forces if necessary */
1762 if (fr->bF_NoVirSum)
1764 if (flags & GMX_FORCE_VIRIAL)
1766 fr->f_novirsum = fr->f_novirsum_alloc;
1767 GMX_BARRIER(cr->mpi_comm_mygroup);
1770 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1774 clear_rvecs(homenr, fr->f_novirsum+start);
1776 GMX_BARRIER(cr->mpi_comm_mygroup);
1780 /* We are not calculating the pressure so we do not need
1781 * a separate array for forces that do not contribute
1788 /* Clear the short- and long-range forces */
1789 clear_rvecs(fr->natoms_force_constr, f);
1790 if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1792 clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1795 clear_rvec(fr->vir_diag_posres);
1797 GMX_BARRIER(cr->mpi_comm_mygroup);
1799 if (inputrec->ePull == epullCONSTRAINT)
1801 clear_pull_forces(inputrec->pull);
1804 /* update QMMMrec, if necessary */
1807 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1810 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
1812 posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
1813 f, enerd, lambda, fr);
1816 /* Compute the bonded and non-bonded energies and optionally forces */
1817 do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
1818 cr, nrnb, wcycle, mdatoms, &(inputrec->opts),
1819 x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, mtop, top, fr->born,
1820 &(top->atomtypes), bBornRadii, box,
1821 inputrec->fepvals, lambda,
1822 graph, &(top->excls), fr->mu_tot,
1828 if (do_per_step(step, inputrec->nstcalclr))
1830 /* Add the long range forces to the short range forces */
1831 for (i = 0; i < fr->natoms_force_constr; i++)
1833 rvec_add(fr->f_twin[i], f[i], f[i]);
1838 cycles_force = wallcycle_stop(wcycle, ewcFORCE);
1839 GMX_BARRIER(cr->mpi_comm_mygroup);
1843 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1846 if (DOMAINDECOMP(cr))
1848 dd_force_flop_stop(cr->dd, nrnb);
1851 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1857 if (IR_ELEC_FIELD(*inputrec))
1859 /* Compute forces due to electric field */
1860 calc_f_el(MASTER(cr) ? field : NULL,
1861 start, homenr, mdatoms->chargeA, x, fr->f_novirsum,
1862 inputrec->ex, inputrec->et, t);
1865 if (bDoAdressWF && fr->adress_icor == eAdressICThermoForce)
1867 /* Compute thermodynamic force in hybrid AdResS region */
1868 adress_thermo_force(start, homenr, &(top->cgs), x, fr->f_novirsum, fr, mdatoms,
1869 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1872 /* Communicate the forces */
1875 wallcycle_start(wcycle, ewcMOVEF);
1876 if (DOMAINDECOMP(cr))
1878 dd_move_f(cr->dd, f, fr->fshift);
1879 /* Do we need to communicate the separate force array
1880 * for terms that do not contribute to the single sum virial?
1881 * Position restraints and electric fields do not introduce
1882 * inter-cg forces, only full electrostatics methods do.
1883 * When we do not calculate the virial, fr->f_novirsum = f,
1884 * so we have already communicated these forces.
1886 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1887 (flags & GMX_FORCE_VIRIAL))
1889 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1893 /* We should not update the shift forces here,
1894 * since f_twin is already included in f.
1896 dd_move_f(cr->dd, fr->f_twin, NULL);
1901 pd_move_f(cr, f, nrnb);
1904 pd_move_f(cr, fr->f_twin, nrnb);
1907 wallcycle_stop(wcycle, ewcMOVEF);
1910 /* If we have NoVirSum forces, but we do not calculate the virial,
1911 * we sum fr->f_novirsum = f later.
1913 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1915 wallcycle_start(wcycle, ewcVSITESPREAD);
1916 spread_vsite_f(fplog, vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1917 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1918 wallcycle_stop(wcycle, ewcVSITESPREAD);
1922 wallcycle_start(wcycle, ewcVSITESPREAD);
1923 spread_vsite_f(fplog, vsite, x, fr->f_twin, NULL, FALSE, NULL,
1925 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1926 wallcycle_stop(wcycle, ewcVSITESPREAD);
1930 if (flags & GMX_FORCE_VIRIAL)
1932 /* Calculation of the virial must be done after vsites! */
1933 calc_virial(fplog, mdatoms->start, mdatoms->homenr, x, f,
1934 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1938 if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1940 pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
1941 f, vir_force, mdatoms, enerd, lambda, t);
1944 /* Add the forces from enforced rotation potentials (if any) */
1947 wallcycle_start(wcycle, ewcROTadd);
1948 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1949 wallcycle_stop(wcycle, ewcROTadd);
1952 if (PAR(cr) && !(cr->duty & DUTY_PME))
1954 /* In case of node-splitting, the PP nodes receive the long-range
1955 * forces, virial and energy from the PME nodes here.
1957 pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
1962 post_process_forces(fplog, cr, step, nrnb, wcycle,
1963 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1967 /* Sum the potential energy terms from group contributions */
1968 sum_epot(&(inputrec->opts), &(enerd->grpp), enerd->term);
1971 void do_force(FILE *fplog, t_commrec *cr,
1972 t_inputrec *inputrec,
1973 gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1974 gmx_localtop_t *top,
1976 gmx_groups_t *groups,
1977 matrix box, rvec x[], history_t *hist,
1981 gmx_enerdata_t *enerd, t_fcdata *fcd,
1982 real *lambda, t_graph *graph,
1984 gmx_vsite_t *vsite, rvec mu_tot,
1985 double t, FILE *field, gmx_edsam_t ed,
1986 gmx_bool bBornRadii,
1989 /* modify force flag if not doing nonbonded */
1990 if (!fr->bNonbonded)
1992 flags &= ~GMX_FORCE_NONBONDED;
1995 switch (inputrec->cutoff_scheme)
1998 do_force_cutsVERLET(fplog, cr, inputrec,
2014 do_force_cutsGROUP(fplog, cr, inputrec,
2029 gmx_incons("Invalid cut-off scheme passed!");
2034 void do_constrain_first(FILE *fplog, gmx_constr_t constr,
2035 t_inputrec *ir, t_mdatoms *md,
2036 t_state *state, rvec *f,
2037 t_graph *graph, t_commrec *cr, t_nrnb *nrnb,
2038 t_forcerec *fr, gmx_localtop_t *top, tensor shake_vir)
2040 int i, m, start, end;
2041 gmx_large_int_t step;
2042 real dt = ir->delta_t;
2046 snew(savex, state->natoms);
2049 end = md->homenr + start;
2053 fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
2054 start, md->homenr, end);
2056 /* Do a first constraint pass to reset the particles... */
2057 step = ir->init_step;
2060 char buf[STEPSTRSIZE];
2061 fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
2062 gmx_step_str(step, buf));
2066 /* constrain the current position */
2067 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2068 ir, NULL, cr, step, 0, md,
2069 state->x, state->x, NULL,
2070 fr->bMolPBC, state->box,
2071 state->lambda[efptBONDED], &dvdl_dum,
2072 NULL, NULL, nrnb, econqCoord,
2073 ir->epc == epcMTTK, state->veta, state->veta);
2076 /* constrain the initial velocity, and save it */
2077 /* also may be useful if we need the ekin from the halfstep for velocity verlet */
2078 /* might not yet treat veta correctly */
2079 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2080 ir, NULL, cr, step, 0, md,
2081 state->x, state->v, state->v,
2082 fr->bMolPBC, state->box,
2083 state->lambda[efptBONDED], &dvdl_dum,
2084 NULL, NULL, nrnb, econqVeloc,
2085 ir->epc == epcMTTK, state->veta, state->veta);
2087 /* constrain the initial velocities at t-dt/2 */
2088 if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
2090 for (i = start; (i < end); i++)
2092 for (m = 0; (m < DIM); m++)
2094 /* Reverse the velocity */
2095 state->v[i][m] = -state->v[i][m];
2096 /* Store the position at t-dt in savex */
2097 savex[i][m] = state->x[i][m] + dt*state->v[i][m];
2100 /* Shake the positions at t=-dt with the positions at t=0
2101 * as reference coordinates.
2105 char buf[STEPSTRSIZE];
2106 fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
2107 gmx_step_str(step, buf));
2110 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2111 ir, NULL, cr, step, -1, md,
2112 state->x, savex, NULL,
2113 fr->bMolPBC, state->box,
2114 state->lambda[efptBONDED], &dvdl_dum,
2115 state->v, NULL, nrnb, econqCoord,
2116 ir->epc == epcMTTK, state->veta, state->veta);
2118 for (i = start; i < end; i++)
2120 for (m = 0; m < DIM; m++)
2122 /* Re-reverse the velocities */
2123 state->v[i][m] = -state->v[i][m];
2130 void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
2132 double eners[2], virs[2], enersum, virsum, y0, f, g, h;
2133 double r0, r1, r, rc3, rc9, ea, eb, ec, pa, pb, pc, pd;
2134 double invscale, invscale2, invscale3;
2135 int ri0, ri1, ri, i, offstart, offset;
2136 real scale, *vdwtab, tabfactor, tmp;
2138 fr->enershiftsix = 0;
2139 fr->enershifttwelve = 0;
2140 fr->enerdiffsix = 0;
2141 fr->enerdifftwelve = 0;
2143 fr->virdifftwelve = 0;
2145 if (eDispCorr != edispcNO)
2147 for (i = 0; i < 2; i++)
2152 if ((fr->vdwtype == evdwSWITCH) || (fr->vdwtype == evdwSHIFT))
2154 if (fr->rvdw_switch == 0)
2157 "With dispersion correction rvdw-switch can not be zero "
2158 "for vdw-type = %s", evdw_names[fr->vdwtype]);
2161 scale = fr->nblists[0].table_elec_vdw.scale;
2162 vdwtab = fr->nblists[0].table_vdw.data;
2164 /* Round the cut-offs to exact table values for precision */
2165 ri0 = floor(fr->rvdw_switch*scale);
2166 ri1 = ceil(fr->rvdw*scale);
2172 if (fr->vdwtype == evdwSHIFT)
2174 /* Determine the constant energy shift below rvdw_switch.
2175 * The table carries a scale factor: it has been scaled down to compensate
2176 * for the c6/c12 parameters being scaled up by the derivative factors, which saves flops in the analytical kernels.
2178 fr->enershiftsix = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
2179 fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
2181 /* Add the constant part from 0 to rvdw_switch.
2182 * This integration from 0 to rvdw_switch overcounts the number
2183 * of interactions by 1, as it also counts the self interaction.
2184 * We will correct for this later.
2186 eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
2187 eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
2189 invscale = 1.0/(scale);
2190 invscale2 = invscale*invscale;
2191 invscale3 = invscale*invscale2;
2193 /* following summation derived from cubic spline definition,
2194 Numerical Recipes in C, second edition, p. 113-116. Exact
2195 for the cubic spline. We first calculate the negative of
2196 the energy from rvdw to rvdw_switch, assuming that g(r)=1,
2197 and then add the more standard, abrupt cutoff correction to
2198 that result, yielding the long-range correction for a
2199 switched function. We perform both the pressure and energy
2200 loops at the same time for simplicity, as the computational
2203 for (i = 0; i < 2; i++)
2205 enersum = 0.0; virsum = 0.0;
2209 /* Since the dispersion table has been scaled down a factor 6.0 and the repulsion
2210 * a factor 12.0 to compensate for the c6/c12 parameters inside nbfp[] being scaled
2211 * up (to save flops in kernels), we need to correct for this.
2220 for (ri = ri0; ri < ri1; ri++)
2224 eb = 2.0*invscale2*r;
2228 pb = 3.0*invscale2*r;
2229 pc = 3.0*invscale*r*r;
2232 /* this "8" is from the packing in the vdwtab array - perhaps should be #define'ed? */
2233 offset = 8*ri + offstart;
2234 y0 = vdwtab[offset];
2235 f = vdwtab[offset+1];
2236 g = vdwtab[offset+2];
2237 h = vdwtab[offset+3];
2239 enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
2240 virsum += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
2243 enersum *= 4.0*M_PI*tabfactor;
2244 virsum *= 4.0*M_PI*tabfactor;
2245 eners[i] -= enersum;
2249 /* now add the correction for rvdw_switch to infinity */
2250 eners[0] += -4.0*M_PI/(3.0*rc3);
2251 eners[1] += 4.0*M_PI/(9.0*rc9);
2252 virs[0] += 8.0*M_PI/rc3;
2253 virs[1] += -16.0*M_PI/(3.0*rc9);
2255 else if ((fr->vdwtype == evdwCUT) || (fr->vdwtype == evdwUSER))
2257 if (fr->vdwtype == evdwUSER && fplog)
2260 "WARNING: using dispersion correction with user tables\n");
2262 rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
2264 /* Contribution beyond the cut-off */
2265 eners[0] += -4.0*M_PI/(3.0*rc3);
2266 eners[1] += 4.0*M_PI/(9.0*rc9);
2267 if (fr->vdw_modifier == eintmodPOTSHIFT)
2269 /* Contribution within the cut-off */
2270 eners[0] += -4.0*M_PI/(3.0*rc3);
2271 eners[1] += 4.0*M_PI/(3.0*rc9);
2273 /* Contribution beyond the cut-off */
2274 virs[0] += 8.0*M_PI/rc3;
2275 virs[1] += -16.0*M_PI/(3.0*rc9);
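/* For reference, a sketch of where these constants come from, assuming
 * g(r) = 1 beyond the cut-off rc:
 *   int_rc^inf 4*pi*r^2 * (-1/r^6)  dr = -4*pi/(3*rc^3)
 *   int_rc^inf 4*pi*r^2 * ( 1/r^12) dr =  4*pi/(9*rc^9)
 * The virial terms follow from the corresponding r*(dV/dr) integrals.
 */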
2280 "Dispersion correction is not implemented for vdw-type = %s",
2281 evdw_names[fr->vdwtype]);
2283 fr->enerdiffsix = eners[0];
2284 fr->enerdifftwelve = eners[1];
2285 /* The 0.5 is due to the Gromacs definition of the virial */
2286 fr->virdiffsix = 0.5*virs[0];
2287 fr->virdifftwelve = 0.5*virs[1];
2291 void calc_dispcorr(FILE *fplog, t_inputrec *ir, t_forcerec *fr,
2292 gmx_large_int_t step, int natoms,
2293 matrix box, real lambda, tensor pres, tensor virial,
2294 real *prescorr, real *enercorr, real *dvdlcorr)
2296 gmx_bool bCorrAll, bCorrPres;
2297 real dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
2307 if (ir->eDispCorr != edispcNO)
2309 bCorrAll = (ir->eDispCorr == edispcAllEner ||
2310 ir->eDispCorr == edispcAllEnerPres);
2311 bCorrPres = (ir->eDispCorr == edispcEnerPres ||
2312 ir->eDispCorr == edispcAllEnerPres);
2314 invvol = 1/det(box);
2317 /* Only correct for the interactions with the inserted molecule */
2318 dens = (natoms - fr->n_tpi)*invvol;
2323 dens = natoms*invvol;
2324 ninter = 0.5*natoms;
2327 if (ir->efep == efepNO)
2329 avcsix = fr->avcsix[0];
2330 avctwelve = fr->avctwelve[0];
2334 avcsix = (1 - lambda)*fr->avcsix[0] + lambda*fr->avcsix[1];
2335 avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
2338 enerdiff = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
2339 *enercorr += avcsix*enerdiff;
2341 if (ir->efep != efepNO)
2343 dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
2347 enerdiff = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
2348 *enercorr += avctwelve*enerdiff;
2349 if (fr->efep != efepNO)
2351 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
2357 svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
2358 if (ir->eDispCorr == edispcAllEnerPres)
2360 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
2362 /* The factor 2 is because of the Gromacs virial definition */
2363 spres = -2.0*invvol*svir*PRESFAC;
2365 for (m = 0; m < DIM; m++)
2367 virial[m][m] += svir;
2368 pres[m][m] += spres;
2373 /* Can't currently control when it prints; for now, just print when debugging */
2378 fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
2384 "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
2385 *enercorr, spres, svir);
2389 fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
2393 if (fr->bSepDVDL && do_per_step(step, ir->nstlog))
2395 fprintf(fplog, sepdvdlformat, "Dispersion correction",
2396 *enercorr, dvdlambda);
2398 if (fr->efep != efepNO)
2400 *dvdlcorr += dvdlambda;
2405 void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
2406 t_graph *graph, rvec x[])
2410 fprintf(fplog, "Removing pbc first time\n");
2412 calc_shifts(box, fr->shift_vec);
2415 mk_mshift(fplog, graph, fr->ePBC, box, x);
2418 p_graph(debug, "do_pbc_first 1", graph);
2420 shift_self(graph, box, x);
2421 /* By doing an extra mk_mshift the molecules that are broken
2422 * because they were e.g. imported from other software
2423 * will be made whole again. Such are the healing powers
2426 mk_mshift(fplog, graph, fr->ePBC, box, x);
2429 p_graph(debug, "do_pbc_first 2", graph);
2434 fprintf(fplog, "Done rmpbc\n");
2438 static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2439 gmx_mtop_t *mtop, rvec x[],
2444 gmx_molblock_t *molb;
2446 if (bFirst && fplog)
2448 fprintf(fplog, "Removing pbc first time\n");
2453 for (mb = 0; mb < mtop->nmolblock; mb++)
2455 molb = &mtop->molblock[mb];
2456 if (molb->natoms_mol == 1 ||
2457 (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
2459 /* Just one atom or charge group in the molecule, no PBC required */
2460 as += molb->nmol*molb->natoms_mol;
2464 /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
2465 mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
2466 0, molb->natoms_mol, FALSE, FALSE, graph);
2468 for (mol = 0; mol < molb->nmol; mol++)
2470 mk_mshift(fplog, graph, ePBC, box, x+as);
2472 shift_self(graph, box, x+as);
2473 /* The molecule is whole now.
2474 * We don't need the second mk_mshift call as in do_pbc_first,
2475 * since we no longer need this graph.
2478 as += molb->natoms_mol;
2486 void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
2487 gmx_mtop_t *mtop, rvec x[])
2489 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
2492 void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2493 gmx_mtop_t *mtop, rvec x[])
2495 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
2498 void finish_run(FILE *fplog, t_commrec *cr, const char *confout,
2499 t_inputrec *inputrec,
2500 t_nrnb nrnb[], gmx_wallcycle_t wcycle,
2501 gmx_runtime_t *runtime,
2502 wallclock_gpu_t *gputimes,
2504 gmx_bool bWriteStat)
2507 t_nrnb *nrnb_tot = NULL;
2511 wallcycle_sum(cr, wcycle);
2517 MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
2518 cr->mpi_comm_mysim);
2526 #if defined(GMX_MPI) && !defined(GMX_THREAD_MPI)
2529 /* reduce nodetime over all MPI processes in the current simulation */
2531 MPI_Allreduce(&runtime->proctime, &sum, 1, MPI_DOUBLE, MPI_SUM,
2532 cr->mpi_comm_mysim);
2533 runtime->proctime = sum;
2539 print_flop(fplog, nrnb_tot, &nbfs, &mflop);
2546 if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
2548 print_dd_statistics(cr, inputrec, fplog);
2560 snew(nrnb_all, cr->nnodes);
2561 nrnb_all[0] = *nrnb;
2562 for (s = 1; s < cr->nnodes; s++)
2564 MPI_Recv(nrnb_all[s].n, eNRNB, MPI_DOUBLE, s, 0,
2565 cr->mpi_comm_mysim, &stat);
2567 pr_load(fplog, cr, nrnb_all);
2572 MPI_Send(nrnb->n, eNRNB, MPI_DOUBLE, MASTERRANK(cr), 0,
2573 cr->mpi_comm_mysim);
2580 wallcycle_print(fplog, cr->nnodes, cr->npmenodes, runtime->realtime,
2583 if (EI_DYNAMICS(inputrec->eI))
2585 delta_t = inputrec->delta_t;
2594 print_perf(fplog, runtime->proctime, runtime->realtime,
2595 cr->nnodes-cr->npmenodes,
2596 runtime->nsteps_done, delta_t, nbfs, mflop,
2601 print_perf(stderr, runtime->proctime, runtime->realtime,
2602 cr->nnodes-cr->npmenodes,
2603 runtime->nsteps_done, delta_t, nbfs, mflop,
2609 extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, real *lambda, double *lam0)
2611 /* this function works, but could probably use a logic rewrite to keep all the different
2612 types of efep straight. */
2615 t_lambda *fep = ir->fepvals;
2617 if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
2619 for (i = 0; i < efptNR; i++)
2631 *fep_state = fep->init_fep_state; /* this might overwrite the checkpoint
2632 if a checkpoint is set -- a kludge is in place for now
2634 for (i = 0; i < efptNR; i++)
2636 /* overwrite lambda state with init_lambda for now for backwards compatibility */
2637 if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
2639 lambda[i] = fep->init_lambda;
2642 lam0[i] = lambda[i];
2647 lambda[i] = fep->all_lambda[i][*fep_state];
2650 lam0[i] = lambda[i];
2656 /* need to rescale control temperatures to match current state */
2657 for (i = 0; i < ir->opts.ngtc; i++)
2659 if (ir->opts.ref_t[i] > 0)
2661 ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
2667 /* Send to the log the information on the current lambdas */
2670 fprintf(fplog, "Initial vector of lambda components:[ ");
2671 for (i = 0; i < efptNR; i++)
2673 fprintf(fplog, "%10.4f ", lambda[i]);
2675 fprintf(fplog, "]\n");
2681 void init_md(FILE *fplog,
2682 t_commrec *cr, t_inputrec *ir, const output_env_t oenv,
2683 double *t, double *t0,
2684 real *lambda, int *fep_state, double *lam0,
2685 t_nrnb *nrnb, gmx_mtop_t *mtop,
2687 int nfile, const t_filenm fnm[],
2688 gmx_mdoutf_t **outf, t_mdebin **mdebin,
2689 tensor force_vir, tensor shake_vir, rvec mu_tot,
2690 gmx_bool *bSimAnn, t_vcm **vcm, t_state *state, unsigned long Flags)
2695 /* Initial values */
2696 *t = *t0 = ir->init_t;
2699 for (i = 0; i < ir->opts.ngtc; i++)
2701 /* set bSimAnn if any group is being annealed */
2702 if (ir->opts.annealing[i] != eannNO)
2709 update_annealing_target_temp(&(ir->opts), ir->init_t);
2712 /* Initialize lambda variables */
2713 initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
2717 *upd = init_update(fplog, ir);
2723 *vcm = init_vcm(fplog, &mtop->groups, ir);
2726 if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
2728 if (ir->etc == etcBERENDSEN)
2730 please_cite(fplog, "Berendsen84a");
2732 if (ir->etc == etcVRESCALE)
2734 please_cite(fplog, "Bussi2007a");
2742 *outf = init_mdoutf(nfile, fnm, Flags, cr, ir, oenv);
2744 *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : (*outf)->fp_ene,
2745 mtop, ir, (*outf)->fp_dhdl);
2750 please_cite(fplog, "Fritsch12");
2751 please_cite(fplog, "Junghans10");
2753 /* Initialize variables */
2754 clear_mat(force_vir);
2755 clear_mat(shake_vir);