/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
47 #include "gromacs/domdec/domdec.h"
48 #include "gromacs/domdec/domdec_struct.h"
49 #include "gromacs/ewald/ewald.h"
50 #include "gromacs/ewald/long-range-correction.h"
51 #include "gromacs/ewald/pme.h"
52 #include "gromacs/gmxlib/network.h"
53 #include "gromacs/gmxlib/nrnb.h"
54 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
55 #include "gromacs/listed-forces/listed-forces.h"
56 #include "gromacs/math/vec.h"
57 #include "gromacs/math/vecdump.h"
58 #include "gromacs/mdlib/force_flags.h"
59 #include "gromacs/mdlib/forcerec-threading.h"
60 #include "gromacs/mdlib/mdrun.h"
61 #include "gromacs/mdlib/ns.h"
62 #include "gromacs/mdlib/qmmm.h"
63 #include "gromacs/mdlib/rf_util.h"
64 #include "gromacs/mdlib/wall.h"
65 #include "gromacs/mdtypes/commrec.h"
66 #include "gromacs/mdtypes/enerdata.h"
67 #include "gromacs/mdtypes/forceoutput.h"
68 #include "gromacs/mdtypes/forcerec.h"
69 #include "gromacs/mdtypes/inputrec.h"
70 #include "gromacs/mdtypes/md_enums.h"
71 #include "gromacs/pbcutil/ishift.h"
72 #include "gromacs/pbcutil/mshift.h"
73 #include "gromacs/pbcutil/pbc.h"
74 #include "gromacs/timing/wallcycle.h"
75 #include "gromacs/utility/cstringutil.h"
76 #include "gromacs/utility/exceptions.h"
77 #include "gromacs/utility/fatalerror.h"
78 #include "gromacs/utility/smalloc.h"
       const gmx_groups_t *groups,

    if (!fr->ns->nblist_initialized)
        init_neighbor_list(fp, fr, md->homenr);

    nsearch = search_neighbours(fp, fr, box, top, groups, cr, nrnb, md,

        fprintf(debug, "nsearch = %d\n", nsearch);

    /* Check whether we have to do dynamic load balancing */
    /*if ((nsb->nstDlb > 0) && (mod(step,nsb->nstDlb) == 0))
       count_nb(cr,nsb,&(top->blocks[ebCGS]),nns,fr->nlr,
       &(top->idef),opts->ngener);
     */
    if (fr->ns->dump_nl > 0)
        dump_nblist(fp, cr, fr, fr->ns->dump_nl);
static void clearEwaldThreadOutput(ewald_corr_thread_t *ewc_t)
{
    ewc_t->Vcorr_q        = 0;
    ewc_t->Vcorr_lj       = 0;
    ewc_t->dvdl[efptCOUL] = 0;
    ewc_t->dvdl[efptVDW]  = 0;
    clear_mat(ewc_t->vir_q);
    clear_mat(ewc_t->vir_lj);
}

static void reduceEwaldThreadOutput(int nthreads, ewald_corr_thread_t *ewc_t)
{
    ewald_corr_thread_t &dest = ewc_t[0];

    for (int t = 1; t < nthreads; t++)
    {
        dest.Vcorr_q        += ewc_t[t].Vcorr_q;
        dest.Vcorr_lj       += ewc_t[t].Vcorr_lj;
        dest.dvdl[efptCOUL] += ewc_t[t].dvdl[efptCOUL];
        dest.dvdl[efptVDW]  += ewc_t[t].dvdl[efptVDW];
        m_add(dest.vir_q, ewc_t[t].vir_q, dest.vir_q);
        m_add(dest.vir_lj, ewc_t[t].vir_lj, dest.vir_lj);
    }
}
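
/* Usage sketch (illustrative, not part of the original file): the two
 * helpers above implement the usual per-thread scratch + serial reduction
 * pattern for OpenMP. Each thread accumulates into its own
 * ewald_corr_thread_t, and the reduction folds everything into ewc_t[0]
 * in a fixed order, which keeps the totals binary-reproducible regardless
 * of thread scheduling:
 *
 *     #pragma omp parallel for num_threads(nthreads) schedule(static)
 *     for (int t = 0; t < nthreads; t++)
 *     {
 *         clearEwaldThreadOutput(&fr->ewc_t[t]);
 *         // ... thread t accumulates corrections into fr->ewc_t[t] ...
 *     }
 *     reduceEwaldThreadOutput(nthreads, fr->ewc_t);  // totals in fr->ewc_t[0]
 */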
void do_force_lowlevel(t_forcerec *fr,
                       const t_inputrec *ir,
                       const gmx_multisim_t *ms,
                       gmx_wallcycle_t wcycle,
                       rvec *forceForUseWithShiftForces,
                       gmx::ForceWithVirial *forceWithVirial,
                       gmx_enerdata_t *enerd,
                       const t_graph *graph,
                       const t_blocka *excl,
    real   dvdl_dum[efptNR], dvdl_nb[efptNR];
    double t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */

    set_pbc(&pbc, fr->ePBC, box);

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)

    /* do QMMM first if requested */
        enerd->term[F_EQM] = calculate_QMMM(cr, forceForUseWithShiftForces, fr);

    /* Call the short range functions all in one go. */

/*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/
#define TAKETIME FALSE
        MPI_Barrier(cr->mpi_comm_mygroup);

    /* foreign lambda component for walls */
    real dvdl_walls = do_walls(ir, fr, box, md, x, forceForUseWithShiftForces, lambda[efptVDW],
                               enerd->grpp.ener[egLJSR], nrnb);
    enerd->dvdl_lin[efptVDW] += dvdl_walls;
    /* We only do non-bonded calculations with the group scheme here; the
     * Verlet calls are done from do_force_cutsVERLET(). */
    if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED))

        /* Add short-range interactions */
        donb_flags |= GMX_NONBONDED_DO_SR;

        /* Currently all group scheme kernels always calculate (shift-)forces */
        if (flags & GMX_FORCE_FORCES)
            donb_flags |= GMX_NONBONDED_DO_FORCE;
        if (flags & GMX_FORCE_VIRIAL)
            donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
        if (flags & GMX_FORCE_ENERGY)
            donb_flags |= GMX_NONBONDED_DO_POTENTIAL;

        wallcycle_sub_start(wcycle, ewcsNONBONDED);
        do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
                     lambda, dvdl_nb, -1, -1, donb_flags);
        /* If we do foreign lambda and we have soft-core interactions
         * we have to recalculate the (non-linear) energy contributions.
         */
        if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
            for (i = 0; i < enerd->n_lambda; i++)
                for (j = 0; j < efptNR; j++)
                    lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);

                reset_foreign_enerdata(enerd);
                do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
                             &(enerd->foreign_grpp), nrnb,
                             lam_i, dvdl_dum, -1, -1,
                             (donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA);
                sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
                enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
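                /* Slot i of enerd->enerpart_lambda accumulates the potential
                 * energy re-evaluated at lambda vector i (slot 0 is the
                 * current state); sum_dhdl() later turns these into the
                 * energy differences used by free-energy estimators such
                 * as BAR. */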
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
        if (fepvals->sc_alpha != 0)
            enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
        else
            enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];

        if (fepvals->sc_alpha != 0)
            /* even though the Coulomb part is linear, we already added it,
               because we need to go through the vdW calculation anyway */
            enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
        else
            enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];

        pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS);
    /* Shift the coordinates. Must be done before listed forces and PPPM,
     * but is also necessary for SHAKE and update, therefore it can NOT be
     * skipped when no listed forces have to be evaluated.
     *
     * The shifting and PBC code is deliberately not timed, since with
     * the Verlet scheme it only takes non-zero time with triclinic
     * boxes, and even then the time is around a factor of 100 less
     * than the next smallest counter.
     */

    /* Here sometimes we would not need to shift with NBFonly,
     * but we do so anyhow for consistency of the returned coordinates.
     */
        shift_self(graph, box, x);
        inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
        inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
    /* Check whether we need to do listed interactions or correct for exclusions */
        ((flags & GMX_FORCE_LISTED)
         || EEL_RF(fr->ic->eeltype) || EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)))

        /* TODO There are no electrostatics methods that require this
           transformation when using the Verlet scheme, so update the
           above conditional. */
        /* Since all atoms are in the rectangular or triclinic unit-cell,
         * only single box vector shifts (2 in x) are required.
         */
        set_pbc_dd(&pbc, fr->ePBC, DOMAINDECOMP(cr) ? cr->dd->nc : nullptr,

        do_force_listed(wcycle, box, ir->fepvals, cr, ms,
                        forceForUseWithShiftForces, forceWithVirial,
                        fr, &pbc, graph, enerd, nrnb, lambda, md, fcd,
                        DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr,
    /* Do long-range electrostatics and/or LJ-PME, including related short-range
     * corrections.
     */
    if (EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))

        real Vlr_q = 0, Vlr_lj = 0;

        /* We reduce all virial, dV/dlambda and energy contributions, except
         * for the reciprocal energies (Vlr_q, Vlr_lj), into the same struct.
         */
        ewald_corr_thread_t &ewaldOutput = fr->ewc_t[0];
        clearEwaldThreadOutput(&ewaldOutput);
        if (EEL_PME_EWALD(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))

            /* With the Verlet scheme exclusion forces are calculated
             * in the non-bonded kernel.
             */
            /* The TPI molecule does not have exclusions with the rest
             * of the system and no intra-molecular PME grid
             * contributions will be calculated in
             * gmx_pme_calc_energy.
             */
            if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) ||
                ir->ewald_geometry != eewg3D ||
                ir->epsilon_surface != 0)

                wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);

                    gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions");

                nthreads = fr->nthread_ewc;
#pragma omp parallel for num_threads(nthreads) schedule(static)
                for (t = 0; t < nthreads; t++)

                    ewald_corr_thread_t &ewc_t = fr->ewc_t[t];

                    clearEwaldThreadOutput(&ewc_t);

                    /* Threading is only supported with the Verlet cut-off
                     * scheme and then only single particle forces (no
                     * exclusion forces) are calculated, so we can store
                     * the forces in the normal, single forceWithVirial->force_ array.
                     */
                    ewald_LRcorrection(md->homenr, cr, nthreads, t, fr, ir,
                                       md->chargeA, md->chargeB,
                                       md->sqrt_c6A, md->sqrt_c6B,
                                       md->sigmaA, md->sigmaB,
                                       md->sigma3A, md->sigma3B,
                                       md->nChargePerturbed || md->nTypePerturbed,
                                       ir->cutoff_scheme != ecutsVERLET,
                                       excl, x, box, mu_tot,
                                       as_rvec_array(forceWithVirial->force_.data()),
                                       ewc_t.vir_q, ewc_t.vir_lj,
                                       &ewc_t.Vcorr_q, &ewc_t.Vcorr_lj,
                                       lambda[efptCOUL], lambda[efptVDW],
                                       &ewc_t.dvdl[efptCOUL], &ewc_t.dvdl[efptVDW]);
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

                reduceEwaldThreadOutput(nthreads, fr->ewc_t);

                wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
            if (EEL_PME_EWALD(fr->ic->eeltype) && fr->n_tpi == 0)

                /* This is not in a subcounter because it takes a
                   negligible and constant-sized amount of time */
                ewaldOutput.Vcorr_q +=
                    ewald_charge_correction(cr, fr, lambda[efptCOUL], box,
                                            &ewaldOutput.dvdl[efptCOUL],

        if ((EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)) &&
            thisRankHasDuty(cr, DUTY_PME) && (pme_run_mode(fr->pmedata) == PmeRunMode::CPU))

            /* Do reciprocal PME for Coulomb and/or LJ. */
            assert(fr->n_tpi >= 0);
            if (fr->n_tpi == 0 || (flags & GMX_FORCE_STATECHANGED))

                pme_flags = GMX_PME_SPREAD | GMX_PME_SOLVE;

                if (flags & GMX_FORCE_FORCES)
                    pme_flags |= GMX_PME_CALC_F;
                if (flags & GMX_FORCE_VIRIAL)
                    pme_flags |= GMX_PME_CALC_ENER_VIR;

                    /* We don't calculate f, but we do want the potential */
                    pme_flags |= GMX_PME_CALC_POT;
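
                /* Composition example (assuming a normal MD step that needs
                 * forces and the virial): pme_flags ends up as
                 * GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F |
                 * GMX_PME_CALC_ENER_VIR; with test-particle insertion
                 * GMX_PME_CALC_POT is added as well, since only the grid
                 * potential at the inserted molecule is needed. */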
            /* With domain decomposition we close the CPU side load
             * balancing region here, because PME does global
             * communication that acts as a global barrier.
             */
            if (DOMAINDECOMP(cr))
                ddCloseBalanceRegionCpu(cr->dd);

            wallcycle_start(wcycle, ewcPMEMESH);
            status = gmx_pme_do(fr->pmedata,
                                0, md->homenr - fr->n_tpi,
                                as_rvec_array(forceWithVirial->force_.data()),
                                md->chargeA, md->chargeB,
                                md->sqrt_c6A, md->sqrt_c6B,
                                md->sigmaA, md->sigmaB,
                                DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0,
                                DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0,
                                ewaldOutput.vir_q, ewaldOutput.vir_lj,
                                lambda[efptCOUL], lambda[efptVDW],
                                &ewaldOutput.dvdl[efptCOUL],
                                &ewaldOutput.dvdl[efptVDW],
            *cycles_pme = wallcycle_stop(wcycle, ewcPMEMESH);
                gmx_fatal(FARGS, "Error %d in reciprocal PME routine", status);
            /* We should try to do as little computation after
             * this as possible, because parallel PME synchronizes
             * the nodes, so we want all load imbalance of the
             * rest of the force calculation to be before the PME
             * call. DD load balancing is done on the whole time
             * of the force call (without PME).
             */
            if (EVDW_PME(ir->vdwtype))
                gmx_fatal(FARGS, "Test particle insertion not implemented with LJ-PME");

            /* Determine the PME grid energy of the test molecule
             * with the PME grid potential of the other charges.
             */
            gmx_pme_calc_energy(fr->pmedata, fr->n_tpi,
                                x + md->homenr - fr->n_tpi,
                                md->chargeA + md->homenr - fr->n_tpi,
        if (!EEL_PME(fr->ic->eeltype) && EEL_PME_EWALD(fr->ic->eeltype))
            Vlr_q = do_ewald(ir, x, as_rvec_array(forceWithVirial->force_.data()),
                             md->chargeA, md->chargeB,
                             ewaldOutput.vir_q, fr->ic->ewaldcoeff_q,
                             lambda[efptCOUL], &ewaldOutput.dvdl[efptCOUL],

        /* Note that with separate PME nodes we get the real energies later */
        forceWithVirial->addVirialContribution(ewaldOutput.vir_q);
        forceWithVirial->addVirialContribution(ewaldOutput.vir_lj);
        enerd->dvdl_lin[efptCOUL] += ewaldOutput.dvdl[efptCOUL];
        enerd->dvdl_lin[efptVDW]  += ewaldOutput.dvdl[efptVDW];
        enerd->term[F_COUL_RECIP]  = Vlr_q + ewaldOutput.Vcorr_q;
        enerd->term[F_LJ_RECIP]    = Vlr_lj + ewaldOutput.Vcorr_lj;
            fprintf(debug, "Vlr_q = %g, Vcorr_q = %g, Vlr_corr_q = %g\n",
                    Vlr_q, ewaldOutput.Vcorr_q, enerd->term[F_COUL_RECIP]);
            pr_rvecs(debug, 0, "vir_el_recip after corr", ewaldOutput.vir_q, DIM);
            pr_rvecs(debug, 0, "fshift after LR Corrections", fr->fshift, SHIFTS);
            fprintf(debug, "Vlr_lj: %g, Vcorr_lj = %g, Vlr_corr_lj = %g\n",
                    Vlr_lj, ewaldOutput.Vcorr_lj, enerd->term[F_LJ_RECIP]);
            pr_rvecs(debug, 0, "vir_lj_recip after corr", ewaldOutput.vir_lj, DIM);
    /* Is there a reaction-field exclusion correction needed?
     * With the Verlet scheme, exclusion forces are calculated
     * in the non-bonded kernel.
     */
    if (ir->cutoff_scheme != ecutsVERLET && EEL_RF(fr->ic->eeltype))

        real dvdl_rf_excl = 0;
        enerd->term[F_RF_EXCL] =
            RF_excl_correction(fr, graph, md, excl, DOMAINDECOMP(cr),
                               x, forceForUseWithShiftForces,
                               fr->fshift, &pbc, lambda[efptCOUL], &dvdl_rf_excl);

        enerd->dvdl_lin[efptCOUL] += dvdl_rf_excl;
        print_nrnb(debug, nrnb);

        MPI_Barrier(cr->mpi_comm_mygroup);

    if (fr->timesteps == 11)
        fprintf(stderr, "* PP load balancing info: rank %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
                cr->nodeid, gmx_step_str(fr->timesteps, buf),
                100*fr->t_wait/(fr->t_wait+fr->t_fnbf),
                (fr->t_fnbf+fr->t_wait)/fr->t_fnbf);

    pr_rvecs(debug, 0, "fshift after bondeds", fr->fshift, SHIFTS);
void init_enerdata(int ngener, int n_lambda, gmx_enerdata_t *enerd)

    for (i = 0; i < F_NRE; i++)
        enerd->foreign_term[i] = 0;

    for (i = 0; i < efptNR; i++)
        enerd->dvdl_lin[i]    = 0;
        enerd->dvdl_nonlin[i] = 0;

        fprintf(debug, "Creating %d sized group matrix for energies\n", n2);

    enerd->grpp.nener         = n2;
    enerd->foreign_grpp.nener = n2;
    for (i = 0; (i < egNR); i++)
        snew(enerd->grpp.ener[i], n2);
        snew(enerd->foreign_grpp.ener[i], n2);

    enerd->n_lambda = 1 + n_lambda;
    snew(enerd->enerpart_lambda, enerd->n_lambda);
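
/* Sizing note: n2 is the number of energy-group pairs (ngener squared), so
 * e.g. 3 energy groups give 9 entries per interaction type, while
 * enerpart_lambda holds one slot for the current lambda state plus one slot
 * per foreign lambda. */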
void destroy_enerdata(gmx_enerdata_t *enerd)

    for (i = 0; (i < egNR); i++)
        sfree(enerd->grpp.ener[i]);

    for (i = 0; (i < egNR); i++)
        sfree(enerd->foreign_grpp.ener[i]);

    sfree(enerd->enerpart_lambda);
static real sum_v(int n, const real v[])
{
    real t = 0.0;
    for (int i = 0; (i < n); i++)
        t += v[i];
    return t;
}
void sum_epot(gmx_grppairener_t *grpp, real *epot)

    /* Accumulate energies */
    epot[F_COUL_SR] = sum_v(grpp->nener, grpp->ener[egCOULSR]);
    epot[F_LJ]      = sum_v(grpp->nener, grpp->ener[egLJSR]);
    epot[F_LJ14]    = sum_v(grpp->nener, grpp->ener[egLJ14]);
    epot[F_COUL14]  = sum_v(grpp->nener, grpp->ener[egCOUL14]);

    /* lattice part of LR doesn't belong to any group
     * and has been added earlier
     */
    epot[F_BHAM] = sum_v(grpp->nener, grpp->ener[egBHAMSR]);

    for (i = 0; (i < F_EPOT); i++)
        if (i != F_DISRESVIOL && i != F_ORIRESDEV)
            epot[F_EPOT] += epot[i];
void sum_dhdl(gmx_enerdata_t *enerd, gmx::ArrayRef<const real> lambda, t_lambda *fepvals)

    enerd->dvdl_lin[efptVDW] += enerd->term[F_DVDL_VDW]; /* include dispersion correction */
    enerd->term[F_DVDL]       = 0.0;
    for (int i = 0; i < efptNR; i++)
        if (fepvals->separate_dvdl[i])
            /* could this be done more readably/compactly? */
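            /* Each perturbation component maps to its own energy-term slot:
             * efptFEP -> F_DVDL, efptMASS -> F_DKDL,
             * efptCOUL -> F_DVDL_COUL, efptVDW -> F_DVDL_VDW,
             * efptBONDED -> F_DVDL_BONDED,
             * efptRESTRAINT -> F_DVDL_RESTRAINT. */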
                index = F_DVDL_BONDED;
                break;
            case (efptRESTRAINT):
                index = F_DVDL_RESTRAINT;
                break;

            enerd->term[index] = enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
                fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
                        efpt_names[i], i, enerd->term[index], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);

            enerd->term[F_DVDL] += enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
                fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
                        efpt_names[0], i, enerd->term[F_DVDL], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
    /* Notes on the foreign lambda free energy difference evaluation:
     * Adding the potential and kinetic-energy terms that depend linearly on
     * lambda as delta lam * dvdl to the energy differences is exact.
     * For the constraints this is not exact, but we have no other option
     * without literally changing the lengths and reevaluating the energies at each step.
     * (try to remedy this post 4.6 - MRS)
     */
    if (fepvals->separate_dvdl[efptBONDED])
        enerd->term[F_DVDL_BONDED] += enerd->term[F_DVDL_CONSTR];
    else
        enerd->term[F_DVDL] += enerd->term[F_DVDL_CONSTR];

    enerd->term[F_DVDL_CONSTR] = 0;
    for (int i = 0; i < fepvals->n_lambda; i++)

        /* note we are iterating over fepvals here!
           For the current lam, dlam = 0 automatically,
           so we don't need to add anything to the
           enerd->enerpart_lambda[0] */

        /* we don't need to worry about dvdl_lin contributions to dE at
           current lambda, because the contributions to the current
           lambda are automatically zeroed */

        for (gmx::index j = 0; j < lambda.size(); j++)
            /* Note that this loop is over all dhdl components, not just the separated ones */
            dlam = (fepvals->all_lambda[j][i] - lambda[j]);
            enerd->enerpart_lambda[i+1] += dlam*enerd->dvdl_lin[j];
                fprintf(debug, "enerdiff lam %g: (%15s), non-linear %f linear %f*%f\n",
                        fepvals->all_lambda[j][i], efpt_names[j],
                        (enerd->enerpart_lambda[i+1] - enerd->enerpart_lambda[0]),
                        dlam, enerd->dvdl_lin[j]);
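
    /* In formula form: for each foreign state i the loop above adds the
     * linear part of the energy difference,
     *     dE_i += sum_j (lambda_j(i) - lambda_j) * dV/dlambda_j|_lin,
     * i.e. a first-order extrapolation from the current lambda vector that
     * is exact for all terms depending linearly on lambda. */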
void reset_foreign_enerdata(gmx_enerdata_t *enerd)

    /* First reset all foreign energy components; foreign energies are
       always evaluated on neighbor search steps. */
    for (i = 0; (i < egNR); i++)
        for (j = 0; (j < enerd->grpp.nener); j++)
            enerd->foreign_grpp.ener[i][j] = 0.0;

    /* potential energy components */
    for (i = 0; (i <= F_EPOT); i++)
        enerd->foreign_term[i] = 0.0;
void reset_enerdata(gmx_enerdata_t *enerd)

    /* First reset all energy components. */
    for (i = 0; (i < egNR); i++)
        for (j = 0; (j < enerd->grpp.nener); j++)
            enerd->grpp.ener[i][j] = 0.0;

    for (i = 0; i < efptNR; i++)
        enerd->dvdl_lin[i]    = 0.0;
        enerd->dvdl_nonlin[i] = 0.0;

    /* Normal potential energy components */
    for (i = 0; (i <= F_EPOT); i++)
        enerd->term[i] = 0.0;

    enerd->term[F_DVDL]           = 0.0;
    enerd->term[F_DVDL_COUL]      = 0.0;
    enerd->term[F_DVDL_VDW]       = 0.0;
    enerd->term[F_DVDL_BONDED]    = 0.0;
    enerd->term[F_DVDL_RESTRAINT] = 0.0;
    enerd->term[F_DKDL]           = 0.0;
    if (enerd->n_lambda > 0)
        for (i = 0; i < enerd->n_lambda; i++)
            enerd->enerpart_lambda[i] = 0.0;

    /* reset foreign energy data - separate function since we also call it elsewhere */
    reset_foreign_enerdata(enerd);