2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2014,2015,2016,2017,2018 by the GROMACS development team.
5 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
6 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7 * and including many others, as listed in the AUTHORS file in the
8 * top-level source directory and at http://www.gromacs.org.
10 * GROMACS is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public License
12 * as published by the Free Software Foundation; either version 2.1
13 * of the License, or (at your option) any later version.
15 * GROMACS is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with GROMACS; if not, see
22 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 * If you want to redistribute modifications to GROMACS, please
26 * consider that scientific software is very special. Version
27 * control is crucial - bugs must be traceable. We will be happy to
28 * consider code for inclusion in the official distribution, but
29 * derived work must not be called official GROMACS. Details are found
30 * in the README & COPYING files - if they are missing, get the
31 * official version at http://www.gromacs.org.
33 * To help us fund GROMACS development, we humbly ask that you cite
34 * the research papers on the package. Check out http://www.gromacs.org.
38 * \brief This file defines high-level functions for mdrun to compute
39 * energies and forces for listed interactions.
41 * \author Mark Abraham <mark.j.abraham@gmail.com>
43 * \ingroup module_listed_forces
47 #include "gromacs/utility/arrayref.h"
48 #include "gromacs/utility/enumerationhelpers.h"
49 #include "listed_forces.h"
57 #include "gromacs/gmxlib/network.h"
58 #include "gromacs/gmxlib/nrnb.h"
59 #include "gromacs/listed_forces/bonded.h"
60 #include "gromacs/listed_forces/disre.h"
61 #include "gromacs/listed_forces/orires.h"
62 #include "gromacs/listed_forces/pairs.h"
63 #include "gromacs/listed_forces/position_restraints.h"
64 #include "gromacs/math/vec.h"
65 #include "gromacs/mdlib/enerdata_utils.h"
66 #include "gromacs/mdlib/force.h"
67 #include "gromacs/mdlib/gmx_omp_nthreads.h"
68 #include "gromacs/mdtypes/commrec.h"
69 #include "gromacs/mdtypes/fcdata.h"
70 #include "gromacs/mdtypes/forceoutput.h"
71 #include "gromacs/mdtypes/forcerec.h"
72 #include "gromacs/mdtypes/inputrec.h"
73 #include "gromacs/mdtypes/md_enums.h"
74 #include "gromacs/mdtypes/simulation_workload.h"
75 #include "gromacs/pbcutil/ishift.h"
76 #include "gromacs/pbcutil/pbc.h"
77 #include "gromacs/timing/wallcycle.h"
78 #include "gromacs/topology/topology.h"
79 #include "gromacs/utility/exceptions.h"
80 #include "gromacs/utility/fatalerror.h"
82 #include "listed_internal.h"
83 #include "manage_threading.h"
84 #include "utilities.h"
//! \brief Constructs the listed-forces handler for one selection of interaction groups.
//
// NOTE(review): the extract is missing the declaration lines for numThreads
// and fplog, but both are clearly consumed by the threading_ initializer
// below — confirm against the full source.
86 ListedForces::ListedForces(const gmx_ffparams_t& ffparams,
87 const int numEnergyGroups,
89 const InteractionSelection interactionSelection,
// idefSelection_ holds the (possibly reduced) interaction definitions for
// this selection; threading_ owns the per-thread bonded work buffers.
91 idefSelection_(ffparams),
92 threading_(std::make_unique<bonded_threading_t>(numThreads, numEnergyGroups, fplog)),
93 interactionSelection_(interactionSelection)
// The compiler-generated move constructor and destructor are correct here:
// threading_ is a std::unique_ptr (see the constructor initializer list), so
// the type is movable but not copyable by default.
97 ListedForces::ListedForces(ListedForces&& o) noexcept = default;
99 ListedForces::~ListedForces() = default;
101 //! Copies the selection interactions from \p idefSrc to \p idef
//
// Each bonded ftype is mapped to exactly one of four groups (Pairs,
// Dihedrals, Angles, Rest); its interaction list is copied from idefSrc when
// that group is selected and cleared otherwise.
//
// NOTE(review): the declaration of 'assign', the fall-through to selectRest,
// and the branch choosing copy vs clear are missing from this extract.
102 static void selectInteractions(InteractionDefinitions* idef,
103 const InteractionDefinitions& idefSrc,
104 const ListedForces::InteractionSelection interactionSelection)
// Decode the selection bitmask once, outside the ftype loop.
106 const bool selectPairs =
107 interactionSelection.test(static_cast<int>(ListedForces::InteractionGroup::Pairs));
108 const bool selectDihedrals =
109 interactionSelection.test(static_cast<int>(ListedForces::InteractionGroup::Dihedrals));
110 const bool selectAngles =
111 interactionSelection.test(static_cast<int>(ListedForces::InteractionGroup::Angles));
112 const bool selectRest =
113 interactionSelection.test(static_cast<int>(ListedForces::InteractionGroup::Rest));
115 for (int ftype = 0; ftype < F_NRE; ftype++)
117 const t_interaction_function& ifunc = interaction_function[ftype];
// Only bonded interactions participate; classify by interaction flags.
118 if (ifunc.flags & IF_BOND)
121 if (ifunc.flags & IF_PAIR)
123 assign = selectPairs;
125 else if (ifunc.flags & IF_DIHEDRAL)
127 assign = selectDihedrals;
129 else if (ifunc.flags & IF_ATYPE)
131 assign = selectAngles;
// Copy the whole interaction list when selected ...
139 idef->il[ftype] = idefSrc.il[ftype];
// ... otherwise leave this ftype empty for this selection.
143 idef->il[ftype].clear();
//! \brief Prepares this object for force evaluation on the current domain.
//
// NOTE(review): the else-branches pairing with the if-statements below are
// missing from this extract; the apparent flow is: use the domain idef
// directly when all groups are selected, otherwise build idefSelection_.
149 void ListedForces::setup(const InteractionDefinitions& domainIdef, const int numAtomsForce, const bool useGpu)
151 if (interactionSelection_.all())
153 // Avoid the overhead of copying all interaction lists by simply setting the reference to the domain idef
158 idef_ = &idefSelection_;
160 selectInteractions(&idefSelection_, domainIdef, interactionSelection_);
162 idefSelection_.ilsort = domainIdef.ilsort;
// Position-restraint parameters are only needed by the Rest group.
164 if (interactionSelection_.test(static_cast<int>(ListedForces::InteractionGroup::Rest)))
166 idefSelection_.iparams_posres = domainIdef.iparams_posres;
167 idefSelection_.iparams_fbposres = domainIdef.iparams_fbposres;
171 idefSelection_.iparams_posres.clear();
172 idefSelection_.iparams_fbposres.clear();
176 setup_bonded_threading(threading_.get(), numAtomsForce, useGpu, *idef_);
// With FE-sorted interactions, foreign-lambda evaluation needs scratch
// force buffers; size the real buffer to hold one rvec4 per atom.
178 if (idef_->ilsort == ilsortFE_SORTED)
180 forceBufferLambda_.resize(numAtomsForce * sizeof(rvec4) / sizeof(real));
181 shiftForceBufferLambda_.resize(SHIFTS);
190 /*! \brief Return true if ftype is an explicit pair-listed LJ or
191 * COULOMB interaction type: bonded LJ (usually 1-4), or special
192 * listed non-bonded for FEP. */
193 bool isPairInteraction(int ftype)
195 return ((ftype) >= F_LJ14 && (ftype) <= F_LJC_PAIRS_NB);
198 /*! \brief Zero thread-local output buffers */
//
// Clears only the force blocks this thread actually touched (nblock_used /
// block_index), plus the full shift-force, energy-term, group-pair-energy
// and dvdl accumulators.
//
// NOTE(review): the statements that actually zero f[a][d], ener[i] and
// dvdl[i] are missing from this extract; only the loop headers are visible.
199 void zero_thread_output(f_thread_t* f_t)
// Number of reals per force entry (rvec4), computed at compile time.
201 constexpr int nelem_fa = sizeof(f_t->f[0]) / sizeof(real);
203 for (int i = 0; i < f_t->nblock_used; i++)
205 int a0 = f_t->block_index[i] * reduction_block_size;
206 int a1 = a0 + reduction_block_size;
207 for (int a = a0; a < a1; a++)
209 for (int d = 0; d < nelem_fa; d++)
216 for (int i = 0; i < SHIFTS; i++)
218 clear_rvec(f_t->fshift[i]);
220 for (int i = 0; i < F_NRE; i++)
224 for (int i = 0; i < egNR; i++)
226 for (int j = 0; j < f_t->grpp.nener; j++)
228 f_t->grpp.ener[i][j] = 0;
231 for (auto i : keysOf(f_t->dvdl))
237 /*! \brief The max thread number is arbitrary, we used a fixed number
238 * to avoid memory management. Using more than 16 threads is probably
239 * never useful performance wise. */
// NOTE(review): the comment above mentions 16 threads while the limit below
// is 256; the value only bounds the fixed-size fp[] array in
// reduce_thread_forces(), so the mismatch is harmless but confusing.
240 #define MAX_BONDED_THREADS 256
242 /*! \brief Reduce thread-local force buffers */
//
// Sums the per-thread rvec4 force buffers into the caller's force array,
// block by block, using the per-block thread bitmask to visit only threads
// that contributed to a given block.
//
// NOTE(review): the declaration of 'nfb' and the try/catch lines that pair
// with GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR are missing from this extract.
243 void reduce_thread_forces(gmx::ArrayRef<gmx::RVec> force, const bonded_threading_t* bt, int nthreads)
// fp[] below is a fixed-size stack array, hence this hard limit.
245 if (nthreads > MAX_BONDED_THREADS)
247 gmx_fatal(FARGS, "Can not reduce bonded forces on more than %d threads", MAX_BONDED_THREADS);
250 rvec* gmx_restrict f = as_rvec_array(force.data());
252 const int numAtomsForce = bt->numAtomsForce;
254 /* This reduction can run on any number of threads,
255 * independently of bt->nthreads.
256 * But if nthreads matches bt->nthreads (which it currently does)
257 * the uniform distribution of the touched blocks over nthreads will
258 * match the distribution of bonded over threads well in most cases,
259 * which means that threads mostly reduce their own data which increases
260 * the number of cache hits.
262 #pragma omp parallel for num_threads(nthreads) schedule(static)
263 for (int b = 0; b < bt->nblock_used; b++)
267 int ind = bt->block_index[b];
268 rvec4* fp[MAX_BONDED_THREADS];
270 /* Determine which threads contribute to this block */
272 for (int ft = 0; ft < bt->nthreads; ft++)
274 if (bitmask_is_set(bt->mask[ind], ft))
276 fp[nfb++] = bt->f_t[ft]->f;
281 /* Reduce force buffers for threads that contribute */
282 int a0 = ind * reduction_block_size;
283 int a1 = (ind + 1) * reduction_block_size;
284 /* It would be nice if we could pad f to avoid this min */
285 a1 = std::min(a1, numAtomsForce);
286 for (int a = a0; a < a1; a++)
288 for (int fb = 0; fb < nfb; fb++)
290 rvec_inc(f[a], fp[fb][a]);
295 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
299 /*! \brief Reduce thread-local forces, shift forces and energies */
//
// Forces are always reduced; shift forces, energy terms, group-pair energies
// and dvdl are reduced only when the step actually requests them. All the
// t-loops start at t = 1 because thread 0 wrote directly into the main
// output buffers (see calcBondedForces).
//
// NOTE(review): the parameter line declaring 'ener' is missing from this
// extract, but it is clearly an energy-term array indexed by F_NRE below.
300 void reduce_thread_output(gmx::ForceWithShiftForces* forceWithShiftForces,
302 gmx_grppairener_t* grpp,
303 gmx::ArrayRef<real> dvdl,
304 const bonded_threading_t* bt,
305 const gmx::StepWorkload& stepWork)
307 assert(bt->haveBondeds);
309 if (bt->nblock_used > 0)
311 /* Reduce the bonded force buffer */
312 reduce_thread_forces(forceWithShiftForces->force(), bt, bt->nthreads);
315 rvec* gmx_restrict fshift = as_rvec_array(forceWithShiftForces->shiftForces().data());
317 /* When necessary, reduce energy and virial using one thread only */
318 if ((stepWork.computeEnergy || stepWork.computeVirial || stepWork.computeDhdl) && bt->nthreads > 1)
320 gmx::ArrayRef<const std::unique_ptr<f_thread_t>> f_t = bt->f_t;
322 if (stepWork.computeVirial)
324 for (int i = 0; i < SHIFTS; i++)
326 for (int t = 1; t < bt->nthreads; t++)
328 rvec_inc(fshift[i], f_t[t]->fshift[i]);
332 if (stepWork.computeEnergy)
334 for (int i = 0; i < F_NRE; i++)
336 for (int t = 1; t < bt->nthreads; t++)
338 ener[i] += f_t[t]->ener[i];
341 for (int i = 0; i < egNR; i++)
// nener is taken from thread 1's buffer; all threads share the same layout.
343 for (int j = 0; j < f_t[1]->grpp.nener; j++)
345 for (int t = 1; t < bt->nthreads; t++)
347 grpp->ener[i][j] += f_t[t]->grpp.ener[i][j];
352 if (stepWork.computeDhdl)
354 for (auto i : keysOf(f_t[1]->dvdl))
357 for (int t = 1; t < bt->nthreads; t++)
359 dvdl[static_cast<int>(i)] += f_t[t]->dvdl[i];
366 /*! \brief Returns the bonded kernel flavor
368 * Note that energies are always requested when the virial
369 * is requested (performance gain would be small).
370 * Note that currently we do not have bonded kernels that
371 * do not compute forces.
//
// Decision table: virial => ForcesAndVirialAndEnergy; energy only =>
// ForcesAndEnergy; otherwise forces only, with SIMD allowed only when the
// caller enables it and no perturbed interactions are present.
//
// NOTE(review): the 'return flavor;' statement and some else-lines are
// missing from this extract.
373 BondedKernelFlavor selectBondedKernelFlavor(const gmx::StepWorkload& stepWork,
374 const bool useSimdKernels,
375 const bool havePerturbedInteractions)
377 BondedKernelFlavor flavor;
378 if (stepWork.computeEnergy || stepWork.computeVirial)
380 if (stepWork.computeVirial)
382 flavor = BondedKernelFlavor::ForcesAndVirialAndEnergy;
386 flavor = BondedKernelFlavor::ForcesAndEnergy;
391 if (useSimdKernels && !havePerturbedInteractions)
393 flavor = BondedKernelFlavor::ForcesSimdWhenAvailable;
397 flavor = BondedKernelFlavor::ForcesNoSimd;
404 /*! \brief Calculate one element of the list of bonded interactions
//
// Evaluates this thread's share (per workDivision) of one interaction type
// and returns the potential energy contribution. Restraint-type ftypes
// accumulate dvdl into the Restraint coupling slot, all others into Bonded.
//
// NOTE(review): several parameter lines (ftype, x, f, fshift, pbc, md,
// fcdata, nrnb, ...) and most arguments of the elided kernel calls are
// missing from this extract.
406 real calc_one_bond(int thread,
408 const InteractionDefinitions& idef,
409 ArrayRef<const int> iatoms,
410 const int numNonperturbedInteractions,
411 const WorkDivision& workDivision,
415 const t_forcerec* fr,
417 gmx_grppairener_t* grpp,
419 gmx::ArrayRef<const real> lambda,
420 gmx::ArrayRef<real> dvdl,
423 const gmx::StepWorkload& stepWork,
424 int* global_atom_index)
426 GMX_ASSERT(idef.ilsort == ilsortNO_FE || idef.ilsort == ilsortFE_SORTED,
427 "The topology should be marked either as no FE or sorted on FE");
// Perturbed interactions are sorted to the tail of iatoms, so any entries
// beyond numNonperturbedInteractions imply perturbation.
429 const bool havePerturbedInteractions =
430 (idef.ilsort == ilsortFE_SORTED && numNonperturbedInteractions < iatoms.ssize());
431 BondedKernelFlavor flavor =
432 selectBondedKernelFlavor(stepWork, fr->use_simd_kernels, havePerturbedInteractions);
433 FreeEnergyPerturbationCouplingType efptFTYPE;
434 if (IS_RESTRAINT_TYPE(ftype))
436 efptFTYPE = FreeEnergyPerturbationCouplingType::Restraint;
440 efptFTYPE = FreeEnergyPerturbationCouplingType::Bonded;
// Each interaction record is nratoms atom indices plus one parameter index.
443 const int nat1 = interaction_function[ftype].nratoms + 1;
444 const int nbonds = iatoms.ssize() / nat1;
446 GMX_ASSERT(fr->gpuBonded != nullptr || workDivision.end(ftype) == iatoms.ssize(),
447 "The thread division should match the topology");
// [nb0, nb0+nbn) is this thread's slice of the interaction list.
449 const int nb0 = workDivision.bound(ftype, thread);
450 const int nbn = workDivision.bound(ftype, thread + 1) - nb0;
452 ArrayRef<const t_iparams> iparams = idef.iparams;
455 if (!isPairInteraction(ftype))
459 /* TODO The execution time for CMAP dihedrals might be
460 nice to account to its own subtimer, but first
461 wallcycle needs to be extended to support calling from
471 lambda[static_cast<int>(efptFTYPE)],
472 &(dvdl[static_cast<int>(efptFTYPE)]),
481 v = calculateSimpleBond(ftype,
489 lambda[static_cast<int>(efptFTYPE)],
490 &(dvdl[static_cast<int>(efptFTYPE)]),
501 /* TODO The execution time for pairs might be nice to account
502 to its own subtimer, but first wallcycle needs to be
503 extended to support calling from multiple threads. */
516 havePerturbedInteractions,
// Account flop counts for this ftype to the nrnb counters.
524 inc_nrnb(nrnb, nrnbIndex(ftype), nbonds);
532 /*! \brief Compute the bonded part of the listed forces, parallelized over threads
//
// Each OpenMP thread zeroes its own output buffers and loops over all bonded
// ftypes, calling calc_one_bond() for its share of each non-empty list.
//
// NOTE(review): the parameter lines for x/md/fcdata/nrnb, the local
// declarations of ftype/v/epot/fshift, the thread-0 branch body and the
// elided arguments of calc_one_bond are missing from this extract.
534 static void calcBondedForces(const InteractionDefinitions& idef,
535 bonded_threading_t* bt,
537 const t_forcerec* fr,
538 const t_pbc* pbc_null,
539 rvec* fshiftMasterBuffer,
540 gmx_enerdata_t* enerd,
542 gmx::ArrayRef<const real> lambda,
543 gmx::ArrayRef<real> dvdl,
546 const gmx::StepWorkload& stepWork,
547 int* global_atom_index)
549 #pragma omp parallel for num_threads(bt->nthreads) schedule(static)
550 for (int thread = 0; thread < bt->nthreads; thread++)
554 f_thread_t& threadBuffers = *bt->f_t[thread];
559 gmx::ArrayRef<real> dvdlt;
560 gmx_grppairener_t* grpp;
562 zero_thread_output(&threadBuffers);
564 rvec4* ft = threadBuffers.f;
566 /* Thread 0 writes directly to the main output buffers.
567 * We might want to reconsider this.
571 fshift = fshiftMasterBuffer;
// Threads > 0 accumulate into their private buffers; these are summed
// later by reduce_thread_output().
578 fshift = as_rvec_array(threadBuffers.fshift.data());
579 epot = threadBuffers.ener;
580 grpp = &threadBuffers.grpp;
581 dvdlt = threadBuffers.dvdl;
583 /* Loop over all bonded force types to calculate the bonded forces */
584 for (ftype = 0; (ftype < F_NRE); ftype++)
586 const InteractionList& ilist = idef.il[ftype];
587 if (!ilist.empty() && ftype_is_bonded_potential(ftype))
589 ArrayRef<const int> iatoms = gmx::makeConstArrayRef(ilist.iatoms);
590 v = calc_one_bond(thread,
594 idef.numNonperturbedInteractions[ftype],
613 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
//! \brief Returns whether any restraints are active: position or
//! flat-bottomed position restraints in the selected idef, or orientation
//! (orires) / distance (disres) restraints in \p fcdata.
617 bool ListedForces::haveRestraints(const t_fcdata& fcdata) const
619 GMX_ASSERT(fcdata.orires && fcdata.disres, "NMR restraints objects should be set up");
621 return (!idef_->il[F_POSRES].empty() || !idef_->il[F_FBPOSRES].empty() || fcdata.orires->nr > 0
622 || fcdata.disres->nres > 0);
//! \brief Returns whether this selection has bonded interactions to compute
//! on the CPU, as determined during setup_bonded_threading().
625 bool ListedForces::haveCpuBondeds() const
627 return threading_->haveBondeds;
//! \brief Returns whether any CPU-side listed work exists: bonded
//! interactions or restraints.
630 bool ListedForces::haveCpuListedForces(const t_fcdata& fcdata) const
632 return haveCpuBondeds() || haveRestraints(fcdata);
638 /*! \brief Calculates all listed force interactions. */
//
// Runs the threaded bonded computation inside the LISTED wallcycle
// sub-counter, then reduces the per-thread outputs inside LISTED_BUF_OPS.
// dvdl contributions computed here are folded into enerd->dvdl_nonlin.
//
// NOTE(review): the parameter lines for x/nrnb/md/fcdata/pbc, several
// calcBondedForces arguments, and some brace/else lines are missing from
// this extract.
639 void calc_listed(struct gmx_wallcycle* wcycle,
640 const InteractionDefinitions& idef,
641 bonded_threading_t* bt,
643 gmx::ForceOutputs* forceOutputs,
644 const t_forcerec* fr,
646 gmx_enerdata_t* enerd,
648 gmx::ArrayRef<const real> lambda,
651 int* global_atom_index,
652 const gmx::StepWorkload& stepWork)
656 gmx::ForceWithShiftForces& forceWithShiftForces = forceOutputs->forceWithShiftForces();
658 wallcycle_sub_start(wcycle, ewcsLISTED);
659 /* The dummy array is to have a place to store the dhdl at other values
660 of lambda, which will be thrown away in the end */
661 gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> dvdl = { 0 };
662 calcBondedForces(idef,
// PBC is only applied when molecules can be broken over boundaries.
666 fr->bMolPBC ? pbc : nullptr,
667 as_rvec_array(forceWithShiftForces.shiftForces().data()),
676 wallcycle_sub_stop(wcycle, ewcsLISTED);
678 wallcycle_sub_start(wcycle, ewcsLISTED_BUF_OPS);
679 reduce_thread_output(&forceWithShiftForces, enerd->term, &enerd->grpp, dvdl, bt, stepWork);
681 if (stepWork.computeDhdl)
683 for (auto i : keysOf(enerd->dvdl_lin))
685 enerd->dvdl_nonlin[i] += dvdl[i];
688 wallcycle_sub_stop(wcycle, ewcsLISTED_BUF_OPS);
691 /* Copy the sum of violations for the distance restraints from fcd */
694 enerd->term[F_DISRESVIOL] = fcd->disres->sumviol;
698 /*! \brief As calc_listed(), but only determines the potential energy
699 * for the perturbed interactions.
701 * The shift forces in fr are not affected.
//
// Runs single-threaded (thread 0) over only the perturbed tail of each
// interaction list, writing forces into caller-provided scratch buffers so
// the main force output is untouched.
//
// NOTE(review): the assignment of pbc_null, several calc_one_bond arguments,
// and the accumulation of the returned energy 'v' are missing from this
// extract.
703 void calc_listed_lambda(const InteractionDefinitions& idef,
704 bonded_threading_t* bt,
706 const t_forcerec* fr,
707 const struct t_pbc* pbc,
708 gmx::ArrayRef<real> forceBufferLambda,
709 gmx::ArrayRef<gmx::RVec> shiftForceBufferLambda,
710 gmx_grppairener_t* grpp,
712 gmx::ArrayRef<real> dvdl,
714 gmx::ArrayRef<const real> lambda,
717 int* global_atom_index)
// A dedicated work division is used so thread 0 covers exactly the
// perturbed range of each ftype (set per-ftype below).
719 WorkDivision& workDivision = bt->foreignLambdaWorkDivision;
721 const t_pbc* pbc_null;
731 /* We already have the forces, so we use temp buffers here */
732 std::fill(forceBufferLambda.begin(), forceBufferLambda.end(), 0.0_real);
733 std::fill(shiftForceBufferLambda.begin(),
734 shiftForceBufferLambda.end(),
735 gmx::RVec{ 0.0_real, 0.0_real, 0.0_real });
// Reinterpret the real scratch buffer as rvec4 forces (sized accordingly
// in ListedForces::setup).
736 rvec4* f = reinterpret_cast<rvec4*>(forceBufferLambda.data());
737 rvec* fshift = as_rvec_array(shiftForceBufferLambda.data());
739 /* Loop over all bonded force types to calculate the bonded energies */
740 for (int ftype = 0; (ftype < F_NRE); ftype++)
742 if (ftype_is_bonded_potential(ftype))
744 const InteractionList& ilist = idef.il[ftype];
745 /* Create a temporary iatom list with only perturbed interactions */
746 const int numNonperturbed = idef.numNonperturbedInteractions[ftype];
747 ArrayRef<const int> iatomsPerturbed = gmx::constArrayRefFromArray(
748 ilist.iatoms.data() + numNonperturbed, ilist.size() - numNonperturbed);
749 if (!iatomsPerturbed.empty())
751 /* Set the work range of thread 0 to the perturbed bondeds */
752 workDivision.setBound(ftype, 0, 0);
753 workDivision.setBound(ftype, 1, iatomsPerturbed.ssize());
// Only energies are needed at foreign lambdas, not virial/forces output.
755 gmx::StepWorkload tempFlags;
756 tempFlags.computeEnergy = true;
757 real v = calc_one_bond(0,
761 iatomsPerturbed.ssize(),
//! \brief Top-level entry point: computes all listed interactions selected
//! for this object — restraints (with communication where needed), bonded
//! forces, and foreign-lambda energy differences when FEP is active.
//
// NOTE(review): many parameter lines (box, fcdata, nrnb, md, ...), the
// early-return body, and several elided call arguments are missing from
// this extract.
784 void ListedForces::calculate(struct gmx_wallcycle* wcycle,
786 const t_lambda* fepvals,
788 const gmx_multisim_t* ms,
789 gmx::ArrayRefWithPadding<const gmx::RVec> coordinates,
790 gmx::ArrayRef<const gmx::RVec> xWholeMolecules,
792 const history_t* hist,
793 gmx::ForceOutputs* forceOutputs,
794 const t_forcerec* fr,
795 const struct t_pbc* pbc,
796 gmx_enerdata_t* enerd,
798 gmx::ArrayRef<const real> lambda,
800 int* global_atom_index,
801 const gmx::StepWorkload& stepWork)
// Nothing to do if this selection is empty or listed forces are not
// requested this step.
803 if (interactionSelection_.none() || !stepWork.computeListedForces)
808 const InteractionDefinitions& idef = *idef_;
810 // Todo: replace all rvec use here with ArrayRefWithPadding
811 const rvec* x = as_rvec_array(coordinates.paddedArrayRef().data());
813 const bool calculateRestInteractions =
814 interactionSelection_.test(static_cast<int>(ListedForces::InteractionGroup::Rest));
816 t_pbc pbc_full; /* Full PBC is needed for position restraints */
817 if (calculateRestInteractions && haveRestraints(*fcdata))
819 if (!idef.il[F_POSRES].empty() || !idef.il[F_FBPOSRES].empty())
821 /* Not enough flops to bother counting */
822 set_pbc(&pbc_full, fr->pbcType, box);
825 /* TODO Use of restraints triggers further function calls
826 inside the loop over calc_one_bond(), but those are too
827 awkward to account to this subtimer properly in the present
828 code. We don't test / care much about performance with
829 restraints, anyway. */
830 wallcycle_sub_start(wcycle, ewcsRESTRAINTS)
832 if (!idef.il[F_POSRES].empty())
834 posres_wrapper(nrnb, idef, &pbc_full, x, enerd, lambda, fr, &forceOutputs->forceWithVirial());
837 if (!idef.il[F_FBPOSRES].empty())
839 fbposres_wrapper(nrnb, idef, &pbc_full, x, enerd, fr, &forceOutputs->forceWithVirial());
842 /* Do pre force calculation stuff which might require communication */
843 if (fcdata->orires->nr > 0)
// NOTE(review): "orienation" in the assertion message below is a typo for
// "orientation" — fix in a dedicated change, not here.
845 GMX_ASSERT(!xWholeMolecules.empty(), "Need whole molecules for orienation restraints");
846 enerd->term[F_ORIRESDEV] = calc_orires_dev(ms,
847 idef.il[F_ORIRES].size(),
848 idef.il[F_ORIRES].iatoms.data(),
853 fr->bMolPBC ? pbc : nullptr,
857 if (fcdata->disres->nres > 0)
861 idef.il[F_DISRES].size(),
862 idef.il[F_DISRES].iatoms.data(),
864 fr->bMolPBC ? pbc : nullptr,
869 wallcycle_sub_stop(wcycle, ewcsRESTRAINTS);
872 calc_listed(wcycle, idef, threading_.get(), x, forceOutputs, fr, pbc, enerd, nrnb, lambda, md, fcdata, global_atom_index, stepWork);
874 /* Check if we have to determine energy differences
875 * at foreign lambda's.
877 if (fepvals->n_lambda > 0 && stepWork.computeDhdl)
879 gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> dvdl = { 0 };
880 if (!idef.il[F_POSRES].empty())
882 posres_wrapper_lambda(wcycle, fepvals, idef, &pbc_full, x, enerd, lambda, fr);
884 if (idef.ilsort != ilsortNO_FE)
886 wallcycle_sub_start(wcycle, ewcsLISTED_FEP);
887 if (idef.ilsort != ilsortFE_SORTED)
889 gmx_incons("The bonded interactions are not sorted for free energy");
// i == 0 evaluates at the current lambda; i > 0 at foreign lambdas.
891 for (int i = 0; i < 1 + enerd->foreignLambdaTerms.numLambdas(); i++)
893 gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> lam_i;
895 reset_foreign_enerdata(enerd);
896 for (auto j : keysOf(lam_i))
898 lam_i[j] = (i == 0 ? lambda[static_cast<int>(j)] : fepvals->all_lambda[j][i - 1]);
900 calc_listed_lambda(idef,
906 shiftForceBufferLambda_,
907 &(enerd->foreign_grpp),
915 sum_epot(enerd->foreign_grpp, enerd->foreign_term);
// Collapse the per-coupling-type dvdl into one scalar, then reset the
// accumulator for the next lambda point.
916 const double dvdlSum = std::accumulate(std::begin(dvdl), std::end(dvdl), 0.);
917 std::fill(std::begin(dvdl), std::end(dvdl), 0.0);
918 enerd->foreignLambdaTerms.accumulate(i, enerd->foreign_term[F_EPOT], dvdlSum);
920 wallcycle_sub_stop(wcycle, ewcsLISTED_FEP);