2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2014,2015,2016,2017,2018 by the GROMACS development team.
5 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
6 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7 * and including many others, as listed in the AUTHORS file in the
8 * top-level source directory and at http://www.gromacs.org.
10 * GROMACS is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public License
12 * as published by the Free Software Foundation; either version 2.1
13 * of the License, or (at your option) any later version.
15 * GROMACS is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with GROMACS; if not, see
22 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 * If you want to redistribute modifications to GROMACS, please
26 * consider that scientific software is very special. Version
27 * control is crucial - bugs must be traceable. We will be happy to
28 * consider code for inclusion in the official distribution, but
29 * derived work must not be called official GROMACS. Details are found
30 * in the README & COPYING files - if they are missing, get the
31 * official version at http://www.gromacs.org.
33 * To help us fund GROMACS development, we humbly ask that you cite
34 * the research papers on the package. Check out http://www.gromacs.org.
38 * \brief This file defines high-level functions for mdrun to compute
39 * energies and forces for listed interactions.
41 * \author Mark Abraham <mark.j.abraham@gmail.com>
43 * \ingroup module_listed_forces
47 #include "gromacs/utility/arrayref.h"
48 #include "gromacs/utility/enumerationhelpers.h"
49 #include "listed_forces.h"
57 #include "gromacs/gmxlib/network.h"
58 #include "gromacs/gmxlib/nrnb.h"
59 #include "gromacs/listed_forces/bonded.h"
60 #include "gromacs/listed_forces/disre.h"
61 #include "gromacs/listed_forces/orires.h"
62 #include "gromacs/listed_forces/pairs.h"
63 #include "gromacs/listed_forces/position_restraints.h"
64 #include "gromacs/math/vec.h"
65 #include "gromacs/mdlib/enerdata_utils.h"
66 #include "gromacs/mdlib/force.h"
67 #include "gromacs/mdlib/gmx_omp_nthreads.h"
68 #include "gromacs/mdtypes/commrec.h"
69 #include "gromacs/mdtypes/mdatom.h"
70 #include "gromacs/mdtypes/fcdata.h"
71 #include "gromacs/mdtypes/forceoutput.h"
72 #include "gromacs/mdtypes/forcerec.h"
73 #include "gromacs/mdtypes/inputrec.h"
74 #include "gromacs/mdtypes/md_enums.h"
75 #include "gromacs/mdtypes/simulation_workload.h"
76 #include "gromacs/pbcutil/ishift.h"
77 #include "gromacs/pbcutil/pbc.h"
78 #include "gromacs/timing/wallcycle.h"
79 #include "gromacs/topology/topology.h"
80 #include "gromacs/utility/exceptions.h"
81 #include "gromacs/utility/fatalerror.h"
83 #include "listed_internal.h"
84 #include "manage_threading.h"
85 #include "utilities.h"
// ListedForces constructor: stores a filtered copy of the force-field
// interaction definitions, creates the per-thread bonded reduction state,
// and records which interaction groups this instance will compute.
// NOTE(review): this paste is missing source lines here (at least the
// fplog/numThreads parameter declarations and the constructor body braces
// are not visible) — confirm against the full file before editing.
87 ListedForces::ListedForces(const gmx_ffparams_t& ffparams,
88 const int numEnergyGroups,
90 const InteractionSelection interactionSelection,
// Member initializers: idefSelection_ holds the (possibly partial) copy of
// the interactions; threading_ owns the per-thread output buffers.
92 idefSelection_(ffparams),
93 threading_(std::make_unique<bonded_threading_t>(numThreads, numEnergyGroups, fplog)),
94 interactionSelection_(interactionSelection)
// Move construction and destruction need no custom logic: all members
// manage their own resources (Rule of Zero via unique_ptr).
98 ListedForces::ListedForces(ListedForces&& o) noexcept = default;
100 ListedForces::~ListedForces() = default;
102 //! Copies the selection interactions from \p idefSrc to \p idef
// For every bonded function type, copies its interaction list into the
// destination when its group (pairs / dihedrals / angles / rest) is
// selected, and clears the destination list otherwise.
// NOTE(review): lines are missing from this paste — e.g. the declaration of
// the per-ftype 'assign' flag and the branch that consumes selectRest —
// so only the visible classification logic is documented here.
103 static void selectInteractions(InteractionDefinitions* idef,
104 const InteractionDefinitions& idefSrc,
105 const ListedForces::InteractionSelection interactionSelection)
// Decode which interaction groups the caller asked for.
107 const bool selectPairs =
108 interactionSelection.test(static_cast<int>(ListedForces::InteractionGroup::Pairs));
109 const bool selectDihedrals =
110 interactionSelection.test(static_cast<int>(ListedForces::InteractionGroup::Dihedrals));
111 const bool selectAngles =
112 interactionSelection.test(static_cast<int>(ListedForces::InteractionGroup::Angles));
113 const bool selectRest =
114 interactionSelection.test(static_cast<int>(ListedForces::InteractionGroup::Rest));
// Walk all function types and decide per type whether to copy it.
116 for (int ftype = 0; ftype < F_NRE; ftype++)
118 const t_interaction_function& ifunc = interaction_function[ftype];
119 if (ifunc.flags & IF_BOND)
// Classify the bonded type by its flags: pair, dihedral or angle-like
// (IF_ATYPE); remaining bonded types presumably fall under selectRest in
// the lines missing from this paste — TODO confirm.
122 if (ifunc.flags & IF_PAIR)
124 assign = selectPairs;
126 else if (ifunc.flags & IF_DIHEDRAL)
128 assign = selectDihedrals;
130 else if (ifunc.flags & IF_ATYPE)
132 assign = selectAngles;
// Selected: copy the interaction list; not selected: clear it.
140 idef->il[ftype] = idefSrc.il[ftype];
144 idef->il[ftype].clear();
// Prepares this instance for a (new) domain: selects or builds the
// interaction definitions to use, sets up bonded threading, and sizes the
// foreign-lambda scratch buffers when free-energy-sorted interactions exist.
// NOTE(review): this paste is missing lines here, including several braces
// and the branch that presumably points idef_ at domainIdef for the
// all-interactions-selected case — confirm against the full file.
150 void ListedForces::setup(const InteractionDefinitions& domainIdef, const int numAtomsForce, const bool useGpu)
152 if (interactionSelection_.all())
154 // Avoid the overhead of copying all interaction lists by simply setting the reference to the domain idef
// Otherwise build a filtered copy into idefSelection_ and use that.
159 idef_ = &idefSelection_;
161 selectInteractions(&idefSelection_, domainIdef, interactionSelection_);
163 idefSelection_.ilsort = domainIdef.ilsort;
// Position-restraint parameters are only needed when this instance
// computes the 'Rest' group; otherwise clear them in the copy.
165 if (interactionSelection_.test(static_cast<int>(ListedForces::InteractionGroup::Rest)))
167 idefSelection_.iparams_posres = domainIdef.iparams_posres;
168 idefSelection_.iparams_fbposres = domainIdef.iparams_fbposres;
172 idefSelection_.iparams_posres.clear();
173 idefSelection_.iparams_fbposres.clear();
// Distribute the bonded work over threads (or flag the GPU path).
177 setup_bonded_threading(threading_.get(), numAtomsForce, useGpu, *idef_);
// With FE-sorted interactions, foreign-lambda energy evaluation needs
// scratch force buffers: rvec4 per atom, expressed in units of real.
179 if (idef_->ilsort == ilsortFE_SORTED)
181 forceBufferLambda_.resize(numAtomsForce * sizeof(rvec4) / sizeof(real));
182 shiftForceBufferLambda_.resize(gmx::c_numShiftVectors);
191 /*! \brief Return true if ftype is an explicit pair-listed LJ or
192 * COULOMB interaction type: bonded LJ (usually 1-4), or special
193 * listed non-bonded for FEP. */
194 bool isPairInteraction(int ftype)
196 return ((ftype) >= F_LJ14 && (ftype) <= F_LJC_PAIRS_NB);
199 /*! \brief Zero thread-local output buffers */
200 void zero_thread_output(f_thread_t* f_t)
202 constexpr int nelem_fa = sizeof(f_t->f[0]) / sizeof(real);
204 for (int i = 0; i < f_t->nblock_used; i++)
206 int a0 = f_t->block_index[i] * reduction_block_size;
207 int a1 = a0 + reduction_block_size;
208 for (int a = a0; a < a1; a++)
210 for (int d = 0; d < nelem_fa; d++)
217 for (int i = 0; i < gmx::c_numShiftVectors; i++)
219 clear_rvec(f_t->fshift[i]);
221 for (int i = 0; i < F_NRE; i++)
225 for (int i = 0; i < static_cast<int>(NonBondedEnergyTerms::Count); i++)
227 for (int j = 0; j < f_t->grpp.nener; j++)
229 f_t->grpp.energyGroupPairTerms[i][j] = 0;
232 for (auto i : keysOf(f_t->dvdl))
238 /*! \brief The max thread number is arbitrary, we used a fixed number
239 * to avoid memory management. Using more than 16 threads is probably
240 * never useful performance wise. */
241 #define MAX_BONDED_THREADS 256
243 /*! \brief Reduce thread-local force buffers */
// Sums the per-thread rvec4 force buffers into the main force array, block
// by block, with an OpenMP loop over the touched blocks.
// NOTE(review): lines are missing from this paste, including the
// declaration/initialization of 'nfb' and the try-braces that
// GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR closes.
244 void reduce_thread_forces(gmx::ArrayRef<gmx::RVec> force, const bonded_threading_t* bt, int nthreads)
// Guard: fp[] below has a fixed capacity of MAX_BONDED_THREADS entries.
246 if (nthreads > MAX_BONDED_THREADS)
248 gmx_fatal(FARGS, "Can not reduce bonded forces on more than %d threads", MAX_BONDED_THREADS);
251 rvec* gmx_restrict f = as_rvec_array(force.data());
253 const int numAtomsForce = bt->numAtomsForce;
255 /* This reduction can run on any number of threads,
256 * independently of bt->nthreads.
257 * But if nthreads matches bt->nthreads (which it currently does)
258 * the uniform distribution of the touched blocks over nthreads will
259 * match the distribution of bonded over threads well in most cases,
260 * which means that threads mostly reduce their own data which increases
261 * the number of cache hits.
263 #pragma omp parallel for num_threads(nthreads) schedule(static)
264 for (int b = 0; b < bt->nblock_used; b++)
268 int ind = bt->block_index[b];
269 rvec4* fp[MAX_BONDED_THREADS];
271 /* Determine which threads contribute to this block */
// bt->mask[ind] records which threads wrote into block 'ind'; gather
// pointers to exactly those threads' force buffers.
273 for (int ft = 0; ft < bt->nthreads; ft++)
275 if (bitmask_is_set(bt->mask[ind], ft))
277 fp[nfb++] = bt->f_t[ft]->f;
282 /* Reduce force buffers for threads that contribute */
283 int a0 = ind * reduction_block_size;
284 int a1 = (ind + 1) * reduction_block_size;
285 /* It would be nice if we could pad f to avoid this min */
286 a1 = std::min(a1, numAtomsForce);
287 for (int a = a0; a < a1; a++)
289 for (int fb = 0; fb < nfb; fb++)
291 rvec_inc(f[a], fp[fb][a]);
// Exceptions must not escape the OpenMP region.
296 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
300 /*! \brief Reduce thread-local forces, shift forces and energies */
// Accumulates per-thread outputs into the main buffers: forces whenever any
// block was touched; shift forces, energies and dV/dlambda only when the
// step's workload requests them. Thread 0 wrote directly into the main
// buffers (see calcBondedForces), so reduction loops start at t = 1.
// NOTE(review): lines are missing from this paste, including the
// 'real* ener' output parameter that is used below — confirm signature
// against the full file.
301 void reduce_thread_output(gmx::ForceWithShiftForces* forceWithShiftForces,
303 gmx_grppairener_t* grpp,
304 gmx::ArrayRef<real> dvdl,
305 const bonded_threading_t* bt,
306 const gmx::StepWorkload& stepWork)
308 assert(bt->haveBondeds);
310 if (bt->nblock_used > 0)
312 /* Reduce the bonded force buffer */
313 reduce_thread_forces(forceWithShiftForces->force(), bt, bt->nthreads);
316 rvec* gmx_restrict fshift = as_rvec_array(forceWithShiftForces->shiftForces().data());
318 /* When necessary, reduce energy and virial using one thread only */
319 if ((stepWork.computeEnergy || stepWork.computeVirial || stepWork.computeDhdl) && bt->nthreads > 1)
321 gmx::ArrayRef<const std::unique_ptr<f_thread_t>> f_t = bt->f_t;
// Shift forces are needed for the virial.
323 if (stepWork.computeVirial)
325 for (int i = 0; i < gmx::c_numShiftVectors; i++)
327 for (int t = 1; t < bt->nthreads; t++)
329 rvec_inc(fshift[i], f_t[t]->fshift[i]);
// Per-interaction-type energies, then per-energy-group pair terms.
333 if (stepWork.computeEnergy)
335 for (int i = 0; i < F_NRE; i++)
337 for (int t = 1; t < bt->nthreads; t++)
339 ener[i] += f_t[t]->ener[i];
342 for (int i = 0; i < static_cast<int>(NonBondedEnergyTerms::Count); i++)
344 for (int j = 0; j < f_t[1]->grpp.nener; j++)
346 for (int t = 1; t < bt->nthreads; t++)
348 grpp->energyGroupPairTerms[i][j] += f_t[t]->grpp.energyGroupPairTerms[i][j];
// Free-energy derivative contributions, per coupling type.
353 if (stepWork.computeDhdl)
355 for (auto i : keysOf(f_t[1]->dvdl))
358 for (int t = 1; t < bt->nthreads; t++)
360 dvdl[static_cast<int>(i)] += f_t[t]->dvdl[i];
367 /*! \brief Returns the bonded kernel flavor
369 * Note that energies are always requested when the virial
370 * is requested (performance gain would be small).
371 * Note that currently we do not have bonded kernels that
372 * do not compute forces.
374 BondedKernelFlavor selectBondedKernelFlavor(const gmx::StepWorkload& stepWork,
375 const bool useSimdKernels,
376 const bool havePerturbedInteractions)
378 BondedKernelFlavor flavor;
379 if (stepWork.computeEnergy || stepWork.computeVirial)
381 if (stepWork.computeVirial)
383 flavor = BondedKernelFlavor::ForcesAndVirialAndEnergy;
387 flavor = BondedKernelFlavor::ForcesAndEnergy;
392 if (useSimdKernels && !havePerturbedInteractions)
394 flavor = BondedKernelFlavor::ForcesSimdWhenAvailable;
398 flavor = BondedKernelFlavor::ForcesNoSimd;
405 /*! \brief Calculate one element of the list of bonded interactions
// Computes forces/energies for the interactions of one function type
// (ftype) assigned to one thread, dispatching either to the generic bonded
// kernels or to the pair-interaction kernels (1-4 / FEP listed non-bonded).
// Returns the potential-energy contribution.
// NOTE(review): many lines of this function are missing from this paste —
// several parameters (e.g. ftype, x, f, fshift, nrnb, md) and most of the
// kernel-call argument lists — confirm against the full file.
407 real calc_one_bond(int thread,
409 const InteractionDefinitions& idef,
410 ArrayRef<const int> iatoms,
411 const int numNonperturbedInteractions,
412 const WorkDivision& workDivision,
416 const t_forcerec* fr,
418 gmx_grppairener_t* grpp,
420 gmx::ArrayRef<const real> lambda,
421 gmx::ArrayRef<real> dvdl,
424 const gmx::StepWorkload& stepWork,
425 int* global_atom_index)
427 GMX_ASSERT(idef.ilsort == ilsortNO_FE || idef.ilsort == ilsortFE_SORTED,
428 "The topology should be marked either as no FE or sorted on FE");
// Perturbed interactions are sorted to the end of the list; when any are
// present the SIMD kernels are avoided (see selectBondedKernelFlavor).
430 const bool havePerturbedInteractions =
431 (idef.ilsort == ilsortFE_SORTED && numNonperturbedInteractions < iatoms.ssize());
432 BondedKernelFlavor flavor =
433 selectBondedKernelFlavor(stepWork, fr->use_simd_kernels, havePerturbedInteractions);
// Restraint types couple to the 'Restraint' lambda component, all other
// bonded types to the 'Bonded' component.
434 FreeEnergyPerturbationCouplingType efptFTYPE;
435 if (IS_RESTRAINT_TYPE(ftype))
437 efptFTYPE = FreeEnergyPerturbationCouplingType::Restraint;
441 efptFTYPE = FreeEnergyPerturbationCouplingType::Bonded;
// Each interaction entry is one type index plus nratoms atom indices.
444 const int nat1 = interaction_function[ftype].nratoms + 1;
445 const int nbonds = iatoms.ssize() / nat1;
447 GMX_ASSERT(fr->gpuBonded != nullptr || workDivision.end(ftype) == iatoms.ssize(),
448 "The thread division should match the topology");
// This thread handles list entries [nb0, nb0 + nbn).
450 const int nb0 = workDivision.bound(ftype, thread);
451 const int nbn = workDivision.bound(ftype, thread + 1) - nb0;
453 ArrayRef<const t_iparams> iparams = idef.iparams;
// Non-pair bonded types go through the generic kernels.
456 if (!isPairInteraction(ftype))
460 /* TODO The execution time for CMAP dihedrals might be
461 nice to account to its own subtimer, but first
462 wallcycle needs to be extended to support calling from
// The call below (arguments partly missing from this paste) presumably
// handles the CMAP special case, given the TODO above — confirm.
472 lambda[static_cast<int>(efptFTYPE)],
473 &(dvdl[static_cast<int>(efptFTYPE)]),
474 gmx::arrayRefFromArray(md->chargeA, md->nr),
482 v = calculateSimpleBond(ftype,
490 lambda[static_cast<int>(efptFTYPE)],
491 &(dvdl[static_cast<int>(efptFTYPE)]),
492 gmx::arrayRefFromArray(md->chargeA, md->nr),
502 /* TODO The execution time for pairs might be nice to account
503 to its own subtimer, but first wallcycle needs to be
504 extended to support calling from multiple threads. */
// Pair interactions need both A- and B-state charges plus the
// per-atom perturbation mask (empty when nothing is perturbed).
515 gmx::arrayRefFromArray(md->chargeA, md->nr),
516 gmx::arrayRefFromArray(md->chargeB, md->nr),
517 md->bPerturbed ? gmx::arrayRefFromArray(md->bPerturbed, md->nr) : gmx::ArrayRef<bool>(),
518 gmx::arrayRefFromArray(md->cENER, md->nr),
521 havePerturbedInteractions,
// Account the flop count for this interaction type.
529 inc_nrnb(nrnb, nrnbIndex(ftype), nbonds);
537 /*! \brief Compute the bonded part of the listed forces, parallelized over threads
// Runs one OpenMP task per bonded thread; each thread zeroes its output
// buffers and then accumulates every interaction type assigned to it via
// calc_one_bond(). Thread 0 writes directly into the main output buffers.
// NOTE(review): lines are missing from this paste (some parameters, local
// declarations such as 'fshift'/'ftype'/'v', braces, and most of the
// calc_one_bond argument list) — confirm against the full file.
539 static void calcBondedForces(const InteractionDefinitions& idef,
540 bonded_threading_t* bt,
542 const t_forcerec* fr,
543 const t_pbc* pbc_null,
544 rvec* fshiftMasterBuffer,
545 gmx_enerdata_t* enerd,
547 gmx::ArrayRef<const real> lambda,
548 gmx::ArrayRef<real> dvdl,
551 const gmx::StepWorkload& stepWork,
552 int* global_atom_index)
554 #pragma omp parallel for num_threads(bt->nthreads) schedule(static)
555 for (int thread = 0; thread < bt->nthreads; thread++)
559 f_thread_t& threadBuffers = *bt->f_t[thread];
// Per-thread views of the output destinations; for thread 0 these alias
// the main buffers, for the others the thread's own buffers.
564 gmx::ArrayRef<real> dvdlt;
565 gmx::ArrayRef<real> epot;
566 gmx_grppairener_t* grpp;
568 zero_thread_output(&threadBuffers);
570 rvec4* ft = threadBuffers.f;
572 /* Thread 0 writes directly to the main output buffers.
573 * We might want to reconsider this.
577 fshift = fshiftMasterBuffer;
584 fshift = as_rvec_array(threadBuffers.fshift.data());
585 epot = threadBuffers.ener;
586 grpp = &threadBuffers.grpp;
587 dvdlt = threadBuffers.dvdl;
589 /* Loop over all bonded force types to calculate the bonded forces */
590 for (ftype = 0; (ftype < F_NRE); ftype++)
592 const InteractionList& ilist = idef.il[ftype];
593 if (!ilist.empty() && ftype_is_bonded_potential(ftype))
595 ArrayRef<const int> iatoms = gmx::makeConstArrayRef(ilist.iatoms);
596 v = calc_one_bond(thread,
600 idef.numNonperturbedInteractions[ftype],
// Exceptions must not escape the OpenMP region.
619 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
623 bool ListedForces::haveRestraints(const t_fcdata& fcdata) const
625 GMX_ASSERT(fcdata.disres, "NMR distance restraint object should be set up");
627 return (!idef_->il[F_POSRES].empty() || !idef_->il[F_FBPOSRES].empty() || fcdata.orires
628 || fcdata.disres->nres > 0);
631 bool ListedForces::haveCpuBondeds() const
633 return threading_->haveBondeds;
636 bool ListedForces::haveCpuListedForces(const t_fcdata& fcdata) const
638 return haveCpuBondeds() || haveRestraints(fcdata);
644 /*! \brief Calculates all listed force interactions. */
// CPU driver: runs the threaded bonded-force computation, reduces the
// per-thread outputs into forceOutputs/enerd, and accumulates the listed
// dV/dlambda contributions.
// NOTE(review): lines are missing from this paste (parameters such as x,
// nrnb, md, fcd, and several braces/arguments) — confirm against the full
// file before editing.
645 void calc_listed(struct gmx_wallcycle* wcycle,
646 const InteractionDefinitions& idef,
647 bonded_threading_t* bt,
649 gmx::ForceOutputs* forceOutputs,
650 const t_forcerec* fr,
652 gmx_enerdata_t* enerd,
654 gmx::ArrayRef<const real> lambda,
657 int* global_atom_index,
658 const gmx::StepWorkload& stepWork)
662 gmx::ForceWithShiftForces& forceWithShiftForces = forceOutputs->forceWithShiftForces();
664 wallcycle_sub_start(wcycle, WallCycleSubCounter::Listed);
665 /* The dummy array is to have a place to store the dhdl at other values
666 of lambda, which will be thrown away in the end */
667 gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> dvdl = { 0 };
668 calcBondedForces(idef,
// PBC is only applied to bondeds when molecules can be broken across
// periodic boundaries (bMolPBC).
672 fr->bMolPBC ? pbc : nullptr,
673 as_rvec_array(forceWithShiftForces.shiftForces().data()),
682 wallcycle_sub_stop(wcycle, WallCycleSubCounter::Listed);
684 wallcycle_sub_start(wcycle, WallCycleSubCounter::ListedBufOps);
685 reduce_thread_output(&forceWithShiftForces, enerd->term.data(), &enerd->grpp, dvdl, bt, stepWork);
// The listed-force dV/dlambda contributions are accumulated as non-linear.
687 if (stepWork.computeDhdl)
689 for (auto i : keysOf(enerd->dvdl_lin))
691 enerd->dvdl_nonlin[i] += dvdl[i];
694 wallcycle_sub_stop(wcycle, WallCycleSubCounter::ListedBufOps);
697 /* Copy the sum of violations for the distance restraints from fcd */
700 enerd->term[F_DISRESVIOL] = fcd->disres->sumviol;
704 /*! \brief As calc_listed(), but only determines the potential energy
705 * for the perturbed interactions.
707 * The shift forces in fr are not affected.
// Uses temporary force/shift-force buffers (the real forces were already
// computed) and evaluates only the perturbed tail of each FE-sorted
// interaction list, single-threaded via a dedicated work division.
// NOTE(review): lines are missing from this paste, including several
// parameters, the pbc_null assignment, and most of the calc_one_bond
// argument list — confirm against the full file.
709 void calc_listed_lambda(const InteractionDefinitions& idef,
710 bonded_threading_t* bt,
712 const t_forcerec* fr,
713 const struct t_pbc* pbc,
714 gmx::ArrayRef<real> forceBufferLambda,
715 gmx::ArrayRef<gmx::RVec> shiftForceBufferLambda,
716 gmx_grppairener_t* grpp,
718 gmx::ArrayRef<real> dvdl,
720 gmx::ArrayRef<const real> lambda,
723 int* global_atom_index)
// Single-threaded path: reuse the foreign-lambda work division object.
725 WorkDivision& workDivision = bt->foreignLambdaWorkDivision;
727 const t_pbc* pbc_null;
737 /* We already have the forces, so we use temp buffers here */
738 std::fill(forceBufferLambda.begin(), forceBufferLambda.end(), 0.0_real);
739 std::fill(shiftForceBufferLambda.begin(),
740 shiftForceBufferLambda.end(),
741 gmx::RVec{ 0.0_real, 0.0_real, 0.0_real });
// Reinterpret the real buffer as rvec4 entries, matching the kernels'
// force-buffer layout (sized accordingly in ListedForces::setup).
742 rvec4* f = reinterpret_cast<rvec4*>(forceBufferLambda.data());
743 rvec* fshift = as_rvec_array(shiftForceBufferLambda.data());
745 /* Loop over all bonded force types to calculate the bonded energies */
746 for (int ftype = 0; (ftype < F_NRE); ftype++)
748 if (ftype_is_bonded_potential(ftype))
750 const InteractionList& ilist = idef.il[ftype];
751 /* Create a temporary iatom list with only perturbed interactions */
// Perturbed entries are sorted to the end, after numNonperturbed.
752 const int numNonperturbed = idef.numNonperturbedInteractions[ftype];
753 ArrayRef<const int> iatomsPerturbed = gmx::constArrayRefFromArray(
754 ilist.iatoms.data() + numNonperturbed, ilist.size() - numNonperturbed);
755 if (!iatomsPerturbed.empty())
757 /* Set the work range of thread 0 to the perturbed bondeds */
758 workDivision.setBound(ftype, 0, 0);
759 workDivision.setBound(ftype, 1, iatomsPerturbed.ssize());
// Only energies are needed at foreign lambda values.
761 gmx::StepWorkload tempFlags;
762 tempFlags.computeEnergy = true;
763 real v = calc_one_bond(0,
767 iatomsPerturbed.ssize(),
// Public entry point: computes all listed interactions selected for this
// instance — position/orientation/distance restraints, the threaded bonded
// forces (via calc_listed), and energy differences at foreign lambda values.
// NOTE(review): this paste is missing many lines (parameters such as box,
// fcdata, md, nrnb; call arguments; braces), and the function continues
// beyond the visible excerpt — confirm against the full file.
790 void ListedForces::calculate(struct gmx_wallcycle* wcycle,
792 const t_lambda* fepvals,
794 const gmx_multisim_t* ms,
795 gmx::ArrayRefWithPadding<const gmx::RVec> coordinates,
796 gmx::ArrayRef<const gmx::RVec> xWholeMolecules,
798 const history_t* hist,
799 gmx::ForceOutputs* forceOutputs,
800 const t_forcerec* fr,
801 const struct t_pbc* pbc,
802 gmx_enerdata_t* enerd,
804 gmx::ArrayRef<const real> lambda,
806 int* global_atom_index,
807 const gmx::StepWorkload& stepWork)
// Early out: nothing selected, or listed forces not requested this step.
809 if (interactionSelection_.none() || !stepWork.computeListedForces)
814 const InteractionDefinitions& idef = *idef_;
816 // Todo: replace all rvec use here with ArrayRefWithPadding
817 const rvec* x = as_rvec_array(coordinates.paddedArrayRef().data());
819 const bool calculateRestInteractions =
820 interactionSelection_.test(static_cast<int>(ListedForces::InteractionGroup::Rest));
822 t_pbc pbc_full; /* Full PBC is needed for position restraints */
823 if (calculateRestInteractions && haveRestraints(*fcdata))
825 if (!idef.il[F_POSRES].empty() || !idef.il[F_FBPOSRES].empty())
827 /* Not enough flops to bother counting */
828 set_pbc(&pbc_full, fr->pbcType, box);
831 /* TODO Use of restraints triggers further function calls
832 inside the loop over calc_one_bond(), but those are too
833 awkward to account to this subtimer properly in the present
834 code. We don't test / care much about performance with
835 restraints, anyway. */
836 wallcycle_sub_start(wcycle, WallCycleSubCounter::Restraints);
838 if (!idef.il[F_POSRES].empty())
840 posres_wrapper(nrnb, idef, &pbc_full, x, enerd, lambda, fr, &forceOutputs->forceWithVirial());
843 if (!idef.il[F_FBPOSRES].empty())
845 fbposres_wrapper(nrnb, idef, &pbc_full, x, enerd, fr, &forceOutputs->forceWithVirial());
848 /* Do pre force calculation stuff which might require communication */
// Orientation restraints need whole molecules; with multi-sim, deviations
// are presumably ensemble-averaged via 'ms' — confirm in calc_orires_dev.
// (The typo "orienation" in the message below is in the original source.)
851 GMX_ASSERT(!xWholeMolecules.empty(), "Need whole molecules for orienation restraints");
852 enerd->term[F_ORIRESDEV] = calc_orires_dev(ms,
853 idef.il[F_ORIRES].size(),
854 idef.il[F_ORIRES].iatoms.data(),
859 fr->bMolPBC ? pbc : nullptr,
860 fcdata->orires.get(),
// Distance restraints (call arguments partly missing from this paste).
863 if (fcdata->disres->nres > 0)
867 idef.il[F_DISRES].size(),
868 idef.il[F_DISRES].iatoms.data(),
870 fr->bMolPBC ? pbc : nullptr,
875 wallcycle_sub_stop(wcycle, WallCycleSubCounter::Restraints);
// Main bonded-force computation.
878 calc_listed(wcycle, idef, threading_.get(), x, forceOutputs, fr, pbc, enerd, nrnb, lambda, md, fcdata, global_atom_index, stepWork);
880 /* Check if we have to determine energy differences
881 * at foreign lambda's.
883 if (fepvals->n_lambda > 0 && stepWork.computeDhdl)
885 gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> dvdl = { 0 };
886 if (!idef.il[F_POSRES].empty())
888 posres_wrapper_lambda(wcycle, fepvals, idef, &pbc_full, x, enerd, lambda, fr);
890 if (idef.ilsort != ilsortNO_FE)
892 wallcycle_sub_start(wcycle, WallCycleSubCounter::ListedFep);
893 if (idef.ilsort != ilsortFE_SORTED)
895 gmx_incons("The bonded interactions are not sorted for free energy");
// Loop over the current lambda point (i == 0) plus each foreign lambda.
897 for (int i = 0; i < 1 + enerd->foreignLambdaTerms.numLambdas(); i++)
899 gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> lam_i;
901 reset_foreign_enerdata(enerd);
902 for (auto j : keysOf(lam_i))
904 lam_i[j] = (i == 0 ? lambda[static_cast<int>(j)] : fepvals->all_lambda[j][i - 1]);
906 calc_listed_lambda(idef,
912 shiftForceBufferLambda_,
913 &(enerd->foreign_grpp),
914 enerd->foreign_term.data(),
// Sum dV/dlambda over coupling types, record it, then reset the
// accumulator for the next lambda point.
921 sum_epot(enerd->foreign_grpp, enerd->foreign_term.data());
922 const double dvdlSum = std::accumulate(std::begin(dvdl), std::end(dvdl), 0.);
923 std::fill(std::begin(dvdl), std::end(dvdl), 0.0);
924 enerd->foreignLambdaTerms.accumulate(i, enerd->foreign_term[F_EPOT], dvdlSum);
926 wallcycle_sub_stop(wcycle, WallCycleSubCounter::ListedFep);