Make non bonded energy terms enum class
src/gromacs/nbnxm/kerneldispatch.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#include "gmxpre.h"

#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/simd/simd.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/enumerationhelpers.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/real.h"

#include "kernel_common.h"
#include "nbnxm_gpu.h"
#include "nbnxm_gpu_data_mgmt.h"
#include "nbnxm_simd.h"
#include "pairlistset.h"
#include "pairlistsets.h"
#include "kernels_reference/kernel_gpu_ref.h"
#define INCLUDE_KERNELFUNCTION_TABLES
#include "kernels_reference/kernel_ref.h"
#ifdef GMX_NBNXN_SIMD_2XNN
#    include "kernels_simd_2xmm/kernels.h"
#endif
#ifdef GMX_NBNXN_SIMD_4XN
#    include "kernels_simd_4xm/kernels.h"
#endif
#undef INCLUDE_KERNELFUNCTION_TABLES

/*! \brief Clears the energy group output buffers
 *
 * \param[in,out] out  nbnxn kernel output struct
 */
static void clearGroupEnergies(nbnxn_atomdata_output_t* out)
{
    std::fill(out->Vvdw.begin(), out->Vvdw.end(), 0.0_real);
    std::fill(out->Vc.begin(), out->Vc.end(), 0.0_real);
    std::fill(out->VSvdw.begin(), out->VSvdw.end(), 0.0_real);
    std::fill(out->VSc.begin(), out->VSc.end(), 0.0_real);
}

/*! \brief Reduce the group-pair energy buffers produced by a SIMD kernel
 * to single terms in the output buffers.
 *
 * The SIMD kernels produce a large number of energy buffers in SIMD registers
 * to avoid scattered reads and writes.
 *
 * \tparam        unrollj         The unroll size for j-particles in the SIMD kernel
 * \param[in]     numGroups       The number of energy groups
 * \param[in]     numGroups_2log  Log2 of numGroups, rounded up
 * \param[in,out] out             Struct with energy buffers
 */
template<int unrollj>
static void reduceGroupEnergySimdBuffers(int numGroups, int numGroups_2log, nbnxn_atomdata_output_t* out)
{
    const int unrollj_half = unrollj / 2;
    /* Energies are stored in SIMD registers with size 2^numGroups_2log */
    const int numGroupsStorage = (1 << numGroups_2log);

    const real* gmx_restrict vVdwSimd     = out->VSvdw.data();
    const real* gmx_restrict vCoulombSimd = out->VSc.data();
    real* gmx_restrict vVdw               = out->Vvdw.data();
    real* gmx_restrict vCoulomb           = out->Vc.data();

    /* The size of the SIMD energy group buffer array is:
     * numGroups*numGroups*numGroupsStorage*unrollj_half*simd_width
     */
    for (int i = 0; i < numGroups; i++)
    {
        for (int j1 = 0; j1 < numGroups; j1++)
        {
            for (int j0 = 0; j0 < numGroups; j0++)
            {
                int c = ((i * numGroups + j1) * numGroupsStorage + j0) * unrollj_half * unrollj;
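                // Elements c + 0 and c + 1 hold the partial sums for j-groups j0 and j1, respectively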
                for (int s = 0; s < unrollj_half; s++)
                {
                    vVdw[i * numGroups + j0] += vVdwSimd[c + 0];
                    vVdw[i * numGroups + j1] += vVdwSimd[c + 1];
                    vCoulomb[i * numGroups + j0] += vCoulombSimd[c + 0];
                    vCoulomb[i * numGroups + j1] += vCoulombSimd[c + 1];
                    c += unrollj + 2;
                }
            }
        }
    }
}

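//! Returns the Coulomb kernel type, used as the coulkt index into the kernel function tables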
static int getCoulombKernelType(const Nbnxm::KernelSetup& kernelSetup, const interaction_const_t& ic)
{

    if (EEL_RF(ic.eeltype) || ic.eeltype == CoulombInteractionType::Cut)
    {
        return coulktRF;
    }
    else
    {
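        // Ewald electrostatics; the _TWIN kernel variants handle rcoulomb != rvdw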
        if (kernelSetup.ewaldExclusionType == Nbnxm::EwaldExclusionType::Table)
        {
            if (ic.rcoulomb == ic.rvdw)
            {
                return coulktTAB;
            }
            else
            {
                return coulktTAB_TWIN;
            }
        }
        else
        {
            if (ic.rcoulomb == ic.rvdw)
            {
                return coulktEWALD;
            }
            else
            {
                return coulktEWALD_TWIN;
            }
        }
    }
}

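//! Returns the Van der Waals kernel type, used as the vdwkt index into the kernel function tables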
static int getVdwKernelType(const Nbnxm::KernelSetup&       kernelSetup,
                            const nbnxn_atomdata_t::Params& nbatParams,
                            const interaction_const_t&      ic)
{
    if (ic.vdwtype == VanDerWaalsType::Cut)
    {
        switch (ic.vdw_modifier)
        {
            case InteractionModifiers::None:
            case InteractionModifiers::PotShift:
                switch (nbatParams.ljCombinationRule)
                {
                    case LJCombinationRule::Geometric: return vdwktLJCUT_COMBGEOM;
                    case LJCombinationRule::LorentzBerthelot: return vdwktLJCUT_COMBLB;
                    case LJCombinationRule::None: return vdwktLJCUT_COMBNONE;
                    default: gmx_incons("Unknown combination rule");
                }
            case InteractionModifiers::ForceSwitch: return vdwktLJFORCESWITCH;
            case InteractionModifiers::PotSwitch: return vdwktLJPOTSWITCH;
            default:
                std::string errorMsg =
                        gmx::formatString("Unsupported VdW interaction modifier %s (%d)",
                                          enumValueToString(ic.vdw_modifier),
                                          static_cast<int>(ic.vdw_modifier));
                gmx_incons(errorMsg);
        }
    }
    else if (ic.vdwtype == VanDerWaalsType::Pme)
    {
        if (ic.ljpme_comb_rule == LongRangeVdW::Geom)
        {
            return vdwktLJEWALDCOMBGEOM;
        }
        else
        {
            /* At setup we (should have) selected the C reference kernel */
            GMX_RELEASE_ASSERT(kernelSetup.kernelType == Nbnxm::KernelType::Cpu4x4_PlainC,
                               "Only the plain-C reference nbnxn kernel supports LJ-PME with LB "
                               "combination rules");
            return vdwktLJEWALDCOMBLB;
        }
    }
    else
    {
        std::string errorMsg = gmx::formatString("Unsupported VdW interaction type %s (%d)",
                                                 enumValueToString(ic.vdwtype),
                                                 static_cast<int>(ic.vdwtype));
        gmx_incons(errorMsg);
    }
}

/*! \brief Dispatches the non-bonded N versus M atom cluster CPU kernels.
 *
 * OpenMP parallelization is performed within this function.
 * Energy reduction, but not force and shift force reduction, is performed
 * within this function.
 *
 * \param[in]     pairlistSet   Pairlists with local or non-local interactions to compute
 * \param[in]     kernelSetup   The non-bonded kernel setup
 * \param[in,out] nbat          The atomdata for the interactions
 * \param[in]     ic            Non-bonded interaction constants
 * \param[in]     shiftVectors  The PBC shift vectors
 * \param[in]     stepWork      Flags that tell what to compute
 * \param[in]     clearF        Enum that tells whether to clear the force output buffer
 * \param[out]    vCoulomb      Output buffer for Coulomb energies
 * \param[out]    vVdw          Output buffer for Van der Waals energies
 * \param[in]     wcycle        Pointer to cycle counting data structure.
 */
static void nbnxn_kernel_cpu(const PairlistSet&         pairlistSet,
                             const Nbnxm::KernelSetup&  kernelSetup,
                             nbnxn_atomdata_t*          nbat,
                             const interaction_const_t& ic,
                             rvec*                      shiftVectors,
                             const gmx::StepWorkload&   stepWork,
                             int                        clearF,
                             real*                      vCoulomb,
                             real*                      vVdw,
                             gmx_wallcycle*             wcycle)
{

    const nbnxn_atomdata_t::Params& nbatParams = nbat->params();

    const int coulkt = getCoulombKernelType(kernelSetup, ic);
    const int vdwkt  = getVdwKernelType(kernelSetup, nbatParams, ic);

    gmx::ArrayRef<const NbnxnPairlistCpu> pairlists = pairlistSet.cpuLists();

    int gmx_unused nthreads = gmx_omp_nthreads_get(emntNonbonded);
    wallcycle_sub_start(wcycle, ewcsNONBONDED_CLEAR);
#pragma omp parallel for schedule(static) num_threads(nthreads)
    for (gmx::index nb = 0; nb < pairlists.ssize(); nb++)
    {
        // Presently, the kernels do not call C++ code that can throw,
        // so no need for a try/catch pair in this OpenMP region.
        nbnxn_atomdata_output_t* out = &nbat->out[nb];

        if (clearF == enbvClearFYes)
        {
            clearForceBuffer(nbat, nb);

            clear_fshift(out->fshift.data());
        }

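        // The thread that processes pairlist 0 switches the sub-cycle counter from clearing to kernel time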
        if (nb == 0)
        {
            wallcycle_sub_stop(wcycle, ewcsNONBONDED_CLEAR);
            wallcycle_sub_start(wcycle, ewcsNONBONDED_KERNEL);
        }

        // TODO: Change to reference
        const NbnxnPairlistCpu* pairlist = &pairlists[nb];

        if (!stepWork.computeEnergy)
        {
            /* Don't calculate energies */
            switch (kernelSetup.kernelType)
            {
                case Nbnxm::KernelType::Cpu4x4_PlainC:
                    nbnxn_kernel_noener_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#ifdef GMX_NBNXN_SIMD_2XNN
                case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
                    nbnxm_kernel_noener_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
#ifdef GMX_NBNXN_SIMD_4XN
                case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
                    nbnxm_kernel_noener_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
                default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
            }
        }
        else if (out->Vvdw.size() == 1)
        {
            /* A single energy group (pair) */
            out->Vvdw[0] = 0;
            out->Vc[0]   = 0;

            switch (kernelSetup.kernelType)
            {
                case Nbnxm::KernelType::Cpu4x4_PlainC:
                    nbnxn_kernel_ener_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#ifdef GMX_NBNXN_SIMD_2XNN
                case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
                    nbnxm_kernel_ener_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
#ifdef GMX_NBNXN_SIMD_4XN
                case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
                    nbnxm_kernel_ener_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
                default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
            }
        }
        else
        {
            /* Calculate energy group contributions */
            clearGroupEnergies(out);

            int unrollj = 0;

            switch (kernelSetup.kernelType)
            {
                case Nbnxm::KernelType::Cpu4x4_PlainC:
                    unrollj = c_nbnxnCpuIClusterSize;
                    nbnxn_kernel_energrp_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#ifdef GMX_NBNXN_SIMD_2XNN
                case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
                    unrollj = GMX_SIMD_REAL_WIDTH / 2;
                    nbnxm_kernel_energrp_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
#ifdef GMX_NBNXN_SIMD_4XN
                case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
                    unrollj = GMX_SIMD_REAL_WIDTH;
                    nbnxm_kernel_energrp_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
                default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
            }

            if (kernelSetup.kernelType != Nbnxm::KernelType::Cpu4x4_PlainC)
            {
                switch (unrollj)
                {
                    case 2:
                        reduceGroupEnergySimdBuffers<2>(nbatParams.nenergrp, nbatParams.neg_2log, out);
                        break;
                    case 4:
                        reduceGroupEnergySimdBuffers<4>(nbatParams.nenergrp, nbatParams.neg_2log, out);
                        break;
                    case 8:
                        reduceGroupEnergySimdBuffers<8>(nbatParams.nenergrp, nbatParams.neg_2log, out);
                        break;
                    default: GMX_RELEASE_ASSERT(false, "Unsupported j-unroll size");
                }
            }
        }
    }
    wallcycle_sub_stop(wcycle, ewcsNONBONDED_KERNEL);

    if (stepWork.computeEnergy)
    {
        reduce_energies_over_lists(nbat, pairlists.ssize(), vVdw, vCoulomb);
    }
}

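//! Accounts the flop cost of the non-bonded kernel call in \p nrnb, based on the kernel flavor and the pair counts of \p pairlistSet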
static void accountFlops(t_nrnb*                    nrnb,
                         const PairlistSet&         pairlistSet,
                         const nonbonded_verlet_t&  nbv,
                         const interaction_const_t& ic,
                         const gmx::StepWorkload&   stepWork)
{
    const bool usingGpuKernels = nbv.useGpu();

    int enr_nbnxn_kernel_ljc = eNRNB;
    if (EEL_RF(ic.eeltype) || ic.eeltype == CoulombInteractionType::Cut)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!usingGpuKernels && nbv.kernelSetup().ewaldExclusionType == Nbnxm::EwaldExclusionType::Analytical)
             || (usingGpuKernels && Nbnxm::gpu_is_kernel_ewald_analytical(nbv.gpu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    int enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (stepWork.computeEnergy)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc, pairlistSet.natpair_ljq_);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj, pairlistSet.natpair_lj_);
    /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc - eNR_NBNXN_LJ_RF + eNR_NBNXN_RF, pairlistSet.natpair_q_);

    if (ic.vdw_modifier == InteractionModifiers::ForceSwitch)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb,
                 eNR_NBNXN_ADD_LJ_FSW + (stepWork.computeEnergy ? 1 : 0),
                 pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
    }
    if (ic.vdw_modifier == InteractionModifiers::PotSwitch)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb,
                 eNR_NBNXN_ADD_LJ_PSW + (stepWork.computeEnergy ? 1 : 0),
                 pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
    }
    if (ic.vdwtype == VanDerWaalsType::Pme)
    {
        /* We add up the LJ Ewald cost separately */
        inc_nrnb(nrnb,
                 eNR_NBNXN_ADD_LJ_EWALD + (stepWork.computeEnergy ? 1 : 0),
                 pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
    }
}

void nonbonded_verlet_t::dispatchNonbondedKernel(gmx::InteractionLocality   iLocality,
                                                 const interaction_const_t& ic,
                                                 const gmx::StepWorkload&   stepWork,
                                                 int                        clearF,
                                                 const t_forcerec&          fr,
                                                 gmx_enerdata_t*            enerd,
                                                 t_nrnb*                    nrnb)
{
    const PairlistSet& pairlistSet = pairlistSets().pairlistSet(iLocality);

    switch (kernelSetup().kernelType)
    {
        case Nbnxm::KernelType::Cpu4x4_PlainC:
        case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
        case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
            nbnxn_kernel_cpu(
                    pairlistSet,
                    kernelSetup(),
                    nbat.get(),
                    ic,
                    fr.shift_vec,
                    stepWork,
                    clearF,
                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
                    fr.bBHAM ? enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::BuckinghamSR].data()
                             : enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
                    wcycle_);
            break;

        case Nbnxm::KernelType::Gpu8x8x8:
            Nbnxm::gpu_launch_kernel(gpu_nbv, stepWork, iLocality);
            break;

        case Nbnxm::KernelType::Cpu8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(
                    pairlistSet.gpuList(),
                    nbat.get(),
                    &ic,
                    fr.shift_vec,
                    stepWork,
                    clearF,
                    nbat->out[0].f,
                    nbat->out[0].fshift.data(),
                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
                    fr.bBHAM ? enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::BuckinghamSR].data()
                             : enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data());
            break;

        default: GMX_RELEASE_ASSERT(false, "Invalid nonbonded kernel type passed!");
    }

    accountFlops(nrnb, pairlistSet, *this, ic, stepWork);
}

void nonbonded_verlet_t::dispatchFreeEnergyKernel(gmx::InteractionLocality   iLocality,
                                                  const t_forcerec*          fr,
                                                  rvec                       x[],
                                                  gmx::ForceWithShiftForces* forceWithShiftForces,
                                                  const t_mdatoms&           mdatoms,
                                                  t_lambda*                  fepvals,
                                                  gmx::ArrayRef<const real>  lambda,
                                                  gmx_enerdata_t*            enerd,
                                                  const gmx::StepWorkload&   stepWork,
                                                  t_nrnb*                    nrnb)
{
    const auto nbl_fep = pairlistSets().pairlistSet(iLocality).fepLists();

    /* When the first list is empty, all are empty and there is nothing to do */
    if (!pairlistSets().params().haveFep || nbl_fep[0]->nrj == 0)
    {
        return;
    }

    int donb_flags = 0;
    /* Add short-range interactions */
    donb_flags |= GMX_NONBONDED_DO_SR;

    if (stepWork.computeForces)
    {
        donb_flags |= GMX_NONBONDED_DO_FORCE;
    }
    if (stepWork.computeVirial)
    {
        donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
    }
    if (stepWork.computeEnergy)
    {
        donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
    }

    nb_kernel_data_t                                                kernel_data;
    gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> dvdl_nb = { 0 };
    kernel_data.flags                                                       = donb_flags;
    kernel_data.lambda                                                      = lambda;
    kernel_data.dvdl                                                        = dvdl_nb;

    kernel_data.energygrp_elec = enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data();
    kernel_data.energygrp_vdw = enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data();

    GMX_ASSERT(gmx_omp_nthreads_get(emntNonbonded) == nbl_fep.ssize(),
               "Number of lists should be same as number of NB threads");

    wallcycle_sub_start(wcycle_, ewcsNONBONDED_FEP);
#pragma omp parallel for schedule(static) num_threads(nbl_fep.ssize())
    for (gmx::index th = 0; th < nbl_fep.ssize(); th++)
    {
        try
        {
            gmx_nb_free_energy_kernel(
                    nbl_fep[th].get(), x, forceWithShiftForces, fr, &mdatoms, &kernel_data, nrnb);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
    }

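    /* With soft-core interactions dV/dlambda is a non-linear function of lambda,
     * so these contributions are accumulated separately from the linear ones.
     */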
    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[FreeEnergyPerturbationCouplingType::Vdw] +=
                dvdl_nb[FreeEnergyPerturbationCouplingType::Vdw];
        enerd->dvdl_nonlin[FreeEnergyPerturbationCouplingType::Coul] +=
                dvdl_nb[FreeEnergyPerturbationCouplingType::Coul];
    }
    else
    {
        enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] +=
                dvdl_nb[FreeEnergyPerturbationCouplingType::Vdw];
        enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Coul] +=
                dvdl_nb[FreeEnergyPerturbationCouplingType::Coul];
    }

    /* If we do foreign lambda and we have soft-core interactions
     * we have to recalculate the (non-linear) energy contributions.
     */
    if (fepvals->n_lambda > 0 && stepWork.computeDhdl && fepvals->sc_alpha != 0)
    {
        gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> lam_i;
        kernel_data.flags = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE))
                            | GMX_NONBONDED_DO_FOREIGNLAMBDA;
        kernel_data.lambda = lam_i;
        kernel_data.dvdl   = dvdl_nb;
        kernel_data.energygrp_elec =
                enerd->foreign_grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data();
        kernel_data.energygrp_vdw =
                enerd->foreign_grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data();

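        // Recompute the energies for the current and all foreign lambda values; forces are not needed here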
        for (gmx::index i = 0; i < 1 + enerd->foreignLambdaTerms.numLambdas(); i++)
        {
            std::fill(std::begin(dvdl_nb), std::end(dvdl_nb), 0);
            for (int j = 0; j < static_cast<int>(FreeEnergyPerturbationCouplingType::Count); j++)
            {
                lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i - 1]);
            }
            reset_foreign_enerdata(enerd);
#pragma omp parallel for schedule(static) num_threads(nbl_fep.ssize())
            for (gmx::index th = 0; th < nbl_fep.ssize(); th++)
            {
                try
                {
                    gmx_nb_free_energy_kernel(
                            nbl_fep[th].get(), x, forceWithShiftForces, fr, &mdatoms, &kernel_data, nrnb);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
            }

            sum_epot(enerd->foreign_grpp, enerd->foreign_term);
            enerd->foreignLambdaTerms.accumulate(
                    i,
                    enerd->foreign_term[F_EPOT],
                    dvdl_nb[FreeEnergyPerturbationCouplingType::Vdw]
                            + dvdl_nb[FreeEnergyPerturbationCouplingType::Coul]);
        }
    }
    wallcycle_sub_stop(wcycle_, ewcsNONBONDED_FEP);
}