// src/gromacs/nbnxm/kerneldispatch.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#include "gmxpre.h"

#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/simd/simd.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/real.h"

#include "kernel_common.h"
#include "nbnxm_simd.h"
#include "pairlistset.h"
#include "pairlistsets.h"
#include "kernels_reference/kernel_gpu_ref.h"
#define INCLUDE_KERNELFUNCTION_TABLES
#include "kernels_reference/kernel_ref.h"
#ifdef GMX_NBNXN_SIMD_2XNN
#    include "kernels_simd_2xmm/kernels.h"
#endif
#ifdef GMX_NBNXN_SIMD_4XN
#    include "kernels_simd_4xm/kernels.h"
#endif
#undef INCLUDE_KERNELFUNCTION_TABLES

/*! \brief Clears the energy group output buffers
 *
 * \param[in,out] out  nbnxn kernel output struct
 */
static void clearGroupEnergies(nbnxn_atomdata_output_t* out)
{
    std::fill(out->Vvdw.begin(), out->Vvdw.end(), 0.0_real);
    std::fill(out->Vc.begin(), out->Vc.end(), 0.0_real);
    std::fill(out->VSvdw.begin(), out->VSvdw.end(), 0.0_real);
    std::fill(out->VSc.begin(), out->VSc.end(), 0.0_real);
}

/*! \brief Reduce the group-pair energy buffers produced by a SIMD kernel
 * to single terms in the output buffers.
 *
 * The SIMD kernels produce a large number of energy buffers in SIMD registers
 * to avoid scattered reads and writes.
 *
 * \tparam        unrollj         The unroll size for j-particles in the SIMD kernel
 * \param[in]     numGroups       The number of energy groups
 * \param[in]     numGroups_2log  Log2 of numGroups, rounded up
 * \param[in,out] out             Struct with energy buffers
 */
template<int unrollj>
static void reduceGroupEnergySimdBuffers(int numGroups, int numGroups_2log, nbnxn_atomdata_output_t* out)
{
    const int unrollj_half = unrollj / 2;
    /* Energies are stored in SIMD registers with size 2^numGroups_2log */
    const int numGroupsStorage = (1 << numGroups_2log);

    const real* gmx_restrict vVdwSimd     = out->VSvdw.data();
    const real* gmx_restrict vCoulombSimd = out->VSc.data();
    real* gmx_restrict vVdw               = out->Vvdw.data();
    real* gmx_restrict vCoulomb           = out->Vc.data();

    /* The size of the SIMD energy group buffer array is:
     * numGroups*numGroups*numGroupsStorage*unrollj_half*simd_width
     */
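    /* Accumulate each group-pair sub-buffer into the dense
     * numGroups x numGroups Vvdw and Vc output matrices.
     */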
    for (int i = 0; i < numGroups; i++)
    {
        for (int j1 = 0; j1 < numGroups; j1++)
        {
            for (int j0 = 0; j0 < numGroups; j0++)
            {
                int c = ((i * numGroups + j1) * numGroupsStorage + j0) * unrollj_half * unrollj;
                for (int s = 0; s < unrollj_half; s++)
                {
                    vVdw[i * numGroups + j0] += vVdwSimd[c + 0];
                    vVdw[i * numGroups + j1] += vVdwSimd[c + 1];
                    vCoulomb[i * numGroups + j0] += vCoulombSimd[c + 0];
                    vCoulomb[i * numGroups + j1] += vCoulombSimd[c + 1];
                    c += unrollj + 2;
                }
            }
        }
    }
}

/*! \brief Dispatches the non-bonded N versus M atom cluster CPU kernels.
 *
 * OpenMP parallelization is performed within this function.
 * Energy reduction, but not force and shift force reduction, is performed
 * within this function.
 *
 * \param[in]     pairlistSet   Pairlists with local or non-local interactions to compute
 * \param[in]     kernelSetup   The non-bonded kernel setup
 * \param[in,out] nbat          The atomdata for the interactions
 * \param[in]     ic            Non-bonded interaction constants
 * \param[in]     shiftVectors  The PBC shift vectors
 * \param[in]     stepWork      Flags that tell what to compute
 */
 * \param[in]     clearF        Enum that tells whether to clear the force output buffer
 * \param[out]    vCoulomb      Output buffer for Coulomb energies
 * \param[out]    vVdw          Output buffer for Van der Waals energies
 * \param[in]     wcycle        Pointer to cycle counting data structure.
 */
static void nbnxn_kernel_cpu(const PairlistSet&         pairlistSet,
                             const Nbnxm::KernelSetup&  kernelSetup,
                             nbnxn_atomdata_t*          nbat,
                             const interaction_const_t& ic,
                             rvec*                      shiftVectors,
                             const gmx::StepWorkload&   stepWork,
                             int                        clearF,
                             real*                      vCoulomb,
                             real*                      vVdw,
                             gmx_wallcycle*             wcycle)
{

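    /* Pick the Coulomb kernel flavor: reaction-field (also used for plain
     * cut-off), tabulated Ewald or analytical Ewald, each with either a
     * single cut-off or a twin cut-off when rcoulomb != rvdw.
     */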
    int coulkt;
    if (EEL_RF(ic.eeltype) || ic.eeltype == eelCUT)
    {
        coulkt = coulktRF;
    }
    else
    {
        if (kernelSetup.ewaldExclusionType == Nbnxm::EwaldExclusionType::Table)
        {
            if (ic.rcoulomb == ic.rvdw)
            {
                coulkt = coulktTAB;
            }
            else
            {
                coulkt = coulktTAB_TWIN;
            }
        }
        else
        {
            if (ic.rcoulomb == ic.rvdw)
            {
                coulkt = coulktEWALD;
            }
            else
            {
                coulkt = coulktEWALD_TWIN;
            }
        }
    }

    const nbnxn_atomdata_t::Params& nbatParams = nbat->params();

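    /* Pick the Van der Waals kernel flavor from the VdW type, the interaction
     * modifier and, for plain LJ cut-off, the combination rule of the system.
     */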
    int vdwkt = 0;
    if (ic.vdwtype == evdwCUT)
    {
        switch (ic.vdw_modifier)
        {
            case eintmodNONE:
            case eintmodPOTSHIFT:
                switch (nbatParams.comb_rule)
                {
                    case ljcrGEOM: vdwkt = vdwktLJCUT_COMBGEOM; break;
                    case ljcrLB: vdwkt = vdwktLJCUT_COMBLB; break;
                    case ljcrNONE: vdwkt = vdwktLJCUT_COMBNONE; break;
                    default: GMX_RELEASE_ASSERT(false, "Unknown combination rule");
                }
                break;
            case eintmodFORCESWITCH: vdwkt = vdwktLJFORCESWITCH; break;
            case eintmodPOTSWITCH: vdwkt = vdwktLJPOTSWITCH; break;
            default: GMX_RELEASE_ASSERT(false, "Unsupported VdW interaction modifier");
        }
    }
    else if (ic.vdwtype == evdwPME)
    {
        if (ic.ljpme_comb_rule == eljpmeGEOM)
        {
            vdwkt = vdwktLJEWALDCOMBGEOM;
        }
        else
        {
            vdwkt = vdwktLJEWALDCOMBLB;
            /* At setup we (should have) selected the C reference kernel */
            GMX_RELEASE_ASSERT(kernelSetup.kernelType == Nbnxm::KernelType::Cpu4x4_PlainC,
                               "Only the plain-C reference nbnxn kernel supports LJ-PME with LB "
                               "combination rules");
        }
    }
    else
    {
        GMX_RELEASE_ASSERT(false, "Unsupported VdW interaction type");
    }

    gmx::ArrayRef<const NbnxnPairlistCpu> pairlists = pairlistSet.cpuLists();

    int gmx_unused nthreads = gmx_omp_nthreads_get(emntNonbonded);
    wallcycle_sub_start(wcycle, ewcsNONBONDED_CLEAR);
#pragma omp parallel for schedule(static) num_threads(nthreads)
    for (gmx::index nb = 0; nb < pairlists.ssize(); nb++)
    {
        // Presently, the kernels do not call C++ code that can throw,
        // so no need for a try/catch pair in this OpenMP region.
        nbnxn_atomdata_output_t* out = &nbat->out[nb];

        if (clearF == enbvClearFYes)
        {
            clearForceBuffer(nbat, nb);

            clear_fshift(out->fshift.data());
        }

        if (nb == 0)
        {
            wallcycle_sub_stop(wcycle, ewcsNONBONDED_CLEAR);
            wallcycle_sub_start(wcycle, ewcsNONBONDED_KERNEL);
        }

        // TODO: Change to reference
        const NbnxnPairlistCpu* pairlist = &pairlists[nb];

        if (!stepWork.computeEnergy)
        {
            /* Don't calculate energies */
            switch (kernelSetup.kernelType)
            {
                case Nbnxm::KernelType::Cpu4x4_PlainC:
                    nbnxn_kernel_noener_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#ifdef GMX_NBNXN_SIMD_2XNN
                case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
                    nbnxm_kernel_noener_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
#ifdef GMX_NBNXN_SIMD_4XN
                case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
                    nbnxm_kernel_noener_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
                default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
            }
        }
        else if (out->Vvdw.size() == 1)
        {
            /* A single energy group (pair) */
            out->Vvdw[0] = 0;
            out->Vc[0]   = 0;

            switch (kernelSetup.kernelType)
            {
                case Nbnxm::KernelType::Cpu4x4_PlainC:
                    nbnxn_kernel_ener_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#ifdef GMX_NBNXN_SIMD_2XNN
                case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
                    nbnxm_kernel_ener_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
#ifdef GMX_NBNXN_SIMD_4XN
                case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
                    nbnxm_kernel_ener_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
                default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
            }
        }
        else
        {
            /* Calculate energy group contributions */
            clearGroupEnergies(out);

            int unrollj = 0;

            switch (kernelSetup.kernelType)
            {
                case Nbnxm::KernelType::Cpu4x4_PlainC:
                    unrollj = c_nbnxnCpuIClusterSize;
                    nbnxn_kernel_energrp_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#ifdef GMX_NBNXN_SIMD_2XNN
                case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
                    unrollj = GMX_SIMD_REAL_WIDTH / 2;
                    nbnxm_kernel_energrp_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
#ifdef GMX_NBNXN_SIMD_4XN
                case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
                    unrollj = GMX_SIMD_REAL_WIDTH;
                    nbnxm_kernel_energrp_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
                default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
            }

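            /* The SIMD kernels accumulate group-pair energies in strided
             * intermediate buffers; reduce those to plain per-group-pair terms
             * here. The plain-C kernel writes directly to the output buffers.
             */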
            if (kernelSetup.kernelType != Nbnxm::KernelType::Cpu4x4_PlainC)
            {
                switch (unrollj)
                {
                    case 2:
                        reduceGroupEnergySimdBuffers<2>(nbatParams.nenergrp, nbatParams.neg_2log, out);
                        break;
                    case 4:
                        reduceGroupEnergySimdBuffers<4>(nbatParams.nenergrp, nbatParams.neg_2log, out);
                        break;
                    case 8:
                        reduceGroupEnergySimdBuffers<8>(nbatParams.nenergrp, nbatParams.neg_2log, out);
                        break;
                    default: GMX_RELEASE_ASSERT(false, "Unsupported j-unroll size");
                }
            }
        }
    }
    wallcycle_sub_stop(wcycle, ewcsNONBONDED_KERNEL);

    if (stepWork.computeEnergy)
    {
        reduce_energies_over_lists(nbat, pairlists.ssize(), vVdw, vCoulomb);
    }
}

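/*! \brief Accounts for the cost of the dispatched non-bonded kernel in \p nrnb
 *
 * Increments the flop counters that match the Coulomb and VdW flavors in use,
 * using the pair counts of \p pairlistSet and the work requested in \p stepWork.
 */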
static void accountFlops(t_nrnb*                    nrnb,
                         const PairlistSet&         pairlistSet,
                         const nonbonded_verlet_t&  nbv,
                         const interaction_const_t& ic,
                         const gmx::StepWorkload&   stepWork)
{
    const bool usingGpuKernels = nbv.useGpu();

    int enr_nbnxn_kernel_ljc;
    if (EEL_RF(ic.eeltype) || ic.eeltype == eelCUT)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!usingGpuKernels && nbv.kernelSetup().ewaldExclusionType == Nbnxm::EwaldExclusionType::Analytical)
             || (usingGpuKernels && Nbnxm::gpu_is_kernel_ewald_analytical(nbv.gpu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    int enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (stepWork.computeEnergy)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc, pairlistSet.natpair_ljq_);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj, pairlistSet.natpair_lj_);
    /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc - eNR_NBNXN_LJ_RF + eNR_NBNXN_RF, pairlistSet.natpair_q_);

    if (ic.vdw_modifier == eintmodFORCESWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW + (stepWork.computeEnergy ? 1 : 0),
                 pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
    }
    if (ic.vdw_modifier == eintmodPOTSWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW + (stepWork.computeEnergy ? 1 : 0),
                 pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
    }
    if (ic.vdwtype == evdwPME)
    {
        /* We add up the LJ Ewald cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD + (stepWork.computeEnergy ? 1 : 0),
                 pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
    }
}

void nonbonded_verlet_t::dispatchNonbondedKernel(gmx::InteractionLocality   iLocality,
                                                 const interaction_const_t& ic,
                                                 const gmx::StepWorkload&   stepWork,
                                                 int                        clearF,
                                                 const t_forcerec&          fr,
                                                 gmx_enerdata_t*            enerd,
                                                 t_nrnb*                    nrnb)
{
    const PairlistSet& pairlistSet = pairlistSets().pairlistSet(iLocality);

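    /* Dispatch either to the CPU cluster-pair kernels, launch the GPU kernel,
     * or run the plain-C CPU reference of the GPU kernel layout.
     */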
    switch (kernelSetup().kernelType)
    {
        case Nbnxm::KernelType::Cpu4x4_PlainC:
        case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
        case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
            nbnxn_kernel_cpu(pairlistSet, kernelSetup(), nbat.get(), ic, fr.shift_vec, stepWork,
                             clearF, enerd->grpp.ener[egCOULSR].data(),
                             fr.bBHAM ? enerd->grpp.ener[egBHAMSR].data() : enerd->grpp.ener[egLJSR].data(),
                             wcycle_);
            break;

        case Nbnxm::KernelType::Gpu8x8x8:
            Nbnxm::gpu_launch_kernel(gpu_nbv, stepWork, iLocality);
            break;

        case Nbnxm::KernelType::Cpu8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(
                    pairlistSet.gpuList(), nbat.get(), &ic, fr.shift_vec, stepWork, clearF,
                    nbat->out[0].f, nbat->out[0].fshift.data(), enerd->grpp.ener[egCOULSR].data(),
                    fr.bBHAM ? enerd->grpp.ener[egBHAMSR].data() : enerd->grpp.ener[egLJSR].data());
            break;

        default: GMX_RELEASE_ASSERT(false, "Invalid nonbonded kernel type passed!");
    }

    accountFlops(nrnb, pairlistSet, *this, ic, stepWork);
}

void nonbonded_verlet_t::dispatchFreeEnergyKernel(gmx::InteractionLocality   iLocality,
                                                  const t_forcerec*          fr,
                                                  rvec                       x[],
                                                  gmx::ForceWithShiftForces* forceWithShiftForces,
                                                  const t_mdatoms&           mdatoms,
                                                  t_lambda*                  fepvals,
                                                  real*                      lambda,
                                                  gmx_enerdata_t*            enerd,
                                                  const gmx::StepWorkload&   stepWork,
                                                  t_nrnb*                    nrnb)
{
    const auto nbl_fep = pairlistSets().pairlistSet(iLocality).fepLists();

    /* When the first list is empty, all are empty and there is nothing to do */
    if (!pairlistSets().params().haveFep || nbl_fep[0]->nrj == 0)
    {
        return;
    }

    int donb_flags = 0;
    /* Add short-range interactions */
    donb_flags |= GMX_NONBONDED_DO_SR;

    if (stepWork.computeForces)
    {
        donb_flags |= GMX_NONBONDED_DO_FORCE;
    }
    if (stepWork.computeVirial)
    {
        donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
    }
    if (stepWork.computeEnergy)
    {
        donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
    }

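    /* Collect the flags, lambda values, dV/dlambda accumulators and
     * energy-group output buffers for the free-energy kernel.
     */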
    nb_kernel_data_t kernel_data;
    real             dvdl_nb[efptNR] = { 0 };
    kernel_data.flags                = donb_flags;
    kernel_data.lambda               = lambda;
    kernel_data.dvdl                 = dvdl_nb;

    kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR].data();
    kernel_data.energygrp_vdw  = enerd->grpp.ener[egLJSR].data();

    GMX_ASSERT(gmx_omp_nthreads_get(emntNonbonded) == nbl_fep.ssize(),
               "Number of lists should be the same as the number of NB threads");

    wallcycle_sub_start(wcycle_, ewcsNONBONDED_FEP);
#pragma omp parallel for schedule(static) num_threads(nbl_fep.ssize())
    for (gmx::index th = 0; th < nbl_fep.ssize(); th++)
    {
        try
        {
            gmx_nb_free_energy_kernel(nbl_fep[th].get(), x, forceWithShiftForces, fr, &mdatoms,
                                      &kernel_data, nrnb);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
    }

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    /* If we do foreign lambda and we have soft-core interactions
     * we have to recalculate the (non-linear) energy contributions.
     */
    if (fepvals->n_lambda > 0 && stepWork.computeDhdl && fepvals->sc_alpha != 0)
    {
        real lam_i[efptNR];
        kernel_data.flags = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE))
                            | GMX_NONBONDED_DO_FOREIGNLAMBDA;
        kernel_data.lambda         = lam_i;
        kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR].data();
        kernel_data.energygrp_vdw  = enerd->foreign_grpp.ener[egLJSR].data();
        /* Note that we add to kernel_data.dvdl, but ignore the result */

        for (size_t i = 0; i < enerd->enerpart_lambda.size(); i++)
        {
            for (int j = 0; j < efptNR; j++)
            {
                lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i - 1]);
            }
            reset_foreign_enerdata(enerd);
#pragma omp parallel for schedule(static) num_threads(nbl_fep.ssize())
            for (gmx::index th = 0; th < nbl_fep.ssize(); th++)
            {
                try
                {
                    gmx_nb_free_energy_kernel(nbl_fep[th].get(), x, forceWithShiftForces, fr,
                                              &mdatoms, &kernel_data, nrnb);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
            }

            sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
            enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
        }
    }
    wallcycle_sub_stop(wcycle_, ewcsNONBONDED_FEP);
}