Fix clang-tidy-11 errors in NBNXM module, part 1
[alexxy/gromacs.git] / src / gromacs / nbnxm / kerneldispatch.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
5  * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
6  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7  * and including many others, as listed in the AUTHORS file in the
8  * top-level source directory and at http://www.gromacs.org.
9  *
10  * GROMACS is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public License
12  * as published by the Free Software Foundation; either version 2.1
13  * of the License, or (at your option) any later version.
14  *
15  * GROMACS is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with GROMACS; if not, see
22  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
24  *
25  * If you want to redistribute modifications to GROMACS, please
26  * consider that scientific software is very special. Version
27  * control is crucial - bugs must be traceable. We will be happy to
28  * consider code for inclusion in the official distribution, but
29  * derived work must not be called official GROMACS. Details are found
30  * in the README & COPYING files - if they are missing, get the
31  * official version at http://www.gromacs.org.
32  *
33  * To help us fund GROMACS development, we humbly ask that you cite
34  * the research papers on the package. Check out http://www.gromacs.org.
35  */
36
37 #include "gmxpre.h"
38
39 #include "gromacs/gmxlib/nrnb.h"
40 #include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
41 #include "gromacs/gmxlib/nonbonded/nb_kernel.h"
42 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
43 #include "gromacs/math/vectypes.h"
44 #include "gromacs/mdlib/enerdata_utils.h"
45 #include "gromacs/mdlib/force.h"
46 #include "gromacs/mdlib/gmx_omp_nthreads.h"
47 #include "gromacs/mdtypes/enerdata.h"
48 #include "gromacs/mdtypes/forceoutput.h"
49 #include "gromacs/mdtypes/forcerec.h"
50 #include "gromacs/mdtypes/inputrec.h"
51 #include "gromacs/mdtypes/interaction_const.h"
52 #include "gromacs/mdtypes/md_enums.h"
53 #include "gromacs/mdtypes/mdatom.h"
54 #include "gromacs/mdtypes/simulation_workload.h"
55 #include "gromacs/nbnxm/gpu_data_mgmt.h"
56 #include "gromacs/nbnxm/nbnxm.h"
57 #include "gromacs/simd/simd.h"
58 #include "gromacs/timing/wallcycle.h"
59 #include "gromacs/utility/fatalerror.h"
60 #include "gromacs/utility/gmxassert.h"
61 #include "gromacs/utility/real.h"
62
63 #include "kernel_common.h"
64 #include "nbnxm_gpu.h"
65 #include "nbnxm_gpu_data_mgmt.h"
66 #include "nbnxm_simd.h"
67 #include "pairlistset.h"
68 #include "pairlistsets.h"
69 #include "kernels_reference/kernel_gpu_ref.h"
70 #define INCLUDE_KERNELFUNCTION_TABLES
71 #include "kernels_reference/kernel_ref.h"
72 #ifdef GMX_NBNXN_SIMD_2XNN
73 #    include "kernels_simd_2xmm/kernels.h"
74 #endif
75 #ifdef GMX_NBNXN_SIMD_4XN
76 #    include "kernels_simd_4xm/kernels.h"
77 #endif
78 #undef INCLUDE_KERNELFUNCTION_TABLES
79
80 /*! \brief Clears the energy group output buffers
81  *
82  * \param[in,out] out  nbnxn kernel output struct
83  */
84 static void clearGroupEnergies(nbnxn_atomdata_output_t* out)
85 {
86     std::fill(out->Vvdw.begin(), out->Vvdw.end(), 0.0_real);
87     std::fill(out->Vc.begin(), out->Vc.end(), 0.0_real);
88     std::fill(out->VSvdw.begin(), out->VSvdw.end(), 0.0_real);
89     std::fill(out->VSc.begin(), out->VSc.end(), 0.0_real);
90 }
91
92 /*! \brief Reduce the group-pair energy buffers produced by a SIMD kernel
93  * to single terms in the output buffers.
94  *
95  * The SIMD kernels produce a large number of energy buffers in SIMD registers
96  * to avoid scattered reads and writes.
97  *
98  * \tparam        unrollj         The unroll size for j-particles in the SIMD kernel
99  * \param[in]     numGroups       The number of energy groups
100  * \param[in]     numGroups_2log  Log2 of numGroups, rounded up
101  * \param[in,out] out             Struct with energy buffers
102  */
103 template<int unrollj>
104 static void reduceGroupEnergySimdBuffers(int numGroups, int numGroups_2log, nbnxn_atomdata_output_t* out)
105 {
106     const int unrollj_half = unrollj / 2;
107     /* Energies are stored in SIMD registers with size 2^numGroups_2log */
108     const int numGroupsStorage = (1 << numGroups_2log);
109
110     const real* gmx_restrict vVdwSimd     = out->VSvdw.data();
111     const real* gmx_restrict vCoulombSimd = out->VSc.data();
112     real* gmx_restrict vVdw               = out->Vvdw.data();
113     real* gmx_restrict vCoulomb           = out->Vc.data();
114
115     /* The size of the SIMD energy group buffer array is:
116      * numGroups*numGroups*numGroupsStorage*unrollj_half*simd_width
117      */
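    /* Layout note: each (i,j1,j0) triple owns a block of unrollj_half*unrollj
     * reals; the reduction below treats consecutive pairs within a block as
     * the contributions for j-groups j0 and j1 and advances by unrollj + 2
     * between pairs. As an illustrative example, with 4 energy groups and
     * unrollj = 4 this reduces 4*4*4*2*4 = 512 packed values into the 16
     * entries of out->Vvdw (and likewise out->Vc).
     */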
118     for (int i = 0; i < numGroups; i++)
119     {
120         for (int j1 = 0; j1 < numGroups; j1++)
121         {
122             for (int j0 = 0; j0 < numGroups; j0++)
123             {
124                 int c = ((i * numGroups + j1) * numGroupsStorage + j0) * unrollj_half * unrollj;
125                 for (int s = 0; s < unrollj_half; s++)
126                 {
127                     vVdw[i * numGroups + j0] += vVdwSimd[c + 0];
128                     vVdw[i * numGroups + j1] += vVdwSimd[c + 1];
129                     vCoulomb[i * numGroups + j0] += vCoulombSimd[c + 0];
130                     vCoulomb[i * numGroups + j1] += vCoulombSimd[c + 1];
131                     c += unrollj + 2;
132                 }
133             }
134         }
135     }
136 }
137
138 static int getCoulombKernelType(const Nbnxm::KernelSetup& kernelSetup, const interaction_const_t& ic)
139 {
140
141     if (EEL_RF(ic.eeltype) || ic.eeltype == eelCUT)
142     {
143         return coulktRF;
144     }
145     else
146     {
147         if (kernelSetup.ewaldExclusionType == Nbnxm::EwaldExclusionType::Table)
148         {
149             if (ic.rcoulomb == ic.rvdw)
150             {
151                 return coulktTAB;
152             }
153             else
154             {
155                 return coulktTAB_TWIN;
156             }
157         }
158         else
159         {
160             if (ic.rcoulomb == ic.rvdw)
161             {
162                 return coulktEWALD;
163             }
164             else
165             {
166                 return coulktEWALD_TWIN;
167             }
168         }
169     }
170 }
171
172 static int getVdwKernelType(const Nbnxm::KernelSetup&       kernelSetup,
173                             const nbnxn_atomdata_t::Params& nbatParams,
174                             const interaction_const_t&      ic)
175 {
176     if (ic.vdwtype == evdwCUT)
177     {
178         switch (ic.vdw_modifier)
179         {
180             case eintmodNONE:
181             case eintmodPOTSHIFT:
182                 switch (nbatParams.ljCombinationRule)
183                 {
184                     case LJCombinationRule::Geometric: return vdwktLJCUT_COMBGEOM;
185                     case LJCombinationRule::LorentzBerthelot: return vdwktLJCUT_COMBLB;
186                     case LJCombinationRule::None: return vdwktLJCUT_COMBNONE;
187                     default: gmx_incons("Unknown combination rule");
188                 }
189             case eintmodFORCESWITCH: return vdwktLJFORCESWITCH;
190             case eintmodPOTSWITCH: return vdwktLJPOTSWITCH;
191             default:
192                 std::string errorMsg =
193                         gmx::formatString("Unsupported VdW interaction modifier %s (%d)",
194                                           INTMODIFIER(ic.vdw_modifier),
195                                           ic.vdw_modifier);
196                 gmx_incons(errorMsg);
197         }
198     }
199     else if (ic.vdwtype == evdwPME)
200     {
201         if (ic.ljpme_comb_rule == eljpmeGEOM)
202         {
203             return vdwktLJEWALDCOMBGEOM;
204         }
205         else
206         {
207             /* At setup we (should have) selected the C reference kernel */
208             GMX_RELEASE_ASSERT(kernelSetup.kernelType == Nbnxm::KernelType::Cpu4x4_PlainC,
209                                "Only the plain C reference nbnxn kernel supports LJ-PME with LB "
210                                "combination rules");
211             return vdwktLJEWALDCOMBLB;
212         }
213     }
214     else
215     {
216         std::string errorMsg = gmx::formatString(
217                 "Unsupported VdW interaction type %s (%d)", EVDWTYPE(ic.vdwtype), ic.vdwtype);
218         gmx_incons(errorMsg);
219     }
220 }
221
222 /*! \brief Dispatches the non-bonded N versus M atom cluster CPU kernels.
223  *
224  * OpenMP parallelization is performed within this function.
225  * Energy reduction, but not force and shift force reduction, is also
226  * performed here.
227  *
228  * \param[in]     pairlistSet   Pairlists with local or non-local interactions to compute
229  * \param[in]     kernelSetup   The non-bonded kernel setup
230  * \param[in,out] nbat          The atomdata for the interactions
231  * \param[in]     ic            Non-bonded interaction constants
232  * \param[in]     shiftVectors  The PBC shift vectors
233  * \param[in]     stepWork      Flags that tell what to compute
234  * \param[in]     clearF        Enum that tells whether to clear the force output buffer
235  * \param[out]    vCoulomb      Output buffer for Coulomb energies
236  * \param[out]    vVdw          Output buffer for Van der Waals energies
237  * \param[in]     wcycle        Pointer to cycle counting data structure.
238  */
239 static void nbnxn_kernel_cpu(const PairlistSet&         pairlistSet,
240                              const Nbnxm::KernelSetup&  kernelSetup,
241                              nbnxn_atomdata_t*          nbat,
242                              const interaction_const_t& ic,
243                              rvec*                      shiftVectors,
244                              const gmx::StepWorkload&   stepWork,
245                              int                        clearF,
246                              real*                      vCoulomb,
247                              real*                      vVdw,
248                              gmx_wallcycle*             wcycle)
249 {
250
251     const nbnxn_atomdata_t::Params& nbatParams = nbat->params();
252
253     const int coulkt = getCoulombKernelType(kernelSetup, ic);
254     const int vdwkt  = getVdwKernelType(kernelSetup, nbatParams, ic);
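    /* coulkt and vdwkt index the 2D kernel function tables used below
     * (e.g. nbnxn_kernel_noener_ref[coulkt][vdwkt]), which are made
     * available by the kernel headers included at the top of this file
     * with INCLUDE_KERNELFUNCTION_TABLES defined.
     */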
255
256     gmx::ArrayRef<const NbnxnPairlistCpu> pairlists = pairlistSet.cpuLists();
257
258     int gmx_unused nthreads = gmx_omp_nthreads_get(emntNonbonded);
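    // nthreads is marked gmx_unused because it is only referenced in the
    // OpenMP pragma below, which compiles away in builds without OpenMP.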
259     wallcycle_sub_start(wcycle, ewcsNONBONDED_CLEAR);
260 #pragma omp parallel for schedule(static) num_threads(nthreads)
261     for (gmx::index nb = 0; nb < pairlists.ssize(); nb++)
262     {
263         // Presently, the kernels do not call C++ code that can throw,
264         // so no need for a try/catch pair in this OpenMP region.
265         nbnxn_atomdata_output_t* out = &nbat->out[nb];
266
267         if (clearF == enbvClearFYes)
268         {
269             clearForceBuffer(nbat, nb);
270
271             clear_fshift(out->fshift.data());
272         }
273
274         if (nb == 0)
275         {
276             wallcycle_sub_stop(wcycle, ewcsNONBONDED_CLEAR);
277             wallcycle_sub_start(wcycle, ewcsNONBONDED_KERNEL);
278         }
279
280         // TODO: Change to reference
281         const NbnxnPairlistCpu* pairlist = &pairlists[nb];
282
283         if (!stepWork.computeEnergy)
284         {
285             /* Don't calculate energies */
286             switch (kernelSetup.kernelType)
287             {
288                 case Nbnxm::KernelType::Cpu4x4_PlainC:
289                     nbnxn_kernel_noener_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
290                     break;
291 #ifdef GMX_NBNXN_SIMD_2XNN
292                 case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
293                     nbnxm_kernel_noener_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
294                     break;
295 #endif
296 #ifdef GMX_NBNXN_SIMD_4XN
297                 case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
298                     nbnxm_kernel_noener_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
299                     break;
300 #endif
301                 default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
302             }
303         }
304         else if (out->Vvdw.size() == 1)
305         {
306             /* A single energy group (pair) */
307             out->Vvdw[0] = 0;
308             out->Vc[0]   = 0;
309
310             switch (kernelSetup.kernelType)
311             {
312                 case Nbnxm::KernelType::Cpu4x4_PlainC:
313                     nbnxn_kernel_ener_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
314                     break;
315 #ifdef GMX_NBNXN_SIMD_2XNN
316                 case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
317                     nbnxm_kernel_ener_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
318                     break;
319 #endif
320 #ifdef GMX_NBNXN_SIMD_4XN
321                 case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
322                     nbnxm_kernel_ener_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
323                     break;
324 #endif
325                 default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
326             }
327         }
328         else
329         {
330             /* Calculate energy group contributions */
331             clearGroupEnergies(out);
332
333             int unrollj = 0;
334
335             switch (kernelSetup.kernelType)
336             {
337                 case Nbnxm::KernelType::Cpu4x4_PlainC:
338                     unrollj = c_nbnxnCpuIClusterSize;
339                     nbnxn_kernel_energrp_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
340                     break;
341 #ifdef GMX_NBNXN_SIMD_2XNN
342                 case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
343                     unrollj = GMX_SIMD_REAL_WIDTH / 2;
344                     nbnxm_kernel_energrp_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
345                     break;
346 #endif
347 #ifdef GMX_NBNXN_SIMD_4XN
348                 case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
349                     unrollj = GMX_SIMD_REAL_WIDTH;
350                     nbnxm_kernel_energrp_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
351                     break;
352 #endif
353                 default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
354             }
355
356             if (kernelSetup.kernelType != Nbnxm::KernelType::Cpu4x4_PlainC)
357             {
358                 switch (unrollj)
359                 {
360                     case 2:
361                         reduceGroupEnergySimdBuffers<2>(nbatParams.nenergrp, nbatParams.neg_2log, out);
362                         break;
363                     case 4:
364                         reduceGroupEnergySimdBuffers<4>(nbatParams.nenergrp, nbatParams.neg_2log, out);
365                         break;
366                     case 8:
367                         reduceGroupEnergySimdBuffers<8>(nbatParams.nenergrp, nbatParams.neg_2log, out);
368                         break;
369                     default: GMX_RELEASE_ASSERT(false, "Unsupported j-unroll size");
370                 }
371             }
372         }
373     }
374     wallcycle_sub_stop(wcycle, ewcsNONBONDED_KERNEL);
375
376     if (stepWork.computeEnergy)
377     {
378         reduce_energies_over_lists(nbat, pairlists.ssize(), vVdw, vCoulomb);
379     }
380 }
381
382 static void accountFlops(t_nrnb*                    nrnb,
383                          const PairlistSet&         pairlistSet,
384                          const nonbonded_verlet_t&  nbv,
385                          const interaction_const_t& ic,
386                          const gmx::StepWorkload&   stepWork)
387 {
388     const bool usingGpuKernels = nbv.useGpu();
389
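    /* eNRNB is only a placeholder initial value; every branch below
     * overwrites it with the actual flop-counter index. */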
390     int enr_nbnxn_kernel_ljc = eNRNB;
391     if (EEL_RF(ic.eeltype) || ic.eeltype == eelCUT)
392     {
393         enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
394     }
395     else if ((!usingGpuKernels && nbv.kernelSetup().ewaldExclusionType == Nbnxm::EwaldExclusionType::Analytical)
396              || (usingGpuKernels && Nbnxm::gpu_is_kernel_ewald_analytical(nbv.gpu_nbv)))
397     {
398         enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
399     }
400     else
401     {
402         enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
403     }
404     int enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
405     if (stepWork.computeEnergy)
406     {
407         /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
408         enr_nbnxn_kernel_ljc += 1;
409         enr_nbnxn_kernel_lj += 1;
410     }
411
412     inc_nrnb(nrnb, enr_nbnxn_kernel_ljc, pairlistSet.natpair_ljq_);
413     inc_nrnb(nrnb, enr_nbnxn_kernel_lj, pairlistSet.natpair_lj_);
414     /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
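    /* i.e. the shift maps each LJ+Coulomb flop counter onto its Coulomb-only
     * counterpart (energy flavors included, since F+E is always F + 1). */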
415     inc_nrnb(nrnb, enr_nbnxn_kernel_ljc - eNR_NBNXN_LJ_RF + eNR_NBNXN_RF, pairlistSet.natpair_q_);
416
417     if (ic.vdw_modifier == eintmodFORCESWITCH)
418     {
419         /* We add up the switch cost separately */
420         inc_nrnb(nrnb,
421                  eNR_NBNXN_ADD_LJ_FSW + (stepWork.computeEnergy ? 1 : 0),
422                  pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
423     }
424     if (ic.vdw_modifier == eintmodPOTSWITCH)
425     {
426         /* We add up the switch cost separately */
427         inc_nrnb(nrnb,
428                  eNR_NBNXN_ADD_LJ_PSW + (stepWork.computeEnergy ? 1 : 0),
429                  pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
430     }
431     if (ic.vdwtype == evdwPME)
432     {
433         /* We add up the LJ Ewald cost separately */
434         inc_nrnb(nrnb,
435                  eNR_NBNXN_ADD_LJ_EWALD + (stepWork.computeEnergy ? 1 : 0),
436                  pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
437     }
438 }
439
440 void nonbonded_verlet_t::dispatchNonbondedKernel(gmx::InteractionLocality   iLocality,
441                                                  const interaction_const_t& ic,
442                                                  const gmx::StepWorkload&   stepWork,
443                                                  int                        clearF,
444                                                  const t_forcerec&          fr,
445                                                  gmx_enerdata_t*            enerd,
446                                                  t_nrnb*                    nrnb)
447 {
448     const PairlistSet& pairlistSet = pairlistSets().pairlistSet(iLocality);
449
450     switch (kernelSetup().kernelType)
451     {
452         case Nbnxm::KernelType::Cpu4x4_PlainC:
453         case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
454         case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
455             nbnxn_kernel_cpu(pairlistSet,
456                              kernelSetup(),
457                              nbat.get(),
458                              ic,
459                              fr.shift_vec,
460                              stepWork,
461                              clearF,
462                              enerd->grpp.ener[egCOULSR].data(),
463                              fr.bBHAM ? enerd->grpp.ener[egBHAMSR].data() : enerd->grpp.ener[egLJSR].data(),
464                              wcycle_);
465             break;
466
467         case Nbnxm::KernelType::Gpu8x8x8:
468             Nbnxm::gpu_launch_kernel(gpu_nbv, stepWork, iLocality);
469             break;
470
471         case Nbnxm::KernelType::Cpu8x8x8_PlainC:
472             nbnxn_kernel_gpu_ref(
473                     pairlistSet.gpuList(),
474                     nbat.get(),
475                     &ic,
476                     fr.shift_vec,
477                     stepWork,
478                     clearF,
479                     nbat->out[0].f,
480                     nbat->out[0].fshift.data(),
481                     enerd->grpp.ener[egCOULSR].data(),
482                     fr.bBHAM ? enerd->grpp.ener[egBHAMSR].data() : enerd->grpp.ener[egLJSR].data());
483             break;
484
485         default: GMX_RELEASE_ASSERT(false, "Invalid nonbonded kernel type passed!");
486     }
487
488     accountFlops(nrnb, pairlistSet, *this, ic, stepWork);
489 }
490
491 void nonbonded_verlet_t::dispatchFreeEnergyKernel(gmx::InteractionLocality   iLocality,
492                                                   const t_forcerec*          fr,
493                                                   rvec                       x[],
494                                                   gmx::ForceWithShiftForces* forceWithShiftForces,
495                                                   const t_mdatoms&           mdatoms,
496                                                   t_lambda*                  fepvals,
497                                                   gmx::ArrayRef<real const>  lambda,
498                                                   gmx_enerdata_t*            enerd,
499                                                   const gmx::StepWorkload&   stepWork,
500                                                   t_nrnb*                    nrnb)
501 {
502     const auto nbl_fep = pairlistSets().pairlistSet(iLocality).fepLists();
503
504     /* When the first list is empty, all are empty and there is nothing to do */
505     if (!pairlistSets().params().haveFep || nbl_fep[0]->nrj == 0)
506     {
507         return;
508     }
509
510     int donb_flags = 0;
511     /* Add short-range interactions */
512     donb_flags |= GMX_NONBONDED_DO_SR;
513
514     if (stepWork.computeForces)
515     {
516         donb_flags |= GMX_NONBONDED_DO_FORCE;
517     }
518     if (stepWork.computeVirial)
519     {
520         donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
521     }
522     if (stepWork.computeEnergy)
523     {
524         donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
525     }
526
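    /* kernel_data bundles the flags, lambda values and output buffers that
     * are passed to each per-thread gmx_nb_free_energy_kernel() call below. */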
527     nb_kernel_data_t kernel_data;
528     real             dvdl_nb[efptNR] = { 0 };
529     kernel_data.flags                = donb_flags;
530     kernel_data.lambda               = lambda.data();
531     kernel_data.dvdl                 = dvdl_nb;
532
533     kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR].data();
534     kernel_data.energygrp_vdw  = enerd->grpp.ener[egLJSR].data();
535
536     GMX_ASSERT(gmx_omp_nthreads_get(emntNonbonded) == nbl_fep.ssize(),
537                "Number of lists should be the same as the number of NB threads");
538
539     wallcycle_sub_start(wcycle_, ewcsNONBONDED_FEP);
540 #pragma omp parallel for schedule(static) num_threads(nbl_fep.ssize())
541     for (gmx::index th = 0; th < nbl_fep.ssize(); th++)
542     {
543         try
544         {
545             gmx_nb_free_energy_kernel(
546                     nbl_fep[th].get(), x, forceWithShiftForces, fr, &mdatoms, &kernel_data, nrnb);
547         }
548         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
549     }
550
551     if (fepvals->sc_alpha != 0)
552     {
553         enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
554         enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
555     }
556     else
557     {
558         enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
559         enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
560     }
561
562     /* If we do foreign lambdas and have soft-core interactions,
563      * we have to recalculate the (non-linear) energy contributions.
564      */
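    /* The loop below re-runs the free-energy kernel once for the current
     * lambda state and once per foreign lambda point, with force and
     * shift-force evaluation switched off, so that only the energies and
     * dV/dlambda contributions are accumulated. */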
565     if (fepvals->n_lambda > 0 && stepWork.computeDhdl && fepvals->sc_alpha != 0)
566     {
567         real lam_i[efptNR];
568         kernel_data.flags = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE))
569                             | GMX_NONBONDED_DO_FOREIGNLAMBDA;
570         kernel_data.lambda         = lam_i;
571         kernel_data.dvdl           = dvdl_nb;
572         kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR].data();
573         kernel_data.energygrp_vdw  = enerd->foreign_grpp.ener[egLJSR].data();
574
575         for (gmx::index i = 0; i < 1 + enerd->foreignLambdaTerms.numLambdas(); i++)
576         {
577             std::fill(std::begin(dvdl_nb), std::end(dvdl_nb), 0);
578             for (int j = 0; j < efptNR; j++)
579             {
580                 lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i - 1]);
581             }
582             reset_foreign_enerdata(enerd);
583 #pragma omp parallel for schedule(static) num_threads(nbl_fep.ssize())
584             for (gmx::index th = 0; th < nbl_fep.ssize(); th++)
585             {
586                 try
587                 {
588                     gmx_nb_free_energy_kernel(
589                             nbl_fep[th].get(), x, forceWithShiftForces, fr, &mdatoms, &kernel_data, nrnb);
590                 }
591                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
592             }
593
594             sum_epot(enerd->foreign_grpp, enerd->foreign_term);
595             enerd->foreignLambdaTerms.accumulate(
596                     i, enerd->foreign_term[F_EPOT], dvdl_nb[efptVDW] + dvdl_nb[efptCOUL]);
597         }
598     }
599     wallcycle_sub_stop(wcycle_, ewcsNONBONDED_FEP);
600 }