/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#include "gmxpre.h"

#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/nblist.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/simd/simd.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/enumerationhelpers.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/stringutil.h"

#include "kernel_common.h"
#include "nbnxm_gpu.h"
#include "nbnxm_gpu_data_mgmt.h"
#include "nbnxm_simd.h"
#include "pairlistset.h"
#include "pairlistsets.h"
#include "kernels_reference/kernel_gpu_ref.h"
#define INCLUDE_KERNELFUNCTION_TABLES
#include "kernels_reference/kernel_ref.h"
#ifdef GMX_NBNXN_SIMD_2XNN
#    include "kernels_simd_2xmm/kernels.h"
#endif
#ifdef GMX_NBNXN_SIMD_4XN
#    include "kernels_simd_4xm/kernels.h"
#endif
#undef INCLUDE_KERNELFUNCTION_TABLES

/*! \brief Clears the energy group output buffers
 *
 * \param[in,out] out  nbnxn kernel output struct
 */
static void clearGroupEnergies(nbnxn_atomdata_output_t* out)
{
    std::fill(out->Vvdw.begin(), out->Vvdw.end(), 0.0_real);
    std::fill(out->Vc.begin(), out->Vc.end(), 0.0_real);
    std::fill(out->VSvdw.begin(), out->VSvdw.end(), 0.0_real);
    std::fill(out->VSc.begin(), out->VSc.end(), 0.0_real);
}

/*! \brief Reduce the group-pair energy buffers produced by a SIMD kernel
 * to single terms in the output buffers.
 *
 * The SIMD kernels produce a large number of energy buffers in SIMD registers
 * to avoid scattered reads and writes.
 *
 * \tparam        unrollj         The unroll size for j-particles in the SIMD kernel
 * \param[in]     numGroups       The number of energy groups
 * \param[in]     numGroups_2log  Log2 of numGroups, rounded up
 * \param[in,out] out             Struct with energy buffers
 */
template<int unrollj>
static void reduceGroupEnergySimdBuffers(int numGroups, int numGroups_2log, nbnxn_atomdata_output_t* out)
{
    const int unrollj_half = unrollj / 2;
    /* Energies are stored in SIMD registers with size 2^numGroups_2log */
    const int numGroupsStorage = (1 << numGroups_2log);

    const real* gmx_restrict vVdwSimd     = out->VSvdw.data();
    const real* gmx_restrict vCoulombSimd = out->VSc.data();
    real* gmx_restrict vVdw               = out->Vvdw.data();
    real* gmx_restrict vCoulomb           = out->Vc.data();

    /* The size of the SIMD energy group buffer array is:
     * numGroups*numGroups*numGroupsStorage*unrollj_half*simd_width
     */
    for (int i = 0; i < numGroups; i++)
    {
        for (int j1 = 0; j1 < numGroups; j1++)
        {
            for (int j0 = 0; j0 < numGroups; j0++)
            {
                int c = ((i * numGroups + j1) * numGroupsStorage + j0) * unrollj_half * unrollj;
                for (int s = 0; s < unrollj_half; s++)
                {
                    vVdw[i * numGroups + j0] += vVdwSimd[c + 0];
                    vVdw[i * numGroups + j1] += vVdwSimd[c + 1];
                    vCoulomb[i * numGroups + j0] += vCoulombSimd[c + 0];
                    vCoulomb[i * numGroups + j1] += vCoulombSimd[c + 1];
                    c += unrollj + 2;
                }
            }
        }
    }
}
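
/* Worked example of the index arithmetic above (illustrative only): for
 * unrollj = 4, each (i, j1, j0) block holds unrollj_half * unrollj = 8 reals,
 * and since c advances by unrollj + 2 the loop reads block offsets 0,1 for
 * s = 0 and 6,7 for s = 1, i.e. lanes 2*s and 2*s + 1 of the s-th
 * SIMD-register-wide chunk of the block.
 */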
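/*! \brief Returns the Coulomb kernel type (coulkt*) for the CPU kernel tables
 *
 * The type is selected from the electrostatics settings in \p ic and the
 * Ewald exclusion handling in \p kernelSetup; the _TWIN variants are chosen
 * when the Coulomb and VdW cut-offs differ.
 */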
static int getCoulombKernelType(const Nbnxm::KernelSetup& kernelSetup, const interaction_const_t& ic)
{

    if (EEL_RF(ic.eeltype) || ic.eeltype == CoulombInteractionType::Cut)
    {
        return coulktRF;
    }
    else
    {
        if (kernelSetup.ewaldExclusionType == Nbnxm::EwaldExclusionType::Table)
        {
            if (ic.rcoulomb == ic.rvdw)
            {
                return coulktTAB;
            }
            else
            {
                return coulktTAB_TWIN;
            }
        }
        else
        {
            if (ic.rcoulomb == ic.rvdw)
            {
                return coulktEWALD;
            }
            else
            {
                return coulktEWALD_TWIN;
            }
        }
    }
}
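/*! \brief Returns the Van der Waals kernel type (vdwkt*) for the CPU kernel tables
 *
 * The type is selected from the VdW interaction type and potential modifier
 * in \p ic and the LJ combination rule in \p nbatParams; unsupported
 * combinations trigger an inconsistency error.
 */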
static int getVdwKernelType(const Nbnxm::KernelSetup&       kernelSetup,
                            const nbnxn_atomdata_t::Params& nbatParams,
                            const interaction_const_t&      ic)
{
    if (ic.vdwtype == VanDerWaalsType::Cut)
    {
        switch (ic.vdw_modifier)
        {
            case InteractionModifiers::None:
            case InteractionModifiers::PotShift:
                switch (nbatParams.ljCombinationRule)
                {
                    case LJCombinationRule::Geometric: return vdwktLJCUT_COMBGEOM;
                    case LJCombinationRule::LorentzBerthelot: return vdwktLJCUT_COMBLB;
                    case LJCombinationRule::None: return vdwktLJCUT_COMBNONE;
                    default: gmx_incons("Unknown combination rule");
                }
            case InteractionModifiers::ForceSwitch: return vdwktLJFORCESWITCH;
            case InteractionModifiers::PotSwitch: return vdwktLJPOTSWITCH;
            default:
                std::string errorMsg =
                        gmx::formatString("Unsupported VdW interaction modifier %s (%d)",
                                          enumValueToString(ic.vdw_modifier),
                                          static_cast<int>(ic.vdw_modifier));
                gmx_incons(errorMsg);
        }
    }
    else if (ic.vdwtype == VanDerWaalsType::Pme)
    {
        if (ic.ljpme_comb_rule == LongRangeVdW::Geom)
        {
            return vdwktLJEWALDCOMBGEOM;
        }
        else
        {
            /* At setup we (should have) selected the C reference kernel */
            GMX_RELEASE_ASSERT(kernelSetup.kernelType == Nbnxm::KernelType::Cpu4x4_PlainC,
                               "Only the plain-C reference nbnxn kernel supports LJ-PME with LB "
                               "combination rules");
            return vdwktLJEWALDCOMBLB;
        }
    }
    else
    {
        std::string errorMsg = gmx::formatString("Unsupported VdW interaction type %s (%d)",
                                                 enumValueToString(ic.vdwtype),
                                                 static_cast<int>(ic.vdwtype));
        gmx_incons(errorMsg);
    }
}
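
/* The two indices computed above select a CPU kernel from the 2D
 * function-pointer tables declared in the included kernel headers, indexed as
 * table[coulombKernelType][vdwKernelType]. For example (illustrative only),
 * nbnxn_kernel_ener_ref[coulktRF][vdwktLJCUT_COMBGEOM] is the reference energy
 * kernel for reaction-field electrostatics with geometric LJ combination rules.
 */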

/*! \brief Dispatches the non-bonded N versus M atom cluster CPU kernels.
 *
 * OpenMP parallelization is performed within this function.
 * Energy reduction, but not force and shift force reduction, is performed
 * within this function.
 *
 * \param[in]     pairlistSet   Pairlists with local or non-local interactions to compute
 * \param[in]     kernelSetup   The non-bonded kernel setup
 * \param[in,out] nbat          The atomdata for the interactions
 * \param[in]     ic            Non-bonded interaction constants
 * \param[in]     shiftVectors  The PBC shift vectors
 * \param[in]     stepWork      Flags that tell what to compute
 * \param[in]     clearF        Enum that tells whether to clear the force output buffer
 * \param[out]    vCoulomb      Output buffer for Coulomb energies
 * \param[out]    vVdw          Output buffer for Van der Waals energies
 * \param[in]     wcycle        Pointer to cycle counting data structure.
 */
static void nbnxn_kernel_cpu(const PairlistSet&         pairlistSet,
                             const Nbnxm::KernelSetup&  kernelSetup,
                             nbnxn_atomdata_t*          nbat,
                             const interaction_const_t& ic,
                             rvec*                      shiftVectors,
                             const gmx::StepWorkload&   stepWork,
                             int                        clearF,
                             real*                      vCoulomb,
                             real*                      vVdw,
                             gmx_wallcycle*             wcycle)
{

    const nbnxn_atomdata_t::Params& nbatParams = nbat->params();

    const int coulkt = getCoulombKernelType(kernelSetup, ic);
    const int vdwkt  = getVdwKernelType(kernelSetup, nbatParams, ic);

    gmx::ArrayRef<const NbnxnPairlistCpu> pairlists = pairlistSet.cpuLists();

    int gmx_unused nthreads = gmx_omp_nthreads_get(emntNonbonded);
    wallcycle_sub_start(wcycle, ewcsNONBONDED_CLEAR);
#pragma omp parallel for schedule(static) num_threads(nthreads)
    for (gmx::index nb = 0; nb < pairlists.ssize(); nb++)
    {
        // Presently, the kernels do not call C++ code that can throw,
        // so no need for a try/catch pair in this OpenMP region.
        nbnxn_atomdata_output_t* out = &nbat->out[nb];

        if (clearF == enbvClearFYes)
        {
            clearForceBuffer(nbat, nb);

            clear_fshift(out->fshift.data());
        }

        if (nb == 0)
        {
            wallcycle_sub_stop(wcycle, ewcsNONBONDED_CLEAR);
            wallcycle_sub_start(wcycle, ewcsNONBONDED_KERNEL);
        }

        // TODO: Change to reference
        const NbnxnPairlistCpu* pairlist = &pairlists[nb];

        if (!stepWork.computeEnergy)
        {
            /* Don't calculate energies */
            switch (kernelSetup.kernelType)
            {
                case Nbnxm::KernelType::Cpu4x4_PlainC:
                    nbnxn_kernel_noener_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#ifdef GMX_NBNXN_SIMD_2XNN
                case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
                    nbnxm_kernel_noener_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
#ifdef GMX_NBNXN_SIMD_4XN
                case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
                    nbnxm_kernel_noener_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
                default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
            }
        }
        else if (out->Vvdw.size() == 1)
        {
            /* A single energy group (pair) */
            out->Vvdw[0] = 0;
            out->Vc[0]   = 0;

            switch (kernelSetup.kernelType)
            {
                case Nbnxm::KernelType::Cpu4x4_PlainC:
                    nbnxn_kernel_ener_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#ifdef GMX_NBNXN_SIMD_2XNN
                case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
                    nbnxm_kernel_ener_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
#ifdef GMX_NBNXN_SIMD_4XN
                case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
                    nbnxm_kernel_ener_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
                default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
            }
        }
        else
        {
            /* Calculate energy group contributions */
            clearGroupEnergies(out);

            int unrollj = 0;

            switch (kernelSetup.kernelType)
            {
                case Nbnxm::KernelType::Cpu4x4_PlainC:
                    unrollj = c_nbnxnCpuIClusterSize;
                    nbnxn_kernel_energrp_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#ifdef GMX_NBNXN_SIMD_2XNN
                case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
                    unrollj = GMX_SIMD_REAL_WIDTH / 2;
                    nbnxm_kernel_energrp_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
#ifdef GMX_NBNXN_SIMD_4XN
                case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
                    unrollj = GMX_SIMD_REAL_WIDTH;
                    nbnxm_kernel_energrp_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVectors, out);
                    break;
#endif
                default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
            }

            if (kernelSetup.kernelType != Nbnxm::KernelType::Cpu4x4_PlainC)
            {
                switch (unrollj)
                {
                    case 2:
                        reduceGroupEnergySimdBuffers<2>(nbatParams.nenergrp, nbatParams.neg_2log, out);
                        break;
                    case 4:
                        reduceGroupEnergySimdBuffers<4>(nbatParams.nenergrp, nbatParams.neg_2log, out);
                        break;
                    case 8:
                        reduceGroupEnergySimdBuffers<8>(nbatParams.nenergrp, nbatParams.neg_2log, out);
                        break;
                    default: GMX_RELEASE_ASSERT(false, "Unsupported j-unroll size");
                }
            }
        }
    }
    wallcycle_sub_stop(wcycle, ewcsNONBONDED_KERNEL);

    if (stepWork.computeEnergy)
    {
        reduce_energies_over_lists(nbat, pairlists.ssize(), vVdw, vCoulomb);
    }
}
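/*! \brief Accumulates the flop counts of the non-bonded kernel call in \p nrnb
 *
 * The counters are selected based on the electrostatics type, the LJ modifier
 * and whether energies are computed on this step.
 */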
static void accountFlops(t_nrnb*                    nrnb,
                         const PairlistSet&         pairlistSet,
                         const nonbonded_verlet_t&  nbv,
                         const interaction_const_t& ic,
                         const gmx::StepWorkload&   stepWork)
{
    const bool usingGpuKernels = nbv.useGpu();

    int enr_nbnxn_kernel_ljc = eNRNB;
    if (EEL_RF(ic.eeltype) || ic.eeltype == CoulombInteractionType::Cut)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!usingGpuKernels && nbv.kernelSetup().ewaldExclusionType == Nbnxm::EwaldExclusionType::Analytical)
             || (usingGpuKernels && Nbnxm::gpu_is_kernel_ewald_analytical(nbv.gpu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    int enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (stepWork.computeEnergy)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc, pairlistSet.natpair_ljq_);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj, pairlistSet.natpair_lj_);
    /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc - eNR_NBNXN_LJ_RF + eNR_NBNXN_RF, pairlistSet.natpair_q_);

    if (ic.vdw_modifier == InteractionModifiers::ForceSwitch)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb,
                 eNR_NBNXN_ADD_LJ_FSW + (stepWork.computeEnergy ? 1 : 0),
                 pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
    }
    if (ic.vdw_modifier == InteractionModifiers::PotSwitch)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb,
                 eNR_NBNXN_ADD_LJ_PSW + (stepWork.computeEnergy ? 1 : 0),
                 pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
    }
    if (ic.vdwtype == VanDerWaalsType::Pme)
    {
        /* We add up the LJ Ewald cost separately */
        inc_nrnb(nrnb,
                 eNR_NBNXN_ADD_LJ_EWALD + (stepWork.computeEnergy ? 1 : 0),
                 pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
    }
}
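/* Kernel dispatch: the CPU cluster kernel types run nbnxn_kernel_cpu() above,
 * Gpu8x8x8 launches the GPU kernel asynchronously, and Cpu8x8x8_PlainC runs
 * the plain-C reference implementation of the GPU pair-list layout on the CPU
 * (mainly useful for testing and debugging).
 */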
void nonbonded_verlet_t::dispatchNonbondedKernel(gmx::InteractionLocality   iLocality,
                                                 const interaction_const_t& ic,
                                                 const gmx::StepWorkload&   stepWork,
                                                 int                        clearF,
                                                 const t_forcerec&          fr,
                                                 gmx_enerdata_t*            enerd,
                                                 t_nrnb*                    nrnb)
{
    const PairlistSet& pairlistSet = pairlistSets().pairlistSet(iLocality);

    switch (kernelSetup().kernelType)
    {
        case Nbnxm::KernelType::Cpu4x4_PlainC:
        case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
        case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
            nbnxn_kernel_cpu(
                    pairlistSet,
                    kernelSetup(),
                    nbat.get(),
                    ic,
                    fr.shift_vec,
                    stepWork,
                    clearF,
                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
                    fr.bBHAM ? enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::BuckinghamSR].data()
                             : enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data(),
                    wcycle_);
            break;

        case Nbnxm::KernelType::Gpu8x8x8:
            Nbnxm::gpu_launch_kernel(gpu_nbv, stepWork, iLocality);
            break;

        case Nbnxm::KernelType::Cpu8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(
                    pairlistSet.gpuList(),
                    nbat.get(),
                    &ic,
                    fr.shift_vec,
                    stepWork,
                    clearF,
                    nbat->out[0].f,
                    nbat->out[0].fshift.data(),
                    enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR].data(),
                    fr.bBHAM ? enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::BuckinghamSR].data()
                             : enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR].data());
            break;

        default: GMX_RELEASE_ASSERT(false, "Invalid nonbonded kernel type passed!");
    }

    accountFlops(nrnb, pairlistSet, *this, ic, stepWork);
}
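/* Perturbed pairs are not handled by the cluster pair kernels above; they are
 * stored in separate per-thread free-energy pair lists and computed here with
 * the dedicated free-energy kernel.
 */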
void nonbonded_verlet_t::dispatchFreeEnergyKernel(gmx::InteractionLocality   iLocality,
                                                  const t_forcerec*          fr,
                                                  rvec                       x[],
                                                  gmx::ForceWithShiftForces* forceWithShiftForces,
                                                  const t_mdatoms&           mdatoms,
                                                  t_lambda*                  fepvals,
                                                  gmx::ArrayRef<const real>  lambda,
                                                  gmx_enerdata_t*            enerd,
                                                  const gmx::StepWorkload&   stepWork,
                                                  t_nrnb*                    nrnb)
{
    const auto nbl_fep = pairlistSets().pairlistSet(iLocality).fepLists();

    /* When the first list is empty, all are empty and there is nothing to do */
    if (!pairlistSets().params().haveFep || nbl_fep[0]->nrj == 0)
    {
        return;
    }

    int donb_flags = 0;
    /* Add short-range interactions */
    donb_flags |= GMX_NONBONDED_DO_SR;

    if (stepWork.computeForces)
    {
        donb_flags |= GMX_NONBONDED_DO_FORCE;
    }
    if (stepWork.computeVirial)
    {
        donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
    }
    if (stepWork.computeEnergy)
    {
        donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
    }

    nb_kernel_data_t                                                kernel_data;
    gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> dvdl_nb = { 0 };
    kernel_data.flags                                                       = donb_flags;
    kernel_data.lambda                                                      = lambda;
    kernel_data.dvdl                                                        = dvdl_nb;

    gmx::ArrayRef<real> energygrp_elec = enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR];
    gmx::ArrayRef<real> energygrp_vdw = enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR];

    GMX_ASSERT(gmx_omp_nthreads_get(emntNonbonded) == nbl_fep.ssize(),
               "Number of lists should be the same as number of NB threads");

    wallcycle_sub_start(wcycle_, ewcsNONBONDED_FEP);
#pragma omp parallel for schedule(static) num_threads(nbl_fep.ssize())
    for (gmx::index th = 0; th < nbl_fep.ssize(); th++)
    {
        try
        {
            gmx_nb_free_energy_kernel(
                    nbl_fep[th].get(), x, forceWithShiftForces, fr, &mdatoms, &kernel_data, energygrp_elec, energygrp_vdw, nrnb);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
    }

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[FreeEnergyPerturbationCouplingType::Vdw] +=
                dvdl_nb[FreeEnergyPerturbationCouplingType::Vdw];
        enerd->dvdl_nonlin[FreeEnergyPerturbationCouplingType::Coul] +=
                dvdl_nb[FreeEnergyPerturbationCouplingType::Coul];
    }
    else
    {
        enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] +=
                dvdl_nb[FreeEnergyPerturbationCouplingType::Vdw];
        enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Coul] +=
                dvdl_nb[FreeEnergyPerturbationCouplingType::Coul];
    }

    /* If we do foreign lambdas and have soft-core interactions, we have to
     * recalculate the (non-linear) energy contributions.
     */
    if (fepvals->n_lambda > 0 && stepWork.computeDhdl && fepvals->sc_alpha != 0)
    {
        gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> lam_i;
        kernel_data.flags = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE))
                            | GMX_NONBONDED_DO_FOREIGNLAMBDA;
        kernel_data.lambda = lam_i;
        kernel_data.dvdl   = dvdl_nb;
        gmx::ArrayRef<real> energygrp_elec =
                enerd->foreign_grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR];
        gmx::ArrayRef<real> energygrp_vdw =
                enerd->foreign_grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR];

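        /* Index 0 recomputes the current lambda state (with the foreign-lambda
         * flag set); indices >= 1 correspond to the foreign lambda points in
         * fepvals->all_lambda.
         */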
        for (gmx::index i = 0; i < 1 + enerd->foreignLambdaTerms.numLambdas(); i++)
        {
            std::fill(std::begin(dvdl_nb), std::end(dvdl_nb), 0);
            for (int j = 0; j < static_cast<int>(FreeEnergyPerturbationCouplingType::Count); j++)
            {
                lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i - 1]);
            }
            reset_foreign_enerdata(enerd);
#pragma omp parallel for schedule(static) num_threads(nbl_fep.ssize())
            for (gmx::index th = 0; th < nbl_fep.ssize(); th++)
            {
                try
                {
                    gmx_nb_free_energy_kernel(nbl_fep[th].get(),
                                              x,
                                              forceWithShiftForces,
                                              fr,
                                              &mdatoms,
                                              &kernel_data,
                                              energygrp_elec,
                                              energygrp_vdw,
                                              nrnb);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
            }

            sum_epot(enerd->foreign_grpp, enerd->foreign_term);
            enerd->foreignLambdaTerms.accumulate(
                    i,
                    enerd->foreign_term[F_EPOT],
                    dvdl_nb[FreeEnergyPerturbationCouplingType::Vdw]
                            + dvdl_nb[FreeEnergyPerturbationCouplingType::Coul]);
        }
    }
    wallcycle_sub_stop(wcycle_, ewcsNONBONDED_FEP);
}