CoulombKernelType enum, lookup function, and tests
src/gromacs/nbnxm/kerneldispatch.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#include "gmxpre.h"

#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/nblist.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/simd/simd.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/enumerationhelpers.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/stringutil.h"

#include "kernel_common.h"
#include "nbnxm_gpu.h"
#include "nbnxm_simd.h"
#include "pairlistset.h"
#include "pairlistsets.h"
#include "kernels_reference/kernel_gpu_ref.h"
#define INCLUDE_KERNELFUNCTION_TABLES
#include "kernels_reference/kernel_ref.h"
#ifdef GMX_NBNXN_SIMD_2XNN
#    include "kernels_simd_2xmm/kernels.h"
#endif
#ifdef GMX_NBNXN_SIMD_4XN
#    include "kernels_simd_4xm/kernels.h"
#endif
#undef INCLUDE_KERNELFUNCTION_TABLES

/*! \brief Clears the energy group output buffers
 *
 * \param[in,out] out  nbnxn kernel output struct
 */
static void clearGroupEnergies(nbnxn_atomdata_output_t* out)
{
    std::fill(out->Vvdw.begin(), out->Vvdw.end(), 0.0_real);
    std::fill(out->Vc.begin(), out->Vc.end(), 0.0_real);
    std::fill(out->VSvdw.begin(), out->VSvdw.end(), 0.0_real);
    std::fill(out->VSc.begin(), out->VSc.end(), 0.0_real);
}

/*! \brief Reduce the group-pair energy buffers produced by a SIMD kernel
 * to single terms in the output buffers.
 *
 * The SIMD kernels produce a large number of energy buffers in SIMD registers
 * to avoid scattered reads and writes.
 *
 * \tparam        unrollj         The unroll size for j-particles in the SIMD kernel
 * \param[in]     numGroups       The number of energy groups
 * \param[in]     numGroups_2log  Log2 of numGroups, rounded up
 * \param[in,out] out             Struct with energy buffers
 */
template<int unrollj>
static void reduceGroupEnergySimdBuffers(int numGroups, int numGroups_2log, nbnxn_atomdata_output_t* out)
{
    const int unrollj_half = unrollj / 2;
    /* Energies are stored in SIMD registers with size 2^numGroups_2log */
    const int numGroupsStorage = (1 << numGroups_2log);

    const real* gmx_restrict vVdwSimd     = out->VSvdw.data();
    const real* gmx_restrict vCoulombSimd = out->VSc.data();
    real* gmx_restrict       vVdw         = out->Vvdw.data();
    real* gmx_restrict       vCoulomb     = out->Vc.data();

    /* The size of the SIMD energy group buffer array is:
     * numGroups*numGroups*numGroupsStorage*unrollj_half*simd_width
     */
    for (int i = 0; i < numGroups; i++)
    {
        for (int j1 = 0; j1 < numGroups; j1++)
        {
            for (int j0 = 0; j0 < numGroups; j0++)
            {
                int c = ((i * numGroups + j1) * numGroupsStorage + j0) * unrollj_half * unrollj;
                for (int s = 0; s < unrollj_half; s++)
                {
                    vVdw[i * numGroups + j0] += vVdwSimd[c + 0];
                    vVdw[i * numGroups + j1] += vVdwSimd[c + 1];
                    vCoulomb[i * numGroups + j0] += vCoulombSimd[c + 0];
                    vCoulomb[i * numGroups + j1] += vCoulombSimd[c + 1];
                    c += unrollj + 2;
                }
            }
        }
    }
}

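/* Returns the Coulomb kernel flavor matching the electrostatics settings:
 * the reaction-field flavor for reaction-field or plain cut-off electrostatics,
 * otherwise a tabulated or analytical Ewald flavor, each with a twin-range
 * variant when the Coulomb and VdW cut-off radii differ.
 */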
CoulombKernelType getCoulombKernelType(const Nbnxm::EwaldExclusionType ewaldExclusionType,
                                       const CoulombInteractionType    coulombInteractionType,
                                       const bool                      haveEqualCoulombVdwRadii)
{
    if (EEL_RF(coulombInteractionType) || coulombInteractionType == CoulombInteractionType::Cut)
    {
        return CoulombKernelType::ReactionField;
    }
    else
    {
        if (ewaldExclusionType == Nbnxm::EwaldExclusionType::Table)
        {
            if (haveEqualCoulombVdwRadii)
            {
                return CoulombKernelType::Table;
            }
            else
            {
                return CoulombKernelType::TableTwin;
            }
        }
        else
        {
            if (haveEqualCoulombVdwRadii)
            {
                return CoulombKernelType::Ewald;
            }
            else
            {
                return CoulombKernelType::EwaldTwin;
            }
        }
    }
}

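//! Returns the VdW kernel table index matching the VdW type, interaction modifier and LJ combination rule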
static int getVdwKernelType(const Nbnxm::KernelSetup&       kernelSetup,
                            const nbnxn_atomdata_t::Params& nbatParams,
                            const interaction_const_t&      ic)
{
    if (ic.vdwtype == VanDerWaalsType::Cut)
    {
        switch (ic.vdw_modifier)
        {
            case InteractionModifiers::None:
            case InteractionModifiers::PotShift:
                switch (nbatParams.ljCombinationRule)
                {
                    case LJCombinationRule::Geometric: return vdwktLJCUT_COMBGEOM;
                    case LJCombinationRule::LorentzBerthelot: return vdwktLJCUT_COMBLB;
                    case LJCombinationRule::None: return vdwktLJCUT_COMBNONE;
                    default: gmx_incons("Unknown combination rule");
                }
            case InteractionModifiers::ForceSwitch: return vdwktLJFORCESWITCH;
            case InteractionModifiers::PotSwitch: return vdwktLJPOTSWITCH;
            default:
                std::string errorMsg =
                        gmx::formatString("Unsupported VdW interaction modifier %s (%d)",
                                          enumValueToString(ic.vdw_modifier),
                                          static_cast<int>(ic.vdw_modifier));
                gmx_incons(errorMsg);
        }
    }
    else if (ic.vdwtype == VanDerWaalsType::Pme)
    {
        if (ic.ljpme_comb_rule == LongRangeVdW::Geom)
        {
            return vdwktLJEWALDCOMBGEOM;
        }
        else
        {
            /* At setup we (should have) selected the C reference kernel */
            GMX_RELEASE_ASSERT(kernelSetup.kernelType == Nbnxm::KernelType::Cpu4x4_PlainC,
                               "Only the plain-C reference nbnxn kernel supports LJ-PME with LB "
                               "combination rules");
            return vdwktLJEWALDCOMBLB;
        }
    }
    else
    {
        std::string errorMsg = gmx::formatString("Unsupported VdW interaction type %s (%d)",
                                                 enumValueToString(ic.vdwtype),
                                                 static_cast<int>(ic.vdwtype));
        gmx_incons(errorMsg);
    }
}

/*! \brief Dispatches the non-bonded N versus M atom cluster CPU kernels.
 *
 * OpenMP parallelization is performed within this function.
 * Energy reduction, but not force and shift force reduction, is performed
 * within this function.
 *
 * \param[in]     pairlistSet   Pairlists with local or non-local interactions to compute
 * \param[in]     kernelSetup   The non-bonded kernel setup
 * \param[in,out] nbat          The atomdata for the interactions
 * \param[in]     ic            Non-bonded interaction constants
 * \param[in]     shiftVectors  The PBC shift vectors
 * \param[in]     stepWork      Flags that tell what to compute
 * \param[in]     clearF        Enum that tells whether to clear the force output buffer
 * \param[out]    vCoulomb      Output buffer for Coulomb energies
 * \param[out]    vVdw          Output buffer for Van der Waals energies
 * \param[in]     wcycle        Pointer to cycle counting data structure.
 */
static void nbnxn_kernel_cpu(const PairlistSet&             pairlistSet,
                             const Nbnxm::KernelSetup&      kernelSetup,
                             nbnxn_atomdata_t*              nbat,
                             const interaction_const_t&     ic,
                             gmx::ArrayRef<const gmx::RVec> shiftVectors,
                             const gmx::StepWorkload&       stepWork,
                             int                            clearF,
                             real*                          vCoulomb,
                             real*                          vVdw,
                             gmx_wallcycle*                 wcycle)
{
    const nbnxn_atomdata_t::Params& nbatParams = nbat->params();

    const int coulkt = static_cast<int>(getCoulombKernelType(
            kernelSetup.ewaldExclusionType, ic.eeltype, (ic.rcoulomb == ic.rvdw)));
    const int vdwkt  = getVdwKernelType(kernelSetup, nbatParams, ic);
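    /* The kernel function tables included above via INCLUDE_KERNELFUNCTION_TABLES
     * are indexed as [Coulomb kernel type][VdW kernel type].
     */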

    gmx::ArrayRef<const NbnxnPairlistCpu> pairlists = pairlistSet.cpuLists();

    const auto* shiftVecPointer = as_rvec_array(shiftVectors.data());

    int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded);
    wallcycle_sub_start(wcycle, WallCycleSubCounter::NonbondedClear);
#pragma omp parallel for schedule(static) num_threads(nthreads)
    for (gmx::index nb = 0; nb < pairlists.ssize(); nb++)
    {
        // Presently, the kernels do not call C++ code that can throw,
        // so no need for a try/catch pair in this OpenMP region.
        nbnxn_atomdata_output_t* out = &nbat->out[nb];

        if (clearF == enbvClearFYes)
        {
            clearForceBuffer(nbat, nb);

            clear_fshift(out->fshift.data());
        }

        if (nb == 0)
        {
            wallcycle_sub_stop(wcycle, WallCycleSubCounter::NonbondedClear);
            wallcycle_sub_start(wcycle, WallCycleSubCounter::NonbondedKernel);
        }

        // TODO: Change to reference
        const NbnxnPairlistCpu* pairlist = &pairlists[nb];

        if (!stepWork.computeEnergy)
        {
            /* Don't calculate energies */
            switch (kernelSetup.kernelType)
            {
                case Nbnxm::KernelType::Cpu4x4_PlainC:
                    nbnxn_kernel_noener_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVecPointer, out);
                    break;
#ifdef GMX_NBNXN_SIMD_2XNN
                case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
                    nbnxm_kernel_noener_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVecPointer, out);
                    break;
#endif
#ifdef GMX_NBNXN_SIMD_4XN
                case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
                    nbnxm_kernel_noener_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVecPointer, out);
                    break;
#endif
                default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
            }
        }
        else if (out->Vvdw.size() == 1)
        {
            /* A single energy group (pair) */
            out->Vvdw[0] = 0;
            out->Vc[0]   = 0;

            switch (kernelSetup.kernelType)
            {
                case Nbnxm::KernelType::Cpu4x4_PlainC:
                    nbnxn_kernel_ener_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVecPointer, out);
                    break;
#ifdef GMX_NBNXN_SIMD_2XNN
                case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
                    nbnxm_kernel_ener_simd_2xmm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVecPointer, out);
                    break;
#endif
#ifdef GMX_NBNXN_SIMD_4XN
                case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
                    nbnxm_kernel_ener_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVecPointer, out);
                    break;
#endif
                default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
            }
        }
        else
        {
            /* Calculate energy group contributions */
            clearGroupEnergies(out);

            int unrollj = 0;

            switch (kernelSetup.kernelType)
            {
                case Nbnxm::KernelType::Cpu4x4_PlainC:
                    unrollj = c_nbnxnCpuIClusterSize;
                    nbnxn_kernel_energrp_ref[coulkt][vdwkt](pairlist, nbat, &ic, shiftVecPointer, out);
                    break;
#ifdef GMX_NBNXN_SIMD_2XNN
                case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
                    unrollj = GMX_SIMD_REAL_WIDTH / 2;
                    nbnxm_kernel_energrp_simd_2xmm[coulkt][vdwkt](
                            pairlist, nbat, &ic, shiftVecPointer, out);
                    break;
#endif
#ifdef GMX_NBNXN_SIMD_4XN
                case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
                    unrollj = GMX_SIMD_REAL_WIDTH;
                    nbnxm_kernel_energrp_simd_4xm[coulkt][vdwkt](pairlist, nbat, &ic, shiftVecPointer, out);
                    break;
#endif
                default: GMX_RELEASE_ASSERT(false, "Unsupported kernel architecture");
            }

            if (kernelSetup.kernelType != Nbnxm::KernelType::Cpu4x4_PlainC)
            {
                switch (unrollj)
                {
                    case 2:
                        reduceGroupEnergySimdBuffers<2>(nbatParams.nenergrp, nbatParams.neg_2log, out);
                        break;
                    case 4:
                        reduceGroupEnergySimdBuffers<4>(nbatParams.nenergrp, nbatParams.neg_2log, out);
                        break;
                    case 8:
                        reduceGroupEnergySimdBuffers<8>(nbatParams.nenergrp, nbatParams.neg_2log, out);
                        break;
                    default: GMX_RELEASE_ASSERT(false, "Unsupported j-unroll size");
                }
            }
        }
    }
    wallcycle_sub_stop(wcycle, WallCycleSubCounter::NonbondedKernel);

    if (stepWork.computeEnergy)
    {
        reduce_energies_over_lists(nbat, pairlists.ssize(), vVdw, vCoulomb);
    }
}

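//! Accumulates the flop count estimate for this non-bonded kernel call into \p nrnb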
static void accountFlops(t_nrnb*                    nrnb,
                         const PairlistSet&         pairlistSet,
                         const nonbonded_verlet_t&  nbv,
                         const interaction_const_t& ic,
                         const gmx::StepWorkload&   stepWork)
{
    const bool usingGpuKernels = nbv.useGpu();

    int enr_nbnxn_kernel_ljc = eNRNB;
    if (EEL_RF(ic.eeltype) || ic.eeltype == CoulombInteractionType::Cut)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!usingGpuKernels && nbv.kernelSetup().ewaldExclusionType == Nbnxm::EwaldExclusionType::Analytical)
             || (usingGpuKernels && Nbnxm::gpu_is_kernel_ewald_analytical(nbv.gpu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    int enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (stepWork.computeEnergy)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc, pairlistSet.natpair_ljq_);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj, pairlistSet.natpair_lj_);
    /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc - eNR_NBNXN_LJ_RF + eNR_NBNXN_RF, pairlistSet.natpair_q_);

    if (ic.vdw_modifier == InteractionModifiers::ForceSwitch)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb,
                 eNR_NBNXN_ADD_LJ_FSW + (stepWork.computeEnergy ? 1 : 0),
                 pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
    }
    if (ic.vdw_modifier == InteractionModifiers::PotSwitch)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb,
                 eNR_NBNXN_ADD_LJ_PSW + (stepWork.computeEnergy ? 1 : 0),
                 pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
    }
    if (ic.vdwtype == VanDerWaalsType::Pme)
    {
        /* We add up the LJ Ewald cost separately */
        inc_nrnb(nrnb,
                 eNR_NBNXN_ADD_LJ_EWALD + (stepWork.computeEnergy ? 1 : 0),
                 pairlistSet.natpair_ljq_ + pairlistSet.natpair_lj_);
    }
}

void nonbonded_verlet_t::dispatchNonbondedKernel(gmx::InteractionLocality       iLocality,
                                                 const interaction_const_t&     ic,
                                                 const gmx::StepWorkload&       stepWork,
                                                 int                            clearF,
                                                 gmx::ArrayRef<const gmx::RVec> shiftvec,
                                                 gmx::ArrayRef<real> repulsionDispersionSR,
                                                 gmx::ArrayRef<real> CoulombSR,
                                                 t_nrnb*             nrnb) const
{
    const PairlistSet& pairlistSet = pairlistSets().pairlistSet(iLocality);

    switch (kernelSetup().kernelType)
    {
        case Nbnxm::KernelType::Cpu4x4_PlainC:
        case Nbnxm::KernelType::Cpu4xN_Simd_4xN:
        case Nbnxm::KernelType::Cpu4xN_Simd_2xNN:
            nbnxn_kernel_cpu(pairlistSet,
                             kernelSetup(),
                             nbat.get(),
                             ic,
                             shiftvec,
                             stepWork,
                             clearF,
                             CoulombSR.data(),
                             repulsionDispersionSR.data(),
                             wcycle_);
            break;

        case Nbnxm::KernelType::Gpu8x8x8:
            Nbnxm::gpu_launch_kernel(gpu_nbv, stepWork, iLocality);
            break;

        case Nbnxm::KernelType::Cpu8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(pairlistSet.gpuList(),
                                 nbat.get(),
                                 &ic,
                                 shiftvec,
                                 stepWork,
                                 clearF,
                                 nbat->out[0].f,
                                 nbat->out[0].fshift.data(),
                                 CoulombSR.data(),
                                 repulsionDispersionSR.data());
            break;

        default: GMX_RELEASE_ASSERT(false, "Invalid nonbonded kernel type passed!");
    }

    accountFlops(nrnb, pairlistSet, *this, ic, stepWork);
}

void nonbonded_verlet_t::dispatchFreeEnergyKernel(gmx::InteractionLocality       iLocality,
                                                  gmx::ArrayRef<const gmx::RVec> coords,
                                                  gmx::ForceWithShiftForces* forceWithShiftForces,
                                                  bool                       useSimd,
                                                  int                        ntype,
                                                  real                       rlist,
                                                  const interaction_const_t& ic,
                                                  gmx::ArrayRef<const gmx::RVec> shiftvec,
                                                  gmx::ArrayRef<const real>      nbfp,
                                                  gmx::ArrayRef<const real>      nbfp_grid,
                                                  gmx::ArrayRef<const real>      chargeA,
                                                  gmx::ArrayRef<const real>      chargeB,
                                                  gmx::ArrayRef<const int>       typeA,
                                                  gmx::ArrayRef<const int>       typeB,
                                                  t_lambda*                      fepvals,
                                                  gmx::ArrayRef<const real>      lambda,
                                                  gmx_enerdata_t*                enerd,
                                                  const gmx::StepWorkload&       stepWork,
                                                  t_nrnb*                        nrnb)
{
    const auto nbl_fep = pairlistSets().pairlistSet(iLocality).fepLists();

    /* When the first list is empty, all are empty and there is nothing to do */
    if (!pairlistSets().params().haveFep || nbl_fep[0]->nrj == 0)
    {
        return;
    }

    int donb_flags = 0;
    /* Add short-range interactions */
    donb_flags |= GMX_NONBONDED_DO_SR;

    if (stepWork.computeForces)
    {
        donb_flags |= GMX_NONBONDED_DO_FORCE;
    }
    if (stepWork.computeVirial)
    {
        donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
    }
    if (stepWork.computeEnergy)
    {
        donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
    }

    gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> dvdl_nb      = { 0 };
    int                                                             kernelFlags  = donb_flags;
    gmx::ArrayRef<const real>                                       kernelLambda = lambda;
    gmx::ArrayRef<real>                                             kernelDvdl   = dvdl_nb;

    gmx::ArrayRef<real> energygrp_elec = enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR];
    gmx::ArrayRef<real> energygrp_vdw = enerd->grpp.energyGroupPairTerms[NonBondedEnergyTerms::LJSR];

    GMX_ASSERT(gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded) == nbl_fep.ssize(),
               "Number of lists should be same as number of NB threads");

    wallcycle_sub_start(wcycle_, WallCycleSubCounter::NonbondedFep);
#pragma omp parallel for schedule(static) num_threads(nbl_fep.ssize())
    for (gmx::index th = 0; th < nbl_fep.ssize(); th++)
    {
        try
        {
            gmx_nb_free_energy_kernel(*nbl_fep[th],
                                      coords,
                                      forceWithShiftForces,
                                      useSimd,
                                      ntype,
                                      rlist,
                                      ic,
                                      shiftvec,
                                      nbfp,
                                      nbfp_grid,
                                      chargeA,
                                      chargeB,
                                      typeA,
                                      typeB,
                                      kernelFlags,
                                      kernelLambda,
                                      kernelDvdl,
                                      energygrp_elec,
                                      energygrp_vdw,
                                      nrnb);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
    }

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[FreeEnergyPerturbationCouplingType::Vdw] +=
                dvdl_nb[FreeEnergyPerturbationCouplingType::Vdw];
        enerd->dvdl_nonlin[FreeEnergyPerturbationCouplingType::Coul] +=
                dvdl_nb[FreeEnergyPerturbationCouplingType::Coul];
    }
    else
    {
        enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] +=
                dvdl_nb[FreeEnergyPerturbationCouplingType::Vdw];
        enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Coul] +=
                dvdl_nb[FreeEnergyPerturbationCouplingType::Coul];
    }

    /* If we do foreign lambdas and we have soft-core interactions
     * we have to recalculate the (non-linear) energy contributions.
     */
    if (fepvals->n_lambda > 0 && stepWork.computeDhdl && fepvals->sc_alpha != 0)
    {
        gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> lam_i;
        kernelFlags = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE))
                      | GMX_NONBONDED_DO_FOREIGNLAMBDA;
        kernelLambda = lam_i;
        kernelDvdl   = dvdl_nb;
        gmx::ArrayRef<real> energygrp_elec =
                foreignEnergyGroups_->energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR];
        gmx::ArrayRef<real> energygrp_vdw =
                foreignEnergyGroups_->energyGroupPairTerms[NonBondedEnergyTerms::LJSR];

        for (gmx::index i = 0; i < 1 + enerd->foreignLambdaTerms.numLambdas(); i++)
        {
            std::fill(std::begin(dvdl_nb), std::end(dvdl_nb), 0);
            for (int j = 0; j < static_cast<int>(FreeEnergyPerturbationCouplingType::Count); j++)
            {
                lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i - 1]);
            }
            foreignEnergyGroups_->clear();
#pragma omp parallel for schedule(static) num_threads(nbl_fep.ssize())
            for (gmx::index th = 0; th < nbl_fep.ssize(); th++)
            {
                try
                {
                    gmx_nb_free_energy_kernel(*nbl_fep[th],
                                              coords,
                                              forceWithShiftForces,
                                              useSimd,
                                              ntype,
                                              rlist,
                                              ic,
                                              shiftvec,
                                              nbfp,
                                              nbfp_grid,
                                              chargeA,
                                              chargeB,
                                              typeA,
                                              typeB,
                                              kernelFlags,
                                              kernelLambda,
                                              kernelDvdl,
                                              energygrp_elec,
                                              energygrp_vdw,
                                              nrnb);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
            }
            std::array<real, F_NRE> foreign_term = { 0 };
            sum_epot(*foreignEnergyGroups_, foreign_term.data());
            enerd->foreignLambdaTerms.accumulate(
                    i,
                    foreign_term[F_EPOT],
                    dvdl_nb[FreeEnergyPerturbationCouplingType::Vdw]
                            + dvdl_nb[FreeEnergyPerturbationCouplingType::Coul]);
        }
    }
    wallcycle_sub_stop(wcycle_, WallCycleSubCounter::NonbondedFep);
}