Merge remote-tracking branch 'origin/release-2019'
author Szilárd Páll <pall.szilard@gmail.com>
Wed, 25 Sep 2019 14:05:49 +0000 (16:05 +0200)
committer Szilárd Páll <pall.szilard@gmail.com>
Wed, 25 Sep 2019 14:05:49 +0000 (16:05 +0200)
Change-Id: Ia822fecb18b63d8a4e4408e056850a42875d57e8

src/gromacs/hardware/detecthardware.cpp
src/gromacs/hardware/hw_info.h
src/gromacs/nbnxm/nbnxm_setup.cpp

index 7b952c1697d5779ebef4a6739c60109335894e1c,4953b17fde647441879b1384c1be39cf59ed13e0..41071a68725c9f169ac53c72d74ebe818544a6b5
  
  #include "config.h"
  
 -#include <cerrno>
 -#include <cstdlib>
 -#include <cstring>
 -
  #include <algorithm>
  #include <array>
  #include <chrono>
  #include <thread>
  #include <vector>
  
 -#include "thread_mpi/threads.h"
 -
 -#include "gromacs/compat/make_unique.h"
 +#include "gromacs/compat/pointers.h"
  #include "gromacs/gpu_utils/gpu_utils.h"
  #include "gromacs/hardware/cpuinfo.h"
  #include "gromacs/hardware/hardwaretopology.h"
  #include "gromacs/hardware/hw_info.h"
 -#include "gromacs/mdtypes/commrec.h"
  #include "gromacs/simd/support.h"
  #include "gromacs/utility/basedefinitions.h"
  #include "gromacs/utility/basenetwork.h"
  #include "gromacs/utility/fatalerror.h"
  #include "gromacs/utility/gmxassert.h"
  #include "gromacs/utility/logger.h"
 +#include "gromacs/utility/mutex.h"
  #include "gromacs/utility/physicalnodecommunicator.h"
 -#include "gromacs/utility/programcontext.h"
 -#include "gromacs/utility/smalloc.h"
 -#include "gromacs/utility/stringutil.h"
 -#include "gromacs/utility/sysinfo.h"
  
  #include "architecture.h"
  
  #    include <unistd.h>       // sysconf()
  #endif
  
 +gmx_hw_info_t::gmx_hw_info_t(std::unique_ptr<gmx::CpuInfo>          cpuInfo,
 +                             std::unique_ptr<gmx::HardwareTopology> hardwareTopology)
 +    : cpuInfo(std::move(cpuInfo)),
 +      hardwareTopology(std::move(hardwareTopology))
 +{
 +}
 +
 +gmx_hw_info_t::~gmx_hw_info_t()
 +{
 +    free_gpu_info(&gpu_info);
 +}
 +
  namespace gmx
  {
  
  #    define _SC_NPROCESSORS_CONF _SC_NPROC_CONF
  #endif
  
 -//! Constant used to help minimize preprocessed code
 -static const bool bGPUBinary     = GMX_GPU != GMX_GPU_NONE;
 -
 -/*! \brief The hwinfo structure (common to all threads in this process).
 +/*! \brief Information about the hardware of all nodes (common to all threads in this process).
   *
 - * \todo This should become a shared_ptr owned by e.g. Mdrunner::runner()
 - * that is shared across any threads as needed (e.g. for thread-MPI). That
 - * offers about the same run time performance as we get here, and avoids a
 - * lot of custom code.
 - */
 -static std::unique_ptr<gmx_hw_info_t> hwinfo_g;
 -//! A reference counter for the hwinfo structure
 -static int                            n_hwinfo = 0;
 -//! A lock to protect the hwinfo structure
 -static tMPI_Thread_mutex_t            hw_info_lock = TMPI_THREAD_MUTEX_INITIALIZER;
 + * This information is constructed only when required, but thereafter
 + * its lifetime is that of the whole process, potentially across
 + * multiple successive simulation parts. Only one thread should be
 + * able to create the information, but thereafter all threads can
 + * read it without e.g. needing a std::shared_ptr to guarantee that
 + * it outlives them. */
 +static std::unique_ptr<gmx_hw_info_t> g_hardwareInfo;
 +//! A mutex to protect the hwinfo structure
 +static Mutex                          g_hardwareInfoMutex;
  
  //! Detect GPUs, if that makes sense to attempt.
 -static void gmx_detect_gpus(const gmx::MDLogger            &mdlog,
 -                            const PhysicalNodeCommunicator &physicalNodeComm)
 +static void gmx_detect_gpus(const gmx::MDLogger              &mdlog,
 +                            const PhysicalNodeCommunicator   &physicalNodeComm,
 +                            compat::not_null<gmx_hw_info_t *> hardwareInfo)
  {
 -    hwinfo_g->gpu_info.bDetectGPUs =
 -        (bGPUBinary && getenv("GMX_DISABLE_GPU_DETECTION") == nullptr);
 -    if (!hwinfo_g->gpu_info.bDetectGPUs)
 +    hardwareInfo->gpu_info.bDetectGPUs = canPerformGpuDetection();
 +
 +    if (!hardwareInfo->gpu_info.bDetectGPUs)
      {
          return;
      }
      isMasterRankOfPhysicalNode = (physicalNodeComm.rank_ == 0);
  #else
      // We choose to run the detection only once with thread-MPI and
 -    // use reference counting on the results of the detection to
 -    // enforce it. But we can assert that this is true.
 -    GMX_RELEASE_ASSERT(n_hwinfo == 0, "Cannot run GPU detection on non-master thread-MPI ranks");
 +    // use a mutex to enforce it.
      GMX_UNUSED_VALUE(physicalNodeComm);
      isMasterRankOfPhysicalNode = true;
  #endif
      if (isMasterRankOfPhysicalNode || allRanksMustDetectGpus)
      {
          std::string errorMessage;
 -        gpusCanBeDetected = canDetectGpus(&errorMessage);
 +        gpusCanBeDetected = isGpuDetectionFunctional(&errorMessage);
          if (!gpusCanBeDetected)
          {
              GMX_LOG(mdlog.info).asParagraph().appendTextFormatted(
  
      if (gpusCanBeDetected)
      {
 -        findGpus(&hwinfo_g->gpu_info);
 +        findGpus(&hardwareInfo->gpu_info);
          // No need to tell the user anything at this point, they get a
          // hardware report later.
      }
      if (!allRanksMustDetectGpus)
      {
          /* Broadcast the GPU info to the other ranks within this node */
 -        MPI_Bcast(&hwinfo_g->gpu_info.n_dev, 1, MPI_INT, 0, physicalNodeComm.comm_);
 +        MPI_Bcast(&hardwareInfo->gpu_info.n_dev, 1, MPI_INT, 0, physicalNodeComm.comm_);
  
 -        if (hwinfo_g->gpu_info.n_dev > 0)
 +        if (hardwareInfo->gpu_info.n_dev > 0)
          {
              int dev_size;
  
 -            dev_size = hwinfo_g->gpu_info.n_dev*sizeof_gpu_dev_info();
 +            dev_size = hardwareInfo->gpu_info.n_dev*sizeof_gpu_dev_info();
  
              if (!isMasterRankOfPhysicalNode)
              {
 -                hwinfo_g->gpu_info.gpu_dev =
 +                hardwareInfo->gpu_info.gpu_dev =
                      (struct gmx_device_info_t *)malloc(dev_size);
              }
 -            MPI_Bcast(hwinfo_g->gpu_info.gpu_dev, dev_size, MPI_BYTE,
 +            MPI_Bcast(hardwareInfo->gpu_info.gpu_dev, dev_size, MPI_BYTE,
                        0, physicalNodeComm.comm_);
 -            MPI_Bcast(&hwinfo_g->gpu_info.n_dev_compatible, 1, MPI_INT,
 +            MPI_Bcast(&hardwareInfo->gpu_info.n_dev_compatible, 1, MPI_INT,
                        0, physicalNodeComm.comm_);
          }
      }
  #endif
  }
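
For reference, the broadcast above is a standard two-step MPI idiom: share the device count first so receivers can size their buffers, then ship the payload. A minimal standalone sketch, where DeviceRecord and shareDetection are hypothetical stand-ins for the gmx_device_info_t handling, and a real run would pass a node-local communicator rather than MPI_COMM_WORLD:

    #include <vector>

    #include <mpi.h>

    // Hypothetical plain-old-data record standing in for gmx_device_info_t.
    struct DeviceRecord
    {
        int id;
        int computeUnits;
    };

    void shareDetection(MPI_Comm nodeComm)
    {
        int rank;
        MPI_Comm_rank(nodeComm, &rank);

        int                       numDevices = 0;
        std::vector<DeviceRecord> devices;
        if (rank == 0)
        {
            devices    = { { 0, 64 } }; // pretend detection found one device
            numDevices = static_cast<int>(devices.size());
        }
        // Share the count first, so the other ranks can size their buffers,
        MPI_Bcast(&numDevices, 1, MPI_INT, 0, nodeComm);
        devices.resize(numDevices);
        // then ship the records themselves as raw bytes, as above.
        MPI_Bcast(devices.data(), numDevices * static_cast<int>(sizeof(DeviceRecord)),
                  MPI_BYTE, 0, nodeComm);
    }

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        shareDetection(MPI_COMM_WORLD); // real code uses a node-local communicator
        MPI_Finalize();
    }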
  
 -//! Reduce the locally collected \p hwinfo_g over MPI ranks
 -static void gmx_collect_hardware_mpi(const gmx::CpuInfo             &cpuInfo,
 -                                     const PhysicalNodeCommunicator &physicalNodeComm)
 +//! Reduce the locally collected \p hardwareInfo over MPI ranks
 +static void gmx_collect_hardware_mpi(const gmx::CpuInfo               &cpuInfo,
 +                                     const PhysicalNodeCommunicator   &physicalNodeComm,
 +                                     compat::not_null<gmx_hw_info_t *> hardwareInfo)
  {
 -    const int  ncore        = hwinfo_g->hardwareTopology->numberOfCores();
 -    /* Zen has family=23, for now we treat future AMD CPUs like Zen */
 -    const bool cpuIsAmdZen1 = (cpuInfo.vendor() == CpuInfo::Vendor::Amd &&
 -                               cpuInfo.family() == 23 &&
 -                               (cpuInfo.model() == 1 || cpuInfo.model() == 17 ||
 -                                cpuInfo.model() == 8 || cpuInfo.model() == 24));
 -
 +    const int  ncore        = hardwareInfo->hardwareTopology->numberOfCores();
-     /* Zen has family=23, for now we treat future AMD CPUs like Zen
-      * and Hygon Dhyana like Zen */
-     const bool cpuIsAmdZen  = ((cpuInfo.vendor() == CpuInfo::Vendor::Amd &&
-                                 cpuInfo.family() >= 23) ||
++    /* Zen1 is assumed for:
++     * - family=23 with the below listed models;
++     * - Hygon as vendor.
++     */
++    const bool cpuIsAmdZen1 = ((cpuInfo.vendor() == CpuInfo::Vendor::Amd &&
++                                cpuInfo.family() == 23 &&
++                                (cpuInfo.model() == 1 || cpuInfo.model() == 17 ||
++                                 cpuInfo.model() == 8 || cpuInfo.model() == 24)) ||
 +                               cpuInfo.vendor() == CpuInfo::Vendor::Hygon);
  #if GMX_LIB_MPI
      int       nhwthread, ngpu, i;
      int       gpu_hash;
  
 -    nhwthread = hwinfo_g->nthreads_hw_avail;
 -    ngpu      = hwinfo_g->gpu_info.n_dev_compatible;
 +    nhwthread = hardwareInfo->nthreads_hw_avail;
 +    ngpu      = hardwareInfo->gpu_info.n_dev_compatible;
      /* Create a unique hash of the GPU type(s) in this node */
      gpu_hash  = 0;
     /* Here it might be better to loop over only the compatible GPUs, but we
       * don't have that information available and it would also require
       * removing the device ID from the device info string.
       */
 -    for (i = 0; i < hwinfo_g->gpu_info.n_dev; i++)
 +    for (i = 0; i < hardwareInfo->gpu_info.n_dev; i++)
      {
          char stmp[STRLEN];
  
           * the GPUs affects the hash. Also two identical GPUs won't give
           * a gpu_hash of zero after XORing.
           */
 -        get_gpu_device_info_string(stmp, hwinfo_g->gpu_info, i);
 +        get_gpu_device_info_string(stmp, hardwareInfo->gpu_info, i);
          gpu_hash ^= gmx_string_fullhash_func(stmp, gmx_string_hash_init);
      }
  
          maxMinLocal[7]  = -maxMinLocal[2];
          maxMinLocal[8]  = -maxMinLocal[3];
          maxMinLocal[9]  = -maxMinLocal[4];
-         maxMinLocal[10] = (cpuIsAmdZen ? 1 : 0);
+         maxMinLocal[10] = (cpuIsAmdZen1 ? 1 : 0);
  
          MPI_Allreduce(maxMinLocal.data(), maxMinReduced.data(), maxMinLocal.size(),
                        MPI_INT, MPI_MAX, MPI_COMM_WORLD);
      }
  
 -    hwinfo_g->nphysicalnode       = countsReduced[0];
 -    hwinfo_g->ncore_tot           = countsReduced[1];
 -    hwinfo_g->ncore_min           = -maxMinReduced[5];
 -    hwinfo_g->ncore_max           = maxMinReduced[0];
 -    hwinfo_g->nhwthread_tot       = countsReduced[2];
 -    hwinfo_g->nhwthread_min       = -maxMinReduced[6];
 -    hwinfo_g->nhwthread_max       = maxMinReduced[1];
 -    hwinfo_g->ngpu_compatible_tot = countsReduced[3];
 -    hwinfo_g->ngpu_compatible_min = -maxMinReduced[7];
 -    hwinfo_g->ngpu_compatible_max = maxMinReduced[2];
 -    hwinfo_g->simd_suggest_min    = -maxMinReduced[8];
 -    hwinfo_g->simd_suggest_max    = maxMinReduced[3];
 -    hwinfo_g->bIdenticalGPUs      = (maxMinReduced[4] == -maxMinReduced[9]);
 -    hwinfo_g->haveAmdZen1Cpu      = (maxMinReduced[10] > 0);
 +    hardwareInfo->nphysicalnode       = countsReduced[0];
 +    hardwareInfo->ncore_tot           = countsReduced[1];
 +    hardwareInfo->ncore_min           = -maxMinReduced[5];
 +    hardwareInfo->ncore_max           = maxMinReduced[0];
 +    hardwareInfo->nhwthread_tot       = countsReduced[2];
 +    hardwareInfo->nhwthread_min       = -maxMinReduced[6];
 +    hardwareInfo->nhwthread_max       = maxMinReduced[1];
 +    hardwareInfo->ngpu_compatible_tot = countsReduced[3];
 +    hardwareInfo->ngpu_compatible_min = -maxMinReduced[7];
 +    hardwareInfo->ngpu_compatible_max = maxMinReduced[2];
 +    hardwareInfo->simd_suggest_min    = -maxMinReduced[8];
 +    hardwareInfo->simd_suggest_max    = maxMinReduced[3];
 +    hardwareInfo->bIdenticalGPUs      = (maxMinReduced[4] == -maxMinReduced[9]);
-     hardwareInfo->haveAmdZenCpu       = (maxMinReduced[10] > 0);
++    hardwareInfo->haveAmdZen1Cpu      = (maxMinReduced[10] > 0);
  #else
      /* All ranks use the same pointer, protected by a mutex in the caller */
 -    hwinfo_g->nphysicalnode       = 1;
 -    hwinfo_g->ncore_tot           = ncore;
 -    hwinfo_g->ncore_min           = ncore;
 -    hwinfo_g->ncore_max           = ncore;
 -    hwinfo_g->nhwthread_tot       = hwinfo_g->nthreads_hw_avail;
 -    hwinfo_g->nhwthread_min       = hwinfo_g->nthreads_hw_avail;
 -    hwinfo_g->nhwthread_max       = hwinfo_g->nthreads_hw_avail;
 -    hwinfo_g->ngpu_compatible_tot = hwinfo_g->gpu_info.n_dev_compatible;
 -    hwinfo_g->ngpu_compatible_min = hwinfo_g->gpu_info.n_dev_compatible;
 -    hwinfo_g->ngpu_compatible_max = hwinfo_g->gpu_info.n_dev_compatible;
 -    hwinfo_g->simd_suggest_min    = static_cast<int>(simdSuggested(cpuInfo));
 -    hwinfo_g->simd_suggest_max    = static_cast<int>(simdSuggested(cpuInfo));
 -    hwinfo_g->bIdenticalGPUs      = TRUE;
 -    hwinfo_g->haveAmdZen1Cpu      = cpuIsAmdZen1;
 +    hardwareInfo->nphysicalnode       = 1;
 +    hardwareInfo->ncore_tot           = ncore;
 +    hardwareInfo->ncore_min           = ncore;
 +    hardwareInfo->ncore_max           = ncore;
 +    hardwareInfo->nhwthread_tot       = hardwareInfo->nthreads_hw_avail;
 +    hardwareInfo->nhwthread_min       = hardwareInfo->nthreads_hw_avail;
 +    hardwareInfo->nhwthread_max       = hardwareInfo->nthreads_hw_avail;
 +    hardwareInfo->ngpu_compatible_tot = hardwareInfo->gpu_info.n_dev_compatible;
 +    hardwareInfo->ngpu_compatible_min = hardwareInfo->gpu_info.n_dev_compatible;
 +    hardwareInfo->ngpu_compatible_max = hardwareInfo->gpu_info.n_dev_compatible;
 +    hardwareInfo->simd_suggest_min    = static_cast<int>(simdSuggested(cpuInfo));
 +    hardwareInfo->simd_suggest_max    = static_cast<int>(simdSuggested(cpuInfo));
 +    hardwareInfo->bIdenticalGPUs      = TRUE;
-     hardwareInfo->haveAmdZenCpu       = cpuIsAmdZen;
++    hardwareInfo->haveAmdZen1Cpu      = cpuIsAmdZen1;
      GMX_UNUSED_VALUE(physicalNodeComm);
  #endif
  }
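
The reduction above packs minima and maxima into a single MPI_Allreduce by also storing each quantity negated: with MPI_MAX, max(x) comes out directly and min(x) falls out as -max(-x). A self-contained sketch of just that trick, with a made-up localCores value standing in for the detected counts:

    #include <array>
    #include <cstdio>

    #include <mpi.h>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        const int          localCores = 4 + rank; // stand-in for detected core count
        // Pack each value twice: once as-is, once negated.
        std::array<int, 2> sendBuf    = { localCores, -localCores };
        std::array<int, 2> recvBuf;
        MPI_Allreduce(sendBuf.data(), recvBuf.data(), 2, MPI_INT, MPI_MAX,
                      MPI_COMM_WORLD);
        const int coresMax = recvBuf[0];  // max(x)
        const int coresMin = -recvBuf[1]; // min(x) == -max(-x)
        if (rank == 0)
        {
            std::printf("cores: min %d, max %d\n", coresMin, coresMax);
        }
        MPI_Finalize();
    }
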
@@@ -420,48 -422,54 +423,48 @@@ hardwareTopologyDoubleCheckDetection(co
  gmx_hw_info_t *gmx_detect_hardware(const gmx::MDLogger            &mdlog,
                                     const PhysicalNodeCommunicator &physicalNodeComm)
  {
 -    int ret;
 -
 -    /* make sure no one else is doing the same thing */
 -    ret = tMPI_Thread_mutex_lock(&hw_info_lock);
 -    if (ret != 0)
 +    // By construction, only one thread ever runs hardware detection,
 +    // but we may as well guard against problems if that ever changes.
 +    // Taking the lock early ensures that exactly one thread can
 +    // attempt to construct g_hardwareInfo.
 +    lock_guard<Mutex> lock(g_hardwareInfoMutex);
 +
 +    // If we already have the information, just return a handle to it.
 +    if (g_hardwareInfo != nullptr)
      {
 -        gmx_fatal(FARGS, "Error locking hwinfo mutex: %s", strerror(errno));
 +        return g_hardwareInfo.get();
      }
  
 -    /* only initialize the hwinfo structure if it is not already initalized */
 -    if (n_hwinfo == 0)
 -    {
 -        hwinfo_g = compat::make_unique<gmx_hw_info_t>();
 -
 -        /* TODO: We should also do CPU hardware detection only once on each
 -         * physical node and broadcast it, instead of do it on every MPI rank. */
 -        hwinfo_g->cpuInfo             = new gmx::CpuInfo(gmx::CpuInfo::detect());
 +    // Make the new hardwareInfo in a temporary.
 +    hardwareTopologyPrepareDetection();
  
 -        hardwareTopologyPrepareDetection();
 -        hwinfo_g->hardwareTopology    = new gmx::HardwareTopology(gmx::HardwareTopology::detect());
 +    // TODO: We should also do CPU hardware detection only once on each
 +    // physical node and broadcast it, instead of doing it on every MPI rank.
 +    auto hardwareInfo = std::make_unique<gmx_hw_info_t>(std::make_unique<CpuInfo>(CpuInfo::detect()),
 +                                                        std::make_unique<HardwareTopology>(HardwareTopology::detect()));
  
 -        // If we detected the topology on this system, double-check that it makes sense
 -        if (hwinfo_g->hardwareTopology->isThisSystem())
 -        {
 -            hardwareTopologyDoubleCheckDetection(mdlog, *(hwinfo_g->hardwareTopology));
 -        }
 +    // If we detected the topology on this system, double-check that it makes sense
 +    if (hardwareInfo->hardwareTopology->isThisSystem())
 +    {
 +        hardwareTopologyDoubleCheckDetection(mdlog, *hardwareInfo->hardwareTopology);
 +    }
  
 -        // TODO: Get rid of this altogether.
 -        hwinfo_g->nthreads_hw_avail = hwinfo_g->hardwareTopology->machine().logicalProcessorCount;
 +    // TODO: Get rid of this altogether.
 +    hardwareInfo->nthreads_hw_avail = hardwareInfo->hardwareTopology->machine().logicalProcessorCount;
  
 -        /* detect GPUs */
 -        hwinfo_g->gpu_info.n_dev            = 0;
 -        hwinfo_g->gpu_info.n_dev_compatible = 0;
 -        hwinfo_g->gpu_info.gpu_dev          = nullptr;
 +    // Detect GPUs
 +    hardwareInfo->gpu_info.n_dev            = 0;
 +    hardwareInfo->gpu_info.n_dev_compatible = 0;
 +    hardwareInfo->gpu_info.gpu_dev          = nullptr;
  
 -        gmx_detect_gpus(mdlog, physicalNodeComm);
 -        gmx_collect_hardware_mpi(*hwinfo_g->cpuInfo, physicalNodeComm);
 -    }
 -    /* increase the reference counter */
 -    n_hwinfo++;
 +    gmx_detect_gpus(mdlog, physicalNodeComm, compat::make_not_null(hardwareInfo));
 +    gmx_collect_hardware_mpi(*hardwareInfo->cpuInfo, physicalNodeComm, compat::make_not_null(hardwareInfo));
  
 -    ret = tMPI_Thread_mutex_unlock(&hw_info_lock);
 -    if (ret != 0)
 -    {
 -        gmx_fatal(FARGS, "Error unlocking hwinfo mutex: %s", strerror(errno));
 -    }
 +    // Now that the temporary is fully constructed, swap it to become
 +    // the real thing.
 +    g_hardwareInfo.swap(hardwareInfo);
  
 -    return hwinfo_g.get();
 +    return g_hardwareInfo.get();
  }
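
gmx_detect_hardware() above is the construct-once pattern: take the mutex, return early if the singleton already exists, build the new object fully in a temporary, and only then publish it. A minimal sketch of the same pattern, assuming gmx::Mutex and gmx::lock_guard behave like their std counterparts; all names below are hypothetical:

    #include <memory>
    #include <mutex>

    // Hypothetical stand-in for gmx_hw_info_t.
    struct HardwareInfo
    {
        int coreCount = 0;
    };

    static std::unique_ptr<HardwareInfo> g_info;
    static std::mutex                    g_infoMutex;

    HardwareInfo* detectHardware()
    {
        // Taking the lock first means at most one thread ever constructs.
        std::lock_guard<std::mutex> lock(g_infoMutex);
        if (g_info != nullptr)
        {
            return g_info.get(); // later callers just receive the handle
        }
        auto info = std::make_unique<HardwareInfo>(); // build in a temporary,
        info->coreCount = 4;                          // run the (expensive) detection,
        g_info.swap(info);                            // publish only once complete.
        return g_info.get();
    }

    int main()
    {
        HardwareInfo* first  = detectHardware(); // constructs
        HardwareInfo* second = detectHardware(); // returns the same handle
        return first == second ? 0 : 1;
    }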
  
  bool compatibleGpusFound(const gmx_gpu_info_t &gpu_info)
      return gpu_info.n_dev_compatible > 0;
  }
  
 -void gmx_hardware_info_free()
 -{
 -    int ret;
 -
 -    ret = tMPI_Thread_mutex_lock(&hw_info_lock);
 -    if (ret != 0)
 -    {
 -        gmx_fatal(FARGS, "Error locking hwinfo mutex: %s", strerror(errno));
 -    }
 -
 -    /* decrease the reference counter */
 -    n_hwinfo--;
 -
 -
 -    if (n_hwinfo < 0)
 -    {
 -        gmx_incons("n_hwinfo < 0");
 -    }
 -
 -    if (n_hwinfo == 0)
 -    {
 -        delete hwinfo_g->cpuInfo;
 -        delete hwinfo_g->hardwareTopology;
 -        free_gpu_info(&hwinfo_g->gpu_info);
 -        hwinfo_g.reset();
 -    }
 -
 -    ret = tMPI_Thread_mutex_unlock(&hw_info_lock);
 -    if (ret != 0)
 -    {
 -        gmx_fatal(FARGS, "Error unlocking hwinfo mutex: %s", strerror(errno));
 -    }
 -}
 -
  }  // namespace gmx
index 567ba3a70787328ae02db34f83f97d26580eae5f,190cccc306b39917947107c37ada320e44b29ff4..15ebfa788e99d036148b5bcc0b85b965761b8f9d
@@@ -35,7 -35,6 +35,7 @@@
  #ifndef GMX_HARDWARE_HWINFO_H
  #define GMX_HARDWARE_HWINFO_H
  
 +#include <memory>
  #include <string>
  #include <vector>
  
@@@ -50,29 -49,19 +50,29 @@@ class HardwareTopology
  
  /* Hardware information structure with CPU and GPU information.
   * It is initialized by gmx_detect_hardware().
 - * NOTE: this structure may only contain structures that are globally valid
 - *       (i.e. must be able to be shared among all threads) */
 + * NOTE: this structure may only contain structures that are
 + *       valid over the whole process (i.e. must be able to
 + *       be shared among all threads) */
  struct gmx_hw_info_t
  {
 +    gmx_hw_info_t(std::unique_ptr<gmx::CpuInfo>          cpuInfo,
 +                  std::unique_ptr<gmx::HardwareTopology> hardwareTopology);
 +    ~gmx_hw_info_t();
 +
      /* Data for our local physical node */
 -    struct gmx_gpu_info_t gpu_info;                /* Information about GPUs detected in the system */
 +    //! Information about GPUs detected on this physical node
 +    gmx_gpu_info_t gpu_info;
 +
 +    /*! \brief Number of hardware threads available.
 +     *
 +     * This number is based on the number of CPUs reported as
 +     * available by the OS at the time of detection. */
 +    int nthreads_hw_avail;
  
 -    int                   nthreads_hw_avail;       /* Number of hardware threads available; this number
 -                                                      is based on the number of CPUs reported as available
 -                                                      by the OS at the time of detection. */
  
 -    const gmx::CpuInfo *         cpuInfo;          /* Information about CPU capabilities */
 -    const gmx::HardwareTopology *hardwareTopology; /* Information about hardware topology */
 +    std::unique_ptr<gmx::CpuInfo>          cpuInfo;           /* Information about CPU capabilities */
 +    std::unique_ptr<gmx::HardwareTopology> hardwareTopology;  /* Information about hardware topology */
 +
  
      /* Data reduced through MPI over all physical nodes */
      int                 nphysicalnode;       /* Number of physical nodes */
      int                 simd_suggest_max;    /* Highest SIMD instruction set supported by at least one rank */
  
      gmx_bool            bIdenticalGPUs;      /* TRUE if all ranks have the same type(s) and order of GPUs */
-     bool                haveAmdZenCpu;       /* TRUE when at least one CPU in any of the nodes is AMD Zen arch */
+     bool                haveAmdZen1Cpu;      /* TRUE when at least one CPU in any of the nodes is AMD Zen of the first generation */
  };
  
  
  /* The options for the thread affinity setting, default: auto */
 -enum {
 -    threadaffSEL, threadaffAUTO, threadaffON, threadaffOFF, threadaffNR
 +enum class ThreadAffinity
 +{
 +    Select,
 +    Auto,
 +    On,
 +    Off,
 +    Count
  };
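
The change from the old threadaff* integer enum to an enum class buys scoped enumerator names and removes implicit conversions to and from int. A small self-contained sketch of what that now prevents; applyAffinity is an illustrative function, not GROMACS API:

    enum class ThreadAffinity // mirrors the enum class above
    {
        Select,
        Auto,
        On,
        Off,
        Count
    };

    // Illustrative consumer, not GROMACS API.
    void applyAffinity(ThreadAffinity affinity)
    {
        if (affinity == ThreadAffinity::Auto)
        {
            // decide pinning automatically
        }
    }

    int main()
    {
        applyAffinity(ThreadAffinity::On); // explicit, scoped enumerator
        // applyAffinity(1);               // error: no implicit conversion from int
        // int i = ThreadAffinity::Off;    // error: enumerators no longer decay to int
        return 0;
    }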
  
  /*! \internal \brief Threading and GPU options; can be set automatically or by the user
  struct gmx_hw_opt_t
  {
      //! Total number of threads requested (thread-MPI + OpenMP).
 -    int           nthreads_tot = 0;
 +    int            nthreads_tot = 0;
      //! Number of thread-MPI threads requested.
 -    int           nthreads_tmpi = 0;
 +    int            nthreads_tmpi = 0;
      //! Number of OpenMP threads requested.
 -    int           nthreads_omp = 0;
 +    int            nthreads_omp = 0;
      //! Number of OpenMP threads to use on PME_only ranks.
 -    int           nthreads_omp_pme = 0;
 +    int            nthreads_omp_pme = 0;
      //! Thread affinity switch, see enum above.
 -    int           thread_affinity = threadaffSEL;
 +    ThreadAffinity threadAffinity = ThreadAffinity::Select;
      //! Logical core pinning stride.
 -    int           core_pinning_stride = 0;
 +    int            core_pinning_stride = 0;
      //! Logical core pinning offset.
 -    int           core_pinning_offset = 0;
 +    int            core_pinning_offset = 0;
      //! Empty, or a string provided by the user declaring (unique) GPU IDs available for mdrun to use.
 -    std::string   gpuIdsAvailable = "";
 +    std::string    gpuIdsAvailable = "";
      //! Empty, or a string provided by the user mapping GPU tasks to devices.
 -    std::string   userGpuTaskAssignment = "";
 +    std::string    userGpuTaskAssignment = "";
      //! Tells whether mdrun is free to choose the total number of threads (by choosing the number of OpenMP and/or thread-MPI threads).
 -    bool          totNumThreadsIsAuto;
 +    bool           totNumThreadsIsAuto;
  };
  
  #endif
index e87e6363c5a09f786daab12d104d321ce2cc8553,0000000000000000000000000000000000000000..8c0b73608336637a0b20eb7ee7d3260c224e0ab2
mode 100644,000000..100644
--- /dev/null
@@@ -1,514 -1,0 +1,514 @@@
 +/*
 + * This file is part of the GROMACS molecular simulation package.
 + *
 + * Copyright (c) 2019, by the GROMACS development team, led by
 + * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 + * and including many others, as listed in the AUTHORS file in the
 + * top-level source directory and at http://www.gromacs.org.
 + *
 + * GROMACS is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public License
 + * as published by the Free Software Foundation; either version 2.1
 + * of the License, or (at your option) any later version.
 + *
 + * GROMACS is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with GROMACS; if not, see
 + * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 + *
 + * If you want to redistribute modifications to GROMACS, please
 + * consider that scientific software is very special. Version
 + * control is crucial - bugs must be traceable. We will be happy to
 + * consider code for inclusion in the official distribution, but
 + * derived work must not be called official GROMACS. Details are found
 + * in the README & COPYING files - if they are missing, get the
 + * official version at http://www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the research papers on the package. Check out http://www.gromacs.org.
 + */
 +/*! \internal \file
 + * \brief Common functions for the different NBNXN GPU implementations.
 + *
 + * \author Berk Hess <hess@kth.se>
 + *
 + * \ingroup module_nbnxm
 + */
 +
 +#include "gmxpre.h"
 +
 +#include "gromacs/domdec/domdec.h"
 +#include "gromacs/domdec/domdec_struct.h"
 +#include "gromacs/hardware/hw_info.h"
 +#include "gromacs/mdlib/gmx_omp_nthreads.h"
 +#include "gromacs/mdtypes/commrec.h"
 +#include "gromacs/mdtypes/forcerec.h"
 +#include "gromacs/mdtypes/inputrec.h"
 +#include "gromacs/nbnxm/atomdata.h"
 +#include "gromacs/nbnxm/gpu_data_mgmt.h"
 +#include "gromacs/nbnxm/nbnxm.h"
 +#include "gromacs/nbnxm/nbnxm_geometry.h"
 +#include "gromacs/nbnxm/nbnxm_simd.h"
 +#include "gromacs/nbnxm/pairlist.h"
 +#include "gromacs/nbnxm/pairlist_tuning.h"
 +#include "gromacs/simd/simd.h"
 +#include "gromacs/utility/fatalerror.h"
 +#include "gromacs/utility/logger.h"
 +
 +#include "gpu_types.h"
 +#include "grid.h"
 +#include "pairlistset.h"
 +#include "pairlistsets.h"
 +#include "pairsearch.h"
 +
 +namespace Nbnxm
 +{
 +
 +/*! \brief Resources on which the non-bonded kernels can be executed */
 +enum class NonbondedResource : int
 +{
 +    Cpu,
 +    Gpu,
 +    EmulateGpu
 +};
 +
 +/*! \brief Returns whether CPU SIMD support exists for the given inputrec
 + *
 + * If the return value is FALSE, a fallback message is logged via mdlog.
 + */
 +static gmx_bool nbnxn_simd_supported(const gmx::MDLogger &mdlog,
 +                                     const t_inputrec    *ir)
 +{
 +    if (ir->vdwtype == evdwPME && ir->ljpme_combination_rule == eljpmeLB)
 +    {
 +        /* LJ PME with LB combination rule does 7 mesh operations.
 +         * This is so slow that we don't compile SIMD non-bonded kernels
 +         * for it. */
 +        GMX_LOG(mdlog.warning).asParagraph().appendText("LJ-PME with Lorentz-Berthelot is not supported with SIMD kernels, falling back to plain C kernels");
 +        return FALSE;
 +    }
 +
 +    return TRUE;
 +}
 +
 +/*! \brief Returns the most suitable CPU kernel type and Ewald handling */
 +static KernelSetup
 +pick_nbnxn_kernel_cpu(const t_inputrec gmx_unused    *ir,
 +                      const gmx_hw_info_t gmx_unused &hardwareInfo)
 +{
 +    KernelSetup kernelSetup;
 +
 +    if (!GMX_SIMD)
 +    {
 +        kernelSetup.kernelType         = KernelType::Cpu4x4_PlainC;
 +        kernelSetup.ewaldExclusionType = EwaldExclusionType::Table;
 +    }
 +    else
 +    {
 +#ifdef GMX_NBNXN_SIMD_4XN
 +        kernelSetup.kernelType = KernelType::Cpu4xN_Simd_4xN;
 +#endif
 +#ifdef GMX_NBNXN_SIMD_2XNN
 +        kernelSetup.kernelType = KernelType::Cpu4xN_Simd_2xNN;
 +#endif
 +
 +#if defined GMX_NBNXN_SIMD_2XNN && defined GMX_NBNXN_SIMD_4XN
 +        /* We need to choose if we want 2x(N+N) or 4xN kernels.
 +         * This is based on the SIMD acceleration choice and CPU information
 +         * detected at runtime.
 +         *
 +         * 4xN calculates more (zero) interactions, but has less pair-search
 +         * work and much better kernel instruction scheduling.
 +         *
 +         * Up till now we have only seen that on Intel Sandy/Ivy Bridge,
 +         * which doesn't have FMA, both the analytical and tabulated Ewald
 +         * kernels have similar pair rates for 4x8 and 2x(4+4), so we choose
 +         * 2x(4+4) because it results in significantly fewer pairs.
 +         * For RF, the raw pair rate of the 4x8 kernel is higher than 2x(4+4),
 +         * 10% with HT, 50% without HT. As we currently don't detect the actual
 +         * use of HT, use 4x8 to avoid a potential performance hit.
 +         * On Intel Haswell 4x8 is always faster.
 +         */
 +        kernelSetup.kernelType = KernelType::Cpu4xN_Simd_4xN;
 +
 +        if (!GMX_SIMD_HAVE_FMA && (EEL_PME_EWALD(ir->coulombtype) ||
 +                                   EVDW_PME(ir->vdwtype)))
 +        {
 +            /* We have Ewald kernels without FMA (Intel Sandy/Ivy Bridge).
 +             * There are enough instructions to make 2x(4+4) efficient.
 +             */
 +            kernelSetup.kernelType = KernelType::Cpu4xN_Simd_2xNN;
 +        }
 +
-         if (hardwareInfo.haveAmdZenCpu)
++        if (hardwareInfo.haveAmdZen1Cpu)
 +        {
 +            /* One 256-bit FMA per cycle makes 2xNN faster */
 +            kernelSetup.kernelType = KernelType::Cpu4xN_Simd_2xNN;
 +        }
 +#endif      /* GMX_NBNXN_SIMD_2XNN && GMX_NBNXN_SIMD_4XN */
 +
 +
 +        if (getenv("GMX_NBNXN_SIMD_4XN") != nullptr)
 +        {
 +#ifdef GMX_NBNXN_SIMD_4XN
 +            kernelSetup.kernelType = KernelType::Cpu4xN_Simd_4xN;
 +#else
 +            gmx_fatal(FARGS, "SIMD 4xN kernels requested, but GROMACS has been compiled without support for these kernels");
 +#endif
 +        }
 +        if (getenv("GMX_NBNXN_SIMD_2XNN") != nullptr)
 +        {
 +#ifdef GMX_NBNXN_SIMD_2XNN
 +            kernelSetup.kernelType = KernelType::Cpu4xN_Simd_2xNN;
 +#else
 +            gmx_fatal(FARGS, "SIMD 2x(N+N) kernels requested, but GROMACS has been compiled without support for these kernels");
 +#endif
 +        }
 +
 +        /* Analytical Ewald exclusion correction is only an option in
 +         * the SIMD kernel.
 +         * Since table lookups don't parallelize with SIMD, analytical
 +         * will probably always be faster for a SIMD width of 8 or more.
 +         * With FMA, analytical is sometimes faster for a width of 4 as well.
 +         * In single precision, this is faster on Bulldozer.
 +         * On AMD Zen, tabulated Ewald kernels are faster for all 4 combinations
 +         * of single or double precision and 128- or 256-bit AVX2.
 +         */
 +        if (
 +#if GMX_SIMD
 +            (GMX_SIMD_REAL_WIDTH >= 8 ||
 +             (GMX_SIMD_REAL_WIDTH >= 4 && GMX_SIMD_HAVE_FMA && !GMX_DOUBLE)) &&
 +#endif
-             !hardwareInfo.haveAmdZenCpu)
++            !hardwareInfo.haveAmdZen1Cpu)
 +        {
 +            kernelSetup.ewaldExclusionType = EwaldExclusionType::Analytical;
 +        }
 +        else
 +        {
 +            kernelSetup.ewaldExclusionType = EwaldExclusionType::Table;
 +        }
 +        if (getenv("GMX_NBNXN_EWALD_TABLE") != nullptr)
 +        {
 +            kernelSetup.ewaldExclusionType = EwaldExclusionType::Table;
 +        }
 +        if (getenv("GMX_NBNXN_EWALD_ANALYTICAL") != nullptr)
 +        {
 +            kernelSetup.ewaldExclusionType = EwaldExclusionType::Analytical;
 +        }
 +
 +    }
 +
 +    return kernelSetup;
 +}
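
pick_nbnxn_kernel_cpu() above settles on a heuristic default and then lets the GMX_NBNXN_SIMD_4XN and GMX_NBNXN_SIMD_2XNN environment variables override it. A stripped-down sketch of that override pattern; KernelFlavour and pickKernel are hypothetical names, and the real code additionally calls gmx_fatal() when the requested kernel was not compiled in:

    #include <cstdlib>

    enum class KernelFlavour // hypothetical; stands in for Nbnxm::KernelType
    {
        Simd4xN,
        Simd2xNN
    };

    KernelFlavour pickKernel(bool haveFma)
    {
        // Heuristic default first...
        KernelFlavour flavour =
            haveFma ? KernelFlavour::Simd4xN : KernelFlavour::Simd2xNN;
        // ...then explicit user overrides win, checked in the same order
        // as above, so GMX_NBNXN_SIMD_2XNN prevails if both are set.
        if (std::getenv("GMX_NBNXN_SIMD_4XN") != nullptr)
        {
            flavour = KernelFlavour::Simd4xN;
        }
        if (std::getenv("GMX_NBNXN_SIMD_2XNN") != nullptr)
        {
            flavour = KernelFlavour::Simd2xNN;
        }
        return flavour;
    }

    int main()
    {
        return pickKernel(true) == KernelFlavour::Simd4xN ? 0 : 1;
    }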
 +
 +const char *lookup_kernel_name(const KernelType kernelType)
 +{
 +    const char *returnvalue = nullptr;
 +    switch (kernelType)
 +    {
 +        case KernelType::NotSet:
 +            returnvalue = "not set";
 +            break;
 +        case KernelType::Cpu4x4_PlainC:
 +            returnvalue = "plain C";
 +            break;
 +        case KernelType::Cpu4xN_Simd_4xN:
 +        case KernelType::Cpu4xN_Simd_2xNN:
 +#if GMX_SIMD
 +            returnvalue = "SIMD";
 +#else  // GMX_SIMD
 +            returnvalue = "not available";
 +#endif // GMX_SIMD
 +            break;
 +        case KernelType::Gpu8x8x8: returnvalue        = "GPU"; break;
 +        case KernelType::Cpu8x8x8_PlainC: returnvalue = "plain C"; break;
 +
 +        default:
 +            gmx_fatal(FARGS, "Illegal kernel type selected");
 +    }
 +    return returnvalue;
 +}
 +
 +/*! \brief Returns the most suitable kernel type and Ewald handling */
 +static KernelSetup
 +pick_nbnxn_kernel(const gmx::MDLogger     &mdlog,
 +                  gmx_bool                 use_simd_kernels,
 +                  const gmx_hw_info_t     &hardwareInfo,
 +                  const NonbondedResource &nonbondedResource,
 +                  const t_inputrec        *ir,
 +                  gmx_bool                 bDoNonbonded)
 +{
 +    KernelSetup kernelSetup;
 +
 +    if (nonbondedResource == NonbondedResource::EmulateGpu)
 +    {
 +        kernelSetup.kernelType         = KernelType::Cpu8x8x8_PlainC;
 +        kernelSetup.ewaldExclusionType = EwaldExclusionType::DecidedByGpuModule;
 +
 +        if (bDoNonbonded)
 +        {
 +            GMX_LOG(mdlog.warning).asParagraph().appendText("Emulating a GPU run on the CPU (slow)");
 +        }
 +    }
 +    else if (nonbondedResource == NonbondedResource::Gpu)
 +    {
 +        kernelSetup.kernelType         = KernelType::Gpu8x8x8;
 +        kernelSetup.ewaldExclusionType = EwaldExclusionType::DecidedByGpuModule;
 +    }
 +    else
 +    {
 +        if (use_simd_kernels &&
 +            nbnxn_simd_supported(mdlog, ir))
 +        {
 +            kernelSetup = pick_nbnxn_kernel_cpu(ir, hardwareInfo);
 +        }
 +        else
 +        {
 +            kernelSetup.kernelType         = KernelType::Cpu4x4_PlainC;
 +            kernelSetup.ewaldExclusionType = EwaldExclusionType::Analytical;
 +        }
 +    }
 +
 +    if (bDoNonbonded)
 +    {
 +        GMX_LOG(mdlog.info).asParagraph().appendTextFormatted(
 +                "Using %s %dx%d nonbonded short-range kernels",
 +                lookup_kernel_name(kernelSetup.kernelType),
 +                IClusterSizePerKernelType[kernelSetup.kernelType],
 +                JClusterSizePerKernelType[kernelSetup.kernelType]);
 +
 +        if (KernelType::Cpu4x4_PlainC == kernelSetup.kernelType ||
 +            KernelType::Cpu8x8x8_PlainC == kernelSetup.kernelType)
 +        {
 +            GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
 +                    "WARNING: Using the slow %s kernels. This should\n"
 +                    "not happen during routine usage on supported platforms.",
 +                    lookup_kernel_name(kernelSetup.kernelType));
 +        }
 +    }
 +
 +    GMX_RELEASE_ASSERT(kernelSetup.kernelType != KernelType::NotSet &&
 +                       kernelSetup.ewaldExclusionType != EwaldExclusionType::NotSet,
 +                       "All kernel setup parameters should be set here");
 +
 +    return kernelSetup;
 +}
 +
 +} // namespace Nbnxm
 +
 +PairlistSets::PairlistSets(const PairlistParams &pairlistParams,
 +                           const bool            haveMultipleDomains,
 +                           const int             minimumIlistCountForGpuBalancing) :
 +    params_(pairlistParams),
 +    minimumIlistCountForGpuBalancing_(minimumIlistCountForGpuBalancing)
 +{
 +    localSet_ = std::make_unique<PairlistSet>(Nbnxm::InteractionLocality::Local,
 +                                              params_);
 +
 +    if (haveMultipleDomains)
 +    {
 +        nonlocalSet_ = std::make_unique<PairlistSet>(Nbnxm::InteractionLocality::NonLocal,
 +                                                     params_);
 +    }
 +}
 +
 +namespace Nbnxm
 +{
 +
 +/*! \brief Returns the minimum i-list count for balancing, based on the GPU in use, or on the GMX_NB_MIN_CI env. var. when set */
 +static int getMinimumIlistCountForGpuBalancing(gmx_nbnxn_gpu_t *nbnxmGpu)
 +{
 +    int minimumIlistCount;
 +
 +    if (const char *env = getenv("GMX_NB_MIN_CI"))
 +    {
 +        char *end;
 +
 +        minimumIlistCount = strtol(env, &end, 10);
 +        if (!end || (*end != 0) || minimumIlistCount < 0)
 +        {
 +            gmx_fatal(FARGS, "Invalid value passed in GMX_NB_MIN_CI=%s, non-negative integer required", env);
 +        }
 +
 +        if (debug)
 +        {
 +            fprintf(debug, "Neighbor-list balancing parameter: %d (passed as env. var.)\n",
 +                    minimumIlistCount);
 +        }
 +    }
 +    else
 +    {
 +        minimumIlistCount = gpu_min_ci_balanced(nbnxmGpu);
 +        if (debug)
 +        {
 +            fprintf(debug, "Neighbor-list balancing parameter: %d (auto-adjusted to the number of GPU multi-processors)\n",
 +                    minimumIlistCount);
 +        }
 +    }
 +
 +    return minimumIlistCount;
 +}
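
getMinimumIlistCountForGpuBalancing() above validates GMX_NB_MIN_CI with strtol's end-pointer: the parse counts only if the whole string was consumed and the value is non-negative. The same idiom as a hypothetical reusable helper; this sketch returns a fallback where the code above calls gmx_fatal():

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical helper; the code above fails hard on malformed input
    // instead of falling back.
    int getEnvInt(const char* name, int fallback)
    {
        const char* value = std::getenv(name);
        if (value == nullptr || *value == '\0')
        {
            return fallback;
        }
        char*      end    = nullptr;
        const long parsed = std::strtol(value, &end, 10);
        // Accept only if strtol consumed the whole string and the value is sane.
        if (end == value || *end != '\0' || parsed < 0)
        {
            return fallback;
        }
        return static_cast<int>(parsed);
    }

    int main()
    {
        std::printf("GMX_NB_MIN_CI -> %d\n", getEnvInt("GMX_NB_MIN_CI", 0));
        return 0;
    }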
 +
 +std::unique_ptr<nonbonded_verlet_t>
 +init_nb_verlet(const gmx::MDLogger     &mdlog,
 +               gmx_bool                 bFEP_NonBonded,
 +               const t_inputrec        *ir,
 +               const t_forcerec        *fr,
 +               const t_commrec         *cr,
 +               const gmx_hw_info_t     &hardwareInfo,
 +               const gmx_device_info_t *deviceInfo,
 +               const gmx_mtop_t        *mtop,
 +               matrix                   box,
 +               gmx_wallcycle           *wcycle)
 +{
 +    const bool          emulateGpu = (getenv("GMX_EMULATE_GPU") != nullptr);
 +    const bool          useGpu     = deviceInfo != nullptr;
 +
 +    GMX_RELEASE_ASSERT(!(emulateGpu && useGpu), "When GPU emulation is active, there cannot be a GPU assignment");
 +
 +    NonbondedResource nonbondedResource;
 +    if (useGpu)
 +    {
 +        nonbondedResource = NonbondedResource::Gpu;
 +    }
 +    else if (emulateGpu)
 +    {
 +        nonbondedResource = NonbondedResource::EmulateGpu;
 +    }
 +    else
 +    {
 +        nonbondedResource = NonbondedResource::Cpu;
 +    }
 +
 +    Nbnxm::KernelSetup kernelSetup =
 +        pick_nbnxn_kernel(mdlog, fr->use_simd_kernels, hardwareInfo,
 +                          nonbondedResource, ir,
 +                          fr->bNonbonded);
 +
 +    const bool          haveMultipleDomains = (DOMAINDECOMP(cr) && cr->dd->nnodes > 1);
 +
 +    PairlistParams      pairlistParams(kernelSetup.kernelType,
 +                                       bFEP_NonBonded,
 +                                       ir->rlist,
 +                                       havePPDomainDecomposition(cr));
 +
 +    setupDynamicPairlistPruning(mdlog, ir, mtop, box, fr->ic,
 +                                &pairlistParams);
 +
 +    int      enbnxninitcombrule;
 +    if (fr->ic->vdwtype == evdwCUT &&
 +        (fr->ic->vdw_modifier == eintmodNONE ||
 +         fr->ic->vdw_modifier == eintmodPOTSHIFT) &&
 +        getenv("GMX_NO_LJ_COMB_RULE") == nullptr)
 +    {
 +        /* Plain LJ cut-off: we can optimize with combination rules */
 +        enbnxninitcombrule = enbnxninitcombruleDETECT;
 +    }
 +    else if (fr->ic->vdwtype == evdwPME)
 +    {
 +        /* LJ-PME: we need to use a combination rule for the grid */
 +        if (fr->ljpme_combination_rule == eljpmeGEOM)
 +        {
 +            enbnxninitcombrule = enbnxninitcombruleGEOM;
 +        }
 +        else
 +        {
 +            enbnxninitcombrule = enbnxninitcombruleLB;
 +        }
 +    }
 +    else
 +    {
 +        /* We use a full combination matrix: no rule required */
 +        enbnxninitcombrule = enbnxninitcombruleNONE;
 +    }
 +
 +    auto pinPolicy = (useGpu ? gmx::PinningPolicy::PinnedIfSupported : gmx::PinningPolicy::CannotBePinned);
 +
 +    auto nbat      = std::make_unique<nbnxn_atomdata_t>(pinPolicy);
 +
 +    int  minimumNumEnergyGroupNonbonded = ir->opts.ngener;
 +    if (ir->opts.ngener - ir->nwall == 1)
 +    {
 +        /* With only one non-wall energy group we do not need energy-group
 +         * support in the non-bonded kernels, since all non-bonded energy
 +         * contributions go to the first element of the energy group matrix.
 +         */
 +        minimumNumEnergyGroupNonbonded = 1;
 +    }
 +    nbnxn_atomdata_init(mdlog,
 +                        nbat.get(),
 +                        kernelSetup.kernelType,
 +                        enbnxninitcombrule,
 +                        fr->ntype, fr->nbfp,
 +                        minimumNumEnergyGroupNonbonded,
 +                        (useGpu || emulateGpu) ? 1 : gmx_omp_nthreads_get(emntNonbonded));
 +
 +    gmx_nbnxn_gpu_t *gpu_nbv = nullptr;
 +    int              minimumIlistCountForGpuBalancing = 0;
 +    if (useGpu)
 +    {
 +        /* init the NxN GPU data; the last argument tells whether we'll have
 +         * both local and non-local NB calculation on GPU */
 +        gpu_nbv = gpu_init(deviceInfo,
 +                           fr->ic,
 +                           pairlistParams,
 +                           nbat.get(),
 +                           cr->nodeid,
 +                           haveMultipleDomains);
 +
 +        minimumIlistCountForGpuBalancing = getMinimumIlistCountForGpuBalancing(gpu_nbv);
 +    }
 +
 +    auto pairlistSets =
 +        std::make_unique<PairlistSets>(pairlistParams,
 +                                       haveMultipleDomains,
 +                                       minimumIlistCountForGpuBalancing);
 +
 +    auto pairSearch =
 +        std::make_unique<PairSearch>(ir->ePBC,
 +                                     EI_TPI(ir->eI),
 +                                     DOMAINDECOMP(cr) ? &cr->dd->nc : nullptr,
 +                                     DOMAINDECOMP(cr) ? domdec_zones(cr->dd) : nullptr,
 +                                     pairlistParams.pairlistType,
 +                                     bFEP_NonBonded,
 +                                     gmx_omp_nthreads_get(emntPairsearch),
 +                                     pinPolicy);
 +
 +    return std::make_unique<nonbonded_verlet_t>(std::move(pairlistSets),
 +                                                std::move(pairSearch),
 +                                                std::move(nbat),
 +                                                kernelSetup,
 +                                                gpu_nbv,
 +                                                wcycle);
 +}
 +
 +} // namespace Nbnxm
 +
 +nonbonded_verlet_t::nonbonded_verlet_t(std::unique_ptr<PairlistSets>      pairlistSets,
 +                                       std::unique_ptr<PairSearch>        pairSearch,
 +                                       std::unique_ptr<nbnxn_atomdata_t>  nbat_in,
 +                                       const Nbnxm::KernelSetup          &kernelSetup,
 +                                       gmx_nbnxn_gpu_t                   *gpu_nbv_ptr,
 +                                       gmx_wallcycle                     *wcycle) :
 +    pairlistSets_(std::move(pairlistSets)),
 +    pairSearch_(std::move(pairSearch)),
 +    nbat(std::move(nbat_in)),
 +    kernelSetup_(kernelSetup),
 +    wcycle_(wcycle),
 +    gpu_nbv(gpu_nbv_ptr)
 +{
 +    GMX_RELEASE_ASSERT(pairlistSets_, "Need valid pairlistSets");
 +    GMX_RELEASE_ASSERT(pairSearch_, "Need valid search object");
 +    GMX_RELEASE_ASSERT(nbat, "Need valid atomdata object");
 +}
 +
 +nonbonded_verlet_t::~nonbonded_verlet_t()
 +{
 +    Nbnxm::gpu_free(gpu_nbv);
 +}