SYCL: Fully switch to atomic_ref
author    Andrey Alekseenko <al42and@gmail.com>
          Sat, 28 Aug 2021 06:26:10 +0000 (06:26 +0000)
committer Mark Abraham <mark.j.abraham@gmail.com>
          Sat, 28 Aug 2021 06:26:10 +0000 (06:26 +0000)
src/gromacs/gpu_utils/gmxsycl.h
src/gromacs/gpu_utils/sycl_kernel_utils.h
src/gromacs/mdlib/settle_gpu_internal_sycl.cpp
src/gromacs/nbnxm/sycl/nbnxm_sycl_kernel.cpp

diff --git a/src/gromacs/gpu_utils/gmxsycl.h b/src/gromacs/gpu_utils/gmxsycl.h
index 353ca45116aaf4fec6e68980b7e9d765853f0593..2b07b927a7649288778546911ef69f5fb26ca5f3 100644
@@ -132,7 +132,7 @@ auto group_reduce(Args&&... args) -> decltype(detail::origin::reduce(std::forwar
     return detail::origin::reduce(std::forward<Args>(args)...);
 }
 #elif GMX_SYCL_HIPSYCL
-// No atomic_ref in hipSYCL yet (2021-02-22)
+using detail::origin::atomic_ref;
 using detail::origin::group_any_of;
 using detail::origin::group_reduce;
 #else
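
sycl_2020::atomic_ref was already usable under DPC++ (the old helper below relied on it); now that hipSYCL provides sycl::atomic_ref as well, the stale placeholder comment gives way to the same re-export, and downstream code can use one pattern on both back-ends. A minimal sketch of that pattern (a_buffer, i and delta are illustrative names, not code from this commit; it mirrors the atomicFetchAdd helper added below):

    // Relaxed, device-scope atomic add on a float in global memory; after this
    // change the same lines compile with both DPC++ and hipSYCL.
    sycl_2020::atomic_ref<float, sycl_2020::memory_order::relaxed,
                          sycl_2020::memory_scope::device,
                          cl::sycl::access::address_space::global_space>
            elementRef(a_buffer[i]);
    elementRef.fetch_add(delta);
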
diff --git a/src/gromacs/gpu_utils/sycl_kernel_utils.h b/src/gromacs/gpu_utils/sycl_kernel_utils.h
index 8a2d6bace13724e5a5b1bec77fd110e87b1d4449..9a3c041ef8867aeb58c3198b05df0e3c8610e74e 100644
  *  \author Andrey Alekseenko <al42and@gmail.com>
  */
 
-/*! \brief Access mode to use for atomic accessors.
- *
- * Intel DPCPP compiler has \c sycl::atomic_ref, but has no \c sycl::atomic_fetch_add for floats.
- * However, \c atomic_ref can not be constructed from \c sycl::atomic, so we can not use
- * atomic accessors. Thus, we use \c mode::read_write accessors and \c atomic_ref.
- *
- * hipSYCL does not have \c sycl::atomic_ref, but has \c sycl::atomic_fetch_add for floats, which
- * requires using atomic accessors. Thus, we use \c mode::atomic accessors.
- *
- * The \ref atomicFetchAdd function could be used for doing operations on such accessors.
- */
-static constexpr auto mode_atomic = GMX_SYCL_DPCPP ? cl::sycl::access::mode::read_write :
-                                                   /* GMX_SYCL_HIPSYCL */ cl::sycl::access::mode::atomic;
-
 //! \brief Full warp active thread mask used in CUDA warp-level primitives.
 static constexpr unsigned int c_cudaFullWarpMask = 0xffffffff;
 
 /*! \brief Convenience wrapper to do atomic addition to a global buffer.
- *
- * The implementation differences between DPCPP and hipSYCL are explained in \ref mode_atomic.
  */
-template<class IndexType>
-static inline void atomicFetchAdd(DeviceAccessor<float, mode_atomic> acc, const IndexType idx, const float val)
+template<typename T, sycl_2020::memory_scope MemoryScope = sycl_2020::memory_scope::device>
+static inline void atomicFetchAdd(T& val, const T delta)
 {
-#if GMX_SYCL_DPCPP
-    sycl_2020::atomic_ref<float, sycl_2020::memory_order::relaxed, sycl_2020::memory_scope::device, cl::sycl::access::address_space::global_space>
-            fout_atomic(acc[idx]);
-    fout_atomic.fetch_add(val);
-#elif GMX_SYCL_HIPSYCL
-#    ifdef SYCL_DEVICE_ONLY
-    /* While there is support for float atomics on device, the host implementation uses
-     * Clang's __atomic_fetch_add intrinsic, that, at least in Clang 11, does not support
-     * floats. Luckily, we don't want to run on host. */
-    // The pragmas below can be removed once we switch to sycl::atomic
-#        pragma clang diagnostic push
-#        pragma clang diagnostic ignored "-Wdeprecated-declarations"
-    acc[idx].fetch_add(val);
-#        pragma clang diagnostic push
-#    else
-    GMX_ASSERT(false, "hipSYCL host codepath not supported");
-    GMX_UNUSED_VALUE(val);
-    GMX_UNUSED_VALUE(acc);
-    GMX_UNUSED_VALUE(idx);
-#    endif
-#endif
+    sycl_2020::atomic_ref<T, sycl_2020::memory_order::relaxed, MemoryScope, cl::sycl::access::address_space::global_space> ref(
+            val);
+    ref.fetch_add(delta);
 }
 
+/*! \brief Convenience wrapper to do atomic loads from a global buffer.
+ */
+template<typename T, sycl_2020::memory_scope MemoryScope = sycl_2020::memory_scope::device>
+static inline T atomicLoad(T& val)
+{
+    sycl_2020::atomic_ref<T, sycl_2020::memory_order::relaxed, MemoryScope, cl::sycl::access::address_space::global_space> ref(
+            val);
+    return ref.load();
+}
+
+
 /*! \brief Issue an intra sub-group barrier.
  *
  * Equivalent with CUDA's \c syncwarp(c_cudaFullWarpMask).
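
With both back-ends now providing atomic_ref, the helper takes a reference to the element itself instead of an (accessor, index) pair, and the per-back-end #if branches, including the clang diagnostic pragmas, disappear. Accessors switch from the removed mode_atomic to plain mode::read_write, because atomic_ref is constructed from an ordinary reference. A before/after sketch of a call site (names taken from the nbnxm hunks further down):

    // Before: back-end-specific helper taking (accessor, index, value).
    //     atomicFetchAdd(a_f, 3 * aidx + tidxi, f[0]);
    // After: the caller indexes the read_write accessor and passes the element
    // by reference; one code path serves DPC++ and hipSYCL.
    atomicFetchAdd(a_f[3 * aidx + tidxi], f[0]);

    // The new atomicLoad helper follows the same shape; this read-back is
    // illustrative only and does not appear among this commit's call sites.
    const float accumulated = atomicLoad(a_f[3 * aidx + tidxi]);
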
diff --git a/src/gromacs/mdlib/settle_gpu_internal_sycl.cpp b/src/gromacs/mdlib/settle_gpu_internal_sycl.cpp
index 3e5b56fdf309e8cd8f8c46f190b42486d3bca814..6a32e856c0dd3e60a311e15567151567dcd159a5 100644
@@ -69,7 +69,7 @@ auto settleKernel(cl::sycl::handler&                                           c
                   DeviceAccessor<Float3, mode::read_write>                     a_xp,
                   float                                                        invdt,
                   OptionalAccessor<Float3, mode::read_write, updateVelocities> a_v,
-                  OptionalAccessor<float, mode_atomic, computeVirial>          a_virialScaled,
+                  OptionalAccessor<float, mode::read_write, computeVirial>     a_virialScaled,
                   PbcAiuc                                                      pbcAiuc)
 {
     cgh.require(a_settles);
@@ -340,7 +340,7 @@ auto settleKernel(cl::sycl::handler&                                           c
             // First 6 threads in the block add the 6 components of virial to the global memory address
             if (threadIdx < 6)
             {
-                atomicFetchAdd(a_virialScaled, threadIdx, sm_threadVirial[threadIdx * blockSize]);
+                atomicFetchAdd(a_virialScaled[threadIdx], sm_threadVirial[threadIdx * blockSize]);
             }
         }
     };
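
The virial accumulation above is the new helper applied to an element of the mode::read_write accessor; inlining atomicFetchAdd from sycl_kernel_utils.h, it is equivalent to the following sketch:

    if (threadIdx < 6)
    {
        sycl_2020::atomic_ref<float, sycl_2020::memory_order::relaxed,
                              sycl_2020::memory_scope::device,
                              cl::sycl::access::address_space::global_space>
                virialRef(a_virialScaled[threadIdx]);
        virialRef.fetch_add(sm_threadVirial[threadIdx * blockSize]);
    }
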
diff --git a/src/gromacs/nbnxm/sycl/nbnxm_sycl_kernel.cpp b/src/gromacs/nbnxm/sycl/nbnxm_sycl_kernel.cpp
index 7304922c09f19ae5ea9a54a8b490190b837c040f..00957bb1b9cfd6c2cacccd599e032ff7993f23f3 100644
@@ -306,11 +306,11 @@ static inline float interpolateCoulombForceR(const DeviceAccessor<float, mode::r
  * c_clSize consecutive threads hold the force components of a j-atom which we
  * reduced in log2(cl_Size) steps using shift and atomically accumulate them into \p a_f.
  */
-static inline void reduceForceJShuffle(Float3                             f,
-                                       const cl::sycl::nd_item<1>         itemIdx,
-                                       const int                          tidxi,
-                                       const int                          aidx,
-                                       DeviceAccessor<float, mode_atomic> a_f)
+static inline void reduceForceJShuffle(Float3                                  f,
+                                       const cl::sycl::nd_item<1>              itemIdx,
+                                       const int                               tidxi,
+                                       const int                               aidx,
+                                       DeviceAccessor<float, mode::read_write> a_f)
 {
     static_assert(c_clSize == 8 || c_clSize == 4);
     sycl_2020::sub_group sg = itemIdx.get_sub_group();
@@ -337,7 +337,7 @@ static inline void reduceForceJShuffle(Float3                             f,
 
     if (tidxi < 3)
     {
-        atomicFetchAdd(a_f, 3 * aidx + tidxi, f[0]);
+        atomicFetchAdd(a_f[3 * aidx + tidxi], f[0]);
     }
 }
 
@@ -389,12 +389,12 @@ static inline float groupReduce(const cl::sycl::nd_item<1> itemIdx,
  * TODO: implement binary reduction flavor for the case where cl_Size is power of two.
  */
 static inline void reduceForceJGeneric(cl::sycl::accessor<float, 1, mode::read_write, target::local> sm_buf,
-                                       Float3                             f,
-                                       const cl::sycl::nd_item<1>         itemIdx,
-                                       const int                          tidxi,
-                                       const int                          tidxj,
-                                       const int                          aidx,
-                                       DeviceAccessor<float, mode_atomic> a_f)
+                                       Float3                                  f,
+                                       const cl::sycl::nd_item<1>              itemIdx,
+                                       const int                               tidxi,
+                                       const int                               tidxj,
+                                       const int                               aidx,
+                                       DeviceAccessor<float, mode::read_write> a_f)
 {
     static constexpr int sc_fBufferStride = c_clSizeSq;
     int                  tidx             = tidxi + tidxj * c_clSize;
@@ -415,7 +415,7 @@ static inline void reduceForceJGeneric(cl::sycl::accessor<float, 1, mode::read_w
             fSum += sm_buf[sc_fBufferStride * tidxi + j];
         }
 
-        atomicFetchAdd(a_f, 3 * aidx + tidxi, fSum);
+        atomicFetchAdd(a_f[3 * aidx + tidxi], fSum);
     }
 }
 
@@ -424,11 +424,11 @@ static inline void reduceForceJGeneric(cl::sycl::accessor<float, 1, mode::read_w
  */
 static inline void reduceForceJ(cl::sycl::accessor<float, 1, mode::read_write, target::local> sm_buf,
                                 Float3                                                        f,
-                                const cl::sycl::nd_item<1>         itemIdx,
-                                const int                          tidxi,
-                                const int                          tidxj,
-                                const int                          aidx,
-                                DeviceAccessor<float, mode_atomic> a_f)
+                                const cl::sycl::nd_item<1>              itemIdx,
+                                const int                               tidxi,
+                                const int                               tidxj,
+                                const int                               aidx,
+                                DeviceAccessor<float, mode::read_write> a_f)
 {
     if constexpr (!gmx::isPowerOfTwo(c_nbnxnGpuNumClusterPerSupercluster))
     {
@@ -452,13 +452,13 @@ static inline void reduceForceJ(cl::sycl::accessor<float, 1, mode::read_write, t
 static inline void reduceForceIAndFShift(cl::sycl::accessor<float, 1, mode::read_write, target::local> sm_buf,
                                          const Float3 fCiBuf[c_nbnxnGpuNumClusterPerSupercluster],
                                          const bool   calcFShift,
-                                         const cl::sycl::nd_item<1>         itemIdx,
-                                         const int                          tidxi,
-                                         const int                          tidxj,
-                                         const int                          sci,
-                                         const int                          shift,
-                                         DeviceAccessor<float, mode_atomic> a_f,
-                                         DeviceAccessor<float, mode_atomic> a_fShift)
+                                         const cl::sycl::nd_item<1>              itemIdx,
+                                         const int                               tidxi,
+                                         const int                               tidxj,
+                                         const int                               sci,
+                                         const int                               shift,
+                                         DeviceAccessor<float, mode::read_write> a_f,
+                                         DeviceAccessor<float, mode::read_write> a_fShift)
 {
     // must have power of two elements in fCiBuf
     static_assert(gmx::isPowerOfTwo(c_nbnxnGpuNumClusterPerSupercluster));
@@ -502,7 +502,7 @@ static inline void reduceForceIAndFShift(cl::sycl::accessor<float, 1, mode::read
         {
             const float f =
                     sm_buf[tidxj * bufStride + tidxi] + sm_buf[tidxj * bufStride + c_clSize + tidxi];
-            atomicFetchAdd(a_f, 3 * aidx + tidxj, f);
+            atomicFetchAdd(a_f[3 * aidx + tidxj], f);
             if (calcFShift)
             {
                 fShiftBuf += f;
@@ -531,12 +531,12 @@ static inline void reduceForceIAndFShift(cl::sycl::accessor<float, 1, mode::read
                 fShiftBuf += sycl_2020::shift_left(sg, fShiftBuf, 2);
                 if (tidxi == 0)
                 {
-                    atomicFetchAdd(a_fShift, 3 * shift + tidxj, fShiftBuf);
+                    atomicFetchAdd(a_fShift[3 * shift + tidxj], fShiftBuf);
                 }
             }
             else
             {
-                atomicFetchAdd(a_fShift, 3 * shift + tidxj, fShiftBuf);
+                atomicFetchAdd(a_fShift[3 * shift + tidxj], fShiftBuf);
             }
         }
     }
@@ -546,13 +546,13 @@ static inline void reduceForceIAndFShift(cl::sycl::accessor<float, 1, mode::read
  *
  */
 template<bool doPruneNBL, bool doCalcEnergies, enum ElecType elecType, enum VdwType vdwType>
-auto nbnxmKernel(cl::sycl::handler&                                   cgh,
-                 DeviceAccessor<Float4, mode::read>                   a_xq,
-                 DeviceAccessor<float, mode_atomic>                   a_f,
-                 DeviceAccessor<Float3, mode::read>                   a_shiftVec,
-                 DeviceAccessor<float, mode_atomic>                   a_fShift,
-                 OptionalAccessor<float, mode_atomic, doCalcEnergies> a_energyElec,
-                 OptionalAccessor<float, mode_atomic, doCalcEnergies> a_energyVdw,
+auto nbnxmKernel(cl::sycl::handler&                                        cgh,
+                 DeviceAccessor<Float4, mode::read>                        a_xq,
+                 DeviceAccessor<float, mode::read_write>                   a_f,
+                 DeviceAccessor<Float3, mode::read>                        a_shiftVec,
+                 DeviceAccessor<float, mode::read_write>                   a_fShift,
+                 OptionalAccessor<float, mode::read_write, doCalcEnergies> a_energyElec,
+                 OptionalAccessor<float, mode::read_write, doCalcEnergies> a_energyVdw,
                  DeviceAccessor<nbnxn_cj4_t, doPruneNBL ? mode::read_write : mode::read> a_plistCJ4,
                  DeviceAccessor<nbnxn_sci_t, mode::read>                                 a_plistSci,
                  DeviceAccessor<nbnxn_excl_t, mode::read>                    a_plistExcl,
@@ -1053,8 +1053,8 @@ auto nbnxmKernel(cl::sycl::handler&                                   cgh,
 
             if (tidx == 0)
             {
-                atomicFetchAdd(a_energyVdw, 0, energyVdwGroup);
-                atomicFetchAdd(a_energyElec, 0, energyElecGroup);
+                atomicFetchAdd(a_energyVdw[0], energyVdwGroup);
+                atomicFetchAdd(a_energyElec[0], energyElecGroup);
             }
         }
     };
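
All call sites in this change rely on the helper's default memory_scope::device, which is what accumulation across work-groups into a_f, a_fShift and the energy buffers requires. The scope is a template parameter, so it can also be spelled out explicitly; the following is just the energy accumulation above with the default made explicit (illustrative, not part of the commit):

    atomicFetchAdd<float, sycl_2020::memory_scope::device>(a_energyVdw[0], energyVdwGroup);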