//! Maximum number of threads in a block (for __launch_bounds__)
constexpr static int c_maxThreadsPerBlock = c_threadsPerBlock;
-/*! \brief Scaling matrix struct.
- *
- * \todo Should be generalized.
- */
-struct ScalingMatrix
-{
- float xx, yy, zz, yx, zx, zy;
-};
-
__launch_bounds__(c_maxThreadsPerBlock) __global__
static void scaleCoordinates_kernel(const int numAtoms,
float3* __restrict__ gm_x,
wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
wallcycle_sub_start(wcycle_, ewcsLAUNCH_GPU_UPDATE_CONSTRAIN);
- ScalingMatrix mu;
- mu.xx = scalingMatrix[XX][XX];
- mu.yy = scalingMatrix[YY][YY];
- mu.zz = scalingMatrix[ZZ][ZZ];
- mu.yx = scalingMatrix[YY][XX];
- mu.zx = scalingMatrix[ZZ][XX];
- mu.zy = scalingMatrix[ZZ][YY];
+ ScalingMatrix mu(scalingMatrix);
const auto kernelArgs = prepareGpuKernelArguments(
scaleCoordinates_kernel, coordinateScalingKernelLaunchConfig_, &numAtoms_, &d_x_, &mu);
wallcycle_start_nocount(wcycle_, ewcLAUNCH_GPU);
wallcycle_sub_start(wcycle_, ewcsLAUNCH_GPU_UPDATE_CONSTRAIN);
- ScalingMatrix mu;
- mu.xx = scalingMatrix[XX][XX];
- mu.yy = scalingMatrix[YY][YY];
- mu.zz = scalingMatrix[ZZ][ZZ];
- mu.yx = scalingMatrix[YY][XX];
- mu.zx = scalingMatrix[ZZ][XX];
- mu.zy = scalingMatrix[ZZ][YY];
+ ScalingMatrix mu(scalingMatrix);
const auto kernelArgs = prepareGpuKernelArguments(
scaleCoordinates_kernel, coordinateScalingKernelLaunchConfig_, &numAtoms_, &d_v_, &mu);
gmx_wallcycle* wcycle_ = nullptr;
};
+/*! \brief Scaling matrix struct.
+ *
+ * Stores only the six elements the scaling kernel reads: the diagonal
+ * (xx, yy, zz) and the below-diagonal off-diagonal elements (yx, zx, zy),
+ * as plain floats so the struct can be passed by value as a kernel argument.
+ *
+ * NOTE(review): in this patch excerpt the struct appears after the launch
+ * code that constructs it — presumably the hunks come from different files
+ * (.h vs .cu); confirm the definition precedes all uses in the actual tree.
+ *
+ * \todo Should be generalized.
+ */
+struct ScalingMatrix
+{
+    //! Extracts the six used elements from a 3x3 \c matrix (indexed m[row][col]).
+    ScalingMatrix(const matrix m) :
+        xx(m[XX][XX]),
+        yy(m[YY][YY]),
+        zz(m[ZZ][ZZ]),
+        yx(m[YY][XX]),
+        zx(m[ZZ][XX]),
+        zy(m[ZZ][YY])
+    {
+    }
+    //! Diagonal and below-diagonal elements of the scaling matrix.
+    float xx, yy, zz, yx, zx, zy;
+};
+
} // namespace gmx
#endif // GMX_MDLIB_UPDATE_CONSTRAIN_GPU_IMPL_H