/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef GMX_SIMD_IMPL_X86_MIC_UTIL_FLOAT_H
#define GMX_SIMD_IMPL_X86_MIC_UTIL_FLOAT_H

#include <cassert>
#include <cstdint>

#include <immintrin.h>

#include "gromacs/utility/basedefinitions.h"

#include "impl_x86_mic_simd_float.h"

namespace gmx
{

// On MIC it is better to use scatter operations, so we define the load routines
// that use a SIMD offset variable first.
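
// Loads four consecutive floats starting at base + align*offset[i] for every
// SIMD lane and transposes them into v0..v3. base must be 16-byte aligned
// and align a multiple of 4, as asserted below.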
template <int align>
static inline void gmx_simdcall
gatherLoadBySimdIntTranspose(const float *  base,
                             SimdFInt32     simdoffset,
                             SimdFloat *    v0,
                             SimdFloat *    v1,
                             SimdFloat *    v2,
                             SimdFloat *    v3)
{
    assert(std::size_t(base) % 16 == 0);
    assert(align % 4 == 0);

    // All instructions might be latency ~4 on MIC, so we use shifts where we
    // only need a single instruction (since the shift parameter is an immediate),
    // but multiplication otherwise.
    if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdFInt32(align);
    }

    v0->simdInternal_ = _mm512_i32gather_ps(simdoffset.simdInternal_, base,   sizeof(float));
    v1->simdInternal_ = _mm512_i32gather_ps(simdoffset.simdInternal_, base+1, sizeof(float));
    v2->simdInternal_ = _mm512_i32gather_ps(simdoffset.simdInternal_, base+2, sizeof(float));
    v3->simdInternal_ = _mm512_i32gather_ps(simdoffset.simdInternal_, base+3, sizeof(float));
}
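
// Unaligned pair gather. For align == 2 the scaling can be merged into the
// gather's scale parameter (at most 8 bytes, i.e. 2*sizeof(float)); all other
// alignments pre-scale the SIMD offsets first.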
template <int align>
static inline void gmx_simdcall
gatherLoadUBySimdIntTranspose(const float *  base,
                              SimdFInt32     simdoffset,
                              SimdFloat *    v0,
                              SimdFloat *    v1)
{
    // All instructions might be latency ~4 on MIC, so we use shifts where we
    // only need a single instruction (since the shift parameter is an immediate),
    // but multiplication otherwise.
    // For align == 2 we can merge the constant into the scale parameter,
    // which can take constants up to 8 in total.
    if (align == 2)
    {
        v0->simdInternal_ = _mm512_i32gather_ps(simdoffset.simdInternal_, base,   align * sizeof(float));
        v1->simdInternal_ = _mm512_i32gather_ps(simdoffset.simdInternal_, base+1, align * sizeof(float));
    }
    else
    {
        if (align == 4)
        {
            simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
        }
        else if (align == 8)
        {
            simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
        }
        else
        {
            simdoffset = simdoffset * SimdFInt32(align);
        }
        v0->simdInternal_ = _mm512_i32gather_ps(simdoffset.simdInternal_, base,   sizeof(float));
        v1->simdInternal_ = _mm512_i32gather_ps(simdoffset.simdInternal_, base+1, sizeof(float));
    }
}
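
// Aligned pair gather; after asserting the alignment requirements this simply
// forwards to the unaligned routine above.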
template <int align>
static inline void gmx_simdcall
gatherLoadBySimdIntTranspose(const float *  base,
                             SimdFInt32     simdoffset,
                             SimdFloat *    v0,
                             SimdFloat *    v1)
{
    assert(std::size_t(base) % 8 == 0);
    assert(align % 2 == 0);
    gatherLoadUBySimdIntTranspose<align>(base, simdoffset, v0, v1);
}
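
// The two gatherLoadTranspose() wrappers below load the integer offsets into a
// SIMD register and forward to the SIMD-offset gather routines above.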
template <int align>
static inline void gmx_simdcall
gatherLoadTranspose(const float *        base,
                    const std::int32_t   offset[],
                    SimdFloat *          v0,
                    SimdFloat *          v1,
                    SimdFloat *          v2,
                    SimdFloat *          v3)
{
    gatherLoadBySimdIntTranspose<align>(base, simdLoad(offset, SimdFInt32Tag()), v0, v1, v2, v3);
}

template <int align>
static inline void gmx_simdcall
gatherLoadTranspose(const float *        base,
                    const std::int32_t   offset[],
                    SimdFloat *          v0,
                    SimdFloat *          v1)
{
    gatherLoadBySimdIntTranspose<align>(base, simdLoad(offset, SimdFInt32Tag()), v0, v1);
}
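
// Pairs are best loaded with align == 2, where the pair gather above can fold
// the scaling into the hardware scale parameter at no extra cost.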
static const int c_simdBestPairAlignmentFloat = 2;
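
// Gathers (x, y, z) triplets from base + align*offset[i] into v0..v2. base
// itself need not be aligned, but the offset array must be 64-byte aligned.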
template <int align>
static inline void gmx_simdcall
gatherLoadUTranspose(const float *        base,
                     const std::int32_t   offset[],
                     SimdFloat *          v0,
                     SimdFloat *          v1,
                     SimdFloat *          v2)
{
    SimdFInt32 simdoffset;

    assert(std::size_t(offset) % 64 == 0);

    simdoffset = simdLoad(offset, SimdFInt32Tag());

    // All instructions might be latency ~4 on MIC, so we use shifts where we
    // only need a single instruction (since the shift parameter is an immediate),
    // but multiplication otherwise.
    if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdFInt32(align);
    }

    v0->simdInternal_ = _mm512_i32gather_ps(simdoffset.simdInternal_, base,   sizeof(float));
    v1->simdInternal_ = _mm512_i32gather_ps(simdoffset.simdInternal_, base+1, sizeof(float));
    v2->simdInternal_ = _mm512_i32gather_ps(simdoffset.simdInternal_, base+2, sizeof(float));
}
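
// Inverse of gatherLoadUTranspose: scatters v0..v2 back to memory as
// (x, y, z) triplets starting at base + align*offset[i].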
template <int align>
static inline void gmx_simdcall
transposeScatterStoreU(float *              base,
                       const std::int32_t   offset[],
                       SimdFloat            v0,
                       SimdFloat            v1,
                       SimdFloat            v2)
{
    SimdFInt32 simdoffset;

    assert(std::size_t(offset) % 64 == 0);

    simdoffset = simdLoad(offset, SimdFInt32Tag());

    // All instructions might be latency ~4 on MIC, so we use shifts where we
    // only need a single instruction (since the shift parameter is an immediate),
    // but multiplication otherwise.
    if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdFInt32(align);
    }

    _mm512_i32scatter_ps(base,   simdoffset.simdInternal_, v0.simdInternal_, sizeof(float));
    _mm512_i32scatter_ps(base+1, simdoffset.simdInternal_, v1.simdInternal_, sizeof(float));
    _mm512_i32scatter_ps(base+2, simdoffset.simdInternal_, v2.simdInternal_, sizeof(float));
}
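
// Scatter-increment goes through aligned temporary arrays and scalar
// additions rather than a hardware read-modify-write scatter, presumably so
// that possibly overlapping offsets are handled correctly.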
template <int align>
static inline void gmx_simdcall
transposeScatterIncrU(float *              base,
                      const std::int32_t   offset[],
                      SimdFloat            v0,
                      SimdFloat            v1,
                      SimdFloat            v2)
{
    GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH)  rdata0[GMX_SIMD_FLOAT_WIDTH];
    GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH)  rdata1[GMX_SIMD_FLOAT_WIDTH];
    GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH)  rdata2[GMX_SIMD_FLOAT_WIDTH];

    store(rdata0, v0);
    store(rdata1, v1);
    store(rdata2, v2);

    for (int i = 0; i < GMX_SIMD_FLOAT_WIDTH; i++)
    {
        base[align * offset[i] + 0] += rdata0[i];
        base[align * offset[i] + 1] += rdata1[i];
        base[align * offset[i] + 2] += rdata2[i];
    }
}
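
// Identical to transposeScatterIncrU() above, but subtracting the values.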
template <int align>
static inline void gmx_simdcall
transposeScatterDecrU(float *              base,
                      const std::int32_t   offset[],
                      SimdFloat            v0,
                      SimdFloat            v1,
                      SimdFloat            v2)
{
    GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH)  rdata0[GMX_SIMD_FLOAT_WIDTH];
    GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH)  rdata1[GMX_SIMD_FLOAT_WIDTH];
    GMX_ALIGNED(float, GMX_SIMD_FLOAT_WIDTH)  rdata2[GMX_SIMD_FLOAT_WIDTH];

    store(rdata0, v0);
    store(rdata1, v1);
    store(rdata2, v2);

    for (int i = 0; i < GMX_SIMD_FLOAT_WIDTH; i++)
    {
        base[align * offset[i] + 0] -= rdata0[i];
        base[align * offset[i] + 1] -= rdata1[i];
        base[align * offset[i] + 2] -= rdata2[i];
    }
}
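
// Spreads each scalar into three consecutive output lanes, so that for
// scalar = {s0, s1, ..., s15} the results are
//   triplets0 = {s0, s0, s0, s1, s1, s1, s2, s2, s2, s3, s3, s3, s4, s4, s4, s5}
//   triplets1 = {s5, s5, s6, s6, s6, s7, s7, s7, s8, s8, s8, s9, s9, s9, s10, s10}
//   triplets2 = {s10, s11, s11, s11, ..., s15, s15, s15}
// matching the index tables passed to _mm512_permutevar_epi32 below.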
static inline void gmx_simdcall
expandScalarsToTriplets(SimdFloat    scalar,
                        SimdFloat *  triplets0,
                        SimdFloat *  triplets1,
                        SimdFloat *  triplets2)
{
    triplets0->simdInternal_ = _mm512_castsi512_ps(_mm512_permutevar_epi32(_mm512_set_epi32(5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 1, 1, 1, 0, 0, 0),
                                                                           _mm512_castps_si512(scalar.simdInternal_)));
    triplets1->simdInternal_ = _mm512_castsi512_ps(_mm512_permutevar_epi32(_mm512_set_epi32(10, 10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 5, 5),
                                                                           _mm512_castps_si512(scalar.simdInternal_)));
    triplets2->simdInternal_ = _mm512_castsi512_ps(_mm512_permutevar_epi32(_mm512_set_epi32(15, 15, 15, 14, 14, 14, 13, 13, 13, 12, 12, 12, 11, 11, 11, 10),
                                                                           _mm512_castps_si512(scalar.simdInternal_)));
}
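
// Reduces each of v0..v3 to a scalar sum, adds those four sums to m[0..3],
// and returns the total of all four reductions.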
static inline float gmx_simdcall
reduceIncr4ReturnSum(float *    m,
                     SimdFloat  v0,
                     SimdFloat  v1,
                     SimdFloat  v2,
                     SimdFloat  v3)
{
    float  f;
    __m512 t0, t1, t2;

    assert(std::size_t(m) % 16 == 0);

    t0 = _mm512_add_ps(v0.simdInternal_, _mm512_swizzle_ps(v0.simdInternal_, _MM_SWIZ_REG_BADC));
    t0 = _mm512_mask_add_ps(t0, _mm512_int2mask(0xCCCC), v2.simdInternal_, _mm512_swizzle_ps(v2.simdInternal_, _MM_SWIZ_REG_BADC));
    t1 = _mm512_add_ps(v1.simdInternal_, _mm512_swizzle_ps(v1.simdInternal_, _MM_SWIZ_REG_BADC));
    t1 = _mm512_mask_add_ps(t1, _mm512_int2mask(0xCCCC), v3.simdInternal_, _mm512_swizzle_ps(v3.simdInternal_, _MM_SWIZ_REG_BADC));
    t2 = _mm512_add_ps(t0, _mm512_swizzle_ps(t0, _MM_SWIZ_REG_CDAB));
    t2 = _mm512_mask_add_ps(t2, _mm512_int2mask(0xAAAA), t1, _mm512_swizzle_ps(t1, _MM_SWIZ_REG_CDAB));

    t2 = _mm512_add_ps(t2, _mm512_permute4f128_ps(t2, _MM_PERM_BADC));
    t2 = _mm512_add_ps(t2, _mm512_permute4f128_ps(t2, _MM_PERM_CDAB));

    t0 = _mm512_mask_extload_ps(_mm512_undefined_ps(), _mm512_int2mask(0xF), m, _MM_UPCONV_PS_NONE, _MM_BROADCAST_4X16, _MM_HINT_NONE);
    t0 = _mm512_add_ps(t0, t2);
    _mm512_mask_packstorelo_ps(m, _mm512_int2mask(0xF), t0);

    t2 = _mm512_add_ps(t2, _mm512_swizzle_ps(t2, _MM_SWIZ_REG_BADC));
    t2 = _mm512_add_ps(t2, _mm512_swizzle_ps(t2, _MM_SWIZ_REG_CDAB));

    _mm512_mask_packstorelo_ps(&f, _mm512_int2mask(0x1), t2);

    return f;
}
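
// Half-SIMD-width (8-lane) utilities follow. This load fills the lower eight
// lanes from m0 and the upper eight lanes from m1 via 256-bit broadcast loads.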
static inline SimdFloat gmx_simdcall
loadDualHsimd(const float * m0,
              const float * m1)
{
    assert(std::size_t(m0) % 32 == 0);
    assert(std::size_t(m1) % 32 == 0);

    return _mm512_castpd_ps(_mm512_mask_extload_pd(_mm512_extload_pd(reinterpret_cast<const double *>(m0), _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE),
                                                   _mm512_int2mask(0xF0), reinterpret_cast<const double *>(m1), _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE));
}
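
// Loads eight floats from m and duplicates them into both 256-bit halves.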
static inline SimdFloat gmx_simdcall
loadDuplicateHsimd(const float * m)
{
    assert(std::size_t(m) % 32 == 0);

    return _mm512_castpd_ps(_mm512_extload_pd(reinterpret_cast<const double *>(m), _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE));
}
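
// Broadcasts m[0] across the lower eight lanes and m[1] across the upper
// eight lanes; m does not need to be aligned.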
static inline SimdFloat gmx_simdcall
loadU1DualHsimd(const float * m)
{
    return _mm512_mask_extload_ps(_mm512_extload_ps(m, _MM_UPCONV_PS_NONE, _MM_BROADCAST_1X16, _MM_HINT_NONE), _mm512_int2mask(0xFF00),
                                  m+1, _MM_UPCONV_PS_NONE, _MM_BROADCAST_1X16, _MM_HINT_NONE);
}
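
// Stores the lower eight lanes of a to m0 and the upper eight lanes to m1.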
static inline void gmx_simdcall
storeDualHsimd(float *     m0,
               float *     m1,
               SimdFloat   a)
{
    assert(std::size_t(m0) % 32 == 0);
    assert(std::size_t(m1) % 32 == 0);

    _mm512_mask_packstorelo_ps(m0, _mm512_int2mask(0x00FF), a.simdInternal_);
    _mm512_mask_packstorelo_ps(m1, _mm512_int2mask(0xFF00), a.simdInternal_);
}
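
// Adds the lower eight lanes of a to the eight floats at m0, and the upper
// eight lanes to the eight floats at m1.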
static inline void gmx_simdcall
incrDualHsimd(float *     m0,
              float *     m1,
              SimdFloat   a)
{
    assert(std::size_t(m0) % 32 == 0);
    assert(std::size_t(m1) % 32 == 0);

    __m512 x;

    x = _mm512_castpd_ps(_mm512_extload_pd(reinterpret_cast<const double *>(m0), _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE));
    x = _mm512_add_ps(x, a.simdInternal_);
    _mm512_mask_packstorelo_ps(m0, _mm512_int2mask(0x00FF), x);

    x = _mm512_castpd_ps(_mm512_extload_pd(reinterpret_cast<const double *>(m1), _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE));
    x = _mm512_add_ps(x, a.simdInternal_);
    _mm512_mask_packstorelo_ps(m1, _mm512_int2mask(0xFF00), x);
}
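
// Sums the two 256-bit halves of a and subtracts the result from the eight
// floats at m.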
static inline void gmx_simdcall
decrHsimd(float *    m,
          SimdFloat  a)
{
    __m512 t;

    assert(std::size_t(m) % 32 == 0);

    t = _mm512_castpd_ps(_mm512_extload_pd(reinterpret_cast<const double *>(m), _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE));
    a.simdInternal_ = _mm512_add_ps(a.simdInternal_, _mm512_permute4f128_ps(a.simdInternal_, _MM_PERM_BADC));
    t = _mm512_sub_ps(t, a.simdInternal_);
    _mm512_mask_packstorelo_ps(m, _mm512_int2mask(0x00FF), t);
}
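
// Gathers pairs from two half-width bases: lanes 0-7 of v0/v1 come from
// base0 + align*offset[i] and lanes 8-15 from base1 + align*offset[i], with
// element 0 of each pair landing in v0 and element 1 in v1.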
template <int align>
static inline void gmx_simdcall
gatherLoadTransposeHsimd(const float *       base0,
                         const float *       base1,
                         const std::int32_t  offset[],
                         SimdFloat *         v0,
                         SimdFloat *         v1)
{
    __m512i idx0, idx1, idx;
    __m512  tmp1, tmp2;

    assert(std::size_t(offset) % 32 == 0);
    assert(std::size_t(base0) % 8 == 0);
    assert(std::size_t(base1) % 8 == 0);
    assert(align % 2 == 0);

    idx0 = _mm512_loadunpacklo_epi32(_mm512_undefined_epi32(), offset);

    idx0 = _mm512_mullo_epi32(idx0, _mm512_set1_epi32(align));
    idx1 = _mm512_add_epi32(idx0, _mm512_set1_epi32(1));

    idx = _mm512_mask_permute4f128_epi32(idx0, _mm512_int2mask(0xFF00), idx1, _MM_PERM_BABA);

    tmp1 = _mm512_i32gather_ps(idx, base0, sizeof(float));
    tmp2 = _mm512_i32gather_ps(idx, base1, sizeof(float));

    v0->simdInternal_ = _mm512_mask_permute4f128_ps(tmp1, _mm512_int2mask(0xFF00), tmp2, _MM_PERM_BABA);
    v1->simdInternal_ = _mm512_mask_permute4f128_ps(tmp2, _mm512_int2mask(0x00FF), tmp1, _MM_PERM_DCDC);
}
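
// Treats v0 and v1 as four half-width (8-lane) variables, reduces each to a
// scalar, adds the four results to m[0..3], and returns their overall sum.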
static inline float gmx_simdcall
reduceIncr4ReturnSumHsimd(float *     m,
                          SimdFloat   v0,
                          SimdFloat   v1)
{
    float  f;
    __m512 t0, t1;

    assert(std::size_t(m) % 32 == 0);

    t0 = _mm512_add_ps(v0.simdInternal_, _mm512_swizzle_ps(v0.simdInternal_, _MM_SWIZ_REG_BADC));
    t0 = _mm512_mask_add_ps(t0, _mm512_int2mask(0xCCCC), v1.simdInternal_, _mm512_swizzle_ps(v1.simdInternal_, _MM_SWIZ_REG_BADC));
    t0 = _mm512_add_ps(t0, _mm512_swizzle_ps(t0, _MM_SWIZ_REG_CDAB));
    t0 = _mm512_add_ps(t0, _mm512_castpd_ps(_mm512_swizzle_pd(_mm512_castps_pd(t0), _MM_SWIZ_REG_BADC)));
    t0 = _mm512_mask_permute4f128_ps(t0, _mm512_int2mask(0xAAAA), t0, _MM_PERM_BADC);
    t1 = _mm512_mask_extload_ps(_mm512_undefined_ps(), _mm512_int2mask(0xF), m, _MM_UPCONV_PS_NONE, _MM_BROADCAST_4X16, _MM_HINT_NONE);
    t1 = _mm512_add_ps(t1, t0);
    _mm512_mask_packstorelo_ps(m, _mm512_int2mask(0xF), t1);

    t0 = _mm512_add_ps(t0, _mm512_swizzle_ps(t0, _MM_SWIZ_REG_BADC));
    t0 = _mm512_add_ps(t0, _mm512_swizzle_ps(t0, _MM_SWIZ_REG_CDAB));

    _mm512_mask_packstorelo_ps(&f, _mm512_int2mask(0x1), t0);

    return f;
}

}      // namespace gmx

#endif // GMX_SIMD_IMPL_X86_MIC_UTIL_FLOAT_H