/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015,2016,2017,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef GMX_SIMD_IMPL_X86_MIC_SIMD_DOUBLE_H
#define GMX_SIMD_IMPL_X86_MIC_SIMD_DOUBLE_H

#include <cassert>
#include <cstdint>

#include <immintrin.h>

#include "gromacs/math/utilities.h"
#include "gromacs/utility/basedefinitions.h"

#include "impl_x86_mic_simd_float.h"

namespace gmx
{
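// Double-precision SIMD for x86 MIC (Knights Corner): each __m512d register holds
// 8 doubles, and the integer/boolean types below use only the low 8 lanes of the
// 16-wide 512-bit integer and mask registers.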
class SimdDouble
{
public:
    SimdDouble() {}

    SimdDouble(double d) : simdInternal_(_mm512_set1_pd(d)) {}

    // Internal utility constructor to simplify return statements
    SimdDouble(__m512d simd) : simdInternal_(simd) {}

    __m512d simdInternal_;
};
class SimdDInt32
{
public:
    SimdDInt32() {}

    SimdDInt32(std::int32_t i) : simdInternal_(_mm512_set1_epi32(i)) {}

    // Internal utility constructor to simplify return statements
    SimdDInt32(__m512i simd) : simdInternal_(simd) {}

    __m512i simdInternal_;
};
class SimdDBool
{
public:
    SimdDBool() {}

    // Internal utility constructor to simplify return statements
    SimdDBool(__mmask8 simd) : simdInternal_(simd) {}

    __mmask8 simdInternal_;
};
class SimdDIBool
{
public:
    SimdDIBool() {}

    // Internal utility constructor to simplify return statements
    SimdDIBool(__mmask16 simd) : simdInternal_(simd) {}

    __mmask16 simdInternal_;
};
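// Booleans live in KNC mask registers rather than full vector registers: __mmask8 for
// comparisons between the 8 doubles, and __mmask16 for the integer variant, of which
// only the low 8 bits are meaningful here.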
static inline SimdDouble gmx_simdcall simdLoad(const double* m, SimdDoubleTag = {})
{
    assert(std::size_t(m) % 64 == 0);
    return { _mm512_load_pd(m) };
}

static inline void gmx_simdcall store(double* m, SimdDouble a)
{
    assert(std::size_t(m) % 64 == 0);
    _mm512_store_pd(m, a.simdInternal_);
}
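// KNC has no single-instruction unaligned 512-bit load or store, so the unaligned
// variants below use the loadunpacklo/loadunpackhi and packstorelo/packstorehi pairs,
// which splice a vector across a possible cache-line split.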
static inline SimdDouble gmx_simdcall simdLoadU(const double* m, SimdDoubleTag = {})
{
    return { _mm512_loadunpackhi_pd(_mm512_loadunpacklo_pd(_mm512_undefined_pd(), m), m + 8) };
}

static inline void gmx_simdcall storeU(double* m, SimdDouble a)
{
    _mm512_packstorelo_pd(m, a.simdInternal_);
    _mm512_packstorehi_pd(m + 8, a.simdInternal_);
}

static inline SimdDouble gmx_simdcall setZeroD()
{
    return { _mm512_setzero_pd() };
}
static inline SimdDInt32 gmx_simdcall simdLoad(const std::int32_t* m, SimdDInt32Tag)
{
    assert(std::size_t(m) % 32 == 0);
    return { _mm512_extload_epi64(m, _MM_UPCONV_EPI64_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE) };
}

static inline void gmx_simdcall store(std::int32_t* m, SimdDInt32 a)
{
    assert(std::size_t(m) % 32 == 0);
    _mm512_mask_packstorelo_epi32(m, _mm512_int2mask(0x00FF), a.simdInternal_);
}

static inline SimdDInt32 gmx_simdcall simdLoadU(const std::int32_t* m, SimdDInt32Tag)
{
    return { _mm512_mask_loadunpackhi_epi32(
            _mm512_mask_loadunpacklo_epi32(_mm512_undefined_epi32(), _mm512_int2mask(0x00FF), m),
            _mm512_int2mask(0x00FF), m + 16) };
}

static inline void gmx_simdcall storeU(std::int32_t* m, SimdDInt32 a)
{
    _mm512_mask_packstorelo_epi32(m, _mm512_int2mask(0x00FF), a.simdInternal_);
    _mm512_mask_packstorehi_epi32(m + 16, _mm512_int2mask(0x00FF), a.simdInternal_);
}

static inline SimdDInt32 gmx_simdcall setZeroDI()
{
    return { _mm512_setzero_epi32() };
}
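// Only 8 of the 16 32-bit integer lanes are used for SimdDInt32, matching the 8 double
// lanes; the 0x00FF masks above therefore select just the low half of the register.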
template<int index>
static inline std::int32_t gmx_simdcall extract(SimdDInt32 a)
{
    int r;
    _mm512_mask_packstorelo_epi32(&r, _mm512_mask2int(1 << index), a.simdInternal_);
    return r;
}
static inline SimdDouble gmx_simdcall operator&(SimdDouble a, SimdDouble b)
{
    return { _mm512_castsi512_pd(_mm512_and_epi32(_mm512_castpd_si512(a.simdInternal_),
                                                  _mm512_castpd_si512(b.simdInternal_))) };
}

static inline SimdDouble gmx_simdcall andNot(SimdDouble a, SimdDouble b)
{
    return { _mm512_castsi512_pd(_mm512_andnot_epi32(_mm512_castpd_si512(a.simdInternal_),
                                                     _mm512_castpd_si512(b.simdInternal_))) };
}

static inline SimdDouble gmx_simdcall operator|(SimdDouble a, SimdDouble b)
{
    return { _mm512_castsi512_pd(_mm512_or_epi32(_mm512_castpd_si512(a.simdInternal_),
                                                 _mm512_castpd_si512(b.simdInternal_))) };
}

static inline SimdDouble gmx_simdcall operator^(SimdDouble a, SimdDouble b)
{
    return { _mm512_castsi512_pd(_mm512_xor_epi32(_mm512_castpd_si512(a.simdInternal_),
                                                  _mm512_castpd_si512(b.simdInternal_))) };
}
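// The KNC intrinsics provide no double-precision bitwise operations, so the logical
// operators above cast to the 32-bit integer domain, operate there, and cast back;
// the bit pattern is unchanged.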
static inline SimdDouble gmx_simdcall operator+(SimdDouble a, SimdDouble b)
{
    return { _mm512_add_pd(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall operator-(SimdDouble a, SimdDouble b)
{
    return { _mm512_sub_pd(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall operator-(SimdDouble x)
{
    return { _mm512_addn_pd(x.simdInternal_, _mm512_setzero_pd()) };
}

static inline SimdDouble gmx_simdcall operator*(SimdDouble a, SimdDouble b)
{
    return { _mm512_mul_pd(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall fma(SimdDouble a, SimdDouble b, SimdDouble c)
{
    return { _mm512_fmadd_pd(a.simdInternal_, b.simdInternal_, c.simdInternal_) };
}

static inline SimdDouble gmx_simdcall fms(SimdDouble a, SimdDouble b, SimdDouble c)
{
    return { _mm512_fmsub_pd(a.simdInternal_, b.simdInternal_, c.simdInternal_) };
}

static inline SimdDouble gmx_simdcall fnma(SimdDouble a, SimdDouble b, SimdDouble c)
{
    return { _mm512_fnmadd_pd(a.simdInternal_, b.simdInternal_, c.simdInternal_) };
}

static inline SimdDouble gmx_simdcall fnms(SimdDouble a, SimdDouble b, SimdDouble c)
{
    return { _mm512_fnmsub_pd(a.simdInternal_, b.simdInternal_, c.simdInternal_) };
}
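// Fused multiply-add family: fma(a,b,c) = a*b + c, fms = a*b - c, fnma = -a*b + c, and
// fnms = -a*b - c, each mapping to a single fused KNC instruction.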
static inline SimdDouble gmx_simdcall rsqrt(SimdDouble x)
{
    return { _mm512_cvtpslo_pd(_mm512_rsqrt23_ps(_mm512_cvtpd_pslo(x.simdInternal_))) };
}

static inline SimdDouble gmx_simdcall rcp(SimdDouble x)
{
    return { _mm512_cvtpslo_pd(_mm512_rcp23_ps(_mm512_cvtpd_pslo(x.simdInternal_))) };
}
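// The hardware rsqrt23/rcp23 approximations exist only in single precision, so the doubles
// are narrowed, approximated to roughly 23 bits, and widened again; callers that need full
// double precision are expected to refine the result (e.g. with Newton-Raphson iterations)
// in the generic math layer.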
static inline SimdDouble gmx_simdcall maskAdd(SimdDouble a, SimdDouble b, SimdDBool m)
{
    return { _mm512_mask_add_pd(a.simdInternal_, m.simdInternal_, a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall maskzMul(SimdDouble a, SimdDouble b, SimdDBool m)
{
    return { _mm512_mask_mul_pd(_mm512_setzero_pd(), m.simdInternal_, a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall maskzFma(SimdDouble a, SimdDouble b, SimdDouble c, SimdDBool m)
{
    return { _mm512_mask_mov_pd(_mm512_setzero_pd(), m.simdInternal_,
                                _mm512_fmadd_pd(a.simdInternal_, b.simdInternal_, c.simdInternal_)) };
}

static inline SimdDouble gmx_simdcall maskzRsqrt(SimdDouble x, SimdDBool m)
{
    return { _mm512_cvtpslo_pd(_mm512_mask_rsqrt23_ps(_mm512_setzero_ps(), m.simdInternal_,
                                                      _mm512_cvtpd_pslo(x.simdInternal_))) };
}

static inline SimdDouble gmx_simdcall maskzRcp(SimdDouble x, SimdDBool m)
{
    return { _mm512_cvtpslo_pd(_mm512_mask_rcp23_ps(_mm512_setzero_ps(), m.simdInternal_,
                                                    _mm512_cvtpd_pslo(x.simdInternal_))) };
}
static inline SimdDouble gmx_simdcall abs(SimdDouble x)
{
    return { _mm512_castsi512_pd(_mm512_andnot_epi32(_mm512_castpd_si512(_mm512_set1_pd(GMX_DOUBLE_NEGZERO)),
                                                     _mm512_castpd_si512(x.simdInternal_))) };
}

static inline SimdDouble gmx_simdcall max(SimdDouble a, SimdDouble b)
{
    return { _mm512_gmax_pd(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall min(SimdDouble a, SimdDouble b)
{
    return { _mm512_gmin_pd(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall round(SimdDouble x)
{
    return { _mm512_roundfxpnt_adjust_pd(x.simdInternal_, _MM_FROUND_TO_NEAREST_INT, _MM_EXPADJ_NONE) };
}

static inline SimdDouble gmx_simdcall trunc(SimdDouble x)
{
    return { _mm512_roundfxpnt_adjust_pd(x.simdInternal_, _MM_FROUND_TO_ZERO, _MM_EXPADJ_NONE) };
}
template<MathOptimization opt = MathOptimization::Safe>
static inline SimdDouble frexp(SimdDouble value, SimdDInt32* exponent)
{
    __m512d rExponent;
    __m512i iExponent;
    __m512d result;

    if (opt == MathOptimization::Safe)
    {
        // For the safe branch, we use the masked operations to only assign results if the
        // input value was nonzero, and otherwise set the exponent to 0 and the fraction to
        // the input (+-0).
        __mmask8 valueIsNonZero =
                _mm512_cmp_pd_mask(_mm512_setzero_pd(), value.simdInternal_, _CMP_NEQ_OQ);
        rExponent = _mm512_mask_getexp_pd(_mm512_setzero_pd(), valueIsNonZero, value.simdInternal_);

        // Create an integer -1 value, and use masking in the conversion as the result for
        // zero-value input. When we later add 1 to all fields, the fields that were formerly -1
        // (corresponding to zero exponent) will be assigned -1 + 1 = 0.
        iExponent = _mm512_mask_cvtfxpnt_roundpd_epi32lo(_mm512_set1_epi32(-1), valueIsNonZero,
                                                         rExponent, _MM_FROUND_TO_NEAREST_INT);
        iExponent = _mm512_add_epi32(iExponent, _mm512_set1_epi32(1));

        // Set result to value (+-0) when it is zero.
        result = _mm512_mask_getmant_pd(value.simdInternal_, valueIsNonZero, value.simdInternal_,
                                        _MM_MANT_NORM_p5_1, _MM_MANT_SIGN_src);
    }
    else
    {
        rExponent = _mm512_getexp_pd(value.simdInternal_);
        iExponent = _mm512_cvtfxpnt_roundpd_epi32lo(rExponent, _MM_FROUND_TO_NEAREST_INT);
        iExponent = _mm512_add_epi32(iExponent, _mm512_set1_epi32(1));
        result    = _mm512_getmant_pd(value.simdInternal_, _MM_MANT_NORM_p5_1, _MM_MANT_SIGN_src);
    }

    exponent->simdInternal_ = iExponent;

    return { result };
}
template<MathOptimization opt = MathOptimization::Safe>
static inline SimdDouble ldexp(SimdDouble value, SimdDInt32 exponent)
{
    const __m512i exponentBias = _mm512_set1_epi32(1023);
    __m512i       iExponent    = _mm512_add_epi32(exponent.simdInternal_, exponentBias);

    if (opt == MathOptimization::Safe)
    {
        // Make sure the biased argument is not negative
        iExponent = _mm512_max_epi32(iExponent, _mm512_setzero_epi32());
    }

    iExponent = _mm512_permutevar_epi32(
            _mm512_set_epi32(7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0, 0), iExponent);
    iExponent = _mm512_mask_slli_epi32(_mm512_setzero_epi32(), _mm512_int2mask(0xAAAA), iExponent, 20);
    return _mm512_mul_pd(_mm512_castsi512_pd(iExponent), value.simdInternal_);
}
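// ldexp builds 2^exponent directly in the bit pattern: the biased exponent is copied into
// the upper dword of each 64-bit lane and shifted into the IEEE-754 exponent field, and the
// resulting power of two is multiplied onto value. A minimal usage sketch, assuming only the
// standard frexp/ldexp contract shown above (variable names are illustrative):
//
//     SimdDouble x(-3.75);
//     SimdDInt32 e;
//     SimdDouble f = frexp(x, &e); // x == f * 2^e, with |f| in [0.5, 1)
//     SimdDouble y = ldexp(f, e);  // reassembles x (here f = -0.9375, e = 2)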
static inline double gmx_simdcall reduce(SimdDouble a)
{
    return _mm512_reduce_add_pd(a.simdInternal_);
}
// Picky, picky, picky:
// icc-16 complains about "Illegal value of immediate argument to intrinsic"
// unless we use
// 1) Ordered-quiet for ==
// 2) Unordered-quiet for !=
// 3) Ordered-signaling for < and <=

static inline SimdDBool gmx_simdcall operator==(SimdDouble a, SimdDouble b)
{
    return { _mm512_cmp_pd_mask(a.simdInternal_, b.simdInternal_, _CMP_EQ_OQ) };
}

static inline SimdDBool gmx_simdcall operator!=(SimdDouble a, SimdDouble b)
{
    return { _mm512_cmp_pd_mask(a.simdInternal_, b.simdInternal_, _CMP_NEQ_UQ) };
}

static inline SimdDBool gmx_simdcall operator<(SimdDouble a, SimdDouble b)
{
    return { _mm512_cmp_pd_mask(a.simdInternal_, b.simdInternal_, _CMP_LT_OS) };
}

static inline SimdDBool gmx_simdcall operator<=(SimdDouble a, SimdDouble b)
{
    return { _mm512_cmp_pd_mask(a.simdInternal_, b.simdInternal_, _CMP_LE_OS) };
}
static inline SimdDBool gmx_simdcall testBits(SimdDouble a)
{
    // This is a bit problematic since Knights Corner does not have any 64-bit integer comparisons,
    // and we cannot use floating-point, since values with just a single bit set can evaluate to 0.0.
    // Instead, we do it as:
    // 1) Do a logical or of the high/low 32 bits
    // 2) Do a permute so we have the low 32 bits of each value in the low 8 32-bit elements
    // 3) Do an integer comparison, and cast so we just keep the low 8 bits of the mask.
    //
    // By default we will use integers for the masks in the nonbonded kernels, so this shouldn't
    // have any significant performance drawbacks.

    __m512i ia = _mm512_castpd_si512(a.simdInternal_);

    ia = _mm512_or_epi32(ia, _mm512_swizzle_epi32(ia, _MM_SWIZ_REG_CDAB));
    ia = _mm512_permutevar_epi32(
            _mm512_set_epi32(15, 13, 11, 9, 7, 5, 3, 1, 14, 12, 10, 8, 6, 4, 2, 0), ia);

    return { static_cast<__mmask8>(_mm512_cmp_epi32_mask(ia, _mm512_setzero_si512(), _MM_CMPINT_NE)) };
}
static inline SimdDBool gmx_simdcall operator&&(SimdDBool a, SimdDBool b)
{
    return { static_cast<__mmask8>(_mm512_kand(a.simdInternal_, b.simdInternal_)) };
}

static inline SimdDBool gmx_simdcall operator||(SimdDBool a, SimdDBool b)
{
    return { static_cast<__mmask8>(_mm512_kor(a.simdInternal_, b.simdInternal_)) };
}

static inline bool gmx_simdcall anyTrue(SimdDBool a)
{
    return _mm512_mask2int(a.simdInternal_) != 0;
}

static inline SimdDouble gmx_simdcall selectByMask(SimdDouble a, SimdDBool m)
{
    return { _mm512_mask_mov_pd(_mm512_setzero_pd(), m.simdInternal_, a.simdInternal_) };
}

static inline SimdDouble gmx_simdcall selectByNotMask(SimdDouble a, SimdDBool m)
{
    return { _mm512_mask_mov_pd(a.simdInternal_, m.simdInternal_, _mm512_setzero_pd()) };
}

static inline SimdDouble gmx_simdcall blend(SimdDouble a, SimdDouble b, SimdDBool sel)
{
    return { _mm512_mask_blend_pd(sel.simdInternal_, a.simdInternal_, b.simdInternal_) };
}
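// Selection semantics: selectByMask keeps a where m is true and returns 0.0 elsewhere,
// selectByNotMask does the opposite, and blend(a, b, sel) picks b where sel is true and
// a where it is false.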
static inline SimdDInt32 gmx_simdcall operator&(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm512_and_epi32(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall andNot(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm512_andnot_epi32(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall operator|(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm512_or_epi32(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall operator^(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm512_xor_epi32(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall operator+(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm512_add_epi32(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall operator-(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm512_sub_epi32(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall operator*(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm512_mullo_epi32(a.simdInternal_, b.simdInternal_) };
}
static inline SimdDIBool gmx_simdcall operator==(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm512_cmp_epi32_mask(a.simdInternal_, b.simdInternal_, _MM_CMPINT_EQ) };
}

static inline SimdDIBool gmx_simdcall testBits(SimdDInt32 a)
{
    return { _mm512_cmp_epi32_mask(a.simdInternal_, _mm512_setzero_si512(), _MM_CMPINT_NE) };
}

static inline SimdDIBool gmx_simdcall operator<(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm512_cmp_epi32_mask(a.simdInternal_, b.simdInternal_, _MM_CMPINT_LT) };
}

static inline SimdDIBool gmx_simdcall operator&&(SimdDIBool a, SimdDIBool b)
{
    return { _mm512_kand(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDIBool gmx_simdcall operator||(SimdDIBool a, SimdDIBool b)
{
    return { _mm512_kor(a.simdInternal_, b.simdInternal_) };
}

static inline bool gmx_simdcall anyTrue(SimdDIBool a)
{
    return (_mm512_mask2int(a.simdInternal_) & 0xFF) != 0;
}
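// The integer comparisons produce a 16-bit mask, but only the low 8 bits correspond to the
// double-width elements, so anyTrue masks with 0xFF before testing.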
static inline SimdDInt32 gmx_simdcall selectByMask(SimdDInt32 a, SimdDIBool m)
{
    return { _mm512_mask_mov_epi32(_mm512_setzero_epi32(), m.simdInternal_, a.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall selectByNotMask(SimdDInt32 a, SimdDIBool m)
{
    return { _mm512_mask_mov_epi32(a.simdInternal_, m.simdInternal_, _mm512_setzero_epi32()) };
}

static inline SimdDInt32 gmx_simdcall blend(SimdDInt32 a, SimdDInt32 b, SimdDIBool sel)
{
    return { _mm512_mask_blend_epi32(sel.simdInternal_, a.simdInternal_, b.simdInternal_) };
}
static inline SimdDInt32 gmx_simdcall cvtR2I(SimdDouble a)
{
    return { _mm512_cvtfxpnt_roundpd_epi32lo(a.simdInternal_, _MM_FROUND_TO_NEAREST_INT) };
}

static inline SimdDInt32 gmx_simdcall cvttR2I(SimdDouble a)
{
    return { _mm512_cvtfxpnt_roundpd_epi32lo(a.simdInternal_, _MM_FROUND_TO_ZERO) };
}

static inline SimdDouble gmx_simdcall cvtI2R(SimdDInt32 a)
{
    return { _mm512_cvtepi32lo_pd(a.simdInternal_) };
}

static inline SimdDIBool gmx_simdcall cvtB2IB(SimdDBool a)
{
    return { a.simdInternal_ };
}

static inline SimdDBool gmx_simdcall cvtIB2B(SimdDIBool a)
{
    return { static_cast<__mmask8>(a.simdInternal_) };
}
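// cvtR2I rounds to nearest while cvttR2I truncates toward zero; both convert into the low
// 8 integer lanes, and the boolean conversions simply reinterpret the mask width.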
static inline void gmx_simdcall cvtF2DD(SimdFloat f, SimdDouble* d0, SimdDouble* d1)
{
    __m512i i1 = _mm512_permute4f128_epi32(_mm512_castps_si512(f.simdInternal_), _MM_PERM_DCDC);

    *d0 = _mm512_cvtpslo_pd(f.simdInternal_);
    *d1 = _mm512_cvtpslo_pd(_mm512_castsi512_ps(i1));
}

static inline SimdFloat gmx_simdcall cvtDD2F(SimdDouble d0, SimdDouble d1)
{
    __m512 f0 = _mm512_cvtpd_pslo(d0.simdInternal_);
    __m512 f1 = _mm512_cvtpd_pslo(d1.simdInternal_);
    return { _mm512_mask_permute4f128_ps(f0, _mm512_int2mask(0xFF00), f1, _MM_PERM_BABA) };
}
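// A single-precision register holds 16 floats, i.e. two double-width registers' worth of
// elements, so these conversions split and merge between one SimdFloat and a pair of
// SimdDouble values, using a 128-bit-lane permute to move the upper half.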
} // namespace gmx

#endif // GMX_SIMD_IMPL_X86_MIC_SIMD_DOUBLE_H