/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015,2016,2017,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef GMX_SIMD_IMPL_X86_AVX_512_SIMD4_DOUBLE_H
#define GMX_SIMD_IMPL_X86_AVX_512_SIMD4_DOUBLE_H

#include "config.h"

#include <cassert>

#include <immintrin.h>

#include "gromacs/utility/basedefinitions.h"

#include "impl_x86_avx_512_general.h"

namespace gmx
{
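
// Simd4Double wraps a 256-bit AVX register holding four doubles; Simd4DBool
// wraps an AVX-512 opmask register, of which only the low four bits are used.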
class Simd4Double
{
public:
    Simd4Double() {}

    Simd4Double(double d) : simdInternal_(_mm256_set1_pd(d)) {}

    // Internal utility constructor to simplify return statements
    Simd4Double(__m256d simd) : simdInternal_(simd) {}

    __m256d simdInternal_;
};

class Simd4DBool
{
public:
    Simd4DBool() {}

    // Internal utility constructor to simplify return statements
    Simd4DBool(__mmask8 simd) : simdInternal_(simd) {}

    __mmask8 simdInternal_;
};
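
// load4/store4 require m to be 32-byte aligned; the unaligned variants
// load4U/store4U below have no such requirement.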
static inline Simd4Double gmx_simdcall load4(const double* m)
{
    assert(size_t(m) % 32 == 0);
    return { _mm256_load_pd(m) };
}

static inline void gmx_simdcall store4(double* m, Simd4Double a)
{
    assert(size_t(m) % 32 == 0);
    _mm256_store_pd(m, a.simdInternal_);
}

static inline Simd4Double gmx_simdcall load4U(const double* m)
{
    return { _mm256_loadu_pd(m) };
}

static inline void gmx_simdcall store4U(double* m, Simd4Double a)
{
    _mm256_storeu_pd(m, a.simdInternal_);
}

static inline Simd4Double gmx_simdcall simd4SetZeroD()
{
    return { _mm256_setzero_pd() };
}
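
// The logical operations below act on the raw bit patterns of the doubles;
// andNot(a, b) computes (~a) & b, matching the andnot intrinsic semantics.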
static inline Simd4Double gmx_simdcall operator&(Simd4Double a, Simd4Double b)
{
    return { _mm256_and_pd(a.simdInternal_, b.simdInternal_) };
}

static inline Simd4Double gmx_simdcall andNot(Simd4Double a, Simd4Double b)
{
    return { _mm256_andnot_pd(a.simdInternal_, b.simdInternal_) };
}

static inline Simd4Double gmx_simdcall operator|(Simd4Double a, Simd4Double b)
{
    return { _mm256_or_pd(a.simdInternal_, b.simdInternal_) };
}

static inline Simd4Double gmx_simdcall operator^(Simd4Double a, Simd4Double b)
{
    return { _mm256_xor_pd(a.simdInternal_, b.simdInternal_) };
}

static inline Simd4Double gmx_simdcall operator+(Simd4Double a, Simd4Double b)
{
    return { _mm256_add_pd(a.simdInternal_, b.simdInternal_) };
}

static inline Simd4Double gmx_simdcall operator-(Simd4Double a, Simd4Double b)
{
    return { _mm256_sub_pd(a.simdInternal_, b.simdInternal_) };
}

static inline Simd4Double gmx_simdcall operator-(Simd4Double x)
{
    return { _mm256_xor_pd(x.simdInternal_, _mm256_set1_pd(GMX_DOUBLE_NEGZERO)) };
}

static inline Simd4Double gmx_simdcall operator*(Simd4Double a, Simd4Double b)
{
    return { _mm256_mul_pd(a.simdInternal_, b.simdInternal_) };
}
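
// Fused multiply-add family, each performed with a single rounding:
// fma = a*b+c, fms = a*b-c, fnma = c-a*b, fnms = -a*b-c.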
static inline Simd4Double gmx_simdcall fma(Simd4Double a, Simd4Double b, Simd4Double c)
{
    return { _mm256_fmadd_pd(a.simdInternal_, b.simdInternal_, c.simdInternal_) };
}

static inline Simd4Double gmx_simdcall fms(Simd4Double a, Simd4Double b, Simd4Double c)
{
    return { _mm256_fmsub_pd(a.simdInternal_, b.simdInternal_, c.simdInternal_) };
}

static inline Simd4Double gmx_simdcall fnma(Simd4Double a, Simd4Double b, Simd4Double c)
{
    return { _mm256_fnmadd_pd(a.simdInternal_, b.simdInternal_, c.simdInternal_) };
}

static inline Simd4Double gmx_simdcall fnms(Simd4Double a, Simd4Double b, Simd4Double c)
{
    return { _mm256_fnmsub_pd(a.simdInternal_, b.simdInternal_, c.simdInternal_) };
}
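
// Base AVX-512F has no 256-bit rsqrt14, so the operand is widened to 512 bits,
// approximated (relative error <= 2^-14), and narrowed back. The #if guard lets
// the AVX-512-KNL flavor, which includes this header, supply its own version.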
// Override for AVX-512-KNL
#if GMX_SIMD_X86_AVX_512
static inline Simd4Double gmx_simdcall rsqrt(Simd4Double x)
{
    return { _mm512_castpd512_pd256(_mm512_rsqrt14_pd(_mm512_castpd256_pd512(x.simdInternal_))) };
}
#endif

static inline Simd4Double gmx_simdcall abs(Simd4Double x)
{
    return { _mm256_andnot_pd(_mm256_set1_pd(GMX_DOUBLE_NEGZERO), x.simdInternal_) };
}

static inline Simd4Double gmx_simdcall max(Simd4Double a, Simd4Double b)
{
    return { _mm256_max_pd(a.simdInternal_, b.simdInternal_) };
}

static inline Simd4Double gmx_simdcall min(Simd4Double a, Simd4Double b)
{
    return { _mm256_min_pd(a.simdInternal_, b.simdInternal_) };
}

static inline Simd4Double gmx_simdcall round(Simd4Double x)
{
    return { _mm256_round_pd(x.simdInternal_, _MM_FROUND_NINT) };
}

static inline Simd4Double gmx_simdcall trunc(Simd4Double x)
{
    return { _mm256_round_pd(x.simdInternal_, _MM_FROUND_TRUNC) };
}
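
// Returns the dot product of the first three elements; the fourth is ignored
// (the low result lane is p0+p1+p2 after the permute/add sequence below).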
static inline double gmx_simdcall dotProduct(Simd4Double a, Simd4Double b)
{
    __m128d tmp1, tmp2;
    a.simdInternal_ = _mm256_mul_pd(a.simdInternal_, b.simdInternal_);
    tmp1            = _mm256_castpd256_pd128(a.simdInternal_);
    tmp2            = _mm256_extractf128_pd(a.simdInternal_, 0x1);

    tmp1 = _mm_add_pd(tmp1, _mm_permute_pd(tmp1, _MM_SHUFFLE2(0, 1)));
    tmp1 = _mm_add_pd(tmp1, tmp2);
    return *reinterpret_cast<double*>(&tmp1);
}
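
// 4x4 in-register transpose: unpacklo/unpackhi interleave pairs within each
// 128-bit lane, then permute2f128 gathers matching lanes across registers.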
static inline void gmx_simdcall transpose(Simd4Double* v0, Simd4Double* v1, Simd4Double* v2, Simd4Double* v3)
{
    __m256d t1, t2, t3, t4;

    t1                = _mm256_unpacklo_pd(v0->simdInternal_, v1->simdInternal_);
    t2                = _mm256_unpackhi_pd(v0->simdInternal_, v1->simdInternal_);
    t3                = _mm256_unpacklo_pd(v2->simdInternal_, v3->simdInternal_);
    t4                = _mm256_unpackhi_pd(v2->simdInternal_, v3->simdInternal_);
    v0->simdInternal_ = _mm256_permute2f128_pd(t1, t3, 0x20);
    v1->simdInternal_ = _mm256_permute2f128_pd(t2, t4, 0x20);
    v2->simdInternal_ = _mm256_permute2f128_pd(t1, t3, 0x31);
    v3->simdInternal_ = _mm256_permute2f128_pd(t2, t4, 0x31);
}
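
// AVX-512F provides opmask compares only at 512-bit width (the 256-bit forms
// require AVX-512VL), so the operands are widened and only the low four lanes
// are enabled via avx512Int2Mask(0xF).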
static inline Simd4DBool gmx_simdcall operator==(Simd4Double a, Simd4Double b)
{
    return { _mm512_mask_cmp_pd_mask(avx512Int2Mask(0xF),
                                     _mm512_castpd256_pd512(a.simdInternal_),
                                     _mm512_castpd256_pd512(b.simdInternal_),
                                     _CMP_EQ_OQ) };
}

static inline Simd4DBool gmx_simdcall operator!=(Simd4Double a, Simd4Double b)
{
    return { _mm512_mask_cmp_pd_mask(avx512Int2Mask(0xF),
                                     _mm512_castpd256_pd512(a.simdInternal_),
                                     _mm512_castpd256_pd512(b.simdInternal_),
                                     _CMP_NEQ_OQ) };
}

static inline Simd4DBool gmx_simdcall operator<(Simd4Double a, Simd4Double b)
{
    return { _mm512_mask_cmp_pd_mask(avx512Int2Mask(0xF),
                                     _mm512_castpd256_pd512(a.simdInternal_),
                                     _mm512_castpd256_pd512(b.simdInternal_),
                                     _CMP_LT_OS) };
}

static inline Simd4DBool gmx_simdcall operator<=(Simd4Double a, Simd4Double b)
{
    return { _mm512_mask_cmp_pd_mask(avx512Int2Mask(0xF),
                                     _mm512_castpd256_pd512(a.simdInternal_),
                                     _mm512_castpd256_pd512(b.simdInternal_),
                                     _CMP_LE_OS) };
}

static inline Simd4DBool gmx_simdcall operator&&(Simd4DBool a, Simd4DBool b)
{
    return { static_cast<__mmask8>(_mm512_kand(a.simdInternal_, b.simdInternal_)) };
}

static inline Simd4DBool gmx_simdcall operator||(Simd4DBool a, Simd4DBool b)
{
    return { static_cast<__mmask8>(_mm512_kor(a.simdInternal_, b.simdInternal_)) };
}
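
// Only the low four mask bits correspond to SIMD4 lanes, so test just those.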
static inline bool gmx_simdcall anyTrue(Simd4DBool x)
{
    return (avx512Mask2Int(x.simdInternal_) & 0xF) != 0;
}
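
// Masked moves against a zero vector implement selection: selectByMask keeps
// the elements of a where m is set, selectByNotMask those where m is clear.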
static inline Simd4Double gmx_simdcall selectByMask(Simd4Double a, Simd4DBool m)
{
    return { _mm512_castpd512_pd256(_mm512_mask_mov_pd(
            _mm512_setzero_pd(), m.simdInternal_, _mm512_castpd256_pd512(a.simdInternal_))) };
}

static inline Simd4Double gmx_simdcall selectByNotMask(Simd4Double a, Simd4DBool m)
{
    return { _mm512_castpd512_pd256(_mm512_mask_mov_pd(
            _mm512_castpd256_pd512(a.simdInternal_), m.simdInternal_, _mm512_setzero_pd())) };
}

static inline Simd4Double gmx_simdcall blend(Simd4Double a, Simd4Double b, Simd4DBool sel)
{
    return { _mm512_castpd512_pd256(_mm512_mask_blend_pd(sel.simdInternal_,
                                                         _mm512_castpd256_pd512(a.simdInternal_),
                                                         _mm512_castpd256_pd512(b.simdInternal_))) };
}
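
// Returns the sum of all four elements: hadd forms pairwise sums within each
// 128-bit lane, and the final add_sd combines the two lanes.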
static inline double gmx_simdcall reduce(Simd4Double a)
{
    __m128d a0, a1;

    a.simdInternal_ = _mm256_hadd_pd(a.simdInternal_, a.simdInternal_);
    a0              = _mm256_castpd256_pd128(a.simdInternal_);
    a1              = _mm256_extractf128_pd(a.simdInternal_, 0x1);
    a0              = _mm_add_sd(a0, a1);

    return *reinterpret_cast<double*>(&a0);
}

} // namespace gmx

#endif // GMX_SIMD_IMPL_X86_AVX_512_SIMD4_DOUBLE_H