/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#ifndef GMX_SIMD_IMPL_X86_MIC_UTIL_DOUBLE_H
#define GMX_SIMD_IMPL_X86_MIC_UTIL_DOUBLE_H

#include "config.h"

#include <cassert>
#include <cstdint>

#include <immintrin.h>

#include "gromacs/utility/basedefinitions.h"

#include "impl_x86_mic_simd_double.h"

namespace gmx
{

// On MIC the gather/scatter hardware works from a vector of offsets, so we
// define the load routines that take a SIMD offset variable first.

template<int align>
static inline void gmx_simdcall gatherLoadBySimdIntTranspose(const double* base,
                                                             SimdDInt32    simdoffset,
                                                             SimdDouble*   v0,
                                                             SimdDouble*   v1,
                                                             SimdDouble*   v2,
                                                             SimdDouble*   v3)
{
    assert(std::size_t(base) % 32 == 0);
    assert(align % 4 == 0);

    // Most instructions have a latency of roughly 4 cycles on MIC, so when the
    // alignment is a power of two we scale the offsets with a single shift
    // (the shift count is an immediate), and use a multiplication otherwise.
    if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdDInt32(align);
    }

    v0->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base, sizeof(double));
    v1->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 1, sizeof(double));
    v2->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 2, sizeof(double));
    v3->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 3, sizeof(double));
}
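
// A minimal usage sketch for the transposed gather above; the align value,
// array contents, and variable names are illustrative assumptions only:
//
//     alignas(GMX_SIMD_ALIGNMENT) std::int32_t idx[GMX_SIMD_DOUBLE_WIDTH] = { 0, 1, 2, 3, 4, 5, 6, 7 };
//     SimdDouble x, y, z, q;
//     // lane i of x/y/z/q receives base[4 * idx[i] + 0/1/2/3]
//     gatherLoadBySimdIntTranspose<4>(base, simdLoad(idx, SimdDInt32Tag()), &x, &y, &z, &q);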

template<int align>
static inline void gmx_simdcall gatherLoadUBySimdIntTranspose(const double* base,
                                                              SimdDInt32    simdoffset,
                                                              SimdDouble*   v0,
                                                              SimdDouble*   v1)
{
    // Most instructions have a latency of roughly 4 cycles on MIC, so when the
    // alignment is a power of two we scale the offsets with a single shift
    // (the shift count is an immediate), and use a multiplication otherwise.
    if (align == 2)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 1);
    }
    else if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdDInt32(align);
    }

    v0->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base, sizeof(double));
    v1->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 1, sizeof(double));
}

template<int align>
static inline void gmx_simdcall gatherLoadBySimdIntTranspose(const double* base,
                                                             SimdDInt32    simdoffset,
                                                             SimdDouble*   v0,
                                                             SimdDouble*   v1)
{
    assert(std::size_t(base) % 16 == 0);
    assert(align % 2 == 0);
    gatherLoadUBySimdIntTranspose<align>(base, simdoffset, v0, v1);
}
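
// The MIC gathers require only element alignment, not tuple alignment, so the
// aligned variant above can forward directly to the unaligned implementation;
// its asserts merely document the stricter contract of the aligned API.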


template<int align>
static inline void gmx_simdcall gatherLoadTranspose(const double*      base,
                                                    const std::int32_t offset[],
                                                    SimdDouble*        v0,
                                                    SimdDouble*        v1,
                                                    SimdDouble*        v2,
                                                    SimdDouble*        v3)
{
    gatherLoadBySimdIntTranspose<align>(base, simdLoad(offset, SimdDInt32Tag()), v0, v1, v2, v3);
}

template<int align>
static inline void gmx_simdcall
                   gatherLoadTranspose(const double* base, const std::int32_t offset[], SimdDouble* v0, SimdDouble* v1)
{
    gatherLoadBySimdIntTranspose<align>(base, simdLoad(offset, SimdDInt32Tag()), v0, v1);
}

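// With align == 2 the offset scaling in the gather routines above is a single
// shift, so storing pairs contiguously is the cheapest layout for pair loads;
// this constant advertises that preferred alignment to generic code.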
static const int c_simdBestPairAlignmentDouble = 2;

template<int align>
static inline void gmx_simdcall gatherLoadUTranspose(const double*      base,
                                                     const std::int32_t offset[],
                                                     SimdDouble*        v0,
                                                     SimdDouble*        v1,
                                                     SimdDouble*        v2)
{
    SimdDInt32 simdoffset;

    assert(std::size_t(offset) % 32 == 0);

    simdoffset = simdLoad(offset, SimdDInt32Tag());

    // Most instructions have a latency of roughly 4 cycles on MIC, so when the
    // alignment is a power of two we scale the offsets with a single shift
    // (the shift count is an immediate), and use a multiplication otherwise.
    if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdDInt32(align);
    }

    v0->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base, sizeof(double));
    v1->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 1, sizeof(double));
    v2->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 2, sizeof(double));
}

template<int align>
static inline void gmx_simdcall transposeScatterStoreU(double*            base,
                                                       const std::int32_t offset[],
                                                       SimdDouble         v0,
                                                       SimdDouble         v1,
                                                       SimdDouble         v2)
{
    SimdDInt32 simdoffset;

    assert(std::size_t(offset) % 32 == 0);

    simdoffset = simdLoad(offset, SimdDInt32Tag());

    // Most instructions have a latency of roughly 4 cycles on MIC, so when the
    // alignment is a power of two we scale the offsets with a single shift
    // (the shift count is an immediate), and use a multiplication otherwise.
    if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdDInt32(align);
    }

    _mm512_i32loscatter_pd(base, simdoffset.simdInternal_, v0.simdInternal_, sizeof(double));
    _mm512_i32loscatter_pd(base + 1, simdoffset.simdInternal_, v1.simdInternal_, sizeof(double));
    _mm512_i32loscatter_pd(base + 2, simdoffset.simdInternal_, v2.simdInternal_, sizeof(double));
}
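
// The three scatters above are the vector form of the scalar pattern
//     base[align * offset[i] + 0] = lane i of v0;
//     base[align * offset[i] + 1] = lane i of v1;
//     base[align * offset[i] + 2] = lane i of v2;
// which is also the access pattern the increment/decrement routines below
// implement through scratch arrays.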

template<int align>
static inline void gmx_simdcall
                   transposeScatterIncrU(double* base, const std::int32_t offset[], SimdDouble v0, SimdDouble v1, SimdDouble v2)
{
    alignas(GMX_SIMD_ALIGNMENT) double rdata0[GMX_SIMD_DOUBLE_WIDTH];
    alignas(GMX_SIMD_ALIGNMENT) double rdata1[GMX_SIMD_DOUBLE_WIDTH];
    alignas(GMX_SIMD_ALIGNMENT) double rdata2[GMX_SIMD_DOUBLE_WIDTH];

    store(rdata0, v0);
    store(rdata1, v1);
    store(rdata2, v2);

    for (int i = 0; i < GMX_SIMD_DOUBLE_WIDTH; i++)
    {
        base[align * offset[i] + 0] += rdata0[i];
        base[align * offset[i] + 1] += rdata1[i];
        base[align * offset[i] + 2] += rdata2[i];
    }
}
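
// The increment/decrement variants stage the data through aligned scratch
// arrays and a scalar loop rather than a gather-add-scatter sequence. A
// read-modify-write through the gather/scatter units would cost extra
// instructions, and the scalar form presumably also stays correct if offsets
// repeat within one call, which a vector read-modify-write would not.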

template<int align>
static inline void gmx_simdcall
                   transposeScatterDecrU(double* base, const std::int32_t offset[], SimdDouble v0, SimdDouble v1, SimdDouble v2)
{
    alignas(GMX_SIMD_ALIGNMENT) double rdata0[GMX_SIMD_DOUBLE_WIDTH];
    alignas(GMX_SIMD_ALIGNMENT) double rdata1[GMX_SIMD_DOUBLE_WIDTH];
    alignas(GMX_SIMD_ALIGNMENT) double rdata2[GMX_SIMD_DOUBLE_WIDTH];

    store(rdata0, v0);
    store(rdata1, v1);
    store(rdata2, v2);

    for (int i = 0; i < GMX_SIMD_DOUBLE_WIDTH; i++)
    {
        base[align * offset[i] + 0] -= rdata0[i];
        base[align * offset[i] + 1] -= rdata1[i];
        base[align * offset[i] + 2] -= rdata2[i];
    }
}

static inline void gmx_simdcall expandScalarsToTriplets(SimdDouble  scalar,
                                                        SimdDouble* triplets0,
                                                        SimdDouble* triplets1,
                                                        SimdDouble* triplets2)
{
    triplets0->simdInternal_ = _mm512_castsi512_pd(
            _mm512_permutevar_epi32(_mm512_set_epi32(5, 4, 5, 4, 3, 2, 3, 2, 3, 2, 1, 0, 1, 0, 1, 0),
                                    _mm512_castpd_si512(scalar.simdInternal_)));
    triplets1->simdInternal_ = _mm512_castsi512_pd(_mm512_permutevar_epi32(
            _mm512_set_epi32(11, 10, 9, 8, 9, 8, 9, 8, 7, 6, 7, 6, 7, 6, 5, 4),
            _mm512_castpd_si512(scalar.simdInternal_)));
    triplets2->simdInternal_ = _mm512_castsi512_pd(_mm512_permutevar_epi32(
            _mm512_set_epi32(15, 14, 15, 14, 15, 14, 13, 12, 13, 12, 13, 12, 11, 10, 11, 10),
            _mm512_castpd_si512(scalar.simdInternal_)));
}
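
// Worked example for the permutations above: each double is a pair of 32-bit
// lanes, so the index tables move pairs. For input lanes {d0, ..., d7} the
// outputs are
//     triplets0 = { d0, d0, d0, d1, d1, d1, d2, d2 }
//     triplets1 = { d2, d3, d3, d3, d4, d4, d4, d5 }
//     triplets2 = { d5, d5, d6, d6, d6, d7, d7, d7 }
// i.e. across the three outputs every scalar appears three times, expanding
// one value per particle onto x/y/z-style triplets.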


static inline double gmx_simdcall
                     reduceIncr4ReturnSum(double* m, SimdDouble v0, SimdDouble v1, SimdDouble v2, SimdDouble v3)
{
    double  d;
    __m512d t0, t1, t2, t3;

    assert(std::size_t(m) % 32 == 0);

    t0 = _mm512_swizzle_pd(_mm512_mask_blend_pd(_mm512_int2mask(0x33), v0.simdInternal_, v2.simdInternal_),
                           _MM_SWIZ_REG_BADC);
    t2 = _mm512_mask_blend_pd(_mm512_int2mask(0x33), v2.simdInternal_, v0.simdInternal_);
    t1 = _mm512_swizzle_pd(_mm512_mask_blend_pd(_mm512_int2mask(0x33), v1.simdInternal_, v3.simdInternal_),
                           _MM_SWIZ_REG_BADC);
    t3 = _mm512_mask_blend_pd(_mm512_int2mask(0x33), v3.simdInternal_, v1.simdInternal_);
    t0 = _mm512_add_pd(t0, t2);
    t1 = _mm512_add_pd(t1, t3);

    t2 = _mm512_swizzle_pd(_mm512_mask_blend_pd(_mm512_int2mask(0x55), t0, t1), _MM_SWIZ_REG_CDAB);
    t3 = _mm512_mask_blend_pd(_mm512_int2mask(0x55), t1, t0);
    t2 = _mm512_add_pd(t2, t3);

    t2 = _mm512_add_pd(t2, _mm512_castps_pd(_mm512_permute4f128_ps(_mm512_castpd_ps(t2), _MM_PERM_BADC)));

    t0 = _mm512_mask_extload_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF), m, _MM_UPCONV_PD_NONE,
                                _MM_BROADCAST_4X8, _MM_HINT_NONE);
    t0 = _mm512_add_pd(t0, t2);
    _mm512_mask_packstorelo_pd(m, _mm512_int2mask(0xF), t0);

    t2 = _mm512_add_pd(t2, _mm512_swizzle_pd(t2, _MM_SWIZ_REG_BADC));
    t2 = _mm512_add_pd(t2, _mm512_swizzle_pd(t2, _MM_SWIZ_REG_CDAB));

    _mm512_mask_packstorelo_pd(&d, _mm512_int2mask(0x01), t2);
    return d;
}
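
// Scalar sketch of the routine above (illustration, not the implementation):
//     m[0] += reduce(v0); m[1] += reduce(v1); m[2] += reduce(v2); m[3] += reduce(v3);
//     return reduce(v0) + reduce(v1) + reduce(v2) + reduce(v3);
// where reduce() sums all eight lanes of one SIMD variable.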

static inline SimdDouble gmx_simdcall loadDualHsimd(const double* m0, const double* m1)
{
    assert(std::size_t(m0) % 32 == 0);
    assert(std::size_t(m1) % 32 == 0);

    return _mm512_mask_extload_pd(
            _mm512_extload_pd(m0, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE),
            _mm512_int2mask(0xF0), m1, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
}

static inline SimdDouble gmx_simdcall loadDuplicateHsimd(const double* m)
{
    assert(std::size_t(m) % 32 == 0);

    return _mm512_extload_pd(m, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
}

static inline SimdDouble gmx_simdcall loadU1DualHsimd(const double* m)
{
    return _mm512_mask_extload_pd(
            _mm512_extload_pd(m, _MM_UPCONV_PD_NONE, _MM_BROADCAST_1X8, _MM_HINT_NONE),
            _mm512_int2mask(0xF0), m + 1, _MM_UPCONV_PD_NONE, _MM_BROADCAST_1X8, _MM_HINT_NONE);
}
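
// Lane picture for the three half-SIMD loads above (width 8 on MIC):
//     loadDualHsimd(m0, m1)  -> { m0[0..3], m1[0..3] }
//     loadDuplicateHsimd(m)  -> { m[0..3],  m[0..3]  }
//     loadU1DualHsimd(m)     -> { m[0] x4,  m[1] x4  }
// The 4X8 broadcasts repeat a 4-element block across the register, and the
// 0xF0 mask overwrites only the upper half.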


static inline void gmx_simdcall storeDualHsimd(double* m0, double* m1, SimdDouble a)
{
    assert(std::size_t(m0) % 32 == 0);
    assert(std::size_t(m1) % 32 == 0);

    _mm512_mask_packstorelo_pd(m0, _mm512_int2mask(0x0F), a.simdInternal_);
    _mm512_mask_packstorelo_pd(m1, _mm512_int2mask(0xF0), a.simdInternal_);
}

static inline void gmx_simdcall incrDualHsimd(double* m0, double* m1, SimdDouble a)
{
    assert(std::size_t(m0) % 32 == 0);
    assert(std::size_t(m1) % 32 == 0);

    __m512d x;

    // Update lower half
    x = _mm512_extload_pd(m0, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
    x = _mm512_add_pd(x, a.simdInternal_);
    _mm512_mask_packstorelo_pd(m0, _mm512_int2mask(0x0F), x);

    // Update upper half
    x = _mm512_extload_pd(m1, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
    x = _mm512_add_pd(x, a.simdInternal_);
    _mm512_mask_packstorelo_pd(m1, _mm512_int2mask(0xF0), x);
}

static inline void gmx_simdcall decrHsimd(double* m, SimdDouble a)
{
    __m512d t;

    assert(std::size_t(m) % 32 == 0);

    t               = _mm512_extload_pd(m, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
    a.simdInternal_ = _mm512_add_pd(
            a.simdInternal_,
            _mm512_castps_pd(_mm512_permute4f128_ps(_mm512_castpd_ps(a.simdInternal_), _MM_PERM_BADC)));
    t = _mm512_sub_pd(t, a.simdInternal_);
    _mm512_mask_packstorelo_pd(m, _mm512_int2mask(0x0F), t);
}
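
// decrHsimd computes m[i] -= a[i] + a[i + 4] for i = 0..3: the permute folds
// the upper 256-bit half of a onto the lower one before the subtraction.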


template<int align>
static inline void gmx_simdcall gatherLoadTransposeHsimd(const double*      base0,
                                                         const double*      base1,
                                                         const std::int32_t offset[],
                                                         SimdDouble*        v0,
                                                         SimdDouble*        v1)
{
    __m512i idx0, idx1, idx;
    __m512d tmp1, tmp2;

    assert(std::size_t(offset) % 16 == 0);
    assert(std::size_t(base0) % 16 == 0);
    assert(std::size_t(base1) % 16 == 0);
    assert(align % 2 == 0);

    idx0 = _mm512_extload_epi32(offset, _MM_UPCONV_EPI32_NONE, _MM_BROADCAST_4X16, _MM_HINT_NONE);

    idx0 = _mm512_mullo_epi32(idx0, _mm512_set1_epi32(align));
    idx1 = _mm512_add_epi32(idx0, _mm512_set1_epi32(1));

    idx = _mm512_mask_permute4f128_epi32(idx0, _mm512_int2mask(0x00F0), idx1, _MM_PERM_AAAA);

    tmp1 = _mm512_i32logather_pd(idx, base0, sizeof(double));
    tmp2 = _mm512_i32logather_pd(idx, base1, sizeof(double));

    v0->simdInternal_ = _mm512_castps_pd(_mm512_mask_permute4f128_ps(
            _mm512_castpd_ps(tmp1), _mm512_int2mask(0xFF00), _mm512_castpd_ps(tmp2), _MM_PERM_BABA));
    v1->simdInternal_ = _mm512_castps_pd(_mm512_mask_permute4f128_ps(
            _mm512_castpd_ps(tmp2), _mm512_int2mask(0x00FF), _mm512_castpd_ps(tmp1), _MM_PERM_DCDC));
}
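
// Pattern sketch for the half-SIMD gather above: for i = 0..3,
//     lane i     of v0/v1 = base0[align * offset[i] + 0/1]
//     lane i + 4 of v0/v1 = base1[align * offset[i] + 0/1]
// i.e. the same four pairs are fetched from both half-width bases and packed
// into the two halves of each output register.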

static inline double gmx_simdcall reduceIncr4ReturnSumHsimd(double* m, SimdDouble v0, SimdDouble v1)
{
    double  d;
    __m512d t0, t1;

    assert(std::size_t(m) % 32 == 0);

    t0 = _mm512_add_pd(v0.simdInternal_, _mm512_swizzle_pd(v0.simdInternal_, _MM_SWIZ_REG_BADC));
    t0 = _mm512_mask_add_pd(t0, _mm512_int2mask(0xCC), v1.simdInternal_,
                            _mm512_swizzle_pd(v1.simdInternal_, _MM_SWIZ_REG_BADC));
    t0 = _mm512_add_pd(t0, _mm512_swizzle_pd(t0, _MM_SWIZ_REG_CDAB));
    t0 = _mm512_castps_pd(_mm512_mask_permute4f128_ps(_mm512_castpd_ps(t0), _mm512_int2mask(0xCCCC),
                                                      _mm512_castpd_ps(t0), _MM_PERM_DCDC));

    t1 = _mm512_mask_extload_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF), m, _MM_UPCONV_PD_NONE,
                                _MM_BROADCAST_4X8, _MM_HINT_NONE);
    t1 = _mm512_add_pd(t1, t0);
    _mm512_mask_packstorelo_pd(m, _mm512_int2mask(0xF), t1);

    t0 = _mm512_add_pd(t0, _mm512_swizzle_pd(t0, _MM_SWIZ_REG_BADC));
    t0 = _mm512_add_pd(t0, _mm512_swizzle_pd(t0, _MM_SWIZ_REG_CDAB));

    _mm512_mask_packstorelo_pd(&d, _mm512_int2mask(0x01), t0);
    return d;
}
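
// Scalar sketch, writing v0 = {a, b} and v1 = {c, d} for the four 4-element
// halves: the routine above does
//     m[0] += sum(a); m[1] += sum(b); m[2] += sum(c); m[3] += sum(d);
//     return sum(a) + sum(b) + sum(c) + sum(d);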

} // namespace gmx

#endif // GMX_SIMD_IMPL_X86_MIC_UTIL_DOUBLE_H