/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015,2016,2017,2018 by the GROMACS development team.
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#ifndef GMX_SIMD_IMPL_X86_MIC_UTIL_DOUBLE_H
#define GMX_SIMD_IMPL_X86_MIC_UTIL_DOUBLE_H

#include "config.h"

#include <cassert>
#include <cstdint>

#include <immintrin.h>

#include "gromacs/utility/basedefinitions.h"

#include "impl_x86_mic_simd_double.h"

namespace gmx
{

// On MIC it is better to use gather/scatter instructions than to load and
// transpose with permutes, so we define the load routines that take a SIMD
// offset variable first; the integer-offset versions below call them.

template<int align>
static inline void gmx_simdcall gatherLoadBySimdIntTranspose(const double* base,
                                                             SimdDInt32    simdoffset,
                                                             SimdDouble*   v0,
                                                             SimdDouble*   v1,
                                                             SimdDouble*   v2,
                                                             SimdDouble*   v3)
{
    assert(std::size_t(base) % 32 == 0);
    assert(align % 4 == 0);

    // Most instructions have a latency of roughly four cycles on MIC, so we use
    // a single shift instruction where the alignment allows it (the shift count
    // is an immediate), and fall back to a multiplication otherwise.
    if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdDInt32(align);
    }

    v0->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base, sizeof(double));
    v1->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 1, sizeof(double));
    v2->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 2, sizeof(double));
    v3->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 3, sizeof(double));
}
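
// Usage sketch (illustrative only; xyzq and particleIndex are hypothetical
// names, not part of this header): gathering x, y, z and charge q for eight
// particles stored as aligned x,y,z,q quadruplets, i.e. align == 4:
//
//     SimdDouble x, y, z, q;
//     SimdDInt32 idx = simdLoad(particleIndex, SimdDInt32Tag());
//     gatherLoadBySimdIntTranspose<4>(xyzq, idx, &x, &y, &z, &q);
//
// With align == 4 the offset scaling above is a single left shift by two.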

template<int align>
static inline void gmx_simdcall gatherLoadUBySimdIntTranspose(const double* base,
                                                              SimdDInt32    simdoffset,
                                                              SimdDouble*   v0,
                                                              SimdDouble*   v1)
{
    // Most instructions have a latency of roughly four cycles on MIC, so we use
    // a single shift instruction where the alignment allows it (the shift count
    // is an immediate), and fall back to a multiplication otherwise.
    if (align == 2)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 1);
    }
    else if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdDInt32(align);
    }

    v0->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base, sizeof(double));
    v1->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 1, sizeof(double));
}

template<int align>
static inline void gmx_simdcall gatherLoadBySimdIntTranspose(const double* base,
                                                             SimdDInt32    simdoffset,
                                                             SimdDouble*   v0,
                                                             SimdDouble*   v1)
{
    assert(std::size_t(base) % 16 == 0);
    assert(align % 2 == 0);
    gatherLoadUBySimdIntTranspose<align>(base, simdoffset, v0, v1);
}


template<int align>
static inline void gmx_simdcall gatherLoadTranspose(const double*      base,
                                                    const std::int32_t offset[],
                                                    SimdDouble*        v0,
                                                    SimdDouble*        v1,
                                                    SimdDouble*        v2,
                                                    SimdDouble*        v3)
{
    gatherLoadBySimdIntTranspose<align>(base, simdLoad(offset, SimdDInt32Tag()), v0, v1, v2, v3);
}

template<int align>
static inline void gmx_simdcall
                   gatherLoadTranspose(const double* base, const std::int32_t offset[], SimdDouble* v0, SimdDouble* v1)
{
    gatherLoadBySimdIntTranspose<align>(base, simdLoad(offset, SimdDInt32Tag()), v0, v1);
}

static const int c_simdBestPairAlignmentDouble = 2;
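
// Usage sketch (illustrative; c6c12 and pairIndex are hypothetical names):
// loading interleaved c6/c12 parameter pairs with the preferred alignment:
//
//     SimdDouble c6, c12;
//     gatherLoadTranspose<c_simdBestPairAlignmentDouble>(c6c12, pairIndex, &c6, &c12);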

template<int align>
static inline void gmx_simdcall gatherLoadUTranspose(const double*      base,
                                                     const std::int32_t offset[],
                                                     SimdDouble*        v0,
                                                     SimdDouble*        v1,
                                                     SimdDouble*        v2)
{
    SimdDInt32 simdoffset;

    assert(std::size_t(offset) % 32 == 0);

    simdoffset = simdLoad(offset, SimdDInt32Tag());

    // Most instructions have a latency of roughly four cycles on MIC, so we use
    // a single shift instruction where the alignment allows it (the shift count
    // is an immediate), and fall back to a multiplication otherwise.
    if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdDInt32(align);
    }

    v0->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base, sizeof(double));
    v1->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 1, sizeof(double));
    v2->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 2, sizeof(double));
}
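
// Usage sketch (illustrative; xyz and atomIndex are hypothetical names):
// with coordinates stored as packed x,y,z triplets (align == 3), the generic
// multiplication branch above computes the element offsets:
//
//     SimdDouble x, y, z;
//     gatherLoadUTranspose<3>(xyz, atomIndex, &x, &y, &z);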

template<int align>
static inline void gmx_simdcall transposeScatterStoreU(double*            base,
                                                       const std::int32_t offset[],
                                                       SimdDouble         v0,
                                                       SimdDouble         v1,
                                                       SimdDouble         v2)
{
    SimdDInt32 simdoffset;

    assert(std::size_t(offset) % 32 == 0);

    simdoffset = simdLoad(offset, SimdDInt32Tag());

    // Most instructions have a latency of roughly four cycles on MIC, so we use
    // a single shift instruction where the alignment allows it (the shift count
    // is an immediate), and fall back to a multiplication otherwise.
    if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdDInt32(align);
    }

    _mm512_i32loscatter_pd(base, simdoffset.simdInternal_, v0.simdInternal_, sizeof(double));
    _mm512_i32loscatter_pd(base + 1, simdoffset.simdInternal_, v1.simdInternal_, sizeof(double));
    _mm512_i32loscatter_pd(base + 2, simdoffset.simdInternal_, v2.simdInternal_, sizeof(double));
}
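
// Usage sketch (illustrative; force and atomIndex are hypothetical names):
// the inverse of gatherLoadUTranspose, writing three components back to
// scattered x,y,z triplets:
//
//     transposeScatterStoreU<3>(force, atomIndex, fx, fy, fz);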

template<int align>
static inline void gmx_simdcall
                   transposeScatterIncrU(double* base, const std::int32_t offset[], SimdDouble v0, SimdDouble v1, SimdDouble v2)
{
    alignas(GMX_SIMD_ALIGNMENT) double rdata0[GMX_SIMD_DOUBLE_WIDTH];
    alignas(GMX_SIMD_ALIGNMENT) double rdata1[GMX_SIMD_DOUBLE_WIDTH];
    alignas(GMX_SIMD_ALIGNMENT) double rdata2[GMX_SIMD_DOUBLE_WIDTH];

    store(rdata0, v0);
    store(rdata1, v1);
    store(rdata2, v2);

    for (int i = 0; i < GMX_SIMD_DOUBLE_WIDTH; i++)
    {
        base[align * offset[i] + 0] += rdata0[i];
        base[align * offset[i] + 1] += rdata1[i];
        base[align * offset[i] + 2] += rdata2[i];
    }
}
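
// Note that the increment routine above and the decrement routine below spill
// the SIMD registers to aligned temporaries and update memory element by
// element. Usage sketch (illustrative; force and atomIndex are hypothetical
// names), accumulating forces into packed x,y,z triplets:
//
//     transposeScatterIncrU<3>(force, atomIndex, fx, fy, fz);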

template<int align>
static inline void gmx_simdcall
                   transposeScatterDecrU(double* base, const std::int32_t offset[], SimdDouble v0, SimdDouble v1, SimdDouble v2)
{
    alignas(GMX_SIMD_ALIGNMENT) double rdata0[GMX_SIMD_DOUBLE_WIDTH];
    alignas(GMX_SIMD_ALIGNMENT) double rdata1[GMX_SIMD_DOUBLE_WIDTH];
    alignas(GMX_SIMD_ALIGNMENT) double rdata2[GMX_SIMD_DOUBLE_WIDTH];

    store(rdata0, v0);
    store(rdata1, v1);
    store(rdata2, v2);

    for (int i = 0; i < GMX_SIMD_DOUBLE_WIDTH; i++)
    {
        base[align * offset[i] + 0] -= rdata0[i];
        base[align * offset[i] + 1] -= rdata1[i];
        base[align * offset[i] + 2] -= rdata2[i];
    }
}

static inline void gmx_simdcall expandScalarsToTriplets(SimdDouble  scalar,
                                                        SimdDouble* triplets0,
                                                        SimdDouble* triplets1,
                                                        SimdDouble* triplets2)
{
    triplets0->simdInternal_ = _mm512_castsi512_pd(
            _mm512_permutevar_epi32(_mm512_set_epi32(5, 4, 5, 4, 3, 2, 3, 2, 3, 2, 1, 0, 1, 0, 1, 0),
                                    _mm512_castpd_si512(scalar.simdInternal_)));
    triplets1->simdInternal_ = _mm512_castsi512_pd(_mm512_permutevar_epi32(
            _mm512_set_epi32(11, 10, 9, 8, 9, 8, 9, 8, 7, 6, 7, 6, 7, 6, 5, 4),
            _mm512_castpd_si512(scalar.simdInternal_)));
    triplets2->simdInternal_ = _mm512_castsi512_pd(_mm512_permutevar_epi32(
            _mm512_set_epi32(15, 14, 15, 14, 15, 14, 13, 12, 13, 12, 13, 12, 11, 10, 11, 10),
            _mm512_castpd_si512(scalar.simdInternal_)));
}
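
// Layout sketch (added for illustration): for input scalars s0..s7 the three
// outputs hold
//
//     triplets0: s0 s0 s0 s1 s1 s1 s2 s2
//     triplets1: s2 s3 s3 s3 s4 s4 s4 s5
//     triplets2: s5 s5 s6 s6 s6 s7 s7 s7
//
// so each scalar is repeated three times across the concatenated outputs,
// matching one parameter per x,y,z component. The permute indices above
// address the 32-bit halves of each double, which is why they appear in
// (2k+1, 2k) pairs.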


static inline double gmx_simdcall
                     reduceIncr4ReturnSum(double* m, SimdDouble v0, SimdDouble v1, SimdDouble v2, SimdDouble v3)
{
    double  d;
    __m512d t0, t1, t2, t3;

    assert(std::size_t(m) % 32 == 0);

    t0 = _mm512_swizzle_pd(_mm512_mask_blend_pd(_mm512_int2mask(0x33), v0.simdInternal_, v2.simdInternal_),
                           _MM_SWIZ_REG_BADC);
    t2 = _mm512_mask_blend_pd(_mm512_int2mask(0x33), v2.simdInternal_, v0.simdInternal_);
    t1 = _mm512_swizzle_pd(_mm512_mask_blend_pd(_mm512_int2mask(0x33), v1.simdInternal_, v3.simdInternal_),
                           _MM_SWIZ_REG_BADC);
    t3 = _mm512_mask_blend_pd(_mm512_int2mask(0x33), v3.simdInternal_, v1.simdInternal_);
    t0 = _mm512_add_pd(t0, t2);
    t1 = _mm512_add_pd(t1, t3);

    t2 = _mm512_swizzle_pd(_mm512_mask_blend_pd(_mm512_int2mask(0x55), t0, t1), _MM_SWIZ_REG_CDAB);
    t3 = _mm512_mask_blend_pd(_mm512_int2mask(0x55), t1, t0);
    t2 = _mm512_add_pd(t2, t3);

    t2 = _mm512_add_pd(t2, _mm512_castps_pd(_mm512_permute4f128_ps(_mm512_castpd_ps(t2), _MM_PERM_BADC)));

    t0 = _mm512_mask_extload_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF), m, _MM_UPCONV_PD_NONE,
                                _MM_BROADCAST_4X8, _MM_HINT_NONE);
    t0 = _mm512_add_pd(t0, t2);
    _mm512_mask_packstorelo_pd(m, _mm512_int2mask(0xF), t0);

    t2 = _mm512_add_pd(t2, _mm512_swizzle_pd(t2, _MM_SWIZ_REG_BADC));
    t2 = _mm512_add_pd(t2, _mm512_swizzle_pd(t2, _MM_SWIZ_REG_CDAB));

    _mm512_mask_packstorelo_pd(&d, _mm512_int2mask(0x01), t2);
    return d;
}
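
// Usage sketch (illustrative; sums is a hypothetical aligned four-element
// buffer): each lane-sum of v0..v3 is added to the corresponding element of
// sums, and the grand total of the four lane-sums is returned:
//
//     double total = reduceIncr4ReturnSum(sums, v0, v1, v2, v3);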

static inline SimdDouble gmx_simdcall loadDualHsimd(const double* m0, const double* m1)
{
    assert(std::size_t(m0) % 32 == 0);
    assert(std::size_t(m1) % 32 == 0);

    return _mm512_mask_extload_pd(
            _mm512_extload_pd(m0, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE),
            _mm512_int2mask(0xF0), m1, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
}
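
// The Hsimd routines treat the 512-bit register as two independent
// four-double halves. Sketch (illustrative): after
//
//     SimdDouble a = loadDualHsimd(m0, m1);
//
// the low half of a holds m0[0..3] and the high half holds m1[0..3].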

static inline SimdDouble gmx_simdcall loadDuplicateHsimd(const double* m)
{
    assert(std::size_t(m) % 32 == 0);

    return _mm512_extload_pd(m, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
}

static inline SimdDouble gmx_simdcall loadU1DualHsimd(const double* m)
{
    return _mm512_mask_extload_pd(
            _mm512_extload_pd(m, _MM_UPCONV_PD_NONE, _MM_BROADCAST_1X8, _MM_HINT_NONE),
            _mm512_int2mask(0xF0), m + 1, _MM_UPCONV_PD_NONE, _MM_BROADCAST_1X8, _MM_HINT_NONE);
}


static inline void gmx_simdcall storeDualHsimd(double* m0, double* m1, SimdDouble a)
{
    assert(std::size_t(m0) % 32 == 0);
    assert(std::size_t(m1) % 32 == 0);

    _mm512_mask_packstorelo_pd(m0, _mm512_int2mask(0x0F), a.simdInternal_);
    _mm512_mask_packstorelo_pd(m1, _mm512_int2mask(0xF0), a.simdInternal_);
}

static inline void gmx_simdcall incrDualHsimd(double* m0, double* m1, SimdDouble a)
{
    assert(std::size_t(m0) % 32 == 0);
    assert(std::size_t(m1) % 32 == 0);

    __m512d x;

    // Update lower half
    x = _mm512_extload_pd(m0, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
    x = _mm512_add_pd(x, a.simdInternal_);
    _mm512_mask_packstorelo_pd(m0, _mm512_int2mask(0x0F), x);

    // Update upper half
    x = _mm512_extload_pd(m1, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
    x = _mm512_add_pd(x, a.simdInternal_);
    _mm512_mask_packstorelo_pd(m1, _mm512_int2mask(0xF0), x);
}

static inline void gmx_simdcall decrHsimd(double* m, SimdDouble a)
{
    __m512d t;

    assert(std::size_t(m) % 32 == 0);

    t               = _mm512_extload_pd(m, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
    a.simdInternal_ = _mm512_add_pd(
            a.simdInternal_,
            _mm512_castps_pd(_mm512_permute4f128_ps(_mm512_castpd_ps(a.simdInternal_), _MM_PERM_BADC)));
    t = _mm512_sub_pd(t, a.simdInternal_);
    _mm512_mask_packstorelo_pd(m, _mm512_int2mask(0x0F), t);
}


template<int align>
static inline void gmx_simdcall gatherLoadTransposeHsimd(const double*      base0,
                                                         const double*      base1,
                                                         const std::int32_t offset[],
                                                         SimdDouble*        v0,
                                                         SimdDouble*        v1)
{
    __m512i idx0, idx1, idx;
    __m512d tmp1, tmp2;

    assert(std::size_t(offset) % 16 == 0);
    assert(std::size_t(base0) % 16 == 0);
    assert(std::size_t(base1) % 16 == 0);
    assert(std::size_t(align) % 2 == 0);

    idx0 = _mm512_extload_epi32(offset, _MM_UPCONV_EPI32_NONE, _MM_BROADCAST_4X16, _MM_HINT_NONE);

    idx0 = _mm512_mullo_epi32(idx0, _mm512_set1_epi32(align));
    idx1 = _mm512_add_epi32(idx0, _mm512_set1_epi32(1));

    idx = _mm512_mask_permute4f128_epi32(idx0, _mm512_int2mask(0x00F0), idx1, _MM_PERM_AAAA);

    tmp1 = _mm512_i32logather_pd(idx, base0, sizeof(double));
    tmp2 = _mm512_i32logather_pd(idx, base1, sizeof(double));

    v0->simdInternal_ = _mm512_castps_pd(_mm512_mask_permute4f128_ps(
            _mm512_castpd_ps(tmp1), _mm512_int2mask(0xFF00), _mm512_castpd_ps(tmp2), _MM_PERM_BABA));
    v1->simdInternal_ = _mm512_castps_pd(_mm512_mask_permute4f128_ps(
            _mm512_castpd_ps(tmp2), _mm512_int2mask(0x00FF), _mm512_castpd_ps(tmp1), _MM_PERM_DCDC));
}

static inline double gmx_simdcall reduceIncr4ReturnSumHsimd(double* m, SimdDouble v0, SimdDouble v1)
{
    double  d;
    __m512d t0, t1;

    assert(std::size_t(m) % 32 == 0);

    t0 = _mm512_add_pd(v0.simdInternal_, _mm512_swizzle_pd(v0.simdInternal_, _MM_SWIZ_REG_BADC));
    t0 = _mm512_mask_add_pd(t0, _mm512_int2mask(0xCC), v1.simdInternal_,
                            _mm512_swizzle_pd(v1.simdInternal_, _MM_SWIZ_REG_BADC));
    t0 = _mm512_add_pd(t0, _mm512_swizzle_pd(t0, _MM_SWIZ_REG_CDAB));
    t0 = _mm512_castps_pd(_mm512_mask_permute4f128_ps(_mm512_castpd_ps(t0), _mm512_int2mask(0xCCCC),
                                                      _mm512_castpd_ps(t0), _MM_PERM_DCDC));

    t1 = _mm512_mask_extload_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF), m, _MM_UPCONV_PD_NONE,
                                _MM_BROADCAST_4X8, _MM_HINT_NONE);
    t1 = _mm512_add_pd(t1, t0);
    _mm512_mask_packstorelo_pd(m, _mm512_int2mask(0xF), t1);

    t0 = _mm512_add_pd(t0, _mm512_swizzle_pd(t0, _MM_SWIZ_REG_BADC));
    t0 = _mm512_add_pd(t0, _mm512_swizzle_pd(t0, _MM_SWIZ_REG_CDAB));

    _mm512_mask_packstorelo_pd(&d, _mm512_int2mask(0x01), t0);
    return d;
}
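
// Usage sketch (illustrative; sums is a hypothetical aligned four-element
// buffer): the half-register sums are accumulated as sums[0] += sum(v0 low),
// sums[1] += sum(v0 high), sums[2] += sum(v1 low), sums[3] += sum(v1 high),
// and the grand total of the four is returned:
//
//     double total = reduceIncr4ReturnSumHsimd(sums, v0, v1);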

} // namespace gmx

#endif // GMX_SIMD_IMPL_X86_MIC_UTIL_DOUBLE_H