/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015,2016,2017,2018 by the GROMACS development team.
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#ifndef GMX_SIMD_IMPL_X86_MIC_UTIL_DOUBLE_H
#define GMX_SIMD_IMPL_X86_MIC_UTIL_DOUBLE_H

#include "config.h"

#include <cassert>
#include <cstdint>

#include <immintrin.h>

#include "gromacs/utility/basedefinitions.h"

#include "impl_x86_mic_simd_double.h"

namespace gmx
{

namespace
{
/* This is an internal helper function used by decr3Hsimd(...).
 */
inline void gmx_simdcall decrHsimd(double* m, SimdDouble a)
{
    __m512d t;

    assert(std::size_t(m) % 32 == 0);

    t               = _mm512_extload_pd(m, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
    a.simdInternal_ = _mm512_add_pd(
            a.simdInternal_,
            _mm512_castps_pd(_mm512_permute4f128_ps(_mm512_castpd_ps(a.simdInternal_), _MM_PERM_BADC)));
    t = _mm512_sub_pd(t, a.simdInternal_);
    _mm512_mask_packstorelo_pd(m, _mm512_int2mask(0x0F), t);
}
} // namespace
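
/* A scalar sketch of what decrHsimd() computes (illustrative only; assumes
 * the 8-wide double SIMD of MIC): the two 4-element halves of a are summed
 * pairwise and subtracted from the four doubles at m.
 *
 *     for (int i = 0; i < 4; i++)
 *     {
 *         m[i] -= a[i] + a[i + 4];
 *     }
 */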

// On MIC it is better to use scatter operations, so we define the load routines
// that use a SIMD offset variable first.

template<int align>
static inline void gmx_simdcall gatherLoadBySimdIntTranspose(const double* base,
                                                             SimdDInt32    simdoffset,
                                                             SimdDouble*   v0,
                                                             SimdDouble*   v1,
                                                             SimdDouble*   v2,
                                                             SimdDouble*   v3)
{
    assert(std::size_t(base) % 32 == 0);
    assert(align % 4 == 0);

    // All instructions might be latency ~4 on MIC, so we use shifts where we
    // only need a single instruction (since the shift parameter is an immediate),
    // but multiplication otherwise.
    if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdDInt32(align);
    }

    v0->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base, sizeof(double));
    v1->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 1, sizeof(double));
    v2->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 2, sizeof(double));
    v3->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 3, sizeof(double));
}
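
/* Scalar equivalent of the transpose gather above (illustrative only),
 * treating the SIMD variables as plain arrays:
 *
 *     for (int i = 0; i < GMX_SIMD_DOUBLE_WIDTH; i++)
 *     {
 *         v0[i] = base[align * offset[i] + 0];
 *         v1[i] = base[align * offset[i] + 1];
 *         v2[i] = base[align * offset[i] + 2];
 *         v3[i] = base[align * offset[i] + 3];
 *     }
 */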

template<int align>
static inline void gmx_simdcall gatherLoadUBySimdIntTranspose(const double* base,
                                                              SimdDInt32    simdoffset,
                                                              SimdDouble*   v0,
                                                              SimdDouble*   v1)
{
    // All instructions might be latency ~4 on MIC, so we use shifts where we
    // only need a single instruction (since the shift parameter is an immediate),
    // but multiplication otherwise.
    if (align == 2)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 1);
    }
    else if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdDInt32(align);
    }

    v0->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base, sizeof(double));
    v1->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 1, sizeof(double));
}

template<int align>
static inline void gmx_simdcall gatherLoadBySimdIntTranspose(const double* base,
                                                             SimdDInt32    simdoffset,
                                                             SimdDouble*   v0,
                                                             SimdDouble*   v1)
{
    assert(std::size_t(base) % 16 == 0);
    assert(align % 2 == 0);
    gatherLoadUBySimdIntTranspose<align>(base, simdoffset, v0, v1);
}


template<int align>
static inline void gmx_simdcall gatherLoadTranspose(const double*      base,
                                                    const std::int32_t offset[],
                                                    SimdDouble*        v0,
                                                    SimdDouble*        v1,
                                                    SimdDouble*        v2,
                                                    SimdDouble*        v3)
{
    gatherLoadBySimdIntTranspose<align>(base, simdLoad(offset, SimdDInt32Tag()), v0, v1, v2, v3);
}

template<int align>
static inline void gmx_simdcall
                   gatherLoadTranspose(const double* base, const std::int32_t offset[], SimdDouble* v0, SimdDouble* v1)
{
    gatherLoadBySimdIntTranspose<align>(base, simdLoad(offset, SimdDInt32Tag()), v0, v1);
}

static const int c_simdBestPairAlignmentDouble = 2;
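
/* A usage sketch for the constant above (illustrative only): callers that lay
 * out their data as aligned pairs of doubles can pass it as the alignment
 * template parameter of the pair load, e.g.
 *
 *     gatherLoadTranspose<c_simdBestPairAlignmentDouble>(base, offset, &v0, &v1);
 *
 * where base and offset are hypothetical caller-side names.
 */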

template<int align>
static inline void gmx_simdcall gatherLoadUTranspose(const double*      base,
                                                     const std::int32_t offset[],
                                                     SimdDouble*        v0,
                                                     SimdDouble*        v1,
                                                     SimdDouble*        v2)
{
    SimdDInt32 simdoffset;

    assert(std::size_t(offset) % 32 == 0);

    simdoffset = simdLoad(offset, SimdDInt32Tag());

    // All instructions might be latency ~4 on MIC, so we use shifts where we
    // only need a single instruction (since the shift parameter is an immediate),
    // but multiplication otherwise.
    if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdDInt32(align);
    }

    v0->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base, sizeof(double));
    v1->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 1, sizeof(double));
    v2->simdInternal_ = _mm512_i32logather_pd(simdoffset.simdInternal_, base + 2, sizeof(double));
}

template<int align>
static inline void gmx_simdcall transposeScatterStoreU(double*            base,
                                                       const std::int32_t offset[],
                                                       SimdDouble         v0,
                                                       SimdDouble         v1,
                                                       SimdDouble         v2)
{
    SimdDInt32 simdoffset;

    assert(std::size_t(offset) % 32 == 0);

    simdoffset = simdLoad(offset, SimdDInt32Tag());

    // All instructions might be latency ~4 on MIC, so we use shifts where we
    // only need a single instruction (since the shift parameter is an immediate),
    // but multiplication otherwise.
    if (align == 4)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 2);
    }
    else if (align == 8)
    {
        simdoffset.simdInternal_ = _mm512_slli_epi32(simdoffset.simdInternal_, 3);
    }
    else
    {
        simdoffset = simdoffset * SimdDInt32(align);
    }

    _mm512_i32loscatter_pd(base, simdoffset.simdInternal_, v0.simdInternal_, sizeof(double));
    _mm512_i32loscatter_pd(base + 1, simdoffset.simdInternal_, v1.simdInternal_, sizeof(double));
    _mm512_i32loscatter_pd(base + 2, simdoffset.simdInternal_, v2.simdInternal_, sizeof(double));
}
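
/* Scalar equivalent of the scatter store above (illustrative only):
 *
 *     for (int i = 0; i < GMX_SIMD_DOUBLE_WIDTH; i++)
 *     {
 *         base[align * offset[i] + 0] = v0[i];
 *         base[align * offset[i] + 1] = v1[i];
 *         base[align * offset[i] + 2] = v2[i];
 *     }
 */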

template<int align>
static inline void gmx_simdcall
                   transposeScatterIncrU(double* base, const std::int32_t offset[], SimdDouble v0, SimdDouble v1, SimdDouble v2)
{
    alignas(GMX_SIMD_ALIGNMENT) double rdata0[GMX_SIMD_DOUBLE_WIDTH];
    alignas(GMX_SIMD_ALIGNMENT) double rdata1[GMX_SIMD_DOUBLE_WIDTH];
    alignas(GMX_SIMD_ALIGNMENT) double rdata2[GMX_SIMD_DOUBLE_WIDTH];

    store(rdata0, v0);
    store(rdata1, v1);
    store(rdata2, v2);

    for (int i = 0; i < GMX_SIMD_DOUBLE_WIDTH; i++)
    {
        base[align * offset[i] + 0] += rdata0[i];
        base[align * offset[i] + 1] += rdata1[i];
        base[align * offset[i] + 2] += rdata2[i];
    }
}

template<int align>
static inline void gmx_simdcall
                   transposeScatterDecrU(double* base, const std::int32_t offset[], SimdDouble v0, SimdDouble v1, SimdDouble v2)
{
    alignas(GMX_SIMD_ALIGNMENT) double rdata0[GMX_SIMD_DOUBLE_WIDTH];
    alignas(GMX_SIMD_ALIGNMENT) double rdata1[GMX_SIMD_DOUBLE_WIDTH];
    alignas(GMX_SIMD_ALIGNMENT) double rdata2[GMX_SIMD_DOUBLE_WIDTH];

    store(rdata0, v0);
    store(rdata1, v1);
    store(rdata2, v2);

    for (int i = 0; i < GMX_SIMD_DOUBLE_WIDTH; i++)
    {
        base[align * offset[i] + 0] -= rdata0[i];
        base[align * offset[i] + 1] -= rdata1[i];
        base[align * offset[i] + 2] -= rdata2[i];
    }
}

static inline void gmx_simdcall expandScalarsToTriplets(SimdDouble  scalar,
                                                        SimdDouble* triplets0,
                                                        SimdDouble* triplets1,
                                                        SimdDouble* triplets2)
{
    triplets0->simdInternal_ = _mm512_castsi512_pd(
            _mm512_permutevar_epi32(_mm512_set_epi32(5, 4, 5, 4, 3, 2, 3, 2, 3, 2, 1, 0, 1, 0, 1, 0),
                                    _mm512_castpd_si512(scalar.simdInternal_)));
    triplets1->simdInternal_ = _mm512_castsi512_pd(_mm512_permutevar_epi32(
            _mm512_set_epi32(11, 10, 9, 8, 9, 8, 9, 8, 7, 6, 7, 6, 7, 6, 5, 4),
            _mm512_castpd_si512(scalar.simdInternal_)));
    triplets2->simdInternal_ = _mm512_castsi512_pd(_mm512_permutevar_epi32(
            _mm512_set_epi32(15, 14, 15, 14, 15, 14, 13, 12, 13, 12, 13, 12, 11, 10, 11, 10),
            _mm512_castpd_si512(scalar.simdInternal_)));
}
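
/* Illustrative lane mapping (assumes the 8-wide double SIMD of MIC): for an
 * input { s0, ..., s7 } the three outputs hold each scalar three times, in
 * order:
 *
 *     triplets0 = { s0, s0, s0, s1, s1, s1, s2, s2 }
 *     triplets1 = { s2, s3, s3, s3, s4, s4, s4, s5 }
 *     triplets2 = { s5, s5, s6, s6, s6, s7, s7, s7 }
 */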


static inline double gmx_simdcall
                     reduceIncr4ReturnSum(double* m, SimdDouble v0, SimdDouble v1, SimdDouble v2, SimdDouble v3)
{
    double  d;
    __m512d t0, t1, t2, t3;

    assert(std::size_t(m) % 32 == 0);

    t0 = _mm512_swizzle_pd(_mm512_mask_blend_pd(_mm512_int2mask(0x33), v0.simdInternal_, v2.simdInternal_),
                           _MM_SWIZ_REG_BADC);
    t2 = _mm512_mask_blend_pd(_mm512_int2mask(0x33), v2.simdInternal_, v0.simdInternal_);
    t1 = _mm512_swizzle_pd(_mm512_mask_blend_pd(_mm512_int2mask(0x33), v1.simdInternal_, v3.simdInternal_),
                           _MM_SWIZ_REG_BADC);
    t3 = _mm512_mask_blend_pd(_mm512_int2mask(0x33), v3.simdInternal_, v1.simdInternal_);
    t0 = _mm512_add_pd(t0, t2);
    t1 = _mm512_add_pd(t1, t3);

    t2 = _mm512_swizzle_pd(_mm512_mask_blend_pd(_mm512_int2mask(0x55), t0, t1), _MM_SWIZ_REG_CDAB);
    t3 = _mm512_mask_blend_pd(_mm512_int2mask(0x55), t1, t0);
    t2 = _mm512_add_pd(t2, t3);

    t2 = _mm512_add_pd(t2, _mm512_castps_pd(_mm512_permute4f128_ps(_mm512_castpd_ps(t2), _MM_PERM_BADC)));

    t0 = _mm512_mask_extload_pd(
            _mm512_undefined_pd(), _mm512_int2mask(0xF), m, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
    t0 = _mm512_add_pd(t0, t2);
    _mm512_mask_packstorelo_pd(m, _mm512_int2mask(0xF), t0);

    t2 = _mm512_add_pd(t2, _mm512_swizzle_pd(t2, _MM_SWIZ_REG_BADC));
    t2 = _mm512_add_pd(t2, _mm512_swizzle_pd(t2, _MM_SWIZ_REG_CDAB));

    _mm512_mask_packstorelo_pd(&d, _mm512_int2mask(0x01), t2);
    return d;
}
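
/* Scalar sketch of the reduction above (illustrative only): each m[j] is
 * incremented by the full sum of the corresponding input vector, and the
 * function returns the total of the four sums:
 *
 *     double s[4] = { sum(v0), sum(v1), sum(v2), sum(v3) };
 *     for (int j = 0; j < 4; j++)
 *     {
 *         m[j] += s[j];
 *     }
 *     return s[0] + s[1] + s[2] + s[3];
 *
 * Here sum() is a hypothetical helper denoting the sum over all SIMD lanes.
 */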

static inline SimdDouble gmx_simdcall loadDualHsimd(const double* m0, const double* m1)
{
    assert(std::size_t(m0) % 32 == 0);
    assert(std::size_t(m1) % 32 == 0);

    return _mm512_mask_extload_pd(_mm512_extload_pd(m0, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE),
                                  _mm512_int2mask(0xF0),
                                  m1,
                                  _MM_UPCONV_PD_NONE,
                                  _MM_BROADCAST_4X8,
                                  _MM_HINT_NONE);
}

static inline SimdDouble gmx_simdcall loadDuplicateHsimd(const double* m)
{
    assert(std::size_t(m) % 32 == 0);

    return _mm512_extload_pd(m, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
}

static inline SimdDouble gmx_simdcall loadU1DualHsimd(const double* m)
{
    return _mm512_mask_extload_pd(_mm512_extload_pd(m, _MM_UPCONV_PD_NONE, _MM_BROADCAST_1X8, _MM_HINT_NONE),
                                  _mm512_int2mask(0xF0),
                                  m + 1,
                                  _MM_UPCONV_PD_NONE,
                                  _MM_BROADCAST_1X8,
                                  _MM_HINT_NONE);
}


static inline void gmx_simdcall storeDualHsimd(double* m0, double* m1, SimdDouble a)
{
    assert(std::size_t(m0) % 32 == 0);
    assert(std::size_t(m1) % 32 == 0);

    _mm512_mask_packstorelo_pd(m0, _mm512_int2mask(0x0F), a.simdInternal_);
    _mm512_mask_packstorelo_pd(m1, _mm512_int2mask(0xF0), a.simdInternal_);
}

static inline void gmx_simdcall incrDualHsimd(double* m0, double* m1, SimdDouble a)
{
    assert(std::size_t(m0) % 32 == 0);
    assert(std::size_t(m1) % 32 == 0);

    __m512d x;

    // Update lower half
    x = _mm512_extload_pd(m0, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
    x = _mm512_add_pd(x, a.simdInternal_);
    _mm512_mask_packstorelo_pd(m0, _mm512_int2mask(0x0F), x);

    // Update upper half
    x = _mm512_extload_pd(m1, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
    x = _mm512_add_pd(x, a.simdInternal_);
    _mm512_mask_packstorelo_pd(m1, _mm512_int2mask(0xF0), x);
}
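
/* Scalar sketch of incrDualHsimd() (illustrative only; assumes 8-wide SIMD):
 * the low half of a increments the four doubles at m0, the high half the
 * four doubles at m1.
 *
 *     for (int i = 0; i < 4; i++)
 *     {
 *         m0[i] += a[i];
 *         m1[i] += a[i + 4];
 *     }
 */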

static inline void gmx_simdcall decr3Hsimd(double* m, SimdDouble a0, SimdDouble a1, SimdDouble a2)
{
    assert(std::size_t(m) % 32 == 0);
    decrHsimd(m, a0);
    decrHsimd(m + GMX_SIMD_DOUBLE_WIDTH / 2, a1);
    decrHsimd(m + GMX_SIMD_DOUBLE_WIDTH, a2);
}
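
/* Scalar sketch of decr3Hsimd() (illustrative only; assumes 8-wide SIMD):
 * twelve consecutive doubles at m are decremented by the half-register pair
 * sums of a0, a1 and a2.
 *
 *     for (int i = 0; i < 4; i++)
 *     {
 *         m[i]     -= a0[i] + a0[i + 4];
 *         m[i + 4] -= a1[i] + a1[i + 4];
 *         m[i + 8] -= a2[i] + a2[i + 4];
 *     }
 */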


template<int align>
static inline void gmx_simdcall gatherLoadTransposeHsimd(const double*      base0,
                                                         const double*      base1,
                                                         const std::int32_t offset[],
                                                         SimdDouble*        v0,
                                                         SimdDouble*        v1)
{
    __m512i idx0, idx1, idx;
    __m512d tmp1, tmp2;

    assert(std::size_t(offset) % 16 == 0);
    assert(std::size_t(base0) % 16 == 0);
    assert(std::size_t(base1) % 16 == 0);
    assert(align % 2 == 0);

    idx0 = _mm512_extload_epi32(offset, _MM_UPCONV_EPI32_NONE, _MM_BROADCAST_4X16, _MM_HINT_NONE);

    idx0 = _mm512_mullo_epi32(idx0, _mm512_set1_epi32(align));
    idx1 = _mm512_add_epi32(idx0, _mm512_set1_epi32(1));

    idx = _mm512_mask_permute4f128_epi32(idx0, _mm512_int2mask(0x00F0), idx1, _MM_PERM_AAAA);

    tmp1 = _mm512_i32logather_pd(idx, base0, sizeof(double));
    tmp2 = _mm512_i32logather_pd(idx, base1, sizeof(double));

    v0->simdInternal_ = _mm512_castps_pd(_mm512_mask_permute4f128_ps(
            _mm512_castpd_ps(tmp1), _mm512_int2mask(0xFF00), _mm512_castpd_ps(tmp2), _MM_PERM_BABA));
    v1->simdInternal_ = _mm512_castps_pd(_mm512_mask_permute4f128_ps(
            _mm512_castpd_ps(tmp2), _mm512_int2mask(0x00FF), _mm512_castpd_ps(tmp1), _MM_PERM_DCDC));
}
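
/* Scalar equivalent of the half-SIMD transpose gather above (illustrative
 * only; assumes 8-wide SIMD): the low four lanes of each output come from
 * base0, the high four lanes from base1.
 *
 *     for (int i = 0; i < 4; i++)
 *     {
 *         v0[i]     = base0[align * offset[i]];
 *         v0[i + 4] = base1[align * offset[i]];
 *         v1[i]     = base0[align * offset[i] + 1];
 *         v1[i + 4] = base1[align * offset[i] + 1];
 *     }
 */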

static inline double gmx_simdcall reduceIncr4ReturnSumHsimd(double* m, SimdDouble v0, SimdDouble v1)
{
    double  d;
    __m512d t0, t1;

    assert(std::size_t(m) % 32 == 0);

    t0 = _mm512_add_pd(v0.simdInternal_, _mm512_swizzle_pd(v0.simdInternal_, _MM_SWIZ_REG_BADC));
    t0 = _mm512_mask_add_pd(t0,
                            _mm512_int2mask(0xCC),
                            v1.simdInternal_,
                            _mm512_swizzle_pd(v1.simdInternal_, _MM_SWIZ_REG_BADC));
    t0 = _mm512_add_pd(t0, _mm512_swizzle_pd(t0, _MM_SWIZ_REG_CDAB));
    t0 = _mm512_castps_pd(_mm512_mask_permute4f128_ps(
            _mm512_castpd_ps(t0), _mm512_int2mask(0xCCCC), _mm512_castpd_ps(t0), _MM_PERM_DCDC));

    t1 = _mm512_mask_extload_pd(
            _mm512_undefined_pd(), _mm512_int2mask(0xF), m, _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE);
    t1 = _mm512_add_pd(t1, t0);
    _mm512_mask_packstorelo_pd(m, _mm512_int2mask(0xF), t1);

    t0 = _mm512_add_pd(t0, _mm512_swizzle_pd(t0, _MM_SWIZ_REG_BADC));
    t0 = _mm512_add_pd(t0, _mm512_swizzle_pd(t0, _MM_SWIZ_REG_CDAB));

    // Store only the first lane; after the reductions above every lane holds the total.
    _mm512_mask_packstorelo_pd(&d, _mm512_int2mask(0x01), t0);
    return d;
}
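
/* Scalar sketch of the half-SIMD reduction above (illustrative only; assumes
 * 8-wide SIMD): with the four half-register sums
 *
 *     s0 = sum(v0[0..3]), s1 = sum(v0[4..7]),
 *     s2 = sum(v1[0..3]), s3 = sum(v1[4..7]),
 *
 * the routine performs m[j] += sj for j = 0..3 and returns s0 + s1 + s2 + s3.
 */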

} // namespace gmx

#endif // GMX_SIMD_IMPL_X86_MIC_UTIL_DOUBLE_H