/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef GMX_SIMD_IMPL_IBM_VMX_UTIL_FLOAT_H
#define GMX_SIMD_IMPL_IBM_VMX_UTIL_FLOAT_H

#include "config.h"

#include <cstddef>
#include <cstdint>

#include "gromacs/utility/basedefinitions.h"

#include "impl_ibm_vmx_definitions.h"
#include "impl_ibm_vmx_simd_float.h"

namespace gmx
{
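
// Load 4 consecutive floats starting at each of the four aligned addresses
// base + align*offset[i], and transpose the resulting 4x4 block so that *v0
// holds element 0 from all four locations, *v1 element 1, and so on. The two
// rounds of vec_mergeh/vec_mergel interleaves form the standard 4x4 transpose.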
template<int align>
static inline void gmx_simdcall gatherLoadTranspose(const float*       base,
                                                    const std::int32_t offset[],
                                                    SimdFloat*         v0,
                                                    SimdFloat*         v1,
                                                    SimdFloat*         v2,
                                                    SimdFloat*         v3)
{
    *v0 = simdLoad(base + align * offset[0]);
    *v1 = simdLoad(base + align * offset[1]);
    *v2 = simdLoad(base + align * offset[2]);
    *v3 = simdLoad(base + align * offset[3]);

    __vector float t0 = vec_mergeh(v0->simdInternal_, v2->simdInternal_);
    __vector float t1 = vec_mergel(v0->simdInternal_, v2->simdInternal_);
    __vector float t2 = vec_mergeh(v1->simdInternal_, v3->simdInternal_);
    __vector float t3 = vec_mergel(v1->simdInternal_, v3->simdInternal_);
    v0->simdInternal_ = vec_mergeh(t0, t2);
    v1->simdInternal_ = vec_mergel(t0, t2);
    v2->simdInternal_ = vec_mergeh(t1, t3);
    v3->simdInternal_ = vec_mergel(t1, t3);
}
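
// Two-output variant. When align is a multiple of 4 we can reuse the aligned
// four-output transpose above and simply discard the two extra outputs.
// Otherwise VMX has no unaligned vector load, so each element is loaded
// individually with vec_lde and rotated into lane 0 with a vec_lvsl permute
// before the transpose merges.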
template<int align>
static inline void gmx_simdcall
gatherLoadTranspose(const float* base, const std::int32_t offset[], SimdFloat* v0, SimdFloat* v1)
{
    if (align % 4 == 0)
    {
        SimdFloat t2, t3;

        gatherLoadTranspose<align>(base, offset, v0, v1, &t2, &t3);
    }
    else
    {
        __vector float         t0, t1, t2, t3, t4, t5, t6, t7;
        __vector unsigned char p0, p1, p2, p3;

        // This is REALLY slow, since we have no choice but to load individual
        // elements when we cannot guarantee that we can access beyond the end of
        // the memory. Fortunately, 99% of the usage should be the aligned-to-4
        // case above instead.
        t0 = vec_lde(0, base + align * offset[0]);
        t1 = vec_lde(0, base + align * offset[1]);
        t2 = vec_lde(0, base + align * offset[2]);
        t3 = vec_lde(0, base + align * offset[3]);
        p0 = vec_lvsl(0, base + align * offset[0]);
        p1 = vec_lvsl(0, base + align * offset[1]);
        p2 = vec_lvsl(0, base + align * offset[2]);
        p3 = vec_lvsl(0, base + align * offset[3]);
        t0 = vec_perm(t0, t0, p0);
        t1 = vec_perm(t1, t1, p1);
        t2 = vec_perm(t2, t2, p2);
        t3 = vec_perm(t3, t3, p3);
        t0 = vec_mergeh(t0, t2);
        t1 = vec_mergeh(t1, t3);
        v0->simdInternal_ = vec_mergeh(t0, t1);

        t4 = vec_lde(0, base + align * offset[0] + 1);
        t5 = vec_lde(0, base + align * offset[1] + 1);
        t6 = vec_lde(0, base + align * offset[2] + 1);
        t7 = vec_lde(0, base + align * offset[3] + 1);
        p0 = vec_lvsl(0, base + align * offset[0] + 1);
        p1 = vec_lvsl(0, base + align * offset[1] + 1);
        p2 = vec_lvsl(0, base + align * offset[2] + 1);
        p3 = vec_lvsl(0, base + align * offset[3] + 1);
        t4 = vec_perm(t4, t4, p0);
        t5 = vec_perm(t5, t5, p1);
        t6 = vec_perm(t6, t6, p2);
        t7 = vec_perm(t7, t7, p3);
        t4 = vec_mergeh(t4, t6);
        t5 = vec_mergeh(t5, t7);
        v1->simdInternal_ = vec_mergeh(t4, t5);
    }
}
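
// Best alignment to use for aligned pairs of float data; callers that lay out
// float pairs for the two-output gatherLoadTranspose above should use this.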
static const int c_simdBestPairAlignmentFloat = 2;
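
// Gather 3 consecutive floats (e.g. an x/y/z coordinate triplet) from each of
// the four locations base + align*offset[i], which need not be vector-aligned,
// and transpose them into v0 (all x), v1 (all y) and v2 (all z).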
template<int align>
static inline void gmx_simdcall gatherLoadUTranspose(const float*       base,
                                                     const std::int32_t offset[],
                                                     SimdFloat*         v0,
                                                     SimdFloat*         v1,
                                                     SimdFloat*         v2)
{
    if (align % 4 == 0)
    {
        SimdFloat t3;

        gatherLoadTranspose<align>(base, offset, v0, v1, v2, &t3);
    }
    else
    {
        __vector float         t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11;
        __vector unsigned char p0, p1, p2, p3;

        // This is REALLY slow, since we have no choice but to load individual
        // elements when we cannot guarantee that we can access beyond the end of
        // the memory. Unfortunately this is likely the most common case.
        t0 = vec_lde(0, base + align * offset[0]);
        t1 = vec_lde(0, base + align * offset[1]);
        t2 = vec_lde(0, base + align * offset[2]);
        t3 = vec_lde(0, base + align * offset[3]);
        p0 = vec_lvsl(0, base + align * offset[0]);
        p1 = vec_lvsl(0, base + align * offset[1]);
        p2 = vec_lvsl(0, base + align * offset[2]);
        p3 = vec_lvsl(0, base + align * offset[3]);
        t0 = vec_perm(t0, t0, p0);
        t1 = vec_perm(t1, t1, p1);
        t2 = vec_perm(t2, t2, p2);
        t3 = vec_perm(t3, t3, p3);
        t0 = vec_mergeh(t0, t2);
        t1 = vec_mergeh(t1, t3);
        v0->simdInternal_ = vec_mergeh(t0, t1);

        t4 = vec_lde(0, base + align * offset[0] + 1);
        t5 = vec_lde(0, base + align * offset[1] + 1);
        t6 = vec_lde(0, base + align * offset[2] + 1);
        t7 = vec_lde(0, base + align * offset[3] + 1);
        p0 = vec_lvsl(0, base + align * offset[0] + 1);
        p1 = vec_lvsl(0, base + align * offset[1] + 1);
        p2 = vec_lvsl(0, base + align * offset[2] + 1);
        p3 = vec_lvsl(0, base + align * offset[3] + 1);
        t4 = vec_perm(t4, t4, p0);
        t5 = vec_perm(t5, t5, p1);
        t6 = vec_perm(t6, t6, p2);
        t7 = vec_perm(t7, t7, p3);
        t4 = vec_mergeh(t4, t6);
        t5 = vec_mergeh(t5, t7);
        v1->simdInternal_ = vec_mergeh(t4, t5);

        t8  = vec_lde(0, base + align * offset[0] + 2);
        t9  = vec_lde(0, base + align * offset[1] + 2);
        t10 = vec_lde(0, base + align * offset[2] + 2);
        t11 = vec_lde(0, base + align * offset[3] + 2);
        p0  = vec_lvsl(0, base + align * offset[0] + 2);
        p1  = vec_lvsl(0, base + align * offset[1] + 2);
        p2  = vec_lvsl(0, base + align * offset[2] + 2);
        p3  = vec_lvsl(0, base + align * offset[3] + 2);
        t8  = vec_perm(t8, t8, p0);
        t9  = vec_perm(t9, t9, p1);
        t10 = vec_perm(t10, t10, p2);
        t11 = vec_perm(t11, t11, p3);
        t8  = vec_mergeh(t8, t10);
        t9  = vec_mergeh(t9, t11);
        v2->simdInternal_ = vec_mergeh(t8, t9);
    }
}
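
// Scatter-store the transpose of v0/v1/v2 as triplets to four possibly
// unaligned locations. Only three floats are written per location: vec_lvsr
// rotates each triplet to match its target address, and three vec_ste
// single-element stores (byte offsets 0, 4, 8) leave the fourth lane untouched.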
template<int align>
static inline void gmx_simdcall
transposeScatterStoreU(float* base, const std::int32_t offset[], SimdFloat v0, SimdFloat v1, SimdFloat v2)
{
    __vector unsigned char p0, p1, p2, p3;

    __vector float t0 = vec_mergeh(v0.simdInternal_, v2.simdInternal_);
    __vector float t1 = vec_mergel(v0.simdInternal_, v2.simdInternal_);
    __vector float t2 = vec_mergeh(v1.simdInternal_, v2.simdInternal_);
    __vector float t3 = vec_mergel(v1.simdInternal_, v2.simdInternal_);
    __vector float t4 = vec_mergeh(t0, t2);
    __vector float t5 = vec_mergel(t0, t2);
    __vector float t6 = vec_mergeh(t1, t3);
    __vector float t7 = vec_mergel(t1, t3);

    p0 = vec_lvsr(0, base + align * offset[0]);
    p1 = vec_lvsr(0, base + align * offset[1]);
    p2 = vec_lvsr(0, base + align * offset[2]);
    p3 = vec_lvsr(0, base + align * offset[3]);

    t4 = vec_perm(t4, t4, p0);
    t5 = vec_perm(t5, t5, p1);
    t6 = vec_perm(t6, t6, p2);
    t7 = vec_perm(t7, t7, p3);

    vec_ste(t4, 0, base + align * offset[0]);
    vec_ste(t4, 4, base + align * offset[0]);
    vec_ste(t4, 8, base + align * offset[0]);
    vec_ste(t5, 0, base + align * offset[1]);
    vec_ste(t5, 4, base + align * offset[1]);
    vec_ste(t5, 8, base + align * offset[1]);
    vec_ste(t6, 0, base + align * offset[2]);
    vec_ste(t6, 4, base + align * offset[2]);
    vec_ste(t6, 8, base + align * offset[2]);
    vec_ste(t7, 0, base + align * offset[3]);
    vec_ste(t7, 4, base + align * offset[3]);
    vec_ste(t7, 8, base + align * offset[3]);
}
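
// Read-modify-write scatter increment: add the transposed triplets to memory.
// When align is a multiple of 4 every target is a full aligned vector, so we
// can use vec_ld/vec_add/vec_st; v1 is merged with zero so the fourth lane of
// each increment adds 0 to the element that follows the triplet. Otherwise we
// spill the inputs to aligned scratch buffers and fall back to scalar adds.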
template<int align>
static inline void gmx_simdcall
transposeScatterIncrU(float* base, const std::int32_t offset[], SimdFloat v0, SimdFloat v1, SimdFloat v2)
{
    if (align % 4 == 0)
    {
        __vector float zero = reinterpret_cast<__vector float>(vec_splat_u32(0));
        __vector float t0   = vec_mergeh(v0.simdInternal_, v2.simdInternal_);
        __vector float t1   = vec_mergel(v0.simdInternal_, v2.simdInternal_);
        __vector float t2   = vec_mergeh(v1.simdInternal_, zero);
        __vector float t3   = vec_mergel(v1.simdInternal_, zero);
        __vector float t4   = vec_mergeh(t0, t2);
        __vector float t5   = vec_mergel(t0, t2);
        __vector float t6   = vec_mergeh(t1, t3);
        __vector float t7   = vec_mergel(t1, t3);

        vec_st(vec_add(vec_ld(0, base + align * offset[0]), t4), 0, base + align * offset[0]);
        vec_st(vec_add(vec_ld(0, base + align * offset[1]), t5), 0, base + align * offset[1]);
        vec_st(vec_add(vec_ld(0, base + align * offset[2]), t6), 0, base + align * offset[2]);
        vec_st(vec_add(vec_ld(0, base + align * offset[3]), t7), 0, base + align * offset[3]);
    }
    else
    {
        alignas(GMX_SIMD_ALIGNMENT) float rdata0[GMX_SIMD_FLOAT_WIDTH];
        alignas(GMX_SIMD_ALIGNMENT) float rdata1[GMX_SIMD_FLOAT_WIDTH];
        alignas(GMX_SIMD_ALIGNMENT) float rdata2[GMX_SIMD_FLOAT_WIDTH];

        vec_st(v0.simdInternal_, 0, rdata0);
        vec_st(v1.simdInternal_, 0, rdata1);
        vec_st(v2.simdInternal_, 0, rdata2);

        base[align * offset[0] + 0] += rdata0[0];
        base[align * offset[0] + 1] += rdata1[0];
        base[align * offset[0] + 2] += rdata2[0];
        base[align * offset[1] + 0] += rdata0[1];
        base[align * offset[1] + 1] += rdata1[1];
        base[align * offset[1] + 2] += rdata2[1];
        base[align * offset[2] + 0] += rdata0[2];
        base[align * offset[2] + 1] += rdata1[2];
        base[align * offset[2] + 2] += rdata2[2];
        base[align * offset[3] + 0] += rdata0[3];
        base[align * offset[3] + 1] += rdata1[3];
        base[align * offset[3] + 2] += rdata2[3];
    }
}
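
// Same as transposeScatterIncrU above, but subtracting the triplets from memory.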
template<int align>
static inline void gmx_simdcall
transposeScatterDecrU(float* base, const std::int32_t offset[], SimdFloat v0, SimdFloat v1, SimdFloat v2)
{
    if (align % 4 == 0)
    {
        __vector float zero = reinterpret_cast<__vector float>(vec_splat_u32(0));
        __vector float t0   = vec_mergeh(v0.simdInternal_, v2.simdInternal_);
        __vector float t1   = vec_mergel(v0.simdInternal_, v2.simdInternal_);
        __vector float t2   = vec_mergeh(v1.simdInternal_, zero);
        __vector float t3   = vec_mergel(v1.simdInternal_, zero);
        __vector float t4   = vec_mergeh(t0, t2);
        __vector float t5   = vec_mergel(t0, t2);
        __vector float t6   = vec_mergeh(t1, t3);
        __vector float t7   = vec_mergel(t1, t3);

        vec_st(vec_sub(vec_ld(0, base + align * offset[0]), t4), 0, base + align * offset[0]);
        vec_st(vec_sub(vec_ld(0, base + align * offset[1]), t5), 0, base + align * offset[1]);
        vec_st(vec_sub(vec_ld(0, base + align * offset[2]), t6), 0, base + align * offset[2]);
        vec_st(vec_sub(vec_ld(0, base + align * offset[3]), t7), 0, base + align * offset[3]);
    }
    else
    {
        alignas(GMX_SIMD_ALIGNMENT) float rdata0[GMX_SIMD_FLOAT_WIDTH];
        alignas(GMX_SIMD_ALIGNMENT) float rdata1[GMX_SIMD_FLOAT_WIDTH];
        alignas(GMX_SIMD_ALIGNMENT) float rdata2[GMX_SIMD_FLOAT_WIDTH];

        vec_st(v0.simdInternal_, 0, rdata0);
        vec_st(v1.simdInternal_, 0, rdata1);
        vec_st(v2.simdInternal_, 0, rdata2);

        base[align * offset[0] + 0] -= rdata0[0];
        base[align * offset[0] + 1] -= rdata1[0];
        base[align * offset[0] + 2] -= rdata2[0];
        base[align * offset[1] + 0] -= rdata0[1];
        base[align * offset[1] + 1] -= rdata1[1];
        base[align * offset[1] + 2] -= rdata2[1];
        base[align * offset[2] + 0] -= rdata0[2];
        base[align * offset[2] + 1] -= rdata1[2];
        base[align * offset[2] + 2] -= rdata2[2];
        base[align * offset[3] + 0] -= rdata0[3];
        base[align * offset[3] + 1] -= rdata1[3];
        base[align * offset[3] + 2] -= rdata2[3];
    }
}
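
// Expand the four scalars {a,b,c,d} into the triplet vectors {a,a,a,b},
// {b,b,c,c} and {c,d,d,d}, i.e. each scalar repeated three times across
// consecutive lanes; this is the layout needed when one scalar multiplies all
// three components of a coordinate triplet. The permute masks index bytes:
// bytes 0-3 select float lane 0, bytes 4-7 lane 1, and so on.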
static inline void gmx_simdcall expandScalarsToTriplets(SimdFloat  scalar,
                                                        SimdFloat* triplets0,
                                                        SimdFloat* triplets1,
                                                        SimdFloat* triplets2)
{
    const __vector unsigned char perm0 = { 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7 };
    const __vector unsigned char perm1 = { 4, 5, 6, 7, 4, 5, 6, 7, 8, 9, 10, 11, 8, 9, 10, 11 };
    const __vector unsigned char perm2 = { 8,  9,  10, 11, 12, 13, 14, 15,
                                           12, 13, 14, 15, 12, 13, 14, 15 };

    triplets0->simdInternal_ = vec_perm(scalar.simdInternal_, scalar.simdInternal_, perm0);
    triplets1->simdInternal_ = vec_perm(scalar.simdInternal_, scalar.simdInternal_, perm1);
    triplets2->simdInternal_ = vec_perm(scalar.simdInternal_, scalar.simdInternal_, perm2);
}
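
// Variants of gatherLoadTranspose that take the offsets in a SIMD integer
// register: the offsets are spilled to an aligned scratch array, after which
// the plain-array versions above do the actual work.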
template<int align>
static inline void gmx_simdcall gatherLoadBySimdIntTranspose(const float* base,
                                                             SimdFInt32   offset,
                                                             SimdFloat*   v0,
                                                             SimdFloat*   v1,
                                                             SimdFloat*   v2,
                                                             SimdFloat*   v3)
{
    alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_FINT32_WIDTH];

    vec_st(offset.simdInternal_, 0, ioffset);
    gatherLoadTranspose<align>(base, ioffset, v0, v1, v2, v3);
}

template<int align>
static inline void gmx_simdcall
gatherLoadBySimdIntTranspose(const float* base, SimdFInt32 offset, SimdFloat* v0, SimdFloat* v1)
{
    alignas(GMX_SIMD_ALIGNMENT) std::int32_t ioffset[GMX_SIMD_FINT32_WIDTH];

    vec_st(offset.simdInternal_, 0, ioffset);
    gatherLoadTranspose<align>(base, ioffset, v0, v1);
}
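
// Transpose v0..v3 so that lane i collects the elements of input vector i;
// three vector additions then yield all four per-vector sums at once. The four
// sums are added into m[0..3], and the total of the four sums is returned.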
static inline float gmx_simdcall reduceIncr4ReturnSum(float* m, SimdFloat v0, SimdFloat v1, SimdFloat v2, SimdFloat v3)
{
    __vector float t0 = vec_mergeh(v0.simdInternal_, v2.simdInternal_);
    __vector float t1 = vec_mergel(v0.simdInternal_, v2.simdInternal_);
    __vector float t2 = vec_mergeh(v1.simdInternal_, v3.simdInternal_);
    __vector float t3 = vec_mergel(v1.simdInternal_, v3.simdInternal_);
    v0.simdInternal_  = vec_mergeh(t0, t2);
    v1.simdInternal_  = vec_mergel(t0, t2);
    v2.simdInternal_  = vec_mergeh(t1, t3);
    v3.simdInternal_  = vec_mergel(t1, t3);

    v0 = v0 + v1;
    v2 = v2 + v3;
    v0 = v0 + v2;
    v2 = v0 + simdLoad(m);
    store(m, v2);

    return reduce(v0);
}

} // namespace gmx

#endif // GMX_SIMD_IMPL_IBM_VMX_UTIL_FLOAT_H