/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef GMX_SIMD_IMPLEMENTATION_IBM_VSX_H
#define GMX_SIMD_IMPLEMENTATION_IBM_VSX_H
/* IBM VSX SIMD instruction wrappers. Power7 and later.
 *
 * While this instruction set is similar to VMX, there are quite a few differences
 * that make it easier to understand if we start from scratch rather than by
 * including the VMX implementation and changing lots of things.
 */
/* Make sure we do not screw up C++ - undefine vector/bool, and rely on __vector,
 * which is present with both gcc and xlc.
 */
#undef vector
/* g++ is also unhappy with the clash of vector bool and the C++ reserved 'bool',
 * which is solved by undefining bool and relying on __bool. However, that does
 * not work with xlc, which requires us to use bool. Solve the conflict by
 * defining a new vsx_bool.
 */
#ifdef __GNUC__
#    undef  bool
#    define vsx_bool __bool
#else
#    define vsx_bool bool
#endif
/* Since we've had to use quite a few compiler and endian defines in this code
 * it might not 'just work' when e.g. clang provides VSX support. If you are
 * reading this because you saw the error below for a new compiler, try removing
 * the checks, but then make sure you run 'make test'.
 */
#if !(defined __GNUC__) && !(defined __IBMC__) && !(defined __IBMCPP__)
#    error VSX acceleration is very compiler-dependent, and only tested for gcc & xlc.
#endif

#if !(defined __BIG_ENDIAN__) && !(defined __LITTLE_ENDIAN__)
#    error VSX platform not recognized - both gcc & xlc should define big or little endian!
#endif
/* Capability definitions for IBM VSX */
#define GMX_SIMD_HAVE_FLOAT
#define GMX_SIMD_HAVE_DOUBLE
#define GMX_SIMD_HAVE_HARDWARE
#define GMX_SIMD_HAVE_LOADU
#define GMX_SIMD_HAVE_STOREU
#define GMX_SIMD_HAVE_LOGICAL
#define GMX_SIMD_HAVE_FMA
#undef  GMX_SIMD_HAVE_FRACTION
#define GMX_SIMD_HAVE_FINT32
#define GMX_SIMD_HAVE_FINT32_EXTRACT
#define GMX_SIMD_HAVE_FINT32_LOGICAL
#define GMX_SIMD_HAVE_FINT32_ARITHMETICS
#define GMX_SIMD_HAVE_DINT32
#define GMX_SIMD_HAVE_DINT32_EXTRACT
#define GMX_SIMD_HAVE_DINT32_LOGICAL
#define GMX_SIMD_HAVE_DINT32_ARITHMETICS
#define GMX_SIMD4_HAVE_FLOAT
#undef  GMX_SIMD4_HAVE_DOUBLE
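
/* Illustrative sketch only (not part of this header): higher-level code is
 * expected to branch on the capability defines above, e.g. to pick an FMA
 * path. The helper name below is hypothetical.
 *
 *     static gmx_inline gmx_simd_float_t
 *     example_axpb_f(gmx_simd_float_t a, gmx_simd_float_t x, gmx_simd_float_t b)
 *     {
 *     #ifdef GMX_SIMD_HAVE_FMA
 *         return gmx_simd_fmadd_f(a, x, b);                  (fused a*x+b)
 *     #else
 *         return gmx_simd_add_f(gmx_simd_mul_f(a, x), b);
 *     #endif
 *     }
 */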
/* Implementation details */
#define GMX_SIMD_FLOAT_WIDTH        4
#define GMX_SIMD_DOUBLE_WIDTH       2
#define GMX_SIMD_FINT32_WIDTH       4
#define GMX_SIMD_DINT32_WIDTH       2
#define GMX_SIMD_RSQRT_BITS        14
#define GMX_SIMD_RCP_BITS          14
/****************************************************
 *      SINGLE PRECISION SIMD IMPLEMENTATION        *
 ****************************************************/
#define gmx_simd_float_t           __vector float
#ifdef __GNUC__
#    define gmx_simd_load_f(m)     vec_vsx_ld(0, (const float *)(m))
#    define gmx_simd_store_f(m, x) vec_vsx_st(x, 0, (float *)(m))
#else
/* IBM xlC */
#    define gmx_simd_load_f(m)     vec_xlw4(0, (float *)(m))
#    define gmx_simd_store_f(m, x) vec_xstw4(x, 0, (float *)(m))
#endif
#define gmx_simd_load1_f(m)        vec_splats((float)(*m))
#define gmx_simd_set1_f(x)         vec_splats((float)(x))
#define gmx_simd_loadu_f           gmx_simd_load_f
#define gmx_simd_storeu_f          gmx_simd_store_f
#define gmx_simd_setzero_f()       vec_splats(0.0f)
#define gmx_simd_add_f(a, b)       vec_add(a, b)
#define gmx_simd_sub_f(a, b)       vec_sub(a, b)
#define gmx_simd_mul_f(a, b)       vec_mul(a, b)
#define gmx_simd_fmadd_f(a, b, c)  vec_madd(a, b, c)
#define gmx_simd_fmsub_f(a, b, c)  vec_msub(a, b, c)
/* IBM uses an alternative FMA definition, so -a*b+c=-(a*b-c) is "nmsub" */
#define gmx_simd_fnmadd_f(a, b, c) vec_nmsub(a, b, c)
/* IBM uses an alternative FMA definition, so -a*b-c=-(a*b+c) is "nmadd" */
#define gmx_simd_fnmsub_f(a, b, c) vec_nmadd(a, b, c)
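/* Worked example of the sign conventions above (illustrative only): with
 * a=2, b=3, c=10, gmx_simd_fnmadd_f(a, b, c) must return -a*b+c = 4, and
 * IBM's nmsub computes -(a*b-c) = -(6-10) = 4, so vec_nmsub() is the right
 * match even though its name differs from the x86-style "fnmadd".
 */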
#define gmx_simd_and_f(a, b)       vec_and(a, b)
#define gmx_simd_andnot_f(a, b)    vec_andc(b, a)
#define gmx_simd_or_f(a, b)        vec_or(a, b)
#define gmx_simd_xor_f(a, b)       vec_xor(a, b)
#define gmx_simd_rsqrt_f(a)        vec_rsqrte(a)
#define gmx_simd_rcp_f(a)          vec_re(a)
#define gmx_simd_fabs_f(a)         vec_abs(a)
#ifdef __GNUC__
/* gcc up to at least version 4.9 is missing intrinsics for vec_neg(), use inline asm. */
#    define gmx_simd_fneg_f(a)     ({ __vector float res; __asm__ ("xvnegsp %0,%1" : \
                                       "=ww" (res) : "ww" ((__vector float)(a))); res; })
#else
/* IBM xlC */
#    define gmx_simd_fneg_f(a)     vec_neg(a)
#endif
#define gmx_simd_max_f(a, b)       vec_max(a, b)
#define gmx_simd_min_f(a, b)       vec_min(a, b)
#define gmx_simd_round_f(a)        vec_round(a)
#define gmx_simd_trunc_f(a)        vec_trunc(a)
#define gmx_simd_fraction_f(x)     vec_sub(x, vec_trunc(x))
#define gmx_simd_get_exponent_f(a) gmx_simd_get_exponent_f_ibm_vsx(a)
#define gmx_simd_get_mantissa_f(a) gmx_simd_get_mantissa_f_ibm_vsx(a)
#define gmx_simd_set_exponent_f(a) gmx_simd_set_exponent_f_ibm_vsx(a)
/* integer datatype corresponding to float: gmx_simd_fint32_t */
#define gmx_simd_fint32_t           __vector signed int
#ifdef __GNUC__
#    define gmx_simd_load_fi(m)     vec_vsx_ld(0, (const int *)(m))
#    define gmx_simd_store_fi(m, x) vec_vsx_st(x, 0, (int *)(m))
#else
/* IBM xlC */
#    define gmx_simd_load_fi(m)     vec_xlw4(0, (int *)(m))
#    define gmx_simd_store_fi(m, x) vec_xstw4(x, 0, (int *)(m))
#endif
#define gmx_simd_set1_fi(i)        vec_splats((int)(i))
#define gmx_simd_loadu_fi          gmx_simd_load_fi
#define gmx_simd_storeu_fi         gmx_simd_store_fi
#define gmx_simd_setzero_fi()      vec_splats((int)0)
#define gmx_simd_cvt_f2i(a)        vec_cts(vec_round(a), 0)
#define gmx_simd_cvtt_f2i(a)       vec_cts(a, 0)
#define gmx_simd_cvt_i2f(a)        vec_ctf(a, 0)
#define gmx_simd_extract_fi(a, i)  gmx_simd_extract_fi_ibm_vsx(a, i)
/* Integer logical ops on gmx_simd_fint32_t */
#define gmx_simd_slli_fi(a, i)     vec_sl(a, vec_splats((unsigned int)(i)))
#define gmx_simd_srli_fi(a, i)     vec_sr(a, vec_splats((unsigned int)(i)))
#define gmx_simd_and_fi(a, b)      vec_and(a, b)
#define gmx_simd_andnot_fi(a, b)   vec_andc(b, a)
#define gmx_simd_or_fi(a, b)       vec_or(a, b)
#define gmx_simd_xor_fi(a, b)      vec_xor(a, b)
/* Integer arithmetic ops on gmx_simd_fint32_t */
#define gmx_simd_add_fi(a, b)      vec_add(a, b)
#define gmx_simd_sub_fi(a, b)      vec_sub(a, b)
#ifdef __GNUC__
/* gcc up to at least version 4.9 does not provide vec_mul() for integers */
#    ifdef __BIG_ENDIAN__
#        define gmx_simd_mul_fi(a, b) vec_mulo((__vector short)a, (__vector short)b)
#    else
#        define gmx_simd_mul_fi(a, b) vec_mule((__vector short)a, (__vector short)b)
#    endif
#else
/* IBM xlC */
#    define gmx_simd_mul_fi(a, b)     vec_mul(a, b)
#endif
/* Boolean & comparison operations on gmx_simd_float_t */
#define gmx_simd_fbool_t           __vector vsx_bool int
#define gmx_simd_cmpeq_f(a, b)     vec_cmpeq(a, b)
#define gmx_simd_cmplt_f(a, b)     vec_cmplt(a, b)
#define gmx_simd_cmple_f(a, b)     vec_cmple(a, b)
#define gmx_simd_and_fb(a, b)      vec_and(a, b)
#define gmx_simd_or_fb(a, b)       vec_or(a, b)
#define gmx_simd_anytrue_fb(a)     vec_any_ne(a, (__vector vsx_bool int)vec_splats(0))
#define gmx_simd_blendzero_f(a, sel)    vec_and(a, (__vector float)sel)
#define gmx_simd_blendnotzero_f(a, sel) vec_andc(a, (__vector float)sel)
#define gmx_simd_blendv_f(a, b, sel)    vec_sel(a, b, sel)
#define gmx_simd_reduce_f(a)       gmx_simd_reduce_f_ibm_vsx(a)
/* Boolean & comparison operations on gmx_simd_fint32_t */
#define gmx_simd_fibool_t          __vector vsx_bool int
#define gmx_simd_cmpeq_fi(a, b)    vec_cmpeq(a, b)
#define gmx_simd_cmplt_fi(a, b)    vec_cmplt(a, b)
#define gmx_simd_and_fib(a, b)     vec_and(a, b)
#define gmx_simd_or_fib(a, b)      vec_or(a, b)
#define gmx_simd_anytrue_fib(a)    vec_any_ne(a, (__vector vsx_bool int)vec_splats(0))
#define gmx_simd_blendzero_fi(a, sel)    vec_and(a, (__vector signed int)sel)
#define gmx_simd_blendnotzero_fi(a, sel) vec_andc(a, (__vector signed int)sel)
#define gmx_simd_blendv_fi(a, b, sel)    vec_sel(a, b, sel)
/* Conversions between different booleans */
#define gmx_simd_cvt_fb2fib(x)     (x)
#define gmx_simd_cvt_fib2fb(x)     (x)
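
/* Illustrative sketch only: the boolean type and blend operations above are
 * typically combined for branchless selection, e.g. clamping negative
 * elements to zero:
 *
 *     gmx_simd_fbool_t mask = gmx_simd_cmplt_f(x, gmx_simd_setzero_f());
 *     x = gmx_simd_blendnotzero_f(x, mask);
 *
 * blendnotzero keeps the lanes where the selector is false, so lanes with
 * x<0 become 0.0f while all other lanes keep their value.
 */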
/****************************************************
 *      DOUBLE PRECISION SIMD IMPLEMENTATION        *
 ****************************************************/
#define gmx_simd_double_t          __vector double
#ifdef __GNUC__
#    define gmx_simd_load_d(m)     vec_vsx_ld(0, (const __vector double *)(m))
#    define gmx_simd_store_d(m, x) vec_vsx_st(x, 0, (__vector double *)(m))
#else
/* IBM xlC */
#    define gmx_simd_load_d(m)     vec_xld2(0, (double *)(m))
#    define gmx_simd_store_d(m, x) vec_xstd2(x, 0, (double *)(m))
#endif
#define gmx_simd_load1_d(m)        vec_splats((double)(*m))
#define gmx_simd_set1_d(x)         vec_splats((double)(x))
#define gmx_simd_loadu_d           gmx_simd_load_d
#define gmx_simd_storeu_d          gmx_simd_store_d
#define gmx_simd_setzero_d()       vec_splats(0.0)
#define gmx_simd_add_d(a, b)       vec_add(a, b)
#define gmx_simd_sub_d(a, b)       vec_sub(a, b)
#define gmx_simd_mul_d(a, b)       vec_mul(a, b)
#define gmx_simd_fmadd_d(a, b, c)  vec_madd(a, b, c)
#define gmx_simd_fmsub_d(a, b, c)  vec_msub(a, b, c)
/* IBM uses an alternative FMA definition, so -a*b+c=-(a*b-c) is "nmsub" */
#define gmx_simd_fnmadd_d(a, b, c) vec_nmsub(a, b, c)
/* IBM uses an alternative FMA definition, so -a*b-c=-(a*b+c) is "nmadd" */
#define gmx_simd_fnmsub_d(a, b, c) vec_nmadd(a, b, c)
#define gmx_simd_and_d(a, b)       vec_and(a, b)
#define gmx_simd_andnot_d(a, b)    vec_andc(b, a)
#define gmx_simd_or_d(a, b)        vec_or(a, b)
#define gmx_simd_xor_d(a, b)       vec_xor(a, b)
#define gmx_simd_rsqrt_d(a)        vec_rsqrte(a)
#define gmx_simd_rcp_d(a)          vec_re(a)
#define gmx_simd_fabs_d(a)         vec_abs(a)
#ifdef __GNUC__
/* gcc up to at least version 4.9 is missing intrinsics for vec_neg(), use inline asm. */
#    define gmx_simd_fneg_d(a)     ({ __vector double res; __asm__ ("xvnegdp %0,%1" : \
                                       "=ww" (res) : "ww" ((__vector double)(a))); res; })
#else
/* IBM xlC */
#    define gmx_simd_fneg_d(a)     vec_neg(a)
#endif
#define gmx_simd_max_d(a, b)       vec_max(a, b)
#define gmx_simd_min_d(a, b)       vec_min(a, b)
#ifdef __GNUC__
/* gcc up to at least version 4.9 does not support vec_round() in double precision. */
#    define gmx_simd_round_d(a)    ({ __vector double res; __asm__ ("xvrdpi %0,%1" : \
                                       "=ww" (res) : "ww" ((__vector double)(a))); res; })
#else
/* IBM xlC */
#    define gmx_simd_round_d(a)    vec_round(a)
#endif
#define gmx_simd_trunc_d(a)        vec_trunc(a)
#define gmx_simd_fraction_d(x)     vec_sub(x, vec_trunc(x))
#define gmx_simd_get_exponent_d(a) gmx_simd_get_exponent_d_ibm_vsx(a)
#define gmx_simd_get_mantissa_d(a) gmx_simd_get_mantissa_d_ibm_vsx(a)
#define gmx_simd_set_exponent_d(a) gmx_simd_set_exponent_d_ibm_vsx(a)
/* integer datatype corresponding to double: gmx_simd_dint32_t */
#define gmx_simd_dint32_t          __vector signed int
#define gmx_simd_load_di(m)        gmx_simd_load_di_ibm_vsx(m)
#define gmx_simd_store_di(m, x)    gmx_simd_store_di_ibm_vsx(m, x)
#define gmx_simd_set1_di(i)        vec_splats((int)(i))
#define gmx_simd_loadu_di          gmx_simd_load_di
#define gmx_simd_storeu_di         gmx_simd_store_di
#define gmx_simd_setzero_di()      vec_splats((int)0)
#ifdef __GNUC__
/* gcc up to at least version 4.9 is missing intrinsics for double precision
 * to integer conversion, use inline asm instead.
 */
#    define gmx_simd_cvtt_d2i(a)   ({ __vector signed int res; __asm__ ("xvcvdpsxws %0,%1" : \
                                       "=ww" (res) : "ww" ((__vector double)(a))); res; })
#    define gmx_simd_cvt_i2d(a)    ({ __vector double res; __asm__ ("xvcvsxwdp %0,%1" : \
                                       "=ww" (res) : "ww" ((__vector signed int)(a))); res; })
#else
/* IBM xlC */
#    define gmx_simd_cvtt_d2i(a)   vec_cts(a, 0)
#    define gmx_simd_cvt_i2d(a)    vec_ctd(a, 0)
#endif
#define gmx_simd_cvt_d2i(a)        gmx_simd_cvtt_d2i(gmx_simd_round_d(a))
#define gmx_simd_extract_di(a, i)  gmx_simd_extract_fi_ibm_vsx(a, (i)*2)
/* Integer logical ops on gmx_simd_dint32_t */
#define gmx_simd_slli_di(a, i)     vec_sl(a, vec_splats((unsigned int)(i)))
#define gmx_simd_srli_di(a, i)     vec_sr(a, vec_splats((unsigned int)(i)))
#define gmx_simd_and_di(a, b)      vec_and(a, b)
#define gmx_simd_andnot_di(a, b)   vec_andc(b, a)
#define gmx_simd_or_di(a, b)       vec_or(a, b)
#define gmx_simd_xor_di(a, b)      vec_xor(a, b)
/* Integer arithmetic ops on gmx_simd_dint32_t */
#define gmx_simd_add_di(a, b)      vec_add(a, b)
#define gmx_simd_sub_di(a, b)      vec_sub(a, b)
#ifdef __GNUC__
/* gcc up to at least version 4.9 does not provide vec_mul() for integers */
#    ifdef __BIG_ENDIAN__
#        define gmx_simd_mul_di(a, b) vec_mulo((__vector short)a, (__vector short)b)
#    else
#        define gmx_simd_mul_di(a, b) vec_mule((__vector short)a, (__vector short)b)
#    endif
#else
/* IBM xlC */
#    define gmx_simd_mul_di(a, b)     vec_mul(a, b)
#endif
/* Boolean & comparison operations on gmx_simd_double_t */
#define gmx_simd_dbool_t           __vector vsx_bool long long
#define gmx_simd_cmpeq_d(a, b)     vec_cmpeq(a, b)
#define gmx_simd_cmplt_d(a, b)     vec_cmplt(a, b)
#define gmx_simd_cmple_d(a, b)     vec_cmple(a, b)
#define gmx_simd_and_db(a, b)      (__vector vsx_bool long long)vec_and((__vector signed int)a, (__vector signed int)b)
#define gmx_simd_or_db(a, b)       (__vector vsx_bool long long)vec_or((__vector signed int)a, (__vector signed int)b)
#define gmx_simd_anytrue_db(a)     vec_any_ne((__vector vsx_bool int)a, (__vector vsx_bool int)vec_splats(0))
#define gmx_simd_blendzero_d(a, sel)    vec_and(a, (__vector double)sel)
#define gmx_simd_blendnotzero_d(a, sel) vec_andc(a, (__vector double)sel)
#define gmx_simd_blendv_d(a, b, sel)    vec_sel(a, b, sel)
#define gmx_simd_reduce_d(a)       gmx_simd_reduce_d_ibm_vsx(a)
/* Boolean & comparison operations on gmx_simd_dint32_t */
#define gmx_simd_dibool_t          __vector vsx_bool int
#define gmx_simd_cmpeq_di(a, b)    vec_cmpeq(a, b)
#define gmx_simd_cmplt_di(a, b)    vec_cmplt(a, b)
#define gmx_simd_and_dib(a, b)     vec_and(a, b)
#define gmx_simd_or_dib(a, b)      vec_or(a, b)
/* Since we have applied all operations to pairs of elements we can work on all elements here */
#define gmx_simd_anytrue_dib(a)    vec_any_ne(a, (__vector vsx_bool int)vec_splats(0))
#define gmx_simd_blendzero_di(a, sel)    vec_and(a, (__vector signed int)sel)
#define gmx_simd_blendnotzero_di(a, sel) vec_andc(a, (__vector signed int)sel)
#define gmx_simd_blendv_di(a, b, sel)    vec_sel(a, b, sel)
/* Conversions between different booleans */
#define gmx_simd_cvt_db2dib(x)     (__vector vsx_bool int)(x)
#define gmx_simd_cvt_dib2db(x)     (__vector vsx_bool long long)(x)
/* Float/double conversion */
#define gmx_simd_cvt_f2dd(f, d0, d1) gmx_simd_cvt_f2dd_ibm_vsx(f, d0, d1)
#define gmx_simd_cvt_dd2f(d0, d1)    gmx_simd_cvt_dd2f_ibm_vsx(d0, d1)
/* Convenience defines to work around GCC/xlc compiler differences */
#ifdef __GNUC__
/* vec_xxsldwi() and vec_xxpermdi() were missing from altivec.h
 * in gcc-4.8, but the builtins are available. This was fixed in gcc-4.9.
 */
#    ifndef vec_xxsldwi
#        define vec_xxsldwi    __builtin_vsx_xxsldwi
#    endif
#    ifndef vec_xxpermdi
#        define vec_xxpermdi   __builtin_vsx_xxpermdi
#    endif
/* gcc up to at least 4.9 is missing intrinsics for
 * double-to-float and float-to-double conversions. Use inline asm.
 */
#    define gmx_vsx_f2d(x)     ({ __vector double res; __asm__ ("xvcvspdp %0,%1" : \
                                   "=ww" (res) : "ww" ((__vector float)(x))); res; })
#    define gmx_vsx_d2f(x)     ({ __vector float res; __asm__ ("xvcvdpsp %0,%1" : \
                                   "=ww" (res) : "ww" ((__vector double)(x))); res; })
#else
/* IBM xlC */
#    define vec_xxsldwi        vec_sldw
#    define vec_xxpermdi       vec_permi
/* f2d and d2f are indeed identical on xlC; the operation is selected by the
 * argument and result types.
 */
#    define gmx_vsx_f2d(x)     vec_cvf(x)
#    define gmx_vsx_d2f(x)     vec_cvf(x)
#endif
/****************************************************
 *   SINGLE PREC. IMPLEMENTATION HELPER FUNCTIONS   *
 ****************************************************/
static gmx_inline gmx_simd_float_t
gmx_simd_get_exponent_f_ibm_vsx(gmx_simd_float_t x)
{
    /* Generate 0x7F800000 without memory operations */
    gmx_simd_float_t  expmask = (__vector float)vec_splats(0x7F800000);
    gmx_simd_fint32_t i127    = vec_splats(127);
    gmx_simd_fint32_t iexp;

    iexp = (__vector signed int)gmx_simd_and_f(x, expmask);
    iexp = vec_sub(gmx_simd_srli_fi(iexp, 23), i127);
    return vec_ctf(iexp, 0);
}
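
/* Illustrative check of the bit manipulation above: 8.0f is stored as
 * 0x41000000, so masking with 0x7F800000 and shifting right by 23 bits
 * yields the biased exponent 130, and 130-127 = 3, i.e. the routine
 * returns 3.0f in each lane since 8.0 = 1.0 * 2^3.
 */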
static gmx_inline gmx_simd_float_t
gmx_simd_get_mantissa_f_ibm_vsx(gmx_simd_float_t x)
{
    gmx_simd_float_t expmask = (__vector float)vec_splats(0x7F800000);

    x = gmx_simd_andnot_f(expmask, vec_abs(x));
    /* Reset zero (but correctly biased) exponent */
    return gmx_simd_or_f(x, vec_splats(1.0f));
}
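
/* Illustrative check: for 12.0f = 1.5 * 2^3 the mask removes the sign and
 * exponent bits, and OR-ing with 1.0f (0x3F800000) rebiases the exponent
 * to zero, so the routine returns 1.5f.
 */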
static gmx_inline gmx_simd_float_t
gmx_simd_set_exponent_f_ibm_vsx(gmx_simd_float_t x)
{
    gmx_simd_fint32_t iexp = gmx_simd_cvt_f2i(x);
    gmx_simd_fint32_t i127 = vec_splats(127);

    iexp = gmx_simd_slli_fi(vec_add(iexp, i127), 23);
    return (__vector float)iexp;
}
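
/* Illustrative check: for an input of 3.0f the rounded integer is 3, and
 * (3+127)<<23 = 0x41000000 is the bit pattern of 8.0f = 2^3, making this
 * the inverse of gmx_simd_get_exponent_f_ibm_vsx() above.
 */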
static gmx_inline float
gmx_simd_reduce_f_ibm_vsx(gmx_simd_float_t x)
{
    x = vec_add(x, vec_xxsldwi(x, x, 2));
    x = vec_add(x, vec_xxsldwi(x, x, 1));
    return vec_extract(x, 0);
}
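
/* Illustrative trace: for x = [a,b,c,d] the first shift-and-add produces
 * [a+c, b+d, a+c, b+d]; adding the same vector rotated by one more word
 * then combines the two partial sums, so the extracted element holds
 * a+b+c+d.
 */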
static gmx_inline int
gmx_simd_extract_fi_ibm_vsx(gmx_simd_fint32_t gmx_unused x, unsigned int i)
{
/* For some reason gcc-4.9 warns about x being unused, which it isn't. */
#if (defined __GNUC__) && (defined __LITTLE_ENDIAN__)
    /* On Power7 and earlier big-endian architectures the GNU version of vec_extract()
     * works fine, but it appears to be buggy on Power8 running in little-endian mode.
     * Use inline assembly instead. Since this can only happen on Power8 and later,
     * we can use a Power8-specific direct move instruction.
     */
    int res;
    __asm__ ("mfvsrwz %0,%1" : "=r" (res) :
             "ww" (vec_xxsldwi(x, x, (2-i) & 0x3)));
    return res;
#else
    /* IBM xlC, and GNU when running in big-endian mode */
    return vec_extract(x, i);
#endif
}
/****************************************************
 *   DOUBLE PREC. IMPLEMENTATION HELPER FUNCTIONS   *
 ****************************************************/
static gmx_inline gmx_simd_dint32_t
gmx_simd_load_di_ibm_vsx(const int *m)
{
    __vector signed int vi;
    __vector signed int d0 = vec_splats(m[0]);
    __vector signed int d1 = vec_splats(m[1]);
#ifdef __LITTLE_ENDIAN__
    vi = (__vector signed int)vec_xxpermdi((__vector double)d1, (__vector double)d0, 0x0);
#else
    vi = (__vector signed int)vec_xxpermdi((__vector double)d0, (__vector double)d1, 0x0);
#endif
    return vi;
}
static gmx_inline void
gmx_simd_store_di_ibm_vsx(int *m, gmx_simd_dint32_t x)
{
    m[0] = gmx_simd_extract_di(x, 0);
    m[1] = gmx_simd_extract_di(x, 1);
}
static gmx_inline gmx_simd_double_t
gmx_simd_get_exponent_d_ibm_vsx(gmx_simd_double_t x)
{
    gmx_simd_double_t expmask = (__vector double)vec_mergel(vec_splats((int)0x7FF00000), vec_splats((int)0));
    gmx_simd_dint32_t i1023   = vec_splats(1023);
    gmx_simd_dint32_t iexp;

    iexp = (__vector signed int)gmx_simd_and_d(x, expmask);
    /* The upper half of each double is already in positions 0/2, so we only need
     * to shift by another 52-32=20 bits.
     */
    iexp = gmx_simd_srli_fi(iexp, 20);
    iexp = vec_sub(iexp, i1023);
    /* Now we have the correct integer in elements 0 & 2. Never mind about elements 1,3 */
    return gmx_simd_cvt_i2d(iexp);
}
static gmx_inline gmx_simd_double_t
gmx_simd_get_mantissa_d_ibm_vsx(gmx_simd_double_t x)
{
    gmx_simd_double_t expmask = (__vector double)vec_mergel(vec_splats((int)0x7FF00000), vec_splats((int)0));

    x = gmx_simd_andnot_d(expmask, vec_abs(x));
    /* Reset zero (but correctly biased) exponent */
    return gmx_simd_or_d(x, vec_splats(1.0));
}
static gmx_inline gmx_simd_double_t
gmx_simd_set_exponent_d_ibm_vsx(gmx_simd_double_t x)
{
    gmx_simd_dint32_t iexp  = gmx_simd_cvt_d2i(x);
    gmx_simd_dint32_t i1023 = vec_splats(1023);
    gmx_simd_dint32_t tmp;

    iexp = vec_add(iexp, i1023);
    /* The exponent is now present in pairs of integers; 0011.
     * Elements 0/2 already correspond to the upper half of each double,
     * so we only need to shift by another 52-32=20 bits.
     * The remaining elements 1/3 are shifted by 32 bits to make them zero.
     */
    iexp = vec_sl(iexp, (__vector unsigned int)vec_splats((int)20));
    tmp  = vec_mergeh(iexp, iexp);
    iexp = vec_mergel(tmp, iexp);
    iexp = vec_mergel(iexp, gmx_simd_setzero_di());
    return (__vector double)iexp;
}
static gmx_inline double
gmx_simd_reduce_d_ibm_vsx(gmx_simd_double_t x)
{
    x = vec_add(x, vec_xxsldwi(x, x, 2));
    return vec_extract(x, 0);
}
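
/* Illustrative trace: shifting by two 32-bit words swaps the two doubles,
 * so for x = [d0,d1] the add yields [d0+d1, d1+d0] and the extracted
 * element holds the full sum.
 */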
/****************************************************
 *    CONVERSION IMPLEMENTATION HELPER FUNCTIONS    *
 ****************************************************/
static gmx_inline void
gmx_simd_cvt_f2dd_ibm_vsx(gmx_simd_float_t f0,
                          gmx_simd_double_t *d0, gmx_simd_double_t *d1)
{
    __vector float fA, fB;
    fA  = vec_mergel(f0, f0); /* 0011 */
    fB  = vec_mergeh(f0, f0); /* 2233 */
    *d0 = gmx_vsx_f2d(fA);    /* 01 */
    *d1 = gmx_vsx_f2d(fB);    /* 23 */
}
static gmx_inline gmx_simd_float_t
gmx_simd_cvt_dd2f_ibm_vsx(gmx_simd_double_t d0, gmx_simd_double_t d1)
{
    __vector float fA, fB, fC, fD, fE;
    fA = gmx_vsx_d2f(d0);    /* 0x1x */
    fB = gmx_vsx_d2f(d1);    /* 2x3x */
    fC = vec_mergel(fA, fB); /* 02xx */
    fD = vec_mergeh(fA, fB); /* 13xx */
    fE = vec_mergel(fD, fC); /* 0123 */
    return fE;
}
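
/* Illustrative round trip (not part of the API): converting four floats to
 * two double vectors and back should reproduce the input exactly, since
 * every float is representable as a double:
 *
 *     gmx_simd_double_t dA, dB;
 *     gmx_simd_float_t  f = gmx_simd_set1_f(1.5f);
 *     gmx_simd_cvt_f2dd(f, &dA, &dB);
 *     f = gmx_simd_cvt_dd2f(dA, dB);       (all four lanes still 1.5f)
 */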
/* Single precision VSX is 4 elements wide, use for SIMD4 */
#define gmx_simd4_float_t          gmx_simd_float_t
#define gmx_simd4_load_f           gmx_simd_load_f
#define gmx_simd4_load1_f          gmx_simd_load1_f
#define gmx_simd4_set1_f           gmx_simd_set1_f
#define gmx_simd4_store_f          gmx_simd_store_f
#define gmx_simd4_loadu_f          gmx_simd_loadu_f
#define gmx_simd4_storeu_f         gmx_simd_storeu_f
#define gmx_simd4_setzero_f        gmx_simd_setzero_f
#define gmx_simd4_add_f            gmx_simd_add_f
#define gmx_simd4_sub_f            gmx_simd_sub_f
#define gmx_simd4_mul_f            gmx_simd_mul_f
#define gmx_simd4_fmadd_f          gmx_simd_fmadd_f
#define gmx_simd4_fmsub_f          gmx_simd_fmsub_f
#define gmx_simd4_fnmadd_f         gmx_simd_fnmadd_f
#define gmx_simd4_fnmsub_f         gmx_simd_fnmsub_f
#define gmx_simd4_and_f            gmx_simd_and_f
#define gmx_simd4_andnot_f         gmx_simd_andnot_f
#define gmx_simd4_or_f             gmx_simd_or_f
#define gmx_simd4_xor_f            gmx_simd_xor_f
#define gmx_simd4_rsqrt_f          gmx_simd_rsqrt_f
#define gmx_simd4_rcp_f            gmx_simd_rcp_f
#define gmx_simd4_fabs_f           gmx_simd_fabs_f
#define gmx_simd4_fneg_f           gmx_simd_fneg_f
#define gmx_simd4_max_f            gmx_simd_max_f
#define gmx_simd4_min_f            gmx_simd_min_f
#define gmx_simd4_round_f          gmx_simd_round_f
#define gmx_simd4_trunc_f          gmx_simd_trunc_f
#define gmx_simd4_fraction_f       gmx_simd_fraction_f
#define gmx_simd4_get_exponent_f   gmx_simd_get_exponent_f
#define gmx_simd4_get_mantissa_f   gmx_simd_get_mantissa_f
#define gmx_simd4_set_exponent_f   gmx_simd_set_exponent_f
#define gmx_simd4_dotproduct3_f    gmx_simd4_dotproduct3_f_ibm_vsx
#define gmx_simd4_fint32_t         gmx_simd_fint32_t
#define gmx_simd4_load_fi          gmx_simd_load_fi
#define gmx_simd4_load1_fi         gmx_simd_load1_fi
#define gmx_simd4_set1_fi          gmx_simd_set1_fi
#define gmx_simd4_store_fi         gmx_simd_store_fi
#define gmx_simd4_loadu_fi         gmx_simd_loadu_fi
#define gmx_simd4_storeu_fi        gmx_simd_storeu_fi
#define gmx_simd4_setzero_fi       gmx_simd_setzero_fi
#define gmx_simd4_cvt_f2i          gmx_simd_cvt_f2i
#define gmx_simd4_cvtt_f2i         gmx_simd_cvtt_f2i
#define gmx_simd4_cvt_i2f          gmx_simd_cvt_i2f
#define gmx_simd4_fbool_t          gmx_simd_fbool_t
#define gmx_simd4_cmpeq_f          gmx_simd_cmpeq_f
#define gmx_simd4_cmplt_f          gmx_simd_cmplt_f
#define gmx_simd4_cmple_f          gmx_simd_cmple_f
#define gmx_simd4_and_fb           gmx_simd_and_fb
#define gmx_simd4_or_fb            gmx_simd_or_fb
#define gmx_simd4_anytrue_fb       gmx_simd_anytrue_fb
#define gmx_simd4_blendzero_f      gmx_simd_blendzero_f
#define gmx_simd4_blendnotzero_f   gmx_simd_blendnotzero_f
#define gmx_simd4_blendv_f         gmx_simd_blendv_f
#define gmx_simd4_reduce_f         gmx_simd_reduce_f
static gmx_inline float
gmx_simd4_dotproduct3_f_ibm_vsx(gmx_simd4_float_t a, gmx_simd4_float_t b)
{
    gmx_simd4_float_t c = gmx_simd_mul_f(a, b);
    gmx_simd4_float_t sum;
    sum = vec_add(c, vec_xxsldwi(c, c, 2));
    sum = vec_add(sum, vec_xxsldwi(c, c, 1));
    return vec_extract(sum, 0);
}
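
/* Illustrative check: the two shift-and-add steps accumulate c0+c1+c2 into
 * the lane that is extracted, so for a = (1,2,3,junk) and b = (4,5,6,junk)
 * the function returns 1*4 + 2*5 + 3*6 = 32; the product in the fourth
 * lane never contributes to the result.
 */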
/* Function to check whether SIMD operations have resulted in overflow.
 * For now, this is unfortunately a dummy for this architecture.
 */
static int
gmx_simd_check_and_reset_overflow(void)
{
    return 0;
}
/* Undefine our temporary work-arounds so they are not used by mistake */
#undef gmx_vsx_f2d
#undef gmx_vsx_d2f

#endif /* GMX_SIMD_IMPLEMENTATION_IBM_VSX_H */