/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS Development Team
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/* The macros in this file are intended to be used for writing
 * architecture-independent SIMD intrinsics code.
 * To support a new architecture, adding macros here should be (nearly)
 * all that is needed.
 */
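
/* As an illustration (added sketch, not part of the original header): a
 * kernel written with these macros is compiled to SSE, AVX or QPX purely
 * by the build configuration. The hypothetical function below assumes n
 * is a multiple of GMX_SIMD_WIDTH_HERE and that x, y and z are
 * SIMD-aligned:
 *
 *     static void add_arrays(int n, const real *x, const real *y, real *z)
 *     {
 *         int i;
 *
 *         for (i = 0; i < n; i += GMX_SIMD_WIDTH_HERE)
 *         {
 *             gmx_store_pr(z + i, gmx_add_pr(gmx_load_pr(x + i),
 *                                            gmx_load_pr(y + i)));
 *         }
 *     }
 */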

#ifdef _gmx_simd_macros_h_
#error "gmx_simd_macros.h included twice"
#else
#define _gmx_simd_macros_h_

/* NOTE: SSE2 acceleration does not include floor or blendv */

/* Uncomment the next line, without other SIMD active, for testing plain-C */
/* #define GMX_SIMD_REFERENCE_PLAIN_C */
#ifdef GMX_SIMD_REFERENCE_PLAIN_C
/* Plain C SIMD reference implementation, also serves as documentation */
#define GMX_HAVE_SIMD_MACROS

/* In general the reference SIMD supports any SIMD width, including 1.
 * See types/nb_verlet.h for details
 */
#define GMX_SIMD_REF_WIDTH  4

/* Include plain-C reference implementation, also serves as documentation */
#include "gmx_simd_ref.h"

#define GMX_SIMD_WIDTH_HERE  GMX_SIMD_REF_WIDTH

/* float/double SIMD register type */
#define gmx_mm_pr  gmx_simd_ref_pr

/* boolean SIMD register type */
#define gmx_mm_pb  gmx_simd_ref_pb

/* integer SIMD register type, only for table indexing and exclusion masks */
#define gmx_epi32  gmx_simd_ref_epi32
#define GMX_SIMD_EPI32_WIDTH  GMX_SIMD_REF_EPI32_WIDTH

/* Load GMX_SIMD_WIDTH_HERE reals from memory starting at r */
#define gmx_load_pr       gmx_simd_ref_load_pr
/* Set all SIMD register elements to *r */
#define gmx_load1_pr      gmx_simd_ref_load1_pr
#define gmx_set1_pr       gmx_simd_ref_set1_pr
#define gmx_setzero_pr    gmx_simd_ref_setzero_pr
#define gmx_store_pr      gmx_simd_ref_store_pr

#define gmx_add_pr        gmx_simd_ref_add_pr
#define gmx_sub_pr        gmx_simd_ref_sub_pr
#define gmx_mul_pr        gmx_simd_ref_mul_pr
/* For the FMA macros below, aim for c=d in code, so FMA3 uses 1 instruction */
#define gmx_madd_pr       gmx_simd_ref_madd_pr
#define gmx_nmsub_pr      gmx_simd_ref_nmsub_pr
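
/* Semantics sketch (added for clarity, not in the original):
 *     gmx_madd_pr(a, b, c)  computes a*b + c
 *     gmx_nmsub_pr(a, b, c) computes c - a*b
 * Writing c = gmx_madd_pr(a, b, c), i.e. reusing an input as the result,
 * lets FMA3, which overwrites one source operand, map to one instruction.
 */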

#define gmx_max_pr        gmx_simd_ref_max_pr
#define gmx_blendzero_pr  gmx_simd_ref_blendzero_pr

#define gmx_round_pr      gmx_simd_ref_round_pr

/* Not required, only used to speed up the nbnxn tabulated PME kernels */
#define GMX_SIMD_HAVE_FLOOR
#ifdef GMX_SIMD_HAVE_FLOOR
#define gmx_floor_pr      gmx_simd_ref_floor_pr
#endif

/* Not required, only used when blendv is faster than comparison */
#define GMX_SIMD_HAVE_BLENDV
#ifdef GMX_SIMD_HAVE_BLENDV
#define gmx_blendv_pr     gmx_simd_ref_blendv_pr
#endif
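
/* Note (added): following the x86 _mm_blendv_ps convention, the reference
 * gmx_blendv_pr(a, b, sel) selects elements from b where the sign bit of
 * sel is set and from a otherwise.
 */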

/* Copy the sign of a to b, assumes b >= 0 for efficiency */
#define gmx_cpsgn_nonneg_pr  gmx_simd_ref_cpsgn_nonneg_pr

/* Very specific operation required in the non-bonded kernels */
#define gmx_masknot_add_pr   gmx_simd_ref_masknot_add_pr
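/* i.e. (added note) gmx_masknot_add_pr(m, b, c) returns b + c for elements
 * where the mask m is false and b unchanged where m is true; compare the
 * _mm_andnot_ps-based x86 implementation below.
 */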

/* Comparison */
#define gmx_cmplt_pr      gmx_simd_ref_cmplt_pr

/* Logical operations on SIMD booleans */
#define gmx_and_pb        gmx_simd_ref_and_pb
#define gmx_or_pb         gmx_simd_ref_or_pb

/* Returns a single int (0/1) which tells if any of the booleans is True */
#define gmx_anytrue_pb    gmx_simd_ref_anytrue_pb
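
/* Typical use (illustrative sketch; rsq and rc2 are hypothetical SIMD
 * variables holding squared distances and the squared cut-off):
 *
 *     gmx_mm_pb within_cutoff = gmx_cmplt_pr(rsq, rc2);
 *
 *     if (gmx_anytrue_pb(within_cutoff))
 *     {
 *         ... evaluate this cluster pair ...
 *     }
 */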

/* Conversions only used for PME table lookup */
#define gmx_cvttpr_epi32  gmx_simd_ref_cvttpr_epi32
#define gmx_cvtepi32_pr   gmx_simd_ref_cvtepi32_pr

/* These two functions only need to be approximate, Newton-Raphson iteration
 * is used for full accuracy in gmx_invsqrt_pr and gmx_inv_pr.
 */
#define gmx_rsqrt_pr      gmx_simd_ref_rsqrt_pr
#define gmx_rcp_pr        gmx_simd_ref_rcp_pr
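
/* For reference (added sketch, not in the original): one Newton-Raphson
 * iteration for 1/sqrt(x), y' = 0.5*y*(3 - x*y*y), of the kind the
 * full-accuracy routines apply on top of gmx_rsqrt_pr:
 *
 *     static gmx_inline gmx_mm_pr nr_invsqrt_iter(gmx_mm_pr x, gmx_mm_pr y)
 *     {
 *         return gmx_mul_pr(gmx_mul_pr(gmx_set1_pr(0.5), y),
 *                           gmx_nmsub_pr(gmx_mul_pr(x, y), y,
 *                                        gmx_set1_pr(3.0)));
 *     }
 */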

/* sqrt+inv+sin+cos+acos+atan2 are used for bonded potentials, exp for PME */
#define GMX_SIMD_HAVE_EXP
#ifdef GMX_SIMD_HAVE_EXP
#define gmx_exp_pr        gmx_simd_ref_exp_pr
#endif
#define GMX_SIMD_HAVE_TRIGONOMETRIC
#ifdef GMX_SIMD_HAVE_TRIGONOMETRIC
#define gmx_sqrt_pr       gmx_simd_ref_sqrt_pr
#define gmx_sincos_pr     gmx_simd_ref_sincos_pr
#define gmx_acos_pr       gmx_simd_ref_acos_pr
#define gmx_atan2_pr      gmx_simd_ref_atan2_pr
#endif

#endif /* GMX_SIMD_REFERENCE_PLAIN_C */

/* The same SIMD macros can be translated to SIMD intrinsics (and compiled
 * to instructions) for different SIMD widths and float precisions.
 *
 * On x86: the gmx_ prefix is replaced by _mm_ or _mm256_ (SSE or AVX).
 * The _pr suffix is replaced by _ps or _pd (for single or double precision).
 * Compiler settings will decide if 128-bit intrinsics will
 * be translated into SSE or AVX instructions.
 */
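
/* For example, under these rules gmx_mul_pr becomes _mm_mul_ps for 128-bit
 * single precision and _mm256_mul_pd for 256-bit double precision, as the
 * definitions below show.
 */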

#ifdef GMX_USE_HALF_WIDTH_SIMD_HERE
#if defined GMX_X86_AVX_256
/* We have half SIMD width support, continue */
#else
#error "half SIMD width intrinsics are not supported"
#endif
#endif

#ifdef GMX_TARGET_X86

#ifdef GMX_X86_SSE2
/* This is for general x86 SIMD instruction sets that also support SSE2 */
#define GMX_HAVE_SIMD_MACROS

/* Include the highest supported x86 SIMD intrinsics + math functions */
#ifdef GMX_X86_AVX_256
#include "gmx_x86_avx_256.h"
#ifdef GMX_DOUBLE
#include "gmx_math_x86_avx_256_double.h"
#else /* GMX_DOUBLE */
#include "gmx_math_x86_avx_256_single.h"
#endif /* GMX_DOUBLE */
#else /* GMX_X86_AVX_256 */
#ifdef GMX_X86_AVX_128_FMA
#include "gmx_x86_avx_128_fma.h"
#ifdef GMX_DOUBLE
#include "gmx_math_x86_avx_128_fma_double.h"
#else /* GMX_DOUBLE */
#include "gmx_math_x86_avx_128_fma_single.h"
#endif /* GMX_DOUBLE */
#else /* GMX_X86_AVX_128_FMA */
#ifdef GMX_X86_SSE4_1
#include "gmx_x86_sse4_1.h"
#ifdef GMX_DOUBLE
#include "gmx_math_x86_sse4_1_double.h"
#else /* GMX_DOUBLE */
#include "gmx_math_x86_sse4_1_single.h"
#endif /* GMX_DOUBLE */
#else /* GMX_X86_SSE4_1 */
#ifdef GMX_X86_SSE2
#include "gmx_x86_sse2.h"
#ifdef GMX_DOUBLE
#include "gmx_math_x86_sse2_double.h"
#else /* GMX_DOUBLE */
#include "gmx_math_x86_sse2_single.h"
#endif /* GMX_DOUBLE */
#else /* GMX_X86_SSE2 */
#error No x86 acceleration defined
#endif /* GMX_X86_SSE2 */
#endif /* GMX_X86_SSE4_1 */
#endif /* GMX_X86_AVX_128_FMA */
#endif /* GMX_X86_AVX_256 */

/* exp and trigonometric functions are included above */
#define GMX_SIMD_HAVE_EXP
#define GMX_SIMD_HAVE_TRIGONOMETRIC

#if !defined GMX_X86_AVX_256 || defined GMX_USE_HALF_WIDTH_SIMD_HERE

/* We use 128-bit SIMD */

#ifndef GMX_DOUBLE

#define GMX_SIMD_WIDTH_HERE  4

#define gmx_mm_pr  __m128

#define gmx_mm_pb  __m128

#define gmx_epi32  __m128i
#define GMX_SIMD_EPI32_WIDTH  4

#define gmx_load_pr       _mm_load_ps
#define gmx_load1_pr      _mm_load1_ps
#define gmx_set1_pr       _mm_set1_ps
#define gmx_setzero_pr    _mm_setzero_ps
#define gmx_store_pr      _mm_store_ps

#define gmx_add_pr        _mm_add_ps
#define gmx_sub_pr        _mm_sub_ps
#define gmx_mul_pr        _mm_mul_ps
#ifdef GMX_X86_AVX_128_FMA
#define GMX_SIMD_HAVE_FMA
#define gmx_madd_pr(a, b, c)   _mm_macc_ps(a, b, c)
#define gmx_nmsub_pr(a, b, c)  _mm_nmacc_ps(a, b, c)
#else
#define gmx_madd_pr(a, b, c)   _mm_add_ps(c, _mm_mul_ps(a, b))
#define gmx_nmsub_pr(a, b, c)  _mm_sub_ps(c, _mm_mul_ps(a, b))
#endif
#define gmx_max_pr        _mm_max_ps
#define gmx_blendzero_pr  _mm_and_ps

#define gmx_cmplt_pr      _mm_cmplt_ps
#define gmx_and_pb        _mm_and_ps
#define gmx_or_pb         _mm_or_ps

#ifdef GMX_X86_SSE4_1
#define gmx_round_pr(x)   _mm_round_ps(x, 0x0)
#define GMX_SIMD_HAVE_FLOOR
#define gmx_floor_pr      _mm_floor_ps
#else
#define gmx_round_pr(x)   _mm_cvtepi32_ps(_mm_cvtps_epi32(x))
#endif

#ifdef GMX_X86_SSE4_1
#define GMX_SIMD_HAVE_BLENDV
#define gmx_blendv_pr     _mm_blendv_ps
#endif

static gmx_inline gmx_mm_pr gmx_cpsgn_nonneg_pr(gmx_mm_pr a, gmx_mm_pr b)
{
    /* The value -0.0 has only the sign-bit set */
    gmx_mm_pr sign_mask = _mm_set1_ps(-0.0);
    return _mm_or_ps(_mm_and_ps(a, sign_mask), b);
}

static gmx_inline gmx_mm_pr gmx_masknot_add_pr(gmx_mm_pb a, gmx_mm_pr b, gmx_mm_pr c)
{
    return _mm_add_ps(b, _mm_andnot_ps(a, c));
}

#define gmx_anytrue_pb    _mm_movemask_ps

#define gmx_cvttpr_epi32  _mm_cvttps_epi32
#define gmx_cvtepi32_pr   _mm_cvtepi32_ps

#define gmx_rsqrt_pr      _mm_rsqrt_ps
#define gmx_rcp_pr        _mm_rcp_ps

#define gmx_exp_pr        gmx_mm_exp_ps
#define gmx_sqrt_pr       gmx_mm_sqrt_ps
#define gmx_sincos_pr     gmx_mm_sincos_ps
#define gmx_acos_pr       gmx_mm_acos_ps
#define gmx_atan2_pr      gmx_mm_atan2_ps

#else /* ifndef GMX_DOUBLE */

#define GMX_SIMD_WIDTH_HERE  2

#define gmx_mm_pr  __m128d

#define gmx_mm_pb  __m128d

#define gmx_epi32  __m128i
#define GMX_SIMD_EPI32_WIDTH  4

#define gmx_load_pr       _mm_load_pd
#define gmx_load1_pr      _mm_load1_pd
#define gmx_set1_pr       _mm_set1_pd
#define gmx_setzero_pr    _mm_setzero_pd
#define gmx_store_pr      _mm_store_pd

#define gmx_add_pr        _mm_add_pd
#define gmx_sub_pr        _mm_sub_pd
#define gmx_mul_pr        _mm_mul_pd
#ifdef GMX_X86_AVX_128_FMA
#define GMX_SIMD_HAVE_FMA
#define gmx_madd_pr(a, b, c)   _mm_macc_pd(a, b, c)
#define gmx_nmsub_pr(a, b, c)  _mm_nmacc_pd(a, b, c)
#else
#define gmx_madd_pr(a, b, c)   _mm_add_pd(c, _mm_mul_pd(a, b))
#define gmx_nmsub_pr(a, b, c)  _mm_sub_pd(c, _mm_mul_pd(a, b))
#endif
#define gmx_max_pr        _mm_max_pd
#define gmx_blendzero_pr  _mm_and_pd

#ifdef GMX_X86_SSE4_1
#define gmx_round_pr(x)   _mm_round_pd(x, 0x0)
#define GMX_SIMD_HAVE_FLOOR
#define gmx_floor_pr      _mm_floor_pd
#else
#define gmx_round_pr(x)   _mm_cvtepi32_pd(_mm_cvtpd_epi32(x))
/* gmx_floor_pr is not used in code for pre-SSE4_1 hardware */
#endif

#ifdef GMX_X86_SSE4_1
#define GMX_SIMD_HAVE_BLENDV
#define gmx_blendv_pr     _mm_blendv_pd
#endif

static gmx_inline gmx_mm_pr gmx_cpsgn_nonneg_pr(gmx_mm_pr a, gmx_mm_pr b)
{
    gmx_mm_pr sign_mask = _mm_set1_pd(-0.0);
    return _mm_or_pd(_mm_and_pd(a, sign_mask), b);
}

static gmx_inline gmx_mm_pr gmx_masknot_add_pr(gmx_mm_pb a, gmx_mm_pr b, gmx_mm_pr c)
{
    return _mm_add_pd(b, _mm_andnot_pd(a, c));
}

#define gmx_cmplt_pr      _mm_cmplt_pd

#define gmx_and_pb        _mm_and_pd
#define gmx_or_pb         _mm_or_pd

#define gmx_anytrue_pb    _mm_movemask_pd

#define gmx_cvttpr_epi32  _mm_cvttpd_epi32
#define gmx_cvtepi32_pr   _mm_cvtepi32_pd

#define gmx_rsqrt_pr(r)   _mm_cvtps_pd(_mm_rsqrt_ps(_mm_cvtpd_ps(r)))
#define gmx_rcp_pr(r)     _mm_cvtps_pd(_mm_rcp_ps(_mm_cvtpd_ps(r)))

#define gmx_exp_pr        gmx_mm_exp_pd
#define gmx_sqrt_pr       gmx_mm_sqrt_pd
#define gmx_sincos_pr     gmx_mm_sincos_pd
#define gmx_acos_pr       gmx_mm_acos_pd
#define gmx_atan2_pr      gmx_mm_atan2_pd

#endif /* ifndef GMX_DOUBLE */

#else /* GMX_USE_HALF_WIDTH_SIMD_HERE */

/* We have GMX_X86_AVX_256 and not GMX_USE_HALF_WIDTH_SIMD_HERE,
 * so we use 256-bit SIMD.
 */

#ifndef GMX_DOUBLE

#define GMX_SIMD_WIDTH_HERE  8

#define gmx_mm_pr  __m256

#define gmx_mm_pb  __m256

#define gmx_epi32  __m256i
#define GMX_SIMD_EPI32_WIDTH  8

#define gmx_load_pr       _mm256_load_ps
#define gmx_load1_pr(x)   _mm256_set1_ps((x)[0])
#define gmx_set1_pr       _mm256_set1_ps
#define gmx_setzero_pr    _mm256_setzero_ps
#define gmx_store_pr      _mm256_store_ps

#define gmx_add_pr        _mm256_add_ps
#define gmx_sub_pr        _mm256_sub_ps
#define gmx_mul_pr        _mm256_mul_ps
#define gmx_madd_pr(a, b, c)   _mm256_add_ps(c, _mm256_mul_ps(a, b))
#define gmx_nmsub_pr(a, b, c)  _mm256_sub_ps(c, _mm256_mul_ps(a, b))
#define gmx_max_pr        _mm256_max_ps
#define gmx_blendzero_pr  _mm256_and_ps

#define gmx_round_pr(x)   _mm256_round_ps(x, 0x0)
#define GMX_SIMD_HAVE_FLOOR
#define gmx_floor_pr      _mm256_floor_ps

#define GMX_SIMD_HAVE_BLENDV
#define gmx_blendv_pr     _mm256_blendv_ps

static gmx_inline gmx_mm_pr gmx_cpsgn_nonneg_pr(gmx_mm_pr a, gmx_mm_pr b)
{
    gmx_mm_pr sign_mask = _mm256_set1_ps(-0.0);
    return _mm256_or_ps(_mm256_and_ps(a, sign_mask), b);
}

static gmx_inline gmx_mm_pr gmx_masknot_add_pr(gmx_mm_pb a, gmx_mm_pr b, gmx_mm_pr c)
{
    return _mm256_add_ps(b, _mm256_andnot_ps(a, c));
}

/* Less-than (we use ordered, non-signaling, but that's not required) */
#define gmx_cmplt_pr(x, y) _mm256_cmp_ps(x, y, 0x11)
#define gmx_and_pb        _mm256_and_ps
#define gmx_or_pb         _mm256_or_ps

#define gmx_anytrue_pb    _mm256_movemask_ps

#define gmx_cvttpr_epi32  _mm256_cvttps_epi32

#define gmx_rsqrt_pr      _mm256_rsqrt_ps
#define gmx_rcp_pr        _mm256_rcp_ps

#define gmx_exp_pr        gmx_mm256_exp_ps
#define gmx_sqrt_pr       gmx_mm256_sqrt_ps
#define gmx_sincos_pr     gmx_mm256_sincos_ps
#define gmx_acos_pr       gmx_mm256_acos_ps
#define gmx_atan2_pr      gmx_mm256_atan2_ps

#else /* ifndef GMX_DOUBLE */

#define GMX_SIMD_WIDTH_HERE  4

#define gmx_mm_pr  __m256d

#define gmx_mm_pb  __m256d

/* We use 128-bit integer registers because of missing 256-bit operations */
#define gmx_epi32  __m128i
#define GMX_SIMD_EPI32_WIDTH  4

#define gmx_load_pr       _mm256_load_pd
#define gmx_load1_pr(x)   _mm256_set1_pd((x)[0])
#define gmx_set1_pr       _mm256_set1_pd
#define gmx_setzero_pr    _mm256_setzero_pd
#define gmx_store_pr      _mm256_store_pd

#define gmx_add_pr        _mm256_add_pd
#define gmx_sub_pr        _mm256_sub_pd
#define gmx_mul_pr        _mm256_mul_pd
#define gmx_madd_pr(a, b, c)   _mm256_add_pd(c, _mm256_mul_pd(a, b))
#define gmx_nmsub_pr(a, b, c)  _mm256_sub_pd(c, _mm256_mul_pd(a, b))
#define gmx_max_pr        _mm256_max_pd
#define gmx_blendzero_pr  _mm256_and_pd

#define gmx_round_pr(x)   _mm256_round_pd(x, 0x0)
#define GMX_SIMD_HAVE_FLOOR
#define gmx_floor_pr      _mm256_floor_pd

#define GMX_SIMD_HAVE_BLENDV
#define gmx_blendv_pr     _mm256_blendv_pd

static gmx_inline gmx_mm_pr gmx_cpsgn_nonneg_pr(gmx_mm_pr a, gmx_mm_pr b)
{
    gmx_mm_pr sign_mask = _mm256_set1_pd(-0.0);
    return _mm256_or_pd(_mm256_and_pd(a, sign_mask), b);
}

static gmx_inline gmx_mm_pr gmx_masknot_add_pr(gmx_mm_pb a, gmx_mm_pr b, gmx_mm_pr c)
{
    return _mm256_add_pd(b, _mm256_andnot_pd(a, c));
}

/* Less-than (we use ordered, non-signaling, but that's not required) */
#define gmx_cmplt_pr(x, y) _mm256_cmp_pd(x, y, 0x11)

#define gmx_and_pb        _mm256_and_pd
#define gmx_or_pb         _mm256_or_pd

#define gmx_anytrue_pb    _mm256_movemask_pd

#define gmx_cvttpr_epi32  _mm256_cvttpd_epi32

#define gmx_rsqrt_pr(r)   _mm256_cvtps_pd(_mm_rsqrt_ps(_mm256_cvtpd_ps(r)))
#define gmx_rcp_pr(r)     _mm256_cvtps_pd(_mm_rcp_ps(_mm256_cvtpd_ps(r)))

#define gmx_exp_pr        gmx_mm256_exp_pd
#define gmx_sqrt_pr       gmx_mm256_sqrt_pd
#define gmx_sincos_pr     gmx_mm256_sincos_pd
#define gmx_acos_pr       gmx_mm256_acos_pd
#define gmx_atan2_pr      gmx_mm256_atan2_pd

#endif /* ifndef GMX_DOUBLE */

#endif /* 128- or 256-bit x86 SIMD */

#endif /* GMX_X86_SSE2 */

#endif /* GMX_TARGET_X86 */

#ifdef GMX_CPU_ACCELERATION_IBM_QPX

/* This hack works on the compilers that can reach this code. A real
   solution with broader scope will be proposed in the master branch. */
#define gmx_always_inline __attribute__((always_inline))

/* This is for the A2 core on BlueGene/Q that supports IBM's QPX
   vector built-in functions */
#define GMX_HAVE_SIMD_MACROS
#ifdef __clang__
/* With clang we use its built-in SLEEF port (header name reconstructed
   here; see the note above gmx_exp_pr below) */
#include <qpxmath.h>
#else
#include "mass_simd.h"
#endif

/* No need to version the code by the precision, because the QPX AXU
   extends to and truncates from double precision for free. */
#define GMX_SIMD_WIDTH_HERE  4
typedef vector4double gmx_mm_pr;
typedef vector4double gmx_mm_pb;
typedef vector4double gmx_epi32;
#define GMX_SIMD_EPI32_WIDTH  4

static gmx_inline gmx_mm_pr gmx_always_inline gmx_load_pr(const real *a)
{
#ifdef NDEBUG
    return vec_ld(0, (real *) a);
#else
    return vec_lda(0, (real *) a);
#endif
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_load1_pr(const real *a)
{
    return vec_splats(*a);
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_set1_pr(real a)
{
    return vec_splats(a);
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_setzero_pr()
{
    return vec_splats(0.0);
}

static gmx_inline void gmx_always_inline gmx_store_pr(real *a, gmx_mm_pr b)
{
#ifdef NDEBUG
    vec_st(b, 0, a);
#else
    vec_sta(b, 0, a);
#endif
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_add_pr(gmx_mm_pr a, gmx_mm_pr b)
{
    return vec_add(a, b);
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_sub_pr(gmx_mm_pr a, gmx_mm_pr b)
{
    return vec_sub(a, b);
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_mul_pr(gmx_mm_pr a, gmx_mm_pr b)
{
    return vec_mul(a, b);
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_madd_pr(gmx_mm_pr a, gmx_mm_pr b, gmx_mm_pr c)
{
    return vec_madd(a, b, c);
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_nmsub_pr(gmx_mm_pr a, gmx_mm_pr b, gmx_mm_pr c)
{
    return vec_nmsub(a, b, c);
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_max_pr(gmx_mm_pr a, gmx_mm_pr b)
{
    return vec_sel(b, a, vec_sub(a, b));
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_blendzero_pr(gmx_mm_pr a, gmx_mm_pr b)
{
    return vec_sel(gmx_setzero_pr(), a, b);
}

static gmx_inline gmx_mm_pb gmx_always_inline gmx_cmplt_pr(gmx_mm_pr a, gmx_mm_pr b)
{
    return vec_cmplt(a, b);
}

static gmx_inline gmx_mm_pb gmx_always_inline gmx_and_pb(gmx_mm_pb a, gmx_mm_pb b)
{
    return vec_and(a, b);
}

static gmx_inline gmx_mm_pb gmx_always_inline gmx_or_pb(gmx_mm_pb a, gmx_mm_pb b)
{
    return vec_or(a, b);
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_round_pr(gmx_mm_pr a)
{
    return vec_round(a);
}

#define GMX_SIMD_HAVE_FLOOR
static gmx_inline gmx_mm_pr gmx_always_inline gmx_floor_pr(gmx_mm_pr a)
{
    return vec_floor(a);
}

#define GMX_SIMD_HAVE_BLENDV
static gmx_inline gmx_mm_pr gmx_always_inline gmx_blendv_pr(gmx_mm_pr a, gmx_mm_pr b, gmx_mm_pr c)
{
    return vec_sel(b, a, gmx_cmplt_pr(gmx_setzero_pr(), c));
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_cpsgn_nonneg_pr(gmx_mm_pr a, gmx_mm_pr b)
{
    return vec_cpsgn(a, b);
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_masknot_add_pr(gmx_mm_pb a, gmx_mm_pr b, gmx_mm_pr c)
{
    return vec_add(b, vec_sel(c, gmx_setzero_pr(), a));
}

static gmx_inline gmx_bool gmx_always_inline
GMX_SIMD_IS_TRUE(real x)
{
    return x >= 0.0;
}

static gmx_inline gmx_epi32 gmx_always_inline gmx_cvttpr_epi32(gmx_mm_pr a)
{
    return vec_ctiwuz(a);
}

/* Don't want this, we have floor */
/* #define gmx_cvtepi32_pr   vec_cvtepi32 */

/* A2 core on BG/Q delivers relative error of 2^-14, whereas Power ISA
   Architecture only promises 2^-8. So probably no need for
   Newton-Raphson iterates at single or double. */
static gmx_inline gmx_mm_pr gmx_always_inline gmx_rsqrt_pr(gmx_mm_pr a)
{
    return vec_rsqrte(a);
}

/* A2 core on BG/Q delivers relative error of 2^-14, whereas Power ISA
   Architecture only promises 2^-5. So probably no need for
   Newton-Raphson iterates at single or double. */
static gmx_inline gmx_mm_pr gmx_always_inline gmx_rcp_pr(gmx_mm_pr a)
{
    return vec_re(a);
}

/* Note that here, and below, we use the built-in SLEEF port when
   compiling on BlueGene/Q with clang */

#define GMX_SIMD_HAVE_EXP
static gmx_inline gmx_mm_pr gmx_always_inline gmx_exp_pr(gmx_mm_pr a)
{
#ifdef __clang__
#ifndef GMX_DOUBLE
    return xexpf(a);
#else
    return xexp(a);
#endif
#else
#ifndef GMX_DOUBLE
    return expf4(a);
#else
    return expd4(a);
#endif
#endif
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_sqrt_pr(gmx_mm_pr a)
{
#ifdef NDEBUG
    return vec_swsqrt_nochk(a);
#else
    return vec_swsqrt(a);
#endif
}

#define GMX_SIMD_HAVE_TRIGONOMETRIC
static gmx_inline int gmx_always_inline gmx_sincos_pr(gmx_mm_pr a, gmx_mm_pr *b, gmx_mm_pr *c)
{
#ifdef __clang__
#ifndef GMX_DOUBLE
    xsincosf(a, b, c);
#else
    xsincos(a, b, c);
#endif
#else
#ifndef GMX_DOUBLE
    sincosf4(a, b, c);
#else
    sincosd4(a, b, c);
#endif
#endif
    return 0; /* the return value is not used */
}

static gmx_inline gmx_mm_pr gmx_always_inline gmx_acos_pr(gmx_mm_pr a)
{
#ifdef __clang__
#ifndef GMX_DOUBLE
    return xacosf(a);
#else
    return xacos(a);
#endif
#else
#ifndef GMX_DOUBLE
    return acosf4(a);
#else
    return acosd4(a);
#endif
#endif
}

/* NB The order of parameters here is correct; the
   documentation of atan2[df]4 in SIMD MASS is wrong. */
static gmx_inline gmx_mm_pr gmx_always_inline gmx_atan2_pr(gmx_mm_pr a, gmx_mm_pr b)
{
#ifdef __clang__
#ifndef GMX_DOUBLE
    return xatan2f(a, b);
#else
    return xatan2(a, b);
#endif
#else
#ifndef GMX_DOUBLE
    return atan2f4(a, b);
#else
    return atan2d4(a, b);
#endif
#endif
}

static gmx_inline int gmx_always_inline
gmx_anytrue_pb(gmx_mm_pb a)
{
    /* The "anytrue" is done solely on the QPX AXU (which is the only
       available FPU). This is awkward, because pretty much no
       "horizontal" SIMD-vector operations exist, unlike x86 where
       SSE4.1 added various kinds of horizontal operations. So we have
       to make do with shifting vector elements and operating on the
       results. This makes for lots of data dependency, but the main
       alternative of storing to memory and reloading is not going to
       help, either. OpenMP over 2 or 4 hardware threads per core will
       hide much of the latency from the data dependency. The
       vec_extract() lets the compiler correctly use a floating-point
       comparison on the zeroth vector element, which avoids needing
       memory at all. */
    gmx_mm_pb vec_shifted_left_0 = a;
    gmx_mm_pb vec_shifted_left_1 = vec_sldw(a, a, 1);
    gmx_mm_pb vec_shifted_left_2 = vec_sldw(a, a, 2);
    gmx_mm_pb vec_shifted_left_3 = vec_sldw(a, a, 3);

    gmx_mm_pb vec_return = vec_or(vec_or(vec_shifted_left_2, vec_shifted_left_3),
                                  vec_or(vec_shifted_left_0, vec_shifted_left_1));
    return (0.0 < vec_extract(vec_return, 0));
}

#undef gmx_always_inline

#endif /* GMX_CPU_ACCELERATION_IBM_QPX */

#ifdef GMX_HAVE_SIMD_MACROS
/* Generic functions to extract a SIMD aligned pointer from a pointer x.
 * x should have at least GMX_SIMD_WIDTH_HERE elements extra compared
 * to how many you want to use, to avoid indexing outside the aligned region.
 */

static gmx_inline real *
gmx_simd_align_real(const real *x)
{
    return (real *)(((size_t)((x)+GMX_SIMD_WIDTH_HERE)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(real)-1))));
}

static gmx_inline int *
gmx_simd_align_int(const int *x)
{
    return (int  *)(((size_t)((x)+GMX_SIMD_WIDTH_HERE)) & (~((size_t)(GMX_SIMD_WIDTH_HERE*sizeof(int )-1))));
}
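
/* Usage sketch (illustrative, not part of the original header): allocate
 * GMX_SIMD_WIDTH_HERE extra elements, then align the pointer before use:
 *
 *     real *buffer = malloc((n + GMX_SIMD_WIDTH_HERE)*sizeof(real));
 *     real *x      = gmx_simd_align_real(buffer);
 *
 * x then points to a SIMD-aligned region with room for n reals.
 */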

/* Include the math functions which only need the above macros,
 * generally these are the ones that don't need masking operations.
 */
#ifdef GMX_DOUBLE
#include "gmx_simd_math_double.h"
#else
#include "gmx_simd_math_single.h"
#endif

#endif /* GMX_HAVE_SIMD_MACROS */

#endif /* _gmx_simd_macros_h_ */