ETISS 0.8.0
Extendable Translating Instruction Set Simulator (version 0.8.0)
pmmintrin.h
/*===---- pmmintrin.h - Implementation of SSE3 intrinsics on PowerPC -------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0. */

#ifndef NO_WARN_X86_INTRINSICS
/* This header is distributed to simplify porting x86_64 code that
   makes explicit use of Intel intrinsics to powerpc64le.
   It is the user's responsibility to determine if the results are
   acceptable and make additional changes as necessary.
   Note that much code that uses Intel intrinsics can be rewritten in
   standard C or GNU C extensions, which are more portable and better
   optimized across multiple targets.

   In the specific case of X86 SSE3 intrinsics, the PowerPC VMX/VSX ISA
   is a good match for most SIMD operations. However, the horizontal
   add/sub operations require the data pairs to be permuted into separate
   registers with vertical even/odd alignment for the operation.
   The addsub operation requires that the sign of only the even-numbered
   elements be flipped (XORed with -0.0).
   For larger blocks of code using these intrinsic implementations,
   the compiler should be able to schedule instructions to avoid
   additional latency.

   In the specific case of the monitor and mwait instructions, there is
   no direct equivalent in the PowerISA at this time. So those
   intrinsics are not implemented. */
#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this warning."
#endif

#ifndef PMMINTRIN_H_
#define PMMINTRIN_H_

#if defined(__linux__) && defined(__ppc64__)

/* We need definitions from the SSE2 and SSE header files. */
#include <emmintrin.h>

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_addsub_ps (__m128 __X, __m128 __Y)
{
  const __v4sf even_n0 = {-0.0, 0.0, -0.0, 0.0};
  __v4sf even_neg_Y = vec_xor(__Y, even_n0);
  return (__m128) vec_add (__X, even_neg_Y);
}
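
/* Worked example: with __X = {1, 2, 3, 4} and __Y = {10, 20, 30, 40},
   _mm_addsub_ps returns {1-10, 2+20, 3-30, 4+40} = {-9, 22, -27, 44}.
   Flipping the sign of only the even-indexed elements of __Y turns a
   single vec_add into the subtract-even/add-odd pattern of ADDSUBPS.
   _mm_addsub_pd below applies the same trick to [2 x double]. */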

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_addsub_pd (__m128d __X, __m128d __Y)
{
  const __v2df even_n0 = {-0.0, 0.0};
  __v2df even_neg_Y = vec_xor(__Y, even_n0);
  return (__m128d) vec_add (__X, even_neg_Y);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_hadd_ps (__m128 __X, __m128 __Y)
{
  __vector unsigned char xform2 = {
    0x00, 0x01, 0x02, 0x03,
    0x08, 0x09, 0x0A, 0x0B,
    0x10, 0x11, 0x12, 0x13,
    0x18, 0x19, 0x1A, 0x1B
  };
  __vector unsigned char xform1 = {
    0x04, 0x05, 0x06, 0x07,
    0x0C, 0x0D, 0x0E, 0x0F,
    0x14, 0x15, 0x16, 0x17,
    0x1C, 0x1D, 0x1E, 0x1F
  };
  return (__m128) vec_add (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
                           vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
}
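
/* The two permutes gather the even- and odd-indexed elements of the
   concatenated inputs: xform2 selects {X[0], X[2], Y[0], Y[2]} and
   xform1 selects {X[1], X[3], Y[1], Y[3]}, so the vec_add yields
   {X[0]+X[1], X[2]+X[3], Y[0]+Y[1], Y[2]+Y[3]}, the HADDPS result.
   For example, __X = {1, 2, 3, 4} and __Y = {10, 20, 30, 40} give
   {3, 7, 30, 70}.  _mm_hsub_ps below is identical except that the
   final vec_add becomes a vec_sub. */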

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_hsub_ps (__m128 __X, __m128 __Y)
{
  __vector unsigned char xform2 = {
    0x00, 0x01, 0x02, 0x03,
    0x08, 0x09, 0x0A, 0x0B,
    0x10, 0x11, 0x12, 0x13,
    0x18, 0x19, 0x1A, 0x1B
  };
  __vector unsigned char xform1 = {
    0x04, 0x05, 0x06, 0x07,
    0x0C, 0x0D, 0x0E, 0x0F,
    0x14, 0x15, 0x16, 0x17,
    0x1C, 0x1D, 0x1E, 0x1F
  };
  return (__m128) vec_sub (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
                           vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_hadd_pd (__m128d __X, __m128d __Y)
{
  return (__m128d) vec_add (vec_mergeh ((__v2df) __X, (__v2df)__Y),
                            vec_mergel ((__v2df) __X, (__v2df)__Y));
}
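
/* Here vec_mergeh produces {X[0], Y[0]} and vec_mergel produces
   {X[1], Y[1]}, so the sum is {X[0]+X[1], Y[0]+Y[1]}; e.g.
   __X = {1, 2} and __Y = {10, 20} give {3, 30}.  No explicit permute
   masks are needed because the merge operations already provide the
   even/odd pairing for two-element vectors. */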

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_hsub_pd (__m128d __X, __m128d __Y)
{
  return (__m128d) vec_sub (vec_mergeh ((__v2df) __X, (__v2df)__Y),
                            vec_mergel ((__v2df) __X, (__v2df)__Y));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movehdup_ps (__m128 __X)
{
  return (__m128)vec_mergeo ((__v4su)__X, (__v4su)__X);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_moveldup_ps (__m128 __X)
{
  return (__m128)vec_mergee ((__v4su)__X, (__v4su)__X);
}
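
/* Merging a vector with itself duplicates alternating elements:
   _mm_movehdup_ps ({1, 2, 3, 4}) returns {2, 2, 4, 4} (odd lanes
   duplicated) and _mm_moveldup_ps ({1, 2, 3, 4}) returns {1, 1, 3, 3}
   (even lanes duplicated), matching MOVSHDUP/MOVSLDUP. */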

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loaddup_pd (double const *__P)
{
  return (__m128d) vec_splats (*__P);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movedup_pd (__m128d __X)
{
  return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0));
}
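
/* Both of the above broadcast a single double across the vector:
   _mm_loaddup_pd (&d) returns {d, d}, and _mm_movedup_pd ({a, b})
   returns {a, a}, since _MM_SHUFFLE2 (0,0) selects element 0 of the
   source for both result lanes. */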

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_lddqu_si128 (__m128i const *__P)
{
  return (__m128i) (vec_vsx_ld(0, (signed int const *)__P));
}
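
/* vec_vsx_ld tolerates unaligned addresses, preserving the LDDQU
   guarantee that __P need not be 16-byte aligned.  A minimal usage
   sketch (names and values are illustrative only):

     int buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
     __m128i v = _mm_lddqu_si128 ((__m128i const *) &buf[1]);

   loads {1, 2, 3, 4} from a 4-byte-aligned address. */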

/* POWER8 / POWER9 have no equivalent for _mm_monitor or _mm_mwait. */

#else
#include_next <pmmintrin.h>
#endif /* defined(__linux__) && defined(__ppc64__) */

#endif /* PMMINTRIN_H_ */