ETISS 0.8.0
Extendable Translating Instruction Set Simulator (version 0.8.0)
avxintrin.h
1 /*===---- avxintrin.h - AVX intrinsics -------------------------------------===
2  *
3  * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4  * See https://llvm.org/LICENSE.txt for license information.
5  * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6  *
7  *===-----------------------------------------------------------------------===
8  */
9 
10 #ifndef __IMMINTRIN_H
11 #error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
12 #endif
13 
14 #ifndef __AVXINTRIN_H
15 #define __AVXINTRIN_H
16 
17 typedef double __v4df __attribute__ ((__vector_size__ (32)));
18 typedef float __v8sf __attribute__ ((__vector_size__ (32)));
19 typedef long long __v4di __attribute__ ((__vector_size__ (32)));
20 typedef int __v8si __attribute__ ((__vector_size__ (32)));
21 typedef short __v16hi __attribute__ ((__vector_size__ (32)));
22 typedef char __v32qi __attribute__ ((__vector_size__ (32)));
23 
24 /* Unsigned types */
25 typedef unsigned long long __v4du __attribute__ ((__vector_size__ (32)));
26 typedef unsigned int __v8su __attribute__ ((__vector_size__ (32)));
27 typedef unsigned short __v16hu __attribute__ ((__vector_size__ (32)));
28 typedef unsigned char __v32qu __attribute__ ((__vector_size__ (32)));
29 
30 /* We need an explicitly signed variant for char. Note that this shouldn't
31  * appear in the interface though. */
32 typedef signed char __v32qs __attribute__((__vector_size__(32)));
33 
34 typedef float __m256 __attribute__ ((__vector_size__ (32), __aligned__(32)));
35 typedef double __m256d __attribute__((__vector_size__(32), __aligned__(32)));
36 typedef long long __m256i __attribute__((__vector_size__(32), __aligned__(32)));
37 
38 typedef float __m256_u __attribute__ ((__vector_size__ (32), __aligned__(1)));
39 typedef double __m256d_u __attribute__((__vector_size__(32), __aligned__(1)));
40 typedef long long __m256i_u __attribute__((__vector_size__(32), __aligned__(1)));
41 
42 /* Define the default attributes for the functions in this file. */
43 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(256)))
44 #define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(128)))
45 
46 /* Arithmetic */
59 static __inline __m256d __DEFAULT_FN_ATTRS
60 _mm256_add_pd(__m256d __a, __m256d __b)
61 {
62  return (__m256d)((__v4df)__a+(__v4df)__b);
63 }
64 
77 static __inline __m256 __DEFAULT_FN_ATTRS
78 _mm256_add_ps(__m256 __a, __m256 __b)
79 {
80  return (__m256)((__v8sf)__a+(__v8sf)__b);
81 }
82 
95 static __inline __m256d __DEFAULT_FN_ATTRS
96 _mm256_sub_pd(__m256d __a, __m256d __b)
97 {
98  return (__m256d)((__v4df)__a-(__v4df)__b);
99 }
100 
113 static __inline __m256 __DEFAULT_FN_ATTRS
114 _mm256_sub_ps(__m256 __a, __m256 __b)
115 {
116  return (__m256)((__v8sf)__a-(__v8sf)__b);
117 }
118 
132 static __inline __m256d __DEFAULT_FN_ATTRS
133 _mm256_addsub_pd(__m256d __a, __m256d __b)
134 {
135  return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
136 }
137 
151 static __inline __m256 __DEFAULT_FN_ATTRS
152 _mm256_addsub_ps(__m256 __a, __m256 __b)
153 {
154  return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
155 }
156 
169 static __inline __m256d __DEFAULT_FN_ATTRS
170 _mm256_div_pd(__m256d __a, __m256d __b)
171 {
172  return (__m256d)((__v4df)__a/(__v4df)__b);
173 }
174 
187 static __inline __m256 __DEFAULT_FN_ATTRS
188 _mm256_div_ps(__m256 __a, __m256 __b)
189 {
190  return (__m256)((__v8sf)__a/(__v8sf)__b);
191 }
192 
206 static __inline __m256d __DEFAULT_FN_ATTRS
207 _mm256_max_pd(__m256d __a, __m256d __b)
208 {
209  return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
210 }
211 
225 static __inline __m256 __DEFAULT_FN_ATTRS
226 _mm256_max_ps(__m256 __a, __m256 __b)
227 {
228  return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
229 }
230 
244 static __inline __m256d __DEFAULT_FN_ATTRS
245 _mm256_min_pd(__m256d __a, __m256d __b)
246 {
247  return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
248 }
249 
263 static __inline __m256 __DEFAULT_FN_ATTRS
264 _mm256_min_ps(__m256 __a, __m256 __b)
265 {
266  return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
267 }
268 
281 static __inline __m256d __DEFAULT_FN_ATTRS
282 _mm256_mul_pd(__m256d __a, __m256d __b)
283 {
284  return (__m256d)((__v4df)__a * (__v4df)__b);
285 }
286 
299 static __inline __m256 __DEFAULT_FN_ATTRS
300 _mm256_mul_ps(__m256 __a, __m256 __b)
301 {
302  return (__m256)((__v8sf)__a * (__v8sf)__b);
303 }
304 
316 static __inline __m256d __DEFAULT_FN_ATTRS
317 _mm256_sqrt_pd(__m256d __a)
318 {
319  return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
320 }
321 
333 static __inline __m256 __DEFAULT_FN_ATTRS
334 _mm256_sqrt_ps(__m256 __a)
335 {
336  return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
337 }
338 
350 static __inline __m256 __DEFAULT_FN_ATTRS
351 _mm256_rsqrt_ps(__m256 __a)
352 {
353  return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
354 }
355 
367 static __inline __m256 __DEFAULT_FN_ATTRS
368 _mm256_rcp_ps(__m256 __a)
369 {
370  return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
371 }
372 
402 #define _mm256_round_pd(V, M) \
403  (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M))
404 
434 #define _mm256_round_ps(V, M) \
435  (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M))
436 
452 #define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL)
453 
470 #define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
471 
487 #define _mm256_ceil_ps(V) _mm256_round_ps((V), _MM_FROUND_CEIL)
488 
504 #define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
505 
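/* Usage sketch for the arithmetic and rounding intrinsics above: a minimal,
 * illustrative helper (not part of this header), assuming a translation unit
 * that includes <immintrin.h> and is compiled with -mavx. */
static inline __m256d scale_and_floor(__m256d x, __m256d y)
{
    __m256d prod = _mm256_mul_pd(x, y);          /* element-wise multiply */
    __m256d bias = _mm256_set1_pd(0.5);          /* broadcast, defined later in this file */
    __m256d sum  = _mm256_add_pd(prod, bias);
    return _mm256_floor_pd(sum);                 /* per-element round toward -infinity */
}
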
506 /* Logical */
519 static __inline __m256d __DEFAULT_FN_ATTRS
520 _mm256_and_pd(__m256d __a, __m256d __b)
521 {
522  return (__m256d)((__v4du)__a & (__v4du)__b);
523 }
524 
537 static __inline __m256 __DEFAULT_FN_ATTRS
538 _mm256_and_ps(__m256 __a, __m256 __b)
539 {
540  return (__m256)((__v8su)__a & (__v8su)__b);
541 }
542 
558 static __inline __m256d __DEFAULT_FN_ATTRS
559 _mm256_andnot_pd(__m256d __a, __m256d __b)
560 {
561  return (__m256d)(~(__v4du)__a & (__v4du)__b);
562 }
563 
579 static __inline __m256 __DEFAULT_FN_ATTRS
580 _mm256_andnot_ps(__m256 __a, __m256 __b)
581 {
582  return (__m256)(~(__v8su)__a & (__v8su)__b);
583 }
584 
597 static __inline __m256d __DEFAULT_FN_ATTRS
598 _mm256_or_pd(__m256d __a, __m256d __b)
599 {
600  return (__m256d)((__v4du)__a | (__v4du)__b);
601 }
602 
615 static __inline __m256 __DEFAULT_FN_ATTRS
616 _mm256_or_ps(__m256 __a, __m256 __b)
617 {
618  return (__m256)((__v8su)__a | (__v8su)__b);
619 }
620 
633 static __inline __m256d __DEFAULT_FN_ATTRS
634 _mm256_xor_pd(__m256d __a, __m256d __b)
635 {
636  return (__m256d)((__v4du)__a ^ (__v4du)__b);
637 }
638 
651 static __inline __m256 __DEFAULT_FN_ATTRS
652 _mm256_xor_ps(__m256 __a, __m256 __b)
653 {
654  return (__m256)((__v8su)__a ^ (__v8su)__b);
655 }
656 
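/* Usage sketch for the bitwise intrinsics above: they operate on the raw bit
 * patterns of the lanes, so a common idiom is clearing the sign bits to take
 * an absolute value. Illustrative helper, assuming <immintrin.h> and -mavx. */
static inline __m256 abs_ps(__m256 x)
{
    const __m256 sign_bits = _mm256_set1_ps(-0.0f);  /* only the sign bit of each lane is set */
    return _mm256_andnot_ps(sign_bits, x);           /* ~sign_bits & x clears every sign bit */
}
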
657 /* Horizontal arithmetic */
675 static __inline __m256d __DEFAULT_FN_ATTRS
676 _mm256_hadd_pd(__m256d __a, __m256d __b)
677 {
678  return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
679 }
680 
698 static __inline __m256 __DEFAULT_FN_ATTRS
699 _mm256_hadd_ps(__m256 __a, __m256 __b)
700 {
701  return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
702 }
703 
721 static __inline __m256d __DEFAULT_FN_ATTRS
722 _mm256_hsub_pd(__m256d __a, __m256d __b)
723 {
724  return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
725 }
726 
744 static __inline __m256 __DEFAULT_FN_ATTRS
745 _mm256_hsub_ps(__m256 __a, __m256 __b)
746 {
747  return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);
748 }
749 
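/* Usage sketch for the horizontal intrinsics above: hadd/hsub pair elements
 * within each 128-bit lane, e.g. _mm256_hadd_pd(a, b) yields
 * { a0+a1, b0+b1, a2+a3, b2+b3 }, so a full reduction still needs one
 * cross-lane step. Illustrative helper, assuming <immintrin.h> and -mavx. */
static inline double hsum_pd(__m256d v)
{
    __m256d pairs = _mm256_hadd_pd(v, v);            /* { v0+v1, v0+v1, v2+v3, v2+v3 } */
    __m128d lo    = _mm256_castpd256_pd128(pairs);   /* lower 128-bit lane */
    __m128d hi    = _mm256_extractf128_pd(pairs, 1); /* upper 128-bit lane */
    return _mm_cvtsd_f64(_mm_add_sd(lo, hi));        /* combine the two partial sums */
}
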
750 /* Vector permutations */
774 static __inline __m128d __DEFAULT_FN_ATTRS128
775 _mm_permutevar_pd(__m128d __a, __m128i __c)
776 {
777  return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
778 }
779 
813 static __inline __m256d __DEFAULT_FN_ATTRS
814 _mm256_permutevar_pd(__m256d __a, __m256i __c)
815 {
816  return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
817 }
818 
867 static __inline __m128 __DEFAULT_FN_ATTRS128
868 _mm_permutevar_ps(__m128 __a, __m128i __c)
869 {
870  return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
871 }
872 
958 static __inline __m256 __DEFAULT_FN_ATTRS
959 _mm256_permutevar_ps(__m256 __a, __m256i __c)
960 {
961  return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
962 }
963 
991 #define _mm_permute_pd(A, C) \
992  (__m128d)__builtin_ia32_vpermilpd((__v2df)(__m128d)(A), (int)(C))
993 
1031 #define _mm256_permute_pd(A, C) \
1032  (__m256d)__builtin_ia32_vpermilpd256((__v4df)(__m256d)(A), (int)(C))
1033 
1087 #define _mm_permute_ps(A, C) \
1088  (__m128)__builtin_ia32_vpermilps((__v4sf)(__m128)(A), (int)(C))
1089 
1179 #define _mm256_permute_ps(A, C) \
1180  (__m256)__builtin_ia32_vpermilps256((__v8sf)(__m256)(A), (int)(C))
1181 
1219 #define _mm256_permute2f128_pd(V1, V2, M) \
1220  (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
1221  (__v4df)(__m256d)(V2), (int)(M))
1222 
1260 #define _mm256_permute2f128_ps(V1, V2, M) \
1261  (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
1262  (__v8sf)(__m256)(V2), (int)(M))
1263 
1300 #define _mm256_permute2f128_si256(V1, V2, M) \
1301  (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
1302  (__v8si)(__m256i)(V2), (int)(M))
1303 
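/* Usage sketch for the permute intrinsics above: permutevar/permute shuffle
 * within 128-bit lanes, while permute2f128 selects whole 128-bit halves via
 * its immediate. With immediate 0x01 the result's low half is the source's
 * high half and vice versa, i.e. a lane swap. Illustrative helper, assuming
 * <immintrin.h> and -mavx. */
static inline __m256 swap_lanes_ps(__m256 v)
{
    return _mm256_permute2f128_ps(v, v, 0x01);
}
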
1304 /* Vector Blend */
1329 #define _mm256_blend_pd(V1, V2, M) \
1330  (__m256d)__builtin_ia32_blendpd256((__v4df)(__m256d)(V1), \
1331  (__v4df)(__m256d)(V2), (int)(M))
1332 
1357 #define _mm256_blend_ps(V1, V2, M) \
1358  (__m256)__builtin_ia32_blendps256((__v8sf)(__m256)(V1), \
1359  (__v8sf)(__m256)(V2), (int)(M))
1360 
1382 static __inline __m256d __DEFAULT_FN_ATTRS
1383 _mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
1384 {
1385  return (__m256d)__builtin_ia32_blendvpd256(
1386  (__v4df)__a, (__v4df)__b, (__v4df)__c);
1387 }
1388 
1410 static __inline __m256 __DEFAULT_FN_ATTRS
1411 _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
1412 {
1413  return (__m256)__builtin_ia32_blendvps256(
1414  (__v8sf)__a, (__v8sf)__b, (__v8sf)__c);
1415 }
1416 
1417 /* Vector Dot Product */
1455 #define _mm256_dp_ps(V1, V2, M) \
1456  (__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
1457  (__v8sf)(__m256)(V2), (M))
1458 
1459 /* Vector shuffle */
1509 #define _mm256_shuffle_ps(a, b, mask) \
1510  (__m256)__builtin_ia32_shufps256((__v8sf)(__m256)(a), \
1511  (__v8sf)(__m256)(b), (int)(mask))
1512 
1555 #define _mm256_shuffle_pd(a, b, mask) \
1556  (__m256d)__builtin_ia32_shufpd256((__v4df)(__m256d)(a), \
1557  (__v4df)(__m256d)(b), (int)(mask))
1558 
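/* Usage sketch for the blend/shuffle macros above: the control operand is an
 * immediate, so it must be a compile-time constant. Mask 0xAA (0b10101010)
 * takes the odd-indexed lanes from the second operand and the even-indexed
 * lanes from the first. Illustrative helper, assuming <immintrin.h> and
 * -mavx. */
static inline __m256 merge_even_odd(__m256 even_src, __m256 odd_src)
{
    return _mm256_blend_ps(even_src, odd_src, 0xAA);
}
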
1559 /* Compare */
1560 #define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
1561 #define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */
1562 #define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */
1563 #define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */
1564 #define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */
1565 #define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */
1566 #define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */
1567 #define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */
1568 #define _CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */
1569 #define _CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unordered, signaling) */
1570 #define _CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */
1571 #define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */
1572 #define _CMP_NEQ_OQ 0x0c /* Not-equal (ordered, non-signaling) */
1573 #define _CMP_GE_OS 0x0d /* Greater-than-or-equal (ordered, signaling) */
1574 #define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */
1575 #define _CMP_TRUE_UQ 0x0f /* True (unordered, non-signaling) */
1576 #define _CMP_EQ_OS 0x10 /* Equal (ordered, signaling) */
1577 #define _CMP_LT_OQ 0x11 /* Less-than (ordered, non-signaling) */
1578 #define _CMP_LE_OQ 0x12 /* Less-than-or-equal (ordered, non-signaling) */
1579 #define _CMP_UNORD_S 0x13 /* Unordered (signaling) */
1580 #define _CMP_NEQ_US 0x14 /* Not-equal (unordered, signaling) */
1581 #define _CMP_NLT_UQ 0x15 /* Not-less-than (unordered, non-signaling) */
1582 #define _CMP_NLE_UQ 0x16 /* Not-less-than-or-equal (unordered, non-signaling) */
1583 #define _CMP_ORD_S 0x17 /* Ordered (signaling) */
1584 #define _CMP_EQ_US 0x18 /* Equal (unordered, signaling) */
1585 #define _CMP_NGE_UQ 0x19 /* Not-greater-than-or-equal (unordered, non-signaling) */
1586 #define _CMP_NGT_UQ 0x1a /* Not-greater-than (unordered, non-signaling) */
1587 #define _CMP_FALSE_OS 0x1b /* False (ordered, signaling) */
1588 #define _CMP_NEQ_OS 0x1c /* Not-equal (ordered, signaling) */
1589 #define _CMP_GE_OQ 0x1d /* Greater-than-or-equal (ordered, non-signaling) */
1590 #define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */
1591 #define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */
1592 
1649 #define _mm_cmp_pd(a, b, c) \
1650  (__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
1651  (__v2df)(__m128d)(b), (c))
1652 
1709 #define _mm_cmp_ps(a, b, c) \
1710  (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
1711  (__v4sf)(__m128)(b), (c))
1712 
1769 #define _mm256_cmp_pd(a, b, c) \
1770  (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
1771  (__v4df)(__m256d)(b), (c))
1772 
1829 #define _mm256_cmp_ps(a, b, c) \
1830  (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
1831  (__v8sf)(__m256)(b), (c))
1832 
1888 #define _mm_cmp_sd(a, b, c) \
1889  (__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
1890  (__v2df)(__m128d)(b), (c))
1891 
1947 #define _mm_cmp_ss(a, b, c) \
1948  (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
1949  (__v4sf)(__m128)(b), (c))
1950 
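/* Usage sketch for the comparison macros above: the _CMP_* predicate is
 * passed as the third operand, each result lane is all-ones or all-zero,
 * and _mm256_movemask_ps (defined later in this file) packs those lanes
 * into an ordinary bit mask. __builtin_popcount is a GCC/Clang builtin.
 * Illustrative helper, assuming <immintrin.h> and -mavx. */
static inline int count_less_than(__m256 a, __m256 b)
{
    __m256 lt   = _mm256_cmp_ps(a, b, _CMP_LT_OQ);   /* ordered, non-signaling a < b */
    int    mask = _mm256_movemask_ps(lt);            /* one bit per lane */
    return __builtin_popcount(mask);                 /* number of lanes where a < b */
}
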
1966 #define _mm256_extract_epi32(X, N) \
1967  (int)__builtin_ia32_vec_ext_v8si((__v8si)(__m256i)(X), (int)(N))
1968 
1984 #define _mm256_extract_epi16(X, N) \
1985  (int)(unsigned short)__builtin_ia32_vec_ext_v16hi((__v16hi)(__m256i)(X), \
1986  (int)(N))
1987 
2003 #define _mm256_extract_epi8(X, N) \
2004  (int)(unsigned char)__builtin_ia32_vec_ext_v32qi((__v32qi)(__m256i)(X), \
2005  (int)(N))
2006 
2007 #ifdef __x86_64__
2023 #define _mm256_extract_epi64(X, N) \
2024  (long long)__builtin_ia32_vec_ext_v4di((__v4di)(__m256i)(X), (int)(N))
2025 #endif
2026 
2045 #define _mm256_insert_epi32(X, I, N) \
2046  (__m256i)__builtin_ia32_vec_set_v8si((__v8si)(__m256i)(X), \
2047  (int)(I), (int)(N))
2048 
2049 
2068 #define _mm256_insert_epi16(X, I, N) \
2069  (__m256i)__builtin_ia32_vec_set_v16hi((__v16hi)(__m256i)(X), \
2070  (int)(I), (int)(N))
2071 
2090 #define _mm256_insert_epi8(X, I, N) \
2091  (__m256i)__builtin_ia32_vec_set_v32qi((__v32qi)(__m256i)(X), \
2092  (int)(I), (int)(N))
2093 
2094 #ifdef __x86_64__
2113 #define _mm256_insert_epi64(X, I, N) \
2114  (__m256i)__builtin_ia32_vec_set_v4di((__v4di)(__m256i)(X), \
2115  (long long)(I), (int)(N))
2116 #endif
2117 
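/* Usage sketch for the extract/insert macros above: the index must be a
 * constant, and insert returns a new vector rather than modifying its
 * argument. Illustrative helper, assuming <immintrin.h> and -mavx. */
static inline __m256i increment_lane3(__m256i v)
{
    int lane3 = _mm256_extract_epi32(v, 3);      /* read 32-bit element 3 */
    return _mm256_insert_epi32(v, lane3 + 1, 3); /* write it back incremented */
}
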
2118 /* Conversion */
2128 static __inline __m256d __DEFAULT_FN_ATTRS
2129 _mm256_cvtepi32_pd(__m128i __a)
2130 {
2131  return (__m256d)__builtin_convertvector((__v4si)__a, __v4df);
2132 }
2133 
2143 static __inline __m256 __DEFAULT_FN_ATTRS
2144 _mm256_cvtepi32_ps(__m256i __a)
2145 {
2146  return (__m256)__builtin_convertvector((__v8si)__a, __v8sf);
2147 }
2148 
2159 static __inline __m128 __DEFAULT_FN_ATTRS
2160 _mm256_cvtpd_ps(__m256d __a)
2161 {
2162  return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
2163 }
2164 
2174 static __inline __m256i __DEFAULT_FN_ATTRS
2175 _mm256_cvtps_epi32(__m256 __a)
2176 {
2177  return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
2178 }
2179 
2190 static __inline __m256d __DEFAULT_FN_ATTRS
2191 _mm256_cvtps_pd(__m128 __a)
2192 {
2193  return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df);
2194 }
2195 
2207 static __inline __m128i __DEFAULT_FN_ATTRS
2208 _mm256_cvttpd_epi32(__m256d __a)
2209 {
2210  return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
2211 }
2212 
2224 static __inline __m128i __DEFAULT_FN_ATTRS
2225 _mm256_cvtpd_epi32(__m256d __a)
2226 {
2227  return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
2228 }
2229 
2240 static __inline __m256i __DEFAULT_FN_ATTRS
2241 _mm256_cvttps_epi32(__m256 __a)
2242 {
2243  return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
2244 }
2245 
2256 static __inline double __DEFAULT_FN_ATTRS
2257 _mm256_cvtsd_f64(__m256d __a)
2258 {
2259  return __a[0];
2260 }
2261 
2272 static __inline int __DEFAULT_FN_ATTRS
2273 _mm256_cvtsi256_si32(__m256i __a)
2274 {
2275  __v8si __b = (__v8si)__a;
2276  return __b[0];
2277 }
2278 
2289 static __inline float __DEFAULT_FN_ATTRS
2290 _mm256_cvtss_f32(__m256 __a)
2291 {
2292  return __a[0];
2293 }
2294 
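/* Usage sketch for the conversion intrinsics above: cvt forms round using
 * the current MXCSR rounding mode, cvtt forms always truncate toward zero,
 * and conversions between double and 32-bit types change the vector width
 * (256 <-> 128 bits). Illustrative helper, assuming <immintrin.h> and
 * -mavx. */
static inline __m256i quantize_ps(__m256 x, float scale)
{
    __m256 scaled = _mm256_mul_ps(x, _mm256_set1_ps(scale));
    return _mm256_cvttps_epi32(scaled);          /* truncate each lane to int32 */
}
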
2295 /* Vector replicate */
2315 static __inline __m256 __DEFAULT_FN_ATTRS
2316 _mm256_movehdup_ps(__m256 __a)
2317 {
2318  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7);
2319 }
2320 
2340 static __inline __m256 __DEFAULT_FN_ATTRS
2341 _mm256_moveldup_ps(__m256 __a)
2342 {
2343  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6);
2344 }
2345 
2362 static __inline __m256d __DEFAULT_FN_ATTRS
2363 _mm256_movedup_pd(__m256d __a)
2364 {
2365  return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2);
2366 }
2367 
2368 /* Unpack and Interleave */
2385 static __inline __m256d __DEFAULT_FN_ATTRS
2386 _mm256_unpackhi_pd(__m256d __a, __m256d __b)
2387 {
2388  return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2);
2389 }
2390 
2407 static __inline __m256d __DEFAULT_FN_ATTRS
2408 _mm256_unpacklo_pd(__m256d __a, __m256d __b)
2409 {
2410  return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2);
2411 }
2412 
2434 static __inline __m256 __DEFAULT_FN_ATTRS
2435 _mm256_unpackhi_ps(__m256 __a, __m256 __b)
2436 {
2437  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
2438 }
2439 
2461 static __inline __m256 __DEFAULT_FN_ATTRS
2462 _mm256_unpacklo_ps(__m256 __a, __m256 __b)
2463 {
2464  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
2465 }
2466 
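/* Usage sketch for the unpack intrinsics above: like the horizontal ops they
 * interleave within each 128-bit lane, so _mm256_unpacklo_pd(a, b) is
 * { a0, b0, a2, b2 } rather than a full-width interleave. Illustrative
 * helper, assuming <immintrin.h> and -mavx. */
static inline void interleave_pd(__m256d a, __m256d b,
                                 __m256d *even, __m256d *odd)
{
    *even = _mm256_unpacklo_pd(a, b);            /* { a0, b0, a2, b2 } */
    *odd  = _mm256_unpackhi_pd(a, b);            /* { a1, b1, a3, b3 } */
}
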
2467 /* Bit Test */
2491 static __inline int __DEFAULT_FN_ATTRS128
2492 _mm_testz_pd(__m128d __a, __m128d __b)
2493 {
2494  return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
2495 }
2496 
2520 static __inline int __DEFAULT_FN_ATTRS128
2521 _mm_testc_pd(__m128d __a, __m128d __b)
2522 {
2523  return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
2524 }
2525 
2550 static __inline int __DEFAULT_FN_ATTRS128
2551 _mm_testnzc_pd(__m128d __a, __m128d __b)
2552 {
2553  return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
2554 }
2555 
2579 static __inline int __DEFAULT_FN_ATTRS128
2580 _mm_testz_ps(__m128 __a, __m128 __b)
2581 {
2582  return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
2583 }
2584 
2608 static __inline int __DEFAULT_FN_ATTRS128
2609 _mm_testc_ps(__m128 __a, __m128 __b)
2610 {
2611  return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
2612 }
2613 
2638 static __inline int __DEFAULT_FN_ATTRS128
2639 _mm_testnzc_ps(__m128 __a, __m128 __b)
2640 {
2641  return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
2642 }
2643 
2667 static __inline int __DEFAULT_FN_ATTRS
2668 _mm256_testz_pd(__m256d __a, __m256d __b)
2669 {
2670  return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
2671 }
2672 
2696 static __inline int __DEFAULT_FN_ATTRS
2697 _mm256_testc_pd(__m256d __a, __m256d __b)
2698 {
2699  return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
2700 }
2701 
2726 static __inline int __DEFAULT_FN_ATTRS
2727 _mm256_testnzc_pd(__m256d __a, __m256d __b)
2728 {
2729  return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
2730 }
2731 
2755 static __inline int __DEFAULT_FN_ATTRS
2756 _mm256_testz_ps(__m256 __a, __m256 __b)
2757 {
2758  return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
2759 }
2760 
2784 static __inline int __DEFAULT_FN_ATTRS
2785 _mm256_testc_ps(__m256 __a, __m256 __b)
2786 {
2787  return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
2788 }
2789 
2814 static __inline int __DEFAULT_FN_ATTRS
2815 _mm256_testnzc_ps(__m256 __a, __m256 __b)
2816 {
2817  return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
2818 }
2819 
2840 static __inline int __DEFAULT_FN_ATTRS
2841 _mm256_testz_si256(__m256i __a, __m256i __b)
2842 {
2843  return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
2844 }
2845 
2866 static __inline int __DEFAULT_FN_ATTRS
2867 _mm256_testc_si256(__m256i __a, __m256i __b)
2868 {
2869  return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
2870 }
2871 
2893 static __inline int __DEFAULT_FN_ATTRS
2894 _mm256_testnzc_si256(__m256i __a, __m256i __b)
2895 {
2896  return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);
2897 }
2898 
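/* Usage sketch for the test intrinsics above: they set ZF/CF from a bitwise
 * AND (and AND-NOT) of the operands and return the chosen flag as an int,
 * which makes them handy for cheap early-out checks. Illustrative helper,
 * assuming <immintrin.h> and -mavx. */
static inline int is_all_zero(__m256i v)
{
    return _mm256_testz_si256(v, v);             /* 1 iff every bit of v is zero */
}
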
2899 /* Vector extract sign mask */
2912 static __inline int __DEFAULT_FN_ATTRS
2913 _mm256_movemask_pd(__m256d __a)
2914 {
2915  return __builtin_ia32_movmskpd256((__v4df)__a);
2916 }
2917 
2930 static __inline int __DEFAULT_FN_ATTRS
2931 _mm256_movemask_ps(__m256 __a)
2932 {
2933  return __builtin_ia32_movmskps256((__v8sf)__a);
2934 }
2935 
2936 /* Vector __zero */
2942 static __inline void __attribute__((__always_inline__, __nodebug__, __target__("avx")))
2943 _mm256_zeroall(void)
2944 {
2945  __builtin_ia32_vzeroall();
2946 }
2947 
2953 static __inline void __attribute__((__always_inline__, __nodebug__, __target__("avx")))
2954 _mm256_zeroupper(void)
2955 {
2956  __builtin_ia32_vzeroupper();
2957 }
2958 
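/* Usage sketch for the sign-mask and zeroing intrinsics above: movemask
 * packs the per-lane sign bits into an int, and _mm256_zeroupper() is the
 * usual way to avoid the SSE/AVX transition penalty before calling legacy
 * SSE code (compilers normally insert it automatically). Illustrative
 * helper, assuming <immintrin.h> and -mavx. */
static inline int any_negative_pd(__m256d v)
{
    return _mm256_movemask_pd(v) != 0;           /* true if any sign bit is set */
}
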
2959 /* Vector load with broadcast */
2972 static __inline __m128 __DEFAULT_FN_ATTRS128
2973 _mm_broadcast_ss(float const *__a)
2974 {
2975  float __f = *__a;
2976  return __extension__ (__m128)(__v4sf){ __f, __f, __f, __f };
2977 }
2978 
2991 static __inline __m256d __DEFAULT_FN_ATTRS
2992 _mm256_broadcast_sd(double const *__a)
2993 {
2994  double __d = *__a;
2995  return __extension__ (__m256d)(__v4df){ __d, __d, __d, __d };
2996 }
2997 
3010 static __inline __m256 __DEFAULT_FN_ATTRS
3011 _mm256_broadcast_ss(float const *__a)
3012 {
3013  float __f = *__a;
3014  return __extension__ (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
3015 }
3016 
3029 static __inline __m256d __DEFAULT_FN_ATTRS
3030 _mm256_broadcast_pd(__m128d const *__a)
3031 {
3032  __m128d __b = _mm_loadu_pd((const double *)__a);
3033  return (__m256d)__builtin_shufflevector((__v2df)__b, (__v2df)__b,
3034  0, 1, 0, 1);
3035 }
3036 
3049 static __inline __m256 __DEFAULT_FN_ATTRS
3050 _mm256_broadcast_ps(__m128 const *__a)
3051 {
3052  __m128 __b = _mm_loadu_ps((const float *)__a);
3053  return (__m256)__builtin_shufflevector((__v4sf)__b, (__v4sf)__b,
3054  0, 1, 2, 3, 0, 1, 2, 3);
3055 }
3056 
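/* Usage sketch for the broadcast intrinsics above: they load a scalar or a
 * 128-bit vector from memory and replicate it into every lane, which suits
 * loop-invariant coefficients. Illustrative helper, assuming <immintrin.h>
 * and -mavx. */
static inline __m256 axpy8(const float *coeff, __m256 x, __m256 y)
{
    __m256 a = _mm256_broadcast_ss(coeff);       /* all 8 lanes = *coeff */
    return _mm256_add_ps(_mm256_mul_ps(a, x), y);
}
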
3057 /* SIMD load ops */
3069 static __inline __m256d __DEFAULT_FN_ATTRS
3070 _mm256_load_pd(double const *__p)
3071 {
3072  return *(const __m256d *)__p;
3073 }
3074 
3085 static __inline __m256 __DEFAULT_FN_ATTRS
3086 _mm256_load_ps(float const *__p)
3087 {
3088  return *(const __m256 *)__p;
3089 }
3090 
3102 static __inline __m256d __DEFAULT_FN_ATTRS
3103 _mm256_loadu_pd(double const *__p)
3104 {
3105  struct __loadu_pd {
3106  __m256d_u __v;
3107  } __attribute__((__packed__, __may_alias__));
3108  return ((const struct __loadu_pd*)__p)->__v;
3109 }
3110 
3122 static __inline __m256 __DEFAULT_FN_ATTRS
3123 _mm256_loadu_ps(float const *__p)
3124 {
3125  struct __loadu_ps {
3126  __m256_u __v;
3127  } __attribute__((__packed__, __may_alias__));
3128  return ((const struct __loadu_ps*)__p)->__v;
3129 }
3130 
3142 static __inline __m256i __DEFAULT_FN_ATTRS
3143 _mm256_load_si256(__m256i const *__p)
3144 {
3145  return *__p;
3146 }
3147 
3158 static __inline __m256i __DEFAULT_FN_ATTRS
3159 _mm256_loadu_si256(__m256i_u const *__p)
3160 {
3161  struct __loadu_si256 {
3162  __m256i_u __v;
3163  } __attribute__((__packed__, __may_alias__));
3164  return ((const struct __loadu_si256*)__p)->__v;
3165 }
3166 
3179 static __inline __m256i __DEFAULT_FN_ATTRS
3180 _mm256_lddqu_si256(__m256i const *__p)
3181 {
3182  return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
3183 }
3184 
3185 /* SIMD store ops */
3199 static __inline void __DEFAULT_FN_ATTRS
3200 _mm256_store_pd(double *__p, __m256d __a)
3201 {
3202  *(__m256d *)__p = __a;
3203 }
3204 
3217 static __inline void __DEFAULT_FN_ATTRS
3218 _mm256_store_ps(float *__p, __m256 __a)
3219 {
3220  *(__m256 *)__p = __a;
3221 }
3222 
3235 static __inline void __DEFAULT_FN_ATTRS
3236 _mm256_storeu_pd(double *__p, __m256d __a)
3237 {
3238  struct __storeu_pd {
3239  __m256d_u __v;
3240  } __attribute__((__packed__, __may_alias__));
3241  ((struct __storeu_pd*)__p)->__v = __a;
3242 }
3243 
3255 static __inline void __DEFAULT_FN_ATTRS
3256 _mm256_storeu_ps(float *__p, __m256 __a)
3257 {
3258  struct __storeu_ps {
3259  __m256_u __v;
3260  } __attribute__((__packed__, __may_alias__));
3261  ((struct __storeu_ps*)__p)->__v = __a;
3262 }
3263 
3276 static __inline void __DEFAULT_FN_ATTRS
3277 _mm256_store_si256(__m256i *__p, __m256i __a)
3278 {
3279  *__p = __a;
3280 }
3281 
3293 static __inline void __DEFAULT_FN_ATTRS
3294 _mm256_storeu_si256(__m256i_u *__p, __m256i __a)
3295 {
3296  struct __storeu_si256 {
3297  __m256i_u __v;
3298  } __attribute__((__packed__, __may_alias__));
3299  ((struct __storeu_si256*)__p)->__v = __a;
3300 }
3301 
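/* Usage sketch for the load/store intrinsics above: the _mm256_loadu/storeu
 * forms accept any alignment (via the unaligned __m256*_u types defined at
 * the top of this file), while _mm256_load/store require 32-byte alignment.
 * Illustrative helper, assuming <immintrin.h> and -mavx. */
static inline void copy_f32(float *dst, const float *src, int nblocks)
{
    for (int i = 0; i < nblocks; ++i) {          /* nblocks blocks of 8 floats */
        __m256 v = _mm256_loadu_ps(src + 8 * i);
        _mm256_storeu_ps(dst + 8 * i, v);
    }
}
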
3302 /* Conditional load ops */
3321 static __inline __m128d __DEFAULT_FN_ATTRS128
3322 _mm_maskload_pd(double const *__p, __m128i __m)
3323 {
3324  return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m);
3325 }
3326 
3345 static __inline __m256d __DEFAULT_FN_ATTRS
3346 _mm256_maskload_pd(double const *__p, __m256i __m)
3347 {
3348  return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p,
3349  (__v4di)__m);
3350 }
3351 
3370 static __inline __m128 __DEFAULT_FN_ATTRS128
3371 _mm_maskload_ps(float const *__p, __m128i __m)
3372 {
3373  return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m);
3374 }
3375 
3394 static __inline __m256 __DEFAULT_FN_ATTRS
3395 _mm256_maskload_ps(float const *__p, __m256i __m)
3396 {
3397  return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m);
3398 }
3399 
3400 /* Conditional store ops */
3419 static __inline void __DEFAULT_FN_ATTRS
3420 _mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
3421 {
3422  __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a);
3423 }
3424 
3443 static __inline void __DEFAULT_FN_ATTRS128
3444 _mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)
3445 {
3446  __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);
3447 }
3448 
3467 static __inline void __DEFAULT_FN_ATTRS
3468 _mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
3469 {
3470  __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a);
3471 }
3472 
3491 static __inline void __DEFAULT_FN_ATTRS128
3492 _mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
3493 {
3494  __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a);
3495 }
3496 
3497 /* Cacheability support ops */
3511 static __inline void __DEFAULT_FN_ATTRS
3512 _mm256_stream_si256(__m256i *__a, __m256i __b)
3513 {
3514  typedef __v4di __v4di_aligned __attribute__((aligned(32)));
3515  __builtin_nontemporal_store((__v4di_aligned)__b, (__v4di_aligned*)__a);
3516 }
3517 
3531 static __inline void __DEFAULT_FN_ATTRS
3532 _mm256_stream_pd(double *__a, __m256d __b)
3533 {
3534  typedef __v4df __v4df_aligned __attribute__((aligned(32)));
3535  __builtin_nontemporal_store((__v4df_aligned)__b, (__v4df_aligned*)__a);
3536 }
3537 
3552 static __inline void __DEFAULT_FN_ATTRS
3553 _mm256_stream_ps(float *__p, __m256 __a)
3554 {
3555  typedef __v8sf __v8sf_aligned __attribute__((aligned(32)));
3556  __builtin_nontemporal_store((__v8sf_aligned)__a, (__v8sf_aligned*)__p);
3557 }
3558 
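/* Usage sketch for the streaming intrinsics above: they perform
 * non-temporal (cache-bypassing) stores to 32-byte aligned addresses, and a
 * store fence such as _mm_sfence() is needed before other threads may read
 * the data. Illustrative helper, assuming <immintrin.h> and -mavx. */
static inline void fill_nt(float *dst32 /* 32-byte aligned */, __m256 value, int nblocks)
{
    for (int i = 0; i < nblocks; ++i)
        _mm256_stream_ps(dst32 + 8 * i, value);
    _mm_sfence();                                /* order the non-temporal stores */
}
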
3559 /* Create vectors */
3567 static __inline__ __m256d __DEFAULT_FN_ATTRS
3568 _mm256_undefined_pd(void)
3569 {
3570  return (__m256d)__builtin_ia32_undef256();
3571 }
3572 
3580 static __inline__ __m256 __DEFAULT_FN_ATTRS
3581 _mm256_undefined_ps(void)
3582 {
3583  return (__m256)__builtin_ia32_undef256();
3584 }
3585 
3593 static __inline__ __m256i __DEFAULT_FN_ATTRS
3594 _mm256_undefined_si256(void)
3595 {
3596  return (__m256i)__builtin_ia32_undef256();
3597 }
3598 
3620 static __inline __m256d __DEFAULT_FN_ATTRS
3621 _mm256_set_pd(double __a, double __b, double __c, double __d)
3622 {
3623  return __extension__ (__m256d){ __d, __c, __b, __a };
3624 }
3625 
3659 static __inline __m256 __DEFAULT_FN_ATTRS
3660 _mm256_set_ps(float __a, float __b, float __c, float __d,
3661  float __e, float __f, float __g, float __h)
3662 {
3663  return __extension__ (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
3664 }
3665 
3691 static __inline __m256i __DEFAULT_FN_ATTRS
3692 _mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
3693  int __i4, int __i5, int __i6, int __i7)
3694 {
3695  return __extension__ (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
3696 }
3697 
3739 static __inline __m256i __DEFAULT_FN_ATTRS
3740 _mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
3741  short __w11, short __w10, short __w09, short __w08,
3742  short __w07, short __w06, short __w05, short __w04,
3743  short __w03, short __w02, short __w01, short __w00)
3744 {
3745  return __extension__ (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
3746  __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
3747 }
3748 
3822 static __inline __m256i __DEFAULT_FN_ATTRS
3823 _mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
3824  char __b27, char __b26, char __b25, char __b24,
3825  char __b23, char __b22, char __b21, char __b20,
3826  char __b19, char __b18, char __b17, char __b16,
3827  char __b15, char __b14, char __b13, char __b12,
3828  char __b11, char __b10, char __b09, char __b08,
3829  char __b07, char __b06, char __b05, char __b04,
3830  char __b03, char __b02, char __b01, char __b00)
3831 {
3832  return __extension__ (__m256i)(__v32qi){
3833  __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
3834  __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
3835  __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
3836  __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31
3837  };
3838 }
3839 
3857 static __inline __m256i __DEFAULT_FN_ATTRS
3858 _mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
3859 {
3860  return __extension__ (__m256i)(__v4di){ __d, __c, __b, __a };
3861 }
3862 
3863 /* Create vectors with elements in reverse order */
3886 static __inline __m256d __DEFAULT_FN_ATTRS
3887 _mm256_setr_pd(double __a, double __b, double __c, double __d)
3888 {
3889  return _mm256_set_pd(__d, __c, __b, __a);
3890 }
3891 
3926 static __inline __m256 __DEFAULT_FN_ATTRS
3927 _mm256_setr_ps(float __a, float __b, float __c, float __d,
3928  float __e, float __f, float __g, float __h)
3929 {
3930  return _mm256_set_ps(__h, __g, __f, __e, __d, __c, __b, __a);
3931 }
3932 
3958 static __inline __m256i __DEFAULT_FN_ATTRS
3959 _mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
3960  int __i4, int __i5, int __i6, int __i7)
3961 {
3962  return _mm256_set_epi32(__i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0);
3963 }
3964 
4006 static __inline __m256i __DEFAULT_FN_ATTRS
4007 _mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
4008  short __w11, short __w10, short __w09, short __w08,
4009  short __w07, short __w06, short __w05, short __w04,
4010  short __w03, short __w02, short __w01, short __w00)
4011 {
4012  return _mm256_set_epi16(__w00, __w01, __w02, __w03,
4013  __w04, __w05, __w06, __w07,
4014  __w08, __w09, __w10, __w11,
4015  __w12, __w13, __w14, __w15);
4016 }
4017 
4091 static __inline __m256i __DEFAULT_FN_ATTRS
4092 _mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
4093  char __b27, char __b26, char __b25, char __b24,
4094  char __b23, char __b22, char __b21, char __b20,
4095  char __b19, char __b18, char __b17, char __b16,
4096  char __b15, char __b14, char __b13, char __b12,
4097  char __b11, char __b10, char __b09, char __b08,
4098  char __b07, char __b06, char __b05, char __b04,
4099  char __b03, char __b02, char __b01, char __b00)
4100 {
4101  return _mm256_set_epi8(__b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
4102  __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
4103  __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
4104  __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31);
4105 }
4106 
4124 static __inline __m256i __DEFAULT_FN_ATTRS
4125 _mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
4126 {
4127  return _mm256_set_epi64x(__d, __c, __b, __a);
4128 }
4129 
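/* Usage sketch for the set/setr intrinsics above: _mm256_set_* takes its
 * arguments from the highest element down to element 0, while _mm256_setr_*
 * takes them in low-to-high (memory) order, so both calls below build the
 * same vector. Illustrative helper, assuming <immintrin.h> and -mavx. */
static inline __m256i iota8(void)
{
    __m256i a = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
    __m256i b = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
    (void)b;                                     /* a and b hold identical values */
    return a;
}
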
4130 /* Create vectors with repeated elements */
4143 static __inline __m256d __DEFAULT_FN_ATTRS
4144 _mm256_set1_pd(double __w)
4145 {
4146  return _mm256_set_pd(__w, __w, __w, __w);
4147 }
4148 
4162 static __inline __m256 __DEFAULT_FN_ATTRS
4163 _mm256_set1_ps(float __w)
4164 {
4165  return _mm256_set_ps(__w, __w, __w, __w, __w, __w, __w, __w);
4166 }
4167 
4181 static __inline __m256i __DEFAULT_FN_ATTRS
4182 _mm256_set1_epi32(int __i)
4183 {
4184  return _mm256_set_epi32(__i, __i, __i, __i, __i, __i, __i, __i);
4185 }
4186 
4199 static __inline __m256i __DEFAULT_FN_ATTRS
4200 _mm256_set1_epi16(short __w)
4201 {
4202  return _mm256_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w,
4203  __w, __w, __w, __w, __w, __w, __w, __w);
4204 }
4205 
4217 static __inline __m256i __DEFAULT_FN_ATTRS
4218 _mm256_set1_epi8(char __b)
4219 {
4220  return _mm256_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b,
4221  __b, __b, __b, __b, __b, __b, __b, __b,
4222  __b, __b, __b, __b, __b, __b, __b, __b,
4223  __b, __b, __b, __b, __b, __b, __b, __b);
4224 }
4225 
4238 static __inline __m256i __DEFAULT_FN_ATTRS
4239 _mm256_set1_epi64x(long long __q)
4240 {
4241  return _mm256_set_epi64x(__q, __q, __q, __q);
4242 }
4243 
4244 /* Create __zeroed vectors */
4253 static __inline __m256d __DEFAULT_FN_ATTRS
4254 _mm256_setzero_pd(void)
4255 {
4256  return __extension__ (__m256d){ 0, 0, 0, 0 };
4257 }
4258 
4267 static __inline __m256 __DEFAULT_FN_ATTRS
4268 _mm256_setzero_ps(void)
4269 {
4270  return __extension__ (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
4271 }
4272 
4280 static __inline __m256i __DEFAULT_FN_ATTRS
4281 _mm256_setzero_si256(void)
4282 {
4283  return __extension__ (__m256i)(__v4di){ 0, 0, 0, 0 };
4284 }
4285 
4286 /* Cast between vector types */
4298 static __inline __m256 __DEFAULT_FN_ATTRS
4299 _mm256_castpd_ps(__m256d __a)
4300 {
4301  return (__m256)__a;
4302 }
4303 
4315 static __inline __m256i __DEFAULT_FN_ATTRS
4316 _mm256_castpd_si256(__m256d __a)
4317 {
4318  return (__m256i)__a;
4319 }
4320 
4332 static __inline __m256d __DEFAULT_FN_ATTRS
4333 _mm256_castps_pd(__m256 __a)
4334 {
4335  return (__m256d)__a;
4336 }
4337 
4349 static __inline __m256i __DEFAULT_FN_ATTRS
4350 _mm256_castps_si256(__m256 __a)
4351 {
4352  return (__m256i)__a;
4353 }
4354 
4366 static __inline __m256 __DEFAULT_FN_ATTRS
4367 _mm256_castsi256_ps(__m256i __a)
4368 {
4369  return (__m256)__a;
4370 }
4371 
4383 static __inline __m256d __DEFAULT_FN_ATTRS
4384 _mm256_castsi256_pd(__m256i __a)
4385 {
4386  return (__m256d)__a;
4387 }
4388 
4400 static __inline __m128d __DEFAULT_FN_ATTRS
4401 _mm256_castpd256_pd128(__m256d __a)
4402 {
4403  return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1);
4404 }
4405 
4417 static __inline __m128 __DEFAULT_FN_ATTRS
4418 _mm256_castps256_ps128(__m256 __a)
4419 {
4420  return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3);
4421 }
4422 
4433 static __inline __m128i __DEFAULT_FN_ATTRS
4434 _mm256_castsi256_si128(__m256i __a)
4435 {
4436  return __builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1);
4437 }
4438 
4454 static __inline __m256d __DEFAULT_FN_ATTRS
4455 _mm256_castpd128_pd256(__m128d __a)
4456 {
4457  return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1);
4458 }
4459 
4475 static __inline __m256 __DEFAULT_FN_ATTRS
4476 _mm256_castps128_ps256(__m128 __a)
4477 {
4478  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1);
4479 }
4480 
4494 static __inline __m256i __DEFAULT_FN_ATTRS
4495 _mm256_castsi128_si256(__m128i __a)
4496 {
4497  return __builtin_shufflevector((__v2di)__a, (__v2di)__a, 0, 1, -1, -1);
4498 }
4499 
4513 static __inline __m256d __DEFAULT_FN_ATTRS
4514 _mm256_zextpd128_pd256(__m128d __a)
4515 {
4516  return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3);
4517 }
4518 
4531 static __inline __m256 __DEFAULT_FN_ATTRS
4532 _mm256_zextps128_ps256(__m128 __a)
4533 {
4534  return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7);
4535 }
4536 
4549 static __inline __m256i __DEFAULT_FN_ATTRS
4550 _mm256_zextsi128_si256(__m128i __a)
4551 {
4552  return __builtin_shufflevector((__v2di)__a, (__v2di)_mm_setzero_si128(), 0, 1, 2, 3);
4553 }
4554 
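/* Usage sketch for the cast intrinsics above: they only reinterpret bits and
 * generate no instructions; the 256-to-128 casts return the low half, the
 * 128-to-256 casts leave the upper half undefined, and the zext forms
 * guarantee a zeroed upper half. Illustrative helper, assuming
 * <immintrin.h> and -mavx. */
static inline __m256 as_float_bits(__m256i raw)
{
    return _mm256_castsi256_ps(raw);             /* same bits, different element type */
}
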
4555 /*
4556  Vector insert.
4557  We use macros rather than inlines because we only want to accept
4558  invocations where the immediate M is a constant expression.
4559 */
4594 #define _mm256_insertf128_ps(V1, V2, M) \
4595  (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)(__m256)(V1), \
4596  (__v4sf)(__m128)(V2), (int)(M))
4597 
4632 #define _mm256_insertf128_pd(V1, V2, M) \
4633  (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)(__m256d)(V1), \
4634  (__v2df)(__m128d)(V2), (int)(M))
4635 
4670 #define _mm256_insertf128_si256(V1, V2, M) \
4671  (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)(__m256i)(V1), \
4672  (__v4si)(__m128i)(V2), (int)(M))
4673 
4674 /*
4675  Vector extract.
4676  We use macros rather than inlines because we only want to accept
4677  invocations where the immediate M is a constant expression.
4678 */
4700 #define _mm256_extractf128_ps(V, M) \
4701  (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)(__m256)(V), (int)(M))
4702 
4724 #define _mm256_extractf128_pd(V, M) \
4725  (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)(__m256d)(V), (int)(M))
4726 
4748 #define _mm256_extractf128_si256(V, M) \
4749  (__m128i)__builtin_ia32_vextractf128_si256((__v8si)(__m256i)(V), (int)(M))
4750 
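/* Usage sketch for the 128-bit insert/extract macros above: the immediate
 * selects the lower (0) or upper (1) half and must be a constant expression,
 * which is why these are macros. Illustrative helper, assuming
 * <immintrin.h> and -mavx. */
static inline __m256 join_m128(__m128 lo, __m128 hi)
{
    __m256 v = _mm256_castps128_ps256(lo);       /* low half = lo, high half undefined */
    return _mm256_insertf128_ps(v, hi, 1);       /* overwrite the high half with hi */
}
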
4751 /* SIMD load ops (unaligned) */
4773 static __inline __m256 __DEFAULT_FN_ATTRS
4774 _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
4775 {
4776  __m256 __v256 = _mm256_castps128_ps256(_mm_loadu_ps(__addr_lo));
4777  return _mm256_insertf128_ps(__v256, _mm_loadu_ps(__addr_hi), 1);
4778 }
4779 
4801 static __inline __m256d __DEFAULT_FN_ATTRS
4802 _mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
4803 {
4804  __m256d __v256 = _mm256_castpd128_pd256(_mm_loadu_pd(__addr_lo));
4805  return _mm256_insertf128_pd(__v256, _mm_loadu_pd(__addr_hi), 1);
4806 }
4807 
4826 static __inline __m256i __DEFAULT_FN_ATTRS
4827 _mm256_loadu2_m128i(__m128i_u const *__addr_hi, __m128i_u const *__addr_lo)
4828 {
4829  __m256i __v256 = _mm256_castsi128_si256(_mm_loadu_si128(__addr_lo));
4830  return _mm256_insertf128_si256(__v256, _mm_loadu_si128(__addr_hi), 1);
4831 }
4832 
4833 /* SIMD store ops (unaligned) */
4852 static __inline void __DEFAULT_FN_ATTRS
4853 _mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
4854 {
4855  __m128 __v128;
4856 
4857  __v128 = _mm256_castps256_ps128(__a);
4858  _mm_storeu_ps(__addr_lo, __v128);
4859  __v128 = _mm256_extractf128_ps(__a, 1);
4860  _mm_storeu_ps(__addr_hi, __v128);
4861 }
4862 
4881 static __inline void __DEFAULT_FN_ATTRS
4882 _mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
4883 {
4884  __m128d __v128;
4885 
4886  __v128 = _mm256_castpd256_pd128(__a);
4887  _mm_storeu_pd(__addr_lo, __v128);
4888  __v128 = _mm256_extractf128_pd(__a, 1);
4889  _mm_storeu_pd(__addr_hi, __v128);
4890 }
4891 
4910 static __inline void __DEFAULT_FN_ATTRS
4911 _mm256_storeu2_m128i(__m128i_u *__addr_hi, __m128i_u *__addr_lo, __m256i __a)
4912 {
4913  __m128i __v128;
4914 
4915  __v128 = _mm256_castsi256_si128(__a);
4916  _mm_storeu_si128(__addr_lo, __v128);
4917  __v128 = _mm256_extractf128_si256(__a, 1);
4918  _mm_storeu_si128(__addr_hi, __v128);
4919 }
4920 
4936 static __inline __m256 __DEFAULT_FN_ATTRS
4937 _mm256_set_m128 (__m128 __hi, __m128 __lo)
4938 {
4939  return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7);
4940 }
4941 
4957 static __inline __m256d __DEFAULT_FN_ATTRS
4958 _mm256_set_m128d (__m128d __hi, __m128d __lo)
4959 {
4960  return (__m256d) __builtin_shufflevector((__v2df)__lo, (__v2df)__hi, 0, 1, 2, 3);
4961 }
4962 
4977 static __inline __m256i __DEFAULT_FN_ATTRS
4978 _mm256_set_m128i (__m128i __hi, __m128i __lo)
4979 {
4980  return (__m256i) __builtin_shufflevector((__v2di)__lo, (__v2di)__hi, 0, 1, 2, 3);
4981 }
4982 
5000 static __inline __m256 __DEFAULT_FN_ATTRS
5001 _mm256_setr_m128 (__m128 __lo, __m128 __hi)
5002 {
5003  return _mm256_set_m128(__hi, __lo);
5004 }
5005 
5023 static __inline __m256d __DEFAULT_FN_ATTRS
5024 _mm256_setr_m128d (__m128d __lo, __m128d __hi)
5025 {
5026  return (__m256d)_mm256_set_m128d(__hi, __lo);
5027 }
5028 
5044 static __inline __m256i __DEFAULT_FN_ATTRS
5045 _mm256_setr_m128i (__m128i __lo, __m128i __hi)
5046 {
5047  return (__m256i)_mm256_set_m128i(__hi, __lo);
5048 }
5049 
5050 #undef __DEFAULT_FN_ATTRS
5051 #undef __DEFAULT_FN_ATTRS128
5052 
5053 #endif /* __AVXINTRIN_H */
static __inline unsigned char unsigned int unsigned int unsigned int * __p
Definition: adxintrin.h:24
static __inline__ vector float vector float vector float __c
Definition: altivec.h:4243
static __inline__ vector float vector float __b
Definition: altivec.h:520
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_broadcast_sd(double const *__a)
Loads a scalar double-precision floating point value from the specified address pointed to by __a and...
Definition: avxintrin.h:2992
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_set_m128(__m128 __hi, __m128 __lo)
Constructs a 256-bit floating-point vector of [8 x float] by concatenating two 128-bit floating-point...
Definition: avxintrin.h:4937
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_broadcast_pd(__m128d const *__a)
Loads the data from a 128-bit vector of [2 x double] from the specified address pointed to by __a and...
Definition: avxintrin.h:3030
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_hsub_pd(__m256d __a, __m256d __b)
Horizontally subtracts the adjacent pairs of values contained in two 256-bit vectors of [4 x double].
Definition: avxintrin.h:722
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu_pd(double *__p, __m256d __a)
Stores double-precision floating point values from a 256-bit vector of [4 x double] to an unaligned m...
Definition: avxintrin.h:3236
static __inline int __DEFAULT_FN_ATTRS _mm256_testc_si256(__m256i __a, __m256i __b)
Given two 256-bit integer vectors, perform a bit-by-bit comparison of the two source vectors.
Definition: avxintrin.h:2867
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_add_ps(__m256 __a, __m256 __b)
Adds two 256-bit vectors of [8 x float].
Definition: avxintrin.h:78
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_castsi256_ps(__m256i __a)
Casts a 256-bit integer vector into a 256-bit floating-point vector of [8 x float].
Definition: avxintrin.h:4367
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_broadcast_ps(__m128 const *__a)
Loads the data from a 128-bit vector of [4 x float] from the specified address pointed to by __a and ...
Definition: avxintrin.h:3050
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28, char __b27, char __b26, char __b25, char __b24, char __b23, char __b22, char __b21, char __b20, char __b19, char __b18, char __b17, char __b16, char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b09, char __b08, char __b07, char __b06, char __b05, char __b04, char __b03, char __b02, char __b01, char __b00)
Constructs a 256-bit integer vector, initialized in reverse order with the specified 8-bit integral v...
Definition: avxintrin.h:4092
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvtpd_epi32(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x i32].
Definition: avxintrin.h:2225
static __inline void __DEFAULT_FN_ATTRS _mm256_store_pd(double *__p, __m256d __a)
Stores double-precision floating point values from a 256-bit vector of [4 x double] to a 32-byte alig...
Definition: avxintrin.h:3200
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_zextsi128_si256(__m128i __a)
Constructs a 256-bit integer vector from a 128-bit integer vector.
Definition: avxintrin.h:4550
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu_ps(float *__p, __m256 __a)
Stores single-precision floating point values from a 256-bit vector of [8 x float] to an unaligned me...
Definition: avxintrin.h:3256
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_unpacklo_pd(__m256d __a, __m256d __b)
Unpacks the even-indexed vector elements from two 256-bit vectors of [4 x double] and interleaves the...
Definition: avxintrin.h:2408
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
Loads two 128-bit floating-point vectors of [4 x float] from unaligned memory locations and construct...
Definition: avxintrin.h:4774
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sqrt_ps(__m256 __a)
Calculates the square roots of the values in a 256-bit vector of [8 x float].
Definition: avxintrin.h:334
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_maskload_pd(double const *__p, __m256i __m)
Conditionally loads double-precision floating point elements from a memory location pointed to by __p...
Definition: avxintrin.h:3346
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_set1_ps(float __w)
Constructs a 256-bit floating-point vector of [8 x float], with each of the eight single-precision fl...
Definition: avxintrin.h:4163
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_castps_si256(__m256 __a)
Casts a 256-bit floating-point vector of [8 x float] into a 256-bit integer vector.
Definition: avxintrin.h:4350
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_xor_pd(__m256d __a, __m256d __b)
Performs a bitwise XOR of two 256-bit vectors of [4 x double].
Definition: avxintrin.h:634
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_cvtepi32_ps(__m256i __a)
Converts a vector of [8 x i32] into a vector of [8 x float].
Definition: avxintrin.h:2144
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_rcp_ps(__m256 __a)
Calculates the reciprocals of the values in a 256-bit vector of [8 x float].
Definition: avxintrin.h:368
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_moveldup_ps(__m256 __a)
Moves and duplicates even-indexed values from a 256-bit vector of [8 x float] to float values in a 25...
Definition: avxintrin.h:2341
static __inline __m128d __DEFAULT_FN_ATTRS _mm256_castpd256_pd128(__m256d __a)
Returns the lower 128 bits of a 256-bit floating-point vector of [4 x double] as a 128-bit floating-p...
Definition: avxintrin.h:4401
static __inline int __DEFAULT_FN_ATTRS _mm256_movemask_pd(__m256d __a)
Extracts the sign bits of double-precision floating point elements in a 256-bit vector of [4 x double...
Definition: avxintrin.h:2913
static __inline __m128 __DEFAULT_FN_ATTRS _mm256_cvtpd_ps(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x float].
Definition: avxintrin.h:2160
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_setzero_ps(void)
Constructs a 256-bit floating-point vector of [8 x float] with all vector elements initialized to zer...
Definition: avxintrin.h:4268
static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_undefined_ps(void)
Create a 256-bit vector of [8 x float] with undefined values.
Definition: avxintrin.h:3581
static __inline int __DEFAULT_FN_ATTRS _mm256_testnzc_ps(__m256 __a, __m256 __b)
Given two 256-bit floating-point vectors of [8 x float], perform an element-by-element comparison of ...
Definition: avxintrin.h:2815
static __inline __m128 __DEFAULT_FN_ATTRS _mm256_castps256_ps128(__m256 __a)
Returns the lower 128 bits of a 256-bit floating-point vector of [8 x float] as a 128-bit floating-po...
Definition: avxintrin.h:4418
static __inline __m128 __DEFAULT_FN_ATTRS128 _mm_maskload_ps(float const *__p, __m128i __m)
Conditionally loads single-precision floating point elements from a memory location pointed to by __p...
Definition: avxintrin.h:3371
static __inline __m128d __DEFAULT_FN_ATTRS128 _mm_maskload_pd(double const *__p, __m128i __m)
Conditionally loads double-precision floating point elements from a memory location pointed to by __p...
Definition: avxintrin.h:3322
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi8(char __b)
Constructs a 256-bit integer vector of [32 x i8], with each of the 8-bit integral vector elements set...
Definition: avxintrin.h:4218
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_permutevar_ps(__m256 __a, __m256i __c)
Copies the values stored in a 256-bit vector of [8 x float] as specified by the 256-bit integer vecto...
Definition: avxintrin.h:959
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_cvtps_pd(__m128 __a)
Converts a 128-bit vector of [4 x float] into a 256-bit vector of [4 x double].
Definition: avxintrin.h:2191
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu_si256(__m256i_u *__p, __m256i __a)
Stores integer values from a 256-bit integer vector to an unaligned memory location pointed to by __p...
Definition: avxintrin.h:3294
#define _mm256_extractf128_ps(V, M)
Extracts either the upper or the lower 128 bits from a 256-bit vector of [8 x float],...
Definition: avxintrin.h:4700
#define _mm256_extractf128_si256(V, M)
Extracts either the upper or the lower 128 bits from a 256-bit integer vector, as determined by the i...
Definition: avxintrin.h:4748
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_load_si256(__m256i const *__p)
Loads 256 bits of integer data from a 32-byte aligned memory location pointed to by __p into elements...
Definition: avxintrin.h:3143
static __inline int __DEFAULT_FN_ATTRS _mm256_testnzc_si256(__m256i __a, __m256i __b)
Given two 256-bit integer vectors, perform a bit-by-bit comparison of the two source vectors.
Definition: avxintrin.h:2894
static __inline void __DEFAULT_FN_ATTRS _mm256_stream_ps(float *__p, __m256 __a)
Moves single-precision floating point values from a 256-bit vector of [8 x float] to a 32-byte aligne...
Definition: avxintrin.h:3553
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
Merges 64-bit double-precision data values stored in either of the two 256-bit vectors of [4 x double...
Definition: avxintrin.h:1383
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_set_m128d(__m128d __hi, __m128d __lo)
Constructs a 256-bit floating-point vector of [4 x double] by concatenating two 128-bit floating-poin...
Definition: avxintrin.h:4958
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_castpd_ps(__m256d __a)
Casts a 256-bit floating-point vector of [4 x double] into a 256-bit floating-point vector of [8 x fl...
Definition: avxintrin.h:4299
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_set1_pd(double __w)
Constructs a 256-bit floating-point vector of [4 x double], with each of the four double-precision fl...
Definition: avxintrin.h:4144
static __inline __m128 __DEFAULT_FN_ATTRS128 _mm_permutevar_ps(__m128 __a, __m128i __c)
Copies the values stored in a 128-bit vector of [4 x float] as specified by the 128-bit integer vecto...
Definition: avxintrin.h:868
static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_undefined_pd(void)
Create a 256-bit vector of [4 x double] with undefined values.
Definition: avxintrin.h:3568
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_rsqrt_ps(__m256 __a)
Calculates the reciprocal square roots of the values in a 256-bit vector of [8 x float].
Definition: avxintrin.h:351
static __inline void __DEFAULT_FN_ATTRS _mm256_stream_si256(__m256i *__a, __m256i __b)
Moves integer data from a 256-bit integer vector to a 32-byte aligned memory location.
Definition: avxintrin.h:3512
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12, short __w11, short __w10, short __w09, short __w08, short __w07, short __w06, short __w05, short __w04, short __w03, short __w02, short __w01, short __w00)
Constructs a 256-bit integer vector, initialized in reverse order with the specified 16-bit integral ...
Definition: avxintrin.h:4007
#define __DEFAULT_FN_ATTRS
Definition: avxintrin.h:43
static __inline void __DEFAULT_FN_ATTRS128 _mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)
Moves double-precision values from a 128-bit vector of [2 x double] to a memory location pointed to b...
Definition: avxintrin.h:3444
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_castpd128_pd256(__m128d __a)
Constructs a 256-bit floating-point vector of [4 x double] from a 128-bit floating-point vector of [2...
Definition: avxintrin.h:4455
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvttpd_epi32(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x i32], truncating the result b...
Definition: avxintrin.h:2208
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_undefined_si256(void)
Create a 256-bit integer vector with undefined values.
Definition: avxintrin.h:3594
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_setr_ps(float __a, float __b, float __c, float __d, float __e, float __f, float __g, float __h)
Constructs a 256-bit floating-point vector of [8 x float], initialized in reverse order with the spec...
Definition: avxintrin.h:3927
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvtps_epi32(__m256 __a)
Converts a vector of [8 x float] into a vector of [8 x i32].
Definition: avxintrin.h:2175
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi64x(long long __q)
Constructs a 256-bit integer vector of [4 x i64], with each of the 64-bit integral vector elements se...
Definition: avxintrin.h:4239
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_castsi256_pd(__m256i __a)
Casts a 256-bit integer vector into a 256-bit floating-point vector of [4 x double].
Definition: avxintrin.h:4384
static __inline int __DEFAULT_FN_ATTRS128 _mm_testnzc_pd(__m128d __a, __m128d __b)
Given two 128-bit floating-point vectors of [2 x double], perform an element-by-element comparison of...
Definition: avxintrin.h:2551
static __inline void __DEFAULT_FN_ATTRS _mm256_stream_pd(double *__a, __m256d __b)
Moves double-precision values from a 256-bit vector of [4 x double] to a 32-byte aligned memory locat...
Definition: avxintrin.h:3532
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_min_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the lesser of each pair of values.
Definition: avxintrin.h:264
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_set_pd(double __a, double __b, double __c, double __d)
Constructs a 256-bit floating-point vector of [4 x double] initialized with the specified double-prec...
Definition: avxintrin.h:3621
static __inline int __DEFAULT_FN_ATTRS _mm256_testc_ps(__m256 __a, __m256 __b)
Given two 256-bit floating-point vectors of [8 x float], perform an element-by-element comparison of ...
Definition: avxintrin.h:2785
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_load_ps(float const *__p)
Loads 8 single-precision floating point values from a 32-byte aligned memory location pointed to by _...
Definition: avxintrin.h:3086
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_xor_ps(__m256 __a, __m256 __b)
Performs a bitwise XOR of two 256-bit vectors of [8 x float].
Definition: avxintrin.h:652
#define _mm256_extractf128_pd(V, M)
Extracts either the upper or the lower 128 bits from a 256-bit vector of [4 x double],...
Definition: avxintrin.h:4724
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvttps_epi32(__m256 __a)
Converts a vector of [8 x float] into a vector of [8 x i32], truncating the result by rounding toward...
Definition: avxintrin.h:2241
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_castps128_ps256(__m128 __a)
Constructs a 256-bit floating-point vector of [8 x float] from a 128-bit floating-point vector of [4 ...
Definition: avxintrin.h:4476
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_m128i(__m128i __lo, __m128i __hi)
Constructs a 256-bit integer vector by concatenating two 128-bit integer vectors.
Definition: avxintrin.h:5045
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sub_ps(__m256 __a, __m256 __b)
Subtracts two 256-bit vectors of [8 x float].
Definition: avxintrin.h:114
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
Constructs a 256-bit integer vector initialized with the specified 64-bit integral values.
Definition: avxintrin.h:3858
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_broadcast_ss(float const *__a)
Loads a scalar single-precision floating point value from the specified address pointed to by __a and...
Definition: avxintrin.h:3011
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_max_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the greater of each pair of values.
Definition: avxintrin.h:226
static __inline int __DEFAULT_FN_ATTRS _mm256_testnzc_pd(__m256d __a, __m256d __b)
Given two 256-bit floating-point vectors of [4 x double], perform an element-by-element comparison of...
Definition: avxintrin.h:2727
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
Loads two 128-bit floating-point vectors of [2 x double] from unaligned memory locations and construc...
Definition: avxintrin.h:4802
static __inline float __DEFAULT_FN_ATTRS _mm256_cvtss_f32(__m256 __a)
Returns the first element of the input vector of [8 x float].
Definition: avxintrin.h:2290
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_movehdup_ps(__m256 __a)
Moves and duplicates odd-indexed values from a 256-bit vector of [8 x float] to float values in a 256...
Definition: avxintrin.h:2316
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_lddqu_si256(__m256i const *__p)
Loads 256 bits of integer data from an unaligned memory location pointed to by __p into a 256-bit int...
Definition: avxintrin.h:3180
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_div_pd(__m256d __a, __m256d __b)
Divides two 256-bit vectors of [4 x double].
Definition: avxintrin.h:170
static __inline int __DEFAULT_FN_ATTRS128 _mm_testnzc_ps(__m128 __a, __m128 __b)
Given two 128-bit floating-point vectors of [4 x float], perform an element-by-element comparison of ...
Definition: avxintrin.h:2639
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
Stores the upper and lower 128 bits of a 256-bit floating-point vector of [4 x double] into two diffe...
Definition: avxintrin.h:4882
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sqrt_pd(__m256d __a)
Calculates the square roots of the values in a 256-bit vector of [4 x double].
Definition: avxintrin.h:317
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_addsub_pd(__m256d __a, __m256d __b)
Adds the even-indexed values and subtracts the odd-indexed values of two 256-bit vectors of [4 x doub...
Definition: avxintrin.h:133
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_hadd_ps(__m256 __a, __m256 __b)
Horizontally adds the adjacent pairs of values contained in two 256-bit vectors of [8 x float].
Definition: avxintrin.h:699
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_addsub_ps(__m256 __a, __m256 __b)
Adds the even-indexed values and subtracts the odd-indexed values of two 256-bit vectors of [8 x floa...
Definition: avxintrin.h:152
static __inline int __DEFAULT_FN_ATTRS _mm256_testz_pd(__m256d __a, __m256d __b)
Given two 256-bit floating-point vectors of [4 x double], perform an element-by-element comparison of...
Definition: avxintrin.h:2668
#define _mm256_insertf128_pd(V1, V2, M)
Constructs a new 256-bit vector of [4 x double] by first duplicating a 256-bit vector of [4 x double]...
Definition: avxintrin.h:4632
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_mul_pd(__m256d __a, __m256d __b)
Multiplies two 256-bit vectors of [4 x double].
Definition: avxintrin.h:282
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_and_ps(__m256 __a, __m256 __b)
Performs a bitwise AND of two 256-bit vectors of [8 x float].
Definition: avxintrin.h:538
static __inline void __DEFAULT_FN_ATTRS _mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
Moves double-precision values from a 256-bit vector of [4 x double] to a memory location pointed to b...
Definition: avxintrin.h:3468
static __inline void __DEFAULT_FN_ATTRS _mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
Moves single-precision floating point values from a 256-bit vector of [8 x float] to a memory locatio...
Definition: avxintrin.h:3420
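A hedged sketch of _mm256_maskstore_ps: only the lanes whose 32-bit mask element has its sign bit set are written; the other memory locations are left untouched.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    float out[8] = {0};  /* unselected slots keep their previous contents */
    __m256  v = _mm256_setr_ps(1, 2, 3, 4, 5, 6, 7, 8);
    /* Sign bit of each mask element decides whether that lane is stored. */
    __m256i m = _mm256_setr_epi32(-1, 0, -1, 0, -1, 0, -1, 0);
    _mm256_maskstore_ps(out, m, v);
    for (int i = 0; i < 8; ++i) printf("%g ", out[i]);  /* prints: 1 0 3 0 5 0 7 0 */
    printf("\n");
    return 0;
}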
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_or_ps(__m256 __a, __m256 __b)
Performs a bitwise OR of two 256-bit vectors of [8 x float].
Definition: avxintrin.h:616
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_loadu_pd(double const *__p)
Loads 4 double-precision floating point values from an unaligned memory location pointed to by __p in...
Definition: avxintrin.h:3103
static __inline int __DEFAULT_FN_ATTRS _mm256_testz_si256(__m256i __a, __m256i __b)
Given two 256-bit integer vectors, perform a bit-by-bit comparison of the two source vectors.
Definition: avxintrin.h:2841
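A minimal sketch (not from the header) of _mm256_testz_si256, which returns 1 exactly when the bitwise AND of its operands is all zero (the ZF result of VPTEST).

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256i a = _mm256_set1_epi32(0x0F0F0F0F);
    __m256i b = _mm256_set1_epi32((int)0xF0F0F0F0);
    __m256i c = _mm256_set1_epi32(1);
    printf("%d\n", _mm256_testz_si256(a, b));  /* 1: the bit patterns do not overlap */
    printf("%d\n", _mm256_testz_si256(a, c));  /* 0: bit 0 is set in both operands   */
    return 0;
}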
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_zextpd128_pd256(__m128d __a)
Constructs a 256-bit floating-point vector of [4 x double] from a 128-bit floating-point vector of [2...
Definition: avxintrin.h:4514
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
Constructs a 256-bit integer vector, initialized in reverse order with the specified 64-bit integral ...
Definition: avxintrin.h:4125
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_castps_pd(__m256 __a)
Casts a 256-bit floating-point vector of [8 x float] into a 256-bit floating-point vector of [4 x dou...
Definition: avxintrin.h:4333
static __inline double __DEFAULT_FN_ATTRS _mm256_cvtsd_f64(__m256d __a)
Returns the first element of the input vector of [4 x double].
Definition: avxintrin.h:2257
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_hadd_pd(__m256d __a, __m256d __b)
Horizontally adds the adjacent pairs of values contained in two 256-bit vectors of [4 x double].
Definition: avxintrin.h:676
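A short worked example (not part of the file) for _mm256_hadd_pd; the pairwise sums are formed within each 128-bit lane, interleaving results from the two sources.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256d a = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);
    __m256d b = _mm256_setr_pd(10.0, 20.0, 30.0, 40.0);
    /* result = { a0+a1, b0+b1, a2+a3, b2+b3 } = { 3, 30, 7, 70 } */
    __m256d h = _mm256_hadd_pd(a, b);
    double buf[4];
    _mm256_storeu_pd(buf, h);
    printf("%g %g %g %g\n", buf[0], buf[1], buf[2], buf[3]);
    return 0;
}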
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_hsub_ps(__m256 __a, __m256 __b)
Horizontally subtracts the adjacent pairs of values contained in two 256-bit vectors of [8 x float].
Definition: avxintrin.h:745
static __inline int __DEFAULT_FN_ATTRS _mm256_testc_pd(__m256d __a, __m256d __b)
Given two 256-bit floating-point vectors of [4 x double], perform an element-by-element comparison of...
Definition: avxintrin.h:2697
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_cvtepi32_pd(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x double].
Definition: avxintrin.h:2129
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_setr_m128d(__m128d __lo, __m128d __hi)
Constructs a 256-bit floating-point vector of [4 x double] by concatenating two 128-bit floating-poin...
Definition: avxintrin.h:5024
#define __DEFAULT_FN_ATTRS128
Definition: avxintrin.h:44
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_andnot_pd(__m256d __a, __m256d __b)
Performs a bitwise AND of two 256-bit vectors of [4 x double], using the one's complement of the valu...
Definition: avxintrin.h:559
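An illustrative sketch (not from the header) of _mm256_andnot_pd, which computes (~a) & b. A common use is clearing the sign bits to get a branch-free absolute value; _mm256_set1_pd is the standard AVX broadcast used to build the sign mask.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256d sign_mask = _mm256_set1_pd(-0.0);              /* only the sign bit set */
    __m256d x  = _mm256_setr_pd(-1.5, 2.5, -3.5, 4.5);
    __m256d ax = _mm256_andnot_pd(sign_mask, x);           /* (~sign) & x clears sign bits: {1.5, 2.5, 3.5, 4.5} */
    double buf[4];
    _mm256_storeu_pd(buf, ax);
    printf("%g %g %g %g\n", buf[0], buf[1], buf[2], buf[3]);
    return 0;
}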
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3, int __i4, int __i5, int __i6, int __i7)
Constructs a 256-bit integer vector, initialized in reverse order with the specified 32-bit integral ...
Definition: avxintrin.h:3959
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
Merges 32-bit single-precision data values stored in either of the two 256-bit vectors of [8 x float]...
Definition: avxintrin.h:1411
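A hedged sketch of _mm256_blendv_ps: each result element comes from the second operand where the control element's sign bit is set, otherwise from the first. The mask here is produced with the standard AVX _mm256_cmp_ps macro and the _CMP_LT_OQ predicate.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256 x    = _mm256_setr_ps(-1, 2, -3, 4, -5, 6, -7, 8);
    __m256 zero = _mm256_setzero_ps();
    __m256 mask = _mm256_cmp_ps(x, zero, _CMP_LT_OQ);  /* all-ones where x < 0 */
    /* Take zero where the mask sign bit is set, otherwise keep x: clamps negatives to 0. */
    __m256 r    = _mm256_blendv_ps(x, zero, mask);
    float buf[8];
    _mm256_storeu_ps(buf, r);
    for (int i = 0; i < 8; ++i) printf("%g ", buf[i]);  /* prints: 0 2 0 4 0 6 0 8 */
    printf("\n");
    return 0;
}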
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_unpackhi_pd(__m256d __a, __m256d __b)
Unpacks the odd-indexed vector elements from two 256-bit vectors of [4 x double] and interleaves them...
Definition: avxintrin.h:2386
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu2_m128i(__m128i_u *__addr_hi, __m128i_u *__addr_lo, __m256i __a)
Stores the upper and lower 128 bits of a 256-bit integer vector into two different unaligned memory l...
Definition: avxintrin.h:4911
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sub_pd(__m256d __a, __m256d __b)
Subtracts two 256-bit vectors of [4 x double].
Definition: avxintrin.h:96
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_m128i(__m128i __hi, __m128i __lo)
Constructs a 256-bit integer vector by concatenating two 128-bit integer vectors.
Definition: avxintrin.h:4978
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_or_pd(__m256d __a, __m256d __b)
Performs a bitwise OR of two 256-bit vectors of [4 x double].
Definition: avxintrin.h:598
static __inline int __DEFAULT_FN_ATTRS128 _mm_testz_pd(__m128d __a, __m128d __b)
Given two 128-bit floating-point vectors of [2 x double], perform an element-by-element comparison of...
Definition: avxintrin.h:2492
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_setzero_pd(void)
Constructs a 256-bit floating-point vector of [4 x double] with all vector elements initialized to ze...
Definition: avxintrin.h:4254
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_movedup_pd(__m256d __a)
Moves and duplicates double-precision floating point values from a 256-bit vector of [4 x double] to ...
Definition: avxintrin.h:2363
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_epi16(short __w15, short __w14, short __w13, short __w12, short __w11, short __w10, short __w09, short __w08, short __w07, short __w06, short __w05, short __w04, short __w03, short __w02, short __w01, short __w00)
Constructs a 256-bit integer vector initialized with the specified 16-bit integral values.
Definition: avxintrin.h:3740
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_loadu2_m128i(__m128i_u const *__addr_hi, __m128i_u const *__addr_lo)
Loads two 128-bit integer vectors from unaligned memory locations and constructs a 256-bit integer ve...
Definition: avxintrin.h:4827
#define _mm256_insertf128_si256(V1, V2, M)
Constructs a new 256-bit integer vector by first duplicating a 256-bit integer vector given in the fi...
Definition: avxintrin.h:4670
static __inline __m128d __DEFAULT_FN_ATTRS128 _mm_permutevar_pd(__m128d __a, __m128i __c)
Copies the values in a 128-bit vector of [2 x double] as specified by the 128-bit integer vector oper...
Definition: avxintrin.h:775
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setzero_si256(void)
Constructs a 256-bit integer vector initialized to zero.
Definition: avxintrin.h:4281
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_castpd_si256(__m256d __a)
Casts a 256-bit floating-point vector of [4 x double] into a 256-bit integer vector.
Definition: avxintrin.h:4316
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_maskload_ps(float const *__p, __m256i __m)
Conditionally loads single-precision floating point elements from a memory location pointed to by __p...
Definition: avxintrin.h:3395
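A minimal sketch (not part of the listing) of _mm256_maskload_ps: lanes whose mask element has the sign bit set are loaded from memory, the remaining lanes become zero and their memory is not read.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    float src[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    __m256i m = _mm256_setr_epi32(-1, -1, -1, 0, 0, 0, 0, 0);  /* load lanes 0..2 only */
    __m256  v = _mm256_maskload_ps(src, m);
    float buf[8];
    _mm256_storeu_ps(buf, v);
    for (int i = 0; i < 8; ++i) printf("%g ", buf[i]);  /* prints: 1 2 3 0 0 0 0 0 */
    printf("\n");
    return 0;
}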
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_div_ps(__m256 __a, __m256 __b)
Divides two 256-bit vectors of [8 x float].
Definition: avxintrin.h:188
static __inline int __DEFAULT_FN_ATTRS _mm256_cvtsi256_si32(__m256i __a)
Returns the first element of the input vector of [8 x i32].
Definition: avxintrin.h:2273
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_setr_m128(__m128 __lo, __m128 __hi)
Constructs a 256-bit floating-point vector of [8 x float] by concatenating two 128-bit floating-point...
Definition: avxintrin.h:5001
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_loadu_si256(__m256i_u const *__p)
Loads 256 bits of integer data from an unaligned memory location pointed to by __p into a 256-bit int...
Definition: avxintrin.h:3159
static __inline void __DEFAULT_FN_ATTRS _mm256_store_si256(__m256i *__p, __m256i __a)
Stores integer values from a 256-bit integer vector to a 32-byte aligned memory location pointed to b...
Definition: avxintrin.h:3277
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_min_pd(__m256d __a, __m256d __b)
Compares two 256-bit vectors of [4 x double] and returns the lesser of each pair of values.
Definition: avxintrin.h:245
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_castsi256_si128(__m256i __a)
Truncates a 256-bit integer vector into a 128-bit integer vector.
Definition: avxintrin.h:4434
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_epi32(int __i0, int __i1, int __i2, int __i3, int __i4, int __i5, int __i6, int __i7)
Constructs a 256-bit integer vector initialized with the specified 32-bit integral values.
Definition: avxintrin.h:3692
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_castsi128_si256(__m128i __a)
Constructs a 256-bit integer vector from a 128-bit integer vector.
Definition: avxintrin.h:4495
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_unpackhi_ps(__m256 __a, __m256 __b)
Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the two 256-bit vectors of [8 x float] ...
Definition: avxintrin.h:2435
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_epi8(char __b31, char __b30, char __b29, char __b28, char __b27, char __b26, char __b25, char __b24, char __b23, char __b22, char __b21, char __b20, char __b19, char __b18, char __b17, char __b16, char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b09, char __b08, char __b07, char __b06, char __b05, char __b04, char __b03, char __b02, char __b01, char __b00)
Constructs a 256-bit integer vector initialized with the specified 8-bit integral values.
Definition: avxintrin.h:3823
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_loadu_ps(float const *__p)
Loads 8 single-precision floating point values from an unaligned memory location pointed to by __p in...
Definition: avxintrin.h:3123
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_set_ps(float __a, float __b, float __c, float __d, float __e, float __f, float __g, float __h)
Constructs a 256-bit floating-point vector of [8 x float] initialized with the specified single-preci...
Definition: avxintrin.h:3660
static __inline __m128 __DEFAULT_FN_ATTRS128 _mm_broadcast_ss(float const *__a)
Loads a scalar single-precision floating point value from the specified address pointed to by __a and...
Definition: avxintrin.h:2973
static __inline int __DEFAULT_FN_ATTRS128 _mm_testc_ps(__m128 __a, __m128 __b)
Given two 128-bit floating-point vectors of [4 x float], perform an element-by-element comparison of ...
Definition: avxintrin.h:2609
static __inline int __DEFAULT_FN_ATTRS _mm256_movemask_ps(__m256 __a)
Extracts the sign bits of single-precision floating point elements in a 256-bit vector of [8 x float]...
Definition: avxintrin.h:2931
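A short sketch (not from the header) of _mm256_movemask_ps, which packs the eight sign bits into the low bits of an int; here combined with the GCC/Clang __builtin_popcount to count negative elements.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256 v = _mm256_setr_ps(-1.0f, 2.0f, -3.0f, 4.0f, 5.0f, -6.0f, 7.0f, 8.0f);
    int bits = _mm256_movemask_ps(v);  /* bit i = sign bit of element i: 0x25 here */
    printf("mask = 0x%02x, negatives = %d\n", bits, __builtin_popcount(bits));
    return 0;
}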
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_mul_ps(__m256 __a, __m256 __b)
Multiplies two 256-bit vectors of [8 x float].
Definition: avxintrin.h:300
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_unpacklo_ps(__m256 __a, __m256 __b)
Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the two 256-bit vectors of [8 x float] ...
Definition: avxintrin.h:2462
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_permutevar_pd(__m256d __a, __m256i __c)
Copies the values in a 256-bit vector of [4 x double] as specified by the 256-bit integer vector oper...
Definition: avxintrin.h:814
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_max_pd(__m256d __a, __m256d __b)
Compares two 256-bit vectors of [4 x double] and returns the greater of each pair of values.
Definition: avxintrin.h:207
static __inline int __DEFAULT_FN_ATTRS128 _mm_testz_ps(__m128 __a, __m128 __b)
Given two 128-bit floating-point vectors of [4 x float], perform an element-by-element comparison of ...
Definition: avxintrin.h:2580
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi16(short __w)
Constructs a 256-bit integer vector of [16 x i16], with each of the 16-bit integral vector elements s...
Definition: avxintrin.h:4200
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_and_pd(__m256d __a, __m256d __b)
Performs a bitwise AND of two 256-bit vectors of [4 x double].
Definition: avxintrin.h:520
#define _mm256_insertf128_ps(V1, V2, M)
Constructs a new 256-bit vector of [8 x float] by first duplicating a 256-bit vector of [8 x float] g...
Definition: avxintrin.h:4594
static __inline void __DEFAULT_FN_ATTRS _mm256_store_ps(float *__p, __m256 __a)
Stores single-precision floating point values from a 256-bit vector of [8 x float] to a 32-byte align...
Definition: avxintrin.h:3218
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
Stores the upper and lower 128 bits of a 256-bit floating-point vector of [8 x float] into two differ...
Definition: avxintrin.h:4853
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi32(int __i)
Constructs a 256-bit integer vector of [8 x i32], with each of the 32-bit integral vector elements se...
Definition: avxintrin.h:4182
typedef double __v4df __attribute__((__vector_size__(32)))
Definition: avxintrin.h:17
static __inline int __DEFAULT_FN_ATTRS128 _mm_testc_pd(__m128d __a, __m128d __b)
Given two 128-bit floating-point vectors of [2 x double], perform an element-by-element comparison of...
Definition: avxintrin.h:2521
static __inline int __DEFAULT_FN_ATTRS _mm256_testz_ps(__m256 __a, __m256 __b)
Given two 256-bit floating-point vectors of [8 x float], perform an element-by-element comparison of ...
Definition: avxintrin.h:2756
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_zextps128_ps256(__m128 __a)
Constructs a 256-bit floating-point vector of [8 x float] from a 128-bit floating-point vector of [4 ...
Definition: avxintrin.h:4532
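An illustrative sketch (not part of the file) contrasting _mm256_zextps128_ps256, whose upper 128 bits are guaranteed to be zero, with _mm256_castps128_ps256, whose upper bits are left undefined (and therefore costs nothing).

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128 lo = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
    __m256 z  = _mm256_zextps128_ps256(lo);   /* upper four elements are zero      */
    __m256 c  = _mm256_castps128_ps256(lo);   /* upper four elements are undefined */
    float buf[8];
    _mm256_storeu_ps(buf, z);
    printf("%g %g %g %g %g %g %g %g\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
    (void)c;
    return 0;
}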
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_add_pd(__m256d __a, __m256d __b)
Adds two 256-bit vectors of [4 x double].
Definition: avxintrin.h:60
static __inline void __DEFAULT_FN_ATTRS128 _mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
Moves single-precision floating point values from a 128-bit vector of [4 x float] to a memory locatio...
Definition: avxintrin.h:3492
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_load_pd(double const *__p)
Loads 4 double-precision floating point values from a 32-byte aligned memory location pointed to by _...
Definition: avxintrin.h:3070
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_andnot_ps(__m256 __a, __m256 __b)
Performs a bitwise AND of two 256-bit vectors of [8 x float], using the one's complement of the value...
Definition: avxintrin.h:580
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_setr_pd(double __a, double __b, double __c, double __d)
Constructs a 256-bit floating-point vector of [4 x double], initialized in reverse order with the spe...
Definition: avxintrin.h:3887
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp)
Loads a 128-bit floating-point vector of [2 x double] from an unaligned memory location.
Definition: emmintrin.h:1641
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si128(__m128i_u const *__p)
Moves packed integer values from an unaligned 128-bit memory location to elements in a 128-bit intege...
Definition: emmintrin.h:3548
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] initialized to zero.
Definition: emmintrin.h:1911
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp, __m128d __a)
Stores a 128-bit vector of [2 x double] into an unaligned memory location.
Definition: emmintrin.h:2031
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void)
Creates a 128-bit integer vector initialized to zero.
Definition: emmintrin.h:3977
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i_u *__p, __m128i __b)
Stores a 128-bit integer vector to an unaligned memory location.
Definition: emmintrin.h:4011
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_setzero_ps(void)
Constructs a 128-bit floating-point vector of [4 x float] initialized to zero.
Definition: xmmintrin.h:1903
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_ps(float *__p, __m128 __a)
Stores a 128-bit vector of [4 x float] to an unaligned memory location.
Definition: xmmintrin.h:1983
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_loadu_ps(const float *__p)
Loads a 128-bit floating-point vector of [4 x float] from an unaligned memory location.
Definition: xmmintrin.h:1740