
Searched refs: __a (Results 1 – 25 of 141), sorted by relevance


/external/clang/lib/Headers/
altivec.h
40 vector signed char __a, vector signed char __b, vector unsigned char __c);
43 vec_perm(vector unsigned char __a, vector unsigned char __b,
47 vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c);
49 static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a,
54 vec_perm(vector unsigned short __a, vector unsigned short __b,
58 vector bool short __a, vector bool short __b, vector unsigned char __c);
60 static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a,
64 static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a,
69 vector unsigned int __a, vector unsigned int __b, vector unsigned char __c);
72 vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c);
[all …]
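
These are the overload declarations for AltiVec's byte permute: result byte i is byte __c[i] % 32 of the 32-byte concatenation of __a and __b. A minimal usage sketch, assuming a PowerPC target built with -maltivec (byte-numbering conventions differ between big- and little-endian targets):

    #include <altivec.h>

    /* Reverse the 16 bytes of a vector: with __a == __b, index
       15 - i selects input byte 15 - i for result byte i. */
    vector unsigned char reverse_bytes(vector unsigned char v) {
        const vector unsigned char idx =
            {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0};
        return vec_perm(v, v, idx);
    }
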
vecintrin.h
361 vec_perm(vector signed char __a, vector signed char __b, in vec_perm() argument
364 (vector unsigned char)__a, (vector unsigned char)__b, __c); in vec_perm()
368 vec_perm(vector unsigned char __a, vector unsigned char __b, in vec_perm() argument
371 (vector unsigned char)__a, (vector unsigned char)__b, __c); in vec_perm()
375 vec_perm(vector bool char __a, vector bool char __b, in vec_perm() argument
378 (vector unsigned char)__a, (vector unsigned char)__b, __c); in vec_perm()
382 vec_perm(vector signed short __a, vector signed short __b, in vec_perm() argument
385 (vector unsigned char)__a, (vector unsigned char)__b, __c); in vec_perm()
389 vec_perm(vector unsigned short __a, vector unsigned short __b, in vec_perm() argument
392 (vector unsigned char)__a, (vector unsigned char)__b, __c); in vec_perm()
[all …]
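
The SystemZ header repeats one dispatch idiom: each typed overload reinterprets its operands as byte vectors, forwards to a single byte-level primitive, and casts the result back. A sketch of that idiom using GCC/Clang vector extensions; byte_permute here is a hypothetical stand-in, not the real target builtin:

    typedef unsigned char v16u8 __attribute__((vector_size(16)));
    typedef short v8i16 __attribute__((vector_size(16)));

    /* Hypothetical byte-level worker (assumed, not a real builtin). */
    extern v16u8 byte_permute(v16u8 a, v16u8 b, v16u8 c);

    /* Typed wrapper: reinterpret, dispatch, reinterpret back. */
    static inline v8i16 perm_s16(v8i16 a, v8i16 b, v16u8 c) {
        return (v8i16)byte_permute((v16u8)a, (v16u8)b, c);
    }
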
tbmintrin.h
39 __blcfill_u32(unsigned int __a) in __blcfill_u32() argument
41 return __a & (__a + 1); in __blcfill_u32()
45 __blci_u32(unsigned int __a) in __blci_u32() argument
47 return __a | ~(__a + 1); in __blci_u32()
51 __blcic_u32(unsigned int __a) in __blcic_u32() argument
53 return ~__a & (__a + 1); in __blcic_u32()
57 __blcmsk_u32(unsigned int __a) in __blcmsk_u32() argument
59 return __a ^ (__a + 1); in __blcmsk_u32()
63 __blcs_u32(unsigned int __a) in __blcs_u32() argument
65 return __a | (__a + 1); in __blcs_u32()
[all …]
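
The TBM wrappers above are one-line bit identities, so their behavior is easy to check in portable C. For example, with __a = 19 (binary 10011):

    #include <stdio.h>

    int main(void) {
        unsigned a = 19;                /* 0b10011 */
        printf("%#x\n", a & (a + 1));   /* blcfill: 0x10, trailing ones cleared */
        printf("%#x\n", a ^ (a + 1));   /* blcmsk:  0x7, lowest zero bit and below */
        printf("%#x\n", a | (a + 1));   /* blcs:    0x17, lowest zero bit set */
        return 0;
    }
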
emmintrin.h
53 _mm_add_sd(__m128d __a, __m128d __b) in _mm_add_sd() argument
55 __a[0] += __b[0]; in _mm_add_sd()
56 return __a; in _mm_add_sd()
60 _mm_add_pd(__m128d __a, __m128d __b) in _mm_add_pd() argument
62 return (__m128d)((__v2df)__a + (__v2df)__b); in _mm_add_pd()
66 _mm_sub_sd(__m128d __a, __m128d __b) in _mm_sub_sd() argument
68 __a[0] -= __b[0]; in _mm_sub_sd()
69 return __a; in _mm_sub_sd()
73 _mm_sub_pd(__m128d __a, __m128d __b) in _mm_sub_pd() argument
75 return (__m128d)((__v2df)__a - (__v2df)__b); in _mm_sub_pd()
[all …]
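
Note the scalar/packed split visible in the bodies: _mm_add_sd touches only element 0 and passes __a's upper element through, while _mm_add_pd adds both lanes. A small check, assuming an x86-64 (SSE2) target:

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128d a = _mm_set_pd(20.0, 1.0);   /* hi = 20, lo = 1 */
        __m128d b = _mm_set_pd(20.0, 3.0);   /* hi = 20, lo = 3 */
        double s[2], p[2];
        _mm_storeu_pd(s, _mm_add_sd(a, b));  /* {4, 20}: upper lane kept */
        _mm_storeu_pd(p, _mm_add_pd(a, b));  /* {4, 40}: both lanes added */
        printf("sd: %g %g  pd: %g %g\n", s[0], s[1], p[0], p[1]);
        return 0;
    }
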
xmmintrin.h
61 _mm_add_ss(__m128 __a, __m128 __b) in _mm_add_ss() argument
63 __a[0] += __b[0]; in _mm_add_ss()
64 return __a; in _mm_add_ss()
81 _mm_add_ps(__m128 __a, __m128 __b) in _mm_add_ps() argument
83 return (__m128)((__v4sf)__a + (__v4sf)__b); in _mm_add_ps()
103 _mm_sub_ss(__m128 __a, __m128 __b) in _mm_sub_ss() argument
105 __a[0] -= __b[0]; in _mm_sub_ss()
106 return __a; in _mm_sub_ss()
124 _mm_sub_ps(__m128 __a, __m128 __b) in _mm_sub_ps() argument
126 return (__m128)((__v4sf)__a - (__v4sf)__b); in _mm_sub_ps()
[all …]
tmmintrin.h
45 _mm_abs_pi8(__m64 __a) in _mm_abs_pi8() argument
47 return (__m64)__builtin_ia32_pabsb((__v8qi)__a); in _mm_abs_pi8()
63 _mm_abs_epi8(__m128i __a) in _mm_abs_epi8() argument
65 return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a); in _mm_abs_epi8()
81 _mm_abs_pi16(__m64 __a) in _mm_abs_pi16() argument
83 return (__m64)__builtin_ia32_pabsw((__v4hi)__a); in _mm_abs_pi16()
99 _mm_abs_epi16(__m128i __a) in _mm_abs_epi16() argument
101 return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a); in _mm_abs_epi16()
117 _mm_abs_pi32(__m64 __a) in _mm_abs_pi32() argument
119 return (__m64)__builtin_ia32_pabsd((__v2si)__a); in _mm_abs_pi32()
[all …]
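
These wrap the SSSE3 lane-wise absolute-value instructions. One caveat worth showing in a usage sketch (built with -mssse3): in two's complement, _mm_abs_epi8 maps -128 back to -128.

    #include <tmmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i v = _mm_setr_epi8(-1, 2, -3, 4, -128, 6, -7, 8,
                                  -9, 10, -11, 12, -13, 14, -15, 16);
        signed char out[16];
        _mm_storeu_si128((__m128i *)out, _mm_abs_epi8(v));
        for (int i = 0; i < 16; ++i)
            printf("%d ", out[i]);      /* 1 2 3 4 -128 6 7 8 ... */
        printf("\n");
        return 0;
    }
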
avxintrin.h
69 _mm256_add_pd(__m256d __a, __m256d __b) in _mm256_add_pd() argument
71 return (__m256d)((__v4df)__a+(__v4df)__b); in _mm256_add_pd()
87 _mm256_add_ps(__m256 __a, __m256 __b) in _mm256_add_ps() argument
89 return (__m256)((__v8sf)__a+(__v8sf)__b); in _mm256_add_ps()
105 _mm256_sub_pd(__m256d __a, __m256d __b) in _mm256_sub_pd() argument
107 return (__m256d)((__v4df)__a-(__v4df)__b); in _mm256_sub_pd()
123 _mm256_sub_ps(__m256 __a, __m256 __b) in _mm256_sub_ps() argument
125 return (__m256)((__v8sf)__a-(__v8sf)__b); in _mm256_sub_ps()
142 _mm256_addsub_pd(__m256d __a, __m256d __b) in _mm256_addsub_pd() argument
144 return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b); in _mm256_addsub_pd()
[all …]
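
_mm256_addsub_pd subtracts in even lanes and adds in odd lanes, the building block complex-arithmetic kernels use. A check of that lane pattern, assuming an AVX target (-mavx):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m256d a = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);
        __m256d b = _mm256_set1_pd(10.0);
        double r[4];
        _mm256_storeu_pd(r, _mm256_addsub_pd(a, b));
        /* even lanes: a - b, odd lanes: a + b */
        printf("%g %g %g %g\n", r[0], r[1], r[2], r[3]);  /* -9 12 -7 14 */
        return 0;
    }
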
avx2intrin.h
40 _mm256_abs_epi8(__m256i __a) in _mm256_abs_epi8() argument
42 return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a); in _mm256_abs_epi8()
46 _mm256_abs_epi16(__m256i __a) in _mm256_abs_epi16() argument
48 return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a); in _mm256_abs_epi16()
52 _mm256_abs_epi32(__m256i __a) in _mm256_abs_epi32() argument
54 return (__m256i)__builtin_ia32_pabsd256((__v8si)__a); in _mm256_abs_epi32()
58 _mm256_packs_epi16(__m256i __a, __m256i __b) in _mm256_packs_epi16() argument
60 return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b); in _mm256_packs_epi16()
64 _mm256_packs_epi32(__m256i __a, __m256i __b) in _mm256_packs_epi32() argument
66 return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b); in _mm256_packs_epi32()
[all …]
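
_mm256_packs_epi16 narrows 16-bit lanes to 8 bits with signed saturation, and, like most AVX2 pack operations, it interleaves per 128-bit half ({a.lo, b.lo, a.hi, b.hi}) rather than across the full register. A saturation check, assuming -mavx2:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m256i a = _mm256_set1_epi16(300);    /* clamps to 127 */
        __m256i b = _mm256_set1_epi16(-300);   /* clamps to -128 */
        signed char out[32];
        _mm256_storeu_si256((__m256i *)out, _mm256_packs_epi16(a, b));
        printf("%d %d\n", out[0], out[8]);     /* 127 -128 */
        return 0;
    }
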
pmmintrin.h
65 _mm_addsub_ps(__m128 __a, __m128 __b) in _mm_addsub_ps() argument
67 return __builtin_ia32_addsubps((__v4sf)__a, (__v4sf)__b); in _mm_addsub_ps()
88 _mm_hadd_ps(__m128 __a, __m128 __b) in _mm_hadd_ps() argument
90 return __builtin_ia32_haddps((__v4sf)__a, (__v4sf)__b); in _mm_hadd_ps()
111 _mm_hsub_ps(__m128 __a, __m128 __b) in _mm_hsub_ps() argument
113 return __builtin_ia32_hsubps((__v4sf)__a, (__v4sf)__b); in _mm_hsub_ps()
133 _mm_movehdup_ps(__m128 __a) in _mm_movehdup_ps() argument
135 return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 1, 1, 3, 3); in _mm_movehdup_ps()
155 _mm_moveldup_ps(__m128 __a) in _mm_moveldup_ps() argument
157 return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 2, 2); in _mm_moveldup_ps()
[all …]
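
_mm_hadd_ps sums adjacent pairs: hadd(a, b) = {a0+a1, a2+a3, b0+b1, b2+b3}, so applying it twice reduces a vector to its total. A sketch, assuming -msse3:

    #include <pmmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128 v = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
        __m128 s = _mm_hadd_ps(v, v);      /* {3, 7, 3, 7} */
        s = _mm_hadd_ps(s, s);             /* {10, 10, 10, 10} */
        printf("%g\n", _mm_cvtss_f32(s));  /* 10 */
        return 0;
    }
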
__clang_cuda_runtime_wrapper.h
170 static inline float rsqrt(float __a) { return rsqrtf(__a); } in rsqrt() argument
171 static inline float rcbrt(float __a) { return rcbrtf(__a); } in rcbrt() argument
172 static inline float sinpi(float __a) { return sinpif(__a); } in sinpi() argument
173 static inline float cospi(float __a) { return cospif(__a); } in cospi() argument
174 static inline void sincospi(float __a, float *__b, float *__c) { in sincospi() argument
175 return sincospif(__a, __b, __c); in sincospi()
177 static inline float erfcinv(float __a) { return erfcinvf(__a); } in erfcinv() argument
178 static inline float normcdfinv(float __a) { return normcdfinvf(__a); } in normcdfinv() argument
179 static inline float normcdf(float __a) { return normcdff(__a); } in normcdf() argument
180 static inline float erfcx(float __a) { return erfcxf(__a); } in erfcx() argument
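
The wrapper supplies float overloads that forward to CUDA's f-suffixed single-precision functions, so overload resolution picks the cheap path for float arguments. A sketch of the same pattern (C++ here, since overloading is the mechanism; my_op/my_opf are illustrative names, not real CUDA APIs):

    double my_op(double a);   // assumed double-precision primitive
    float my_opf(float a);    // assumed single-precision primitive

    // Float overload forwards to the f-suffixed variant, mirroring
    // rsqrt/rsqrtf above.
    static inline float my_op(float a) { return my_opf(a); }
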
avx512vlbwintrin.h
42 _mm_cmpeq_epi8_mask(__m128i __a, __m128i __b) { in _mm_cmpeq_epi8_mask() argument
43 return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b, in _mm_cmpeq_epi8_mask()
48 _mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) { in _mm_mask_cmpeq_epi8_mask() argument
49 return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b, in _mm_mask_cmpeq_epi8_mask()
54 _mm_cmpeq_epu8_mask(__m128i __a, __m128i __b) { in _mm_cmpeq_epu8_mask() argument
55 return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 0, in _mm_cmpeq_epu8_mask()
60 _mm_mask_cmpeq_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) { in _mm_mask_cmpeq_epu8_mask() argument
61 return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 0, in _mm_mask_cmpeq_epu8_mask()
66 _mm256_cmpeq_epi8_mask(__m256i __a, __m256i __b) { in _mm256_cmpeq_epi8_mask() argument
67 return (__mmask32)__builtin_ia32_pcmpeqb256_mask((__v32qi)__a, (__v32qi)__b, in _mm256_cmpeq_epi8_mask()
[all …]
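
Unlike the SSE/AVX compares that return all-ones lanes, these AVX-512VL/BW compares return one bit per lane in a mask value, and the _mm_mask_ variants AND that result with __u. A small check, assuming -mavx512vl -mavx512bw:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i a = _mm_setr_epi8(1, 2, 3, 4, 0, 0, 0, 0,
                                  0, 0, 0, 0, 0, 0, 0, 0);
        __m128i b = _mm_setr_epi8(1, 9, 3, 9, 1, 1, 1, 1,
                                  1, 1, 1, 1, 1, 1, 1, 1);
        __mmask16 m = _mm_cmpeq_epi8_mask(a, b);
        printf("%#x\n", (unsigned)m);  /* 0x5: bytes 0 and 2 matched */
        return 0;
    }
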
arm_acle.h
260 __crc32b(uint32_t __a, uint8_t __b) { in __crc32b() argument
261 return __builtin_arm_crc32b(__a, __b); in __crc32b()
265 __crc32h(uint32_t __a, uint16_t __b) { in __crc32h() argument
266 return __builtin_arm_crc32h(__a, __b); in __crc32h()
270 __crc32w(uint32_t __a, uint32_t __b) { in __crc32w() argument
271 return __builtin_arm_crc32w(__a, __b); in __crc32w()
275 __crc32d(uint32_t __a, uint64_t __b) { in __crc32d() argument
276 return __builtin_arm_crc32d(__a, __b); in __crc32d()
280 __crc32cb(uint32_t __a, uint8_t __b) { in __crc32cb() argument
281 return __builtin_arm_crc32cb(__a, __b); in __crc32cb()
[all …]
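
The ACLE intrinsics fold 8, 16, 32, or 64 bits into a running CRC. A sketch of folding a byte buffer with the CRC-32C variant, assuming a target built with the CRC extension (e.g. -march=armv8-a+crc); the init/finalize convention shown is the common CRC-32C one:

    #include <arm_acle.h>
    #include <stddef.h>
    #include <stdint.h>

    uint32_t crc32c_bytes(const uint8_t *p, size_t n) {
        uint32_t crc = 0xFFFFFFFFu;    /* conventional initial value */
        for (size_t i = 0; i < n; ++i)
            crc = __crc32cb(crc, p[i]);
        return ~crc;                   /* conventional final inversion */
    }
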
/external/clang/test/Sema/
builtins-x86.c
13 __m128 test__builtin_ia32_cmpps(__m128 __a, __m128 __b) { in test__builtin_ia32_cmpps() argument
14 __builtin_ia32_cmpps(__a, __b, 32); // expected-error {{argument should be a value from 0 to 31}} in test__builtin_ia32_cmpps()
17 __m128d test__builtin_ia32_cmppd(__m128d __a, __m128d __b) { in test__builtin_ia32_cmppd() argument
18 __builtin_ia32_cmppd(__a, __b, 32); // expected-error {{argument should be a value from 0 to 31}} in test__builtin_ia32_cmppd()
21 __m128 test__builtin_ia32_cmpss(__m128 __a, __m128 __b) { in test__builtin_ia32_cmpss() argument
22 __builtin_ia32_cmpss(__a, __b, 32); // expected-error {{argument should be a value from 0 to 31}} in test__builtin_ia32_cmpss()
25 __m128d test__builtin_ia32_cmpsd(__m128d __a, __m128d __b) { in test__builtin_ia32_cmpsd() argument
26 __builtin_ia32_cmpsd(__a, __b, 32); // expected-error {{argument should be a value from 0 to 31}} in test__builtin_ia32_cmpsd()
29 __mmask16 test__builtin_ia32_cmpps512_mask(__m512d __a, __m512d __b) { in test__builtin_ia32_cmpps512_mask() argument
30 …__builtin_ia32_cmpps512_mask(__a, __b, 32, -1, 0); // expected-error {{argument should be a value … in test__builtin_ia32_cmpps512_mask()
[all …]
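
These Sema tests pin down that the compare-predicate immediate must be a compile-time constant in [0, 31]. A valid counterpart, assuming an AVX build where the _CMP_* predicate macros are available (_CMP_LT_OQ is 17):

    #include <immintrin.h>

    __m128 less_than(__m128 a, __m128 b) {
        /* all-ones lanes where a < b (ordered, quiet) */
        return _mm_cmp_ps(a, b, _CMP_LT_OQ);
    }
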
/external/clang/test/CodeGen/
avx512vlbw-builtins.c
9 __mmask32 test_mm256_cmpeq_epi8_mask(__m256i __a, __m256i __b) { in test_mm256_cmpeq_epi8_mask() argument
12 return (__mmask32)_mm256_cmpeq_epi8_mask(__a, __b); in test_mm256_cmpeq_epi8_mask()
15 __mmask32 test_mm256_mask_cmpeq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) { in test_mm256_mask_cmpeq_epi8_mask() argument
19 return (__mmask32)_mm256_mask_cmpeq_epi8_mask(__u, __a, __b); in test_mm256_mask_cmpeq_epi8_mask()
22 __mmask16 test_mm_cmpeq_epi8_mask(__m128i __a, __m128i __b) { in test_mm_cmpeq_epi8_mask() argument
25 return (__mmask16)_mm_cmpeq_epi8_mask(__a, __b); in test_mm_cmpeq_epi8_mask()
28 __mmask16 test_mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) { in test_mm_mask_cmpeq_epi8_mask() argument
32 return (__mmask16)_mm_mask_cmpeq_epi8_mask(__u, __a, __b); in test_mm_mask_cmpeq_epi8_mask()
35 __mmask16 test_mm256_cmpeq_epi16_mask(__m256i __a, __m256i __b) { in test_mm256_cmpeq_epi16_mask() argument
38 return (__mmask16)_mm256_cmpeq_epi16_mask(__a, __b); in test_mm256_cmpeq_epi16_mask()
[all …]
sse-builtins.c
38 __m128 test_mm_cmpeq_ps(__m128 __a, __m128 __b) { in test_mm_cmpeq_ps() argument
44 return _mm_cmpeq_ps(__a, __b); in test_mm_cmpeq_ps()
47 __m128 test_mm_cmpeq_ss(__m128 __a, __m128 __b) { in test_mm_cmpeq_ss() argument
50 return _mm_cmpeq_ss(__a, __b); in test_mm_cmpeq_ss()
53 __m128 test_mm_cmpge_ps(__m128 __a, __m128 __b) { in test_mm_cmpge_ps() argument
59 return _mm_cmpge_ps(__a, __b); in test_mm_cmpge_ps()
62 __m128 test_mm_cmpge_ss(__m128 __a, __m128 __b) { in test_mm_cmpge_ss() argument
66 return _mm_cmpge_ss(__a, __b); in test_mm_cmpge_ss()
69 __m128 test_mm_cmpgt_ps(__m128 __a, __m128 __b) { in test_mm_cmpgt_ps() argument
75 return _mm_cmpgt_ps(__a, __b); in test_mm_cmpgt_ps()
[all …]
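
These CodeGen tests check the lowering of the SSE compares; semantically each predicate produces an all-ones (0xFFFFFFFF) lane where it holds and zero elsewhere, typically consumed with _mm_movemask_ps or a bitwise select. A sketch:

    #include <xmmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
        __m128 b = _mm_setr_ps(1.0f, 0.0f, 3.0f, 0.0f);
        int m = _mm_movemask_ps(_mm_cmpeq_ps(a, b));
        printf("%#x\n", m);  /* 0x5: lanes 0 and 2 matched */
        return 0;
    }
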
avx512bw-builtins.c
9 __mmask64 test_mm512_cmpeq_epi8_mask(__m512i __a, __m512i __b) { in test_mm512_cmpeq_epi8_mask() argument
12 return (__mmask64)_mm512_cmpeq_epi8_mask(__a, __b); in test_mm512_cmpeq_epi8_mask()
15 __mmask64 test_mm512_mask_cmpeq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { in test_mm512_mask_cmpeq_epi8_mask() argument
19 return (__mmask64)_mm512_mask_cmpeq_epi8_mask(__u, __a, __b); in test_mm512_mask_cmpeq_epi8_mask()
22 __mmask32 test_mm512_cmpeq_epi16_mask(__m512i __a, __m512i __b) { in test_mm512_cmpeq_epi16_mask() argument
25 return (__mmask32)_mm512_cmpeq_epi16_mask(__a, __b); in test_mm512_cmpeq_epi16_mask()
28 __mmask32 test_mm512_mask_cmpeq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) { in test_mm512_mask_cmpeq_epi16_mask() argument
32 return (__mmask32)_mm512_mask_cmpeq_epi16_mask(__u, __a, __b); in test_mm512_mask_cmpeq_epi16_mask()
35 __mmask64 test_mm512_cmpgt_epi8_mask(__m512i __a, __m512i __b) { in test_mm512_cmpgt_epi8_mask() argument
38 return (__mmask64)_mm512_cmpgt_epi8_mask(__a, __b); in test_mm512_cmpgt_epi8_mask()
[all …]
/external/compiler-rt/lib/builtins/
mulsc3.c
21 __mulsc3(float __a, float __b, float __c, float __d) in __mulsc3() argument
23 float __ac = __a * __c; in __mulsc3()
25 float __ad = __a * __d; in __mulsc3()
33 if (crt_isinf(__a) || crt_isinf(__b)) in __mulsc3()
35 __a = crt_copysignf(crt_isinf(__a) ? 1 : 0, __a); in __mulsc3()
47 if (crt_isnan(__a)) in __mulsc3()
48 __a = crt_copysignf(0, __a); in __mulsc3()
56 if (crt_isnan(__a)) in __mulsc3()
57 __a = crt_copysignf(0, __a); in __mulsc3()
68 COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c - __b * __d); in __mulsc3()
[all …]
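
__mulsc3 is the compiler-rt runtime for _Complex float multiplication: the fast path is the textbook (ac - bd) + (ad + bc)i, and the code above only patches up NaN/infinity operands as C Annex G requires. Compilers typically emit a call to it for a plain complex multiply:

    #include <complex.h>
    #include <stdio.h>

    int main(void) {
        float complex x = 1.0f + 2.0f * I;
        float complex y = 3.0f + 4.0f * I;
        /* (1*3 - 2*4) + (1*4 + 2*3)i = -5 + 10i */
        float complex z = x * y;
        printf("%g%+gi\n", crealf(z), cimagf(z));
        return 0;
    }
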
muldc3.c
21 __muldc3(double __a, double __b, double __c, double __d) in __muldc3() argument
23 double __ac = __a * __c; in __muldc3()
25 double __ad = __a * __d; in __muldc3()
33 if (crt_isinf(__a) || crt_isinf(__b)) in __muldc3()
35 __a = crt_copysign(crt_isinf(__a) ? 1 : 0, __a); in __muldc3()
47 if (crt_isnan(__a)) in __muldc3()
48 __a = crt_copysign(0, __a); in __muldc3()
56 if (crt_isnan(__a)) in __muldc3()
57 __a = crt_copysign(0, __a); in __muldc3()
68 COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c - __b * __d); in __muldc3()
[all …]
mulxc3.c
23 __mulxc3(long double __a, long double __b, long double __c, long double __d) in __mulxc3() argument
25 long double __ac = __a * __c; in __mulxc3()
27 long double __ad = __a * __d; in __mulxc3()
35 if (crt_isinf(__a) || crt_isinf(__b)) in __mulxc3()
37 __a = crt_copysignl(crt_isinf(__a) ? 1 : 0, __a); in __mulxc3()
49 if (crt_isnan(__a)) in __mulxc3()
50 __a = crt_copysignl(0, __a); in __mulxc3()
58 if (crt_isnan(__a)) in __mulxc3()
59 __a = crt_copysignl(0, __a); in __mulxc3()
70 COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c - __b * __d); in __mulxc3()
[all …]
divtc3.c
21 __divtc3(long double __a, long double __b, long double __c, long double __d) in __divtc3() argument
33 __real__ z = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw); in __divtc3()
34 __imag__ z = crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw); in __divtc3()
37 if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b))) in __divtc3()
39 __real__ z = crt_copysignl(CRT_INFINITY, __c) * __a; in __divtc3()
42 else if ((crt_isinf(__a) || crt_isinf(__b)) && in __divtc3()
45 __a = crt_copysignl(crt_isinf(__a) ? 1.0 : 0.0, __a); in __divtc3()
47 __real__ z = CRT_INFINITY * (__a * __c + __b * __d); in __divtc3()
48 __imag__ z = CRT_INFINITY * (__b * __c - __a * __d); in __divtc3()
51 crt_isfinite(__a) && crt_isfinite(__b)) in __divtc3()
[all …]
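
The complex-division runtimes first scale the divisor by the exponent of max(|c|, |d|) (ilogb/scalbn) so the denominator c*c + d*d cannot overflow, then apply the standard formula and scale the result back. The same idea in plain double, as a worked sketch:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        double a = 1.0, b = 0.0;      /* numerator 1 + 0i */
        double c = 1e300, d = 1e300;  /* naive c*c + d*d would overflow */
        int e = ilogb(fmax(fabs(c), fabs(d)));
        double cs = scalbn(c, -e), ds = scalbn(d, -e);
        double denom = cs * cs + ds * ds;
        double re = scalbn((a * cs + b * ds) / denom, -e);
        double im = scalbn((b * cs - a * ds) / denom, -e);
        printf("%g %g\n", re, im);    /* ~5e-301 -5e-301 */
        return 0;
    }
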
divdc3.c
21 __divdc3(double __a, double __b, double __c, double __d) in __divdc3() argument
33 COMPLEX_REAL(z) = crt_scalbn((__a * __c + __b * __d) / __denom, -__ilogbw); in __divdc3()
34 COMPLEX_IMAGINARY(z) = crt_scalbn((__b * __c - __a * __d) / __denom, -__ilogbw); in __divdc3()
37 if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b))) in __divdc3()
39 COMPLEX_REAL(z) = crt_copysign(CRT_INFINITY, __c) * __a; in __divdc3()
42 else if ((crt_isinf(__a) || crt_isinf(__b)) && in __divdc3()
45 __a = crt_copysign(crt_isinf(__a) ? 1.0 : 0.0, __a); in __divdc3()
47 COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d); in __divdc3()
48 COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d); in __divdc3()
51 crt_isfinite(__a) && crt_isfinite(__b)) in __divdc3()
[all …]
divsc3.c
21 __divsc3(float __a, float __b, float __c, float __d) in __divsc3() argument
33 COMPLEX_REAL(z) = crt_scalbnf((__a * __c + __b * __d) / __denom, -__ilogbw); in __divsc3()
34 COMPLEX_IMAGINARY(z) = crt_scalbnf((__b * __c - __a * __d) / __denom, -__ilogbw); in __divsc3()
37 if ((__denom == 0) && (!crt_isnan(__a) || !crt_isnan(__b))) in __divsc3()
39 COMPLEX_REAL(z) = crt_copysignf(CRT_INFINITY, __c) * __a; in __divsc3()
42 else if ((crt_isinf(__a) || crt_isinf(__b)) && in __divsc3()
45 __a = crt_copysignf(crt_isinf(__a) ? 1 : 0, __a); in __divsc3()
47 COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d); in __divsc3()
48 COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d); in __divsc3()
51 crt_isfinite(__a) && crt_isfinite(__b)) in __divsc3()
[all …]
divxc3.c
22 __divxc3(long double __a, long double __b, long double __c, long double __d) in __divxc3() argument
34 COMPLEX_REAL(z) = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw); in __divxc3()
35 COMPLEX_IMAGINARY(z) = crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw); in __divxc3()
38 if ((__denom == 0) && (!crt_isnan(__a) || !crt_isnan(__b))) in __divxc3()
40 COMPLEX_REAL(z) = crt_copysignl(CRT_INFINITY, __c) * __a; in __divxc3()
43 else if ((crt_isinf(__a) || crt_isinf(__b)) && in __divxc3()
46 __a = crt_copysignl(crt_isinf(__a) ? 1 : 0, __a); in __divxc3()
48 COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d); in __divxc3()
49 COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d); in __divxc3()
52 crt_isfinite(__a) && crt_isfinite(__b)) in __divxc3()
[all …]
/external/python/cpython2/Tools/pybench/
DLookups.py16 c.__a = 2
20 c.__a = 2
24 c.__a = 2
28 c.__a = 2
32 x = c.__a
36 x = c.__a
40 x = c.__a
44 x = c.__a
48 c.__a = 2
52 c.__a = 2
[all …]
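
(These hits come from pybench's attribute-lookup microbenchmarks; inside a class body Python mangles a private name like __a to _ClassName__a, which is presumably the "special attribute" lookup behavior being exercised here.)
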
/external/llvm/test/CodeGen/AArch64/
arm64-vext.ll
7 %__a = alloca <8 x i8>, align 8
10 store <8 x i8> %tmp, <8 x i8>* %__a, align 8
13 %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
24 %__a = alloca <8 x i8>, align 8
27 store <8 x i8> %tmp, <8 x i8>* %__a, align 8
30 %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
41 %__a = alloca <8 x i8>, align 8
44 store <8 x i8> %tmp, <8 x i8>* %__a, align 8
47 %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
58 %__a = alloca <4 x i16>, align 8
[all …]
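
The alloca/store/load IR above is the unoptimized (-O0) form these tests start from; at the C level it typically comes from NEON's vext intrinsics, which extract a shifted window from the concatenation of two vectors. A sketch, assuming an AArch64 target with arm_neon.h:

    #include <arm_neon.h>

    uint8x8_t shift_window(uint8x8_t a, uint8x8_t b) {
        /* result = bytes 3..10 of the 16-byte concatenation a:b */
        return vext_u8(a, b, 3);
    }
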
