Searched refs: a128 (Results 1 – 12 of 12) sorted by relevance
107  a128 func_xchg(volatile a128 *v, a128 op) {   in func_xchg()
109    a128 cmp = *v;                               in func_xchg()
114  a128 func_add(volatile a128 *v, a128 op) {    in func_add()
116    a128 cmp = *v;                               in func_add()
121  a128 func_sub(volatile a128 *v, a128 op) {    in func_sub()
123    a128 cmp = *v;                               in func_sub()
128  a128 func_and(volatile a128 *v, a128 op) {    in func_and()
130    a128 cmp = *v;                               in func_and()
135  a128 func_or(volatile a128 *v, a128 op) {     in func_or()
137    a128 cmp = *v;                               in func_or()
[all …]
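Each helper above has the same read-modify-write shape: read the old value into cmp, install the new one, and return the old value (the lines that serialize the update fall between the matches and are elided here). A minimal sketch of that shape in C, using GCC/Clang's 16-byte __sync_val_compare_and_swap as the serialization mechanism; that builtin is an assumption for illustration (it needs e.g. x86-64 with -mcx16), not necessarily what the file above uses:

    __extension__ typedef __int128 a128;

    /* CAS retry loop: atomically replace *v with (old + op) and return
       the old value, the same contract as func_add above. */
    static a128 cas_fetch_add(volatile a128 *v, a128 op) {
      for (;;) {
        a128 cmp = *v;        /* snapshot the current value */
        a128 xch = cmp + op;  /* the value we want to install */
        /* the swap succeeds only if *v still equals the snapshot */
        if (__sync_val_compare_and_swap(v, cmp, xch) == cmp)
          return cmp;         /* fetch-style ops return the old value */
      }
    }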
150  __extension__ typedef __int128 a128;   typedef
180  a128 __tsan_atomic128_load(const volatile a128 *a, morder mo);
193  void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo);
206  a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo);
219  a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo);
232  a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo);
245  a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo);
258  a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo);
271  a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo);
284  a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo);
[all …]
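These are the 128-bit entry points of the TSan atomic interface; a compiler instrumenting __int128 atomics under -fsanitize=thread emits calls to them. A minimal sketch of driving one by hand; the morder stand-in typedef and the value 5 (assumed to mean seq_cst, following the usual C11 memory-order numbering) are illustrative guesses, not the real header:

    __extension__ typedef __int128 a128;
    typedef int morder;  /* stand-in for the enum in the real header */

    a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo);

    static volatile a128 counter;

    a128 bump(void) {
      /* atomically add 1 and get the old value; 5 = seq_cst (assumed) */
      return __tsan_atomic128_fetch_add(&counter, 1, 5);
    }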
281  define void @test_upper_vXf32(<2 x float> %a64, <2 x float> %b64, <4 x float> %a128, <4 x float> %b…
284  …n estimated cost of 1 for instruction: %V128 = shufflevector <4 x float> %a128, <4 x float> %b128,…
291  …n estimated cost of 1 for instruction: %V128 = shufflevector <4 x float> %a128, <4 x float> %b128,…
298  …n estimated cost of 1 for instruction: %V128 = shufflevector <4 x float> %a128, <4 x float> %b128,…
305  …n estimated cost of 1 for instruction: %V128 = shufflevector <4 x float> %a128, <4 x float> %b128,…
312  …n estimated cost of 1 for instruction: %V128 = shufflevector <4 x float> %a128, <4 x float> %b128,…
318  %V128 = shufflevector <4 x float> %a128, <4 x float> %b128, <4 x i32> <i32 4, i32 4, i32 4, i32 4>
305  define void @test_upper_vXf32(<2 x float> %a64, <2 x float> %b64, <4 x float> %a128, <4 x float> %b…
308  …n estimated cost of 1 for instruction: %V128 = shufflevector <4 x float> %a128, <4 x float> %b128,…
315  …n estimated cost of 1 for instruction: %V128 = shufflevector <4 x float> %a128, <4 x float> %b128,…
322  …n estimated cost of 1 for instruction: %V128 = shufflevector <4 x float> %a128, <4 x float> %b128,…
329  …n estimated cost of 1 for instruction: %V128 = shufflevector <4 x float> %a128, <4 x float> %b128,…
336  …n estimated cost of 1 for instruction: %V128 = shufflevector <4 x float> %a128, <4 x float> %b128,…
342  %V128 = shufflevector <4 x float> %a128, <4 x float> %b128, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
355  define void @identity_vXf32(<4 x float> %a128, <4 x float> %b128, <8 x float> %a256, <8 x float> %b…
357  …n estimated cost of 0 for instruction: %V128 = shufflevector <4 x float> %a128, <4 x float> %b128,…
363  …n estimated cost of 0 for instruction: %V128 = shufflevector <4 x float> %a128, <4 x float> %b128,…
368  …%V128 = shufflevector <4 x float> %a128, <4 x float> %b128, <4 x i32> <i32 0, i32 undef, i32 2, i3…
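In all three tests the shuffle mask indexes the concatenation of the two <4 x float> operands: lanes 0–3 come from %a128 and lanes 4–7 from %b128, so <4,4,4,4> broadcasts b128's first lane, <7,6,5,4> reverses b128 (each costed at 1), and <0,undef,2,3> passes a128 through unchanged (costed at 0). A C sketch of the same masks using Clang's __builtin_shufflevector, which follows the same indexing; the function names are illustrative:

    typedef float v4f32 __attribute__((vector_size(16)));

    /* mask <4,4,4,4>: broadcast lane 0 of the second operand */
    v4f32 splat_b0(v4f32 a128, v4f32 b128) {
      return __builtin_shufflevector(a128, b128, 4, 4, 4, 4);
    }

    /* mask <7,6,5,4>: the second operand with its lanes reversed */
    v4f32 reverse_b(v4f32 a128, v4f32 b128) {
      return __builtin_shufflevector(a128, b128, 7, 6, 5, 4);
    }

    /* mask <0,undef,2,3>: identity of the first operand; -1 marks a
       don't-care lane, like undef in the IR mask */
    v4f32 identity_a(v4f32 a128, v4f32 b128) {
      return __builtin_shufflevector(a128, b128, 0, -1, 2, 3);
    }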
3527  __m128i a128, b128, res;                                          in vmul_s8() local
3528  a128 = _MM_CVTEPI8_EPI16 (_pM128i(a)); // SSE 4.1 use low 64 bits  in vmul_s8()
3530  res = _mm_mullo_epi16 (a128, b128);                                in vmul_s8()
3556  __m128i mask, a128, b128, res;                                     in vmul_u8() local
3558  a128 = _MM_CVTEPU8_EPI16 (_pM128i(a));                             in vmul_u8()
3560  res = _mm_mullo_epi16 (a128, b128);                                in vmul_u8()
3703  __m128i low, hi, a128, b128;                                       in vmull_s16()
3704  a128 = _pM128i(a);                                                 in vmull_s16()
3706  low = _mm_mullo_epi16(a128, b128);                                 in vmull_s16()
3707  hi = _mm_mulhi_epi16(a128, b128);                                  in vmull_s16()
[all …]
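SSE has no 8-bit multiply, so vmul_s8 above widens each byte to 16 bits (_MM_CVTEPI8_EPI16 and _pM128i are NEON_2_SSE-internal wrappers), multiplies with _mm_mullo_epi16, and narrows the results back; vmull_s16 likewise pairs _mm_mullo_epi16 with _mm_mulhi_epi16 and interleaves the two halves into full 32-bit products. A sketch of the 8-bit trick with plain SSE4.1 intrinsics; the function name and the exact narrowing step are illustrative, not copied from the header:

    #include <smmintrin.h>  /* SSE4.1 */

    /* Multiply eight signed bytes held in the low 64 bits of a and b,
       keeping only the low 8 bits of each product (vmul_s8 semantics). */
    __m128i mul_s8_low64(__m128i a, __m128i b) {
      __m128i a128 = _mm_cvtepi8_epi16(a);         /* sign-extend 8 lanes */
      __m128i b128 = _mm_cvtepi8_epi16(b);
      __m128i res  = _mm_mullo_epi16(a128, b128);  /* 16-bit products */
      res = _mm_and_si128(res, _mm_set1_epi16(0x00ff)); /* keep low bytes */
      return _mm_packus_epi16(res, res);           /* repack to 8-bit lanes */
    }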
244 a128[0] = 0
45 … /a11c /a11d /a11e /a11f /a120 /a121 /a122 /a123 /a124 /a125 /a126 /a127 /a128 /a129 /a12a /a12b /…
3274 a128 17 145 29
4676   2a128 8 959 6
11273  3a128 4 218 27
17899  4a128 8 98 138
79681  STACK CFI 8a128 x25: x25 x26: x26
1  testfile45.o: elf64-elf_x86_64
2
3  Disassembly of section .text:
4
5  0 ...