
Searched refs:x3 (Results 1 – 25 of 751) sorted by relevance

/external/valgrind/none/tests/arm64/
integer.c
163 TESTINST3("add x3, x4, x5", 12345, 6789, x3, x4, x5, 0); in test_arith()
164 TESTINST3("add w3, w4, w5", 12345, 6789, x3, x4, x5, 0); in test_arith()
167 TESTINST3("adc x3, x4, x5", 12345, 6789, x3,x4,x5,0); in test_arith()
168 TESTINST3("adc x3, x4, x5", 12345, 6789, x3,x4,x5,1); in test_arith()
169 TESTINST3("adc x3, x4, x5", 0, 0xffffffffffffffffULL, x3,x4,x5,0); in test_arith()
170 TESTINST3("adc x3, x4, x5", 0, 0xffffffffffffffffULL, x3,x4,x5,1); in test_arith()
171 TESTINST3("adc x3, x4, x5", 0x31415927ULL<<32, 0x27181728ULL<<32, x3,x4,x5,0); in test_arith()
172 TESTINST3("adc x3, x4, x5", 0x31415927ULL<<32, 0x27181728ULL<<32, x3,x4,x5,1); in test_arith()
173 TESTINST3("adc x3, x4, x5", 0x00000000ULL<<32, 0x00000000ULL<<32, x3,x4,x5,0); in test_arith()
174 TESTINST3("adc x3, x4, x5", 0x00000000ULL<<32, 0x00000000ULL<<32, x3,x4,x5,1); in test_arith()
[all …]
/external/llvm/test/MC/AArch64/
arm64-system-encoding.s
60 msr ACTLR_EL1, x3
61 msr ACTLR_EL2, x3
62 msr ACTLR_EL3, x3
63 msr AFSR0_EL1, x3
64 msr AFSR0_EL2, x3
65 msr AFSR0_EL3, x3
66 msr AFSR1_EL1, x3
67 msr AFSR1_EL2, x3
68 msr AFSR1_EL3, x3
69 msr AMAIR_EL1, x3
[all …]
alias-addsubimm.s
9 sub x1, x3, #2, lsl 12
10 add x1, x3, #-2, lsl 12
13 sub x1, x3, #4
14 add x1, x3, #-4
17 sub x1, x3, #4095, lsl 0
18 add x1, x3, #-4095, lsl 0
20 sub x3, x4, #0
28 add x1, x3, #2, lsl 12
29 sub x1, x3, #-2, lsl 12
32 add x1, x3, #4
[all …]
arm64-logical-encoding.s
51 and x1, x2, x3
53 and x1, x2, x3, lsl #2
55 and x1, x2, x3, lsr #2
57 and x1, x2, x3, asr #2
59 and x1, x2, x3, ror #2
62 ; CHECK: and x1, x2, x3 ; encoding: [0x41,0x00,0x03,0x8a]
64 ; CHECK: and x1, x2, x3, lsl #2 ; encoding: [0x41,0x08,0x03,0x8a]
66 ; CHECK: and x1, x2, x3, lsr #2 ; encoding: [0x41,0x08,0x43,0x8a]
68 ; CHECK: and x1, x2, x3, asr #2 ; encoding: [0x41,0x08,0x83,0x8a]
70 ; CHECK: and x1, x2, x3, ror #2 ; encoding: [0x41,0x08,0xc3,0x8a]
[all …]
basic-a64-diagnostics.s
9 add x2, x3, x5, sxtb
24 add x3, x5, w7, uxtb #5
37 add xzr, x3, x5, uxtx
38 sub x3, xzr, w9, sxth #1
51 adds sp, x3, w2, uxtb
52 adds x3, xzr, x9, uxtx
174 add x3, x9, #variable
203 add x1, x2, x3, lsl #-1
204 add x1, x2, x3, lsl #64
205 add x1, x2, x3, lsr #-1
[all …]
arm64-simd-ldst.s
11 ld1.8b {v4, v5, v6}, [x3]
102 ; CHECK: ld1.8b { v4, v5, v6 }, [x3] ; encoding: [0x64,0x60,0x40,0x0c]
852 ld1r.8b {v4}, [x2], x3
854 ld1r.16b {v4}, [x2], x3
856 ld1r.4h {v4}, [x2], x3
858 ld1r.8h {v4}, [x2], x3
860 ld1r.2s {v4}, [x2], x3
862 ld1r.4s {v4}, [x2], x3
864 ld1r.1d {v4}, [x2], x3
866 ld1r.2d {v4}, [x2], x3
[all …]
arm64-memory.s
9 ldr x4, [x3]
16 ldrb w4, [x3]
18 ldrb w2, [x3, _foo@pageoff]
20 ldrsb w9, [x3]
44 ; CHECK: ldr x4, [x3] ; encoding: [0x64,0x00,0x40,0xf9]
51 ; CHECK: ldrb w4, [x3] ; encoding: [0x64,0x00,0x40,0x39]
53 ; CHECK: ldrb w2, [x3, _foo@PAGEOFF] ; encoding: [0x62,0bAAAAAA00,0b01AAAAAA,0x39]
55 ; CHECK: ldrsb w9, [x3] ; encoding: [0x69,0x00,0xc0,0x39]
81 str x4, [x3]
89 strb w4, [x3]
[all …]
arm64-arithmetic-encoding.s
9 adc x1, x2, x3
11 adcs x5, x4, x3
14 ; CHECK: adc x1, x2, x3 ; encoding: [0x41,0x00,0x03,0x9a]
16 ; CHECK: adcs x5, x4, x3 ; encoding: [0x85,0x00,0x03,0xba]
19 sbc x1, x2, x3
21 sbcs x1, x2, x3
24 ; CHECK: sbc x1, x2, x3 ; encoding: [0x41,0x00,0x03,0xda]
26 ; CHECK: sbcs x1, x2, x3 ; encoding: [0x41,0x00,0x03,0xfa]
34 add x3, x4, #1024
35 add x3, x4, #1024, lsl #0
[all …]
/external/libhevc/common/arm64/
ihevc_intra_pred_luma_horz.s
126 st1 { v2.16b},[x2],x3 //store in 1st row 0-16 columns
127 st1 { v2.16b},[x9],x3 //store in 1st row 16-32 columns
130 st1 { v4.16b},[x2],x3
131 st1 { v4.16b},[x9],x3
134 st1 { v6.16b},[x2],x3
135 st1 { v6.16b},[x9],x3
138 st1 { v1.16b},[x2],x3
139 st1 { v1.16b},[x9],x3
142 st1 { v2.16b},[x2],x3
143 st1 { v2.16b},[x9],x3
[all …]
ihevc_intra_pred_chroma_horz.s
126 st1 { v2.8h},[x2],x3 //store in 1st row 0-16 columns
127 st1 { v2.8h},[x9],x3 //store in 1st row 16-32 columns
130 st1 { v4.8h},[x2],x3
131 st1 { v4.8h},[x9],x3
134 st1 { v6.8h},[x2],x3
135 st1 { v6.8h},[x9],x3
138 st1 { v1.8h},[x2],x3
139 st1 { v1.8h},[x9],x3
142 st1 { v2.8h},[x2],x3
143 st1 { v2.8h},[x9],x3
[all …]
ihevc_intra_pred_luma_mode_18_34.s
159 st1 {v0.8b},[x10],x3
160 st1 {v1.8b},[x10],x3
162 st1 {v2.8b},[x10],x3
165 st1 {v3.8b},[x10],x3
168 st1 {v4.8b},[x10],x3
170 st1 {v5.8b},[x10],x3
172 st1 {v6.8b},[x10],x3
174 st1 {v7.8b},[x10],x3
200 st1 {v0.8b},[x10],x3
201 st1 {v1.8b},[x10],x3
[all …]
ihevc_inter_pred_filters_luma_vert_w16out.s
81 mov x6,x3
87 mov x3,x16 //load ht
88 subs x7,x3,#0 //x3->ht
108 lsr x3, x5, #3 //divide by 8
109 mul x7, x7, x3 //multiply height by width
115 add x3,x0,x2 //pu1_src_tmp += src_strd//
117 ld1 {v1.8b},[x3],x2 //src_tmp2 = vld1_u8(pu1_src_tmp)//
120 ld1 {v2.8b},[x3],x2 //src_tmp3 = vld1_u8(pu1_src_tmp)//
122 ld1 {v3.8b},[x3],x2 //src_tmp4 = vld1_u8(pu1_src_tmp)//
124 ld1 {v4.8b},[x3],x2 //src_tmp1 = vld1_u8(pu1_src_tmp)//
[all …]
ihevc_inter_pred_filters_luma_vert.s
126 mov x6,x3
132 mov x3,x16 //load ht
133 subs x7,x3,#0 //x3->ht
152 lsr x3, x5, #3 //divide by 8
153 mul x7, x7, x3 //multiply height by width
159 add x3,x0,x2 //pu1_src_tmp += src_strd//
160 ld1 {v1.8b},[x3],x2 //src_tmp2 = vld1_u8(pu1_src_tmp)//
163 ld1 {v2.8b},[x3],x2 //src_tmp3 = vld1_u8(pu1_src_tmp)//
165 ld1 {v3.8b},[x3],x2 //src_tmp4 = vld1_u8(pu1_src_tmp)//
167 ld1 {v4.8b},[x3],x2 //src_tmp1 = vld1_u8(pu1_src_tmp)//
[all …]
ihevc_deblk_chroma_vert.s
76 add x2,x2,x3
86 adds x3,x7,x2,asr #1
95 cmp x3,#0x39
97 ldr w3, [x7,x3,lsl #2]
98 sxtw x3,w3
100 sub x20,x3,#6
101 csel x3, x20, x3,gt
122 add x3,x3,x5,lsl #1
126 add x6,x3,#2
130 csel x3, x20, x3,gt
[all …]
ihevc_intra_pred_luma_dc.s
216 sub x12, x3, x3, lsl #3 //-7*strd
222 sub x0, x3, x4 //strd - nt
256 st1 {v19.8b},[x2], x3 //store row 0 (prol)
268 st1 {v20.8b},[x2], x3 //store row 1 (prol)
277 st1 {v21.8b},[x2], x3 //store row 2 (prol)
285 st1 {v20.8b},[x2], x3 //store row 3 (prol)
293 st1 {v21.8b},[x2], x3 //store row 4 (prol)
301 st1 {v20.8b},[x2], x3 //store row 5 (prol)
312 st1 {v21.8b},[x2], x3 //store row 6 (prol)
331 st1 {v4.8b},[x2], x3 //store 2nd col (for 16x16)
[all …]
ihevc_inter_pred_chroma_copy.s
123 ADD x6,x1,x3 //pu1_dst_tmp += dst_strd
127 ST1 {v0.s}[0],[x6],x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
130 ST1 {v0.s}[0],[x6],x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
133 ST1 {v0.s}[0],[x6],x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
154 ADD x6,x1,x3 //pu1_dst_tmp += dst_strd
158 ST1 {v0.s}[0],[x6],x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
177 ADD x6,x1,x3 //pu1_dst_tmp += dst_strd
180 ST1 {v1.8b},[x6],x3 //vst1_u8(pu1_dst_tmp, tmp_src)
183 ST1 {v2.8b},[x6],x3 //vst1_u8(pu1_dst_tmp, tmp_src)
185 ST1 {v3.8b},[x6],x3 //vst1_u8(pu1_dst_tmp, tmp_src)
[all …]
/external/libmpeg2/common/armv8/
impeg2_inter_pred.s
114 st1 {v0.8b, v1.8b}, [x5], x3 //Store and increment dst
118 st1 {v0.8b, v1.8b}, [x5], x3 //Store and increment dst
120 st1 {v0.8b, v1.8b}, [x5], x3 //Store and increment dst
122 st1 {v0.8b, v1.8b}, [x5], x3 //Store and increment dst
124 st1 {v0.8b, v1.8b}, [x5], x3 //Store and increment dst
126 st1 {v0.8b, v1.8b}, [x5], x3 //Store and increment dst
128 st1 {v0.8b, v1.8b}, [x5], x3 //Store and increment dst
130 st1 {v0.8b, v1.8b}, [x5], x3 //Store and increment dst
132 st1 {v0.8b, v1.8b}, [x5], x3 //Store and increment dst
134 st1 {v0.8b, v1.8b}, [x5], x3 //Store and increment dst
[all …]
/external/libavc/common/armv8/
ih264_intra_pred_luma_8x8_av8.s
117 sxtw x3, w3
122 st1 {v0.8b}, [x1], x3
123 st1 {v0.8b}, [x1], x3
124 st1 {v0.8b}, [x1], x3
125 st1 {v0.8b}, [x1], x3
126 st1 {v0.8b}, [x1], x3
127 st1 {v0.8b}, [x1], x3
128 st1 {v0.8b}, [x1], x3
129 st1 {v0.8b}, [x1], x3
198 sxtw x3, w3
[all …]
ih264_intra_pred_luma_16x16_av8.s
111 sxtw x3, w3
117 st1 {v0.8b, v1.8b}, [x1], x3
118 st1 {v0.8b, v1.8b}, [x1], x3
119 st1 {v0.8b, v1.8b}, [x1], x3
120 st1 {v0.8b, v1.8b}, [x1], x3
121 st1 {v0.8b, v1.8b}, [x1], x3
122 st1 {v0.8b, v1.8b}, [x1], x3
123 st1 {v0.8b, v1.8b}, [x1], x3
124 st1 {v0.8b, v1.8b}, [x1], x3
125 st1 {v0.8b, v1.8b}, [x1], x3
[all …]
ih264_intra_pred_luma_4x4_av8.s
114 sxtw x3, w3
119 st1 {v0.s}[0], [x1], x3
120 st1 {v0.s}[0], [x1], x3
121 st1 {v0.s}[0], [x1], x3
122 st1 {v0.s}[0], [x1], x3
186 sxtw x3, w3
191 st1 {v0.s}[0], [x1], x3
193 st1 {v2.s}[0], [x1], x3
195 st1 {v3.s}[0], [x1], x3
196 st1 {v4.s}[0], [x1], x3
[all …]
ih264_inter_pred_luma_copy_av8.s
86 sxtw x3, w3
107 add x6, x1, x3 //pu1_dst_tmp += dst_strd
111 st1 {v0.s}[0], [x6], x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
114 st1 {v0.s}[0], [x6], x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
117 st1 {v0.s}[0], [x6], x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
144 add x6, x1, x3 //pu1_dst_tmp += dst_strd
147 st1 {v1.8b}, [x6], x3 //vst1_u8(pu1_dst_tmp, tmp_src)
150 st1 {v2.8b}, [x6], x3 //vst1_u8(pu1_dst_tmp, tmp_src)
152 st1 {v3.8b}, [x6], x3 //vst1_u8(pu1_dst_tmp, tmp_src)
176 add x6, x1, x3 //pu1_dst_tmp += dst_strd
[all …]
/external/compiler-rt/lib/builtins/
popcountti2.c
24 tu_int x3 = (tu_int)a; in __popcountti2() local
25 x3 = x3 - ((x3 >> 1) & (((tu_int)0x5555555555555555uLL << 64) | in __popcountti2()
28 x3 = ((x3 >> 2) & (((tu_int)0x3333333333333333uLL << 64) | 0x3333333333333333uLL)) in __popcountti2()
29 + (x3 & (((tu_int)0x3333333333333333uLL << 64) | 0x3333333333333333uLL)); in __popcountti2()
31 x3 = (x3 + (x3 >> 4)) in __popcountti2()
34 du_int x2 = (du_int)(x3 + (x3 >> 64)); in __popcountti2()
/external/llvm/test/MC/Disassembler/AArch64/
arm64-logical.txt
61 # CHECK: and x1, x2, x3
63 # CHECK: and x1, x2, x3, lsl #2
65 # CHECK: and x1, x2, x3, lsr #2
67 # CHECK: and x1, x2, x3, asr #2
69 # CHECK: and x1, x2, x3, ror #2
83 # CHECK: ands x1, x2, x3
85 # CHECK: ands x1, x2, x3, lsl #2
87 # CHECK: ands x1, x2, x3, lsr #2
89 # CHECK: ands x1, x2, x3, asr #2
91 # CHECK: ands x1, x2, x3, ror #2
[all …]
/external/llvm/test/Bitcode/
binaryFloatInstructions.3.2.ll
8 define void @fadd(float %x1, double %x2 ,half %x3, fp128 %x4, x86_fp80 %x5, ppc_fp128 %x6){
16 ; CHECK-NEXT: %res3 = fadd half %x3, %x3
17 %res3 = fadd half %x3, %x3
31 define void @faddFloatVec(<2 x float> %x1, <3 x float> %x2 ,<4 x float> %x3, <8 x float> %x4, <16 x…
39 ; CHECK-NEXT: %res3 = fadd <4 x float> %x3, %x3
40 %res3 = fadd <4 x float> %x3, %x3
51 define void @faddDoubleVec(<2 x double> %x1, <3 x double> %x2 ,<4 x double> %x3, <8 x double> %x4, …
59 ; CHECK-NEXT: %res3 = fadd <4 x double> %x3, %x3
60 %res3 = fadd <4 x double> %x3, %x3
71 define void @faddHalfVec(<2 x half> %x1, <3 x half> %x2 ,<4 x half> %x3, <8 x half> %x4, <16 x half…
[all …]
/external/libopus/celt/
celt.c
101 opus_val32 x0, x1, x2, x3, x4; in comb_filter_const_c() local
104 x3 = SHL32(x[-T-1], 1); in comb_filter_const_c()
112 t = MAC16_32_Q16(t, g11, ADD32(x1,x3)); in comb_filter_const_c()
118 t = MAC16_32_Q16(t, g12, ADD32(x4,x3)); in comb_filter_const_c()
120 x3=SHL32(x[i-T+4],1); in comb_filter_const_c()
123 t = MAC16_32_Q16(t, g12, ADD32(x3,x2)); in comb_filter_const_c()
127 t = MAC16_32_Q16(t, g11, ADD32(x3,x0)); in comb_filter_const_c()
131 t = MAC16_32_Q16(x[i+4], g10, x3); in comb_filter_const_c()
142 t = MAC16_32_Q16(t, g11, ADD32(x1,x3)); in comb_filter_const_c()
145 x4=x3; in comb_filter_const_c()
[all …]
