
Searched refs:d9 (Results 1 – 25 of 293), sorted by relevance


/external/llvm/test/MC/ARM/
neon-shuffle-encoding.s
81 vtrn.8 d3, d9
82 vtrn.i8 d3, d9
83 vtrn.u8 d3, d9
84 vtrn.p8 d3, d9
85 vtrn.16 d3, d9
86 vtrn.i16 d3, d9
87 vtrn.u16 d3, d9
88 vtrn.p16 d3, d9
89 vtrn.32 d3, d9
90 vtrn.i32 d3, d9
[all …]
vpush-vpop.s
6 vpush {d8, d9, d10, d11, d12}
8 vpop {d8, d9, d10, d11, d12}
11 vpush.s8 {d8, d9, d10, d11, d12}
13 vpop.f32 {d8, d9, d10, d11, d12}
16 @ CHECK-THUMB: vpush {d8, d9, d10, d11, d12} @ encoding: [0x2d,0xed,0x0a,0x8b]
18 @ CHECK-THUMB: vpop {d8, d9, d10, d11, d12} @ encoding: [0xbd,0xec,0x0a,0x8b]
21 @ CHECK-ARM: vpush {d8, d9, d10, d11, d12} @ encoding: [0x0a,0x8b,0x2d,0xed]
23 @ CHECK-ARM: vpop {d8, d9, d10, d11, d12} @ encoding: [0x0a,0x8b,0xbd,0xec]
26 @ CHECK-THUMB: vpush {d8, d9, d10, d11, d12} @ encoding: [0x2d,0xed,0x0a,0x8b]
28 @ CHECK-THUMB: vpop {d8, d9, d10, d11, d12} @ encoding: [0xbd,0xec,0x0a,0x8b]
[all …]
eh-directive-integrated-test.s
40 .vsave {d8, d9, d10, d11, d12}
41 vpush {d8, d9, d10, d11, d12}
45 vpop {d8, d9, d10, d11, d12}
74 .vsave {d8, d9, d10, d11, d12}
75 vpush {d8, d9, d10, d11, d12}
79 vpop {d8, d9, d10, d11, d12}
single-precision-fp.s
9 vnmul.f64 d8, d9, d10
19 @ CHECK-ERRORS-NEXT: vnmul.f64 d8, d9, d10
21 vmla.f64 d11, d10, d9
27 vfnma.f64 d7, d8, d9
30 @ CHECK-ERRORS-NEXT: vmla.f64 d11, d10, d9
42 @ CHECK-ERRORS-NEXT: vfnma.f64 d7, d8, d9
78 vcvt.f64.s32 d9, s8
87 vcvt.u32.f64 d9, d10, #4
93 @ CHECK-ERRORS-NEXT: vcvt.f64.s32 d9, s8
111 @ CHECK-ERRORS-NEXT: vcvt.u32.f64 d9, d10, #4
neont2-dup-encoding.s
23 vdup.16 q9, d9[0]
29 vdup.16 q9, d9[1]
36 @ CHECK: vdup.16 q9, d9[0] @ encoding: [0xf2,0xff,0x49,0x2c]
42 @ CHECK: vdup.16 q9, d9[1] @ encoding: [0xf6,0xff,0x49,0x2c]
/external/libhevc/common/arm/
ihevc_itrans_recon_32x32.s
215 vld1.16 d9,[r0],r6
222 vmlal.s16 q12,d9,d0[3] @// y1 * cos1 + y3 * cos3(part of b0)
223 vmlal.s16 q13,d9,d2[1] @// y1 * cos3 - y3 * sin1(part of b1)
224 vmlal.s16 q14,d9,d3[3] @// y1 * sin3 - y3 * cos1(part of b2)
225 vmlal.s16 q15,d9,d5[1] @// y1 * sin1 - y3 * sin3(part of b3)
284 vld1.16 d9,[r0],r6
292 vmlal.s16 q12,d9,d2[3] @// y1 * cos1 + y3 * cos3(part of b0)
293 vmlsl.s16 q13,d9,d7[3] @// y1 * cos3 - y3 * sin1(part of b1)
294 vmlsl.s16 q14,d9,d2[1] @// y1 * sin3 - y3 * cos1(part of b2)
295 vmlsl.s16 q15,d9,d3[1] @// y1 * sin1 - y3 * sin3(part of b3)
[all …]
ihevc_itrans_recon_16x16.s
233 vld1.16 d9,[r9],r8
276 vmlal.s16 q12,d9,d1[3]
277 vmlsl.s16 q13,d9,d2[3]
278 vmlsl.s16 q14,d9,d0[3]
279 vmlal.s16 q15,d9,d3[3]
313 vld1.16 d9,[r9],r5
336 vmlal.s16 q12,d9,d3[3]
337 vmlsl.s16 q13,d9,d3[1]
338 vmlal.s16 q14,d9,d2[3]
339 vmlsl.s16 q15,d9,d2[1]
[all …]
ihevc_intra_pred_luma_mode_3_to_9.s
204 vsub.s8 d9, d8, d2 @ref_main_idx + 1 (row 0)
208 vtbl.8 d13, {d0,d1}, d9 @load from ref_main_idx + 1 (row 0)
210 vsub.s8 d5, d9, d2 @ref_main_idx + 1 (row 1)
218 vsub.s8 d9, d9, d3 @ref_main_idx + 1 (row 2)
226 vtbl.8 d15, {d0,d1}, d9 @load from ref_main_idx + 1 (row 2)
239 vsub.s8 d9, d9, d3 @ref_main_idx + 1 (row 4)
248 vtbl.8 d13, {d0,d1}, d9 @load from ref_main_idx + 1 (row 4)
261 vsub.s8 d9, d9, d3 @ref_main_idx + 1 (row 6)
270 vtbl.8 d15, {d0,d1}, d9 @load from ref_main_idx + 1 (row 6)
337 vsub.s8 d9, d8, d2 @ref_main_idx - 1
[all …]
ihevc_intra_pred_filters_luma_mode_11_to_17.s
314 vadd.s8 d9, d8, d2 @ref_main_idx + 1 (row 0)
318 vtbl.8 d13, {d0,d1}, d9 @load from ref_main_idx + 1 (row 0)
320 vadd.s8 d5, d9, d2 @ref_main_idx + 1 (row 1)
328 vadd.s8 d9, d9, d3 @ref_main_idx + 1 (row 2)
336 vtbl.8 d15, {d0,d1}, d9 @load from ref_main_idx + 1 (row 2)
349 vadd.s8 d9, d9, d3 @ref_main_idx + 1 (row 4)
358 vtbl.8 d13, {d0,d1}, d9 @load from ref_main_idx + 1 (row 4)
371 vadd.s8 d9, d9, d3 @ref_main_idx + 1 (row 6)
380 vtbl.8 d15, {d0,d1}, d9 @load from ref_main_idx + 1 (row 6)
442 vadd.s8 d9, d2, d8 @ref_main_idx + 1
[all …]
ihevc_intra_pred_chroma_mode_3_to_9.s
195 vmov.i8 d9, #22 @row 0 to 7
199 vadd.s8 d8, d8, d9 @to compensate the pu1_src idx incremented by 8
200 vsub.s8 d9, d8, d29 @ref_main_idx + 1 (row 0)
204 vtbl.8 d13, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 0)
206 vsub.s8 d5, d9, d29 @ref_main_idx + 1 (row 1)
216 vsub.s8 d9, d9, d29 @ref_main_idx + 1 (row 2)
224 vtbl.8 d15, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 2)
237 vsub.s8 d9, d9, d29 @ref_main_idx + 1 (row 4)
246 vtbl.8 d13, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 4)
259 vsub.s8 d9, d9, d29 @ref_main_idx + 1 (row 6)
[all …]
ihevc_intra_pred_luma_planar.s
187 vsub.s8 d9, d2, d8 @(1-8)[nt-1-col]
199 vmlal.u8 q6, d9, d20 @(1)(nt-1-col) * src[2nt-1-row]
213 vmlal.u8 q15, d9, d21 @(2)
230 vmlal.u8 q14, d9, d22 @(3)
247 vmlal.u8 q5, d9, d23 @(4)
264 vmlal.u8 q8, d9, d20 @(5)
280 vmlal.u8 q9, d9, d21 @(6)
297 vmlal.u8 q13, d9, d22 @(7)
313 vmlal.u8 q12, d9, d23 @(8)
339 vsub.s8 d9, d2, d8 @(1n)(1-8)[nt-1-col]
[all …]
ihevc_intra_pred_chroma_planar.s
172 vmov d9,d8
173 vzip.8 d8,d9
175 vsub.s8 d31, d2, d9
201 vmlal.u8 q14,d9,d1
221 vmlal.u8 q12,d9,d1
246 vmlal.u8 q10,d9,d1
268 vmlal.u8 q14,d9,d1
320 vmov d9,d8
321 vzip.8 d8,d9
323 vsub.s8 d31, d2, d9
[all …]
ihevc_intra_pred_filters_chroma_mode_11_to_17.s
313 vadd.s8 d9, d8, d29 @ref_main_idx + 1 (row 0)
317 vtbl.8 d13, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 0)
319 vadd.s8 d5, d9, d29 @ref_main_idx + 1 (row 1)
330 vadd.s8 d9, d9, d29 @ref_main_idx + 1 (row 2)
338 vtbl.8 d15, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 2)
351 vadd.s8 d9, d9, d29 @ref_main_idx + 1 (row 4)
360 vtbl.8 d13, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 4)
373 vadd.s8 d9, d9, d29 @ref_main_idx + 1 (row 6)
384 vtbl.8 d15, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 6)
452 vadd.s8 d9, d29, d8 @ref_main_idx + 1
[all …]
/external/llvm/test/CodeGen/ARM/
cfi-alignment.ll
11 ; CHECK: vpush {d8, d9}
13 ; CHECK: .cfi_offset d9, -24
15 call void asm sideeffect "", "~{d8},~{d9},~{d11}"()
27 ; CHECK: vpush {d8, d9}
29 ; CHECK: .cfi_offset d9, -40
31 call void asm sideeffect "", "~{d8},~{d9},~{d11}"()
40 ; CHECK: vpush {d8, d9}
42 call void asm sideeffect "", "~{d8},~{d9}"()
v7k-abi-align.ll
35 ; CHECK: vpush {d8, d9}
36 ; CHECK: .cfi_offset d9, -24
41 ; CHECK: vpop {d8, d9}
45 call void asm sideeffect "", "~{r6},~{d8},~{d9}"()
53 ; adjustment needs to be performed to put d8 and d9 where they should be.
60 ; CHECK: vpush {d8, d9}
61 ; CHECK: .cfi_offset d9, -40
66 ; CHECK: vpop {d8, d9}
71 call void asm sideeffect "", "~{r4},~{r5},~{r6},~{r7},~{r8},~{d8},~{d9}"()
83 ; CHECK: vpush {d8, d9}
[all …]
vfp-regs-dwarf.ll
5 ; asm("" ::: "d8", "d9", "d11", "d13");
17 ; CHECK: vpush {d8, d9}
21 ; CHECK: .cfi_offset {{265|d9}}, -24
24 ; CHECK: vpop {d8, d9}
27 call void asm sideeffect "", "~{d8},~{d9},~{d11},~{d13}"() #1
/external/swiftshader/third_party/LLVM/test/MC/ARM/
vpush-vpop.s
6 vpush {d8, d9, d10, d11, d12}
8 vpop {d8, d9, d10, d11, d12}
11 @ CHECK-THUMB: vpush {d8, d9, d10, d11, d12} @ encoding: [0x2d,0xed,0x0a,0x8b]
13 @ CHECK-THUMB: vpop {d8, d9, d10, d11, d12} @ encoding: [0xbd,0xec,0x0a,0x8b]
16 @ CHECK-ARM: vpush {d8, d9, d10, d11, d12} @ encoding: [0x0a,0x8b,0x2d,0xed]
18 @ CHECK-ARM: vpop {d8, d9, d10, d11, d12} @ encoding: [0x0a,0x8b,0xbd,0xec]
neont2-dup-encoding.s
23 vdup.16 q9, d9[0]
29 vdup.16 q9, d9[1]
36 @ CHECK: vdup.16 q9, d9[0] @ encoding: [0xf2,0xff,0x49,0x2c]
42 @ CHECK: vdup.16 q9, d9[1] @ encoding: [0xf6,0xff,0x49,0x2c]
/external/llvm/test/CodeGen/Thumb2/
aligned-spill.ll
17 …tail call void asm sideeffect "", "~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwi…
29 ; NEON: vst1.64 {d8, d9, d10, d11}, [r4:128]!
39 ; NEON: vld1.64 {d8, d9, d10, d11}, [r[[R4]]:128]!
50 tail call void asm sideeffect "", "~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14}"() nounwind
60 ; NEON: vst1.64 {d8, d9, d10, d11}, [r4:128]!
64 ; NEON: vld1.64 {d8, d9, d10, d11},
74 tail call void asm sideeffect "", "~{d8},~{d9},~{d10},~{d12},~{d13},~{d14},~{d15}"() nounwind
87 ; NEON: vst1.64 {d8, d9}, [r4:128]
90 ; NEON: vld1.64 {d8, d9},
/external/llvm/test/MC/Disassembler/ARM/
neont2.txt
1659 # CHECK: vst1.8 {d8, d9}, [r4]!
1661 # CHECK: vst1.16 {d8, d9}, [r4]!
1663 # CHECK: vst1.32 {d8, d9}, [r4]!
1665 # CHECK: vst1.64 {d8, d9}, [r4]!
1667 # CHECK: vst1.8 {d8, d9}, [r4], r6
1669 # CHECK: vst1.16 {d8, d9}, [r4], r6
1671 # CHECK: vst1.32 {d8, d9}, [r4], r6
1673 # CHECK: vst1.64 {d8, d9}, [r4], r6
1676 # CHECK: vst1.8 {d8, d9, d10}, [r4]!
1678 # CHECK: vst1.16 {d8, d9, d10}, [r4]!
[all …]
neon.txt
1953 # CHECK: vst1.8 {d8, d9}, [r4]!
1955 # CHECK: vst1.16 {d8, d9}, [r4]!
1957 # CHECK: vst1.32 {d8, d9}, [r4]!
1959 # CHECK: vst1.64 {d8, d9}, [r4]!
1961 # CHECK: vst1.8 {d8, d9}, [r4], r6
1963 # CHECK: vst1.16 {d8, d9}, [r4], r6
1965 # CHECK: vst1.32 {d8, d9}, [r4], r6
1967 # CHECK: vst1.64 {d8, d9}, [r4], r6
1970 # CHECK: vst1.8 {d8, d9, d10}, [r4]!
1972 # CHECK: vst1.16 {d8, d9, d10}, [r4]!
[all …]
/external/llvm/test/CodeGen/AArch64/
arm64-register-pairing.ll
11 ; CHECK: stp d9, d8, [sp, #48]
23 ; CHECK: ldp d9, d8, [sp, #48]
49 ; CHECK: stp d9, d8, [sp, #48]
61 ; CHECK: ldp d9, d8, [sp, #48]
68 ; CHECK-NOTMACHO: stp d11, d9, [sp, #16]
76 ; CHECK-NOTMACHO: ldp d11, d9, [sp, #16]
78 …call void asm sideeffect "mov x0, #42", "~{x0},~{x20},~{x22},~{x24},~{x26},~{x28},~{d9},~{d11},~{d…
/external/libavc/encoder/arm/
ime_distortion_metrics_a9q.s
101 vld1.8 {d8, d9}, [r0], r2
110 vabal.u8 q1, d11, d9
113 vld1.8 {d8, d9}, [r0], r2
121 vabal.u8 q1, d11, d9
184 vld1.8 {d8, d9}, [r0], r2
193 vabal.u8 q1, d11, d9
196 vld1.8 {d8, d9}, [r0], r2
204 vabal.u8 q1, d11, d9
268 vld1.8 {d8, d9}, [r0], r2
279 vabal.u8 q1, d11, d9
[all …]
ih264e_evaluate_intra16x16_modes_a9q.s
123 vaddl.u8 q15, d8, d9
242 vdup.8 q5, d9[7] @0
244 vdup.8 q6, d9[6] @1
246 vdup.8 q7, d9[5] @2
248 vdup.8 q8, d9[4] @3
250 vdup.8 q9, d9[3] @4
252 vdup.8 q10, d9[2] @5
254 vdup.8 q11, d9[1] @6
256 vdup.8 q12, d9[0] @7
/external/llvm/test/CodeGen/X86/
fp128-calling-conv.ll
9 …, fp128 %d3, fp128 %d4, fp128 %d5, fp128 %d6, fp128 %d7, fp128 %d8, fp128 %d9, fp128 %d10, fp128 %…
17 …, fp128 %d3, fp128 %d4, fp128 %d5, fp128 %d6, fp128 %d7, fp128 %d8, fp128 %d9, fp128 %d10, fp128 %…
25 …, fp128 %d3, fp128 %d4, fp128 %d5, fp128 %d6, fp128 %d7, fp128 %d8, fp128 %d9, fp128 %d10, fp128 %…
33 …, fp128 %d3, fp128 %d4, fp128 %d5, fp128 %d6, fp128 %d7, fp128 %d8, fp128 %d9, fp128 %d10, fp128 %…
41 …, fp128 %d3, fp128 %d4, fp128 %d5, fp128 %d6, fp128 %d7, fp128 %d8, fp128 %d9, fp128 %d10, fp128 %…
43 ret fp128 %d9