
Searched refs:vaddw (Results 1 – 25 of 91) sorted by relevance
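For reference, vaddw is the Arm NEON "add wide" instruction: it adds each element of a 64-bit D register, widened to twice its width, to the corresponding element of a 128-bit Q register, so vaddw.u8 qN, qN, dM accumulates eight u8 lanes into eight u16 lanes. The Hexagon llvm.hexagon.V6.vaddw and A2.vaddw hits further down are same-width 32-bit word adds that merely share the mnemonic, as their <16 x i32> and i64 signatures show. Below is a minimal sketch of the NEON form using the standard arm_neon.h intrinsic; the helper name widen_accumulate is illustrative and does not come from any file listed here.

/* Sketch: what vaddw.u8 computes, via the arm_neon.h intrinsic.
 * Build for an A32 target with -mfpu=neon (an AArch64 build emits uaddw instead). */
#include <arm_neon.h>
#include <stdint.h>

/* acc holds eight u16 lanes, px holds eight u8 lanes. vaddw_u8 zero-extends
 * each u8 lane to u16 and adds it to acc -- one vaddw.u8 qN, qN, dM. */
static inline uint16x8_t widen_accumulate(uint16x8_t acc, uint8x8_t px)
{
    return vaddw_u8(acc, px);
}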


/external/libhevc/decoder/arm/
ihevcd_itrans_recon_dc_luma.s
106 vaddw.u8 q15,q0,d2
107 vaddw.u8 q14,q0,d3
108 vaddw.u8 q13,q0,d4
109 vaddw.u8 q12,q0,d5
110 vaddw.u8 q11,q0,d6
111 vaddw.u8 q10,q0,d7
112 vaddw.u8 q9,q0,d8
113 vaddw.u8 q8,q0,d9
165 vaddw.u8 q15,q0,d2
166 vaddw.u8 q14,q0,d3
[all …]
ihevcd_itrans_recon_dc_chroma.s
105 vaddw.u8 q15,q0,d2
106 vaddw.u8 q14,q0,d4
107 vaddw.u8 q13,q0,d6
108 vaddw.u8 q12,q0,d8
109 vaddw.u8 q11,q0,d10
110 vaddw.u8 q10,q0,d12
111 vaddw.u8 q9,q0,d14
120 vaddw.u8 q15,q0,d16
167 vaddw.u8 q15,q0,d2
168 vaddw.u8 q14,q0,d4
[all …]
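In the two ihevcd kernels above, q0 holds the DC residual replicated across 16-bit lanes and each vaddw.u8 adds one row of 8-bit predicted pixels to it; the sums are then saturating-narrowed back to bytes. A rough scalar equivalent follows, assuming the usual clip-to-[0,255] store; the function and variable names are illustrative, not taken from the source files.

#include <stdint.h>

/* Scalar model of DC-only reconstruction: widen each predicted pixel,
 * add the DC term, clamp to the u8 range. The vaddw.u8 lines above do the
 * widening add for eight pixels at a time. */
static void recon_dc_row(uint8_t *dst, const uint8_t *pred,
                         int16_t dc_value, int width)
{
    for (int x = 0; x < width; x++) {
        int32_t v = (int32_t)pred[x] + dc_value;  /* the vaddw.u8 step */
        if (v < 0)   v = 0;                       /* vqmovun-style saturation */
        if (v > 255) v = 255;
        dst[x] = (uint8_t)v;
    }
}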
/external/capstone/suite/MC/ARM/
neon-add-encoding.s.cs
14 0xa2,0x01,0xc0,0xf2 = vaddw.s8 q8, q8, d18
15 0xa2,0x01,0xd0,0xf2 = vaddw.s16 q8, q8, d18
16 0xa2,0x01,0xe0,0xf2 = vaddw.s32 q8, q8, d18
17 0xa2,0x01,0xc0,0xf3 = vaddw.u8 q8, q8, d18
18 0xa2,0x01,0xd0,0xf3 = vaddw.u16 q8, q8, d18
19 0xa2,0x01,0xe0,0xf3 = vaddw.u32 q8, q8, d18
114 0x05,0xc1,0x8c,0xf2 = vaddw.s8 q6, q6, d5
115 0x01,0xe1,0x9e,0xf2 = vaddw.s16 q7, q7, d1
116 0x82,0x01,0xe0,0xf2 = vaddw.s32 q8, q8, d2
117 0x05,0xc1,0x8c,0xf3 = vaddw.u8 q6, q6, d5
[all …]
neont2-add-encoding.s.cs
14 0xc0,0xef,0xa2,0x01 = vaddw.s8 q8, q8, d18
15 0xd0,0xef,0xa2,0x01 = vaddw.s16 q8, q8, d18
16 0xe0,0xef,0xa2,0x01 = vaddw.s32 q8, q8, d18
17 0xc0,0xff,0xa2,0x01 = vaddw.u8 q8, q8, d18
18 0xd0,0xff,0xa2,0x01 = vaddw.u16 q8, q8, d18
19 0xe0,0xff,0xa2,0x01 = vaddw.u32 q8, q8, d18
/external/llvm/test/MC/ARM/
neon-add-encoding.s
30 @ CHECK: vaddw.s8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf2]
31 vaddw.s8 q8, q8, d18
32 @ CHECK: vaddw.s16 q8, q8, d18 @ encoding: [0xa2,0x01,0xd0,0xf2]
33 vaddw.s16 q8, q8, d18
34 @ CHECK: vaddw.s32 q8, q8, d18 @ encoding: [0xa2,0x01,0xe0,0xf2]
35 vaddw.s32 q8, q8, d18
36 @ CHECK: vaddw.u8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf3]
37 vaddw.u8 q8, q8, d18
38 @ CHECK: vaddw.u16 q8, q8, d18 @ encoding: [0xa2,0x01,0xd0,0xf3]
39 vaddw.u16 q8, q8, d18
[all …]
neont2-add-encoding.s
31 @ CHECK: vaddw.s8 q8, q8, d18 @ encoding: [0xc0,0xef,0xa2,0x01]
32 vaddw.s8 q8, q8, d18
33 @ CHECK: vaddw.s16 q8, q8, d18 @ encoding: [0xd0,0xef,0xa2,0x01]
34 vaddw.s16 q8, q8, d18
35 @ CHECK: vaddw.s32 q8, q8, d18 @ encoding: [0xe0,0xef,0xa2,0x01]
36 vaddw.s32 q8, q8, d18
37 @ CHECK: vaddw.u8 q8, q8, d18 @ encoding: [0xc0,0xff,0xa2,0x01]
38 vaddw.u8 q8, q8, d18
39 @ CHECK: vaddw.u16 q8, q8, d18 @ encoding: [0xd0,0xff,0xa2,0x01]
40 vaddw.u16 q8, q8, d18
[all …]
/external/llvm-project/llvm/test/MC/ARM/
neon-add-encoding.s
30 @ CHECK: vaddw.s8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf2]
31 vaddw.s8 q8, q8, d18
32 @ CHECK: vaddw.s16 q8, q8, d18 @ encoding: [0xa2,0x01,0xd0,0xf2]
33 vaddw.s16 q8, q8, d18
34 @ CHECK: vaddw.s32 q8, q8, d18 @ encoding: [0xa2,0x01,0xe0,0xf2]
35 vaddw.s32 q8, q8, d18
36 @ CHECK: vaddw.u8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf3]
37 vaddw.u8 q8, q8, d18
38 @ CHECK: vaddw.u16 q8, q8, d18 @ encoding: [0xa2,0x01,0xd0,0xf3]
39 vaddw.u16 q8, q8, d18
[all …]
neont2-add-encoding.s
31 @ CHECK: vaddw.s8 q8, q8, d18 @ encoding: [0xc0,0xef,0xa2,0x01]
32 vaddw.s8 q8, q8, d18
33 @ CHECK: vaddw.s16 q8, q8, d18 @ encoding: [0xd0,0xef,0xa2,0x01]
34 vaddw.s16 q8, q8, d18
35 @ CHECK: vaddw.s32 q8, q8, d18 @ encoding: [0xe0,0xef,0xa2,0x01]
36 vaddw.s32 q8, q8, d18
37 @ CHECK: vaddw.u8 q8, q8, d18 @ encoding: [0xc0,0xff,0xa2,0x01]
38 vaddw.u8 q8, q8, d18
39 @ CHECK: vaddw.u16 q8, q8, d18 @ encoding: [0xd0,0xff,0xa2,0x01]
40 vaddw.u16 q8, q8, d18
[all …]
/external/llvm-project/llvm/test/CodeGen/Hexagon/
reg-scavengebug.ll
51 %v21 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v18, <16 x i32> %v19)
55 %v25 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v14, <16 x i32> %v22)
56 %v26 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v25, <16 x i32> %v23)
57 %v27 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v26, <16 x i32> %v24)
59 %v29 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v27, <16 x i32> %v11)
60 %v30 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v27, <16 x i32> %v28)
72 %v41 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> undef, <16 x i32> %v39)
76 %v45 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v21, <16 x i32> %v42)
77 %v46 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v45, <16 x i32> %v43)
78 %v47 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v46, <16 x i32> %v44)
[all …]
disable-const64-tinycore.ll
46 %23 = tail call i64 @llvm.hexagon.A2.vaddw(i64 undef, i64 undef)
51 …%28 = tail call i64 @llvm.hexagon.A2.vaddw(i64 %26, i64 %_HEXAGON_V64_internal_union79.sroa.0.0.in…
55 %32 = tail call i64 @llvm.hexagon.A2.vaddw(i64 undef, i64 %29)
56 %33 = tail call i64 @llvm.hexagon.A2.vaddw(i64 0, i64 %31)
57 %34 = tail call i64 @llvm.hexagon.A2.vaddw(i64 %22, i64 undef)
89 declare i64 @llvm.hexagon.A2.vaddw(i64, i64)
frame-offset-overflow.ll
65 %21 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %17, <16 x i32> %19)
66 %22 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %15, <16 x i32> %19)
69 %25 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> undef, <16 x i32> %20)
83 %35 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %31, <16 x i32> %33)
84 %36 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %29, <16 x i32> %33)
87 %39 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %30, <16 x i32> %34)
106 %51 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> zeroinitializer, <16 x i32> undef)
107 %52 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %49, <16 x i32> %50)
108 %53 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> undef, <16 x i32> %50)
134 %73 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %67, <16 x i32> %71)
[all …]
vadd1.ll
15 %v2 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v0, <16 x i32> %v1)
21 declare <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32>, <16 x i32>) #1
v6vec-vmemu2.ll
18 %v4 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v1, <16 x i32> %v3)
25 declare <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32>, <16 x i32>) #1
prob-types.ll
41 declare <32 x i32> @llvm.hexagon.V6.vaddw.128B(<32 x i32>, <32 x i32>) #0
77 %v28 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.128B(<32 x i32> %v22, <32 x i32> %v27) #2
79 %v30 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.128B(<32 x i32> %v22, <32 x i32> %v29) #2
81 %v32 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.128B(<32 x i32> %v22, <32 x i32> %v31) #2
83 %v34 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.128B(<32 x i32> %v22, <32 x i32> %v33) #2
vec-align.ll
29 %v8 = call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v6, <16 x i32> %v7)
41 declare <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32>, <16 x i32>) #1
hexagon_vector_loop_carried_reuse_invalid.ll
15 %1 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> undef, <64 x i32> undef)
24 declare <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32>, <64 x i32>) #1
/external/libavc/common/arm/
ih264_weighted_pred_a9q.s
151 vaddw.s8 q2, q2, d3 @adding offset for rows 1,2
152 vaddw.s8 q3, q3, d3 @adding offset for rows 3,4
185 vaddw.s8 q2, q2, d3 @adding offset for row 1
187 vaddw.s8 q3, q3, d3 @adding offset for row 2
189 vaddw.s8 q4, q4, d3 @adding offset for row 3
191 vaddw.s8 q5, q5, d3 @adding offset for row 4
234 vaddw.s8 q6, q6, d3 @adding offset for row 1L
236 vaddw.s8 q7, q7, d3 @adding offset for row 1H
239 vaddw.s8 q8, q8, d3 @adding offset for row 2L
242 vaddw.s8 q9, q9, d3 @adding offset for row 2H
[all …]
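In ih264_weighted_pred_a9q.s above, the vaddw.s8 lines add the signed 8-bit weighted-prediction offset (held in d3) to the 16-bit weighted samples before they are rounded and narrowed. A rough scalar sketch of one sample of H.264 explicit weighted prediction follows, assuming the standard rounded-shift-plus-offset form; the names here are illustrative only.

#include <stdint.h>

/* Rough scalar model: scale the sample, apply the rounded shift, then add
 * the signed offset and clamp. The "+ off" step is what the vaddw.s8 lines
 * above perform on widened 16-bit lanes. */
static uint8_t weight_sample(uint8_t pred, int16_t wgt, int8_t off, int log_wd)
{
    int32_t v = (int32_t)pred * wgt;
    if (log_wd > 0)
        v = (v + (1 << (log_wd - 1))) >> log_wd;   /* rounded shift */
    v += off;                                      /* the vaddw.s8 step */
    if (v < 0)   v = 0;
    if (v > 255) v = 255;
    return (uint8_t)v;
}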
ih264_iquant_itrans_recon_dc_a9.s
154 vaddw.u8 q10, q0, d30
156 vaddw.u8 q11, q0, d31
266 vaddw.u8 q0, q8, d24
268 vaddw.u8 q1, q8, d25
270 vaddw.u8 q2, q8, d26
272 vaddw.u8 q3, q8, d27
274 vaddw.u8 q4, q8, d28
281 vaddw.u8 q5, q8, d29
283 vaddw.u8 q6, q8, d30
286 vaddw.u8 q7, q8, d31
[all …]
ih264_iquant_itrans_recon_a9.s
214 vaddw.u8 q10, q10, d30
215 vaddw.u8 q11, q11, d31
390 vaddw.u8 q10, q10, d28
391 vaddw.u8 q11, q11, d29
636 vaddw.s16 q10, q10, d10 @
637 vaddw.s16 q11, q11, d11 @
644 vaddw.s16 q10, q10, d18 @
645 vaddw.s16 q11, q11, d19 @
660 vaddw.s16 q12, q12, d2 @
661 vaddw.s16 q13, q13, d3 @
[all …]
/external/libmpeg2/common/arm/
impeg2_idct.s
157 vaddw.u8 q4, q15, d0
159 vaddw.u8 q5, q15, d1
162 vaddw.u8 q6, q15, d2
166 vaddw.u8 q7, q15, d3
170 vaddw.u8 q8, q15, d4
174 vaddw.u8 q9, q15, d5
178 vaddw.u8 q10, q15, d6
182 vaddw.u8 q11, q15, d7
235 vaddw.u8 q7, q6, d30
245 vaddw.u8 q7, q6, d30
[all …]
/external/libvpx/libvpx/vpx_dsp/arm/
idct4x4_1_add_neon.asm
52 vaddw.u8 q8, q0, d2 ; dest[x] + a1
53 vaddw.u8 q9, q0, d4
loopfilter_16_neon.asm
470 vaddw.s8 q15, q15, d29 ; filter + 3 * (qs0 - ps0)
513 vaddw.u8 q15, d6 ; op2=p1 + p0 + q0 + p3 * 3 + p2 *2
570 vaddw.u8 q15, d1
591 vaddw.u8 q15, q14, d5 ; op2 += p2
593 vaddw.u8 q15, d12 ; op2 += q4
599 vaddw.u8 q15, d6 ; op1 += p1
600 vaddw.u8 q15, d13 ; op1 += q5
606 vaddw.u8 q15, d7 ; op0 += p0
607 vaddw.u8 q15, d14 ; op0 += q6
613 vaddw.u8 q15, d8 ; oq0 += q0
[all …]
/external/libvpx/config/arm-neon/vpx_dsp/arm/
loopfilter_16_neon.asm.S
485 vaddw.s8 q15, q15, d29 @ filter + 3 * (qs0 - ps0)
528 vaddw.u8 q15, d6 @ op2=p1 + p0 + q0 + p3 * 3 + p2 *2
585 vaddw.u8 q15, d1
606 vaddw.u8 q15, q14, d5 @ op2 += p2
608 vaddw.u8 q15, d12 @ op2 += q4
614 vaddw.u8 q15, d6 @ op1 += p1
615 vaddw.u8 q15, d13 @ op1 += q5
621 vaddw.u8 q15, d7 @ op0 += p0
622 vaddw.u8 q15, d14 @ op0 += q6
628 vaddw.u8 q15, d8 @ oq0 += q0
[all …]
idct4x4_1_add_neon.asm.S
58 vaddw.u8 q8, q0, d2 @ dest[x] + a1
59 vaddw.u8 q9, q0, d4
/external/libhevc/common/arm/
ihevc_deblk_luma_horz.s
209 vaddw.u8 q4,q3,d27
230 vaddw.u8 q7,q4,d28
279 vaddw.u8 q4,q3,d26
297 vaddw.u8 q7,q4,d23
476 vaddw.s8 q2,q3,d8
501 vaddw.s8 q7,q7,d8
508 vaddw.s8 q8,q8,d14
539 vaddw.s8 q8,q8,d14
