Searched refs:q4 (Results 1 – 25 of 74) sorted by relevance
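
Every hit below uses q4, one of the sixteen 128-bit quadword NEON registers (q0–q15) on ARMv7; each qN aliases a pair of 64-bit d registers, so q4 overlaps d8 and d9. Under the AAPCS, d8–d15 (that is, q4–q7) are callee-saved, which is why fastquantizeb_neon.asm at the end of this list spills them with vstmdb sp!, {q4-q7}. As a minimal sketch of what a typical hit computes, the C intrinsics below mirror the first result, "veor q4, q7, q3" (a 128-bit bitwise XOR); the helper name xor_q is hypothetical, not from the indexed sources:

    #include <arm_neon.h>

    /* Hypothetical helper: one XOR across a full q register.
       With optimization this typically assembles to: veor qD, qN, qM */
    uint8x16_t xor_q(uint8x16_t a, uint8x16_t b)
    {
        return veorq_u8(a, b);
    }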

/external/llvm/test/MC/ARM/
neon-bitwise-encoding.s
65 veor q4, q7, q3
66 veor.8 q4, q7, q3
67 veor.16 q4, q7, q3
68 veor.32 q4, q7, q3
69 veor.64 q4, q7, q3
71 veor.i8 q4, q7, q3
72 veor.i16 q4, q7, q3
73 veor.i32 q4, q7, q3
74 veor.i64 q4, q7, q3
76 veor.s8 q4, q7, q3
[all …]
neon-shift-encoding.s
116 vsra.s64 q4, q5, #63
122 vsra.s8 q4, #7
134 @ CHECK: vsra.s64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf2]
139 @ CHECK: vsra.s8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf2]
152 vsra.u64 q4, q5, #63
158 vsra.u8 q4, #7
170 @ CHECK: vsra.u64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf3]
175 @ CHECK: vsra.u8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf3]
188 vsri.64 q4, q5, #63
194 vsri.8 q4, #7
[all …]
neont2-shiftaccum-encoding.s
12 vsra.s64 q8, q4, #64
20 vsra.u64 q4, q5, #25
30 vsra.s64 q4, #64
47 @ CHECK: vsra.s64 q8, q4, #64 @ encoding: [0xc0,0xef,0xd8,0x01]
55 @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xa7,0xff,0xda,0x81]
64 @ CHECK: vsra.s64 q4, q4, #64 @ encoding: [0x80,0xef,0xd8,0x81]
85 vrsra.s32 q3, q4, #32
86 vrsra.s64 q4, q5, #64
103 vrsra.s32 q4, #32
120 @ CHECK: vrsra.s32 q3, q4, #32 @ encoding: [0xa0,0xef,0x58,0x63]
[all …]
neon-shiftaccum-encoding.s
10 vsra.s64 q8, q4, #64
18 vsra.u64 q4, q5, #25
28 vsra.s64 q4, #64
45 @ CHECK: vsra.s64 q8, q4, #64 @ encoding: [0xd8,0x01,0xc0,0xf2]
53 @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xda,0x81,0xa7,0xf3]
62 @ CHECK: vsra.s64 q4, q4, #64 @ encoding: [0xd8,0x81,0x80,0xf2]
82 vrsra.s32 q3, q4, #32
83 vrsra.s64 q4, q5, #64
100 vrsra.s32 q4, #32
117 @ CHECK: vrsra.s32 q3, q4, #32 @ encoding: [0x58,0x63,0xa0,0xf2]
[all …]
neon-minmax-encoding.s
20 vmax.s16 q4, q5, q6
31 vmax.u16 q4, q5
50 @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x4c,0x86,0x1a,0xf2]
60 @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x4a,0x86,0x18,0xf3]
82 vmin.s16 q4, q5, q6
93 vmin.u16 q4, q5
112 @ CHECK: vmin.s16 q4, q5, q6 @ encoding: [0x5c,0x86,0x1a,0xf2]
122 @ CHECK: vmin.u16 q4, q4, q5 @ encoding: [0x5a,0x86,0x18,0xf3]
neont2-minmax-encoding.s
22 vmax.s16 q4, q5, q6
33 vmax.u16 q4, q5
52 @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x1a,0xef,0x4c,0x86]
62 @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x18,0xff,0x4a,0x86]
84 vmin.s16 q4, q5, q6
95 vmin.u16 q4, q5
114 @ CHECK: vmin.s16 q4, q5, q6 @ encoding: [0x1a,0xef,0x5c,0x86]
124 @ CHECK: vmin.u16 q4, q4, q5 @ encoding: [0x18,0xff,0x5a,0x86]
vfp4.s
18 @ ARM: vfma.f32 q2, q4, q0 @ encoding: [0x50,0x4c,0x08,0xf2]
19 @ THUMB: vfma.f32 q2, q4, q0 @ encoding: [0x08,0xef,0x50,0x4c]
20 vfma.f32 q2, q4, q0
44 @ ARM: vfms.f32 q2, q4, q0 @ encoding: [0x50,0x4c,0x28,0xf2]
45 @ THUMB: vfms.f32 q2, q4, q0 @ encoding: [0x28,0xef,0x50,0x4c]
46 vfms.f32 q2, q4, q0
neont2-pairwise-encoding.s
21 vpaddl.s8 q4, q7
24 vpaddl.u8 q7, q4
34 @ CHECK: vpaddl.s8 q4, q7 @ encoding: [0xb0,0xff,0x4e,0x82]
37 @ CHECK: vpaddl.u8 q7, q4 @ encoding: [0xb0,0xff,0xc8,0xe2]
48 vpadal.s8 q4, q10
61 @ CHECK: vpadal.s8 q4, q10 @ encoding: [0xb0,0xff,0x64,0x86]
neon-mul-encoding.s
118 vmul.s32 q4, d3[1]
134 vmul.s32 q5, q4, d3[1]
135 vmul.u32 q4, q5, d4[0]
150 @ CHECK: vmul.i32 q4, q4, d3[1] @ encoding: [0x63,0x88,0xa8,0xf3]
166 @ CHECK: vmul.i32 q5, q4, d3[1] @ encoding: [0x63,0xa8,0xa8,0xf3]
167 @ CHECK: vmul.i32 q4, q5, d4[0] @ encoding: [0x44,0x88,0xaa,0xf3]
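
Several of the neon-mul-encoding.s hits above are by-scalar forms (e.g. "vmul.s32 q5, q4, d3[1]"), where every lane of the q register is multiplied by a single lane of a d register; the CHECK lines show the assembler canonicalizing the suffix to .i32. A hedged intrinsics equivalent, with the name mul_by_lane made up for illustration:

    #include <arm_neon.h>

    /* Multiply all four lanes of a by lane 1 of v:
       roughly vmul.i32 qD, qN, dM[1]. */
    int32x4_t mul_by_lane(int32x4_t a, int32x2_t v)
    {
        return vmulq_lane_s32(a, v, 1);
    }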
/external/libvpx/libvpx/vp8/common/arm/neon/
idct_dequant_full_2x_neon.asm
28 vld1.16 {q4, q5}, [r0] ; r q
46 vmul.i16 q4, q4, q0
52 ; q4: l4r4 q5: l12r12
61 vqdmulh.s16 q6, q4, d0[2] ; sinpi8sqrt2
63 vqdmulh.s16 q8, q4, d0[0] ; cospi8sqrt2minus1
80 ; q4: 4 + 4 * cospi : d1/temp1
82 vqadd.s16 q4, q4, q8
88 vqadd.s16 q3, q4, q7
94 vqadd.s16 q4, q10, q3
100 vtrn.32 q4, q6
[all …]
dequant_idct_neon.asm
26 vld1.16 {q3, q4}, [r0]
40 vmul.i16 q2, q4, q6
47 vqdmulh.s16 q4, q2, d0[0]
53 vshr.s16 q4, q4, #1
56 vqadd.s16 q4, q4, q2
76 vqdmulh.s16 q4, q2, d0[0]
84 vshr.s16 q4, q4, #1
87 vqadd.s16 q4, q4, q2
sixtappredict4x4_neon.asm
62 vld1.u8 {q4}, [r0], r1
88 vmov q4, q3 ;keep original src data in q4 q6
93 vshr.u64 q9, q4, #8 ;construct src_ptr[-1]
100 vshr.u64 q3, q4, #32 ;construct src_ptr[2]
107 vshr.u64 q9, q4, #16 ;construct src_ptr[0]
114 vshr.u64 q3, q4, #24 ;construct src_ptr[1]
125 vld1.u8 {q4}, [r0], r1
154 vmov q4, q3 ;keep original src data in q4 q6
159 vshr.u64 q9, q4, #8 ;construct src_ptr[-1]
168 vshr.u64 q3, q4, #32 ;construct src_ptr[2]
[all …]
mbloopfilter_neon.asm
38 vld1.u8 {q4}, [r12@128], r1 ; p2
51 vst1.u8 {q4}, [r12@128],r1 ; store op2
157 vtrn.32 q4, q8
162 vtrn.16 q4, q6
166 vtrn.8 q3, q4
179 vtrn.32 q4, q8
184 vtrn.16 q4, q6
188 vtrn.8 q3, q4
252 vtrn.32 q4, q8
257 vtrn.16 q4, q6
[all …]
vp8_subpixelvariance16x16s_neon.asm
57 vext.8 q5, q4, q5, #1
63 vrhadd.u8 q2, q4, q5
66 vsubl.u8 q4, d0, d22 ;diff
75 vpadal.s16 q8, q4 ;sum
147 vld1.u8 {q4}, [r0], r1
155 vrhadd.u8 q2, q2, q4
156 vrhadd.u8 q4, q4, q6
255 vext.8 q5, q4, q5, #1
260 vrhadd.u8 q2, q4, q5
262 vrhadd.u8 q4, q8, q9
[all …]
sixtappredict8x4_neon.asm
69 vld1.u8 {q4}, [r0], r1
130 vmull.u8 q4, d29, d3
135 vqadd.s16 q8, q4
146 vld1.u8 {q4}, [r0], r1
217 vmull.u8 q4, d28, d3
223 vqadd.s16 q9, q4
255 vmull.u8 q4, d23, d0
260 vmlsl.u8 q4, d24, d1
265 vmlsl.u8 q4, d27, d4
270 vmlal.u8 q4, d25, d2
[all …]
sixtappredict8x8_neon.asm
72 vld1.u8 {q4}, [r0], r1
134 vmull.u8 q4, d29, d3
141 vqadd.s16 q8, q4
153 vld1.u8 {q4}, [r0], r1
164 ;vld1.u8 {q4}, [r0], r1
230 vmull.u8 q4, d28, d3
236 vqadd.s16 q9, q4
273 vmull.u8 q4, d19, d0
278 vmlsl.u8 q4, d20, d1
283 vmlsl.u8 q4, d23, d4
[all …]
loopfilter_neon.asm
37 vld1.u8 {q4}, [r12@128], r1 ; p2
150 vtrn.32 q4, q8
157 vtrn.16 q4, q6
161 vtrn.8 q3, q4
240 vtrn.32 q4, q8
247 vtrn.16 q4, q6
251 vtrn.8 q3, q4
296 ; q4 p2
306 vabd.u8 q11, q3, q4 ; abs(p3 - p2)
307 vabd.u8 q12, q4, q5 ; abs(p2 - p1)
[all …]
buildintrapredictorsmby_neon.asm
62 vpaddl.u32 q4, q3
244 vmull.u8 q4, d16, d0
247 vsub.s16 q4, q4, q7
264 vqadd.s16 q8, q0, q4
267 vqadd.s16 q10, q1, q4
270 vqadd.s16 q12, q2, q4
273 vqadd.s16 q14, q3, q4
345 vpaddl.u32 q4, q3
527 vmull.u8 q4, d16, d0
530 vsub.s16 q4, q4, q7
[all …]
sad16_neon.asm
28 vld1.8 {q4}, [r2], r3
50 vld1.8 {q4}, [r2], r3
75 vld1.8 {q4}, [r2], r3
100 vld1.8 {q4}, [r2], r3
147 vld1.8 {q4}, [r2], r3
168 vld1.8 {q4}, [r2], r3
sixtappredict16x16_neon.asm
168 vmull.u8 q4, d28, d3 ;(src_ptr[1] * vp8_filter[3])
172 vqadd.s16 q8, q4 ;sum of all (src_data*filter_parameters)
233 vmull.u8 q4, d19, d0
238 vmlsl.u8 q4, d20, d1
243 vmlsl.u8 q4, d23, d4
248 vmlal.u8 q4, d21, d2
253 vmlal.u8 q4, d24, d5
265 vqadd.s16 q8, q4
383 vst1.u8 {q4}, [r4], r5
423 vmull.u8 q4, d19, d0
[all …]
loopfiltersimplehorizontaledge_neon.asm
52 vqsub.s8 q4, q5, q8 ; q4: vp8_filter = vp8_signed_char_clamp(ps1-qs1)
66 vand q14, q4, q15 ; vp8_filter &= mask
71 vshr.s8 q4, q3, #3 ; Filter1 >>= 3
77 vqsub.s8 q10, q7, q4 ; u = vp8_signed_char_clamp(qs0 - Filter1)
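
The comments in the loopfiltersimplehorizontaledge_neon.asm hits above spell out the arithmetic: vqsub.s8 is a saturating subtract, which performs vp8_signed_char_clamp(ps1 - qs1) in a single instruction. A minimal intrinsics sketch of that one step (clamped_diff is a hypothetical name; ps1 and qs1 stand for the pixel rows the real code holds in q5 and q8):

    #include <arm_neon.h>

    /* vqsub.s8: subtract with saturation to [-128, 127] -- the
       "signed char clamp" the VP8 comments refer to. */
    int8x16_t clamped_diff(int8x16_t ps1, int8x16_t qs1)
    {
        return vqsubq_s8(ps1, qs1);
    }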
vp8_subpixelvariance8x8_neon.asm
47 vld1.u8 {q4}, [r0], r1
70 vld1.u8 {q4}, [r0], r1
116 vmull.u8 q4, d25, d0
125 vmlal.u8 q4, d26, d1
134 vqrshrn.u16 d25, q4, #7
170 vsubl.u8 q4, d22, d0 ;calculate diff
176 vpadal.s16 q8, q4 ;sum
dequantizeb_neon.asm
25 vmul.i16 q4, q0, q2
28 vst1.16 {q4, q5}, [r2]
/external/valgrind/main/none/tests/arm/
neon128.c
439 TESTINSN_bin("vand q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
445 TESTINSN_bin("vbic q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
452 TESTINSN_bin("vorr q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff); in main()
458 TESTINSN_bin("vorn q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff); in main()
463 TESTINSN_bin("veor q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
468 TESTINSN_bin("veor q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff); in main()
473 TESTINSN_bin("vbsl q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
478 TESTINSN_bin("vbsl q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff); in main()
483 TESTINSN_bin("vbit q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
488 TESTINSN_bin("vbit q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff); in main()
[all …]
/external/libvpx/libvpx/vp8/encoder/arm/neon/
fastquantizeb_neon.asm
27 vstmdb sp!, {q4-q7}
37 vabs.s16 q4, q0 ; calculate x = abs(z)
49 vadd.s16 q4, q6 ; x + Round
54 vqdmulh.s16 q4, q8 ; y = ((Round+abs(z)) * Quant) >> 16
63 veor.s16 q4, q2 ; y^sz
74 vshr.s16 q4, #1 ; right shift 1 after vqdmulh
79 vsub.s16 q4, q2 ; x1=(y^sz)-sz = (y^sz)-(-1) (2's complement)
98 vmul.s16 q2, q6, q4 ; x * Dequant
112 vtst.16 q14, q4, q8 ; now find eob
149 vldmia sp!, {q4-q7}
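
The fastquantizeb_neon.asm comments above outline the fast-quantizer recipe: x = abs(z), x += Round, y = ((Round + abs(z)) * Quant) >> 16 (vqdmulh doubles the product, hence the later vshr.s16 #1), the sign of z is restored as (y ^ sz) - sz, and the dequantized value is the result times Dequant. A scalar sketch of one coefficient, with hypothetical names (the real routine processes eight lanes per instruction and also tracks the end-of-block position via vtst.16):

    #include <stdint.h>
    #include <stdlib.h>

    /* Scalar model of the per-coefficient steps named in the comments. */
    int16_t quantize_coeff(int16_t z, int16_t round, int16_t quant,
                           int16_t dequant, int16_t *dqcoeff)
    {
        int16_t sz = z >> 15;                        /* sign mask: 0 or -1 */
        int x = abs(z);                              /* x = abs(z)         */
        x += round;                                  /* x + Round          */
        int16_t y = (int16_t)(((int32_t)x * quant) >> 16);
                                                     /* vqdmulh + vshr #1  */
        y = (int16_t)((y ^ sz) - sz);                /* reapply sign (2's
                                                        complement trick)  */
        *dqcoeff = (int16_t)(y * dequant);           /* x * Dequant        */
        return y;
    }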
