
Searched refs:q7 (Results 1 – 25 of 82) sorted by relevance


/external/llvm/test/MC/ARM/
neon-bitwise-encoding.s
65 veor q4, q7, q3
66 veor.8 q4, q7, q3
67 veor.16 q4, q7, q3
68 veor.32 q4, q7, q3
69 veor.64 q4, q7, q3
71 veor.i8 q4, q7, q3
72 veor.i16 q4, q7, q3
73 veor.i32 q4, q7, q3
74 veor.i64 q4, q7, q3
76 veor.s8 q4, q7, q3
[all …]
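
Note: VEOR is a plain 128-bit bitwise exclusive-OR, so the .8/.i16/.s64 data-type suffixes exercised above are accepted by the assembler but do not change the encoding. A minimal C sketch of the same operation via the arm_neon.h intrinsics (values chosen only for illustration):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        uint8_t a[16] = {0xff, 0x0f}, b[16] = {0xf0, 0x0f};  /* rest zero */
        /* one XOR across the whole q register, size-agnostic */
        uint8x16_t r = veorq_u8(vld1q_u8(a), vld1q_u8(b));
        printf("%#x %#x\n", vgetq_lane_u8(r, 0), vgetq_lane_u8(r, 1)); /* 0xf 0 */
        return 0;
    }
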
neont2-shiftaccum-encoding.s
9 vsra.s8 q7, q2, #8
17 vsra.u8 q1, q7, #8
18 vsra.u16 q2, q7, #6
35 vsra.u8 q7, #8
36 vsra.u16 q7, #6
44 @ CHECK: vsra.s8 q7, q2, #8 @ encoding: [0x88,0xef,0x54,0xe1]
52 @ CHECK: vsra.u8 q1, q7, #8 @ encoding: [0x88,0xff,0x5e,0x21]
53 @ CHECK: vsra.u16 q2, q7, #6 @ encoding: [0x9a,0xff,0x5e,0x41]
69 @ CHECK: vsra.u8 q7, q7, #8 @ encoding: [0x88,0xff,0x5e,0xe1]
70 @ CHECK: vsra.u16 q7, q7, #6 @ encoding: [0x9a,0xff,0x5e,0xe1]
[all …]
neon-shiftaccum-encoding.s
7 vsra.s8 q7, q2, #8
15 vsra.u8 q1, q7, #8
16 vsra.u16 q2, q7, #6
33 vsra.u8 q7, #8
34 vsra.u16 q7, #6
42 @ CHECK: vsra.s8 q7, q2, #8 @ encoding: [0x54,0xe1,0x88,0xf2]
50 @ CHECK: vsra.u8 q1, q7, #8 @ encoding: [0x5e,0x21,0x88,0xf3]
51 @ CHECK: vsra.u16 q2, q7, #6 @ encoding: [0x5e,0x41,0x9a,0xf3]
67 @ CHECK: vsra.u8 q7, q7, #8 @ encoding: [0x5e,0xe1,0x88,0xf3]
68 @ CHECK: vsra.u16 q7, q7, #6 @ encoding: [0x5e,0xe1,0x9a,0xf3]
[all …]
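
Note: VSRA shifts each lane of the second operand right by the immediate and accumulates into the destination, which is why the two-operand forms above (vsra.u8 q7, #8) come back from the CHECK lines as the canonical three-operand vsra.u8 q7, q7, #8. A sketch with intrinsics, values illustrative:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        int8_t acc[16] = {10}, src[16] = {-128};  /* rest zero */
        /* per lane: acc + (src >> 7), arithmetic shift for the signed form */
        int8x16_t r = vsraq_n_s8(vld1q_s8(acc), vld1q_s8(src), 7);
        printf("%d\n", vgetq_lane_s8(r, 0));  /* 10 + (-1) = 9 */
        return 0;
    }
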
neont2-minmax-encoding.s
23 vmax.s32 q7, q8, q9
26 vmax.u32 q6, q7, q8
34 vmax.u32 q7, q8
53 @ CHECK: vmax.s32 q7, q8, q9 @ encoding: [0x20,0xef,0xe2,0xe6]
56 @ CHECK: vmax.u32 q6, q7, q8 @ encoding: [0x2e,0xff,0x60,0xc6]
63 @ CHECK: vmax.u32 q7, q7, q8 @ encoding: [0x2e,0xff,0x60,0xe6]
85 vmin.s32 q7, q8, q9
88 vmin.u32 q6, q7, q8
96 vmin.u32 q7, q8
115 @ CHECK: vmin.s32 q7, q8, q9 @ encoding: [0x20,0xef,0xf2,0xe6]
[all …]
neon-minmax-encoding.s
21 vmax.s32 q7, q8, q9
24 vmax.u32 q6, q7, q8
32 vmax.u32 q7, q8
51 @ CHECK: vmax.s32 q7, q8, q9 @ encoding: [0xe2,0xe6,0x20,0xf2]
54 @ CHECK: vmax.u32 q6, q7, q8 @ encoding: [0x60,0xc6,0x2e,0xf3]
61 @ CHECK: vmax.u32 q7, q7, q8 @ encoding: [0x60,0xe6,0x2e,0xf3]
83 vmin.s32 q7, q8, q9
86 vmin.u32 q6, q7, q8
94 vmin.u32 q7, q8
113 @ CHECK: vmin.s32 q7, q8, q9 @ encoding: [0xf2,0xe6,0x20,0xf2]
[all …]
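
Note: VMAX/VMIN are element-wise maximum/minimum; as with VSRA, the two-operand spelling vmax.u32 q7, q8 is an assembler alias that canonicalises to vmax.u32 q7, q7, q8. Equivalent intrinsics, as a sketch:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        int32_t a[4] = {-5, 7, 0, 3}, b[4] = {2, -9, 1, 3};
        int32x4_t hi = vmaxq_s32(vld1q_s32(a), vld1q_s32(b)); /* {2, 7, 1, 3}   */
        int32x4_t lo = vminq_s32(vld1q_s32(a), vld1q_s32(b)); /* {-5, -9, 0, 3} */
        printf("%d %d\n", vgetq_lane_s32(hi, 0), vgetq_lane_s32(lo, 1)); /* 2 -9 */
        return 0;
    }
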
neon-shift-encoding.s
114 vsra.s16 q2, q7, #15
125 vsra.s64 q7, #63
132 @ CHECK: vsra.s16 q2, q7, #15 @ encoding: [0x5e,0x41,0x91,0xf2]
142 @ CHECK: vsra.s64 q7, q7, #63 @ encoding: [0xde,0xe1,0x81,0xf2]
150 vsra.u16 q2, q7, #15
161 vsra.u64 q7, #63
168 @ CHECK: vsra.u16 q2, q7, #15 @ encoding: [0x5e,0x41,0x91,0xf3]
178 @ CHECK: vsra.u64 q7, q7, #63 @ encoding: [0xde,0xe1,0x81,0xf3]
186 vsri.16 q2, q7, #15
197 vsri.64 q7, #63
[all …]
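
Note: alongside VSRA this file covers VSRI (shift right and insert), which differs in that the shifted result replaces the low bits of the destination while the top immediate-many bits of each destination lane are preserved rather than added. Sketch:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        uint16_t dst[8] = {0xffff}, src[8] = {0x00f0};  /* rest zero */
        /* lane 0: keep top 8 bits of dst, insert src >> 8 into the low 8 */
        uint16x8_t r = vsriq_n_u16(vld1q_u16(dst), vld1q_u16(src), 8);
        printf("%#x\n", vgetq_lane_u16(r, 0));  /* 0xff00 */
        return 0;
    }
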
neon-sub-encoding.s
22 vsub.i64 q4, q7
44 @ CHECK: vsub.i64 q4, q4, q7 @ encoding: [0x4e,0x88,0x38,0xf3]
147 vhsub.u32 q6, q7
160 @ CHECK: vhsub.u32 q6, q6, q7 @ encoding: [0x4e,0xc2,0x2c,0xf3]
164 vsubw.s16 q7, d1
167 vsubw.u16 q7, d1
171 @ CHECK: vsubw.s16 q7, q7, d1 @ encoding: [0x01,0xe3,0x9e,0xf2]
174 @ CHECK: vsubw.u16 q7, q7, d1 @ encoding: [0x01,0xe3,0x9e,0xf3]
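
Note: the vsubw.s16 q7, d1 hits are the widening subtract: the 16-bit lanes of d1 are sign-extended and subtracted from the 32-bit lanes of q7, and the disassembly again shows the canonical q7, q7, d1 form. Sketch:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        int32_t acc[4] = {100000};  /* rest zero */
        int16_t sub[4] = {-7};
        /* 32-bit lanes minus sign-extended 16-bit lanes */
        int32x4_t r = vsubw_s16(vld1q_s32(acc), vld1_s16(sub));
        printf("%d\n", vgetq_lane_s32(r, 0));  /* 100000 - (-7) = 100007 */
        return 0;
    }
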
neont2-pairwise-encoding.s
21 vpaddl.s8 q4, q7
24 vpaddl.u8 q7, q4
34 @ CHECK: vpaddl.s8 q4, q7 @ encoding: [0xb0,0xff,0x4e,0x82]
37 @ CHECK: vpaddl.u8 q7, q4 @ encoding: [0xb0,0xff,0xc8,0xe2]
51 vpadal.u8 q7, q13
64 @ CHECK: vpadal.u8 q7, q13 @ encoding: [0xb0,0xff,0xea,0xe6]
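
Note: VPADDL adds adjacent lane pairs and widens (sixteen s8 lanes become eight s16 sums); VPADAL does the same but accumulates into the destination. Both reappear below in the libvpx SAD and variance kernels. Sketch:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        int8_t v[16] = {100, 100, -3, 4};  /* rest zero */
        int16_t acc[8] = {1000};
        int16x8_t pairs = vpaddlq_s8(vld1q_s8(v));                 /* {200, 1, 0, ...} */
        int16x8_t accum = vpadalq_s8(vld1q_s16(acc), vld1q_s8(v)); /* {1200, 1, ...}   */
        printf("%d %d\n", vgetq_lane_s16(pairs, 0), vgetq_lane_s16(accum, 0));
        return 0;
    }
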
/external/libvpx/libvpx/vp8/common/arm/neon/
idct_dequant_full_2x_neon.asm
58 ; q7: 12 * sinpi : d1/temp2
62 vqdmulh.s16 q7, q5, d0[2]
88 vqadd.s16 q3, q4, q7
97 vqsub.s16 q7, q10, q3
101 vtrn.32 q5, q7
103 vtrn.16 q6, q7
108 ; q7: l 3, 7,11,15 r 3, 7,11,15
115 vqdmulh.s16 q9, q7, d0[2]
117 vqdmulh.s16 q11, q7, d0[0]
129 vqadd.s16 q11, q7, q11
[all …]
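
Note: vqdmulh.s16 q7, q5, d0[2] is a saturating doubling multiply returning the high half, i.e. a Q15 fixed-point multiply of every lane by one scalar lane; that is how this IDCT applies its "12 * sinpi"-style constants. A sketch of the arithmetic, with an illustrative constant rather than VP8's actual table value:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        int16x8_t x = vdupq_n_s16(1000);
        /* per lane: (x * c * 2) >> 16, saturating -- a Q15 multiply */
        int16x8_t r = vqdmulhq_n_s16(x, 20091);
        printf("%d\n", vgetq_lane_s16(r, 0));  /* (1000*20091*2) >> 16 = 613 */
        return 0;
    }
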
sixtappredict4x4_neon.asm
85 vmull.u8 q7, d18, d5 ;(src_ptr[3] * vp8_filter[5])
95 vmlal.u8 q7, d6, d0 ;+(src_ptr[-2] * vp8_filter[0])
102 vmlsl.u8 q7, d18, d1 ;-(src_ptr[-1] * vp8_filter[1])
109 vmlsl.u8 q7, d6, d4 ;-(src_ptr[2] * vp8_filter[4])
116 vmlal.u8 q7, d18, d2 ;(src_ptr[0] * vp8_filter[2])
127 vqadd.s16 q7, q9 ;sum of all (src_data*filter_parameters)
133 vqrshrun.s16 d27, q7, #7 ;shift/round/saturate to u8
150 vmull.u8 q7, d18, d5 ;(src_ptr[3] * vp8_filter[5])
162 vmlal.u8 q7, d6, d0 ;+(src_ptr[-2] * vp8_filter[0])
172 vmlsl.u8 q7, d18, d1 ;-(src_ptr[-1] * vp8_filter[1])
[all …]
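
Note: the six-tap predictors build each output pixel in q7 as a widening multiply-accumulate chain: vmull.u8 starts the sum, vmlal.u8 adds the positive taps, vmlsl.u8 subtracts the negative ones, and vqrshrun.s16 ... #7 divides by 128 with rounding and saturates back to u8. The same dataflow in intrinsics, with made-up tap values:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        uint8x8_t s0 = vdup_n_u8(10), s1 = vdup_n_u8(20), s2 = vdup_n_u8(30);
        uint8x8_t f0 = vdup_n_u8(2),  f1 = vdup_n_u8(1),  f2 = vdup_n_u8(3);
        uint16x8_t acc = vmull_u8(s0, f0);   /* widening multiply:   10*2  */
        acc = vmlal_u8(acc, s2, f2);         /* multiply-accumulate: +30*3 */
        acc = vmlsl_u8(acc, s1, f1);         /* multiply-subtract:   -20*1 */
        /* round, shift right by 7, saturate down to u8 */
        uint8x8_t out = vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 7);
        printf("%d\n", vget_lane_u8(out, 0));  /* (90 + 64) >> 7 = 1 */
        return 0;
    }
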
sixtappredict8x4_neon.asm
79 vmull.u8 q7, d6, d0 ;(src_ptr[-2] * vp8_filter[0])
89 vmlsl.u8 q7, d28, d1 ;-(src_ptr[-1] * vp8_filter[1])
99 vmlsl.u8 q7, d28, d4 ;-(src_ptr[2] * vp8_filter[4])
109 vmlal.u8 q7, d28, d2 ;(src_ptr[0] * vp8_filter[2])
119 vmlal.u8 q7, d28, d5 ;(src_ptr[3] * vp8_filter[5])
134 vqadd.s16 q7, q3 ;sum of all (src_data*filter_parameters)
141 vqrshrun.s16 d22, q7, #7 ;shift/round/saturate to u8
152 vld1.u8 {q7}, [r0], r1
220 vmull.u8 q7, d31, d3
226 vqadd.s16 q12, q7
[all …]
mbloopfilter_neon.asm
41 vld1.u8 {q7}, [r0@128], r1 ; q0
54 vst1.u8 {q7}, [r0@128],r1 ; store oq0
156 vtrn.32 q3, q7
163 vtrn.16 q7, q9
168 vtrn.8 q7, q8
178 vtrn.32 q3, q7
185 vtrn.16 q7, q9
190 vtrn.8 q7, q8
251 vtrn.32 q3, q7
258 vtrn.16 q7, q9
[all …]
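
Note: the vtrn.32/vtrn.16/vtrn.8 ladders in the loop filters are the standard NEON in-register transpose, turning loaded rows into columns so the horizontal-edge filter code can be reused on vertical edges. The core step as a sketch:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        uint16_t a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
        uint16_t b[8] = {8, 9, 10, 11, 12, 13, 14, 15};
        /* swap the off-diagonal elements of each 2x2 block */
        uint16x8x2_t t = vtrnq_u16(vld1q_u16(a), vld1q_u16(b));
        /* t.val[0] = {0,8,2,10,...}, t.val[1] = {1,9,3,11,...} */
        printf("%d %d\n", vgetq_lane_u16(t.val[0], 1), vgetq_lane_u16(t.val[1], 0));
        return 0;
    }
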
sixtappredict8x8_neon.asm
83 vmull.u8 q7, d6, d0 ;(src_ptr[-2] * vp8_filter[0])
93 vmlsl.u8 q7, d28, d1 ;-(src_ptr[-1] * vp8_filter[1])
103 vmlsl.u8 q7, d28, d4 ;-(src_ptr[2] * vp8_filter[4])
113 vmlal.u8 q7, d28, d2 ;(src_ptr[0] * vp8_filter[2])
123 vmlal.u8 q7, d28, d5 ;(src_ptr[3] * vp8_filter[5])
140 vqadd.s16 q7, q3 ;sum of all (src_data*filter_parameters)
147 vqrshrun.s16 d22, q7, #7 ;shift/round/saturate to u8
167 vld1.u8 {q7}, [r0], r1
233 vmull.u8 q7, d31, d3
239 vqadd.s16 q12, q7
[all …]
vp8_subpixelvariance8x8_neon.asm
50 vmull.u8 q7, d4, d0
60 vmlal.u8 q7, d5, d1
67 vqrshrn.u16 d23, q7, #7
77 vmull.u8 q7, d4, d0
89 vmlal.u8 q7, d5, d1
95 vqrshrn.u16 d27, q7, #7
119 vmull.u8 q7, d28, d0
128 vmlal.u8 q7, d29, d1
137 vqrshrn.u16 d28, q7, #7
180 vsubl.u8 q7, d25, d3
[all …]
bilinearpredict8x8_neon.asm
47 vmull.u8 q7, d4, d0
57 vmlal.u8 q7, d5, d1
64 vqrshrn.u16 d23, q7, #7
74 vmull.u8 q7, d4, d0
86 vmlal.u8 q7, d5, d1
92 vqrshrn.u16 d27, q7, #7
117 vmull.u8 q7, d28, d0
126 vmlal.u8 q7, d29, d1
135 vqrshrn.u16 d8, q7, #7
loopfiltersimplehorizontaledge_neon.asm
28 vld1.u8 {q7}, [r0@128], r1 ; q0
33 vabd.u8 q15, q6, q7 ; abs(p0 - q0)
42 veor q7, q7, q0 ; qs0: q0 offset to convert to a signed value
77 vqsub.s8 q10, q7, q4 ; u = vp8_signed_char_clamp(qs0 - Filter1)
80 veor q7, q10, q0 ; *oq0 = u^0x80
83 vst1.u8 {q7}, [r0@128] ; store oq0
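
Note: as the comments say, veor q7, q7, q0 (q0 holding 0x80 in every byte) converts unsigned pixels to signed by flipping the sign bit, so the filter can use signed saturating arithmetic (vqsub.s8) before a second XOR converts back. Sketch of the round trip:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        uint8x16_t px   = vdupq_n_u8(200);   /* unsigned pixel value */
        uint8x16_t bias = vdupq_n_u8(0x80);
        /* flip the sign bit: u8 200 becomes s8 72 (200 - 128) */
        int8x16_t qs = vreinterpretq_s8_u8(veorq_u8(px, bias));
        /* ... signed saturating filter math would happen here ... */
        uint8x16_t back = veorq_u8(vreinterpretq_u8_s8(qs), bias);
        printf("%d %d\n", vgetq_lane_s8(qs, 0), vgetq_lane_u8(back, 0)); /* 72 200 */
        return 0;
    }
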
vp8_subpixelvariance16x16s_neon.asm
58 vext.8 q7, q6, q7, #1
64 vrhadd.u8 q3, q6, q7
69 vsubl.u8 q7, d3, d25
87 vpadal.s16 q8, q7
154 vld1.8 {q7}, [r2], r3
256 vext.8 q7, q6, q7, #1
261 vrhadd.u8 q3, q6, q7
268 vld1.8 {q7}, [r2], r3
383 vext.8 q7, q6, q7, #1
388 vrhadd.u8 q3, q6, q7
[all …]
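
Note: vext.8 q7, q6, q7, #1 concatenates two q registers and takes 16 bytes starting at byte 1, i.e. the row shifted by one pixel; vrhadd.u8 then computes the rounded average with the unshifted row, which is the half-pixel case this variance file special-cases. Sketch:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        uint8_t row[32] = {10, 20, 30, 40};  /* one 16-px row plus spillover */
        uint8x16_t cur  = vld1q_u8(row);
        uint8x16_t next = vld1q_u8(row + 16);
        uint8x16_t shifted = vextq_u8(cur, next, 1);  /* row offset by 1 px  */
        uint8x16_t avg = vrhaddq_u8(cur, shifted);    /* (a + b + 1) >> 1    */
        printf("%d\n", vgetq_lane_u8(avg, 0));  /* (10 + 20 + 1) >> 1 = 15 */
        return 0;
    }
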
loopfilter_neon.asm
40 vld1.u8 {q7}, [r2@128], r1 ; q0
52 vst1.u8 {q7}, [r2@128], r1 ; store oq0
149 vtrn.32 q3, q7
158 vtrn.16 q7, q9
163 vtrn.8 q7, q8
239 vtrn.32 q3, q7
248 vtrn.16 q7, q9
253 vtrn.8 q7, q8
299 ; q7 q0
309 vabd.u8 q14, q8, q7 ; abs(q1 - q0)
[all …]
sixtappredict16x16_neon.asm
177 vmull.u8 q7, d31, d3
183 vqadd.s16 q11, q7
206 vabs.s32 q7, q5
257 vmull.u8 q7, d21, d3 ;(src_ptr[1] * vp8_filter[3])
264 vqadd.s16 q7, q3 ;sum of all (src_data*filter_parameters)
269 vqrshrun.s16 d6, q7, #7 ;shift/round/saturate to u8
320 vmull.u8 q7, d7, d0
337 vmlsl.u8 q7, d22, d1 ;-(src_ptr[-1] * vp8_filter[1])
341 vmlsl.u8 q7, d26, d4 ;-(src_ptr[2] * vp8_filter[4])
358 vmlal.u8 q7, d20, d5 ;(src_ptr[3] * vp8_filter[5])
[all …]
sad16_neon.asm
43 vld1.8 {q7}, [r2], r3
68 vld1.8 {q7}, [r2], r3
93 vld1.8 {q7}, [r2], r3
118 vld1.8 {q7}, [r2]
162 vld1.8 {q7}, [r2], r3
186 vld1.8 {q7}, [r2], r3
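
Note: sad16_neon streams one 16-byte reference row into q7 per iteration; one way to express the per-row SAD step with intrinsics is an absolute difference followed by a widening pairwise accumulation, folded down at the end. A hedged sketch of a single row:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        uint8_t src[16] = {10, 5}, ref[16] = {7, 9};  /* rest zero */
        uint16x8_t acc = vdupq_n_u16(0);
        uint8x16_t ad = vabdq_u8(vld1q_u8(src), vld1q_u8(ref)); /* |src-ref|     */
        acc = vpadalq_u8(acc, ad);             /* pairwise-add into 16-bit sums  */
        uint64x2_t s = vpaddlq_u32(vpaddlq_u16(acc));  /* fold down to two u64   */
        unsigned long long sad = vgetq_lane_u64(s, 0) + vgetq_lane_u64(s, 1);
        printf("%llu\n", sad);  /* 3 + 4 = 7 */
        return 0;
    }
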
/external/libvpx/libvpx/vp8/encoder/arm/neon/
vp8_mse16x16_neon.asm
30 vmov.i8 q7, #0 ;q7, q8, q9, q10 - sse
48 vmlal.s16 q7, d22, d22
55 vmlal.s16 q7, d26, d26
62 vadd.u32 q7, q7, q8
67 vadd.u32 q10, q7, q9
99 vmull.s16 q7, d22, d22
104 vadd.u32 q7, q7, q8
106 vadd.u32 q9, q7, q9
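
Note: the MSE kernel keeps four 32-bit accumulators (q7-q10) and squares 16-bit differences with vmlal.s16 qN, dM, dM, multiplying a vector by itself and accumulating; vadd.u32 then folds the accumulators. The squaring step as a sketch:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        int16x4_t diff = vdup_n_s16(-9);
        int32x4_t sse  = vdupq_n_s32(0);
        sse = vmlal_s16(sse, diff, diff);  /* accumulate diff*diff into s32 */
        printf("%d\n", vgetq_lane_s32(sse, 0));  /* 81 */
        return 0;
    }
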
fastquantizeb_neon.asm
27 vstmdb sp!, {q4-q7}
44 vld1.s16 {q6, q7}, [r6@128] ; load round_ptr [0-15]
50 vadd.s16 q5, q7
67 vadd.s16 q11, q7
77 vld1.s16 {q6, q7}, [r8@128] ;load dequant_ptr[i]
99 vmul.s16 q3, q7, q5
108 vmul.s16 q13, q7, q11
110 vld1.16 {q6, q7}, [r0@128] ; load inverse scan order
120 vand q1, q7, q15
130 vand q11, q7, q3
[all …]
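
Note: the vstmdb sp!, {q4-q7} prologue is there because AAPCS makes d8-d15 (aliased as q4-q7) callee-saved, so this is the one NEON register bank a routine must spill before using as scratch. Inside the loop, q7 carries the rounding constants and later the dequant table; a hedged sketch of just the dequant step (vmul.s16 q3, q7, q5), with illustrative values:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        int16x8_t qcoeff  = vdupq_n_s16(3);
        int16x8_t dequant = vdupq_n_s16(17);
        int16x8_t dqcoeff = vmulq_s16(qcoeff, dequant);  /* reconstruct coeffs */
        printf("%d\n", vgetq_lane_s16(dqcoeff, 0));  /* 51 */
        return 0;
    }
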
/external/valgrind/main/none/tests/arm/
neon128.c
359 TESTINSN_imm("vmov.i16 q7", q7, 0x700); in main()
373 TESTINSN_imm("vmvn.i16 q7", q7, 0x700); in main()
451 TESTINSN_bin("vorr q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff); in main()
457 TESTINSN_bin("vorn q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff); in main()
467 TESTINSN_bin("veor q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff); in main()
477 TESTINSN_bin("vbsl q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff); in main()
487 TESTINSN_bin("vbit q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff); in main()
497 TESTINSN_bin("vbif q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff); in main()
589 TESTINSN_bin("vrhadd.s8 q5, q7, q5", q5, q7, i32, (1 << 31) + 1, q5, i32, (1 << 31) + 2); in main()
592 TESTINSN_bin("vrhadd.s8 q5, q7, q5", q5, q7, i32, (1 << 31) + 1, q5, i32, (1 << 31) + 3); in main()
[all …]
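
Note: besides vorr/vorn/veor, these Valgrind tests cover the bitwise-select family: vbsl keeps the bits selected by the mask from the first source and the rest from the second, with vbit/vbif as the insert-if-true/insert-if-false variants. Sketch of vbsl:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        uint8x16_t mask = vdupq_n_u8(0xf0);
        uint8x16_t a = vdupq_n_u8(0xaa), b = vdupq_n_u8(0x55);
        /* per bit: (mask & a) | (~mask & b) */
        uint8x16_t r = vbslq_u8(mask, a, b);
        printf("%#x\n", vgetq_lane_u8(r, 0));  /* 0xa5 */
        return 0;
    }
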
/external/libvpx/libvpx/vp9/common/arm/neon/
vp9_short_idct32x32_add_neon.asm
112 vrshr.s16 q7, q7, #6
117 vaddw.u8 q7, q7, d9
122 vqmovun.s16 d9, q7
146 vrshr.s16 q7, q7, #6
151 vaddw.u8 q7, q7, d9
156 vqmovun.s16 d9, q7
171 ; q4-q7 contain the results (out[j * 32 + 0-31])
182 vrshr.s16 q7, q7, #6
187 vaddw.u8 q7, q7, d7
192 vqmovun.s16 d7, q7
[all …]
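
Note: the vrshr.s16 #6 / vaddw.u8 / vqmovun.s16 triple is the standard tail of an inverse transform: rounding-shift the residual by the transform scaling, add the widened 8-bit prediction, and narrow back to pixels with unsigned saturation. Sketch:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        int16x8_t resid = vdupq_n_s16(700);
        uint8x8_t pred  = vdup_n_u8(200);
        int16x8_t r = vrshrq_n_s16(resid, 6);  /* (700 + 32) >> 6 = 11 */
        /* widen the prediction and add: 11 + 200 = 211 */
        r = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(r), pred));
        uint8x8_t out = vqmovun_s16(r);        /* saturating narrow to u8 */
        printf("%d\n", vget_lane_u8(out, 0));  /* 211 */
        return 0;
    }
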
/external/chromium_org/third_party/openssl/openssl/crypto/poly1305/
poly1305_arm_asm.S
157 vpush {q4,q5,q6,q7}
213 # asm 2: vmov.i64 >u4=q7,#0xff
214 vmov.i64 q7,#0xff
243 # asm 2: vshr.u64 >u4=q7,<u4=q7,#7
244 vshr.u64 q7,q7,#7
268 # asm 2: vshl.i64 >u4=q7,<u4=q7,#24
269 vshl.i64 q7,q7,#24
325 # asm 2: vmov >r4=q15,<u4=q7
326 vmov q15,q7
330 # asm 2: vmov >r0=q7,<u4=q7
[all …]
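
Note: this qhasm-generated Poly1305 keeps its accumulator in radix-2^26 limbs (five 26-bit limbs), and the vmov.i64 / vshr.u64 / vshl.i64 run on q7 builds a per-lane constant (0xff >> 7 << 24 = 1 << 24); the vpush {q4,q5,q6,q7} at 157 spills the same callee-saved bank noted under fastquantizeb above. The 64-bit shifts as intrinsics:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        uint64x2_t u4 = vdupq_n_u64(0xff);  /* vmov.i64 q7, #0xff   */
        u4 = vshrq_n_u64(u4, 7);            /* vshr.u64 q7, q7, #7  */
        u4 = vshlq_n_u64(u4, 24);           /* vshl.i64 q7, q7, #24 */
        printf("%#llx\n", (unsigned long long)vgetq_lane_u64(u4, 0)); /* 0x1000000 */
        return 0;
    }
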
