
Searched refs:s8 (Results 1 – 25 of 692) sorted by relevance


/external/libhevc/common/arm/
ihevc_intra_pred_luma_planar.s
135 vdup.s8 d0, r7 @src[nt-1]
141 vdup.s8 d1, r7 @src[3nt+1]
152 vdup.s8 d5, r8 @row + 1
153 vdup.s8 d6, r9 @nt - 1 - row
184 vld1.s8 d8, [r12] @(1-8)load 8 coeffs [col+1]
186 vld1.s8 d4, [r6] @(1-8)src[2nt-1-row]
187 vsub.s8 d9, d2, d8 @(1-8)[nt-1-col]
192 vld1.s8 d3, [r14] @(1-8)load 8 src[2nt+1+col]
195 vdup.s8 d20, d4[7] @(1)
198 vdup.s8 d21, d4[6] @(2)
[all …]
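The planar block above broadcasts the corner and edge weights (src[nt-1], src[3nt+1], row+1, nt-1-row) and blends them per column. For orientation, a minimal scalar sketch of the standard HEVC planar rule this routine vectorizes eight columns at a time, with hypothetical names rather than libhevc's API:

```c
#include <stdint.h>

/* Scalar sketch of HEVC planar intra prediction. `left`/`top` hold the
 * reconstructed neighbour samples, nt is the block size. */
static void planar_pred_sketch(uint8_t *dst, int stride,
                               const uint8_t *left, const uint8_t *top,
                               int nt, int log2nt)
{
    uint8_t bot_left  = left[nt]; /* src[nt-1] in the assembly's layout  */
    uint8_t top_right = top[nt];  /* src[3nt+1] in the assembly's layout */
    for (int row = 0; row < nt; row++)
        for (int col = 0; col < nt; col++)
            dst[row * stride + col] = (uint8_t)(
                ((nt - 1 - col) * left[row] + (col + 1) * top_right +
                 (nt - 1 - row) * top[col] + (row + 1) * bot_left +
                 nt) >> (log2nt + 1));
}
```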
ihevc_intra_pred_luma_mode_3_to_9.s
161 vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
201 vsub.s8 d8, d8, d2 @ref_main_idx (sub row)
202 vsub.s8 d8, d26, d8 @ref_main_idx (row 0)
203 vadd.s8 d8, d8, d27 @to compensate the pu1_src idx incremented by 8
204 vsub.s8 d9, d8, d2 @ref_main_idx + 1 (row 0)
206 vsub.s8 d7, d28, d6 @32-fract
209 vsub.s8 d4, d8, d2 @ref_main_idx (row 1)
210 vsub.s8 d5, d9, d2 @ref_main_idx + 1 (row 1)
217 vsub.s8 d8, d8, d3 @ref_main_idx (row 2)
218 vsub.s8 d9, d9, d3 @ref_main_idx + 1 (row 2)
[all …]
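The angular modes above step through the reference array by (col+1)*intra_pred_angle, split that product into an integer index and a 5-bit fraction, and blend two neighbouring reference samples. A scalar sketch of the standard HEVC two-tap rule behind the ref_main_idx/fract arithmetic (hypothetical names; the same pattern covers the mode 11-to-17 and chroma variants below):

```c
#include <stdint.h>

/* One predicted sample from the main reference row/column. */
static uint8_t angular_sample_sketch(const uint8_t *ref_main,
                                     int pos, int intra_pred_angle)
{
    int idx   = ((pos + 1) * intra_pred_angle) >> 5; /* integer step  */
    int fract = ((pos + 1) * intra_pred_angle) & 31; /* 1/32-pel part */
    return (uint8_t)(((32 - fract) * ref_main[idx] +
                      fract * ref_main[idx + 1] + 16) >> 5);
}
```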
ihevc_intra_pred_filters_luma_mode_11_to_17.s
269 vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
312 vadd.s8 d8, d8, d27 @ref_main_idx (add row)
313 vsub.s8 d8, d8, d26 @ref_main_idx (row 0)
314 vadd.s8 d9, d8, d2 @ref_main_idx + 1 (row 0)
316 vsub.s8 d7, d28, d6 @32-fract
319 vadd.s8 d4, d8, d2 @ref_main_idx (row 1)
320 vadd.s8 d5, d9, d2 @ref_main_idx + 1 (row 1)
327 vadd.s8 d8, d8, d3 @ref_main_idx (row 2)
328 vadd.s8 d9, d9, d3 @ref_main_idx + 1 (row 2)
337 vadd.s8 d4, d4, d3 @ref_main_idx (row 3)
[all …]
ihevc_intra_pred_chroma_mode_3_to_9.s
155 vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
186 vshl.s8 d8, d8, #1 @ 2 * idx
197 vsub.s8 d8, d8, d27 @ref_main_idx (sub row)
198 vsub.s8 d8, d26, d8 @ref_main_idx (row 0)
199 vadd.s8 d8, d8, d9 @to compensate the pu1_src idx incremented by 8
200 vsub.s8 d9, d8, d29 @ref_main_idx + 1 (row 0)
202 vsub.s8 d7, d28, d6 @32-fract
205 vsub.s8 d4, d8, d29 @ref_main_idx (row 1)
206 vsub.s8 d5, d9, d29 @ref_main_idx + 1 (row 1)
215 vsub.s8 d8, d8, d29 @ref_main_idx (row 2)
[all …]
ihevc_intra_pred_chroma_planar.s
153 vdup.s8 d5, r8 @row + 1
154 vdup.s8 d6, r9 @nt - 1 - row
170 vld1.s8 {d10,d11}, [r14]! @load src[2nt+1+col]
171 vld1.s8 d8, [r12]!
174 vsub.s8 d30, d2, d8 @[nt-1-col]
175 vsub.s8 d31, d2, d9
196 vadd.s8 d18, d5, d7 @row++ [(row+1)++]
200 vsub.s8 d19, d6, d7 @[nt-1-row]--
218 vadd.s8 d5, d18, d7 @row++ [(row+1)++]
220 vsub.s8 d6, d19, d7 @[nt-1-row]--
[all …]
/external/libvpx/libvpx/vpx_dsp/arm/
highbd_idct32x32_135_add_neon.c
101 s8[32]; in vpx_highbd_idct32_12_neon() local
317 s8[0] = highbd_idct_add_dual(s7[0], s6[31]); in vpx_highbd_idct32_12_neon()
318 s8[1] = highbd_idct_add_dual(s7[1], s6[30]); in vpx_highbd_idct32_12_neon()
319 s8[2] = highbd_idct_add_dual(s7[2], s6[29]); in vpx_highbd_idct32_12_neon()
320 s8[3] = highbd_idct_add_dual(s7[3], s6[28]); in vpx_highbd_idct32_12_neon()
321 s8[4] = highbd_idct_add_dual(s7[4], s7[27]); in vpx_highbd_idct32_12_neon()
322 s8[5] = highbd_idct_add_dual(s7[5], s7[26]); in vpx_highbd_idct32_12_neon()
323 s8[6] = highbd_idct_add_dual(s7[6], s7[25]); in vpx_highbd_idct32_12_neon()
324 s8[7] = highbd_idct_add_dual(s7[7], s7[24]); in vpx_highbd_idct32_12_neon()
325 s8[8] = highbd_idct_add_dual(s7[8], s7[23]); in vpx_highbd_idct32_12_neon()
[all …]
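Stage 8 above adds mirrored lane pairs (i, 31-i) from the previous stages, the classic inverse-DCT butterfly. A plain-C stand-in for the int32x4x2_t NEON arithmetic, reproducing only the lanes visible in the snippet (the rest are elided there):

```c
#include <stdint.h>

static void idct32_stage8_sketch(const int32_t *s6, const int32_t *s7,
                                 int32_t *s8)
{
    for (int i = 0; i < 4; i++)   /* lanes 0-3 pair with stage 6 */
        s8[i] = s7[i] + s6[31 - i];
    for (int i = 4; i <= 8; i++)  /* lanes 4-8 pair within stage 7 */
        s8[i] = s7[i] + s7[31 - i];
}
```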
highbd_vpx_convolve8_neon.c
157 int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10; in vpx_highbd_convolve8_horiz_neon() local
181 load_4x4((const int16_t *)src, src_stride, &s7, &s8, &s9, &s10); in vpx_highbd_convolve8_horiz_neon()
182 transpose_s16_4x4d(&s7, &s8, &s9, &s10); in vpx_highbd_convolve8_horiz_neon()
185 d1 = highbd_convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters); in vpx_highbd_convolve8_horiz_neon()
186 d2 = highbd_convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters); in vpx_highbd_convolve8_horiz_neon()
187 d3 = highbd_convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters); in vpx_highbd_convolve8_horiz_neon()
204 s4 = s8; in vpx_highbd_convolve8_horiz_neon()
213 int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10; in vpx_highbd_convolve8_horiz_neon() local
222 load_8x8((const int16_t *)(src + 7), src_stride, &s7, &s8, &s9, &s10, in vpx_highbd_convolve8_horiz_neon()
233 transpose_s16_8x8(&s7, &s8, &s9, &s10, &t4, &t5, &t6, &t7); in vpx_highbd_convolve8_horiz_neon()
[all …]
vpx_convolve8_neon.c
75 int16x4_t filter3, filter4, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, in vpx_convolve8_horiz_neon() local
112 s8 = vget_low_s16(tt1); in vpx_convolve8_horiz_neon()
118 d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3, in vpx_convolve8_horiz_neon()
120 d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3, in vpx_convolve8_horiz_neon()
122 d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3, in vpx_convolve8_horiz_neon()
142 s4 = s8; in vpx_convolve8_horiz_neon()
155 int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10; in vpx_convolve8_horiz_neon() local
182 s8 = vreinterpretq_s16_u16(vmovl_u8(t1)); in vpx_convolve8_horiz_neon()
196 t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3, in vpx_convolve8_horiz_neon()
198 t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3, in vpx_convolve8_horiz_neon()
[all …]
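Both convolve files follow the same shape: an 8-tap dot product per output, with s0..s10 acting as a sliding window that shifts by four after each group of outputs, which is what the "s4 = s8" reassignments implement. A scalar sketch, assuming vpx's FILTER_BITS of 7 and omitting the saturating narrow the NEON code performs:

```c
#include <stdint.h>

/* One FIR output over eight neighbouring samples. */
static int16_t convolve8_sketch(const int16_t *s, const int16_t *filter)
{
    int32_t sum = 0;
    for (int k = 0; k < 8; k++)
        sum += s[k] * filter[k];
    return (int16_t)((sum + 64) >> 7); /* round to nearest, 7-bit filter scale */
}
```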
/external/llvm/test/MC/ARM/
vpush-vpop.s
7 vpush {s8, s9, s10, s11, s12}
9 vpop {s8, s9, s10, s11, s12}
11 vpush.s8 {d8, d9, d10, d11, d12}
12 vpush.16 {s8, s9, s10, s11, s12}
14 vpop.64 {s8, s9, s10, s11, s12}
17 @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
19 @ CHECK-THUMB: vpop {s8, s9, s10, s11, s12} @ encoding: [0xbd,0xec,0x05,0x4a]
22 @ CHECK-ARM: vpush {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0x2d,0xed]
24 @ CHECK-ARM: vpop {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0xbd,0xec]
27 @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
[all …]
neon-add-encoding.s
17 @ CHECK: vaddl.s8 q8, d17, d16 @ encoding: [0xa0,0x00,0xc1,0xf2]
18 vaddl.s8 q8, d17, d16
30 @ CHECK: vaddw.s8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf2]
31 vaddw.s8 q8, q8, d18
43 @ CHECK: vhadd.s8 d16, d16, d17 @ encoding: [0xa1,0x00,0x40,0xf2]
44 vhadd.s8 d16, d16, d17
55 @ CHECK: vhadd.s8 q8, q8, q9 @ encoding: [0xe2,0x00,0x40,0xf2]
56 vhadd.s8 q8, q8, q9
69 vhadd.s8 d11, d24
75 vhadd.s8 q1, q12
[all …]
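The add-encoding tests exercise several distinct .s8 add flavours. An intrinsics view of what each one computes (an illustrative wrapper, not part of the test file):

```c
#include <arm_neon.h>

void s8_add_variants(int8x8_t a, int8x8_t b, int16x8_t w,
                     int16x8_t *widened, int16x8_t *accum, int8x8_t *halved)
{
    *widened = vaddl_s8(a, b); /* widening add: result lanes are 16-bit */
    *accum   = vaddw_s8(w, a); /* wide + sign-extended narrow          */
    *halved  = vhadd_s8(a, b); /* (a + b) >> 1 per lane, stays 8-bit   */
}
```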
neon-abs-encoding.s
3 @ CHECK: vabs.s8 d16, d16 @ encoding: [0x20,0x03,0xf1,0xf3]
4 vabs.s8 d16, d16
11 @ CHECK: vabs.s8 q8, q8 @ encoding: [0x60,0x03,0xf1,0xf3]
12 vabs.s8 q8, q8
20 @ CHECK: vqabs.s8 d16, d16 @ encoding: [0x20,0x07,0xf0,0xf3]
21 vqabs.s8 d16, d16
26 @ CHECK: vqabs.s8 q8, q8 @ encoding: [0x60,0x07,0xf0,0xf3]
27 vqabs.s8 q8, q8
neont2-neg-encoding.s
5 @ CHECK: vneg.s8 d16, d16 @ encoding: [0xf1,0xff,0xa0,0x03]
6 vneg.s8 d16, d16
13 @ CHECK: vneg.s8 q8, q8 @ encoding: [0xf1,0xff,0xe0,0x03]
14 vneg.s8 q8, q8
21 @ CHECK: vqneg.s8 d16, d16 @ encoding: [0xf0,0xff,0xa0,0x07]
22 vqneg.s8 d16, d16
27 @ CHECK: vqneg.s8 q8, q8 @ encoding: [0xf0,0xff,0xe0,0x07]
28 vqneg.s8 q8, q8
neont2-abs-encoding.s
5 @ CHECK: vabs.s8 d16, d16 @ encoding: [0xf1,0xff,0x20,0x03]
6 vabs.s8 d16, d16
13 @ CHECK: vabs.s8 q8, q8 @ encoding: [0xf1,0xff,0x60,0x03]
14 vabs.s8 q8, q8
22 @ CHECK: vqabs.s8 d16, d16 @ encoding: [0xf0,0xff,0x20,0x07]
23 vqabs.s8 d16, d16
28 @ CHECK: vqabs.s8 q8, q8 @ encoding: [0xf0,0xff,0x60,0x07]
29 vqabs.s8 q8, q8
neon-neg-encoding.s
3 @ CHECK: vneg.s8 d16, d16 @ encoding: [0xa0,0x03,0xf1,0xf3]
4 vneg.s8 d16, d16
11 @ CHECK: vneg.s8 q8, q8 @ encoding: [0xe0,0x03,0xf1,0xf3]
12 vneg.s8 q8, q8
19 @ CHECK: vqneg.s8 d16, d16 @ encoding: [0xa0,0x07,0xf0,0xf3]
20 vqneg.s8 d16, d16
25 @ CHECK: vqneg.s8 q8, q8 @ encoding: [0xe0,0x07,0xf0,0xf3]
26 vqneg.s8 q8, q8
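The four abs/neg test files above each pair a plain form with a q-prefixed saturating form. The difference only shows at the type limits: for .s8, negating INT8_MIN wraps, while the saturating form clamps. A small illustration (the same applies to vabs/vqabs):

```c
#include <arm_neon.h>

void s8_saturation_demo(int8x8_t x, int8x8_t *neg, int8x8_t *qneg)
{
    *neg  = vneg_s8(x);  /* -(-128) wraps back to -128 */
    *qneg = vqneg_s8(x); /* -(-128) clamps to +127     */
}
```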
neont2-minmax-encoding.s
5 vmax.s8 d1, d2, d3
13 vmax.s8 d2, d3
21 vmax.s8 q1, q2, q3
29 vmax.s8 q2, q3
37 @ CHECK: vmax.s8 d1, d2, d3 @ encoding: [0x02,0xef,0x03,0x16]
44 @ CHECK: vmax.s8 d2, d2, d3 @ encoding: [0x02,0xef,0x03,0x26]
51 @ CHECK: vmax.s8 q1, q2, q3 @ encoding: [0x04,0xef,0x46,0x26]
58 @ CHECK: vmax.s8 q2, q2, q3 @ encoding: [0x04,0xef,0x46,0x46]
67 vmin.s8 d1, d2, d3
75 vmin.s8 d2, d3
[all …]
neon-minmax-encoding.s
3 vmax.s8 d1, d2, d3
11 vmax.s8 d2, d3
19 vmax.s8 q1, q2, q3
27 vmax.s8 q2, q3
35 @ CHECK: vmax.s8 d1, d2, d3 @ encoding: [0x03,0x16,0x02,0xf2]
42 @ CHECK: vmax.s8 d2, d2, d3 @ encoding: [0x03,0x26,0x02,0xf2]
49 @ CHECK: vmax.s8 q1, q2, q3 @ encoding: [0x46,0x26,0x04,0xf2]
56 @ CHECK: vmax.s8 q2, q2, q3 @ encoding: [0x46,0x46,0x04,0xf2]
65 vmin.s8 d1, d2, d3
73 vmin.s8 d2, d3
[all …]
neon-cmp-encoding.s
21 vcge.s8 d16, d16, d17
28 vcge.s8 q8, q8, q9
38 @ CHECK: vcge.s8 d16, d16, d17 @ encoding: [0xb1,0x03,0x40,0xf2]
45 @ CHECK: vcge.s8 q8, q8, q9 @ encoding: [0xf2,0x03,0x40,0xf2]
55 vcgt.s8 d16, d16, d17
62 vcgt.s8 q8, q8, q9
72 @ CHECK: vcgt.s8 d16, d16, d17 @ encoding: [0xa1,0x03,0x40,0xf2]
79 @ CHECK: vcgt.s8 q8, q8, q9 @ encoding: [0xe2,0x03,0x40,0xf2]
104 vcge.s8 d16, d16, #0
105 vcle.s8 d16, d16, #0
[all …]
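The vcge/vcgt forms tested above yield per-lane masks rather than booleans: a lane is all-ones where the predicate holds and all-zeros otherwise, which is why the result type is unsigned. Illustrative wrapper only:

```c
#include <arm_neon.h>

uint8x8_t s8_cmp_mask(int8x8_t a, int8x8_t b)
{
    return vcge_s8(a, b); /* lane-wise a >= b -> 0xff / 0x00 */
}
```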
/external/llvm/test/MC/Disassembler/ARM/
invalid-armv8.1a.txt
4 [0x12,0x0b,0x01,0xf3] # vqrdmlah.s8 d0, d1, d2
6 # CHECK-NEXT: [0x12,0x0b,0x01,0xf3] # vqrdmlah.s8 d0, d1, d2
14 [0x54,0x0b,0x02,0xf3] # vqrdmlah.s8 q0, q1, q2
16 # CHECK-NEXT: [0x54,0x0b,0x02,0xf3] # vqrdmlah.s8 q0, q1, q2
24 [0x15,0x7c,0x06,0xf3] # vqrdmlsh.s8 d0, d1, d2
26 # CHECK-NEXT: [0x15,0x7c,0x06,0xf3] # vqrdmlsh.s8 d0, d1, d2
34 [0x54,0x0c,0x02,0xf3] # vqrdmlsh.s8 q0, q1, q2
36 # CHECK-NEXT: [0x54,0x0c,0x02,0xf3] # vqrdmlsh.s8 q0, q1, q2
44 [0x42,0x0e,0x81,0xf2] # vqrdmlah.s8 d0, d1, d2[0]
46 # CHECK-NEXT: [0x42,0x0e,0x81,0xf2] # vqrdmlah.s8 d0, d1, d2[0]
[all …]
invalid-thumbv8.1a.txt
4 [0x01,0xff,0x12,0x0b] # vqrdmlah.s8 d0, d1, d2
6 [0x02,0xff,0x54,0x0b] # vqrdmlah.s8 q0, q1, q2
9 [0x01,0xff,0x12,0x0c] # vqrdmlsh.s8 d0, d1, d2
11 [0x02,0xff,0x54,0x0c] # vqrdmlsh.s8 q0, q1, q2
15 # CHECK-NEXT: [0x01,0xff,0x12,0x0b] # vqrdmlah.s8 d0, d1, d2
21 # CHECK-NEXT: [0x02,0xff,0x54,0x0b] # vqrdmlah.s8 q0, q1, q2
27 # CHECK-NEXT: [0x01,0xff,0x12,0x0c] # vqrdmlsh.s8 d0, d1, d2
33 # CHECK-NEXT: [0x02,0xff,0x54,0x0c] # vqrdmlsh.s8 q0, q1, q2
39 [0x81,0xef,0x42,0x0e] # vqrdmlah.s8 d0, d1, d2[0]
41 [0x82,0xff,0x42,0x0e] # vqrdmlah.s8 q0, q1, d2[0]
[all …]
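Both invalid-*v8.1a files check that the disassembler rejects these bit patterns: the v8.1-A vqrdmlah/vqrdmlsh instructions are defined only for 16- and 32-bit lanes, so a .s8 encoding must not decode. A valid 16-bit intrinsic form for comparison, assuming a compiler and target with the QRDMX extension:

```c
#include <arm_neon.h>

int16x4_t rdm_demo(int16x4_t acc, int16x4_t a, int16x4_t b)
{
    /* saturating rounding doubling multiply-accumulate, high half */
    return vqrdmlah_s16(acc, a, b);
}
```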
/external/swiftshader/third_party/LLVM/test/MC/ARM/
neont2-abs-encoding.s
5 @ CHECK: vabs.s8 d16, d16 @ encoding: [0xf1,0xff,0x20,0x03]
6 vabs.s8 d16, d16
13 @ CHECK: vabs.s8 q8, q8 @ encoding: [0xf1,0xff,0x60,0x03]
14 vabs.s8 q8, q8
22 @ CHECK: vqabs.s8 d16, d16 @ encoding: [0xf0,0xff,0x20,0x07]
23 vqabs.s8 d16, d16
28 @ CHECK: vqabs.s8 q8, q8 @ encoding: [0xf0,0xff,0x60,0x07]
29 vqabs.s8 q8, q8
neon-abs-encoding.s
3 @ CHECK: vabs.s8 d16, d16 @ encoding: [0x20,0x03,0xf1,0xf3]
4 vabs.s8 d16, d16
11 @ CHECK: vabs.s8 q8, q8 @ encoding: [0x60,0x03,0xf1,0xf3]
12 vabs.s8 q8, q8
20 @ CHECK: vqabs.s8 d16, d16 @ encoding: [0x20,0x07,0xf0,0xf3]
21 vqabs.s8 d16, d16
26 @ CHECK: vqabs.s8 q8, q8 @ encoding: [0x60,0x07,0xf0,0xf3]
27 vqabs.s8 q8, q8
neon-neg-encoding.s
3 @ CHECK: vneg.s8 d16, d16 @ encoding: [0xa0,0x03,0xf1,0xf3]
4 vneg.s8 d16, d16
11 @ CHECK: vneg.s8 q8, q8 @ encoding: [0xe0,0x03,0xf1,0xf3]
12 vneg.s8 q8, q8
19 @ CHECK: vqneg.s8 d16, d16 @ encoding: [0xa0,0x07,0xf0,0xf3]
20 vqneg.s8 d16, d16
25 @ CHECK: vqneg.s8 q8, q8 @ encoding: [0xe0,0x07,0xf0,0xf3]
26 vqneg.s8 q8, q8
neont2-neg-encoding.s
5 @ CHECK: vneg.s8 d16, d16 @ encoding: [0xf1,0xff,0xa0,0x03]
6 vneg.s8 d16, d16
13 @ CHECK: vneg.s8 q8, q8 @ encoding: [0xf1,0xff,0xe0,0x03]
14 vneg.s8 q8, q8
21 @ CHECK: vqneg.s8 d16, d16 @ encoding: [0xf0,0xff,0xa0,0x07]
22 vqneg.s8 d16, d16
27 @ CHECK: vqneg.s8 q8, q8 @ encoding: [0xf0,0xff,0xe0,0x07]
28 vqneg.s8 q8, q8
neon-cmp-encoding.s
21 vcge.s8 d16, d16, d17
28 vcge.s8 q8, q8, q9
38 @ CHECK: vcge.s8 d16, d16, d17 @ encoding: [0xb1,0x03,0x40,0xf2]
45 @ CHECK: vcge.s8 q8, q8, q9 @ encoding: [0xf2,0x03,0x40,0xf2]
55 vcgt.s8 d16, d16, d17
62 vcgt.s8 q8, q8, q9
72 @ CHECK: vcgt.s8 d16, d16, d17 @ encoding: [0xa1,0x03,0x40,0xf2]
79 @ CHECK: vcgt.s8 q8, q8, q9 @ encoding: [0xe2,0x03,0x40,0xf2]
104 vcge.s8 d16, d16, #0
105 vcle.s8 d16, d16, #0
[all …]
/external/capstone/suite/MC/ARM/
neon-add-encoding.s.cs
8 0xa0,0x00,0xc1,0xf2 = vaddl.s8 q8, d17, d16
14 0xa2,0x01,0xc0,0xf2 = vaddw.s8 q8, q8, d18
20 0xa1,0x00,0x40,0xf2 = vhadd.s8 d16, d16, d17
26 0xe2,0x00,0x40,0xf2 = vhadd.s8 q8, q8, q9
32 0x28,0xb0,0x0b,0xf2 = vhadd.s8 d11, d11, d24
38 0x68,0x20,0x02,0xf2 = vhadd.s8 q1, q1, q12
44 0xa1,0x01,0x40,0xf2 = vrhadd.s8 d16, d16, d17
50 0xe2,0x01,0x40,0xf2 = vrhadd.s8 q8, q8, q9
56 0xa1,0x01,0x40,0xf2 = vrhadd.s8 d16, d16, d17
62 0xe2,0x01,0x40,0xf2 = vrhadd.s8 q8, q8, q9
[all …]
