/external/libhevc/common/arm/
ihevc_intra_pred_luma_planar.s
    133  vdup.s8 d0, r7 @src[nt-1]
    139  vdup.s8 d1, r7 @src[3nt+1]
    150  vdup.s8 d5, r8 @row + 1
    151  vdup.s8 d6, r9 @nt - 1 - row
    182  vld1.s8 d8, [r12] @(1-8)load 8 coeffs [col+1]
    184  vld1.s8 d4, [r6] @(1-8)src[2nt-1-row]
    185  vsub.s8 d9, d2, d8 @(1-8)[nt-1-col]
    190  vld1.s8 d3, [r14] @(1-8)load 8 src[2nt+1+col]
    193  vdup.s8 d20, d4[7] @(1)
    196  vdup.s8 d21, d4[6] @(2)
    [all …]
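The comments above spell out the HEVC planar weights: each output pixel blends the left neighbour (weight nt-1-col), the top-right corner (col+1), the top neighbour (nt-1-row) and the bottom-left corner (row+1), and the assembly evaluates eight columns per iteration. A scalar C sketch of the formula those weights implement (a sketch only; the function and array names are illustrative, not libhevc's interface):

#include <stdint.h>

/* Scalar model of HEVC planar intra prediction (H.265 8.4.4.2.4); the
 * NEON code above computes the same weighted sum for 8 columns at a
 * time. In the assembly's 1-D reference array, src[3nt+1] is the
 * top-right sample and src[nt-1] the bottom-left one. */
static void planar_pred(uint8_t *dst, int stride,
                        const uint8_t *left, const uint8_t *top,
                        int nt, int log2nt)
{
    uint8_t top_right   = top[nt];   /* src[3nt+1] */
    uint8_t bottom_left = left[nt];  /* src[nt-1]  */

    for (int row = 0; row < nt; row++) {
        for (int col = 0; col < nt; col++) {
            /* Weights mirror the comments above. */
            int p = (nt - 1 - col) * left[row] + (col + 1) * top_right
                  + (nt - 1 - row) * top[col]  + (row + 1) * bottom_left
                  + nt;
            dst[row * stride + col] = (uint8_t)(p >> (log2nt + 1));
        }
    }
}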
ihevc_intra_pred_luma_mode_3_to_9.s
    158  vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
    198  vsub.s8 d8, d8, d2 @ref_main_idx (sub row)
    199  vsub.s8 d8, d26, d8 @ref_main_idx (row 0)
    200  vadd.s8 d8, d8, d27 @to compensate the pu1_src idx incremented by 8
    201  vsub.s8 d9, d8, d2 @ref_main_idx + 1 (row 0)
    203  vsub.s8 d7, d28, d6 @32-fract
    206  vsub.s8 d4, d8, d2 @ref_main_idx (row 1)
    207  vsub.s8 d5, d9, d2 @ref_main_idx + 1 (row 1)
    214  vsub.s8 d8, d8, d3 @ref_main_idx (row 2)
    215  vsub.s8 d9, d9, d3 @ref_main_idx + 1 (row 2)
    [all …]
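The ref_main_idx / fract / 32-fract comments are the core of HEVC angular prediction: (col+1)*intra_pred_angle splits into an integer step along the reference and a 5-bit fraction, and each output sample is a rounded two-tap blend. A scalar sketch of one sample (names are illustrative; the mode 11-17 and chroma entries below follow the same pattern, with the chroma files doubling the index for interleaved Cb/Cr):

#include <stdint.h>

/* Scalar model of one predicted sample behind the fract/32-fract
 * comments above. ref[] is the 1-D reference array and
 * intra_pred_angle the per-mode step; both names are illustrative. */
static uint8_t angular_sample(const uint8_t *ref, int pos, int intra_pred_angle)
{
    int offset = (pos + 1) * intra_pred_angle;  /* the vmull.s8 above    */
    int idx    = offset >> 5;                   /* integer part          */
    int fract  = offset & 31;                   /* 5-bit fractional part */

    /* Rounded two-tap linear interpolation between ref[idx] and ref[idx+1]. */
    return (uint8_t)(((32 - fract) * ref[idx] + fract * ref[idx + 1] + 16) >> 5);
}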
ihevc_intra_pred_filters_luma_mode_11_to_17.s
    266  vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
    309  vadd.s8 d8, d8, d27 @ref_main_idx (add row)
    310  vsub.s8 d8, d8, d26 @ref_main_idx (row 0)
    311  vadd.s8 d9, d8, d2 @ref_main_idx + 1 (row 0)
    313  vsub.s8 d7, d28, d6 @32-fract
    316  vadd.s8 d4, d8, d2 @ref_main_idx (row 1)
    317  vadd.s8 d5, d9, d2 @ref_main_idx + 1 (row 1)
    324  vadd.s8 d8, d8, d3 @ref_main_idx (row 2)
    325  vadd.s8 d9, d9, d3 @ref_main_idx + 1 (row 2)
    334  vadd.s8 d4, d4, d3 @ref_main_idx (row 3)
    [all …]
ihevc_intra_pred_chroma_mode_3_to_9.s
    151  vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
    182  vshl.s8 d8, d8, #1 @ 2 * idx
    193  vsub.s8 d8, d8, d27 @ref_main_idx (sub row)
    194  vsub.s8 d8, d26, d8 @ref_main_idx (row 0)
    195  vadd.s8 d8, d8, d9 @to compensate the pu1_src idx incremented by 8
    196  vsub.s8 d9, d8, d29 @ref_main_idx + 1 (row 0)
    198  vsub.s8 d7, d28, d6 @32-fract
    201  vsub.s8 d4, d8, d29 @ref_main_idx (row 1)
    202  vsub.s8 d5, d9, d29 @ref_main_idx + 1 (row 1)
    211  vsub.s8 d8, d8, d29 @ref_main_idx (row 2)
    [all …]
ihevc_intra_pred_chroma_planar.s
    150  vdup.s8 d5, r8 @row + 1
    151  vdup.s8 d6, r9 @nt - 1 - row
    167  vld1.s8 {d10,d11}, [r14]! @load src[2nt+1+col]
    168  vld1.s8 d8, [r12]!
    171  vsub.s8 d30, d2, d8 @[nt-1-col]
    172  vsub.s8 d31, d2, d9
    193  vadd.s8 d18, d5, d7 @row++ [(row+1)++]c
    197  vsub.s8 d19, d6, d7 @[nt-1-row]--
    215  vadd.s8 d5, d18, d7 @row++ [(row+1)++]
    217  vsub.s8 d6, d19, d7 @[nt-1-row]--
    [all …]
ihevc_intra_pred_filters_chroma_mode_11_to_17.s
    262  vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
    295  vshl.s8 d8, d8, #1 @ 2 * idx
    307  vadd.s8 d8, d8, d27 @ref_main_idx (add row)
    308  vsub.s8 d8, d8, d26 @ref_main_idx (row 0)
    309  vadd.s8 d9, d8, d29 @ref_main_idx + 1 (row 0)
    311  vsub.s8 d7, d28, d6 @32-fract
    314  vadd.s8 d4, d8, d29 @ref_main_idx (row 1)
    315  vadd.s8 d5, d9, d29 @ref_main_idx + 1 (row 1)
    325  vadd.s8 d8, d8, d29 @ref_main_idx (row 2)
    326  vadd.s8 d9, d9, d29 @ref_main_idx + 1 (row 2)
    [all …]
/external/llvm/test/MC/ARM/
vpush-vpop.s
    7   vpush {s8, s9, s10, s11, s12}
    9   vpop {s8, s9, s10, s11, s12}
    11  vpush.s8 {d8, d9, d10, d11, d12}
    12  vpush.16 {s8, s9, s10, s11, s12}
    14  vpop.64 {s8, s9, s10, s11, s12}
    17  @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
    19  @ CHECK-THUMB: vpop {s8, s9, s10, s11, s12} @ encoding: [0xbd,0xec,0x05,0x4a]
    22  @ CHECK-ARM: vpush {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0x2d,0xed]
    24  @ CHECK-ARM: vpop {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0xbd,0xec]
    27  @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
    [all …]
neont2-abs-encoding.s
    5   @ CHECK: vabs.s8 d16, d16 @ encoding: [0xf1,0xff,0x20,0x03]
    6   vabs.s8 d16, d16
    13  @ CHECK: vabs.s8 q8, q8 @ encoding: [0xf1,0xff,0x60,0x03]
    14  vabs.s8 q8, q8
    22  @ CHECK: vqabs.s8 d16, d16 @ encoding: [0xf0,0xff,0x20,0x07]
    23  vqabs.s8 d16, d16
    28  @ CHECK: vqabs.s8 q8, q8 @ encoding: [0xf0,0xff,0x60,0x07]
    29  vqabs.s8 q8, q8
neon-abs-encoding.s
    3   @ CHECK: vabs.s8 d16, d16 @ encoding: [0x20,0x03,0xf1,0xf3]
    4   vabs.s8 d16, d16
    11  @ CHECK: vabs.s8 q8, q8 @ encoding: [0x60,0x03,0xf1,0xf3]
    12  vabs.s8 q8, q8
    20  @ CHECK: vqabs.s8 d16, d16 @ encoding: [0x20,0x07,0xf0,0xf3]
    21  vqabs.s8 d16, d16
    26  @ CHECK: vqabs.s8 q8, q8 @ encoding: [0x60,0x07,0xf0,0xf3]
    27  vqabs.s8 q8, q8
neont2-neg-encoding.s
    5   @ CHECK: vneg.s8 d16, d16 @ encoding: [0xf1,0xff,0xa0,0x03]
    6   vneg.s8 d16, d16
    13  @ CHECK: vneg.s8 q8, q8 @ encoding: [0xf1,0xff,0xe0,0x03]
    14  vneg.s8 q8, q8
    21  @ CHECK: vqneg.s8 d16, d16 @ encoding: [0xf0,0xff,0xa0,0x07]
    22  vqneg.s8 d16, d16
    27  @ CHECK: vqneg.s8 q8, q8 @ encoding: [0xf0,0xff,0xe0,0x07]
    28  vqneg.s8 q8, q8
neon-neg-encoding.s
    3   @ CHECK: vneg.s8 d16, d16 @ encoding: [0xa0,0x03,0xf1,0xf3]
    4   vneg.s8 d16, d16
    11  @ CHECK: vneg.s8 q8, q8 @ encoding: [0xe0,0x03,0xf1,0xf3]
    12  vneg.s8 q8, q8
    19  @ CHECK: vqneg.s8 d16, d16 @ encoding: [0xa0,0x07,0xf0,0xf3]
    20  vqneg.s8 d16, d16
    25  @ CHECK: vqneg.s8 q8, q8 @ encoding: [0xe0,0x07,0xf0,0xf3]
    26  vqneg.s8 q8, q8
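The four abs/neg test files above pair each plain instruction with its saturating variant for a reason: s8 has no +128, so vabs.s8 and vneg.s8 wrap on -128 while vqabs.s8 and vqneg.s8 clamp. A scalar model of one lane:

#include <stdint.h>

/* One s8 lane of vqabs/vqneg: the only input that can overflow is
 * INT8_MIN, which the saturating forms clamp to INT8_MAX. */
static int8_t qabs_s8(int8_t x)
{
    if (x == INT8_MIN) return INT8_MAX;  /* |-128| saturates to +127 */
    return (int8_t)(x < 0 ? -x : x);
}

static int8_t qneg_s8(int8_t x)
{
    if (x == INT8_MIN) return INT8_MAX;  /* -(-128) saturates to +127 */
    return (int8_t)-x;
}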
neon-add-encoding.s
    17  @ CHECK: vaddl.s8 q8, d17, d16 @ encoding: [0xa0,0x00,0xc1,0xf2]
    18  vaddl.s8 q8, d17, d16
    30  @ CHECK: vaddw.s8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf2]
    31  vaddw.s8 q8, q8, d18
    43  @ CHECK: vhadd.s8 d16, d16, d17 @ encoding: [0xa1,0x00,0x40,0xf2]
    44  vhadd.s8 d16, d16, d17
    55  @ CHECK: vhadd.s8 q8, q8, q9 @ encoding: [0xe2,0x00,0x40,0xf2]
    56  vhadd.s8 q8, q8, q9
    69  vhadd.s8 d11, d24
    75  vhadd.s8 q1, q12
    [all …]
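The add variants tested here differ in how they avoid (or accept) overflow: vaddl widens both s8 operands to s16, vaddw adds an s8 to an already-wide s16, and vhadd halves the exact sum so the result always fits back in s8. One lane of each in scalar form, including the rounding vrhadd form that appears in the Thumb-2 file further down:

#include <stdint.h>

/* One lane of each add variant; the halving forms compute the sum in
 * wider arithmetic, so they cannot overflow the element type. */
static int16_t addl_s8(int8_t a, int8_t b)  { return (int16_t)a + b; }                        /* vaddl  */
static int16_t addw_s8(int16_t a, int8_t b) { return (int16_t)(a + b); }                      /* vaddw  */
static int8_t  hadd_s8(int8_t a, int8_t b)  { return (int8_t)(((int16_t)a + b) >> 1); }       /* vhadd  */
static int8_t  rhadd_s8(int8_t a, int8_t b) { return (int8_t)(((int16_t)a + b + 1) >> 1); }   /* vrhadd */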
neon-minmax-encoding.s
    3   vmax.s8 d1, d2, d3
    11  vmax.s8 d2, d3
    19  vmax.s8 q1, q2, q3
    27  vmax.s8 q2, q3
    35  @ CHECK: vmax.s8 d1, d2, d3 @ encoding: [0x03,0x16,0x02,0xf2]
    42  @ CHECK: vmax.s8 d2, d2, d3 @ encoding: [0x03,0x26,0x02,0xf2]
    49  @ CHECK: vmax.s8 q1, q2, q3 @ encoding: [0x46,0x26,0x04,0xf2]
    56  @ CHECK: vmax.s8 q2, q2, q3 @ encoding: [0x46,0x46,0x04,0xf2]
    65  vmin.s8 d1, d2, d3
    73  vmin.s8 d2, d3
    [all …]
neont2-minmax-encoding.s
    5   vmax.s8 d1, d2, d3
    13  vmax.s8 d2, d3
    21  vmax.s8 q1, q2, q3
    29  vmax.s8 q2, q3
    37  @ CHECK: vmax.s8 d1, d2, d3 @ encoding: [0x02,0xef,0x03,0x16]
    44  @ CHECK: vmax.s8 d2, d2, d3 @ encoding: [0x02,0xef,0x03,0x26]
    51  @ CHECK: vmax.s8 q1, q2, q3 @ encoding: [0x04,0xef,0x46,0x26]
    58  @ CHECK: vmax.s8 q2, q2, q3 @ encoding: [0x04,0xef,0x46,0x46]
    67  vmin.s8 d1, d2, d3
    75  vmin.s8 d2, d3
    [all …]
neon-cmp-encoding.s
    21   vcge.s8 d16, d16, d17
    28   vcge.s8 q8, q8, q9
    38   @ CHECK: vcge.s8 d16, d16, d17 @ encoding: [0xb1,0x03,0x40,0xf2]
    45   @ CHECK: vcge.s8 q8, q8, q9 @ encoding: [0xf2,0x03,0x40,0xf2]
    55   vcgt.s8 d16, d16, d17
    62   vcgt.s8 q8, q8, q9
    72   @ CHECK: vcgt.s8 d16, d16, d17 @ encoding: [0xa1,0x03,0x40,0xf2]
    79   @ CHECK: vcgt.s8 q8, q8, q9 @ encoding: [0xe2,0x03,0x40,0xf2]
    104  vcge.s8 d16, d16, #0
    105  vcle.s8 d16, d16, #0
    [all …]
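A detail these comparison tests rely on: NEON comparisons do not produce booleans but lane masks, all ones on true and all zeros on false, ready to feed into vbsl or vand; the #0 forms compare each lane against zero. One lane in scalar form:

#include <stdint.h>

/* One s8 lane of vcge/vcgt: the result is a mask lane, not a 0/1 flag. */
static uint8_t cge_s8(int8_t a, int8_t b) { return a >= b ? 0xFF : 0x00; }
static uint8_t cgt_s8(int8_t a, int8_t b) { return a >  b ? 0xFF : 0x00; }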
neon-absdiff-encoding.s
    3   @ CHECK: vabd.s8 d16, d16, d17 @ encoding: [0xa1,0x07,0x40,0xf2]
    4   vabd.s8 d16, d16, d17
    17  @ CHECK: vabd.s8 q8, q8, q9 @ encoding: [0xe2,0x07,0x40,0xf2]
    18  vabd.s8 q8, q8, q9
    32  @ CHECK: vabdl.s8 q8, d16, d17 @ encoding: [0xa1,0x07,0xc0,0xf2]
    33  vabdl.s8 q8, d16, d17
    45  @ CHECK: vaba.s8 d16, d18, d17 @ encoding: [0xb1,0x07,0x42,0xf2]
    46  vaba.s8 d16, d18, d17
    57  @ CHECK: vaba.s8 q9, q8, q10 @ encoding: [0xf4,0x27,0x40,0xf2]
    58  vaba.s8 q9, q8, q10
    [all …]
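The absolute-difference family tested here is the classic SAD building block: vabd truncates |a-b| back to the element size, vabdl keeps the widened result, and vaba adds the difference to an accumulator. One lane of each:

#include <stdint.h>

/* One lane of the absolute-difference family. |a-b| can reach 255, so
 * vabd.s8 keeps only the low 8 bits while vabdl widens to s16. */
static int16_t abdl_s8(int8_t a, int8_t b)
{
    int16_t d = (int16_t)(a - b);
    return d < 0 ? (int16_t)-d : d;
}

static int8_t abd_s8(int8_t a, int8_t b)             { return (int8_t)abdl_s8(a, b); }
static int8_t aba_s8(int8_t acc, int8_t a, int8_t b) { return (int8_t)(acc + abdl_s8(a, b)); }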
neon-shift-encoding.s
    47   vshr.s8 d16, d16, #7
    51   vshr.s8 q8, q8, #7
    64   @ CHECK: vshr.s8 d16, d16, #7 @ encoding: [0x30,0x00,0xc9,0xf2]
    68   @ CHECK: vshr.s8 q8, q8, #7 @ encoding: [0x70,0x00,0xc9,0xf2]
    82   vshr.s8 d16, #7
    86   vshr.s8 q8, #7
    99   @ CHECK: vshr.s8 d16, d16, #7 @ encoding: [0x30,0x00,0xc9,0xf2]
    103  @ CHECK: vshr.s8 q8, q8, #7 @ encoding: [0x70,0x00,0xc9,0xf2]
    109  vsra.s8 d16, d6, #7
    113  vsra.s8 q1, q8, #7
    [all …]
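Two semantic points in these shift tests: vsra shifts right and then accumulates into the destination, and the two-operand spellings (vshr.s8 d16, #7) are aliases that reuse the destination as the source, which is why the CHECK lines repeat. With #7, an s8 arithmetic shift yields -1 for negative lanes and 0 otherwise, a common sign-mask idiom. One lane:

#include <stdint.h>

/* One s8 lane of vshr (arithmetic right shift) and vsra (shift right
 * and accumulate into the destination). */
static int8_t shr_s8(int8_t x, int n)             { return (int8_t)(x >> n); }
static int8_t sra_s8(int8_t acc, int8_t x, int n) { return (int8_t)(acc + (x >> n)); }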
neont2-absdiff-encoding.s
    5   vabd.s8 d16, d16, d17
    12  vabd.s8 q8, q8, q9
    20  @ CHECK: vabd.s8 d16, d16, d17 @ encoding: [0x40,0xef,0xa1,0x07]
    27  @ CHECK: vabd.s8 q8, q8, q9 @ encoding: [0x40,0xef,0xe2,0x07]
    36  vabdl.s8 q8, d16, d17
    43  @ CHECK: vabdl.s8 q8, d16, d17 @ encoding: [0xc0,0xef,0xa1,0x07]
    51  vaba.s8 d16, d18, d17
    57  vaba.s8 q9, q8, q10
    64  @ CHECK: vaba.s8 d16, d18, d17 @ encoding: [0x42,0xef,0xb1,0x07]
    70  @ CHECK: vaba.s8 q9, q8, q10 @ encoding: [0x40,0xef,0xf4,0x27]
    [all …]
neont2-pairwise-encoding.s
    15  vpaddl.s8 d7, d10
    21  vpaddl.s8 q4, q7
    28  @ CHECK: vpaddl.s8 d7, d10 @ encoding: [0xb0,0xff,0x0a,0x72]
    34  @ CHECK: vpaddl.s8 q4, q7 @ encoding: [0xb0,0xff,0x4e,0x82]
    42  vpadal.s8 d16, d4
    48  vpadal.s8 q4, q10
    55  @ CHECK: vpadal.s8 d16, d4 @ encoding: [0xf0,0xff,0x04,0x06]
    61  @ CHECK: vpadal.s8 q4, q10 @ encoding: [0xb0,0xff,0x64,0x86]
    69  vpmin.s8 d16, d29, d10
    77  @ CHECK: vpmin.s8 d16, d29, d10 @ encoding: [0x4d,0xef,0x9a,0x0a]
    [all …]
neont2-add-encoding.s
    18  @ CHECK: vaddl.s8 q8, d17, d16 @ encoding: [0xc1,0xef,0xa0,0x00]
    19  vaddl.s8 q8, d17, d16
    31  @ CHECK: vaddw.s8 q8, q8, d18 @ encoding: [0xc0,0xef,0xa2,0x01]
    32  vaddw.s8 q8, q8, d18
    44  @ CHECK: vhadd.s8 d16, d16, d17 @ encoding: [0x40,0xef,0xa1,0x00]
    45  vhadd.s8 d16, d16, d17
    56  @ CHECK: vhadd.s8 q8, q8, q9 @ encoding: [0x40,0xef,0xe2,0x00]
    57  vhadd.s8 q8, q8, q9
    69  @ CHECK: vrhadd.s8 d16, d16, d17 @ encoding: [0x40,0xef,0xa1,0x01]
    70  vrhadd.s8 d16, d16, d17
    [all …]
neon-pairwise-encoding.s
    21  @ CHECK: vpaddl.s8 d16, d16 @ encoding: [0x20,0x02,0xf0,0xf3]
    22  vpaddl.s8 d16, d16
    33  @ CHECK: vpaddl.s8 q8, q8 @ encoding: [0x60,0x02,0xf0,0xf3]
    34  vpaddl.s8 q8, q8
    45  @ CHECK: vpadal.s8 d16, d17 @ encoding: [0x21,0x06,0xf0,0xf3]
    46  vpadal.s8 d16, d17
    57  @ CHECK: vpadal.s8 q9, q8 @ encoding: [0x60,0x26,0xf0,0xf3]
    58  vpadal.s8 q9, q8
    69  @ CHECK: vpmin.s8 d16, d16, d17 @ encoding: [0xb1,0x0a,0x40,0xf2]
    70  vpmin.s8 d16, d16, d17
    [all …]
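The pairwise ops in the two pairwise test files reduce across adjacent lanes rather than across registers: vpaddl.s8 sums each pair of s8 lanes into an s16 lane, and vpadal additionally accumulates into the destination. A d-register's worth in scalar form:

#include <stdint.h>

/* vpaddl.s8 / vpadal.s8 over one d register: 8 s8 lanes in, 4 s16
 * lanes out; the "a" (accumulate) form adds into the destination. */
static void paddl_s8(int16_t out[4], const int8_t in[8])
{
    for (int i = 0; i < 4; i++)
        out[i] = (int16_t)(in[2 * i] + in[2 * i + 1]);
}

static void padal_s8(int16_t acc[4], const int8_t in[8])
{
    for (int i = 0; i < 4; i++)
        acc[i] = (int16_t)(acc[i] + in[2 * i] + in[2 * i + 1]);
}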
neon-satshift-encoding.s
    3   @ CHECK: vqshl.s8 d16, d16, d17 @ encoding: [0xb0,0x04,0x41,0xf2]
    4   vqshl.s8 d16, d16, d17
    19  @ CHECK: vqshl.s8 q8, q8, q9 @ encoding: [0xf0,0x04,0x42,0xf2]
    20  vqshl.s8 q8, q8, q9
    35  @ CHECK: vqshl.s8 d16, d16, #7 @ encoding: [0x30,0x07,0xcf,0xf2]
    36  vqshl.s8 d16, d16, #7
    51  @ CHECK: vqshlu.s8 d16, d16, #7 @ encoding: [0x30,0x06,0xcf,0xf3]
    52  vqshlu.s8 d16, d16, #7
    59  @ CHECK: vqshl.s8 q8, q8, #7 @ encoding: [0x70,0x07,0xcf,0xf2]
    60  vqshl.s8 q8, q8, #7
    [all …]
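Of the saturating shifts tested here, vqshl clamps to the signed range while vqshlu is the odd one out: it takes signed input but saturates to the unsigned range, so negative lanes become 0. A scalar model of the immediate forms (the register-shift form, where a negative count shifts right, is omitted):

#include <stdint.h>

/* One s8 lane of the immediate saturating left shifts above. The
 * multiply avoids left-shifting a negative value, which is undefined
 * in C. */
static int8_t qshl_s8(int8_t x, int n)
{
    int32_t v = (int32_t)x * (1 << n);
    if (v > INT8_MAX) return INT8_MAX;
    if (v < INT8_MIN) return INT8_MIN;
    return (int8_t)v;
}

static uint8_t qshlu_s8(int8_t x, int n)  /* signed in, unsigned saturation */
{
    int32_t v = (int32_t)x * (1 << n);
    if (v < 0) return 0;
    if (v > UINT8_MAX) return UINT8_MAX;
    return (uint8_t)v;
}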
/external/llvm/test/MC/Disassembler/ARM/
invalid-armv8.1a.txt
    4   [0x12,0x0b,0x01,0xf3] # vqrdmlah.s8 d0, d1, d2
    6   # CHECK-NEXT: [0x12,0x0b,0x01,0xf3] # vqrdmlah.s8 d0, d1, d2
    14  [0x54,0x0b,0x02,0xf3] # vqrdmlah.s8 q0, q1, q2
    16  # CHECK-NEXT: [0x54,0x0b,0x02,0xf3] # vqrdmlah.s8 q0, q1, q2
    24  [0x15,0x7c,0x06,0xf3] # vqrdmlsh.s8 d0, d1, d2
    26  # CHECK-NEXT: [0x15,0x7c,0x06,0xf3] # vqrdmlsh.s8 d0, d1, d2
    34  [0x54,0x0c,0x02,0xf3] # vqrdmlsh.s8 q0, q1, q2
    36  # CHECK-NEXT: [0x54,0x0c,0x02,0xf3] # vqrdmlsh.s8 q0, q1, q2
    44  [0x42,0x0e,0x81,0xf2] # vqrdmlah.s8 d0, d1, d2[0]
    46  # CHECK-NEXT: [0x42,0x0e,0x81,0xf2] # vqrdmlah.s8 d0, d1, d2[0]
    [all …]
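This test and its Thumb counterpart below make the same point: the v8.1-A VQRDMLAH/VQRDMLSH instructions exist only for s16 and s32 elements, so the would-be .s8 encodings must stay undefined under the disassembler. For context, a scalar model of the s16 lane operation the valid sizes perform, per my reading of the ARM ARM pseudocode (a sketch, not a reference implementation):

#include <stdint.h>

/* One s16 lane of vqrdmlah (+) / vqrdmlsh (-): rounding doubling
 * multiply returning the high half, merged with the accumulator before
 * a single final saturation. An arithmetic right shift is assumed. */
static int16_t sat16(int64_t v)
{
    if (v > INT16_MAX) return INT16_MAX;
    if (v < INT16_MIN) return INT16_MIN;
    return (int16_t)v;
}

static int16_t qrdmlah_s16(int16_t acc, int16_t a, int16_t b)
{
    int64_t v = (int64_t)acc * 65536 + 2 * (int64_t)a * b + (1 << 15);
    return sat16(v >> 16);
}

static int16_t qrdmlsh_s16(int16_t acc, int16_t a, int16_t b)
{
    int64_t v = (int64_t)acc * 65536 - 2 * (int64_t)a * b + (1 << 15);
    return sat16(v >> 16);
}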
invalid-thumbv8.1a.txt
    4   [0x01,0xff,0x12,0x0b] # vqrdmlah.s8 d0, d1, d2
    6   [0x02,0xff,0x54,0x0b] # vqrdmlah.s8 q0, q1, q2
    9   [0x01,0xff,0x12,0x0c] # vqrdmlsh.s8 d0, d1, d2
    11  [0x02,0xff,0x54,0x0c] # vqrdmlsh.s8 q0, q1, q2
    15  # CHECK-NEXT: [0x01,0xff,0x12,0x0b] # vqrdmlah.s8 d0, d1, d2
    21  # CHECK-NEXT: [0x02,0xff,0x54,0x0b] # vqrdmlah.s8 q0, q1, q2
    27  # CHECK-NEXT: [0x01,0xff,0x12,0x0c] # vqrdmlsh.s8 d0, d1, d2
    33  # CHECK-NEXT: [0x02,0xff,0x54,0x0c] # vqrdmlsh.s8 q0, q1, q2
    39  [0x81,0xef,0x42,0x0e] # vqrdmlah.s8 d0, d1, d2[0]
    41  [0x82,0xff,0x42,0x0e] # vqrdmlah.s8 q0, q1, d2[0]
    [all …]
/external/libgdx/backends/gdx-backends-gwt/src/com/badlogic/gdx/backends/gwt/emu/avian/
Utf8.java
    46  public static Object decode (byte[] s8, int offset, int length) {
    51  int x = s8[i++];
    60  int y = s8[i++];
    67  int y = s8[i++];
    68  int z = s8[i++];
    76  public static char[] decode16 (byte[] s8, int offset, int length) {
    77  Object decoded = decode(s8, offset, length);
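The truncated matches show the decoder's shape: decode() reads one lead byte x, then pulls one (y) or two (y, z) continuation bytes, and decode16 converts the result to UTF-16 chars, so only sequences up to three bytes (the Basic Multilingual Plane) matter. A sketch of that branch structure, in C for consistency with the sketches above (it mirrors the visible shape, not Avian's exact code, and omits validation as the listing does):

#include <stddef.h>
#include <stdint.h>

/* Decode UTF-8 into UTF-16 code units; returns the number written.
 * One-byte (ASCII), two-byte, and three-byte sequences cover the BMP,
 * which is all a UTF-16 char[] can hold directly. */
static size_t utf8_to_utf16(uint16_t *dst, const uint8_t *s8, size_t length)
{
    size_t n = 0;
    for (size_t i = 0; i < length; ) {
        uint8_t x = s8[i++];
        if (x < 0x80) {                      /* 0xxxxxxx */
            dst[n++] = x;
        } else if ((x & 0xE0) == 0xC0) {     /* 110xxxxx 10yyyyyy */
            uint8_t y = s8[i++];
            dst[n++] = (uint16_t)(((x & 0x1F) << 6) | (y & 0x3F));
        } else {                             /* 1110xxxx 10yyyyyy 10zzzzzz */
            uint8_t y = s8[i++];
            uint8_t z = s8[i++];
            dst[n++] = (uint16_t)(((x & 0x0F) << 12) | ((y & 0x3F) << 6) | (z & 0x3F));
        }
    }
    return n;
}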