/external/libxaac/decoder/armv8/ |
D | ixheaacd_sbr_qmf_analysis32_neon.s |
    58: LDRSH w4, [x6]
    59: ADD x6, x6, x9
    60: LDRSH w8, [x6]
    61: ADD x6, x6, x9
    62: LDRSH w11, [x6]
    63: ADD x6, x6, x9
    64: LDRSH w12, [x6]
    65: ADD x6, x6, x9
    72: LDRSH w4, [x6]
    73: ADD x6, x6, x9
    [all …]
|
D | ixheaacd_apply_scale_factors.s |
    33: MOV x21, x6
    73: SUBS x6, x11, x5, ASR #2    // 37-(scale_factor >> 2)
    83: SUB x14, x6, #1             // dont do that extra LSL #1 in SMULWB
    90: SMULL x6, w6, w17
    95: ASR x6, x6, #16
    100: ASR x6, x6, x14            // buffex1 = shx32(buffex1, shift);
    115: NEGS x14, x6
    122: SMULL x6, w6, w17
    125: ASR x6, x6, #16
    128: LSL x6, x6, #1
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/PowerPC/ |
D | fold-frame-offset-using-rr.mir |
    19: liveins: $x3, $x1, $x4, $x6
    24: $x6 = LD 4, killed $x4
    25: ; CHECK: $x6 = LDX killed $x4, killed $x3
    35: liveins: $x3, $x1, $x4, $x6
    40: $x6 = LD 4, killed $x3
    41: ; CHECK: $x6 = LDX killed $x4, killed $x3
    51: liveins: $x3, $x1, $x4, $x6
    56: STD $x3, killed $x6, 100
    57: ; CHECK: STD $x3, killed $x6, 100
    58: $x6 = LD 4, killed $x3
    [all …]
|
/external/llvm-project/llvm/test/MC/RISCV/ |
D | rv64b-aliases-valid.s |
    20: zext.b x5, x6
    24: zext.h x5, x6
    28: zext.w x5, x6
    32: rev.p x5, x6
    36: rev2.n x5, x6
    40: rev.n x5, x6
    44: rev4.b x5, x6
    48: rev2.b x5, x6
    52: rev.b x5, x6
    56: rev8.h x5, x6
    [all …]
|
D | rv32b-aliases-valid.s |
    20: zext.b x5, x6
    24: zext.h x5, x6
    28: rev.p x5, x6
    32: rev2.n x5, x6
    36: rev.n x5, x6
    40: rev4.b x5, x6
    44: rev2.b x5, x6
    48: rev.b x5, x6
    52: rev8.h x5, x6
    56: rev4.h x5, x6
    [all …]
|
/external/libaom/libaom/av1/encoder/x86/ |
D | av1_fwd_txfm_sse2.c | (all matches in fdct8x16_new_sse2())
    329: __m128i x6[16];
    330: x6[0] = x5[0];
    331: x6[1] = x5[1];
    332: x6[2] = x5[2];
    333: x6[3] = x5[3];
    334: x6[4] = x5[4];
    335: x6[5] = x5[5];
    336: x6[6] = x5[6];
    337: x6[7] = x5[7];
    338: btf_16_sse2(cospi_p60_p04, cospi_m04_p60, x5[8], x5[15], x6[8], x6[15]);
    [all …]
|
D | av1_fwd_txfm1d_sse4.c | (all matches in av1_fdct64_sse4_1())
    946: __m128i x6[64];
    947: btf_32_type0_sse4_1_new(cospi_p32, cospi_p32, x5[0], x5[1], x6[0], x6[1],
    949: btf_32_type1_sse4_1_new(cospi_p48, cospi_p16, x5[2], x5[3], x6[2], x6[3],
    951: x6[4] = _mm_add_epi32(x5[4], x5[5]);
    952: x6[5] = _mm_sub_epi32(x5[4], x5[5]);
    953: x6[6] = _mm_sub_epi32(x5[7], x5[6]);
    954: x6[7] = _mm_add_epi32(x5[7], x5[6]);
    955: x6[8] = x5[8];
    956: btf_32_type0_sse4_1_new(cospi_m16, cospi_p48, x5[9], x5[14], x6[9], x6[14],
    958: btf_32_type0_sse4_1_new(cospi_m48, cospi_m16, x5[10], x5[13], x6[10], x6[13],
    [all …]
|
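The two libaom matches above are staged butterfly rotations (btf_16_sse2 / btf_32_type*_sse4_1_new) that combine pairs of intermediate values with cosine constants. Below is a rough, self-contained sketch of the 16-bit flavour, not libaom's actual helper: the function name, the 12-bit cosine scale, and the coefficient packing are assumptions made for the sketch.

#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>

/* Sketch of a 16-bit butterfly/rotation stage:
 *   out0 = round((w0*in0 + w1*in1) >> 12)
 *   out1 = round((-w1*in0 + w0*in1) >> 12)
 * The 12-bit scale is an assumed value, not libaom's constant. */
static void btf16_rotate_sketch(__m128i in0, __m128i in1, int16_t w0, int16_t w1,
                                __m128i *out0, __m128i *out1) {
  /* coefficient pairs laid out to match the interleaved inputs */
  const __m128i coef_a = _mm_set_epi16(w1, w0, w1, w0, w1, w0, w1, w0);
  const __m128i coef_b = _mm_set_epi16(w0, -w1, w0, -w1, w0, -w1, w0, -w1);
  const __m128i rnd = _mm_set1_epi32(1 << 11);
  __m128i lo = _mm_unpacklo_epi16(in0, in1);   /* interleave element pairs */
  __m128i hi = _mm_unpackhi_epi16(in0, in1);
  __m128i a_lo = _mm_srai_epi32(_mm_add_epi32(_mm_madd_epi16(lo, coef_a), rnd), 12);
  __m128i a_hi = _mm_srai_epi32(_mm_add_epi32(_mm_madd_epi16(hi, coef_a), rnd), 12);
  __m128i b_lo = _mm_srai_epi32(_mm_add_epi32(_mm_madd_epi16(lo, coef_b), rnd), 12);
  __m128i b_hi = _mm_srai_epi32(_mm_add_epi32(_mm_madd_epi16(hi, coef_b), rnd), 12);
  *out0 = _mm_packs_epi32(a_lo, a_hi);         /* saturating pack back to 16-bit */
  *out1 = _mm_packs_epi32(b_lo, b_hi);
}

The interleave-first layout is what lets _mm_madd_epi16 do both multiplies and the add in one instruction per 32-bit lane.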
/external/libhevc/decoder/arm64/ |
D | ihevcd_fmt_conv_420sp_to_420p.s |
    93: mov x9, x6          //// Load u2_height
    111: MOV x6, x8         //// Copying width
    115: SUB x6, x6, #16
    118: CMP x6, #16
    120: CMP x6, #0
    126: sub x20, x6, #16
    127: neg x6, x20
    128: SUB x0, x0, x6
    129: SUB x2, x2, x6
    166: MOV x6, x8         //// Copying width
    [all …]
|
D | ihevcd_fmt_conv_420sp_to_420sp.s |
    100: mov x7, x6         //// Load u2_stridey
    109: MOV x6, x8         //// Copying width
    113: SUB x6, x6, #32
    122: CMP x6, #32
    124: CMP x6, #0
    130: sub x20, x6, #32
    131: neg x6, x20
    132: SUB x0, x0, x6
    133: SUB x2, x2, x6
    166: MOV x6, x8         //// Copying width
    [all …]
|
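Both libhevc decoder files above convert a 4:2:0 semi-planar picture: luma is copied row by row, and the interleaved chroma plane is either copied as-is (420sp_to_420sp) or split into separate U and V planes (420sp_to_420p). A minimal scalar sketch of the deinterleave step follows, assuming 8-bit samples and NV12-style ordering (U in the even bytes); the function and parameter names are illustrative, not the library's API.

#include <stdint.h>

/* Sketch: split the interleaved UV plane of a 4:2:0 semi-planar frame into
 * separate U and V planes. Strides are in bytes; wd/ht are the chroma plane
 * dimensions (half the luma width/height). */
static void uv_deinterleave_sketch(const uint8_t *src_uv, int src_strd,
                                   uint8_t *dst_u, uint8_t *dst_v, int dst_strd,
                                   int wd, int ht) {
  for (int y = 0; y < ht; y++) {
    for (int x = 0; x < wd; x++) {
      dst_u[x] = src_uv[2 * x];      /* even bytes carry U (Cb) in NV12 */
      dst_v[x] = src_uv[2 * x + 1];  /* odd bytes carry V (Cr) */
    }
    src_uv += src_strd;
    dst_u += dst_strd;
    dst_v += dst_strd;
  }
}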
/external/libhevc/common/arm64/ |
D | ihevc_intra_pred_luma_mode_18_34.s |
    127: csel x6, x20, x6, eq
    129: csel x6, x20, x6, ne
    134: ld1 {v0.8b}, [x8], x6
    136: ld1 {v1.8b}, [x8], x6
    138: ld1 {v2.8b}, [x8], x6
    139: ld1 {v3.8b}, [x8], x6
    141: ld1 {v4.8b}, [x8], x6
    142: ld1 {v5.8b}, [x8], x6
    143: ld1 {v6.8b}, [x8], x6
    145: ld1 {v7.8b}, [x8], x6
    [all …]
|
D | ihevc_inter_pred_chroma_copy.s |
    103: LSL x12, x6, #1          // wd << 1
    123: ADD x6, x1, x3           // pu1_dst_tmp += dst_strd
    127: ST1 {v0.s}[0], [x6], x3  // vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
    130: ST1 {v0.s}[0], [x6], x3  // vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
    133: ST1 {v0.s}[0], [x6], x3  // vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
    139: SUB x1, x6, x11          // pu1_dst = pu1_dst_tmp
    154: ADD x6, x1, x3           // pu1_dst_tmp += dst_strd
    158: ST1 {v0.s}[0], [x6], x3  // vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
    177: ADD x6, x1, x3           // pu1_dst_tmp += dst_strd
    180: ST1 {v1.8b}, [x6], x3    // vst1_u8(pu1_dst_tmp, tmp_src)
    [all …]
|
D | ihevc_padding.s |
    123: add x6, x5, x1
    131: add x7, x6, x1
    133: st1 {v4.16b}, [x6], #16  // 128/8 = 16 bytes store
    134: st1 {v4.16b}, [x6], #16  // 128/8 = 16 bytes store
    135: st1 {v4.16b}, [x6], #16  // 128/8 = 16 bytes store
    136: st1 {v4.16b}, [x6], #16  // 128/8 = 16 bytes store
    137: st1 {v4.16b}, [x6]       // 128/8 = 16 bytes store
    241: add x6, x5, x1
    249: add x7, x6, x1
    251: st1 {v4.16b}, [x6], #16  // 128/8 = 16 bytes store
    [all …]
|
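ihevc_padding.s fills the borders of a reconstructed plane by replicating edge pixels; the repeated st1 {v4.16b} stores above write one duplicated pixel 16 bytes at a time. A scalar sketch of the same idea, assuming an in-place buffer with pad_size bytes of margin on each side of every row (names and the buffer convention are assumptions):

#include <stdint.h>
#include <string.h>

/* Sketch: pad a plane on the left and right by replicating each row's edge
 * pixel, the scalar equivalent of the vector stores above. */
static void pad_left_right_sketch(uint8_t *plane, int stride, int wd, int ht,
                                  int pad_size) {
  for (int row = 0; row < ht; row++) {
    uint8_t *p = plane + row * stride;
    memset(p - pad_size, p[0], pad_size);   /* replicate first pixel to the left  */
    memset(p + wd, p[wd - 1], pad_size);    /* replicate last pixel to the right  */
  }
}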
D | ihevc_inter_pred_chroma_vert_w16out.s |
    113: mov x17, x6              // wd
    119: mov x6, x17              // loads wd
    125: tst x6, #3               // checks (wd & 3)
    127: lsl x10, x6, #1          // 2*wd
    147: add x6, x0, x2           // pu1_src + src_strd
    148: ld1 {v17.8b}, [x6], x2   // loads pu1_src
    152: ld1 {v4.8b}, [x6], x2    // loads incremented src
    154: ld1 {v16.8b}, [x6], x2   // loads incremented src
    157: ld1 {v18.8b}, [x6]       // loads the incremented src
    162: add x6, x1, x3           // pu1_dst + dst_strd
    [all …]
|
D | ihevc_inter_pred_chroma_vert.s |
    112: mov x17, x6              // wd
    117: mov x6, x17              // loads wd
    123: tst x6, #3               // checks (wd & 3)
    125: lsl x10, x6, #1          // 2*wd
    144: add x6, x0, x2           // pu1_src + src_strd
    145: ld1 {v17.8b}, [x6], x2   // loads pu1_src
    149: ld1 {v4.8b}, [x6], x2    // loads incremented src
    151: ld1 {v16.8b}, [x6], x2   // loads incremented src
    156: ld1 {v18.8b}, [x6]       // loads the incremented src
    160: add x6, x1, x3           // pu1_dst + dst_strd
    [all …]
|
D | ihevc_inter_pred_chroma_copy_w16out.s |
    112: mov x17, x6              // wd
    130: lsl x6, x3, #1
    131: adds x6, x6, #0
    143: add x10, x1, x6
    154: st1 {v22.1d}, [x10], x6  // vst1q_lane_s64(pi2_dst_tmp, temp, 0)
    157: st1 {v24.1d}, [x10], x6  // vst1q_lane_s64(pi2_dst_tmp, temp, 0)
    160: st1 {v26.1d}, [x10], x6  // vst1q_lane_s64(pi2_dst_tmp, temp, 0)
    187: add x10, x1, x6
    198: st1 {v22.1d}, [x10], x6  // vst1q_lane_s64(pi2_dst_tmp, temp, 0)
    220: add x6, x0, x2           // pu1_src_tmp += src_strd
    [all …]
|
D | ihevc_inter_pred_luma_copy_w16out.s |
    92: mov x17, x6                // wd
    101: lsl x6, x3, #1
    102: adds x6, x6, #0
    112: add x10, x1, x6
    123: st1 {v22.d}[0], [x10], x6 // vst1q_lane_s64(pi2_dst_tmp, temp, 0)
    126: st1 {v24.d}[0], [x10], x6 // vst1q_lane_s64(pi2_dst_tmp, temp, 0)
    129: st1 {v26.d}[0], [x10], x6 // vst1q_lane_s64(pi2_dst_tmp, temp, 0)
    160: add x6, x0, x2            // pu1_src_tmp += src_strd
    163: ld1 {v3.8b}, [x6], x2     // vld1_u8(pu1_src_tmp)
    164: ld1 {v5.8b}, [x6], x2     // vld1_u8(pu1_src_tmp)
    [all …]
|
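The *_copy_w16out kernels above copy 8-bit source pixels into a 16-bit intermediate buffer for later weighted or bi-prediction. A plain-C sketch of that copy, assuming the usual left shift of 6 (14-bit internal precision minus 8-bit depth); the shift amount and the names are assumptions, not taken from the library's headers:

#include <stdint.h>

/* Sketch: copy an 8-bit block into a 16-bit destination with a fixed left
 * shift. dst_strd is measured in 16-bit elements. */
static void copy_w16out_sketch(const uint8_t *src, int src_strd,
                               int16_t *dst, int dst_strd, int wd, int ht) {
  for (int row = 0; row < ht; row++) {
    for (int col = 0; col < wd; col++) {
      dst[col] = (int16_t)(src[col] << 6);  /* assumed scale for 8-bit input */
    }
    src += src_strd;
    dst += dst_strd;
  }
}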
D | ihevc_intra_pred_chroma_mode_18_34.s |
    127: csel x6, x20, x6, eq
    129: csel x6, x20, x6, ne
    136: ld1 {v0.8b, v1.8b}, [x8], x6
    138: ld1 {v2.8b, v3.8b}, [x8], x6
    140: ld1 {v4.8b, v5.8b}, [x8], x6
    142: ld1 {v6.8b, v7.8b}, [x8], x6
    144: ld1 {v16.8b, v17.8b}, [x8], x6
    146: ld1 {v18.8b, v19.8b}, [x8], x6
    148: ld1 {v20.8b, v21.8b}, [x8], x6
    150: ld1 {v22.8b, v23.8b}, [x8], x6
|
D | ihevc_intra_pred_chroma_dc.s |
    117: add x6, x0, x4, lsl #1           // &src[2nt]
    131: ld2 {v30.8b, v31.8b}, [x6], #16  // load from src[nt]
    159: ld2 {v30.8b, v31.8b}, [x6], #16  // load from src[nt]
    199: lsl x6, x3, #2
    200: csel x11, x6, x11, eq
    207: sub x6, x6, #16
    216: st2 {v16.8b, v17.8b}, [x2], x6
    217: st2 {v16.8b, v17.8b}, [x5], x6
    218: st2 {v16.8b, v17.8b}, [x8], x6
    219: st2 {v16.8b, v17.8b}, [x10], x6
    [all …]
|
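ihevc_intra_pred_chroma_dc.s computes a DC intra prediction: the ld2/st2 pairs deinterleave and re-interleave the Cb/Cr samples, and every output pixel is set to the average of the reference neighbours. A per-plane scalar sketch, using separate left/top pointers rather than committing to the library's reference-array layout (names are illustrative):

#include <stdint.h>

/* Sketch: DC intra prediction for one nt x nt plane. dc is the rounded
 * average of the nt left and nt top reference samples. */
static void intra_dc_sketch(const uint8_t *left, const uint8_t *top,
                            uint8_t *dst, int dst_strd, int nt) {
  int acc = nt;  /* rounding term for the division by 2*nt */
  for (int i = 0; i < nt; i++) acc += left[i] + top[i];
  uint8_t dc = (uint8_t)(acc / (2 * nt));
  for (int row = 0; row < nt; row++) {
    for (int col = 0; col < nt; col++) {
      dst[row * dst_strd + col] = dc;  /* flat fill with the DC value */
    }
  }
}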
/external/libmpeg2/common/armv8/ |
D | impeg2_format_conv.s |
    147: sub x7, x7, x6           //// Source increment
    149: sub x8, x8, x6           //// Destination increment
    153: mov x16, x6
    189: add x6, x6, 1
    190: bic x6, x6, #1
    194: sub x7, x7, x6, lsr #1   //// Source increment
    196: sub x8, x8, x6           //// Destination increment
    198: lsr x6, x6, #1
    201: mov x16, x6
    325: sub x7, x7, x6           //// Source increment
    [all …]
|
/external/libavc/common/armv8/ |
D | ih264_padding_neon_av8.s |
    93: neg x6, x1
    103: st1 {v0.8b, v1.8b}, [x4], x6
    181: sub x6, x1, #16
    231: st1 {v0.16b}, [x4], x6
    234: st1 {v2.16b}, [x4], x6   // 16 bytes store
    239: st1 {v4.16b}, [x4], x6   // 16 bytes store
    246: st1 {v6.16b}, [x4], x6   // 16 bytes store
    251: st1 {v0.16b}, [x4], x6   // 16 bytes store
    256: st1 {v2.16b}, [x4], x6   // 16 bytes store
    259: st1 {v4.16b}, [x4], x6
    [all …]
|
D | ih264_inter_pred_luma_copy_av8.s |
    107: add x6, x1, x3           // pu1_dst_tmp += dst_strd
    111: st1 {v0.s}[0], [x6], x3  // vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
    114: st1 {v0.s}[0], [x6], x3  // vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
    117: st1 {v0.s}[0], [x6], x3  // vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
    124: sub x1, x6, x11          // pu1_dst = pu1_dst_tmp
    144: add x6, x1, x3           // pu1_dst_tmp += dst_strd
    147: st1 {v1.8b}, [x6], x3    // vst1_u8(pu1_dst_tmp, tmp_src)
    150: st1 {v2.8b}, [x6], x3    // vst1_u8(pu1_dst_tmp, tmp_src)
    152: st1 {v3.8b}, [x6], x3    // vst1_u8(pu1_dst_tmp, tmp_src)
    158: sub x1, x6, x11          // pu1_dst = pu1_dst_tmp
    [all …]
|
/external/llvm/test/MC/Disassembler/AMDGPU/ |
D | sopk_vi.txt |
    3: # VI: s_cmovk_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb0]
    6: # VI: s_cmpk_eq_i32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb1]
    9: # VI: s_cmpk_lg_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb1]
    12: # VI: s_cmpk_gt_i32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb2]
    15: # VI: s_cmpk_ge_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb2]
    18: # VI: s_cmpk_lt_i32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb3]
    21: # VI: s_cmpk_le_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb3]
    24: # VI: s_cmpk_eq_u32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb4]
    27: # VI: s_cmpk_lg_u32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb4]
    30: # VI: s_cmpk_gt_u32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb5]
    [all …]
|
/external/libvpx/libvpx/vp9/common/x86/ |
D | vp9_highbd_iht8x8_add_sse4.c | (all matches in highbd_iadst8_sse4_1())
    58: __m128i x0[2], x1[2], x2[2], x3[2], x4[2], x5[2], x6[2], x7[2];
    80: x6[0] = _mm_sub_epi64(s2[0], s6[0]);
    81: x6[1] = _mm_sub_epi64(s2[1], s6[1]);
    97: x6[0] = dct_const_round_shift_64bit(x6[0]);
    98: x6[1] = dct_const_round_shift_64bit(x6[1]);
    107: x6[0] = pack_4(x6[0], x6[1]);
    117: highbd_iadst_butterfly_sse4_1(x7[0], x6[0], cospi_24_64, cospi_8_64, s7, s6);
    123: x6[0] = _mm_sub_epi64(s4[0], s6[0]);
    124: x6[1] = _mm_sub_epi64(s4[1], s6[1]);
    131: x6[0] = dct_const_round_shift_64bit(x6[0]);
    [all …]
|
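The libvpx entry above rounds each 64-bit product with dct_const_round_shift_64bit before packing back down to 32 bits. A scalar sketch of that rounding, assuming the usual 2^14 scaling of the cospi constants (DCT_CONST_BITS); the constant is an assumption here, not read from the source:

#include <stdint.h>

/* Sketch: round-to-nearest and drop the fixed-point scale applied to the
 * cospi constants, mirroring the library's ROUND_POWER_OF_TWO-style macro. */
static int64_t dct_const_round_shift_sketch(int64_t x) {
  return (x + (1LL << 13)) >> 14;  /* assumed 14-bit constant scale */
}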
/external/llvm-project/llvm/test/MC/Disassembler/AMDGPU/ |
D | sopk_vi.txt |
    3: # VI: s_cmovk_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb0]
    6: # VI: s_cmpk_eq_i32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb1]
    9: # VI: s_cmpk_lg_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb1]
    12: # VI: s_cmpk_gt_i32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb2]
    15: # VI: s_cmpk_ge_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb2]
    18: # VI: s_cmpk_lt_i32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb3]
    21: # VI: s_cmpk_le_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb3]
    24: # VI: s_cmpk_eq_u32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb4]
    27: # VI: s_cmpk_lg_u32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb4]
    30: # VI: s_cmpk_gt_u32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb5]
    [all …]
|
/external/boringssl/ios-aarch64/crypto/third_party/sike/asm/ |
D | fp-armv8.S |
    42: ldp x5, x6, [x0,#16]
    54: adcs x6, x6, xzr
    80: and x23, x6, x8
    93: adcs x4, x4, x6
    175: mul x5, x6, x12
    176: umulh x12, x6, x12
    180: mul x11, x6, x13
    181: umulh x13, x6, x13
    214: ldp x5, x6, [x0,#16]
    224: adcs x4, x4, x6
    [all …]
|
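The SIKE field-arithmetic file above chains adds/adcs for multi-limb additions and pairs mul with umulh for 64x64 -> 128-bit products. A portable C sketch of both patterns, assuming GCC/Clang's unsigned __int128 and an illustrative limb count (real SIKE code fixes the limb count per field size and is written to run in constant time):

#include <stdint.h>

#define NLIMBS 8  /* illustrative limb count, not SIKE's actual parameter */

/* Sketch: multi-limb addition with carry propagation (the adds/adcs chain). */
static uint64_t mp_add_sketch(uint64_t r[NLIMBS],
                              const uint64_t a[NLIMBS], const uint64_t b[NLIMBS]) {
  uint64_t carry = 0;
  for (int i = 0; i < NLIMBS; i++) {
    unsigned __int128 t = (unsigned __int128)a[i] + b[i] + carry;
    r[i] = (uint64_t)t;
    carry = (uint64_t)(t >> 64);
  }
  return carry;  /* final carry-out */
}

/* Sketch: full 64x64 -> 128-bit product (the mul/umulh pair). */
static void mul64_sketch(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi) {
  unsigned __int128 p = (unsigned __int128)a * b;
  *lo = (uint64_t)p;         /* low half, as produced by mul   */
  *hi = (uint64_t)(p >> 64); /* high half, as produced by umulh */
}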