/external/libvpx/libvpx/vpx_dsp/mips/ |
D | idct32x32_msa.c |
      45  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in idct32x8_row_even_process_store() local
      49  LD_SH8(tmp_buf, 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);  in idct32x8_row_even_process_store()
      60  DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);  in idct32x8_row_even_process_store()
      61  BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);  in idct32x8_row_even_process_store()
      66  LD_SH8((tmp_buf + 16), 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);  in idct32x8_row_even_process_store()
      70  DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);  in idct32x8_row_even_process_store()
      74  reg4 = reg6 + reg2;  in idct32x8_row_even_process_store()
      75  reg6 = reg6 - reg2;  in idct32x8_row_even_process_store()
      89  DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);  in idct32x8_row_even_process_store()
      91  vec0 = reg0 - reg6;  in idct32x8_row_even_process_store()
      [all …]
|
D | idct16x16_msa.c |
      16  v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;  in vpx_idct16_1d_rows_msa() local
      20  LD_SH8(input, 16, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);  in vpx_idct16_1d_rows_msa()
      24  TRANSPOSE8x8_SH_SH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg0, reg1,  in vpx_idct16_1d_rows_msa()
      25  reg2, reg3, reg4, reg5, reg6, reg7);  in vpx_idct16_1d_rows_msa()
      29  DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);  in vpx_idct16_1d_rows_msa()
      30  BUTTERFLY_4(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);  in vpx_idct16_1d_rows_msa()
      34  BUTTERFLY_4(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14);  in vpx_idct16_1d_rows_msa()
      35  SUB4(reg2, loc1, reg14, loc0, reg6, loc3, reg10, loc2, reg0, reg12, reg4,  in vpx_idct16_1d_rows_msa()
      37  ADD4(reg2, loc1, reg14, loc0, reg6, loc3, reg10, loc2, reg2, reg14, reg6,  in vpx_idct16_1d_rows_msa()
      71  reg2 = reg6 + loc0;  in vpx_idct16_1d_rows_msa()
      [all …]
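
The two idct entries above lean on the vpx_dsp MSA macros DOTP_CONST_PAIR and BUTTERFLY_4. For orientation only, here is a scalar sketch of the rotate/butterfly pattern they vectorize; the rounding constant and the output ordering are assumptions drawn from the common vpx_dsp inverse-DCT layout, not copied from the matched files, and the real macros operate on eight int16 lanes at a time.

// Scalar sketch only -- not the MSA code.  DCT_CONST_BITS == 14 is the usual
// vpx_dsp fixed-point precision for the cospi_* constants.
#include <stdint.h>

static int16_t dct_round_shift(int32_t x) {
  return (int16_t)((x + (1 << 13)) >> 14);  // round, then drop 14 fraction bits
}

// Rough counterpart of DOTP_CONST_PAIR(a, b, c0, c1, out0, out1):
// a fixed-point rotation of (a, b) by a cospi_* constant pair.
static void rotate_pair(int16_t a, int16_t b, int16_t c0, int16_t c1,
                        int16_t *out0, int16_t *out1) {
  *out0 = dct_round_shift(a * c0 - b * c1);
  *out1 = dct_round_shift(a * c1 + b * c0);
}

// Rough counterpart of BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3):
// sums in the first half of the outputs, differences in the second.
static void butterfly4(int16_t in0, int16_t in1, int16_t in2, int16_t in3,
                       int16_t *out0, int16_t *out1, int16_t *out2, int16_t *out3) {
  *out0 = in0 + in3;
  *out1 = in1 + in2;
  *out2 = in1 - in2;
  *out3 = in0 - in3;
}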
|
/external/elfutils/tests/ |
D | run-dwarfcfi.sh |
      44  reg6: same_value
      61  reg6: same_value
      78  reg6: undefined
      95  reg6: undefined
     112  reg6: same_value
     129  reg6: undefined
|
D | run-varlocs.sh |
      64  [40051c,40052b) {reg6}
     587  [40106e,401090) {reg6}
|
D | run-addrcfi.sh |
      39  integer reg6 (%esi): same_value
      86  integer reg6 (%esi): same_value
     138  integer reg6 (%rbp): same_value
     204  integer reg6 (%rbp): same_value
     308  integer reg6 (r6): undefined
    1330  integer reg6 (r6): undefined
    2358  integer reg6 (r6): undefined
    3384  integer reg6 (%r6): same_value
    3461  integer reg6 (%r6): same_value
    3539  integer reg6 (r6): same_value
      [all …]
|
D | run-readelf-loc.sh | 1153 [ 0] reg6
|
/external/vixl/src/aarch64/ |
D | registers-aarch64.cc |
     178  const CPURegister& reg6,  in AreAliased() argument
     189  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};  in AreAliased()
     229  const CPURegister& reg6,  in AreSameSizeAndType() argument
     238  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);  in AreSameSizeAndType()
     249  const CPURegister& reg6,  in AreEven() argument
     258  even &= !reg6.IsValid() || ((reg6.GetCode() % 2) == 0);  in AreEven()
|
D | registers-aarch64.h |
     842  const CPURegister& reg6 = NoReg,
     855  const CPURegister& reg6 = NoCPUReg,
     867  const CPURegister& reg6 = NoReg,
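
The VIXL overloads matched above take up to eight CPURegister arguments, with the trailing ones defaulting to NoReg / NoCPUReg. A minimal usage sketch follows, assuming VIXL's usual namespace and include layout; the helper name is invented for illustration.

// Sketch only: validating a register list with the AreAliased() and
// AreSameSizeAndType() overloads shown above.  The include path and the
// helper name are assumptions for illustration.
#include "aarch64/registers-aarch64.h"  // assumes VIXL's src/ directory is on the include path

namespace example {

// Returns true when the three registers are pairwise distinct and share a
// size/type (e.g. all X registers, or all Q registers).
bool UsableTogether(const vixl::aarch64::CPURegister& a,
                    const vixl::aarch64::CPURegister& b,
                    const vixl::aarch64::CPURegister& c) {
  using vixl::aarch64::AreAliased;
  using vixl::aarch64::AreSameSizeAndType;
  // Unused trailing parameters default to NoReg/NoCPUReg, so three arguments
  // are enough even though the overloads accept up to eight.
  return !AreAliased(a, b, c) && AreSameSizeAndType(a, b, c);
}

}  // namespace example

With VIXL's predefined register constants, UsableTogether(x0, x1, x2) should hold, while passing x0 together with w0 would be expected to trip the aliasing check.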
|
/external/libyuv/files/source/ |
D | rotate_msa.cc |
      85  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in TransposeWx16_MSA() local
     109  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeWx16_MSA()
     111  ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);  in TransposeWx16_MSA()
     131  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeWx16_MSA()
     142  res8 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg2);  in TransposeWx16_MSA()
     143  res9 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg2);  in TransposeWx16_MSA()
     166  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in TransposeUVWx16_MSA() local
     190  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeUVWx16_MSA()
     192  ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);  in TransposeUVWx16_MSA()
     212  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeUVWx16_MSA()
      [all …]
|
D | scale_msa.cc |
     141  v8u16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in ScaleARGBRowDownEvenBox_MSA() local
     171  reg6 = (v8u16)__msa_pckod_d((v2i64)reg2, (v2i64)reg0);  in ScaleARGBRowDownEvenBox_MSA()
     173  reg4 += reg6;  in ScaleARGBRowDownEvenBox_MSA()
     669  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in ScaleARGBFilterCols_MSA() local
     707  reg6 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src1);  in ScaleARGBFilterCols_MSA()
     711  tmp2 = __msa_dotp_u_h(reg6, mult2);  in ScaleARGBFilterCols_MSA()
     767  v8i16 reg6, reg7, reg8, reg9, reg10, reg11;  in ScaleRowDown34_0_Box_MSA() local
     808  reg6 = (v8i16)__msa_dotp_u_h(vec6, const0);  in ScaleRowDown34_0_Box_MSA()
     820  reg6 = __msa_srar_h(reg6, shft0);  in ScaleRowDown34_0_Box_MSA()
     826  reg0 = reg0 * 3 + reg6;  in ScaleRowDown34_0_Box_MSA()
      [all …]
|
D | row_msa.cc |
     826  v8u16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9;  in ARGBToUVRow_MSA() local
     898  reg6 = reg0 * const_0x70;  in ARGBToUVRow_MSA()
     902  reg6 += const_0x8080;  in ARGBToUVRow_MSA()
     916  reg6 -= reg8;  in ARGBToUVRow_MSA()
     920  reg6 = (v8u16)__msa_srai_h((v8i16)reg6, 8);  in ARGBToUVRow_MSA()
     924  dst0 = (v16u8)__msa_pckev_b((v16i8)reg7, (v16i8)reg6);  in ARGBToUVRow_MSA()
    1243  v4u32 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in ARGBAttenuateRow_MSA() local
    1272  reg6 = (v4u32)__msa_ilvr_h(zero, (v8i16)vec7);  in ARGBAttenuateRow_MSA()
    1280  reg6 *= (v4u32)__msa_ilvr_h(zero, (v8i16)vec3);  in ARGBAttenuateRow_MSA()
    1288  reg6 = (v4u32)__msa_srai_w((v4i32)reg6, 24);  in ARGBAttenuateRow_MSA()
      [all …]
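
For orientation, the ARGBToUVRow_MSA lines above follow a fixed-point pattern: scale one accumulated channel by a coefficient (0x70 here), add a 0x8080 bias, subtract the other weighted terms, shift the 8 fraction bits away, and pack back to bytes. Below is a scalar model of just those visible steps; how reg0 and reg8 are accumulated is not shown in the matches, so they are plain inputs here. (The same code recurs below under libvpx's bundled libyuv copy.)

// Scalar model of the five ARGBToUVRow_MSA lines matched above, one uint16
// lane at a time.  reg0 and reg8 stand for intermediate weighted sums whose
// derivation is outside the matched lines.
#include <stdint.h>

static uint8_t uv_lane(uint16_t reg0, uint16_t reg8) {
  uint16_t reg6 = (uint16_t)(reg0 * 0x70);   // reg6 = reg0 * const_0x70
  reg6 = (uint16_t)(reg6 + 0x8080);          // reg6 += const_0x8080 (bias into byte range)
  reg6 = (uint16_t)(reg6 - reg8);            // reg6 -= reg8
  int16_t shifted = (int16_t)reg6 >> 8;      // __msa_srai_h(reg6, 8): arithmetic shift on signed halfwords
  return (uint8_t)shifted;                   // __msa_pckev_b keeps the low byte of each lane
}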
|
/external/libvpx/libvpx/third_party/libyuv/source/ |
D | rotate_msa.cc |
      85  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in TransposeWx16_MSA() local
     109  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeWx16_MSA()
     111  ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);  in TransposeWx16_MSA()
     131  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeWx16_MSA()
     142  res8 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg2);  in TransposeWx16_MSA()
     143  res9 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg2);  in TransposeWx16_MSA()
     166  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in TransposeUVWx16_MSA() local
     190  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeUVWx16_MSA()
     192  ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);  in TransposeUVWx16_MSA()
     212  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeUVWx16_MSA()
      [all …]
|
D | scale_msa.cc |
     141  v8u16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in ScaleARGBRowDownEvenBox_MSA() local
     171  reg6 = (v8u16)__msa_pckod_d((v2i64)reg2, (v2i64)reg0);  in ScaleARGBRowDownEvenBox_MSA()
     173  reg4 += reg6;  in ScaleARGBRowDownEvenBox_MSA()
     669  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in ScaleARGBFilterCols_MSA() local
     707  reg6 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src1);  in ScaleARGBFilterCols_MSA()
     711  tmp2 = __msa_dotp_u_h(reg6, mult2);  in ScaleARGBFilterCols_MSA()
     767  v8i16 reg6, reg7, reg8, reg9, reg10, reg11;  in ScaleRowDown34_0_Box_MSA() local
     808  reg6 = (v8i16)__msa_dotp_u_h(vec6, const0);  in ScaleRowDown34_0_Box_MSA()
     820  reg6 = __msa_srar_h(reg6, shft0);  in ScaleRowDown34_0_Box_MSA()
     826  reg0 = reg0 * 3 + reg6;  in ScaleRowDown34_0_Box_MSA()
      [all …]
|
D | row_msa.cc |
     826  v8u16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9;  in ARGBToUVRow_MSA() local
     898  reg6 = reg0 * const_0x70;  in ARGBToUVRow_MSA()
     902  reg6 += const_0x8080;  in ARGBToUVRow_MSA()
     916  reg6 -= reg8;  in ARGBToUVRow_MSA()
     920  reg6 = (v8u16)__msa_srai_h((v8i16)reg6, 8);  in ARGBToUVRow_MSA()
     924  dst0 = (v16u8)__msa_pckev_b((v16i8)reg7, (v16i8)reg6);  in ARGBToUVRow_MSA()
    1243  v4u32 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in ARGBAttenuateRow_MSA() local
    1272  reg6 = (v4u32)__msa_ilvr_h(zero, (v8i16)vec7);  in ARGBAttenuateRow_MSA()
    1280  reg6 *= (v4u32)__msa_ilvr_h(zero, (v8i16)vec3);  in ARGBAttenuateRow_MSA()
    1288  reg6 = (v4u32)__msa_srai_w((v4i32)reg6, 24);  in ARGBAttenuateRow_MSA()
      [all …]
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | pv.ll |
       6  …eg3, <4 x float> inreg %reg4, <4 x float> inreg %reg5, <4 x float> inreg %reg6, <4 x float> inreg …
      28  %20 = extractelement <4 x float> %reg6, i32 0
      29  %21 = extractelement <4 x float> %reg6, i32 1
      30  %22 = extractelement <4 x float> %reg6, i32 2
      31  %23 = extractelement <4 x float> %reg6, i32 3
|
D | big_alu.ll |
       5  …eg3, <4 x float> inreg %reg4, <4 x float> inreg %reg5, <4 x float> inreg %reg6, <4 x float> inreg …
      45  %tmp38 = extractelement <4 x float> %reg6, i32 0
      46  %tmp39 = extractelement <4 x float> %reg6, i32 1
      47  %tmp40 = extractelement <4 x float> %reg6, i32 2
      48  %tmp41 = extractelement <4 x float> %reg6, i32 3
|
/external/llvm-project/llvm/test/tools/llvm-readobj/ELF/ |
D | unwind.test |
     115  # CHECK-NEXT: DW_CFA_offset: reg6 -16
     117  # CHECK-NEXT: DW_CFA_def_cfa_register: reg6
     132  # CHECK-NEXT: DW_CFA_expression: reg6 DW_OP_breg6 +0
     151  # CHECK-NEXT: DW_CFA_offset: reg6 -16
     153  # CHECK-NEXT: DW_CFA_def_cfa_register: reg6
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | pv.ll |
       5  …eg3, <4 x float> inreg %reg4, <4 x float> inreg %reg5, <4 x float> inreg %reg6, <4 x float> inreg …
      27  %tmp32 = extractelement <4 x float> %reg6, i32 0
      28  %tmp33 = extractelement <4 x float> %reg6, i32 1
      29  %tmp34 = extractelement <4 x float> %reg6, i32 2
      30  %tmp35 = extractelement <4 x float> %reg6, i32 3
|
D | big_alu.ll |
       5  …eg3, <4 x float> inreg %reg4, <4 x float> inreg %reg5, <4 x float> inreg %reg6, <4 x float> inreg …
      45  %tmp38 = extractelement <4 x float> %reg6, i32 0
      46  %tmp39 = extractelement <4 x float> %reg6, i32 1
      47  %tmp40 = extractelement <4 x float> %reg6, i32 2
      48  %tmp41 = extractelement <4 x float> %reg6, i32 3
|
/external/libaom/libaom/av1/common/arm/ |
D | convolve_neon.c |
    1168  int16x4_t reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9,  in av1_convolve_2d_sr_neon() local
    1187  reg6 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t2)));  in av1_convolve_2d_sr_neon()
    1211  d0 = convolve8_4x4(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7,  in av1_convolve_2d_sr_neon()
    1214  d1 = convolve8_4x4(reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8,  in av1_convolve_2d_sr_neon()
    1217  d2 = convolve8_4x4(reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9,  in av1_convolve_2d_sr_neon()
    1220  d3 = convolve8_4x4(reg3, reg4, reg5, reg6, reg7, reg8, reg9, reg10,  in av1_convolve_2d_sr_neon()
    1223  d4 = convolve8_4x4(reg4, reg5, reg6, reg7, reg8, reg9, reg10, reg11,  in av1_convolve_2d_sr_neon()
    1226  d5 = convolve8_4x4(reg5, reg6, reg7, reg8, reg9, reg10, reg11, reg12,  in av1_convolve_2d_sr_neon()
    1229  d6 = convolve8_4x4(reg6, reg7, reg8, reg9, reg10, reg11, reg12, reg13,  in av1_convolve_2d_sr_neon()
    1258  reg6 = reg14;  in av1_convolve_2d_sr_neon()
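
The av1_convolve_2d_sr_neon matches above show the classic vertical-tap pattern: eight row registers feed an 8-tap filter, each successive output row shifts the window by one register, and at the end of the loop the oldest registers are recycled (reg6 = reg14). Below is a scalar sketch of that sliding-window idea only; filter8() and its rounding are placeholders, not libaom's convolve8_4x4(), and the sketch assumes the caller provides height + 7 input rows.

// Sliding-window sketch of the vertical pass visible above.
#include <stdint.h>

static int filter8(const int16_t s[8], const int16_t taps[8]) {
  int32_t sum = 0;
  for (int k = 0; k < 8; ++k) sum += s[k] * taps[k];
  return (int)((sum + 64) >> 7);  // placeholder rounding
}

static void vertical_8tap(const int16_t *src, int stride, int16_t *dst,
                          int height, const int16_t taps[8]) {
  int16_t win[8];  // reg0..reg7 in the NEON code (whole rows there, one sample here)
  for (int k = 0; k < 8; ++k) win[k] = src[k * stride];
  for (int y = 0; y < height; ++y) {
    dst[y] = (int16_t)filter8(win, taps);
    // Slide the window: drop the oldest row, append the next one
    // (the NEON loop does the same shuffle with reg6 = reg14 and friends).
    for (int k = 0; k < 7; ++k) win[k] = win[k + 1];
    win[7] = src[(y + 8) * stride];
  }
}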
|
/external/llvm/test/CodeGen/ARM/ |
D | fast-isel-pic.ll | 58 ; ARMv7-ELF: ldr r[[reg6:[0-9]+]], [pc, r[[reg5]]]
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | fast-isel-pic.ll | 58 ; ARMv7-ELF: ldr r[[reg6:[0-9]+]], [pc, r[[reg5]]]
|
/external/llvm/include/llvm/Support/ |
D | Dwarf.def | 207 HANDLE_DW_OP(0x56, reg6)
|
/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/BinaryFormat/ |
D | Dwarf.def | 561 HANDLE_DW_OP(0x56, reg6, 2, DWARF)
|
/external/llvm-project/llvm/include/llvm/BinaryFormat/ |
D | Dwarf.def | 579 HANDLE_DW_OP(0x56, reg6, 2, DWARF)
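
The Dwarf.def rows above define DWARF operation 0x56 as DW_OP_reg6 ("the value lives in register 6"); the older LLVM copy uses the two-argument form of HANDLE_DW_OP, the newer ones add version and vendor columns. Which physical register "reg6" names depends on the target's DWARF numbering, as the run-addrcfi.sh output earlier in this listing shows (%esi on i386, %rbp on x86-64, r6 elsewhere). A sketch of how such a .def file is typically consumed through the X-macro pattern follows; the enum name is illustrative, not LLVM's own.

// X-macro sketch: expand each HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) row of
// the newer Dwarf.def into an enumerator.  Dwarf.def treats any HANDLE_DW_*
// macro the includer leaves undefined as a no-op and #undefs them afterwards,
// so defining just this one is enough.  This mirrors what llvm's Dwarf.h
// does, but ExampleDwarfOp is an illustrative name.
enum ExampleDwarfOp : unsigned {
#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) DW_OP_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
};

static_assert(DW_OP_reg6 == 0x56, "matches the HANDLE_DW_OP(0x56, reg6, ...) rows above");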
|