/external/libvpx/libvpx/vpx_dsp/mips/
D | idct32x32_msa.c |
    45  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in idct32x8_row_even_process_store() local
    49  LD_SH8(tmp_buf, 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);  in idct32x8_row_even_process_store()
    51  DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);  in idct32x8_row_even_process_store()
    53  BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);  in idct32x8_row_even_process_store()
    66  LD_SH8((tmp_buf + 16), 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);  in idct32x8_row_even_process_store()
    67  DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);  in idct32x8_row_even_process_store()
    78  reg5 = reg7 + reg3;  in idct32x8_row_even_process_store()
    79  reg7 = reg7 - reg3;  in idct32x8_row_even_process_store()
    88  DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);  in idct32x8_row_even_process_store()
    93  vec1 = reg7 - reg1;  in idct32x8_row_even_process_store()
    [all …]

D | idct16x16_msa.c |
    17  v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;  in vpx_idct16_1d_rows_msa() local
    20  LD_SH8(input, 16, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);  in vpx_idct16_1d_rows_msa()
    24  TRANSPOSE8x8_SH_SH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg0, reg1,  in vpx_idct16_1d_rows_msa()
    25                     reg2, reg3, reg4, reg5, reg6, reg7);  in vpx_idct16_1d_rows_msa()
    42  DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3);  in vpx_idct16_1d_rows_msa()
    46  reg7 = reg15 - loc3;  in vpx_idct16_1d_rows_msa()
    66  DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);  in vpx_idct16_1d_rows_msa()
    74  loc0 = reg7 + reg11;  in vpx_idct16_1d_rows_msa()
    75  reg11 = reg7 - reg11;  in vpx_idct16_1d_rows_msa()
    87  BUTTERFLY_4(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5);  in vpx_idct16_1d_rows_msa()
    [all …]

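Both idct hits above lean on the same two MSA primitives: DOTP_CONST_PAIR performs one rounded cosine rotation and BUTTERFLY_4 one add/subtract stage. A scalar sketch of their assumed per-element behaviour, taken from the C reference idct in vpx_dsp rather than from the MSA macro definitions themselves:

#include <stdint.h>

#define DCT_CONST_BITS 14

/* Rounded right shift, as in vpx's dct_const_round_shift(). */
static int16_t round_shift(int32_t v) {
  return (int16_t)((v + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}

/* DOTP_CONST_PAIR(in0, in1, c0, c1, out0, out1): one cosine rotation. */
static void dotp_const_pair(int16_t in0, int16_t in1, int16_t c0, int16_t c1,
                            int16_t *out0, int16_t *out1) {
  *out0 = round_shift((int32_t)in0 * c0 - (int32_t)in1 * c1);
  *out1 = round_shift((int32_t)in0 * c1 + (int32_t)in1 * c0);
}

/* BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3): sums first,
 * then differences, outermost input pair first. */
static void butterfly_4(int16_t in0, int16_t in1, int16_t in2, int16_t in3,
                        int16_t *out0, int16_t *out1, int16_t *out2,
                        int16_t *out3) {
  *out0 = in0 + in3;
  *out1 = in1 + in2;
  *out2 = in1 - in2;
  *out3 = in0 - in3;
}
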
/external/elfutils/tests/ |
D | run-dwarfcfi.sh |
    45  reg7: same_value
    62  reg7: location expression: call_frame_cfa stack_value
    79  reg7: undefined
    96  reg7: undefined
    113  reg7: same_value
    130  reg7: undefined

D | run-addrcfi.sh |
    40  integer reg7 (%edi): same_value
    87  integer reg7 (%edi): same_value
    139  integer reg7 (%rsp): location expression: call_frame_cfa stack_value
    205  integer reg7 (%rsp): location expression: call_frame_cfa stack_value
    309  integer reg7 (r7): undefined
    1331  integer reg7 (r7): undefined
    2359  integer reg7 (r7): undefined
    3385  integer reg7 (%r7): same_value
    3462  integer reg7 (%r7): same_value
    3540  integer reg7 (r7): same_value
    [all …]

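These expected outputs cover the three CFI states the tests print for a register: same_value (the caller's value is preserved), undefined (not recoverable), and a DWARF location expression. "call_frame_cfa stack_value" means the register's value is the CFA itself, which is how %rsp is recovered on x86-64. A minimal sketch of how a harness could classify a register with elfutils' dwarf_frame_register(), assuming the documented libdw convention that nops == 0 with ops pointing at ops_mem means same_value, and nops == 0 with ops == NULL means undefined:

#include <stdio.h>
#include <elfutils/libdw.h>

void classify_register(Dwarf_Frame *frame, int regno) {
  Dwarf_Op ops_mem[3];
  Dwarf_Op *ops;
  size_t nops;

  if (dwarf_frame_register(frame, regno, ops_mem, &ops, &nops) < 0)
    printf("reg%d: error: %s\n", regno, dwarf_errmsg(-1));
  else if (nops == 0 && ops == ops_mem)
    printf("reg%d: same_value\n", regno);   /* caller's value preserved */
  else if (nops == 0 && ops == NULL)
    printf("reg%d: undefined\n", regno);    /* value not recoverable */
  else
    printf("reg%d: location expression (%zu ops)\n", regno, nops);
}
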
/external/vixl/src/aarch64/ |
D | registers-aarch64.cc |
    179  const CPURegister& reg7,  in AreAliased() argument
    189  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};  in AreAliased()
    230  const CPURegister& reg7,  in AreSameSizeAndType() argument
    239  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);  in AreSameSizeAndType()
    250  const CPURegister& reg7,  in AreEven() argument
    259  even &= !reg7.IsValid() || ((reg7.GetCode() % 2) == 0);  in AreEven()

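All three vixl helpers share one shape: up to eight optional register arguments, invalid (NoReg) slots skipped, and a predicate folded across the rest. A C sketch of that pattern with hypothetical names, not vixl's implementation (which works on CPURegister objects, as the hits show):

#include <stdbool.h>
#include <stdint.h>

#define INVALID_REG (-1)   /* stand-in for a NoReg / !IsValid() slot */

/* Any two valid registers sharing a code alias each other.
 * Register codes are assumed to be < 64 so a bitmask suffices. */
bool are_aliased(const int codes[], int n) {
  uint64_t seen = 0;
  for (int i = 0; i < n; i++) {
    if (codes[i] == INVALID_REG) continue;
    uint64_t bit = UINT64_C(1) << codes[i];
    if (seen & bit) return true;   /* same code seen twice */
    seen |= bit;
  }
  return false;
}

/* Every valid register must have an even code, mirroring
 * `even &= !reg.IsValid() || ((reg.GetCode() % 2) == 0)`. */
bool are_even(const int codes[], int n) {
  bool even = true;
  for (int i = 0; i < n; i++)
    even &= (codes[i] == INVALID_REG) || (codes[i] % 2 == 0);
  return even;
}
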
D | registers-aarch64.h |
    843  const CPURegister& reg7 = NoReg,
    856  const CPURegister& reg7 = NoCPUReg,
    868  const CPURegister& reg7 = NoReg,

/external/libyuv/files/source/ |
D | rotate_msa.cc |
    85  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in TransposeWx16_MSA() local
    109  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeWx16_MSA()
    111  ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);  in TransposeWx16_MSA()
    131  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeWx16_MSA()
    147  res8 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg3);  in TransposeWx16_MSA()
    148  res9 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg3);  in TransposeWx16_MSA()
    166  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in TransposeUVWx16_MSA() local
    190  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeUVWx16_MSA()
    192  ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);  in TransposeUVWx16_MSA()
    212  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeUVWx16_MSA()
    [all …]

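The transpose here is built from interleave steps: each ILVRL_* macro emits both an __msa_ilvr_* (interleave the right, i.e. low, halves of two vectors) and an __msa_ilvl_* (the left, i.e. high, halves). A scalar sketch of the 32-bit variants, assuming the usual MSA lane order in which the first operand supplies the odd result lanes; the same sources appear again below, vendored under /external/libvpx/libvpx/third_party/libyuv/source/:

#include <stdint.h>

/* __msa_ilvr_w(a, b): interleave the low halves, b[0], a[0], b[1], a[1]. */
void ilvr_w(const uint32_t a[4], const uint32_t b[4], uint32_t out[4]) {
  out[0] = b[0]; out[1] = a[0]; out[2] = b[1]; out[3] = a[1];
}

/* __msa_ilvl_w(a, b): interleave the high halves, b[2], a[2], b[3], a[3]. */
void ilvl_w(const uint32_t a[4], const uint32_t b[4], uint32_t out[4]) {
  out[0] = b[2]; out[1] = a[2]; out[2] = b[3]; out[3] = a[3];
}
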
D | scale_msa.cc |
    141  v8u16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in ScaleARGBRowDownEvenBox_MSA() local
    172  reg7 = (v8u16)__msa_pckod_d((v2i64)reg3, (v2i64)reg1);  in ScaleARGBRowDownEvenBox_MSA()
    174  reg5 += reg7;  in ScaleARGBRowDownEvenBox_MSA()
    669  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in ScaleARGBFilterCols_MSA() local
    708  reg7 = (v16u8)__msa_ilvl_b((v16i8)src3, (v16i8)src1);  in ScaleARGBFilterCols_MSA()
    712  tmp3 = __msa_dotp_u_h(reg7, mult3);  in ScaleARGBFilterCols_MSA()
    767  v8i16 reg6, reg7, reg8, reg9, reg10, reg11;  in ScaleRowDown34_0_Box_MSA() local
    809  reg7 = (v8i16)__msa_dotp_u_h(vec7, const1);  in ScaleRowDown34_0_Box_MSA()
    821  reg7 = __msa_srar_h(reg7, shft1);  in ScaleRowDown34_0_Box_MSA()
    827  reg1 = reg1 * 3 + reg7;  in ScaleRowDown34_0_Box_MSA()
    [all …]

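In ScaleARGBRowDownEvenBox the pckev/pckod shuffles separate even and odd pixels so that plain vector adds like `reg5 += reg7` accumulate a 2x2 box sum per channel. Per output sample that is the rounded average below, a sketch with the +2 rounding term assumed from libyuv's C reference:

#include <stdint.h>

/* 2x2 box filter for one channel: average four neighbours, rounded. */
uint8_t box2x2(uint8_t p00, uint8_t p01, uint8_t p10, uint8_t p11) {
  return (uint8_t)((p00 + p01 + p10 + p11 + 2) >> 2);
}

The ScaleARGBFilterCols and ScaleRowDown34_0_Box hits follow the same scheme with unequal weights: __msa_dotp_u_h applies the per-pixel filter weights (and `reg1 = reg1 * 3 + reg7` is a 3:1 blend), while __msa_srar_h supplies the rounding shift.
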
D | row_msa.cc |
    826  v8u16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9;  in ARGBToUVRow_MSA() local
    899  reg7 = reg1 * const_0x70;  in ARGBToUVRow_MSA()
    903  reg7 += const_0x8080;  in ARGBToUVRow_MSA()
    917  reg7 -= reg9;  in ARGBToUVRow_MSA()
    921  reg7 = (v8u16)__msa_srai_h((v8i16)reg7, 8);  in ARGBToUVRow_MSA()
    924  dst0 = (v16u8)__msa_pckev_b((v16i8)reg7, (v16i8)reg6);  in ARGBToUVRow_MSA()
    1243  v4u32 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in ARGBAttenuateRow_MSA() local
    1273  reg7 = (v4u32)__msa_ilvl_h(zero, (v8i16)vec7);  in ARGBAttenuateRow_MSA()
    1281  reg7 *= (v4u32)__msa_ilvl_h(zero, (v8i16)vec3);  in ARGBAttenuateRow_MSA()
    1289  reg7 = (v4u32)__msa_srai_w((v4i32)reg7, 24);  in ARGBAttenuateRow_MSA()
    [all …]

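The ARGBToUVRow sequence is 8.8 fixed point: scale one channel by const_0x70 (112), add the 0x8080 bias (which centres the result on 128 and rounds), subtract the other weighted channels, shift right by 8, then pack. A scalar sketch, assuming the BT.601 studio-range coefficients that libyuv's C reference uses:

#include <stdint.h>

/* U and V for one pixel, 8.8 fixed point with a 128 bias. */
int rgb_to_u(uint8_t r, uint8_t g, uint8_t b) {
  return (112 * b - 74 * g - 38 * r + 0x8080) >> 8;
}

int rgb_to_v(uint8_t r, uint8_t g, uint8_t b) {
  return (112 * r - 94 * g - 18 * b + 0x8080) >> 8;
}

The ARGBAttenuateRow hits widen to 32 bits for the same reason: the per-channel colour-times-alpha product must fit before the final arithmetic shift narrows it back down.
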
/external/libvpx/libvpx/third_party/libyuv/source/ |
D | rotate_msa.cc |
    85  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in TransposeWx16_MSA() local
    109  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeWx16_MSA()
    111  ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);  in TransposeWx16_MSA()
    131  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeWx16_MSA()
    147  res8 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg3);  in TransposeWx16_MSA()
    148  res9 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg3);  in TransposeWx16_MSA()
    166  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in TransposeUVWx16_MSA() local
    190  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeUVWx16_MSA()
    192  ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);  in TransposeUVWx16_MSA()
    212  ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);  in TransposeUVWx16_MSA()
    [all …]

D | scale_msa.cc |
    141  v8u16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in ScaleARGBRowDownEvenBox_MSA() local
    172  reg7 = (v8u16)__msa_pckod_d((v2i64)reg3, (v2i64)reg1);  in ScaleARGBRowDownEvenBox_MSA()
    174  reg5 += reg7;  in ScaleARGBRowDownEvenBox_MSA()
    669  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in ScaleARGBFilterCols_MSA() local
    708  reg7 = (v16u8)__msa_ilvl_b((v16i8)src3, (v16i8)src1);  in ScaleARGBFilterCols_MSA()
    712  tmp3 = __msa_dotp_u_h(reg7, mult3);  in ScaleARGBFilterCols_MSA()
    767  v8i16 reg6, reg7, reg8, reg9, reg10, reg11;  in ScaleRowDown34_0_Box_MSA() local
    809  reg7 = (v8i16)__msa_dotp_u_h(vec7, const1);  in ScaleRowDown34_0_Box_MSA()
    821  reg7 = __msa_srar_h(reg7, shft1);  in ScaleRowDown34_0_Box_MSA()
    827  reg1 = reg1 * 3 + reg7;  in ScaleRowDown34_0_Box_MSA()
    [all …]

D | row_msa.cc |
    826  v8u16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9;  in ARGBToUVRow_MSA() local
    899  reg7 = reg1 * const_0x70;  in ARGBToUVRow_MSA()
    903  reg7 += const_0x8080;  in ARGBToUVRow_MSA()
    917  reg7 -= reg9;  in ARGBToUVRow_MSA()
    921  reg7 = (v8u16)__msa_srai_h((v8i16)reg7, 8);  in ARGBToUVRow_MSA()
    924  dst0 = (v16u8)__msa_pckev_b((v16i8)reg7, (v16i8)reg6);  in ARGBToUVRow_MSA()
    1243  v4u32 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;  in ARGBAttenuateRow_MSA() local
    1273  reg7 = (v4u32)__msa_ilvl_h(zero, (v8i16)vec7);  in ARGBAttenuateRow_MSA()
    1281  reg7 *= (v4u32)__msa_ilvl_h(zero, (v8i16)vec3);  in ARGBAttenuateRow_MSA()
    1289  reg7 = (v4u32)__msa_srai_w((v4i32)reg7, 24);  in ARGBAttenuateRow_MSA()
    [all …]

/external/llvm/test/CodeGen/AMDGPU/ |
D | pv.ll |
    6  … x float> inreg %reg4, <4 x float> inreg %reg5, <4 x float> inreg %reg6, <4 x float> inreg %reg7) {
    32  %24 = extractelement <4 x float> %reg7, i32 0
    33  %25 = extractelement <4 x float> %reg7, i32 1
    34  %26 = extractelement <4 x float> %reg7, i32 2
    35  %27 = extractelement <4 x float> %reg7, i32 3

D | big_alu.ll |
    5  …eg4, <4 x float> inreg %reg5, <4 x float> inreg %reg6, <4 x float> inreg %reg7, <4 x float> inreg …
    49  %tmp42 = extractelement <4 x float> %reg7, i32 0
    50  %tmp43 = extractelement <4 x float> %reg7, i32 1
    51  %tmp44 = extractelement <4 x float> %reg7, i32 2
    52  %tmp45 = extractelement <4 x float> %reg7, i32 3

/external/llvm-project/llvm/test/tools/llvm-readobj/ELF/ |
D | unwind.test |
    63  # CHECK-NEXT: DW_CFA_def_cfa: reg7 +8
    88  # CHECK-NEXT: DW_CFA_def_cfa: reg7 +8
    119  # CHECK-NEXT: DW_CFA_def_cfa: reg7 +8
    138  # CHECK-NEXT: DW_CFA_def_cfa: reg7 +8
    164  # CHECK-NEXT: DW_CFA_def_cfa: reg7 +8

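Every one of these CIEs encodes the standard x86-64 entry state: DWARF register 7 is %rsp, so `DW_CFA_def_cfa: reg7 +8` defines the canonical frame address as the stack pointer plus 8, i.e. the value %rsp held before the call pushed the return address. A hypothetical unwinder step built on that rule; the cfa - 8 slot for the return address comes from the usual companion rule for the return-address column:

#include <stdint.h>

/* Recover the return address at function entry on x86-64. */
uint64_t recover_return_address(uint64_t rsp) {
  uint64_t cfa = rsp + 8;  /* DW_CFA_def_cfa: reg7 +8 */
  const uint64_t *slot = (const uint64_t *)(uintptr_t)(cfa - 8);
  return *slot;            /* the saved %rip sits just below the CFA */
}
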
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | pv.ll |
    5  … x float> inreg %reg4, <4 x float> inreg %reg5, <4 x float> inreg %reg6, <4 x float> inreg %reg7) {
    31  %tmp36 = extractelement <4 x float> %reg7, i32 0
    32  %tmp37 = extractelement <4 x float> %reg7, i32 1
    33  %tmp38 = extractelement <4 x float> %reg7, i32 2
    34  %tmp39 = extractelement <4 x float> %reg7, i32 3

D | big_alu.ll |
    5  …eg4, <4 x float> inreg %reg5, <4 x float> inreg %reg6, <4 x float> inreg %reg7, <4 x float> inreg …
    49  %tmp42 = extractelement <4 x float> %reg7, i32 0
    50  %tmp43 = extractelement <4 x float> %reg7, i32 1
    51  %tmp44 = extractelement <4 x float> %reg7, i32 2
    52  %tmp45 = extractelement <4 x float> %reg7, i32 3

/external/libaom/libaom/av1/common/arm/ |
D | convolve_neon.c |
    1168  int16x4_t reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9,  in av1_convolve_2d_sr_neon() local
    1202  reg7 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));  in av1_convolve_2d_sr_neon()
    1211  d0 = convolve8_4x4(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7,  in av1_convolve_2d_sr_neon()
    1214  d1 = convolve8_4x4(reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8,  in av1_convolve_2d_sr_neon()
    1217  d2 = convolve8_4x4(reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9,  in av1_convolve_2d_sr_neon()
    1220  d3 = convolve8_4x4(reg3, reg4, reg5, reg6, reg7, reg8, reg9, reg10,  in av1_convolve_2d_sr_neon()
    1223  d4 = convolve8_4x4(reg4, reg5, reg6, reg7, reg8, reg9, reg10, reg11,  in av1_convolve_2d_sr_neon()
    1226  d5 = convolve8_4x4(reg5, reg6, reg7, reg8, reg9, reg10, reg11, reg12,  in av1_convolve_2d_sr_neon()
    1229  d6 = convolve8_4x4(reg6, reg7, reg8, reg9, reg10, reg11, reg12, reg13,  in av1_convolve_2d_sr_neon()
    1232  d7 = convolve8_4x4(reg7, reg8, reg9, reg10, reg11, reg12, reg13, reg14,  in av1_convolve_2d_sr_neon()

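The staircase of convolve8_4x4 calls is a classic sliding window: each output row shifts the eight-row source window down by one, so seven of the eight inputs are reused between consecutive outputs. A scalar sketch of the vertical pass with hypothetical helper names, not av1's convolve8_4x4:

#include <stdint.h>

/* One 8-tap dot product; the real code also rounds and narrows. */
int32_t convolve8(const int16_t s[8], const int16_t filter[8]) {
  int32_t sum = 0;
  for (int k = 0; k < 8; k++)
    sum += (int32_t)s[k] * filter[k];
  return sum;
}

/* Vertical pass for one column: h outputs need h + 7 input rows,
 * and each step just advances the window by one row. */
void convolve_column(const int16_t *src, const int16_t filter[8],
                     int32_t *dst, int h) {
  for (int y = 0; y < h; y++)
    dst[y] = convolve8(&src[y], filter);
}
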
/external/llvm-project/llvm/test/Transforms/NewGVN/ |
D | pr33367.ll |
    98  %reg7 = load i64, i64* %preg7, align 8, !tbaa !26
    101  %add2c279 = add i64 %reg7, %reg4

/external/llvm/include/llvm/Support/ |
D | Dwarf.def |
    208  HANDLE_DW_OP(0x57, reg7)

/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/BinaryFormat/ |
D | Dwarf.def |
    562  HANDLE_DW_OP(0x57, reg7, 2, DWARF)

/external/llvm-project/llvm/include/llvm/BinaryFormat/ |
D | Dwarf.def |
    580  HANDLE_DW_OP(0x57, reg7, 2, DWARF)

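All three Dwarf.def hits define DW_OP_reg7, opcode 0x57, the DWARF expression operator that says a value lives in register 7; the two extra arguments in the newer trees record the DWARF version that introduced the opcode and its vendor. The .def file is meant to be consumed through the X-macro pattern, where the includer defines HANDLE_DW_OP before including it. A minimal sketch that inlines the single entry instead of including the real file:

#include <stdio.h>

enum LocationAtom {
#define HANDLE_DW_OP(ID, NAME) DW_OP_##NAME = ID,
  HANDLE_DW_OP(0x57, reg7)  /* normally: #include "llvm/BinaryFormat/Dwarf.def" */
#undef HANDLE_DW_OP
};

int main(void) {
  printf("DW_OP_reg7 = 0x%x\n", DW_OP_reg7);  /* prints 0x57 */
  return 0;
}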