/external/libxaac/decoder/armv7/ |
D | ixheaacd_tns_ar_filter_fixed.s |
    164  VEXT.32 Q6, Q7, Q6, #3
    186  VEXT.32 Q11, Q6, Q11, #3
    203  VEXT.32 Q6, Q7, Q6, #3
    230  VEXT.32 Q12, Q11, Q12, #3
    234  VEXT.32 Q11, Q6, Q11, #3
    250  VEXT.32 Q6, Q7, Q6, #3
    275  VEXT.32 Q14, Q12, Q14, #3
    277  VEXT.32 Q12, Q11, Q12, #3
    281  VEXT.32 Q11, Q6, Q11, #3
    297  VEXT.32 Q6, Q7, Q6, #3
    [all …]
|
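In the TNS AR (all-pole) filter above, the chained VEXT.32 ...,#3 instructions shift a delay line that is spread across several Q registers by one 32-bit sample per step: each VEXT prepends the last lane of the neighbouring register to the first three lanes of its own. A minimal sketch of the idiom in intrinsics follows; the register mapping, flow direction and function name are assumptions for illustration, not taken from the source.

    #include <arm_neon.h>

    /* Sketch of the state-shift idiom behind the VEXT.32 ...,#3 chain:
     * the filter keeps its delay line in several int32x4_t values, and
     * vextq_s32(hi, lo, 3) = { hi[3], lo[0], lo[1], lo[2] }, i.e. it
     * pulls the newest lane of the neighbouring register in front.
     */
    static inline void shift_state(int32x4_t *s0, int32x4_t *s1,
                                   int32x4_t *s2, int32_t new_sample)
    {
        *s2 = vextq_s32(*s1, *s2, 3);            /* s2 takes s1's last lane */
        *s1 = vextq_s32(*s0, *s1, 3);            /* s1 takes s0's last lane */
        *s0 = vextq_s32(vdupq_n_s32(new_sample), /* s0 takes the new input  */
                        *s0, 3);
    }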
/external/libhevc/common/arm/ |
D | ihevc_sao_edge_offset_class0.s |
    163  …VEXT.8 Q7,Q7,Q6,#15  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, …
    179  …VEXT.8 Q14,Q14,Q13,#15  @II Iteration pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, …
    187  …VEXT.8 Q7,Q6,Q7,#1  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, …
    199  …VEXT.8 Q14,Q13,Q14,#1  @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tm…
    305  …VEXT.8 Q7,Q7,Q6,#15  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, …
    313  …VEXT.8 Q7,Q6,Q7,#1  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, …
    328  VEXT.8 Q10,Q10,Q11,#15  @sign_left = vextq_s8(sign_left, sign_left, 15)
|
D | ihevc_sao_edge_offset_class0_chroma.s |
    170  …VEXT.8 Q7,Q7,Q6,#14  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, …
    185  …VEXT.8 Q14,Q14,Q15,#14  @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_ro…
    198  …VEXT.8 Q7,Q6,Q7,#2  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, …
    209  …VEXT.8 Q14,Q15,Q14,#2  @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tm…
    333  …VEXT.8 Q7,Q7,Q6,#14  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, …
    348  …VEXT.8 Q14,Q14,Q15,#14  @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_ro…
    359  …VEXT.8 Q7,Q6,Q7,#2  @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, …
    374  …VEXT.8 Q14,Q15,Q14,#2  @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tm…
|
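The #15/#1 (luma) and #14/#2 (chroma) offsets in the two class-0 listings above implement horizontal neighbour access: vextq_u8(tmp, cur, 15) yields the row shifted right one byte with the left-edge pixel pulled in, and vextq_u8(cur, tmp, 1) the row shifted left with the right-edge pixel appended; the chroma variant shifts two bytes because Cb and Cr samples are interleaved. A minimal sketch in intrinsics, with illustrative names (sao_sign, sao_neighbours are not the library's):

    #include <arm_neon.h>

    /* sign(a - b) per lane, as +1 / 0 / -1, without overflow. */
    static inline int8x16_t sao_sign(uint8x16_t a, uint8x16_t b)
    {
        uint8x16_t gt = vshrq_n_u8(vcgtq_u8(a, b), 7);  /* 1 where a > b */
        uint8x16_t lt = vshrq_n_u8(vcltq_u8(a, b), 7);  /* 1 where a < b */
        return vsubq_s8(vreinterpretq_s8_u8(gt), vreinterpretq_s8_u8(lt));
    }

    /* Left and right neighbours of the 16 pixels in cur_row (luma case). */
    static inline void sao_neighbours(uint8x16_t cur_row, uint8_t left_px,
                                      uint8_t right_px,
                                      uint8x16_t *left, uint8x16_t *right)
    {
        uint8x16_t l = vsetq_lane_u8(left_px, vdupq_n_u8(0), 15);
        uint8x16_t r = vsetq_lane_u8(right_px, vdupq_n_u8(0), 0);
        *left  = vextq_u8(l, cur_row, 15);  /* { left_px, cur[0..14] }  */
        *right = vextq_u8(cur_row, r, 1);   /* { cur[1..15], right_px } */
    }

sao_sign reproduces the SIGN(a - b) classification the edge-offset classes are built from; the edge index per pixel is then 2 + sign(cur - left) + sign(cur - right).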
D | ihevc_sao_edge_offset_class2.s |
    286  …VEXT.8 Q9,Q8,Q9,#1  @I pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_…
    316  VEXT.8 Q7,Q7,Q7,#15  @I sign_up = vextq_s8(sign_up, sign_up, 15)
    358  …VEXT.8 Q11,Q8,Q14,#1  @II pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row…
    366  …VEXT.8 Q9,Q15,Q9,#1  @III pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_ro…
    396  VEXT.8 Q7,Q7,Q7,#15  @II sign_up = vextq_s8(sign_up, sign_up, 15)
    411  VEXT.8 Q7,Q7,Q7,#15  @III sign_up = vextq_s8(sign_up, sign_up, 15)
    467  …VEXT.8 Q9,Q8,Q9,#1  @pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tm…
    486  VEXT.8 Q7,Q7,Q7,#15  @sign_up = vextq_s8(sign_up, sign_up, 15)
    596  …VEXT.8 Q9,Q8,Q9,#1  @pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tm…
    628  VEXT.8 Q7,Q7,Q7,#15  @sign_up = vextq_s8(sign_up, sign_up, 15)
    [all …]
|
D | ihevc_sao_edge_offset_class3.s |
    301  …VEXT.8 Q9,Q9,Q8,#15  @I pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_…
    326  VEXT.8 Q7,Q7,Q7,#1  @I sign_up = vextq_s8(sign_up, sign_up, 1)
    379  …VEXT.8 Q9,Q9,Q8,#15  @II pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next…
    413  …VEXT.8 Q9,Q9,Q15,#15  @III pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_nex…
    417  VEXT.8 Q7,Q7,Q7,#1  @II sign_up = vextq_s8(sign_up, sign_up, 1)
    440  VEXT.8 Q7,Q7,Q7,#1  @III sign_up = vextq_s8(sign_up, sign_up, 1)
    504  …VEXT.8 Q9,Q9,Q8,#15  @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_ro…
    636  …VEXT.8 Q9,Q9,Q8,#15  @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_ro…
    669  VEXT.8 Q7,Q7,Q7,#1  @sign_up = vextq_s8(sign_up, sign_up, 1)
    778  …VEXT.8 Q9,Q9,Q8,#15  @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_ro…
    [all …]
|
D | ihevc_sao_edge_offset_class3_chroma.s |
    375  …VEXT.8 Q9,Q9,Q8,#14  @I pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_…
    415  VEXT.8 Q7,Q7,Q7,#2  @I sign_up = vextq_s8(sign_up, sign_up, 2)
    470  …VEXT.8 Q14,Q14,Q8,#14  @II pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next…
    509  …VEXT.8 Q9,Q9,Q15,#14  @III pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_nex…
    525  VEXT.8 Q7,Q7,Q7,#2  @II sign_up = vextq_s8(sign_up, sign_up, 2)
    552  VEXT.8 Q7,Q7,Q7,#2  @III sign_up = vextq_s8(sign_up, sign_up, 2)
    625  …VEXT.8 Q9,Q9,Q8,#14  @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_ro…
    775  …VEXT.8 Q9,Q9,Q8,#14  @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_ro…
    822  VEXT.8 Q7,Q7,Q7,#2  @sign_up = vextq_s8(sign_up, sign_up, 2)
    952  …VEXT.8 Q9,Q9,Q8,#14  @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_ro…
    [all …]
|
D | ihevc_sao_edge_offset_class2_chroma.s |
    382  …VEXT.8 Q9,Q8,Q9,#2  @I pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_…
    424  VEXT.8 Q7,Q7,Q7,#14  @I sign_up = vextq_s8(sign_up, sign_up, 14)
    470  …VEXT.8 Q14,Q8,Q14,#2  @II pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row…
    482  …VEXT.8 Q9,Q15,Q9,#2  @III pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_ro…
    525  VEXT.8 Q7,Q7,Q7,#14  @II sign_up = vextq_s8(sign_up, sign_up, 14)
    560  VEXT.8 Q7,Q7,Q7,#14  @III sign_up = vextq_s8(sign_up, sign_up, 14)
    616  …VEXT.8 Q9,Q8,Q9,#2  @pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tm…
    758  …VEXT.8 Q9,Q8,Q9,#2  @pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tm…
    802  VEXT.8 Q7,Q7,Q7,#14  @sign_up = vextq_s8(sign_up, sign_up, 14)
    908  …VEXT.8 Q9,Q8,Q9,#2  @pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tm…
    [all …]
|
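In the four diagonal-class listings above, the per-column sign of the comparison against the row above is cached in sign_up; stepping to the next row moves each pixel's diagonal neighbour one column over, so the cached vector is rotated with VEXT instead of being recomputed (one byte for luma, two for interleaved chroma), exactly as the vextq_s8(sign_up, sign_up, 15 | 1 | 14 | 2) comments show. A sketch of the class-2 luma rotation under those assumptions:

    #include <arm_neon.h>

    /* Rotate the cached sign_up vector when advancing one row in the
     * 135-degree (class 2) direction; class 3 uses offset 1, and the
     * chroma variants 14 / 2. The lane vacated by the rotation is then
     * refilled with one scalar sign computation at the row edge (not
     * shown here).
     */
    static inline int8x16_t rotate_sign_up_class2(int8x16_t sign_up)
    {
        return vextq_s8(sign_up, sign_up, 15);  /* { s[15], s[0..14] } */
    }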
/external/libopus/celt/arm/ |
D | celt_pitch_xcorr_arm_gnu.s |
    117  VEXT.16 d16, d4, d5, #1
    119  VEXT.16 d16, d4, d5, #2
    121  VEXT.16 d16, d4, d5, #3
|
D | celt_pitch_xcorr_arm.s |
    114  VEXT.16 d16, d4, d5, #1
    116  VEXT.16 d16, d4, d5, #2
    118  VEXT.16 d16, d4, d5, #3
|
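In both Opus kernels the three VEXT.16 lines derive the sample windows for four consecutive correlation lags from one pair of loaded d-registers (d4:d5), avoiding three extra unaligned loads per iteration. A rough sketch of that windowing idiom follows; the real kernel's loop structure and which operand gets shifted differ, and all names here are illustrative:

    #include <arm_neon.h>

    /* One inner-loop step of a four-lag cross-correlation: load eight
     * contiguous samples once, then use vext to form the windows offset
     * by 1, 2 and 3 samples. Each acc[i] holds four partial products
     * and still needs a horizontal add when the loop finishes.
     */
    static inline void xcorr4_step(const int16_t *x, int16x4_t y,
                                   int32x4_t acc[4])
    {
        int16x4_t x0 = vld1_s16(x);      /* x[0..3], like d4 */
        int16x4_t x4 = vld1_s16(x + 4);  /* x[4..7], like d5 */
        acc[0] = vmlal_s16(acc[0], x0, y);
        acc[1] = vmlal_s16(acc[1], vext_s16(x0, x4, 1), y);  /* x[1..4] */
        acc[2] = vmlal_s16(acc[2], vext_s16(x0, x4, 2), y);  /* x[2..5] */
        acc[3] = vmlal_s16(acc[3], vext_s16(x0, x4, 3), y);  /* x[3..6] */
    }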
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | arm64-ext.ll | 57 ; Undef shuffle indices should not prevent matching to VEXT:
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-ext.ll | 57 ; Undef shuffle indices should not prevent matching to VEXT:
|
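Both copies of arm64-ext.ll test the same property: a two-input shuffle whose selected lanes are consecutive across the concatenated inputs is lowered to a single EXT (VEXT on 32-bit ARM), and lanes marked undef are treated as wildcards rather than blocking the match. With Clang this is easy to see from C, since __builtin_shufflevector accepts -1 for a lane whose value is unused; the function name below is illustrative:

    #include <arm_neon.h>

    /* Indices 1,2,3,4 are consecutive across (a, b), so this shuffle
     * becomes one ext.16b / vext #4-bytes. Replacing the trailing 4
     * with -1 (undef) should still produce the same single instruction.
     */
    int32x4_t ext_by_one(int32x4_t a, int32x4_t b)
    {
        return __builtin_shufflevector(a, b, 1, 2, 3, 4);
    }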
/external/arm-neon-tests/ |
D | ref-rvct-neon-nofp16.txt |
    622  VEXT/VEXTQ output:
    623  VEXT/VEXTQ:0:result_int8x8 [] = { fffffff7, 11, 11, 11, 11, 11, 11, 11, }
    624  VEXT/VEXTQ:1:result_int16x4 [] = { fffffff3, 22, 22, 22, }
    625  VEXT/VEXTQ:2:result_int32x2 [] = { fffffff1, 33, }
    626  VEXT/VEXTQ:3:result_int64x1 [] = { fffffffffffffff0, }
    627  VEXT/VEXTQ:4:result_uint8x8 [] = { f6, f7, 55, 55, 55, 55, 55, 55, }
    628  VEXT/VEXTQ:5:result_uint16x4 [] = { fff2, fff3, 66, 66, }
    629  VEXT/VEXTQ:6:result_uint32x2 [] = { fffffff1, 77, }
    630  VEXT/VEXTQ:7:result_uint64x1 [] = { fffffffffffffff0, }
    631  VEXT/VEXTQ:8:result_poly8x8 [] = { f6, f7, 55, 55, 55, 55, 55, 55, }
    [all …]
|
D | ref-rvct-neon.txt |
    670  VEXT/VEXTQ output:
    671  VEXT/VEXTQ:0:result_int8x8 [] = { fffffff7, 11, 11, 11, 11, 11, 11, 11, }
    672  VEXT/VEXTQ:1:result_int16x4 [] = { fffffff3, 22, 22, 22, }
    673  VEXT/VEXTQ:2:result_int32x2 [] = { fffffff1, 33, }
    674  VEXT/VEXTQ:3:result_int64x1 [] = { fffffffffffffff0, }
    675  VEXT/VEXTQ:4:result_uint8x8 [] = { f6, f7, 55, 55, 55, 55, 55, 55, }
    676  VEXT/VEXTQ:5:result_uint16x4 [] = { fff2, fff3, 66, 66, }
    677  VEXT/VEXTQ:6:result_uint32x2 [] = { fffffff1, 77, }
    678  VEXT/VEXTQ:7:result_uint64x1 [] = { fffffffffffffff0, }
    679  VEXT/VEXTQ:8:result_poly8x8 [] = { f6, f7, 55, 55, 55, 55, 55, 55, }
    [all …]
|
D | ref-rvct-all.txt |
    670  VEXT/VEXTQ output:
    671  VEXT/VEXTQ:0:result_int8x8 [] = { fffffff7, 11, 11, 11, 11, 11, 11, 11, }
    672  VEXT/VEXTQ:1:result_int16x4 [] = { fffffff3, 22, 22, 22, }
    673  VEXT/VEXTQ:2:result_int32x2 [] = { fffffff1, 33, }
    674  VEXT/VEXTQ:3:result_int64x1 [] = { fffffffffffffff0, }
    675  VEXT/VEXTQ:4:result_uint8x8 [] = { f6, f7, 55, 55, 55, 55, 55, 55, }
    676  VEXT/VEXTQ:5:result_uint16x4 [] = { fff2, fff3, 66, 66, }
    677  VEXT/VEXTQ:6:result_uint32x2 [] = { fffffff1, 77, }
    678  VEXT/VEXTQ:7:result_uint64x1 [] = { fffffffffffffff0, }
    679  VEXT/VEXTQ:8:result_poly8x8 [] = { f6, f7, 55, 55, 55, 55, 55, 55, }
    [all …]
|
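The reference values in the three dumps above follow directly from the visible input patterns: the first operand is a 0xf0 + i ramp (printed sign-extended, hence fffffff7), the second a per-type constant fill, and the VEXT offset varies per variant. The first line, for instance, corresponds to vext_s8(ramp, fill, 7), which keeps only the ramp's last lane. A small standalone check, assuming those inputs:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void)
    {
        int8_t ramp_bytes[8] = { (int8_t)0xf0, (int8_t)0xf1, (int8_t)0xf2,
                                 (int8_t)0xf3, (int8_t)0xf4, (int8_t)0xf5,
                                 (int8_t)0xf6, (int8_t)0xf7 };
        int8x8_t ramp = vld1_s8(ramp_bytes);
        int8x8_t fill = vdup_n_s8(0x11);
        int8x8_t r = vext_s8(ramp, fill, 7);  /* { ramp[7], fill[0..6] } */
        int8_t out[8];
        vst1_s8(out, r);
        for (int i = 0; i < 8; i++)
            printf("%x ", out[i] & 0xff);     /* f7 11 11 11 11 11 11 11 */
        printf("\n");
        return 0;
    }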
/external/llvm/lib/Target/ARM/ |
D | ARMISelLowering.h | 152 VEXT, // extract enumerator
|
D | ARMScheduleA8.td |
    1024  // Double-register VEXT
    1028  // Quad-register VEXT
|
D | ARMScheduleSwift.td | 549 (instregex "VEXT", "VREV16", "VREV32", "VREV64")>;
|
/external/llvm/test/CodeGen/ARM/ |
D | vext.ll | 57 ; Undef shuffle indices should not prevent matching to VEXT:
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/ |
D | ARMScheduleA8.td |
    1023  // Double-register VEXT
    1027  // Quad-register VEXT
|
D | ARMISelLowering.h | 193 VEXT, // extract enumerator
|
D | ARMScheduleSwift.td | 565 (instregex "VEXT", "VREV16", "VREV32", "VREV64")>;
|
/external/llvm-project/llvm/lib/Target/ARM/ |
D | ARMScheduleA8.td |
    1023  // Double-register VEXT
    1027  // Quad-register VEXT
|
D | ARMISelLowering.h | 196 VEXT, // extract enumerator
|
D | ARMScheduleSwift.td | 565 (instregex "VEXT", "VREV16", "VREV32", "VREV64")>;
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | vext.ll | 91 ; Undef shuffle indices should not prevent matching to VEXT:
|