
Searched refs:VZIP (Results 1 – 25 of 37) sorted by relevance


/external/libhevc/decoder/arm/
ihevcd_fmt_conv_420sp_to_rgba8888.s
232 VZIP.8 D14,D15
233 VZIP.8 D16,D17
234 VZIP.16 Q7,Q8
242 VZIP.8 D20,D21
243 VZIP.8 D22,D23
244 VZIP.16 Q10,Q11
246 VZIP.32 Q7,Q10
247 VZIP.32 Q8,Q11
283 VZIP.8 D14,D15
284 VZIP.8 D16,D17
[all …]
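
For context, VZIP.8 interleaves the bytes of its two operand registers in place; the VZIP.8 / VZIP.16 / VZIP.32 cascade above is the usual way such a conversion routine packs per-channel bytes into RGBA pixels. A minimal intrinsics sketch of the first two stages, assuming ARM NEON (arm_neon.h); the function and channel names are illustrative, not taken from the source:

#include <arm_neon.h>

/* Illustrative only: zip R/G and B/A bytes, then zip the resulting 16-bit
 * pairs so each 32-bit group reads R,G,B,A.  This mirrors the VZIP.8
 * followed by VZIP.16 pattern in the listing above; the real routine also
 * uses VZIP.32 to order full rows, and the second halves (rg.val[1],
 * ba.val[1]) are handled the same way. */
static uint8x8x2_t pack_rgba4(uint8x8_t r, uint8x8_t g,
                              uint8x8_t b, uint8x8_t a) {
    uint8x8x2_t rg = vzip_u8(r, g);                 /* R0 G0 R1 G1 ... */
    uint8x8x2_t ba = vzip_u8(b, a);                 /* B0 A0 B1 A1 ... */
    uint16x4x2_t lo = vzip_u16(vreinterpret_u16_u8(rg.val[0]),
                               vreinterpret_u16_u8(ba.val[0]));
    uint8x8x2_t out;
    out.val[0] = vreinterpret_u8_u16(lo.val[0]);    /* R0 G0 B0 A0 R1 G1 B1 A1 */
    out.val[1] = vreinterpret_u8_u16(lo.val[1]);    /* R2 G2 B2 A2 R3 G3 B3 A3 */
    return out;
}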
/external/libhevc/common/arm/
ihevc_sao_edge_offset_class1_chroma.s
194 VZIP.8 D12,D13
213 VZIP.8 D24,D25
261 VZIP.8 D24,D25
344 VZIP.8 D12,D13
362 VZIP.8 D24,D25
397 VZIP.8 D24,D25
ihevc_sao_band_offset_chroma.s
371 VZIP.8 D5,D6
377 VZIP.8 D13,D14
386 VZIP.8 D17,D18
390 VZIP.8 D21,D22
ihevc_sao_edge_offset_class0_chroma.s
232 VZIP.S8 D16,D17
253 VZIP.S8 D26,D27 @II
397 VZIP.S8 D16,D17
420 VZIP.S8 D26,D27 @II
ihevc_sao_edge_offset_class3_chroma.s
423 VZIP.8 D22,D23 @I
544 VZIP.8 D24,D25 @II
569 VZIP.8 D22,D23 @III
657 VZIP.8 D22,D23
831 VZIP.8 D24,D25
1007 VZIP.8 D24,D25
ihevc_sao_edge_offset_class2_chroma.s
434 VZIP.8 D22,D23 @I
544 VZIP.8 D24,D25 @II
576 VZIP.8 D22,D23 @III
655 VZIP.8 D24,D25
807 VZIP.8 D24,D25
957 VZIP.8 D24,D25
ihevc_resi_trans_32x32_a9q.s
288 VZIP.S32 Q8,Q9 @ 3-cycle instruction -- 1st cycle dual issued
350 VZIP.S32 Q15,Q8 @ 3-cycle instruction
405 VZIP.S32 Q9,Q12 @ 3-cycle
483 VZIP.S32 Q9,Q12
562 VZIP.S32 Q8, Q9
643 VZIP.S32 Q8, Q9
720 VZIP.S32 Q8, Q9
778 VZIP.S32 Q0, Q1 @ 3-cycle instruction
ihevc_resi_trans.s
1093 VZIP.S32 Q5,Q6 @3-cycle instruction
1153 VZIP.S32 Q5,Q4 @ 3 cycle instruction
1228 VZIP.S32 Q12,Q13
1289 VZIP.S32 Q10,Q12 @ 3-cycle instruction
1448 VZIP.S16 D8,D9 @INTERLEAVE R1 R2 R1 R2 R1 R2 to write
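
The "@INTERLEAVE R1 R2 ... to write" comment at line 1448 describes a common use of VZIP: zip two result rows so a pair of plain stores writes them element-interleaved. A small sketch of that idiom, assuming ARM NEON intrinsics; the function name and layout are illustrative, not from the source:

#include <arm_neon.h>

/* Illustrative idiom: interleave two 4x16-bit rows before storing, so the
 * output reads r1[0], r2[0], r1[1], r2[1], ... (the effect of the
 * VZIP.S16 D8,D9 hit above). */
static void store_rows_interleaved(int16_t *dst, int16x4_t r1, int16x4_t r2) {
    int16x4x2_t z = vzip_s16(r1, r2);  /* val[0] = {r1[0], r2[0], r1[1], r2[1]} */
    vst1_s16(dst,     z.val[0]);
    vst1_s16(dst + 4, z.val[1]);       /* val[1] = {r1[2], r2[2], r1[3], r2[3]} */
}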
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/
vzip.ll
27 ; VZIP.32 is equivalent to VTRN.32 for 64-bit vectors.
77 ; Undef shuffle indices should not prevent matching to VZIP:
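
The equivalence noted in vzip.ll holds because, for two-lane (64-bit) vectors, zipping {a0, a1} with {b0, b1} produces {a0, b0} and {a1, b1}, which is exactly the 2x2 transpose. A quick check with ARM NEON intrinsics (illustrative, not part of the test file):

#include <arm_neon.h>
#include <assert.h>

/* For uint32x2_t inputs, vzip and vtrn produce identical results,
 * matching the "VZIP.32 is equivalent to VTRN.32" comment above. */
static void zip_matches_trn(uint32x2_t a, uint32x2_t b) {
    uint32x2x2_t z = vzip_u32(a, b);   /* {a0,b0}, {a1,b1} */
    uint32x2x2_t t = vtrn_u32(a, b);   /* {a0,b0}, {a1,b1} */
    assert(vget_lane_u32(z.val[0], 0) == vget_lane_u32(t.val[0], 0));
    assert(vget_lane_u32(z.val[0], 1) == vget_lane_u32(t.val[0], 1));
    assert(vget_lane_u32(z.val[1], 0) == vget_lane_u32(t.val[1], 0));
    assert(vget_lane_u32(z.val[1], 1) == vget_lane_u32(t.val[1], 1));
}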
/external/arm-neon-tests/
ref-rvct-neon-nofp16.txt
3143 VZIP/VZIPQ chunk 0 output:
3144 VZIP/VZIPQ:0:result_int8x8 [] = { fffffff0, fffffff4, 11, 11, fffffff1, fffffff5, 11, 11, }
3145 VZIP/VZIPQ:1:result_int16x4 [] = { fffffff0, fffffff2, 22, 22, }
3146 VZIP/VZIPQ:2:result_int32x2 [] = { fffffff0, fffffff1, }
3147 VZIP/VZIPQ:3:result_int64x1 [] = { 3333333333333333, }
3148 VZIP/VZIPQ:4:result_uint8x8 [] = { f0, f4, 55, 55, f1, f5, 55, 55, }
3149 VZIP/VZIPQ:5:result_uint16x4 [] = { fff0, fff2, 66, 66, }
3150 VZIP/VZIPQ:6:result_uint32x2 [] = { fffffff0, fffffff1, }
3151 VZIP/VZIPQ:7:result_uint64x1 [] = { 3333333333333333, }
3152 VZIP/VZIPQ:8:result_poly8x8 [] = { f0, f4, 55, 55, f1, f5, 55, 55, }
[all …]
ref-rvct-neon.txt
3579 VZIP/VZIPQ chunk 0 output:
3580 VZIP/VZIPQ:0:result_int8x8 [] = { fffffff0, fffffff4, 11, 11, fffffff1, fffffff5, 11, 11, }
3581 VZIP/VZIPQ:1:result_int16x4 [] = { fffffff0, fffffff2, 22, 22, }
3582 VZIP/VZIPQ:2:result_int32x2 [] = { fffffff0, fffffff1, }
3583 VZIP/VZIPQ:3:result_int64x1 [] = { 3333333333333333, }
3584 VZIP/VZIPQ:4:result_uint8x8 [] = { f0, f4, 55, 55, f1, f5, 55, 55, }
3585 VZIP/VZIPQ:5:result_uint16x4 [] = { fff0, fff2, 66, 66, }
3586 VZIP/VZIPQ:6:result_uint32x2 [] = { fffffff0, fffffff1, }
3587 VZIP/VZIPQ:7:result_uint64x1 [] = { 3333333333333333, }
3588 VZIP/VZIPQ:8:result_poly8x8 [] = { f0, f4, 55, 55, f1, f5, 55, 55, }
[all …]
ref-rvct-all.txt
3579 VZIP/VZIPQ chunk 0 output:
3580 VZIP/VZIPQ:0:result_int8x8 [] = { fffffff0, fffffff4, 11, 11, fffffff1, fffffff5, 11, 11, }
3581 VZIP/VZIPQ:1:result_int16x4 [] = { fffffff0, fffffff2, 22, 22, }
3582 VZIP/VZIPQ:2:result_int32x2 [] = { fffffff0, fffffff1, }
3583 VZIP/VZIPQ:3:result_int64x1 [] = { 3333333333333333, }
3584 VZIP/VZIPQ:4:result_uint8x8 [] = { f0, f4, 55, 55, f1, f5, 55, 55, }
3585 VZIP/VZIPQ:5:result_uint16x4 [] = { fff0, fff2, 66, 66, }
3586 VZIP/VZIPQ:6:result_uint32x2 [] = { fffffff0, fffffff1, }
3587 VZIP/VZIPQ:7:result_uint64x1 [] = { 3333333333333333, }
3588 VZIP/VZIPQ:8:result_poly8x8 [] = { f0, f4, 55, 55, f1, f5, 55, 55, }
[all …]
/external/libxaac/decoder/armv7/
ixheaacd_post_twiddle_overlap.s
140 VZIP.32 D2, D3
148 VZIP.32 D0, D1
337 VZIP.32 Q13, Q0
593 VZIP.32 Q13, Q0
869 VZIP.32 Q13, Q0
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
aarch64-vuzp.ll
62 ; Check that this pattern is recognized as a VZIP and
arm64-zip.ll
81 ; Undef shuffle indices should not prevent matching to VZIP:
/external/llvm/test/CodeGen/AArch64/
arm64-zip.ll
81 ; Undef shuffle indices should not prevent matching to VZIP:
/external/swiftshader/third_party/LLVM/lib/Target/ARM/
ARMISelLowering.h
158 VZIP, // zip (interleave) enumerator
/external/llvm/test/CodeGen/ARM/
vzip.ll
67 ; VZIP.32 is equivalent to VTRN.32 for 64-bit vectors.
201 ; Undef shuffle indices should not prevent matching to VZIP:
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/
vzip.ll
68 ; VZIP.32 is equivalent to VTRN.32 for 64-bit vectors.
202 ; Undef shuffle indices should not prevent matching to VZIP:
/external/llvm/lib/Target/ARM/
ARMISelLowering.h
156 VZIP, // zip (interleave) enumerator
ARMScheduleSwift.td
584 (instregex "VSWP", "VTRN", "VUZP", "VZIP")>;
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/ARM/
ARMISelLowering.h
188 VZIP, // zip (interleave) enumerator
ARMScheduleSwift.td
601 (instregex "VSWP", "VTRN", "VUZP", "VZIP")>;
ARMScheduleR52.td
831 def : InstRW<[R52WriteFPALU_F3, R52Read_F1, R52Read_F1], (instregex "VSWP", "VTRN", "VUZP", "VZIP")…
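
The VZIP entries in ARMISelLowering.h above are the selection-DAG node these backends use for zip/interleave shuffle masks; the vzip.ll and arm64-zip.ll tests earlier check that such masks are matched. A hypothetical C-level shuffle of that shape, assuming Clang's __builtin_shufflevector on a NEON target (not taken from those tests):

#include <arm_neon.h>

/* An interleaving shuffle mask (0,8,1,9,... / 4,12,5,13,...) that an ARM
 * backend can typically select as a single VZIP.8 of the two inputs. */
static uint8x8x2_t zip_bytes(uint8x8_t a, uint8x8_t b) {
    uint8x8x2_t r;
    r.val[0] = __builtin_shufflevector(a, b, 0, 8, 1, 9, 2, 10, 3, 11);
    r.val[1] = __builtin_shufflevector(a, b, 4, 12, 5, 13, 6, 14, 7, 15);
    return r;
}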
/external/v8/src/arm/
assembler-arm.cc
4787 enum NeonSizedOp { VZIP, VUZP, VREV16, VREV32, VREV64, VTRN }; enumerator
4793 case VZIP: in EncodeNeonSizedOp()
4832 emit(EncodeNeonSizedOp(VZIP, NEON_D, size, src1.code(), src2.code())); in vzip()
4840 emit(EncodeNeonSizedOp(VZIP, NEON_Q, size, src1.code(), src2.code())); in vzip()
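
The two emit() paths above pick the NEON_D and NEON_Q encodings of the same operation, i.e. the 64-bit and 128-bit register forms. In intrinsic terms that is the difference between vzip_u8 and vzipq_u8; a sketch assuming ARM NEON, not taken from the V8 sources:

#include <arm_neon.h>

/* D-register (64-bit) and Q-register (128-bit) forms of the same zip. */
static void d_and_q_forms(uint8x8_t d0, uint8x8_t d1,
                          uint8x16_t q0, uint8x16_t q1) {
    uint8x8x2_t  zd = vzip_u8(d0, d1);     /* VZIP.8 Dd,Dm */
    uint8x16x2_t zq = vzipq_u8(q0, q1);    /* VZIP.8 Qd,Qm */
    (void)zd;
    (void)zq;
}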
