/external/aac/libFDK/src/arm/ |
D | fft_rad2_arm.cpp |
      168  FIXP_DBL *xt2 = xt1 + (mh << 1);   in dit_fft() local
      176  vi = xt2[1] >> 1;                  in dit_fft()
      177  vr = xt2[0] >> 1;                  in dit_fft()
      185  xt2[0] = ur - vr;                  in dit_fft()
      186  xt2[1] = ui - vi;                  in dit_fft()
      189  xt2 += mh;                         in dit_fft()
      192  vr = xt2[1] >> 1;                  in dit_fft()
      193  vi = xt2[0] >> 1;                  in dit_fft()
      201  xt2[0] = ur - vr;                  in dit_fft()
      202  xt2[1] = ui + vi;                  in dit_fft()
      [all …]
|
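In the dit_fft() matches above, each complex sample is read through a one-bit right shift and written back as a sum/difference pair. A minimal C++ sketch of that scale-by-half radix-2 decimation-in-time butterfly, for illustration only (dit_butterfly and the plain int32_t in place of FIXP_DBL are assumptions, not libFDK code):

```cpp
#include <cstdint>

// Hypothetical helper, not part of libFDK: one trivial-twiddle radix-2 DIT
// butterfly on interleaved {re, im} fixed-point data. Inputs are halved
// (>> 1) before combining so the sums cannot overflow 32 bits.
static void dit_butterfly(int32_t *xt1, int32_t *xt2) {
  int32_t ur = xt1[0] >> 1;  // upper point, real
  int32_t ui = xt1[1] >> 1;  // upper point, imag
  int32_t vr = xt2[0] >> 1;  // lower point, real
  int32_t vi = xt2[1] >> 1;  // lower point, imag
  xt1[0] = ur + vr;          // sum written back to the upper slot
  xt1[1] = ui + vi;
  xt2[0] = ur - vr;          // difference written to the lower slot
  xt2[1] = ui - vi;
}
```

The second group of matches (file lines 192-202) loads the lower point with real and imaginary swapped and flips one sign, which appears to fold the multiply-by-±j twiddle into the butterfly without an explicit complex multiplication.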
/external/rust/crates/ring/crypto/chacha/asm/ |
D | chacha-x86_64.pl |
      576  my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
      586  my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
      782  pshufd \$0xaa,$xt3,$xt2          # "$xc2"
      785  movdqa $xt2,0xe0-0x100(%rcx)
      811  movdqa 0xe0-0x100(%rcx),$xt2     # "$xc2"
      820  movdqa $xt2,0x20(%rsp)           # SIMD equivalent of "@x[10]"
      841  movdqa $xa0,$xt2                 # "de-interlace" data
      845  punpckhdq $xa1,$xt2
      849  movdqa $xt2,$xa3
      850  punpcklqdq $xt3,$xt2             # "a2"
      [all …]
|
D | chacha-x86.pl |
      572  my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));
      585  &movdqa ($xt2,$xa0);             # "de-interlace" data
      589  &punpckhdq ($xt2,$xa1);
      593  &movdqa ($xa3,$xt2);
      594  &punpcklqdq ($xt2,$xt3);         # "a2"
      609  &pxor ($xt2,$xa2);
      615  &movdqu (&QWP(64*2-128,$out),$xt2);
|
/external/boringssl/src/crypto/chacha/asm/ |
D | chacha-x86_64.pl |
      580  my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
      590  my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
      786  pshufd \$0xaa,$xt3,$xt2          # "$xc2"
      789  movdqa $xt2,0xe0-0x100(%rcx)
      815  movdqa 0xe0-0x100(%rcx),$xt2     # "$xc2"
      824  movdqa $xt2,0x20(%rsp)           # SIMD equivalent of "@x[10]"
      845  movdqa $xa0,$xt2                 # "de-interlace" data
      849  punpckhdq $xa1,$xt2
      853  movdqa $xt2,$xa3
      854  punpcklqdq $xt3,$xt2             # "a2"
      [all …]
|
D | chacha-x86.pl |
      572  my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));
      585  &movdqa ($xt2,$xa0);             # "de-interlace" data
      589  &punpckhdq ($xt2,$xa1);
      593  &movdqa ($xa3,$xt2);
      594  &punpcklqdq ($xt2,$xt3);         # "a2"
      609  &pxor ($xt2,$xa2);
      615  &movdqu (&QWP(64*2-128,$out),$xt2);
|
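The ring and BoringSSL entries above are vendored copies of the same perlasm file; $xt0..$xt3 serve as scratch XMM registers for the punpckldq/punpckhdq/punpcklqdq sequence that "de-interlaces" four ChaCha blocks computed in parallel. A hedged SSE2-intrinsics sketch of that 4x4 transpose of 32-bit lanes (deinterlace4x4 is an invented name, not a routine in either tree):

```cpp
#include <emmintrin.h>

// Illustrative 4x4 transpose of 32-bit lanes across four XMM registers.
// Before: xa0..xa3 each hold one state word, with lane j belonging to
// parallel block j. After: xa0..xa3 each hold four consecutive words of
// a single block.
static inline void deinterlace4x4(__m128i &xa0, __m128i &xa1,
                                  __m128i &xa2, __m128i &xa3) {
  __m128i lo01 = _mm_unpacklo_epi32(xa0, xa1);  // punpckldq
  __m128i hi01 = _mm_unpackhi_epi32(xa0, xa1);  // punpckhdq
  __m128i lo23 = _mm_unpacklo_epi32(xa2, xa3);
  __m128i hi23 = _mm_unpackhi_epi32(xa2, xa3);
  xa0 = _mm_unpacklo_epi64(lo01, lo23);         // punpcklqdq -> "a0"
  xa1 = _mm_unpackhi_epi64(lo01, lo23);         //               "a1"
  xa2 = _mm_unpacklo_epi64(hi01, hi23);         //               "a2"
  xa3 = _mm_unpackhi_epi64(hi01, hi23);         //               "a3"
}
```

After this transpose each register holds a contiguous run of one block's keystream, which is what the later pxor/movdqu matches XOR against the input and store to $out.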
/external/mesa3d/src/intel/isl/ |
D | isl_tiled_memcpy_sse41.c |
      37   _isl_memcpy_linear_to_tiled_sse41(uint32_t xt1, uint32_t xt2,              in _isl_memcpy_linear_to_tiled_sse41() argument
      45   intel_linear_to_tiled(xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch,  in _isl_memcpy_linear_to_tiled_sse41()
      50   _isl_memcpy_tiled_to_linear_sse41(uint32_t xt1, uint32_t xt2,              in _isl_memcpy_tiled_to_linear_sse41() argument
      58   intel_tiled_to_linear(xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch,  in _isl_memcpy_tiled_to_linear_sse41()
|
D | isl_tiled_memcpy_normal.c |
      36   _isl_memcpy_linear_to_tiled(uint32_t xt1, uint32_t xt2,                    in _isl_memcpy_linear_to_tiled() argument
      44   intel_linear_to_tiled(xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch,  in _isl_memcpy_linear_to_tiled()
      49   _isl_memcpy_tiled_to_linear(uint32_t xt1, uint32_t xt2,                    in _isl_memcpy_tiled_to_linear() argument
      57   intel_tiled_to_linear(xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch,  in _isl_memcpy_tiled_to_linear()
|
D | isl_priv.h |
      165  _isl_memcpy_linear_to_tiled(uint32_t xt1, uint32_t xt2,
      174  _isl_memcpy_tiled_to_linear(uint32_t xt1, uint32_t xt2,
      183  _isl_memcpy_linear_to_tiled_sse41(uint32_t xt1, uint32_t xt2,
      192  _isl_memcpy_tiled_to_linear_sse41(uint32_t xt1, uint32_t xt2,
|
D | isl_tiled_memcpy.c |
      831  intel_linear_to_tiled(uint32_t xt1, uint32_t xt2,   in intel_linear_to_tiled() argument
      862  xt3 = ALIGN_UP (xt2, tw);                           in intel_linear_to_tiled()
      879  uint32_t x3 = MIN2(xt2, xt + tw);                   in intel_linear_to_tiled()
      922  intel_tiled_to_linear(uint32_t xt1, uint32_t xt2,   in intel_tiled_to_linear() argument
      962  xt3 = ALIGN_UP (xt2, tw);                           in intel_tiled_to_linear()
      979  uint32_t x3 = MIN2(xt2, xt + tw);                   in intel_tiled_to_linear()
|
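In intel_linear_to_tiled() and intel_tiled_to_linear(), xt1/xt2 bound the copy rectangle in x, xt3 is that bound aligned up to the tile width, and each tile's x span is clamped with MIN2(xt2, xt + tw). A rough, self-contained sketch of that tile-walking pattern follows; the names tw, th, copy_span_fn, and for_each_tile_span are placeholders, not Mesa's actual signature or implementation:

```cpp
#include <cstdint>
#include <algorithm>

// Hypothetical callback type: receives one tile's clamped intersection with
// the requested rectangle.
typedef void (*copy_span_fn)(uint32_t x0, uint32_t x3, uint32_t y0, uint32_t y3);

// Illustration only: visit every tile overlapping [xt1, xt2) x [yt1, yt2),
// mirroring the ALIGN_UP / MIN2 clamping visible in the matches above.
static void for_each_tile_span(uint32_t xt1, uint32_t xt2,
                               uint32_t yt1, uint32_t yt2,
                               uint32_t tw, uint32_t th,  // tile width/height (assumed)
                               copy_span_fn copy_span) {
  const uint32_t xt0 = xt1 - (xt1 % tw);            // align x down to a tile boundary
  const uint32_t xt3 = ((xt2 + tw - 1) / tw) * tw;  // ALIGN_UP(xt2, tw)
  const uint32_t yt0 = yt1 - (yt1 % th);
  const uint32_t yt3 = ((yt2 + th - 1) / th) * th;
  for (uint32_t yt = yt0; yt < yt3; yt += th) {
    for (uint32_t xt = xt0; xt < xt3; xt += tw) {
      const uint32_t x0 = std::max(xt1, xt);
      const uint32_t x3 = std::min(xt2, xt + tw);   // MIN2(xt2, xt + tw)
      const uint32_t y0 = std::max(yt1, yt);
      const uint32_t y3 = std::min(yt2, yt + th);
      copy_span(x0, x3, y0, y3);                    // copy this tile's intersection
    }
  }
}
```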
D | isl.c |
      40   isl_memcpy_linear_to_tiled(uint32_t xt1, uint32_t xt2,                in isl_memcpy_linear_to_tiled() argument
      51   xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch, has_swizzling,    in isl_memcpy_linear_to_tiled()
      58   xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch, has_swizzling,    in isl_memcpy_linear_to_tiled()
      63   isl_memcpy_tiled_to_linear(uint32_t xt1, uint32_t xt2,                in isl_memcpy_tiled_to_linear() argument
      74   xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch, has_swizzling,    in isl_memcpy_tiled_to_linear()
      81   xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch, has_swizzling,    in isl_memcpy_tiled_to_linear()
|
D | isl.h |
      2352 isl_memcpy_linear_to_tiled(uint32_t xt1, uint32_t xt2,
      2365 isl_memcpy_tiled_to_linear(uint32_t xt1, uint32_t xt2,
|
/external/vixl/src/aarch64/ |
D | assembler-aarch64.cc |
      1111 const Register& xt2,                              in ldpsw() argument
      1113 VIXL_ASSERT(xt.Is64Bits() && xt2.Is64Bits());     in ldpsw()
      1114 LoadStorePair(xt, xt2, src, LDPSW_x);             in ldpsw()
|
D | assembler-aarch64.h | 1272 void ldpsw(const Register& xt, const Register& xt2, const MemOperand& src);
|
/external/vixl/doc/aarch64/ |
D | supported-instructions-aarch64.md | 1270 void ldpsw(const Register& xt, const Register& xt2, const MemOperand& src)
|
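LDPSW loads a pair of 32-bit words and sign-extends each into a 64-bit register, which is why the assertion in assembler-aarch64.cc requires both destinations to be 64-bit. A minimal usage sketch against the declared VIXL signature, assuming VIXL's src/ directory is on the include path (EmitLdpswExample is a made-up wrapper, not part of VIXL):

```cpp
#include "aarch64/assembler-aarch64.h"

using namespace vixl::aarch64;

// Illustration only: emits "ldpsw x0, x1, [x2, #8]", loading the signed
// 32-bit words at x2+8 and x2+12 and sign-extending them into x0 and x1.
void EmitLdpswExample(Assembler* assm) {
  assm->ldpsw(x0, x1, MemOperand(x2, 8));
}
```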