Searched refs:xt0 (Results 1 – 5 of 5) sorted by relevance
/external/rust/crates/ring/crypto/chacha/asm/

chacha-x86_64.pl
    576  my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
    586  my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
    779  pshufd \$0x00,$xt3,$xt0  # "$xc0"
    781  movdqa $xt0,0xc0-0x100(%rcx)
    809  movdqa 0xc0-0x100(%rcx),$xt0  # "$xc0"
    880  my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
    904  ($xt0,$xt1)=($xa2,$xa3);  # use $xaN as temporary
    929  movdqu 0x00($inp),$xt0  # xor with input
    933  pxor 0x00(%rsp),$xt0  # $xaN is offloaded, remember?
    938  movdqu $xt0,0x00($out)
    [all …]

chacha-x86.pl
    572  my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));
    600  &movdqu ($xt0,&QWP(64*0-128,$inp));  # load input
    605  &pxor ($xt0,$xa0);
    613  &movdqu (&QWP(64*0-128,$out),$xt0);  # store output

/external/boringssl/src/crypto/chacha/asm/

chacha-x86_64.pl
    580  my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
    590  my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
    783  pshufd \$0x00,$xt3,$xt0  # "$xc0"
    785  movdqa $xt0,0xc0-0x100(%rcx)
    813  movdqa 0xc0-0x100(%rcx),$xt0  # "$xc0"
    884  my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
    908  ($xt0,$xt1)=($xa2,$xa3);  # use $xaN as temporary
    933  movdqu 0x00($inp),$xt0  # xor with input
    937  pxor 0x00(%rsp),$xt0  # $xaN is offloaded, remember?
    942  movdqu $xt0,0x00($out)
    [all …]

chacha-x86.pl
    572  my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));
    600  &movdqu ($xt0,&QWP(64*0-128,$inp));  # load input
    605  &pxor ($xt0,$xa0);
    613  &movdqu (&QWP(64*0-128,$out),$xt0);  # store output

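In both copies of chacha-x86_64.pl (the ring copy vendors the BoringSSL file), $xt0..$xt3 are xmm scratch registers in the 4-block SSE path: they hold broadcast state words around the round loop and then carry a keystream lane while the output is produced. The movdqu/pxor/movdqu sequence at lines 929/933/938 (933/937/942 in the BoringSSL copy) is the load-input, xor-with-keystream, store-output step. Below is a minimal C sketch of that step using SSE2 intrinsics; the function name and the keystream pointer are illustrative, not taken from the perlasm source.

    #include <emmintrin.h>   /* SSE2 intrinsics */
    #include <stdint.h>

    /* Illustrative sketch: xor one 16-byte keystream lane into the input,
     * mirroring the movdqu/pxor/movdqu sequence around $xt0 above. */
    static void chacha_xor_block16(uint8_t *out, const uint8_t *inp,
                                   const uint8_t *keystream)
    {
        __m128i xt0 = _mm_loadu_si128((const __m128i *)inp);        /* movdqu ($inp),$xt0 */
        __m128i ks  = _mm_loadu_si128((const __m128i *)keystream);  /* keystream lane (on the stack in the asm) */
        xt0 = _mm_xor_si128(xt0, ks);                               /* pxor */
        _mm_storeu_si128((__m128i *)out, xt0);                      /* movdqu $xt0,($out) */
    }
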
/external/mesa3d/src/intel/isl/

isl_tiled_memcpy.c
    840  uint32_t xt0, xt3;  in intel_linear_to_tiled() local
    861  xt0 = ALIGN_DOWN(xt1, tw);  in intel_linear_to_tiled()
    873  for (xt = xt0; xt < xt3; xt += tw) {  in intel_linear_to_tiled()
    931  uint32_t xt0, xt3;  in intel_tiled_to_linear() local
    961  xt0 = ALIGN_DOWN(xt1, tw);  in intel_tiled_to_linear()
    973  for (xt = xt0; xt < xt3; xt += tw) {  in intel_tiled_to_linear()

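In isl_tiled_memcpy.c, xt0 is the first tile-aligned x coordinate of the copy rectangle: ALIGN_DOWN(xt1, tw) at lines 861/961 rounds the left edge down to a tile boundary, and the loops at 873/973 then walk the surface one tile width (tw) at a time up to xt3. Below is a minimal C sketch of that traversal pattern; ALIGN_DOWN/ALIGN_UP, walk_tiles and copy_tile are stand-ins rather than the actual isl helpers, and the way xt3 is derived here is an assumption.

    #include <stdint.h>

    /* Stand-ins for the real alignment helpers; 'a' must be a power of two. */
    #define ALIGN_DOWN(x, a) ((x) & ~((uint32_t)(a) - 1))
    #define ALIGN_UP(x, a)   ALIGN_DOWN((x) + (a) - 1, (a))

    /* Illustrative sketch of the tile-walking loop around xt0:
     * visit every tile column that overlaps the x range [xt1, xt2). */
    static void walk_tiles(uint32_t xt1, uint32_t xt2, uint32_t tw,
                           void (*copy_tile)(uint32_t xt, void *ctx), void *ctx)
    {
        uint32_t xt0 = ALIGN_DOWN(xt1, tw);  /* left edge rounded down to a tile boundary */
        uint32_t xt3 = ALIGN_UP(xt2, tw);    /* assumed: right edge rounded up past the last tile */

        for (uint32_t xt = xt0; xt < xt3; xt += tw)
            copy_tile(xt, ctx);              /* per-tile copy, hypothetical callback */
    }
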