Lines Matching refs:rax

134 	lea	(%rsp), %rax
135 .cfi_def_cfa_register %rax
152 vmovaps %xmm6,-0xd8(%rax)
153 vmovaps %xmm7,-0xc8(%rax)
154 vmovaps %xmm8,-0xb8(%rax)
155 vmovaps %xmm9,-0xa8(%rax)
156 vmovaps %xmm10,-0x98(%rax)
157 vmovaps %xmm11,-0x88(%rax)
158 vmovaps %xmm12,-0x78(%rax)
159 vmovaps %xmm13,-0x68(%rax)
160 vmovaps %xmm14,-0x58(%rax)
161 vmovaps %xmm15,-0x48(%rax)
165 mov %rax,%rbp
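
Note: the block above (source lines 134-165) is a function prologue in what looks like an OpenSSL-style perlasm module: %rax snapshots the incoming %rsp, the Win64-callee-saved %xmm6-%xmm15 are spilled at fixed negative offsets from that snapshot, and %rbp then keeps the snapshot as a frame pointer for the epilogue. A minimal C sketch of the slot arithmetic; xmm_slot is an illustrative helper, not anything in the source:

    #include <assert.h>

    /* Offsets implied by the vmovaps stores above: xmm6 at -0xd8(%rax),
       stepping 16 bytes per register up to xmm15 at -0x48(%rax). */
    static int xmm_slot(int n)              /* n in 6..15 */
    {
        return -0xd8 + (n - 6) * 0x10;
    }

    int main(void)
    {
        assert(xmm_slot(6)  == -0xd8);      /* vmovaps %xmm6,-0xd8(%rax)  */
        assert(xmm_slot(15) == -0x48);      /* vmovaps %xmm15,-0x48(%rax) */
        return 0;
    }
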
448 mov (%rsp), %rax
460 mov %rax, $r0
465 mov %rax, %rdx
466 imulq -128($np), %rax
468 add %rax, $r0
469 mov %rdx, %rax
470 imulq 8-128($np), %rax
472 add %rax, $r1
473 mov %rdx, %rax
474 imulq 16-128($np), %rax
476 add %rax, $r2
480 mov $r1, %rax
493 mov %rax, %rdx
494 imulq -128($np), %rax
496 add %rax, $r1
498 mov %rdx, %rax
499 imulq 8-128($np), %rax
503 add %rax, $r2
505 mov %rdx, %rax
506 imulq 16-128($np), %rax
510 add %rax, $r3
514 mov $r2, %rax
531 mov %rax, %rdx
532 imulq -128($np), %rax
536 add %rax, $r2
537 mov %rdx, %rax
538 imulq 8-128($np), %rax
540 add $r3, %rax
544 add $r2, %rax
549 mov %rax, $r3
562 imulq -128($np), %rax
570 add %rax, $r3
587 vmovq $ACC0, %rax
602 add $r3, %rax
606 mov %rax, $r0
619 mov %rax, %rdx
620 imulq -128($np), %rax
625 add %rax, $r0
626 mov %rdx, %rax
627 imulq 8-128($np), %rax
634 add %rax, $r1
635 mov %rdx, %rax
636 imulq 16-128($np), %rax
641 add %rax, $r2
642 lea ($r0,$r1), %rax
646 mov %rax, $r1
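
Note: source lines 448-646 are the scalar reduction tail. The recurring triplet (mov %rax,%rdx to keep the multiplier, imulq against a word of $np, add into a running limb) is a plain multiply-accumulate; the -128 bias in -128($np) is the usual perlasm trick of pre-advancing the pointer so displacements fit in a signed byte. A minimal sketch of one round, assuming 64-bit limbs; m, n[] and r[] are stand-ins for the perlasm names:

    #include <stdint.h>

    /* mov %rax,%rdx          keep multiplier m
       imulq -128($np),%rax   low 64 bits of m * n[0]
       add %rax,$r0           accumulate; then the same for n[1], n[2] */
    void reduce_round(uint64_t r[3], const uint64_t n[3], uint64_t m)
    {
        r[0] += m * n[0];
        r[1] += m * n[1];
        r[2] += m * n[2];
    }

Low-half products suffice here because the limbs are kept well below 64 bits (see the 0x1fffffff masks further down).
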
798 mov %rbp, %rax
799 .cfi_def_cfa_register %rax
803 movaps -0xd8(%rax),%xmm6
804 movaps -0xc8(%rax),%xmm7
805 movaps -0xb8(%rax),%xmm8
806 movaps -0xa8(%rax),%xmm9
807 movaps -0x98(%rax),%xmm10
808 movaps -0x88(%rax),%xmm11
809 movaps -0x78(%rax),%xmm12
810 movaps -0x68(%rax),%xmm13
811 movaps -0x58(%rax),%xmm14
812 movaps -0x48(%rax),%xmm15
815 mov -48(%rax),%r15
817 mov -40(%rax),%r14
819 mov -32(%rax),%r13
821 mov -24(%rax),%r12
823 mov -16(%rax),%rbp
825 mov -8(%rax),%rbx
827 lea (%rax),%rsp # restore %rsp
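
Note: source lines 798-827 are the matching epilogue: the frame snapshot returns via %rbp, %xmm6-%xmm15 reload from the prologue's slots, six callee-saved GPRs come from -48(%rax) through -8(%rax), and one lea (%rax),%rsp rewinds the stack. An illustrative C view of the GPR save area; the struct is a stand-in, not a type in the source:

    #include <stdint.h>

    /* Field order follows the mov offsets above; the area starts 48
       bytes below the frame snapshot. */
    typedef struct {
        uint64_t r15;   /* -48(%rax) */
        uint64_t r14;   /* -40(%rax) */
        uint64_t r13;   /* -32(%rax) */
        uint64_t r12;   /* -24(%rax) */
        uint64_t rbp;   /* -16(%rax) */
        uint64_t rbx;   /*  -8(%rax) */
    } gpr_save_area;
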
885 lea (%rsp), %rax
886 .cfi_def_cfa_register %rax
903 vmovaps %xmm6,-0xd8(%rax)
904 vmovaps %xmm7,-0xc8(%rax)
905 vmovaps %xmm8,-0xb8(%rax)
906 vmovaps %xmm9,-0xa8(%rax)
907 vmovaps %xmm10,-0x98(%rax)
908 vmovaps %xmm11,-0x88(%rax)
909 vmovaps %xmm12,-0x78(%rax)
910 vmovaps %xmm13,-0x68(%rax)
911 vmovaps %xmm14,-0x58(%rax)
912 vmovaps %xmm15,-0x48(%rax)
916 mov %rax,%rbp
1002 mov %rbx, %rax
1003 imulq -128($ap), %rax
1004 add $r0, %rax
1009 mov %rax, $r0
1042 mov %rax,%rdx
1043 imulq -128($np),%rax
1044 add %rax,$r0
1045 mov %rdx,%rax
1046 imulq 8-128($np),%rax
1047 add %rax,$r1
1048 mov %rdx,%rax
1049 imulq 16-128($np),%rax
1050 add %rax,$r2
1076 mov %rbx, %rax
1077 imulq -128($ap),%rax
1078 add %rax,$r1
1080 mov %rbx, %rax
1081 imulq 8-128($ap),%rax
1082 add %rax,$r2
1085 mov $r1, %rax
1121 mov %rax,%rdx
1122 imulq -128($np),%rax
1123 add %rax,$r1
1125 mov %rdx,%rax
1126 imulq 8-128($np),%rax
1127 add %rax,$r2
1162 mov %rbx,%rax
1163 imulq -128($ap),%rax
1164 add $r2,%rax
1167 mov %rax,$r2
1203 mov %rax,%rdx
1204 imulq -128($np),%rax
1205 add %rax,$r2
1244 mov $r3, %rax
1279 imulq -128($np),%rax
1280 add %rax,$r3
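
Note: source lines 1002-1280 repeat the pattern in the multiply path: here %rbx carries a word of one operand, imulq folds it into words of the other operand at -128($ap), and the %rax/%rdx reduction against $np proceeds as before. A sketch under the same naming assumptions (b_word and a[] are illustrative):

    #include <stdint.h>

    /* mov %rbx,%rax; imulq -128($ap),%rax;  add %rax,$r1
       mov %rbx,%rax; imulq 8-128($ap),%rax; add %rax,$r2 */
    void mul_word_acc(uint64_t r[3], const uint64_t a[2], uint64_t b_word)
    {
        r[1] += b_word * a[0];
        r[2] += b_word * a[1];
    }
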
1449 mov %rbp, %rax
1450 .cfi_def_cfa_register %rax
1454 movaps -0xd8(%rax),%xmm6
1455 movaps -0xc8(%rax),%xmm7
1456 movaps -0xb8(%rax),%xmm8
1457 movaps -0xa8(%rax),%xmm9
1458 movaps -0x98(%rax),%xmm10
1459 movaps -0x88(%rax),%xmm11
1460 movaps -0x78(%rax),%xmm12
1461 movaps -0x68(%rax),%xmm13
1462 movaps -0x58(%rax),%xmm14
1463 movaps -0x48(%rax),%xmm15
1466 mov -48(%rax),%r15
1468 mov -40(%rax),%r14
1470 mov -32(%rax),%r13
1472 mov -24(%rax),%r12
1474 mov -16(%rax),%rbp
1476 mov -8(%rax),%rbx
1478 lea (%rax),%rsp # restore %rsp
1497 xor %rax,%rax
1517 $code.=" add @T[-$l], %rax\n";
1522 mov %rax, 8*$i($out)
1523 mov @T[0], %rax
1549 and %rax,@T[-$k] # &0x1fffffff
1556 and %rax,@T[0]
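
Note: source lines 1497-1556 come from the Perl loop that emits the final carry pass: %rax is cleared, each limb absorbs the incoming carry (the generated add @T[-$l], %rax), the result is stored to 8*$i($out), and limbs are masked back to 29 bits per the &0x1fffffff comments (narrow limbs are presumably what lets the products above accumulate without overflow). A sketch of that normalization, assuming 29-bit limbs held in 64-bit words:

    #include <stddef.h>
    #include <stdint.h>

    #define LIMB_BITS 29
    #define LIMB_MASK ((1u << LIMB_BITS) - 1)   /* 0x1fffffff */

    /* Propagate carries upward and renormalize each word to 29 bits
       (the "and %rax,@T[...]" step). */
    void normalize(uint64_t *t, size_t n)
    {
        uint64_t carry = 0;
        for (size_t i = 0; i < n; i++) {
            t[i] += carry;
            carry = t[i] >> LIMB_BITS;
            t[i] &= LIMB_MASK;
        }
    }
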
1613 lea -0x88(%rsp),%rax
1616 .byte 0x48,0x8d,0x60,0xe0 # lea -0x20(%rax),%rsp
1617 .byte 0xc5,0xf8,0x29,0x70,0xe0 # vmovaps %xmm6,-0x20(%rax)
1618 .byte 0xc5,0xf8,0x29,0x78,0xf0 # vmovaps %xmm7,-0x10(%rax)
1619 .byte 0xc5,0x78,0x29,0x40,0x00 # vmovaps %xmm8,0(%rax)
1620 .byte 0xc5,0x78,0x29,0x48,0x10 # vmovaps %xmm9,0x10(%rax)
1621 .byte 0xc5,0x78,0x29,0x50,0x20 # vmovaps %xmm10,0x20(%rax)
1622 .byte 0xc5,0x78,0x29,0x58,0x30 # vmovaps %xmm11,0x30(%rax)
1623 .byte 0xc5,0x78,0x29,0x60,0x40 # vmovaps %xmm12,0x40(%rax)
1624 .byte 0xc5,0x78,0x29,0x68,0x50 # vmovaps %xmm13,0x50(%rax)
1625 .byte 0xc5,0x78,0x29,0x70,0x60 # vmovaps %xmm14,0x60(%rax)
1626 .byte 0xc5,0x78,0x29,0x78,0x70 # vmovaps %xmm15,0x70(%rax)
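
Note: source lines 1613-1626 write a prologue as raw .byte sequences; each run is a pre-encoded VEX instruction whose intended disassembly is given in its trailing comment (perlasm modules sometimes hand-encode AVX instructions this way, presumably so the file still assembles with toolchains that lack AVX support). Decoding the first store by hand agrees with the comment:

    /* c5 f8 29 70 e0
       c5 f8   two-byte VEX prefix: 128-bit, no SIMD prefix
       29      MOVAPS xmm/m128, xmm (store form)
       70      ModRM: mod=01 (disp8), reg=110 (%xmm6), rm=000 (%rax)
       e0      disp8 = -0x20
       =>      vmovaps %xmm6,-0x20(%rax)                              */
    static const unsigned char vex_store_xmm6[] =
        { 0xc5, 0xf8, 0x29, 0x70, 0xe0 };
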
1632 lea -128(%rsp),%rax # control u-op density
1644 vmovdqa %ymm0, 32*0+128(%rax)
1647 vmovdqa %ymm1, 32*1+128(%rax)
1650 vmovdqa %ymm2, 32*2+128(%rax)
1653 vmovdqa %ymm3, 32*3+128(%rax)
1656 vmovdqa %ymm0, 32*4+128(%rax)
1659 vmovdqa %ymm1, 32*5+128(%rax)
1662 vmovdqa %ymm2, 32*6+128(%rax)
1665 vmovdqa %ymm3, 32*7+128(%rax)
1688 vpand 32*0+128(%rax), %ymm0, %ymm0
1689 vpand 32*1+128(%rax), %ymm1, %ymm1
1690 vpand 32*2+128(%rax), %ymm2, %ymm2
1692 vpand 32*3+128(%rax), %ymm3, %ymm3
1698 vpand 32*4+128(%rax), %ymm0, %ymm0
1699 vpand 32*5+128(%rax), %ymm1, %ymm1
1700 vpand 32*6+128(%rax), %ymm2, %ymm2
1702 vpand 32*7+128(%rax), %ymm3, %ymm3
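
Note: source lines 1632-1702 look like a constant-time table lookup: eight 32-byte rows are written to a stack table through %rax (biased by -128, per its "control u-op density" comment), and on the read side every row is ANDed with a selection mask so all rows are touched regardless of the secret index (the mask setup and the OR that combines the survivors use registers other than %rax, so they do not appear in this listing). A plain-C sketch of the same select; the names and the 8x4 shape are assumptions:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Read all 8 rows; zero the unwanted ones with a branchlessly built
       all-ones/all-zeros mask (the vpand step) and OR-accumulate. */
    void ct_select(uint64_t out[4], const uint64_t table[8][4], size_t want)
    {
        uint64_t acc[4] = { 0, 0, 0, 0 };
        for (size_t i = 0; i < 8; i++) {
            uint64_t mask = 0 - (uint64_t)(i == want);
            for (size_t j = 0; j < 4; j++)
                acc[j] |= table[i][j] & mask;
        }
        memcpy(out, acc, sizeof acc);
    }
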
1816 mov 120($context),%rax # pull context->Rax
1837 cmovc %rbp,%rax
1839 mov -48(%rax),%r15
1840 mov -40(%rax),%r14
1841 mov -32(%rax),%r13
1842 mov -24(%rax),%r12
1843 mov -16(%rax),%rbp
1844 mov -8(%rax),%rbx
1852 lea -0xd8(%rax),%rsi # %xmm save area
1854 mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
1858 mov 8(%rax),%rdi
1859 mov 16(%rax),%rsi
1860 mov %rax,152($context) # restore context->Rsp
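
Note: source lines 1816-1860 belong to the Win64 SEH unwind handler: it pulls the frame pointer from context->Rax (offset 120 of the CONTEXT record), uses cmovc to take the frame from %rbp where appropriate (the comparison that sets the carry is outside this listing), replays the GPR restores into the context, sets up a 20-quadword copy of the ten-register %xmm save area (mov \$20,%ecx, i.e. 10*16 bytes), and finally writes the recovered stack pointer back to context->Rsp at offset 152. A hedged C rendering; Ctx is an illustrative stand-in for CONTEXT:

    #include <stdint.h>

    /* Only the fields these lines touch; the real CONTEXT offsets are
       the ones in the comments (120 = Rax, 152 = Rsp). */
    typedef struct {
        uint64_t Rax, Rbx, Rbp, R12, R13, R14, R15, Rsp;
    } Ctx;

    void replay_epilogue(Ctx *context)
    {
        uint64_t *frame = (uint64_t *)context->Rax; /* pull context->Rax */
        context->R15 = frame[-6];                   /* mov -48(%rax),%r15 */
        context->R14 = frame[-5];
        context->R13 = frame[-4];
        context->R12 = frame[-3];
        context->Rbp = frame[-2];
        context->Rbx = frame[-1];                   /* mov  -8(%rax),%rbx */
        context->Rsp = (uint64_t)frame;             /* restore context->Rsp */
    }
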