; • Home
; • Line#
; • Scopes#
; • Navigate
; • Raw
; • Download
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -mtriple=i386-unknown | FileCheck %s --check-prefix=X32
3; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
4
; Verify lowering of a 256-bit integer add: on i386 it becomes one addl
; followed by seven adcl's (carry-chained 32-bit limbs, with two limbs
; spilled because registers run out); on x86-64 it is one addq plus three
; adcq's over 64-bit limbs, operating directly on memory at *%p.
; NOTE(review): the CHECK lines below are autogenerated by
; utils/update_llc_test_checks.py — regenerate rather than hand-edit.
define void @add(i256* %p, i256* %q) nounwind {
; X32-LABEL: add:
; X32:       # %bb.0:
; X32-NEXT:    pushl %ebp
; X32-NEXT:    pushl %ebx
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $8, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl 28(%eax), %ecx
; X32-NEXT:    movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT:    movl 24(%eax), %ecx
; X32-NEXT:    movl %ecx, (%esp) # 4-byte Spill
; X32-NEXT:    movl 20(%eax), %esi
; X32-NEXT:    movl 16(%eax), %edi
; X32-NEXT:    movl 12(%eax), %ebx
; X32-NEXT:    movl 8(%eax), %ebp
; X32-NEXT:    movl (%eax), %ecx
; X32-NEXT:    movl 4(%eax), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    addl %ecx, (%eax)
; X32-NEXT:    adcl %edx, 4(%eax)
; X32-NEXT:    adcl %ebp, 8(%eax)
; X32-NEXT:    adcl %ebx, 12(%eax)
; X32-NEXT:    adcl %edi, 16(%eax)
; X32-NEXT:    adcl %esi, 20(%eax)
; X32-NEXT:    movl (%esp), %ecx # 4-byte Reload
; X32-NEXT:    adcl %ecx, 24(%eax)
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; X32-NEXT:    adcl %ecx, 28(%eax)
; X32-NEXT:    addl $8, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    popl %edi
; X32-NEXT:    popl %ebx
; X32-NEXT:    popl %ebp
; X32-NEXT:    retl
;
; X64-LABEL: add:
; X64:       # %bb.0:
; X64-NEXT:    movq 24(%rsi), %rax
; X64-NEXT:    movq 16(%rsi), %rcx
; X64-NEXT:    movq (%rsi), %rdx
; X64-NEXT:    movq 8(%rsi), %rsi
; X64-NEXT:    addq %rdx, (%rdi)
; X64-NEXT:    adcq %rsi, 8(%rdi)
; X64-NEXT:    adcq %rcx, 16(%rdi)
; X64-NEXT:    adcq %rax, 24(%rdi)
; X64-NEXT:    retq
  %a = load i256, i256* %p        ; destination operand (updated in place)
  %b = load i256, i256* %q
  %c = add i256 %a, %b
  store i256 %c, i256* %p         ; *p += *q
  ret void
}
; Verify lowering of a 256-bit integer subtract: the mirror of @add above,
; using subl + chained sbbl's (borrow-propagating) on i386 and subq + three
; sbbq's on x86-64, operating directly on memory at *%p.
; NOTE(review): the CHECK lines below are autogenerated by
; utils/update_llc_test_checks.py — regenerate rather than hand-edit.
define void @sub(i256* %p, i256* %q) nounwind {
; X32-LABEL: sub:
; X32:       # %bb.0:
; X32-NEXT:    pushl %ebp
; X32-NEXT:    pushl %ebx
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $8, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl 28(%eax), %ecx
; X32-NEXT:    movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT:    movl 24(%eax), %ecx
; X32-NEXT:    movl %ecx, (%esp) # 4-byte Spill
; X32-NEXT:    movl 20(%eax), %esi
; X32-NEXT:    movl 16(%eax), %edi
; X32-NEXT:    movl 12(%eax), %ebx
; X32-NEXT:    movl 8(%eax), %ebp
; X32-NEXT:    movl (%eax), %ecx
; X32-NEXT:    movl 4(%eax), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    subl %ecx, (%eax)
; X32-NEXT:    sbbl %edx, 4(%eax)
; X32-NEXT:    sbbl %ebp, 8(%eax)
; X32-NEXT:    sbbl %ebx, 12(%eax)
; X32-NEXT:    sbbl %edi, 16(%eax)
; X32-NEXT:    sbbl %esi, 20(%eax)
; X32-NEXT:    movl (%esp), %ecx # 4-byte Reload
; X32-NEXT:    sbbl %ecx, 24(%eax)
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; X32-NEXT:    sbbl %ecx, 28(%eax)
; X32-NEXT:    addl $8, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    popl %edi
; X32-NEXT:    popl %ebx
; X32-NEXT:    popl %ebp
; X32-NEXT:    retl
;
; X64-LABEL: sub:
; X64:       # %bb.0:
; X64-NEXT:    movq 24(%rsi), %rax
; X64-NEXT:    movq 16(%rsi), %rcx
; X64-NEXT:    movq (%rsi), %rdx
; X64-NEXT:    movq 8(%rsi), %rsi
; X64-NEXT:    subq %rdx, (%rdi)
; X64-NEXT:    sbbq %rsi, 8(%rdi)
; X64-NEXT:    sbbq %rcx, 16(%rdi)
; X64-NEXT:    sbbq %rax, 24(%rdi)
; X64-NEXT:    retq
  %a = load i256, i256* %p        ; destination operand (updated in place)
  %b = load i256, i256* %q
  %c = sub i256 %a, %b
  store i256 %c, i256* %p         ; *p -= *q
  ret void
}
113