; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s -check-prefix=X86

%struct.SA = type { i32, i32, i32, i32, i32 }

define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-LABEL: foo:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movl 16(%rdi), %eax
; X64-NEXT:    movl (%rdi), %ecx
; X64-NEXT:    addl %eax, %ecx
; X64-NEXT:    addl %eax, %ecx
; X64-NEXT:    addl %eax, %ecx
; X64-NEXT:    leal (%rcx,%rax), %edx
; X64-NEXT:    leal 1(%rax,%rcx), %ecx
; X64-NEXT:    movl %ecx, 12(%rdi)
; X64-NEXT:    leal 1(%rax,%rdx), %eax
; X64-NEXT:    movl %eax, 16(%rdi)
; X64-NEXT:    retq
;
; X86-LABEL: foo:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %esi
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    .cfi_offset %esi, -8
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl 16(%eax), %ecx
; X86-NEXT:    movl (%eax), %edx
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    leal 1(%ecx,%edx), %esi
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    movl %esi, 12(%eax)
; X86-NEXT:    leal 1(%ecx,%edx), %ecx
; X86-NEXT:    movl %ecx, 16(%eax)
; X86-NEXT:    popl %esi
; X86-NEXT:    .cfi_def_cfa_offset 4
; X86-NEXT:    retl
entry:
  %h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0
  %0 = load i32, i32* %h0, align 8
  %h3 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 3
  %h4 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 4
  %1 = load i32, i32* %h4, align 8
  %add = add i32 %0, 1
  %add1 = add i32 %add, %1
  %add2 = add i32 %add1, %1
  %add3 = add i32 %add2, %1
  %add4 = add i32 %add3, %1
  store i32 %add4, i32* %h3, align 4
  %add29 = add i32 %add4, %1
  store i32 %add29, i32* %h4, align 8
  ret void
}

define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-LABEL: foo_loop:
; X64:       # %bb.0: # %entry
; X64-NEXT:    .p2align 4, 0x90
; X64-NEXT:  .LBB1_1: # %loop
; X64-NEXT:    # =>This Inner Loop Header: Depth=1
; X64-NEXT:    movl (%rdi), %ecx
; X64-NEXT:    movl 16(%rdi), %eax
; X64-NEXT:    leal 1(%rcx,%rax), %edx
; X64-NEXT:    movl %edx, 12(%rdi)
; X64-NEXT:    decl %esi
; X64-NEXT:    jne .LBB1_1
; X64-NEXT:  # %bb.2: # %exit
; X64-NEXT:    addl %eax, %ecx
; X64-NEXT:    leal 1(%rax,%rcx), %ecx
; X64-NEXT:    addl %eax, %ecx
; X64-NEXT:    addl %eax, %ecx
; X64-NEXT:    addl %eax, %ecx
; X64-NEXT:    addl %eax, %ecx
; X64-NEXT:    addl %eax, %ecx
; X64-NEXT:    addl %eax, %ecx
; X64-NEXT:    movl %ecx, 16(%rdi)
; X64-NEXT:    retq
;
; X86-LABEL: foo_loop:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %edi
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    pushl %esi
; X86-NEXT:    .cfi_def_cfa_offset 12
; X86-NEXT:    .cfi_offset %esi, -12
; X86-NEXT:    .cfi_offset %edi, -8
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    .p2align 4, 0x90
; X86-NEXT:  .LBB1_1: # %loop
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl (%eax), %esi
; X86-NEXT:    movl 16(%eax), %ecx
; X86-NEXT:    leal 1(%esi,%ecx), %edi
; X86-NEXT:    movl %edi, 12(%eax)
; X86-NEXT:    decl %edx
; X86-NEXT:    jne .LBB1_1
; X86-NEXT:  # %bb.2: # %exit
; X86-NEXT:    addl %ecx, %esi
; X86-NEXT:    leal 1(%ecx,%esi), %edx
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    movl %edx, 16(%eax)
; X86-NEXT:    popl %esi
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    popl %edi
; X86-NEXT:    .cfi_def_cfa_offset 4
; X86-NEXT:    retl
entry:
  br label %loop

loop:
  %iter = phi i32 [ %n, %entry ], [ %iter.ctr, %loop ]
  %h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0
  %0 = load i32, i32* %h0, align 8
  %h3 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 3
  %h4 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 4
  %1 = load i32, i32* %h4, align 8
  %add = add i32 %0, 1
  %add4 = add i32 %add, %1
  store i32 %add4, i32* %h3, align 4
  %add291 = add i32 %add4, %1
  %add292 = add i32 %add291, %1
  %add293 = add i32 %add292, %1
  %add294 = add i32 %add293, %1
  %add295 = add i32 %add294, %1
  %add296 = add i32 %add295, %1
  %add29 = add i32 %add296, %1
  %iter.ctr = sub i32 %iter, 1
  %res = icmp ne i32 %iter.ctr, 0
  br i1 %res, label %loop, label %exit

exit:
  store i32 %add29, i32* %h4, align 8
  ret void
}