; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mcpu=pentium2 -mtriple=i686-apple-darwin8.8.0 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mcpu=pentium3 -mtriple=i686-apple-darwin8.8.0 | FileCheck %s --check-prefix=XMM
; RUN: llc < %s -mcpu=bdver1   -mtriple=i686-apple-darwin8.8.0 | FileCheck %s --check-prefix=YMM
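; pentium2 has no SSE, so the 32-byte clear in @t below must use scalar
; stores; pentium3 adds SSE1 (xorps/movaps through 128-bit xmm registers);
; bdver1 adds AVX (vxorps/vmovaps through 256-bit ymm registers).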

%struct.x = type { i16, i16 }

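; @t zeroes a 32-byte stack array of %struct.x (8 elements of 4 bytes each)
; with an align-8 llvm.memset, then passes the array's address to @foo.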
define void @t() nounwind  {
; X86-LABEL: t:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    subl $44, %esp
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, (%esp)
; X86-NEXT:    calll _foo
; X86-NEXT:    addl $44, %esp
; X86-NEXT:    retl
; X86-NEXT:    ## -- End function
;
; XMM-LABEL: t:
; XMM:       ## %bb.0: ## %entry
; XMM-NEXT:    subl $60, %esp
; XMM-NEXT:    xorps %xmm0, %xmm0
; XMM-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
; XMM-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
; XMM-NEXT:    leal {{[0-9]+}}(%esp), %eax
; XMM-NEXT:    movl %eax, (%esp)
; XMM-NEXT:    calll _foo
; XMM-NEXT:    addl $60, %esp
; XMM-NEXT:    retl
; XMM-NEXT:    ## -- End function
;
; YMM-LABEL: t:
; YMM:       ## %bb.0: ## %entry
; YMM-NEXT:    pushl %ebp
; YMM-NEXT:    movl %esp, %ebp
; YMM-NEXT:    andl $-32, %esp
; YMM-NEXT:    subl $96, %esp
; YMM-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; YMM-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
; YMM-NEXT:    leal {{[0-9]+}}(%esp), %eax
; YMM-NEXT:    movl %eax, (%esp)
; YMM-NEXT:    vzeroupper
; YMM-NEXT:    calll _foo
; YMM-NEXT:    movl %ebp, %esp
; YMM-NEXT:    popl %ebp
; YMM-NEXT:    retl
; YMM-NEXT:    ## -- End function
entry:
	%up_mvd = alloca [8 x %struct.x]		; <[8 x %struct.x]*> [#uses=2]
	%up_mvd116 = getelementptr [8 x %struct.x], [8 x %struct.x]* %up_mvd, i32 0, i32 0		; <%struct.x*> [#uses=1]
	%tmp110117 = bitcast [8 x %struct.x]* %up_mvd to i8*		; <i8*> [#uses=1]

	call void @llvm.memset.p0i8.i64(i8* align 8 %tmp110117, i8 0, i64 32, i1 false)
	call void @foo( %struct.x* %up_mvd116 ) nounwind
	ret void
}
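; Note the YMM version above: andl $-32 realigns the stack so the 32-byte
; vmovaps store is aligned, and vzeroupper is emitted before the call to
; avoid AVX-to-SSE transition penalties in the callee.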

declare void @foo(%struct.x*)

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
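; llvm.memset.p0i8.i64 takes (destination, byte value, length, is-volatile);
; the destination's alignment is carried by an align attribute on the pointer
; argument, as in the align 8 call in @t above.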

; Ensure that an @llvm.memset with no known destination alignment (the old
; explicit alignment of '0') is lowered to unaligned stores.
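; A hypothetical C equivalent (a reconstruction for illustration, not the
; original source; it assumes only that 'a' carries no alignment info):
;
;   void PR15348(char *a) { memset(a, 0, 17); }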
define void @PR15348(i8* %a) {
; X86-LABEL: PR15348:
; X86:       ## %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movb $0, 16(%eax)
; X86-NEXT:    movl $0, 12(%eax)
; X86-NEXT:    movl $0, 8(%eax)
; X86-NEXT:    movl $0, 4(%eax)
; X86-NEXT:    movl $0, (%eax)
; X86-NEXT:    retl
;
; XMM-LABEL: PR15348:
; XMM:       ## %bb.0:
; XMM-NEXT:    movl {{[0-9]+}}(%esp), %eax
; XMM-NEXT:    movb $0, 16(%eax)
; XMM-NEXT:    movl $0, 12(%eax)
; XMM-NEXT:    movl $0, 8(%eax)
; XMM-NEXT:    movl $0, 4(%eax)
; XMM-NEXT:    movl $0, (%eax)
; XMM-NEXT:    retl
;
; YMM-LABEL: PR15348:
; YMM:       ## %bb.0:
; YMM-NEXT:    movl {{[0-9]+}}(%esp), %eax
; YMM-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; YMM-NEXT:    vmovups %xmm0, (%eax)
; YMM-NEXT:    movb $0, 16(%eax)
; YMM-NEXT:    retl
  call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 17, i1 false)
  ret void
}
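; With no known alignment, pentium2 and pentium3 both fall back to four
; scalar movl stores plus a trailing movb for the 17th byte, while bdver1
; uses an unaligned 16-byte vmovups followed by a single movb.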