; NOTE(review): HTML page-navigation residue ("Home / Line# / Scopes# / Navigate / Raw / Download") removed from the top of this scraped test file.
1; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
2
3; Check that we properly realign the stack. While 4-byte access is all
4; that is ever needed, some transformations rely on the known bits from the alignment of the pointer (e.g. widening an access based on the proven alignment).
5
6
7; 128 byte object
8; 4 byte emergency stack slot
9; = 144 bytes with padding between them
10
11; GCN-LABEL: {{^}}needs_align16_default_stack_align:
12; GCN: s_mov_b32 s5, s32
13; GCN-NOT: s32
14
15; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
16; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
17; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
18; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
19; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
20
21; GCN-NOT: s32
22
23; GCN: ; ScratchSize: 144
; 128-byte object with align 16 under the default stack alignment; the
; GCN checks above assert s32 is not otherwise touched, i.e. no dynamic
; realignment sequence is emitted for this case.
24define void @needs_align16_default_stack_align(i32 %idx) #0 {
  ; 8 x <4 x i32> = 128 bytes in addrspace(5); lowered to scratch per the
  ; "ScratchSize: 144" check (128-byte object + emergency slot + padding).
25  %alloca.align16 = alloca [8 x <4 x i32>], align 16, addrspace(5)
  ; Index with the runtime %idx so the address cannot fold to a constant offset.
26  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  ; volatile keeps the store (and therefore the alloca) from being optimized away.
27  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 16
28  ret void
29}
30
31; GCN-LABEL: {{^}}needs_align16_stack_align4:
32; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0x3c0{{$}}
33; GCN: s_and_b32 s5, s6, 0xfffffc00
34; GCN: s_add_u32 s32, s32, 0x2800{{$}}
35
36; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
37; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
38; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
39; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
40; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
41
42; GCN: s_sub_u32 s32, s32, 0x2800
43
44; GCN: ; ScratchSize: 160
; Same 128-byte align-16 object, but attribute group #2 (alignstack=4)
; lowers the assumed incoming stack alignment, so the prologue must
; realign dynamically — see the s_and_b32 mask and the s_add_u32/s_sub_u32
; frame setup/teardown checks above ("ScratchSize: 160" includes the
; extra realignment padding).
45define void @needs_align16_stack_align4(i32 %idx) #2 {
46  %alloca.align16 = alloca [8 x <4 x i32>], align 16, addrspace(5)
  ; Runtime-indexed GEP prevents folding the access to a constant offset.
47  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  ; volatile keeps the store and the alloca live through optimization.
48  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 16
49  ret void
50}
51
52; GCN-LABEL: {{^}}needs_align32:
53; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0x7c0{{$}}
54; GCN: s_and_b32 s5, s6, 0xfffff800
55; GCN: s_add_u32 s32, s32, 0x3000{{$}}
56
57; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
58; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
59; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
60; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
61; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
62
63; GCN: s_sub_u32 s32, s32, 0x3000
64
65; GCN: ; ScratchSize: 192
; align-32 variant: the object alignment exceeds the default, so the
; prologue masks the stack pointer (0xfffff800 check above) and the
; padded frame grows to "ScratchSize: 192".
66define void @needs_align32(i32 %idx) #0 {
  ; NOTE(review): local is named %alloca.align16 but is actually align 32 —
  ; name appears copied from the earlier tests (left as-is; renaming would
  ; be a non-doc change in this test).
67  %alloca.align16 = alloca [8 x <4 x i32>], align 32, addrspace(5)
68  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  ; volatile keeps the store and the alloca live through optimization.
69  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 32
70  ret void
71}
72
73; GCN-LABEL: {{^}}force_realign4:
74; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0xc0{{$}}
75; GCN: s_and_b32 s5, s6, 0xffffff00
76; GCN: s_add_u32 s32, s32, 0xd00{{$}}
77
78; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
79; GCN: s_sub_u32 s32, s32, 0xd00
80
81; GCN: ; ScratchSize: 52
; Attribute group #1 carries "stackrealign", forcing a realignment
; sequence (s_and_b32 mask + s_add_u32/s_sub_u32 checks above) even
; though the object itself only needs align 4.
82define void @force_realign4(i32 %idx) #1 {
  ; NOTE(review): local named %alloca.align16 despite being an align-4
  ; [8 x i32] — stale name copied from the vector tests above.
83  %alloca.align16 = alloca [8 x i32], align 4, addrspace(5)
84  %gep0 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  ; volatile keeps the store and the alloca live through optimization.
85  store volatile i32 3, i32 addrspace(5)* %gep0, align 4
86  ret void
87}
88
89; GCN-LABEL: {{^}}kernel_call_align16_from_8:
90; GCN: s_add_u32 s32, s8, 0x400{{$}}
91; GCN-NOT: s32
92; GCN: s_swappc_b64
; Kernel entry point calling a stack-realigning function; the checks
; above assert s32 is initialized once (s_add_u32 s32, s8, 0x400 —
; presumably the stack-pointer setup for the call, TODO confirm against
; the generated prologue) before s_swappc_b64 and not otherwise modified.
93define amdgpu_kernel void @kernel_call_align16_from_8() #0 {
  ; A small live alloca so the kernel itself has nonzero stack usage
  ; before making the call.
94  %alloca = alloca i32, align 4, addrspace(5)
95  store volatile i32 2, i32 addrspace(5)* %alloca
96  call void @needs_align16_default_stack_align(i32 1)
97  ret void
98}
99
100; The call sequence should keep the stack on call aligned to 4
101; GCN-LABEL: {{^}}kernel_call_align16_from_5:
102; GCN: s_add_u32 s32, s8, 0x400
103; GCN: s_swappc_b64
; Kernel with only a 1-byte, align-1 alloca calling the align-16 callee;
; per the comment above, the call sequence must still keep the stack
; pointer suitably aligned at the call (s_add_u32 s32, s8, 0x400 check).
104define amdgpu_kernel void @kernel_call_align16_from_5() {
  ; Minimal (i8, align 1) stack object — deliberately under-aligned
  ; relative to the callee's needs.
105  %alloca0 = alloca i8, align 1, addrspace(5)
106  store volatile i8 2, i8  addrspace(5)* %alloca0
107
108  call void @needs_align16_default_stack_align(i32 1)
109  ret void
110}
111
112; GCN-LABEL: {{^}}kernel_call_align4_from_5:
113; GCN: s_add_u32 s32, s8, 0x400
114; GCN: s_swappc_b64
; Same 1-byte-alloca kernel shape, but calling the alignstack=4 callee
; (@needs_align16_stack_align4); checks above assert the same single
; stack-pointer setup before s_swappc_b64.
115define amdgpu_kernel void @kernel_call_align4_from_5() {
  ; Minimal (i8, align 1) stack object before the call.
116  %alloca0 = alloca i8, align 1, addrspace(5)
117  store volatile i8 2, i8  addrspace(5)* %alloca0
118
119  call void @needs_align16_stack_align4(i32 1)
120  ret void
121}
122
; #0: plain — default stack alignment behavior.
123attributes #0 = { noinline nounwind }
; #1: "stackrealign" — forces the prologue to realign the stack (used by @force_realign4).
124attributes #1 = { noinline nounwind "stackrealign" }
; #2: alignstack=4 — caps the assumed stack alignment at 4 bytes
; (attribute-group spelling of alignstack; used by @needs_align16_stack_align4).
125attributes #2 = { noinline nounwind alignstack=4 }
126