// RUN: %clang_cc1 -emit-llvm -triple x86_64 -O3 -o %t.opt.ll %s \
// RUN:   -fdump-record-layouts 2> %t.dump.txt
// RUN: FileCheck -check-prefix=CHECK-RECORD < %t.dump.txt %s
// RUN: FileCheck -check-prefix=CHECK-OPT < %t.opt.ll %s

/****/

// Check that we don't read off the end of a packed 24-bit structure.
// PR6176

// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: struct s0
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD: LLVMType:%struct.s0 = type <{ [3 x i8] }>
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Size:24 IsSigned:1
// CHECK-RECORD: NumComponents:2 Components: [
// CHECK-RECORD: <AccessInfo FieldIndex:0 FieldByteOffset:0 FieldBitStart:0 AccessWidth:16
// CHECK-RECORD: AccessAlignment:1 TargetBitOffset:0 TargetBitWidth:16>
// CHECK-RECORD: <AccessInfo FieldIndex:0 FieldByteOffset:2 FieldBitStart:0 AccessWidth:8
// CHECK-RECORD: AccessAlignment:1 TargetBitOffset:16 TargetBitWidth:8>
struct __attribute((packed)) s0 {
  int f0 : 24;
};

struct s0 g0 = { 0xdeadbeef };

int f0_load(struct s0 *a0) {
  int size_check[sizeof(struct s0) == 3 ? 1 : -1];
  return a0->f0;
}
int f0_store(struct s0 *a0) {
  return (a0->f0 = 1);
}
int f0_reload(struct s0 *a0) {
  return (a0->f0 += 1);
}

// CHECK-OPT: define i64 @test_0()
// CHECK-OPT: ret i64 1
// CHECK-OPT: }
unsigned long long test_0() {
  struct s0 g0 = { 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g0.f0;
  res ^= f0_load(&g0) ^ f0_store(&g0) ^ f0_reload(&g0);
  res ^= g0.f0;
  return res;
}
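
// Illustrative sketch (not part of the original test): the two AccessInfo
// components in the dump above describe one way to touch all 24 bits without
// reading a fourth byte -- a 16-bit access at byte 0 followed by an 8-bit
// access at byte 2. The hand-rolled helper below mirrors that split; it
// assumes a little-endian target and its name is hypothetical.
static int f0_load_by_parts(const struct s0 *p) {
  const unsigned char *b = (const unsigned char *) p;
  unsigned v = (unsigned) b[0] | ((unsigned) b[1] << 8)   // 16-bit piece
             | ((unsigned) b[2] << 16);                   // 8-bit piece
  return (int) (v << 8) >> 8;                             // sign-extend 24 -> 32 bits
}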

/****/

// PR5591

// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: struct s1
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD: LLVMType:%struct.s1 = type <{ [2 x i8], i8 }>
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Size:10 IsSigned:1
// CHECK-RECORD: NumComponents:1 Components: [
// CHECK-RECORD: <AccessInfo FieldIndex:0 FieldByteOffset:0 FieldBitStart:0 AccessWidth:16
// CHECK-RECORD: AccessAlignment:1 TargetBitOffset:0 TargetBitWidth:10>
// CHECK-RECORD: ]>
// CHECK-RECORD: <CGBitFieldInfo Size:10 IsSigned:1
// CHECK-RECORD: NumComponents:2 Components: [
// CHECK-RECORD: <AccessInfo FieldIndex:0 FieldByteOffset:0 FieldBitStart:10 AccessWidth:16
// CHECK-RECORD: AccessAlignment:1 TargetBitOffset:0 TargetBitWidth:6>
// CHECK-RECORD: <AccessInfo FieldIndex:0 FieldByteOffset:2 FieldBitStart:0 AccessWidth:8
// CHECK-RECORD: AccessAlignment:1 TargetBitOffset:6 TargetBitWidth:4>

#pragma pack(push)
#pragma pack(1)
struct __attribute((packed)) s1 {
  signed f0 : 10;
  signed f1 : 10;
};
#pragma pack(pop)

struct s1 g1 = { 0xdeadbeef, 0xdeadbeef };

int f1_load(struct s1 *a0) {
  int size_check[sizeof(struct s1) == 3 ? 1 : -1];
  return a0->f1;
}
int f1_store(struct s1 *a0) {
  return (a0->f1 = 1234);
}
int f1_reload(struct s1 *a0) {
  return (a0->f1 += 1234);
}

// CHECK-OPT: define i64 @test_1()
// CHECK-OPT: ret i64 210
// CHECK-OPT: }
unsigned long long test_1() {
  struct s1 g1 = { 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g1.f0 ^ g1.f1;
  res ^= f1_load(&g1) ^ f1_store(&g1) ^ f1_reload(&g1);
  res ^= g1.f0 ^ g1.f1;
  return res;
}
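
// Illustrative sketch (not part of the original test): per the CHECK-RECORD
// dump above, f1 starts at bit 10, so its low 6 bits come from the 16-bit
// access at byte 0 and its top 4 bits from the 8-bit access at byte 2. The
// helper below reassembles it the same way; it assumes a little-endian target
// and its name is hypothetical.
static int f1_load_by_parts(const struct s1 *p) {
  const unsigned char *b = (const unsigned char *) p;
  unsigned lo = ((unsigned) b[0] | ((unsigned) b[1] << 8)) >> 10; // 6 bits from the 16-bit access
  unsigned hi = (unsigned) b[2] & 0xF;                            // 4 bits from the 8-bit access
  unsigned v = lo | (hi << 6);                                    // 10-bit value of f1
  return (int) (v << 22) >> 22;                                   // sign-extend 10 -> 32 bits
}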

/****/

// Check that we don't access beyond the bounds of a union.
//
// PR5567

// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: union u2
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD: LLVMType:%union.u2 = type <{ i8 }>
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Size:3 IsSigned:0
// CHECK-RECORD: NumComponents:1 Components: [
// CHECK-RECORD: <AccessInfo FieldIndex:0 FieldByteOffset:0 FieldBitStart:0 AccessWidth:8
// CHECK-RECORD: AccessAlignment:1 TargetBitOffset:0 TargetBitWidth:3>

union __attribute__((packed)) u2 {
  unsigned long long f0 : 3;
};

union u2 g2 = { 0xdeadbeef };

int f2_load(union u2 *a0) {
  return a0->f0;
}
int f2_store(union u2 *a0) {
  return (a0->f0 = 1234);
}
int f2_reload(union u2 *a0) {
  return (a0->f0 += 1234);
}

// CHECK-OPT: define i64 @test_2()
// CHECK-OPT: ret i64 2
// CHECK-OPT: }
unsigned long long test_2() {
  union u2 g2 = { 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g2.f0;
  res ^= f2_load(&g2) ^ f2_store(&g2) ^ f2_reload(&g2);
  res ^= g2.f0;
  return res;
}
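
// Illustrative sketch (not part of the original test): sizeof(union u2) is 1,
// so the only in-bounds way to service the 3-bit field is a single 8-bit
// access; anything wider would read past the end of the object, which is the
// failure mode PR5567 describes. Assumes a little-endian target; the helper
// name is hypothetical.
static unsigned f2_load_low_bits(const union u2 *p) {
  unsigned char b;
  __builtin_memcpy(&b, p, 1); // exactly one byte, never beyond the union
  return b & 0x7;             // f0 occupies the low three bits
}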

/***/

// PR5039

struct s3 {
  long long f0 : 32;
  long long f1 : 32;
};

struct s3 g3 = { 0xdeadbeef, 0xdeadbeef };

int f3_load(struct s3 *a0) {
  a0->f0 = 1;
  return a0->f0;
}
int f3_store(struct s3 *a0) {
  a0->f0 = 1;
  return (a0->f0 = 1234);
}
int f3_reload(struct s3 *a0) {
  a0->f0 = 1;
  return (a0->f0 += 1234);
}

// CHECK-OPT: define i64 @test_3()
// CHECK-OPT: ret i64 -559039940
// CHECK-OPT: }
unsigned long long test_3() {
  struct s3 g3 = { 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g3.f0 ^ g3.f1;
  res ^= f3_load(&g3) ^ f3_store(&g3) ^ f3_reload(&g3);
  res ^= g3.f0 ^ g3.f1;
  return res;
}

/***/

// This is a case where the bitfield access will straddle an alignment boundary
// of its underlying type.

struct s4 {
  unsigned f0 : 16;
  unsigned f1 : 28 __attribute__ ((packed));
};

struct s4 g4 = { 0xdeadbeef, 0xdeadbeef };

int f4_load(struct s4 *a0) {
  return a0->f0 ^ a0->f1;
}
int f4_store(struct s4 *a0) {
  return (a0->f0 = 1234) ^ (a0->f1 = 5678);
}
int f4_reload(struct s4 *a0) {
  return (a0->f0 += 1234) ^ (a0->f1 += 5678);
}

// CHECK-OPT: define i64 @test_4()
// CHECK-OPT: ret i64 4860
// CHECK-OPT: }
unsigned long long test_4() {
  struct s4 g4 = { 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g4.f0 ^ g4.f1;
  res ^= f4_load(&g4) ^ f4_store(&g4) ^ f4_reload(&g4);
  res ^= g4.f0 ^ g4.f1;
  return res;
}
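
// Illustrative sketch (not part of the original test): with the packed
// attribute, f1's 28 bits sit at bit offsets 16..43, so the field straddles
// the natural 32-bit boundary of its underlying type and no single aligned
// 32-bit access can cover it. The helper below gathers it from bytes 2..5
// instead; it assumes a little-endian target and its name is hypothetical.
static unsigned f4_load_f1_by_parts(const struct s4 *p) {
  const unsigned char *b = (const unsigned char *) p;
  unsigned v = (unsigned) b[2] | ((unsigned) b[3] << 8)
             | ((unsigned) b[4] << 16) | ((unsigned) b[5] << 24);
  return v & ((1u << 28) - 1); // the 28 bits starting at bit 16 are f1
}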

/***/

struct s5 {
  unsigned f0 : 2;
  _Bool f1 : 1;
  _Bool f2 : 1;
};

struct s5 g5 = { 0xdeadbeef, 0xdeadbeef };

int f5_load(struct s5 *a0) {
  return a0->f0 ^ a0->f1;
}
int f5_store(struct s5 *a0) {
  return (a0->f0 = 0xF) ^ (a0->f1 = 0xF) ^ (a0->f2 = 0xF);
}
int f5_reload(struct s5 *a0) {
  return (a0->f0 += 0xF) ^ (a0->f1 += 0xF) ^ (a0->f2 += 0xF);
}

// CHECK-OPT: define i64 @test_5()
// CHECK-OPT: ret i64 2
// CHECK-OPT: }
unsigned long long test_5() {
  struct s5 g5 = { 0xdeadbeef, 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g5.f0 ^ g5.f1 ^ g5.f2;
  res ^= f5_load(&g5) ^ f5_store(&g5) ^ f5_reload(&g5);
  res ^= g5.f0 ^ g5.f1 ^ g5.f2;
  return res;
}

/***/

struct s6 {
  _Bool f0 : 2;
};

struct s6 g6 = { 0xF };

int f6_load(struct s6 *a0) {
  return a0->f0;
}
int f6_store(struct s6 *a0) {
  return a0->f0 = 0x0;
}
int f6_reload(struct s6 *a0) {
  return (a0->f0 += 0xF);
}

// CHECK-OPT: define zeroext i1 @test_6()
// CHECK-OPT: ret i1 true
// CHECK-OPT: }
_Bool test_6() {
  struct s6 g6 = { 0xF };
  unsigned long long res = 0;
  res ^= g6.f0;
  res ^= f6_load(&g6);
  res ^= g6.f0;
  return res;
}

/***/

// Check that we compute the best alignment possible for each access.
//
// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: struct s7
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD: LLVMType:%struct.s7 = type { i32, i32, i32, i8, [3 x i8], [4 x i8], [12 x i8] }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Size:5 IsSigned:1
// CHECK-RECORD: NumComponents:1 Components: [
// CHECK-RECORD: <AccessInfo FieldIndex:0 FieldByteOffset:12 FieldBitStart:0 AccessWidth:32
// CHECK-RECORD: AccessAlignment:4 TargetBitOffset:0 TargetBitWidth:5>
// CHECK-RECORD: ]>
// CHECK-RECORD: <CGBitFieldInfo Size:29 IsSigned:1
// CHECK-RECORD: NumComponents:1 Components: [
// CHECK-RECORD: <AccessInfo FieldIndex:0 FieldByteOffset:16 FieldBitStart:0 AccessWidth:32
// CHECK-RECORD: AccessAlignment:16 TargetBitOffset:0 TargetBitWidth:29>

struct __attribute__((aligned(16))) s7 {
  int a, b, c;
  int f0 : 5;
  int f1 : 29;
};

int f7_load(struct s7 *a0) {
  return a0->f0;
}
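
// Illustrative sketch (not part of the original test): because struct s7 is
// 16-byte aligned, the 32-bit access covering f1 at byte offset 16 can itself
// be assumed 16-byte aligned, while the access covering f0 at byte offset 12
// only gets natural 4-byte alignment -- exactly the AccessAlignment values in
// the dump above. The compile-time checks below restate the struct-level
// facts behind that; the array names are hypothetical.
static int s7_align_check[__alignof__(struct s7) == 16 ? 1 : -1];
static int s7_size_check[sizeof(struct s7) == 32 ? 1 : -1];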

/***/

// This is a case where we narrow the access width immediately.

struct __attribute__((packed)) s8 {
  char f0 : 4;
  char f1;
  int f2 : 4;
  char f3 : 4;
};

struct s8 g8 = { 0xF };

int f8_load(struct s8 *a0) {
  return a0->f0 ^ a0->f2 ^ a0->f3;
}
int f8_store(struct s8 *a0) {
  return (a0->f0 = 0xFD) ^ (a0->f2 = 0xFD) ^ (a0->f3 = 0xFD);
}
int f8_reload(struct s8 *a0) {
  return (a0->f0 += 0xFD) ^ (a0->f2 += 0xFD) ^ (a0->f3 += 0xFD);
}

// CHECK-OPT: define i32 @test_8()
// CHECK-OPT: ret i32 -3
// CHECK-OPT: }
unsigned test_8() {
  struct s8 g8 = { 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g8.f0 ^ g8.f2 ^ g8.f3;
  res ^= f8_load(&g8) ^ f8_store(&g8) ^ f8_reload(&g8);
  res ^= g8.f0 ^ g8.f2 ^ g8.f3;
  return res;
}
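
// Illustrative sketch (not part of the original test): each 4-bit field keeps
// only the low nibble of whatever is stored into it, so assigning 0xFD leaves
// 0xD, which reads back as -3 in 4-bit two's complement for the signed fields
// used here. The helper below performs that truncation in plain arithmetic;
// its name is hypothetical.
static int truncate_to_signed_nibble(int v) {
  v &= 0xF;               // keep the low 4 bits, as the bit-field store does
  return (v ^ 0x8) - 0x8; // sign-extend bit 3 (0xD -> -3)
}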

/***/

// This is another case where we narrow the access width immediately.
//
// <rdar://problem/7893760>

struct __attribute__((packed)) s9 {
  unsigned f0 : 7;
  unsigned f1 : 7;
  unsigned f2 : 7;
  unsigned f3 : 7;
  unsigned f4 : 7;
  unsigned f5 : 7;
  unsigned f6 : 7;
  unsigned f7 : 7;
};

int f9_load(struct s9 *a0) {
  return a0->f7;
}
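
// Illustrative sketch (not part of the original test): the eight 7-bit fields
// pack into exactly 56 bits, so the struct is 7 bytes and f7 lives entirely in
// the last byte (bits 49..55). Any access wider than 8 bits at that offset
// would run past the end of the object, which is why the access is narrowed.
// Assumes a little-endian target; the helper name is hypothetical.
static int s9_size_check[sizeof(struct s9) == 7 ? 1 : -1];
static unsigned f9_load_last_byte(const struct s9 *p) {
  unsigned char b;
  __builtin_memcpy(&b, (const unsigned char *) p + 6, 1); // the single in-bounds byte holding f7
  return b >> 1;                                          // f7 starts at bit 1 of byte 6
}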