; RUN: llc < %s -asm-verbose=false -disable-wasm-explicit-locals -disable-wasm-fallthrough-return-opt | FileCheck %s

; Test constant load and store address offsets.

target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"

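; WebAssembly memory instructions carry a constant unsigned byte offset that
; is added to the dynamic address operand, so a constant addend in the IR can
; often be folded into that immediate instead of costing an explicit i32.add.
; These tests exercise when that fold is and isn't legal.
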
;===----------------------------------------------------------------------------
; Loads: 32-bit
;===----------------------------------------------------------------------------

; Basic load.

; CHECK-LABEL: load_i32_no_offset:
; CHECK: i32.load $push0=, 0($0){{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @load_i32_no_offset(i32 *%p) {
  %v = load i32, i32* %p
  ret i32 %v
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: load_i32_with_folded_offset:
; CHECK: i32.load  $push0=, 24($0){{$}}
define i32 @load_i32_with_folded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = load i32, i32* %s
  ret i32 %t
}
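
; The fold above is only sound if the IR-level address arithmetic cannot wrap
; in the unsigned sense; nuw guarantees exactly that, matching wasm's
; non-wrapping base-plus-offset address computation.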

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: load_i32_with_folded_gep_offset:
; CHECK: i32.load  $push0=, 24($0){{$}}
define i32 @load_i32_with_folded_gep_offset(i32* %p) {
  %s = getelementptr inbounds i32, i32* %p, i32 6
  %t = load i32, i32* %s
  ret i32 %t
}
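
; The gep index is scaled by the element size: 6 * 4-byte i32 = 24 bytes,
; which shows up directly as the load's offset immediate.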

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: load_i32_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i32.load  $push2=, 0($pop1){{$}}
define i32 @load_i32_with_unfolded_gep_negative_offset(i32* %p) {
  %s = getelementptr inbounds i32, i32* %p, i32 -6
  %t = load i32, i32* %s
  ret i32 %t
}
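
; The offset immediate is unsigned, so -24 has no encoding; the address is
; computed with an explicit i32.add instead.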

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: load_i32_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i32.load  $push2=, 0($pop1){{$}}
define i32 @load_i32_with_unfolded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = load i32, i32* %s
  ret i32 %t
}
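
; nsw only rules out signed wraparound; the unsigned computation could still
; wrap past 2^32 and land at a different address than base+24, so the add
; stays explicit.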

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: load_i32_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i32.load  $push2=, 0($pop1){{$}}
define i32 @load_i32_with_unfolded_gep_offset(i32* %p) {
  %s = getelementptr i32, i32* %p, i32 6
  %t = load i32, i32* %s
  ret i32 %t
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: load_i32_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.load  $push1=, 42($pop0){{$}}
define i32 @load_i32_from_numeric_address() {
  %s = inttoptr i32 42 to i32*
  %t = load i32, i32* %s
  ret i32 %t
}
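
; The constant address 42 goes into the offset field over a materialized zero
; base, since every wasm memory access needs a dynamic address operand.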

; CHECK-LABEL: load_i32_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.load  $push1=, gv($pop0){{$}}
@gv = global i32 0
define i32 @load_i32_from_global_address() {
  %t = load i32, i32* @gv
  ret i32 %t
}

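; Similarly, a global's link-time-constant address is emitted as a symbolic
; offset (gv) over a zero base.
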
;===----------------------------------------------------------------------------
; Loads: 64-bit
;===----------------------------------------------------------------------------

; Basic load.

; CHECK-LABEL: load_i64_no_offset:
; CHECK: i64.load $push0=, 0($0){{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @load_i64_no_offset(i64 *%p) {
  %v = load i64, i64* %p
  ret i64 %v
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: load_i64_with_folded_offset:
; CHECK: i64.load  $push0=, 24($0){{$}}
define i64 @load_i64_with_folded_offset(i64* %p) {
  %q = ptrtoint i64* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i64*
  %t = load i64, i64* %s
  ret i64 %t
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: load_i64_with_folded_gep_offset:
; CHECK: i64.load  $push0=, 24($0){{$}}
define i64 @load_i64_with_folded_gep_offset(i64* %p) {
  %s = getelementptr inbounds i64, i64* %p, i32 3
  %t = load i64, i64* %s
  ret i64 %t
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: load_i64_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i64.load  $push2=, 0($pop1){{$}}
define i64 @load_i64_with_unfolded_gep_negative_offset(i64* %p) {
  %s = getelementptr inbounds i64, i64* %p, i32 -3
  %t = load i64, i64* %s
  ret i64 %t
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: load_i64_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i64.load  $push2=, 0($pop1){{$}}
define i64 @load_i64_with_unfolded_offset(i64* %p) {
  %q = ptrtoint i64* %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to i64*
  %t = load i64, i64* %s
  ret i64 %t
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: load_i64_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i64.load  $push2=, 0($pop1){{$}}
define i64 @load_i64_with_unfolded_gep_offset(i64* %p) {
  %s = getelementptr i64, i64* %p, i32 3
  %t = load i64, i64* %s
  ret i64 %t
}

;===----------------------------------------------------------------------------
; Stores: 32-bit
;===----------------------------------------------------------------------------

; Basic store.

; CHECK-LABEL: store_i32_no_offset:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: i32.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_i32_no_offset(i32 *%p, i32 %v) {
  store i32 %v, i32* %p
  ret void
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: store_i32_with_folded_offset:
; CHECK: i32.store 24($0), $pop0{{$}}
define void @store_i32_with_folded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  store i32 0, i32* %s
  ret void
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: store_i32_with_folded_gep_offset:
; CHECK: i32.store 24($0), $pop0{{$}}
define void @store_i32_with_folded_gep_offset(i32* %p) {
  %s = getelementptr inbounds i32, i32* %p, i32 6
  store i32 0, i32* %s
  ret void
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: store_i32_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i32.store 0($pop1), $pop2{{$}}
define void @store_i32_with_unfolded_gep_negative_offset(i32* %p) {
  %s = getelementptr inbounds i32, i32* %p, i32 -6
  store i32 0, i32* %s
  ret void
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: store_i32_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i32.store 0($pop1), $pop2{{$}}
define void @store_i32_with_unfolded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  store i32 0, i32* %s
  ret void
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: store_i32_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i32.store 0($pop1), $pop2{{$}}
define void @store_i32_with_unfolded_gep_offset(i32* %p) {
  %s = getelementptr i32, i32* %p, i32 6
  store i32 0, i32* %s
  ret void
}

; When storing to a fixed address, materialize a zero.

; CHECK-LABEL: store_i32_to_numeric_address:
; CHECK-NEXT: i32.const $push0=, 0{{$}}
; CHECK-NEXT: i32.const $push1=, 0{{$}}
; CHECK-NEXT: i32.store 42($pop0), $pop1{{$}}
define void @store_i32_to_numeric_address() {
  %s = inttoptr i32 42 to i32*
  store i32 0, i32* %s
  ret void
}

; CHECK-LABEL: store_i32_to_global_address:
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.const $push1=, 0{{$}}
; CHECK: i32.store gv($pop0), $pop1{{$}}
define void @store_i32_to_global_address() {
  store i32 0, i32* @gv
  ret void
}

;===----------------------------------------------------------------------------
; Stores: 64-bit
;===----------------------------------------------------------------------------

; With an nuw add, we can fold an offset.

; CHECK-LABEL: store_i64_with_folded_offset:
; CHECK: i64.store 24($0), $pop0{{$}}
define void @store_i64_with_folded_offset(i64* %p) {
  %q = ptrtoint i64* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i64*
  store i64 0, i64* %s
  ret void
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: store_i64_with_folded_gep_offset:
; CHECK: i64.store 24($0), $pop0{{$}}
define void @store_i64_with_folded_gep_offset(i64* %p) {
  %s = getelementptr inbounds i64, i64* %p, i32 3
  store i64 0, i64* %s
  ret void
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: store_i64_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i64.store 0($pop1), $pop2{{$}}
define void @store_i64_with_unfolded_gep_negative_offset(i64* %p) {
  %s = getelementptr inbounds i64, i64* %p, i32 -3
  store i64 0, i64* %s
  ret void
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: store_i64_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i64.store 0($pop1), $pop2{{$}}
define void @store_i64_with_unfolded_offset(i64* %p) {
  %q = ptrtoint i64* %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to i64*
  store i64 0, i64* %s
  ret void
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: store_i64_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i64.store 0($pop1), $pop2{{$}}
define void @store_i64_with_unfolded_gep_offset(i64* %p) {
  %s = getelementptr i64, i64* %p, i32 3
  store i64 0, i64* %s
  ret void
}

; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
; an 'add' if the or'ed bits are known to be zero.

; CHECK-LABEL: store_i32_with_folded_or_offset:
; CHECK: i32.store8 2($pop{{[0-9]+}}), $pop{{[0-9]+}}{{$}}
define void @store_i32_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
  store i8 0, i8* %arrayidx, align 1
  ret void
}
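
; Masking with -4 clears the two low bits, so or'ing in 2 is the same as
; adding 2, and the 2 can be folded into the store8 offset.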

;===----------------------------------------------------------------------------
; Sign-extending loads
;===----------------------------------------------------------------------------

; Fold an offset into a sign-extending load.

; CHECK-LABEL: load_i8_i32_s_with_folded_offset:
; CHECK: i32.load8_s $push0=, 24($0){{$}}
define i32 @load_i8_i32_s_with_folded_offset(i8* %p) {
  %q = ptrtoint i8* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i8*
  %t = load i8, i8* %s
  %u = sext i8 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i32_i64_s_with_folded_offset:
; CHECK: i64.load32_s $push0=, 24($0){{$}}
define i64 @load_i32_i64_s_with_folded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = load i32, i32* %s
  %u = sext i32 %t to i64
  ret i64 %u
}

; Fold a gep offset into a sign-extending load.

; CHECK-LABEL: load_i8_i32_s_with_folded_gep_offset:
; CHECK: i32.load8_s $push0=, 24($0){{$}}
define i32 @load_i8_i32_s_with_folded_gep_offset(i8* %p) {
  %s = getelementptr inbounds i8, i8* %p, i32 24
  %t = load i8, i8* %s
  %u = sext i8 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i16_i32_s_with_folded_gep_offset:
; CHECK: i32.load16_s $push0=, 48($0){{$}}
define i32 @load_i16_i32_s_with_folded_gep_offset(i16* %p) {
  %s = getelementptr inbounds i16, i16* %p, i32 24
  %t = load i16, i16* %s
  %u = sext i16 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i16_i64_s_with_folded_gep_offset:
; CHECK: i64.load16_s $push0=, 48($0){{$}}
define i64 @load_i16_i64_s_with_folded_gep_offset(i16* %p) {
  %s = getelementptr inbounds i16, i16* %p, i32 24
  %t = load i16, i16* %s
  %u = sext i16 %t to i64
  ret i64 %u
}

; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
; an 'add' if the or'ed bits are known to be zero.

; CHECK-LABEL: load_i8_i32_s_with_folded_or_offset:
; CHECK: i32.load8_s $push{{[0-9]+}}=, 2($pop{{[0-9]+}}){{$}}
define i32 @load_i8_i32_s_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
  %t1 = load i8, i8* %arrayidx
  %conv = sext i8 %t1 to i32
  ret i32 %conv
}

; CHECK-LABEL: load_i8_i64_s_with_folded_or_offset:
; CHECK: i64.load8_s $push{{[0-9]+}}=, 2($pop{{[0-9]+}}){{$}}
define i64 @load_i8_i64_s_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
  %t1 = load i8, i8* %arrayidx
  %conv = sext i8 %t1 to i64
  ret i64 %conv
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: load_i16_i32_s_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.load16_s  $push1=, 42($pop0){{$}}
define i32 @load_i16_i32_s_from_numeric_address() {
  %s = inttoptr i32 42 to i16*
  %t = load i16, i16* %s
  %u = sext i16 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i8_i32_s_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.load8_s  $push1=, gv8($pop0){{$}}
@gv8 = global i8 0
define i32 @load_i8_i32_s_from_global_address() {
  %t = load i8, i8* @gv8
  %u = sext i8 %t to i32
  ret i32 %u
}

;===----------------------------------------------------------------------------
; Zero-extending loads
;===----------------------------------------------------------------------------

; Fold an offset into a zero-extending load.

; CHECK-LABEL: load_i8_i32_z_with_folded_offset:
; CHECK: i32.load8_u $push0=, 24($0){{$}}
define i32 @load_i8_i32_z_with_folded_offset(i8* %p) {
  %q = ptrtoint i8* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i8*
  %t = load i8, i8* %s
  %u = zext i8 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i32_i64_z_with_folded_offset:
; CHECK: i64.load32_u $push0=, 24($0){{$}}
define i64 @load_i32_i64_z_with_folded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = load i32, i32* %s
  %u = zext i32 %t to i64
  ret i64 %u
}

; Fold a gep offset into a zero-extending load.

; CHECK-LABEL: load_i8_i32_z_with_folded_gep_offset:
; CHECK: i32.load8_u $push0=, 24($0){{$}}
define i32 @load_i8_i32_z_with_folded_gep_offset(i8* %p) {
  %s = getelementptr inbounds i8, i8* %p, i32 24
  %t = load i8, i8* %s
  %u = zext i8 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i16_i32_z_with_folded_gep_offset:
; CHECK: i32.load16_u $push0=, 48($0){{$}}
define i32 @load_i16_i32_z_with_folded_gep_offset(i16* %p) {
  %s = getelementptr inbounds i16, i16* %p, i32 24
  %t = load i16, i16* %s
  %u = zext i16 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i16_i64_z_with_folded_gep_offset:
; CHECK: i64.load16_u $push0=, 48($0){{$}}
define i64 @load_i16_i64_z_with_folded_gep_offset(i16* %p) {
  %s = getelementptr inbounds i16, i16* %p, i64 24
  %t = load i16, i16* %s
  %u = zext i16 %t to i64
  ret i64 %u
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: load_i16_i32_z_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.load16_u  $push1=, 42($pop0){{$}}
define i32 @load_i16_i32_z_from_numeric_address() {
  %s = inttoptr i32 42 to i16*
  %t = load i16, i16* %s
  %u = zext i16 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i8_i32_z_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.load8_u  $push1=, gv8($pop0){{$}}
define i32 @load_i8_i32_z_from_global_address() {
  %t = load i8, i8* @gv8
  %u = zext i8 %t to i32
  ret i32 %u
}

; An i8 return value exercises anyext loads.
; CHECK-LABEL: load_i8_i32_retvalue:
; CHECK: i32.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i8 @load_i8_i32_retvalue(i8 *%p) {
  %v = load i8, i8* %p
  ret i8 %v
}

;===----------------------------------------------------------------------------
; Truncating stores
;===----------------------------------------------------------------------------

; Fold an offset into a truncating store.

; CHECK-LABEL: store_i8_i32_with_folded_offset:
; CHECK: i32.store8 24($0), $1{{$}}
define void @store_i8_i32_with_folded_offset(i8* %p, i32 %v) {
  %q = ptrtoint i8* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i8*
  %t = trunc i32 %v to i8
  store i8 %t, i8* %s
  ret void
}

; CHECK-LABEL: store_i32_i64_with_folded_offset:
; CHECK: i64.store32 24($0), $1{{$}}
define void @store_i32_i64_with_folded_offset(i32* %p, i64 %v) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = trunc i64 %v to i32
  store i32 %t, i32* %s
  ret void
}
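
; i32.store8 and i64.store32 write only the low bits of their operand, so the
; explicit trunc disappears into the store instruction.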

; Fold a gep offset into a truncating store.

; CHECK-LABEL: store_i8_i32_with_folded_gep_offset:
; CHECK: i32.store8 24($0), $1{{$}}
define void @store_i8_i32_with_folded_gep_offset(i8* %p, i32 %v) {
  %s = getelementptr inbounds i8, i8* %p, i32 24
  %t = trunc i32 %v to i8
  store i8 %t, i8* %s
  ret void
}

; CHECK-LABEL: store_i16_i32_with_folded_gep_offset:
; CHECK: i32.store16 48($0), $1{{$}}
define void @store_i16_i32_with_folded_gep_offset(i16* %p, i32 %v) {
  %s = getelementptr inbounds i16, i16* %p, i32 24
  %t = trunc i32 %v to i16
  store i16 %t, i16* %s
  ret void
}

; CHECK-LABEL: store_i16_i64_with_folded_gep_offset:
; CHECK: i64.store16 48($0), $1{{$}}
define void @store_i16_i64_with_folded_gep_offset(i16* %p, i64 %v) {
  %s = getelementptr inbounds i16, i16* %p, i64 24
  %t = trunc i64 %v to i16
  store i16 %t, i16* %s
  ret void
}

; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
; an 'add' if the or'ed bits are known to be zero.

; CHECK-LABEL: store_i8_i32_with_folded_or_offset:
; CHECK: i32.store8 2($pop{{[0-9]+}}), $1{{$}}
define void @store_i8_i32_with_folded_or_offset(i32 %x, i32 %v) {
  %and = and i32 %x, -4
  %p = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
  %t = trunc i32 %v to i8
  store i8 %t, i8* %arrayidx
  ret void
}

; CHECK-LABEL: store_i8_i64_with_folded_or_offset:
; CHECK: i64.store8 2($pop{{[0-9]+}}), $1{{$}}
define void @store_i8_i64_with_folded_or_offset(i32 %x, i64 %v) {
  %and = and i32 %x, -4
  %p = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
  %t = trunc i64 %v to i8
  store i8 %t, i8* %arrayidx
  ret void
}

;===----------------------------------------------------------------------------
; Aggregate values
;===----------------------------------------------------------------------------

; Fold the offsets when lowering aggregate loads and stores.

; CHECK-LABEL: aggregate_load_store:
; CHECK: i32.load  $2=, 0($0){{$}}
; CHECK: i32.load  $3=, 4($0){{$}}
; CHECK: i32.load  $4=, 8($0){{$}}
; CHECK: i32.load  $push0=, 12($0){{$}}
; CHECK: i32.store 12($1), $pop0{{$}}
; CHECK: i32.store 8($1), $4{{$}}
; CHECK: i32.store 4($1), $3{{$}}
; CHECK: i32.store 0($1), $2{{$}}
define void @aggregate_load_store({i32,i32,i32,i32}* %p, {i32,i32,i32,i32}* %q) {
  ; volatile so that things stay in order for the tests above
  %t = load volatile {i32,i32,i32,i32}, {i32, i32,i32,i32}* %p
  store volatile {i32,i32,i32,i32} %t, {i32, i32,i32,i32}* %q
  ret void
}
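
; The four i32 fields sit at byte offsets 0, 4, 8, and 12, which appear
; directly as the folded offsets above.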

; Fold the offsets when lowering aggregate return values. The stores get
; merged into i64 stores.

; CHECK-LABEL: aggregate_return:
; CHECK: i64.const   $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK: i64.store   8($0):p2align=2, $pop[[L0]]{{$}}
; CHECK: i64.const   $push[[L1:[0-9]+]]=, 0{{$}}
; CHECK: i64.store   0($0):p2align=2, $pop[[L1]]{{$}}
define {i32,i32,i32,i32} @aggregate_return() {
  ret {i32,i32,i32,i32} zeroinitializer
}
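
; The :p2align=2 annotations record that these merged i64 stores are only
; 4-byte (2^2) aligned, the alignment of the underlying i32 fields.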

; Fold the offsets when lowering aggregate return values. The stores are not
; merged.

; CHECK-LABEL: aggregate_return_without_merge:
; CHECK: i32.const   $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK: i32.store8  14($0), $pop[[L0]]{{$}}
; CHECK: i32.const   $push[[L1:[0-9]+]]=, 0{{$}}
; CHECK: i32.store16 12($0), $pop[[L1]]{{$}}
; CHECK: i32.const   $push[[L2:[0-9]+]]=, 0{{$}}
; CHECK: i32.store   8($0), $pop[[L2]]{{$}}
; CHECK: i64.const   $push[[L3:[0-9]+]]=, 0{{$}}
; CHECK: i64.store   0($0), $pop[[L3]]{{$}}
define {i64,i32,i16,i8} @aggregate_return_without_merge() {
  ret {i64,i32,i16,i8} zeroinitializer
}