; RUN: llc -march=hexagon < %s | FileCheck %s

; The DAG combiner folds sequences of shifts, which can sometimes obscure
; optimization opportunities. For example,
;
;   unsigned int c(unsigned int b, unsigned int *a) {
;     unsigned int bitidx = b >> 5;
;     return a[bitidx];
;   }
;
; produces
;   (add x (shl (srl y 5) 2))
; which is then folded into
;   (add x (and (srl y 3) 0x1FFFFFFC))
;
; That results in a constant-extended "and":
;   r0 = and(##536870908,lsr(r0,#3))
;   r0 = memw(r1+r0<<#0)
; whereas
;   r0 = lsr(r0,#5)
;   r0 = memw(r1+r0<<#2)
; is more desirable.
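;
; A quick sanity check of the equivalence (illustrative arithmetic, not
; part of the original test): shifting right by 5 and then left by 2 is a
; net right shift by 3 with the two low bits cleared, so
;   (b >> 5) << 2  ==  (b >> 3) & 0x1FFFFFFC
; where 0x1FFFFFFC (decimal 536870908) is the mask with bits 2..28 set,
; matching the constant-extended immediate above.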

target triple = "hexagon"

; CHECK-LABEL: load_0
; CHECK: memw(r{{[0-9]+}}+r{{[0-9]}}<<#2)
define i32 @load_0(i32 %b, i32* nocapture readonly %a) #0 {
entry:
  %shr = lshr i32 %b, 5
  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %shr
  %0 = load i32, i32* %arrayidx, align 4
  ret i32 %0
}

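; A C-level equivalent of load_1 (the function name "d" is hypothetical,
; shown only for illustration):
;
;   unsigned int d(unsigned int b, unsigned int (*a)[3]) {
;     return a[b >> 5][0];
;   }
;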
; This would require r0<<#3, which is not legal.
; CHECK-LABEL: load_1
; CHECK: memw(r{{[0-9]+}}+r{{[0-9]}}<<#0)
define i32 @load_1(i32 %b, [3 x i32]* nocapture readonly %a) #0 {
entry:
  %shr = lshr i32 %b, 5
  %arrayidx = getelementptr inbounds [3 x i32], [3 x i32]* %a, i32 %shr, i32 0
  %0 = load i32, i32* %arrayidx, align 4
  ret i32 %0
}

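; A C-level equivalent of store_0 (the function name "s" is hypothetical,
; shown only for illustration):
;
;   void s(unsigned int b, unsigned int *a, unsigned int v) {
;     a[b >> 5] = v;
;   }
;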
; CHECK-LABEL: store_0
; CHECK: memw(r{{[0-9]+}}+r{{[0-9]}}<<#2)
define void @store_0(i32 %b, i32* nocapture %a, i32 %v) #1 {
entry:
  %shr = lshr i32 %b, 5
  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %shr
  store i32 %v, i32* %arrayidx, align 4
  ret void
}

attributes #0 = { norecurse nounwind readonly "target-cpu"="hexagonv60" "target-features"="-hvx,-long-calls" }
attributes #1 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="-hvx,-long-calls" }