; RUN: llc < %s -mcpu=cortex-a8 | FileCheck %s
target triple = "thumbv7-apple-ios"

; CHECK: local_split
;
; The load must go into d0-d15, which are all clobbered by the asm.
; RAGreedy should split the range and use d16-d31 to avoid a spill.
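; (Single-precision s0-s31 alias d0-d15, while d16-d31 have no s-register
; aliases, so without a spill the value can only survive the asm in a
; d16-d31 register; the vorr checked below is expected to be that
; d-register copy.)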
;
; CHECK: vldr s
; CHECK-NOT: vstr
; CHECK: vadd.f32
; CHECK-NOT: vstr
; CHECK: vorr
; CHECK: vstr s
define void @local_split(float* nocapture %p) nounwind ssp {
entry:
  %x = load float* %p, align 4
  %a = fadd float %x, 1.0
  tail call void asm sideeffect "", "~{d0},~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwind
  store float %a, float* %p, align 4
  ret void
}

; CHECK: global_split
;
; Same thing, but across basic blocks.
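; Here the live range of %add crosses the conditional branch, so the
; d16-d31 copy presumably has to stay live across block boundaries
; rather than just within a single block.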
;
; CHECK: vldr s
; CHECK-NOT: vstr
; CHECK: vadd.f32
; CHECK-NOT: vstr
; CHECK: vorr
; CHECK: vstr s
define void @global_split(float* nocapture %p1, float* nocapture %p2) nounwind ssp {
entry:
  %0 = load float* %p1, align 4
  %add = fadd float %0, 1.000000e+00
  tail call void asm sideeffect "", "~{d0},~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwind
  %cmp = fcmp ogt float %add, 0.000000e+00
  br i1 %cmp, label %if.then, label %if.end

if.then:
  store float %add, float* %p2, align 4
  br label %if.end

if.end:
  store float %add, float* %p1, align 4
  ret void
}