; Test all AArch64 subarches with scheduling models.
; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=cortex-a57 < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=cortex-a72 < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=cortex-a73 < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=cyclone    < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=exynos-m1  < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=exynos-m2  < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=exynos-m3  < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=kryo       < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=thunderx2t99 < %s | FileCheck %s

; Make sure that inst-combine fuses the multiply add in the addressing mode of
; the load.

; CHECK-LABEL: fun:
; CHECK-NOT: mul
; CHECK:     madd
; CHECK-NOT: mul
; %class.D is 24 bytes: the packed basic_string base (20 bytes) plus 4 bytes
; of tail padding. The 24-byte element size is what produces the multiply in
; the loop's address computation below.
%class.D = type { %class.basic_string.base, [4 x i8] }
%class.basic_string.base = type <{ i64, i64, i32 }>
; Global array-base pointer, reloaded on every loop iteration in @fun.
@a = global %class.D* zeroinitializer, align 8
; Fixed-size (24-byte) element copy used in the loop body.
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
; Loop that copies element %conv11.i.i of the global array @a into a local
; buffer each iteration. The address of the source element is
;   @a[0] + %conv11.i.i * 24
; so the backend must materialize index*24 + base; the CHECK lines above
; require this to lower to a single madd with no standalone mul, for every
; scheduling model listed in the RUN lines.
define internal void @fun() section ".text.startup" {
entry:
  ; 24-byte local destination buffer for the per-iteration copy.
  %tmp.i.i = alloca %class.D, align 8
  %y = bitcast %class.D* %tmp.i.i to i8*
  br label %loop
loop:
  ; Iteration counter starting at 0; this is the loop-variant GEP index.
  %conv11.i.i = phi i64 [ 0, %entry ], [ %inc.i.i, %loop ]
  ; Second counter deliberately seeded with undef (test input as written;
  ; the trip count is irrelevant to what is being checked).
  %i = phi i64 [ undef, %entry ], [ %inc.i.i, %loop ]
  ; Reload the array base from @a every iteration.
  %x = load %class.D*, %class.D** getelementptr inbounds (%class.D*, %class.D** @a, i64 0), align 8
  ; Address = base + index * sizeof(%class.D) (24 bytes) — the mul+add the
  ; test expects to be fused into madd during lowering.
  %arrayidx.i.i.i = getelementptr inbounds %class.D, %class.D* %x, i64 %conv11.i.i
  %d = bitcast %class.D* %arrayidx.i.i.i to i8*
  ; Copy one whole 24-byte element into the local buffer.
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull %y, i8* align 8 %d, i64 24, i1 false)
  %inc.i.i = add i64 %i, 1
  ; slt ... 0: loop continues only while the incremented counter is negative.
  %cmp.i.i = icmp slt i64 %inc.i.i, 0
  br i1 %cmp.i.i, label %loop, label %exit
exit:
  ret void
}