# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=aarch64 -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
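# These tests exercise how the AArch64 prelegalizer combiner handles G_MEMMOVE:
# small constant-size moves are expected to be inlined into load/store
# sequences, while dynamic or over-threshold sizes are left as G_MEMMOVE.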
--- |
  target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
  target triple = "aarch64"

  define void @test_memmove1(i32* nocapture %dst, i32* nocapture readonly %src, i64 %len) local_unnamed_addr #0 {
  entry:
    %0 = bitcast i32* %dst to i8*
    %1 = bitcast i32* %src to i8*
    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 %len, i1 false)
    ret void
  }

  declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1 immarg) #1

  define void @test_memmove2_const(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #0 {
  entry:
    %0 = bitcast i32* %dst to i8*
    %1 = bitcast i32* %src to i8*
    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 48, i1 false)
    ret void
  }

  define void @test_memmove3_const_toolarge(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #0 {
  entry:
    %0 = bitcast i32* %dst to i8*
    %1 = bitcast i32* %src to i8*
    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 96, i1 false)
    ret void
  }

  define void @test_memmove4_const_unaligned(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #0 {
  entry:
    %0 = bitcast i32* %dst to i8*
    %1 = bitcast i32* %src to i8*
    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 52, i1 false)
    ret void
  }

  attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2,+zcm,+zcz" "unsafe-fp-math"="false" "use-soft-float"="false" }
  attributes #1 = { argmemonly nounwind }

...
---
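# The length in $x2 is not a compile-time constant, so the combiner should
# leave the G_MEMMOVE untouched.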
name:            test_memmove1
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $x0, $x1, $x2

    ; CHECK-LABEL: name: test_memmove1
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
    ; CHECK: G_MEMMOVE [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = COPY $x2
    G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR

...
---
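# A constant 48-byte move within the inlining threshold: expected to expand
# into three 16-byte loads followed by three 16-byte stores. All loads are
# emitted before any store, since a memmove's source and destination may
# overlap.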
name:            test_memmove2_const
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: test_memmove2_const
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
    ; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP1]](p0) :: (load 16 from %ir.1 + 32, align 4)
    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
    ; CHECK: G_STORE [[LOAD1]](s128), [[GEP2]](p0) :: (store 16 into %ir.0 + 16, align 4)
    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
    ; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 32, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 48
    G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR

...
---
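# A constant 96-byte move exceeds the inlining threshold, so the G_MEMMOVE
# is expected to remain.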
name:            test_memmove3_const_toolarge
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: test_memmove3_const_toolarge
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
    ; CHECK: G_MEMMOVE [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 96
    G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR

...
---
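# A constant 52-byte move: the size is not a multiple of 16, so it is
# expected to expand into three 16-byte copies plus a trailing 4-byte copy,
# again with all loads preceding the stores.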
name:            test_memmove4_const_unaligned
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: test_memmove4_const_unaligned
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
    ; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP1]](p0) :: (load 16 from %ir.1 + 32, align 4)
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
    ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
    ; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.1 + 48)
    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
    ; CHECK: G_STORE [[LOAD1]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 16, align 4)
    ; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
    ; CHECK: G_STORE [[LOAD2]](s128), [[GEP4]](p0) :: (store 16 into %ir.0 + 32, align 4)
    ; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
    ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
    ; CHECK: G_STORE [[LOAD3]](s32), [[GEP5]](p0) :: (store 4 into %ir.0 + 48)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 52
    G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR

...