; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S                         | FileCheck %s --check-prefix=ALL --check-prefix=NODL
; RUN: opt < %s -instcombine -S -data-layout=n32        | FileCheck %s --check-prefix=ALL --check-prefix=I32
; RUN: opt < %s -instcombine -S -data-layout=n32:64     | FileCheck %s --check-prefix=ALL --check-prefix=I64
; RUN: opt < %s -instcombine -S -data-layout=n32:64:128 | FileCheck %s --check-prefix=ALL --check-prefix=I128

declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind

; memcpy can be expanded inline with load/store (based on the datalayout?)

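; As an illustrative sketch (not asserted by FileCheck; the value names below
; are invented), a 4-byte copy becomes a single i32 load/store pair:
;   %sp = bitcast i8* %s to i32*
;   %dp = bitcast i8* %d to i32*
;   %v  = load i32, i32* %sp, align 1
;   store i32 %v, i32* %dp, align 1
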
define void @copy_1_byte(i8* %d, i8* %s) {
; ALL-LABEL: @copy_1_byte(
; ALL-NEXT:    [[TMP1:%.*]] = load i8, i8* [[S:%.*]], align 1
; ALL-NEXT:    store i8 [[TMP1]], i8* [[D:%.*]], align 1
; ALL-NEXT:    ret void
;
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 1, i1 false)
  ret void
}

define void @copy_2_bytes(i8* %d, i8* %s) {
; ALL-LABEL: @copy_2_bytes(
; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[S:%.*]] to i16*
; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[D:%.*]] to i16*
; ALL-NEXT:    [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 1
; ALL-NEXT:    store i16 [[TMP3]], i16* [[TMP2]], align 1
; ALL-NEXT:    ret void
;
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 2, i1 false)
  ret void
}

; We don't expand small non-power-of-2 sizes. Should we? Might be a target-dependent choice.

define void @copy_3_bytes(i8* %d, i8* %s) {
; ALL-LABEL: @copy_3_bytes(
; ALL-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[D:%.*]], i8* align 1 [[S:%.*]], i32 3, i1 false)
; ALL-NEXT:    ret void
;
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 3, i1 false)
  ret void
}

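; A hypothetical alternative (not what this pass does, per the comment above):
; a 3-byte copy could be split into a 2-byte piece plus a trailing byte.
; Illustrative IR only; all names are invented:
;   %s16 = bitcast i8* %s to i16*
;   %d16 = bitcast i8* %d to i16*
;   %lo = load i16, i16* %s16, align 1
;   store i16 %lo, i16* %d16, align 1
;   %s2 = getelementptr inbounds i8, i8* %s, i32 2
;   %d2 = getelementptr inbounds i8, i8* %d, i32 2
;   %hi = load i8, i8* %s2, align 1
;   store i8 %hi, i8* %d2, align 1
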
define void @copy_4_bytes(i8* %d, i8* %s) {
; ALL-LABEL: @copy_4_bytes(
; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[S:%.*]] to i32*
; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[D:%.*]] to i32*
; ALL-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 1
; ALL-NEXT:    store i32 [[TMP3]], i32* [[TMP2]], align 1
; ALL-NEXT:    ret void
;
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 4, i1 false)
  ret void
}

; We don't expand small non-power-of-2 sizes. Should we? Might be a target-dependent choice.

define void @copy_5_bytes(i8* %d, i8* %s) {
; ALL-LABEL: @copy_5_bytes(
; ALL-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[D:%.*]], i8* align 1 [[S:%.*]], i32 5, i1 false)
; ALL-NEXT:    ret void
;
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 5, i1 false)
  ret void
}

define void @copy_8_bytes(i8* %d, i8* %s) {
; ALL-LABEL: @copy_8_bytes(
; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[S:%.*]] to i64*
; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[D:%.*]] to i64*
; ALL-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; ALL-NEXT:    store i64 [[TMP3]], i64* [[TMP2]], align 1
; ALL-NEXT:    ret void
;
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 8, i1 false)
  ret void
}

define void @copy_16_bytes(i8* %d, i8* %s) {
; ALL-LABEL: @copy_16_bytes(
; ALL-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[D:%.*]], i8* align 1 [[S:%.*]], i32 16, i1 false)
; ALL-NEXT:    ret void
;
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 16, i1 false)
  ret void
}