; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; MemCpy optimizations should take place even in the presence of invariant.start
; RUN: opt < %s -basic-aa -memcpyopt -S -enable-memcpyopt-memoryssa=0 | FileCheck %s --check-prefixes=CHECK,NO_MSSA
; RUN: opt < %s -basic-aa -memcpyopt -S -enable-memcpyopt-memoryssa=1 -verify-memoryssa | FileCheck %s --check-prefixes=CHECK,MSSA
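; The two RUN lines cover both implementations of the pass: the second enables
; the MemorySSA-based MemCpyOpt and verifies MemorySSA after the run.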

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"

target triple = "i686-apple-darwin9"

%0 = type { x86_fp80, x86_fp80 }
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)

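; invariant.start marks the given number of bytes at the pointer as constant
; from this point on; it is readonly, so it must not block the rewrites below.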
declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly

; FIXME: The invariant.start does not modify %P.
; The intermediate alloca and one of the memcpys should be eliminated, and the
; other should be transformed to a memmove.
define void @test1(i8* %P, i8* %Q) nounwind  {
; NO_MSSA-LABEL: @test1(
; NO_MSSA-NEXT:    [[MEMTMP:%.*]] = alloca [[TMP0:%.*]], align 16
; NO_MSSA-NEXT:    [[R:%.*]] = bitcast %0* [[MEMTMP]] to i8*
; NO_MSSA-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[R]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; NO_MSSA-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 32, i8* [[P]])
; NO_MSSA-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[R]], i32 32, i1 false)
; NO_MSSA-NEXT:    ret void
;
; MSSA-LABEL: @test1(
; MSSA-NEXT:    [[MEMTMP:%.*]] = alloca [[TMP0:%.*]], align 16
; MSSA-NEXT:    [[R:%.*]] = bitcast %0* [[MEMTMP]] to i8*
; MSSA-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[R]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; MSSA-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 32, i8* [[P]])
; MSSA-NEXT:    call void @llvm.memmove.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P]], i32 32, i1 false)
; MSSA-NEXT:    ret void
;
  %memtmp = alloca %0, align 16
  %R = bitcast %0* %memtmp to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
  %i = call {}* @llvm.invariant.start.p0i8(i64 32, i8* %P)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
  ret void
}
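; A sketch (comments only, not checked by FileCheck) of the output the FIXME
; asks for, assuming MemCpyOpt learns that the readonly invariant.start does
; not clobber %P:
;
;   define void @test1(i8* %P, i8* %Q) nounwind {
;     %i = call {}* @llvm.invariant.start.p0i8(i64 32, i8* %P)
;     call void @llvm.memmove.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %P, i32 32, i1 false)
;     ret void
;   }
;
; Today the MSSA pipeline already forms the memmove from %P but keeps the dead
; alloca and the first memcpy, while the non-MSSA pipeline keeps both memcpys.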

; The invariant.start intrinsic does not inhibit transforming the memcpy to a
; memset.
define void @test2(i8* %dst1, i8* %dst2, i8 %c) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* [[DST1:%.*]], i8 [[C:%.*]], i64 128, i1 false)
; CHECK-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 32, i8* [[DST1]])
; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[DST2:%.*]], i8 [[C]], i64 128, i1 false)
; CHECK-NEXT:    ret void
;
  call void @llvm.memset.p0i8.i64(i8* %dst1, i8 %c, i64 128, i1 false)
  %i = call {}* @llvm.invariant.start.p0i8(i64 32, i8* %dst1)
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %dst2, i8* align 8 %dst1, i64 128, i1 false)
  ret void
}
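; The forwarding above is sound: the memset fills %dst1 with %c, the readonly
; invariant.start cannot modify %dst1, so the memcpy reads exactly %c bytes and
; can be rewritten as a second memset of %dst2.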