; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s --check-prefix PTX
; RUN: opt < %s -S -nvptx-lower-aggr-copies | FileCheck %s --check-prefix IR

; Verify that the NVPTXLowerAggrCopies pass works as expected - calls to
; llvm.mem* intrinsics get lowered to loops.

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "nvptx64-unknown-unknown"

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1

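; Check that a plain (non-volatile) memcpy is lowered to a byte-wise
; load/store loop over the source and destination.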
define i8* @memcpy_caller(i8* %dst, i8* %src, i64 %n) #0 {
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 false)
  ret i8* %dst

; IR-LABEL:   @memcpy_caller
; IR:         loadstoreloop:
; IR:         [[LOADPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64
; IR-NEXT:    [[VAL:%[0-9]+]] = load i8, i8* [[LOADPTR]]
; IR-NEXT:    [[STOREPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64
; IR-NEXT:    store i8 [[VAL]], i8* [[STOREPTR]]

; PTX-LABEL:  .visible .func (.param .b64 func_retval0) memcpy_caller
; PTX:        LBB[[LABEL:[_0-9]+]]:
; PTX:        ld.u8 %rs[[REG:[0-9]+]]
; PTX:        st.u8 [%rd{{[0-9]+}}], %rs[[REG]]
; PTX:        add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
; PTX-NEXT:   setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
; PTX-NEXT:   @%p[[PRED]] bra LBB[[LABEL]]
}

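; Check that the volatile flag on the memcpy is propagated to the loads
; and stores of the lowered loop (ld.volatile/st.volatile in PTX).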
define i8* @memcpy_volatile_caller(i8* %dst, i8* %src, i64 %n) #0 {
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 true)
  ret i8* %dst

; IR-LABEL:   @memcpy_volatile_caller
; IR:         load volatile
; IR:         store volatile

; PTX-LABEL:  .visible .func (.param .b64 func_retval0) memcpy_volatile_caller
; PTX:        LBB[[LABEL:[_0-9]+]]:
; PTX:        ld.volatile.u8 %rs[[REG:[0-9]+]]
; PTX:        st.volatile.u8 [%rd{{[0-9]+}}], %rs[[REG]]
; PTX:        add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
; PTX-NEXT:   setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
; PTX-NEXT:   @%p[[PRED]] bra LBB[[LABEL]]
}

define i8* @memcpy_casting_caller(i32* %dst, i32* %src, i64 %n) #0 {
entry:
  %0 = bitcast i32* %dst to i8*
  %1 = bitcast i32* %src to i8*
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 %n, i32 1, i1 false)
  ret i8* %0

; Check that casts in calls to memcpy are handled properly
; IR-LABEL:   @memcpy_casting_caller
; IR:         [[DSTCAST:%[0-9]+]] = bitcast i32* %dst to i8*
; IR:         [[SRCCAST:%[0-9]+]] = bitcast i32* %src to i8*
; IR:         getelementptr inbounds i8, i8* [[SRCCAST]]
; IR:         getelementptr inbounds i8, i8* [[DSTCAST]]
}

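; Check that memset is lowered to a loop that stores the value,
; truncated to i8, one byte at a time.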
define i8* @memset_caller(i8* %dst, i32 %c, i64 %n) #0 {
entry:
  %0 = trunc i32 %c to i8
  tail call void @llvm.memset.p0i8.i64(i8* %dst, i8 %0, i64 %n, i32 1, i1 false)
  ret i8* %dst

; IR-LABEL:   @memset_caller
; IR:         [[VAL:%[0-9]+]] = trunc i32 %c to i8
; IR:         loadstoreloop:
; IR:         [[STOREPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64
; IR-NEXT:    store i8 [[VAL]], i8* [[STOREPTR]]

; PTX-LABEL:  .visible .func (.param .b64 func_retval0) memset_caller(
; PTX:        ld.param.u8 %rs[[REG:[0-9]+]]
; PTX:        LBB[[LABEL:[_0-9]+]]:
; PTX:        st.u8 [%rd{{[0-9]+}}], %rs[[REG]]
; PTX:        add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
; PTX-NEXT:   setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
; PTX-NEXT:   @%p[[PRED]] bra LBB[[LABEL]]
}

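; Check that memmove is lowered to both a backward and a forward copy
; loop, selected at run time by comparing the src and dst pointers.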
define i8* @memmove_caller(i8* %dst, i8* %src, i64 %n) #0 {
entry:
  tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 false)
  ret i8* %dst

; IR-LABEL:   @memmove_caller
; IR:         icmp ult i8* %src, %dst
; IR:         [[PHIVAL:%[0-9a-zA-Z_]+]] = phi i64
; IR-NEXT:    %index_ptr = sub i64 [[PHIVAL]], 1
; IR:         [[FWDPHIVAL:%[0-9a-zA-Z_]+]] = phi i64
; IR:         {{%[0-9a-zA-Z_]+}} = add i64 [[FWDPHIVAL]], 1

; PTX-LABEL:  .visible .func (.param .b64 func_retval0) memmove_caller(
; PTX:        ld.param.u64 %rd[[N:[0-9]+]]
; PTX:        setp.eq.s64 %p[[NEQ0:[0-9]+]], %rd[[N]], 0
; PTX:        setp.ge.u64 %p[[SRC_GT_THAN_DST:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
; PTX-NEXT:   @%p[[SRC_GT_THAN_DST]] bra LBB[[FORWARD_BB:[0-9_]+]]
; -- this is the backwards copying BB
; PTX:        @%p[[NEQ0]] bra LBB[[EXIT:[0-9_]+]]
; PTX:        add.s64 %rd[[N]], %rd[[N]], -1
; PTX:        ld.u8 %rs[[ELEMENT:[0-9]+]]
; PTX:        st.u8 [%rd{{[0-9]+}}], %rs[[ELEMENT]]
; -- this is the forwards copying BB
; PTX:        LBB[[FORWARD_BB]]:
; PTX:        @%p[[NEQ0]] bra LBB[[EXIT]]
; PTX:        ld.u8 %rs[[ELEMENT2:[0-9]+]]
; PTX:        st.u8 [%rd{{[0-9]+}}], %rs[[ELEMENT2]]
; PTX:        add.s64 %rd[[INDEX:[0-9]+]], %rd[[INDEX]], 1
; -- exit block
; PTX:        LBB[[EXIT]]:
; PTX-NEXT:   st.param.b64 [func_retval0
; PTX-NEXT:   ret
}