; RUN: llc -mattr=harden-sls-retbr,harden-sls-blr -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,HARDEN,ISBDSB,ISBDSBDAGISEL
; RUN: llc -mattr=harden-sls-retbr,harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,HARDEN,SB,SBDAGISEL
; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,NOHARDEN
; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr,harden-sls-blr -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,HARDEN,ISBDSB
; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr,harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,HARDEN,SB

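; This file tests the AArch64 SLS (straight-line speculation) hardening.
; With harden-sls-retbr, every RET and indirect BR must be followed by a
; speculation barrier (DSB SY + ISB, or a single SB when the +sb extension
; is available). With harden-sls-blr, every indirect call (BLR) must be
; rewritten into a direct BL to a per-register __llvm_slsblr_thunk_* thunk.
; The NOHARDEN run line checks the output produced without the mitigations.
;
; The function below checks that both of its return paths get a barrier
; after the RET.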
; Function Attrs: norecurse nounwind readnone
define dso_local i32 @double_return(i32 %a, i32 %b) local_unnamed_addr {
entry:
  %cmp = icmp sgt i32 %a, 0
  br i1 %cmp, label %if.then, label %if.else

if.then:                                          ; preds = %entry
  %div = sdiv i32 %a, %b
  ret i32 %div

if.else:                                          ; preds = %entry
  %div1 = sdiv i32 %b, %a
  ret i32 %div1
; CHECK-LABEL: double_return:
; CHECK:       {{ret$}}
; ISBDSB-NEXT: dsb sy
; ISBDSB-NEXT: isb
; SB-NEXT:     {{ sb$}}
; CHECK:       {{ret$}}
; ISBDSB-NEXT: dsb sy
; ISBDSB-NEXT: isb
; SB-NEXT:     {{ sb$}}
; CHECK-NEXT: .Lfunc_end
}

@__const.indirect_branch.ptr = private unnamed_addr constant [2 x i8*] [i8* blockaddress(@indirect_branch, %return), i8* blockaddress(@indirect_branch, %l2)], align 8

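; The indirectbr below lowers to an indirect BR through a register; under
; harden-sls-retbr that BR, like every RET, must be followed by a speculation
; barrier.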
; Function Attrs: norecurse nounwind readnone
define dso_local i32 @indirect_branch(i32 %a, i32 %b, i32 %i) {
; CHECK-LABEL: indirect_branch:
entry:
  %idxprom = sext i32 %i to i64
  %arrayidx = getelementptr inbounds [2 x i8*], [2 x i8*]* @__const.indirect_branch.ptr, i64 0, i64 %idxprom
  %0 = load i8*, i8** %arrayidx, align 8
  indirectbr i8* %0, [label %return, label %l2]
; CHECK:       br x
; ISBDSB-NEXT: dsb sy
; ISBDSB-NEXT: isb
; SB-NEXT:     {{ sb$}}

l2:                                               ; preds = %entry
  br label %return
; CHECK:       {{ret$}}
; ISBDSB-NEXT: dsb sy
; ISBDSB-NEXT: isb
; SB-NEXT:     {{ sb$}}

return:                                           ; preds = %entry, %l2
  %retval.0 = phi i32 [ 1, %l2 ], [ 0, %entry ]
  ret i32 %retval.0
; CHECK:       {{ret$}}
; ISBDSB-NEXT: dsb sy
; ISBDSB-NEXT: isb
; SB-NEXT:     {{ sb$}}
; CHECK-NEXT: .Lfunc_end
}

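; RETAA and RETAB are the pointer-authentication return instructions selected
; by the "sign-return-address" attributes below; they return just like a plain
; RET, so they need the same barrier.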
; Check that RETAA and RETAB instructions are also protected as expected.
define dso_local i32 @ret_aa(i32 returned %a) local_unnamed_addr "target-features"="+neon,+v8.3a" "sign-return-address"="all" "sign-return-address-key"="a_key" {
entry:
; CHECK-LABEL: ret_aa:
; CHECK:       {{ retaa$}}
; ISBDSB-NEXT: dsb sy
; ISBDSB-NEXT: isb
; SB-NEXT:     {{ sb$}}
; CHECK-NEXT: .Lfunc_end
  ret i32 %a
}

define dso_local i32 @ret_ab(i32 returned %a) local_unnamed_addr "target-features"="+neon,+v8.3a" "sign-return-address"="all" "sign-return-address-key"="b_key" {
entry:
; CHECK-LABEL: ret_ab:
; CHECK:       {{ retab$}}
; ISBDSB-NEXT: dsb sy
; ISBDSB-NEXT: isb
; SB-NEXT:     {{ sb$}}
; CHECK-NEXT: .Lfunc_end
  ret i32 %a
}

define i32 @asmgoto() {
entry:
; CHECK-LABEL: asmgoto:
  callbr void asm sideeffect "B $0", "X"(i8* blockaddress(@asmgoto, %d))
            to label %asm.fallthrough [label %d]
     ; The asm goto above produces a direct branch:
; CHECK:           //APP
; CHECK-NEXT:      {{^[ \t]+b }}
; CHECK-NEXT:      //NO_APP
     ; For direct branches, no mitigation is needed.
; ISBDSB-NOT: dsb sy
; SB-NOT:     {{ sb$}}

asm.fallthrough:               ; preds = %entry
  ret i32 0
; CHECK:       {{ret$}}
; ISBDSB-NEXT: dsb sy
; ISBDSB-NEXT: isb
; SB-NEXT:     {{ sb$}}

d:                             ; preds = %asm.fallthrough, %entry
  ret i32 1
; CHECK:       {{ret$}}
; ISBDSB-NEXT: dsb sy
; ISBDSB-NEXT: isb
; SB-NEXT:     {{ sb$}}
; CHECK-NEXT: .Lfunc_end
}

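; Under harden-sls-blr, each indirect call (BLR) must be turned into a direct
; BL to the per-register thunk __llvm_slsblr_thunk_xN; the thunk performs the
; indirect branch followed by a speculation barrier (see the checks at the end
; of this file).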
define dso_local i32 @indirect_call(
i32 (...)* nocapture %f1, i32 (...)* nocapture %f2) {
entry:
; CHECK-LABEL: indirect_call:
  %callee.knr.cast = bitcast i32 (...)* %f1 to i32 ()*
  %call = tail call i32 %callee.knr.cast()
; HARDEN: bl {{__llvm_slsblr_thunk_x[0-9]+$}}
  %callee.knr.cast1 = bitcast i32 (...)* %f2 to i32 ()*
  %call2 = tail call i32 %callee.knr.cast1()
; HARDEN: bl {{__llvm_slsblr_thunk_x[0-9]+$}}
  %add = add nsw i32 %call2, %call
  ret i32 %add
; CHECK: .Lfunc_end
}

; Verify that a call through a function pointer loaded from a global is also
; routed through a thunk.
@a = dso_local local_unnamed_addr global i32 (...)* null, align 8
@b = dso_local local_unnamed_addr global i32 0, align 4
define dso_local void @indirect_call_global() local_unnamed_addr {
; CHECK-LABEL: indirect_call_global:
entry:
  %0 = load i32 ()*, i32 ()** bitcast (i32 (...)** @a to i32 ()**), align 8
  %call = tail call i32 %0() nounwind
; HARDEN: bl {{__llvm_slsblr_thunk_x[0-9]+$}}
  store i32 %call, i32* @b, align 4
  ret void
; CHECK: .Lfunc_end
}

; Verify that neither x16 nor x17 is used when the BLR mitigation is enabled,
; as a linker is allowed to clobber x16 or x17 on calls, which would break the
; correct execution of the code sequence produced by the mitigation.
; The test below carefully increases register pressure to persuade code
; generation to produce a BLR x16. Yes, that is a bit fragile.
define i64 @check_x16(i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp, i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp2) "target-features"="+neon,+reserve-x10,+reserve-x11,+reserve-x12,+reserve-x13,+reserve-x14,+reserve-x15,+reserve-x18,+reserve-x20,+reserve-x21,+reserve-x22,+reserve-x23,+reserve-x24,+reserve-x25,+reserve-x26,+reserve-x27,+reserve-x28,+reserve-x30,+reserve-x9" {
entry:
; CHECK-LABEL: check_x16:
  %0 = load i64 (i8*, i64, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** %fp, align 8
  %1 = bitcast i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** %fp2 to i8**
  %2 = load i8*, i8** %1, align 8
  %call = call i64 %0(i8* %2, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
  %3 = load i64 (i8*, i64, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** %fp2, align 8
  %4 = bitcast i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** %fp to i8**
  %5 = load i8*, i8** %4, align 8
  %call1 = call i64 %3(i8* %5, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
; NOHARDEN:   blr x16
; ISBDSB-NOT: bl __llvm_slsblr_thunk_x16
; SB-NOT:     bl __llvm_slsblr_thunk_x16
; CHECK
  %add = add nsw i64 %call1, %call
  ret i64 %add
; CHECK: .Lfunc_end
}

; Verify that the transformation works correctly for x29 when it is not
; reserved for use as a frame pointer.
; Since this is sensitive to register allocation choices, only check this with
; DAGISel, to limit accidental breakage of this somewhat brittle test.
define i64 @check_x29(i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp,
                      i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp2,
                      i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp3)
"target-features"="+neon,+reserve-x10,+reserve-x11,+reserve-x12,+reserve-x13,+reserve-x14,+reserve-x15,+reserve-x18,+reserve-x20,+reserve-x21,+reserve-x22,+reserve-x23,+reserve-x24,+reserve-x25,+reserve-x26,+reserve-x27,+reserve-x28,+reserve-x9"
"frame-pointer"="none"
{
entry:
; CHECK-LABEL: check_x29:
  %0 = load i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp, align 8
  %1 = bitcast i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp2 to i8**
  %2 = load i8*, i8** %1, align 8
  %3 = load i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp2, align 8
  %4 = bitcast i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp3 to i8**
  %5 = load i8*, i8** %4, align 8
  %6 = load i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp3, align 8
  %7 = bitcast i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp to i8**
  %8 = load i8*, i8** %7, align 8
  %call = call i64 %0(i8* %2, i8* %5, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
  %call1 = call i64 %3(i8* %2, i8* %5, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
; NOHARDEN:      blr x29
; ISBDSBDAGISEL: bl __llvm_slsblr_thunk_x29
; SBDAGISEL:     bl __llvm_slsblr_thunk_x29
; CHECK
  %call2 = call i64 %6(i8* %2, i8* %8, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
  %add = add nsw i64 %call1, %call
  %add1 = add nsw i64 %call2, %add
  ret i64 %add1
; CHECK: .Lfunc_end
}

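; Check the bodies of the generated thunks. Each __llvm_slsblr_thunk_xN moves
; its register into x16 and branches through x16, and that indirect branch is
; itself followed by a speculation barrier. Note that the thunks are expected
; to use DSB SY + ISB even in the SB configuration.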
; HARDEN-LABEL: __llvm_slsblr_thunk_x0:
; HARDEN:    mov x16, x0
; HARDEN:    br x16
; ISBDSB-NEXT: dsb sy
; ISBDSB-NEXT: isb
; SB-NEXT:     dsb sy
; SB-NEXT:     isb
; HARDEN-NEXT: .Lfunc_end
; HARDEN-LABEL: __llvm_slsblr_thunk_x19:
; HARDEN:    mov x16, x19
; HARDEN:    br x16
; ISBDSB-NEXT: dsb sy
; ISBDSB-NEXT: isb
; SB-NEXT:     dsb sy
; SB-NEXT:     isb
; HARDEN-NEXT: .Lfunc_end