; RUN: opt < %s -msan-check-access-address=0 -S -passes='module(msan-module),function(msan)' 2>&1 | FileCheck -allow-deprecated-dag-overlap %s
; RUN: opt < %s --passes='module(msan-module),function(msan)' -msan-check-access-address=0 -S | FileCheck -allow-deprecated-dag-overlap %s
; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S -passes='module(msan-module),function(msan)' 2>&1 | \
; RUN:   FileCheck -allow-deprecated-dag-overlap -check-prefixes=CHECK,CHECK-ORIGINS %s
; RUN: opt < %s -passes='module(msan-module),function(msan)' -msan-check-access-address=0 -msan-track-origins=1 -S | \
; RUN:   FileCheck -allow-deprecated-dag-overlap -check-prefixes=CHECK,CHECK-ORIGINS %s
; RUN: opt < %s -passes='module(msan-module),function(msan)' -msan-instrumentation-with-call-threshold=0 -msan-track-origins=1 -S | \
; RUN:   FileCheck -allow-deprecated-dag-overlap -check-prefixes=CHECK-CALLS %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; CHECK: @llvm.global_ctors {{.*}} { i32 0, void ()* @msan.module_ctor, i8* null }

; Check the presence and the linkage type of __msan_track_origins and
; other interface symbols.
; CHECK-NOT: @__msan_track_origins
; CHECK-ORIGINS: @__msan_track_origins = weak_odr constant i32 1
; CHECK-NOT: @__msan_keep_going = weak_odr constant i32 0
; CHECK: @__msan_retval_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_retval_origin_tls = external thread_local(initialexec) global i32
; CHECK: @__msan_param_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_param_origin_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_va_arg_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64


; Check instrumentation of stores

define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store i32 %x, i32* %p, align 4
  ret void
}

; CHECK-LABEL: @Store
; CHECK: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
; CHECK: store
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: {{^[0-9]+}}:
; CHECK-ORIGINS: store
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: {{^[0-9]+}}:
; CHECK: store
; CHECK: ret void


; Check instrumentation of aligned stores
; Shadow store has the same alignment as the original store; origin store
; does not specify explicit alignment.

define void @AlignedStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store i32 %x, i32* %p, align 32
  ret void
}

; CHECK-LABEL: @AlignedStore
; CHECK: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
; CHECK: store {{.*}} align 32
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: {{^[0-9]+}}:
; CHECK-ORIGINS: store {{.*}} align 32
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: {{^[0-9]+}}:
; CHECK: store {{.*}} align 32
; CHECK: ret void


; load followed by cmp: check that we load the shadow and call __msan_warning_with_origin.
define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32, i32* %a, align 4
  %tobool = icmp eq i32 %0, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  tail call void (...) @foo() nounwind
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  ret void
}

declare void @foo(...)

; CHECK-LABEL: @LoadAndCmp
; CHECK: %0 = load i32,
; CHECK: = load
; CHECK-ORIGINS: %[[ORIGIN:.*]] = load
; CHECK: call void @__msan_warning_with_origin_noreturn(i32
; CHECK-ORIGINS-SAME: %[[ORIGIN]])
; CHECK-CONT:
; CHECK-NEXT: unreachable
; CHECK: br i1 %tobool
; CHECK: ret void

; Check that we store the shadow for the retval.
define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
entry:
  ret i32 123
}

; CHECK-LABEL: @ReturnInt
; CHECK: store i32 0,{{.*}}__msan_retval_tls
; CHECK: ret i32

; Check that we get the shadow for the retval.
define void @CopyRetVal(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
  %call = tail call i32 @ReturnInt() nounwind
  store i32 %call, i32* %a, align 4
  ret void
}

; CHECK-LABEL: @CopyRetVal
; CHECK: load{{.*}}__msan_retval_tls
; CHECK: store
; CHECK: store
; CHECK: ret void


; Check that we generate PHIs for shadow.
define void @FuncWithPhi(i32* nocapture %a, i32* %b, i32* nocapture %c) nounwind uwtable sanitize_memory {
entry:
  %tobool = icmp eq i32* %b, null
  br i1 %tobool, label %if.else, label %if.then

  if.then:                                          ; preds = %entry
  %0 = load i32, i32* %b, align 4
  br label %if.end

  if.else:                                          ; preds = %entry
  %1 = load i32, i32* %c, align 4
  br label %if.end

  if.end:                                           ; preds = %if.else, %if.then
  %t.0 = phi i32 [ %0, %if.then ], [ %1, %if.else ]
  store i32 %t.0, i32* %a, align 4
  ret void
}

; CHECK-LABEL: @FuncWithPhi
; CHECK: = phi
; CHECK-NEXT: = phi
; CHECK: store
; CHECK: store
; CHECK: ret void

; Compute shadow for "x << 10"
define void @ShlConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32, i32* %x, align 4
  %1 = shl i32 %0, 10
  store i32 %1, i32* %x, align 4
  ret void
}

; CHECK-LABEL: @ShlConst
; CHECK: = load
; CHECK: = load
; CHECK: shl
; CHECK: shl
; CHECK: store
; CHECK: store
; CHECK: ret void

; Compute shadow for "10 << x": it should have 'sext i1'.
define void @ShlNonConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32, i32* %x, align 4
  %1 = shl i32 10, %0
  store i32 %1, i32* %x, align 4
  ret void
}

; CHECK-LABEL: @ShlNonConst
; CHECK: = load
; CHECK: = load
; CHECK: = sext i1
; CHECK: store
; CHECK: store
; CHECK: ret void

; SExt
define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable sanitize_memory {
entry:
  %0 = load i16, i16* %b, align 2
  %1 = sext i16 %0 to i32
  store i32 %1, i32* %a, align 4
  ret void
}

; CHECK-LABEL: @SExt
; CHECK: = load
; CHECK: = load
; CHECK: = sext
; CHECK: = sext
; CHECK: store
; CHECK: store
; CHECK: ret void


; memset
define void @MemSet(i8* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memset.p0i8.i64(i8* %x, i8 42, i64 10, i1 false)
  ret void
}

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind

; CHECK-LABEL: @MemSet
; CHECK: call i8* @__msan_memset
; CHECK: ret void


; memcpy
define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i1 false)
  ret void
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind

; CHECK-LABEL: @MemCpy
; CHECK: call i8* @__msan_memcpy
; CHECK: ret void


; memmove is lowered to a call
define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i1 false)
  ret void
}

declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind

; CHECK-LABEL: @MemMove
; CHECK: call i8* @__msan_memmove
; CHECK: ret void

;; ------------
;; Placeholder tests that will fail once element atomic @llvm.mem[cpy|move|set] intrinsics have
;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
;; verify that MSAN handles these intrinsics properly once they have been
;; added to that class hierarchy.
declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind

define void @atomic_memcpy(i8* nocapture %x, i8* nocapture %y) nounwind {
  ; CHECK-LABEL: atomic_memcpy
  ; CHECK-NEXT: call void @llvm.donothing
  ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
  ; CHECK-NEXT: ret void
  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
  ret void
}

define void @atomic_memmove(i8* nocapture %x, i8* nocapture %y) nounwind {
  ; CHECK-LABEL: atomic_memmove
  ; CHECK-NEXT: call void @llvm.donothing
  ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
  ; CHECK-NEXT: ret void
  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
  ret void
}

define void @atomic_memset(i8* nocapture %x) nounwind {
  ; CHECK-LABEL: atomic_memset
  ; CHECK-NEXT: call void @llvm.donothing
  ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
  ; CHECK-NEXT: ret void
  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
  ret void
}

;; ------------


; Check that we propagate shadow for "select"

define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select i1 %c, i32 %a, i32 %b
  ret i32 %cond
}

; CHECK-LABEL: @Select
; CHECK: select i1
; CHECK-DAG: or i32
; CHECK-DAG: xor i32
; CHECK: or i32
; CHECK-DAG: select i1
; CHECK-ORIGINS-DAG: select
; CHECK-ORIGINS-DAG: select
; CHECK-DAG: select i1
; CHECK: store i32{{.*}}@__msan_retval_tls
; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; CHECK: ret i32


; Check that we propagate origin for "select" with vector condition.
; Select condition is flattened to i1, which is then used to select one of the
; argument origins.

define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %cond
}

; CHECK-LABEL: @SelectVector
; CHECK: select <8 x i1>
; CHECK-DAG: or <8 x i16>
; CHECK-DAG: xor <8 x i16>
; CHECK: or <8 x i16>
; CHECK-DAG: select <8 x i1>
; CHECK-ORIGINS-DAG: select
; CHECK-ORIGINS-DAG: select
; CHECK-DAG: select <8 x i1>
; CHECK: store <8 x i16>{{.*}}@__msan_retval_tls
; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; CHECK: ret <8 x i16>


; Check that we propagate origin for "select" with scalar condition and vector
; arguments. Select condition shadow is sign-extended to the vector type and
; mixed into the result shadow.

define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select i1 %c, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %cond
}

; CHECK-LABEL: @SelectVector2
; CHECK: select i1
; CHECK-DAG: or <8 x i16>
; CHECK-DAG: xor <8 x i16>
; CHECK: or <8 x i16>
; CHECK-DAG: select i1
; CHECK-ORIGINS-DAG: select i1
; CHECK-ORIGINS-DAG: select i1
; CHECK-DAG: select i1
; CHECK: ret <8 x i16>


define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } %b) readnone sanitize_memory {
entry:
  %c = select i1 %x, { i64, i64 } %a, { i64, i64 } %b
  ret { i64, i64 } %c
}

; CHECK-LABEL: @SelectStruct
; CHECK: select i1 {{.*}}, { i64, i64 }
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
; CHECK-ORIGINS: select i1
; CHECK-ORIGINS: select i1
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 }
; CHECK: ret { i64, i64 }


define { i64*, double } @SelectStruct2(i1 zeroext %x, { i64*, double } %a, { i64*, double } %b) readnone sanitize_memory {
entry:
  %c = select i1 %x, { i64*, double } %a, { i64*, double } %b
  ret { i64*, double } %c
}

; CHECK-LABEL: @SelectStruct2
; CHECK: select i1 {{.*}}, { i64, i64 }
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
; CHECK-ORIGINS: select i1
; CHECK-ORIGINS: select i1
; CHECK-NEXT: select i1 {{.*}}, { i64*, double }
; CHECK: ret { i64*, double }


define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %0 = inttoptr i64 %x to i8*
  ret i8* %0
}

; CHECK-LABEL: @IntToPtr
; CHECK: load i64, i64*{{.*}}__msan_param_tls
; CHECK-ORIGINS-NEXT: load i32, i32*{{.*}}__msan_param_origin_tls
; CHECK-NEXT: call void @llvm.donothing
; CHECK-NEXT: inttoptr
; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
; CHECK: ret i8*


define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %0 = inttoptr i16 %x to i8*
  ret i8* %0
}

; CHECK-LABEL: @IntToPtr_ZExt
; CHECK: load i16, i16*{{.*}}__msan_param_tls
; CHECK: zext
; CHECK-NEXT: inttoptr
; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
; CHECK: ret i8*


; Check that we insert exactly one check on udiv
; (2nd arg shadow is checked, 1st arg shadow is propagated)

define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
entry:
  %div = udiv i32 %a, %b
  ret i32 %div
}

; CHECK-LABEL: @Div
; CHECK: icmp
; CHECK: call void @__msan_warning_with_origin
; CHECK-NOT: icmp
; CHECK: udiv
; CHECK-NOT: icmp
; CHECK: ret i32

; Check that fdiv, unlike udiv, simply propagates shadow.

define float @FDiv(float %a, float %b) nounwind uwtable readnone sanitize_memory {
entry:
  %c = fdiv float %a, %b
  ret float %c
}

; CHECK-LABEL: @FDiv
; CHECK: %[[SA:.*]] = load i32,{{.*}}@__msan_param_tls
; CHECK: %[[SB:.*]] = load i32,{{.*}}@__msan_param_tls
; CHECK: %[[SC:.*]] = or i32 %[[SA]], %[[SB]]
; CHECK: = fdiv float
; CHECK: store i32 %[[SC]], i32* {{.*}}@__msan_retval_tls
; CHECK: ret float

; Check that fneg simply propagates shadow.

define float @FNeg(float %a) nounwind uwtable readnone sanitize_memory {
entry:
  %c = fneg float %a
  ret float %c
}

; CHECK-LABEL: @FNeg
; CHECK: %[[SA:.*]] = load i32,{{.*}}@__msan_param_tls
; CHECK-ORIGINS: %[[SB:.*]] = load i32,{{.*}}@__msan_param_origin_tls
; CHECK: = fneg float
; CHECK: store i32 %[[SA]], i32* {{.*}}@__msan_retval_tls
; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; CHECK: ret float

; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)

define zeroext i1 @ICmpSLTZero(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt i32 %x, 0
  ret i1 %1
}

; CHECK-LABEL: @ICmpSLTZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: ret i1

define zeroext i1 @ICmpSGEZero(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sge i32 %x, 0
  ret i1 %1
}

; CHECK-LABEL: @ICmpSGEZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: icmp sge
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: ret i1

define zeroext i1 @ICmpSGTZero(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sgt i32 0, %x
  ret i1 %1
}

; CHECK-LABEL: @ICmpSGTZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: icmp sgt
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: ret i1

define zeroext i1 @ICmpSLEZero(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sle i32 0, %x
  ret i1 %1
}

; CHECK-LABEL: @ICmpSLEZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: icmp sle
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: ret i1


; Check that we propagate shadow for x<=-1, x>-1, etc (i.e. sign bit tests)

define zeroext i1 @ICmpSLTAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt i32 -1, %x
  ret i1 %1
}

; CHECK-LABEL: @ICmpSLTAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: ret i1

define zeroext i1 @ICmpSGEAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sge i32 -1, %x
  ret i1 %1
}

; CHECK-LABEL: @ICmpSGEAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: icmp sge
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: ret i1

define zeroext i1 @ICmpSGTAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sgt i32 %x, -1
  ret i1 %1
}

; CHECK-LABEL: @ICmpSGTAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: icmp sgt
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: ret i1

define zeroext i1 @ICmpSLEAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sle i32 %x, -1
  ret i1 %1
}

; CHECK-LABEL: @ICmpSLEAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: icmp sle
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: ret i1


; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
; of the vector arguments.

define <2 x i1> @ICmpSLT_vector_Zero(<2 x i32*> %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt <2 x i32*> %x, zeroinitializer
  ret <2 x i1> %1
}

; CHECK-LABEL: @ICmpSLT_vector_Zero
; CHECK: icmp slt <2 x i64>
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: icmp slt <2 x i32*>
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: ret <2 x i1>

; Check that we propagate shadow for x<=-1, x>0, etc (i.e. sign bit tests)
; of the vector arguments.

define <2 x i1> @ICmpSLT_vector_AllOnes(<2 x i32> %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt <2 x i32> <i32 -1, i32 -1>, %x
  ret <2 x i1> %1
}

; CHECK-LABEL: @ICmpSLT_vector_AllOnes
; CHECK: icmp slt <2 x i32>
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: icmp slt <2 x i32>
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: ret <2 x i1>


; Check that we propagate shadow for unsigned relational comparisons with
; constants

define zeroext i1 @ICmpUGTConst(i32 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %cmp = icmp ugt i32 %x, 7
  ret i1 %cmp
}

; CHECK-LABEL: @ICmpUGTConst
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: ret i1


; Check that loads of shadow have the same alignment as the original loads.
; Check that loads of origin have the alignment of max(4, original alignment).

define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
  %y = alloca i32, align 64
  %1 = load volatile i32, i32* %y, align 64
  ret i32 %1
}

; CHECK-LABEL: @ShadowLoadAlignmentLarge
; CHECK: load volatile i32, i32* {{.*}} align 64
; CHECK: load i32, i32* {{.*}} align 64
; CHECK: ret i32

define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
  %y = alloca i32, align 2
  %1 = load volatile i32, i32* %y, align 2
  ret i32 %1
}

; CHECK-LABEL: @ShadowLoadAlignmentSmall
; CHECK: load volatile i32, i32* {{.*}} align 2
; CHECK: load i32, i32* {{.*}} align 2
; CHECK-ORIGINS: load i32, i32* {{.*}} align 4
; CHECK: ret i32


; Test vector manipulation instructions.
; Check that the same bit manipulation is applied to the shadow values.
; Check that there is a zero test of the shadow of %idx argument, where present.

define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
  %x = extractelement <4 x i32> %vec, i32 %idx
  ret i32 %x
}

; CHECK-LABEL: @ExtractElement
; CHECK: extractelement
; CHECK: call void @__msan_warning_with_origin
; CHECK: extractelement
; CHECK: ret i32

define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
  %vec1 = insertelement <4 x i32> %vec, i32 %x, i32 %idx
  ret <4 x i32> %vec1
}

; CHECK-LABEL: @InsertElement
; CHECK: insertelement
; CHECK: call void @__msan_warning_with_origin
; CHECK: insertelement
; CHECK: ret <4 x i32>

define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
  %vec2 = shufflevector <4 x i32> %vec, <4 x i32> %vec1,
                        <4 x i32> <i32 0, i32 4, i32 1, i32 5>
  ret <4 x i32> %vec2
}

; CHECK-LABEL: @ShuffleVector
; CHECK: shufflevector
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: shufflevector
; CHECK: ret <4 x i32>


; Test bswap intrinsic instrumentation
define i32 @BSwap(i32 %x) nounwind uwtable readnone sanitize_memory {
  %y = tail call i32 @llvm.bswap.i32(i32 %x)
  ret i32 %y
}

declare i32 @llvm.bswap.i32(i32) nounwind readnone

; CHECK-LABEL: @BSwap
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning_with_origin
; CHECK: ret i32

; Test handling of vectors of pointers.
; Check that shadow of such vector is a vector of integers.

define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memory {
  %x = load <8 x i8*>, <8 x i8*>* %p
  ret <8 x i8*> %x
}

; CHECK-LABEL: @VectorOfPointers
; CHECK: load <8 x i8*>, <8 x i8*>*
; CHECK: load <8 x i64>, <8 x i64>*
; CHECK: store <8 x i64> {{.*}} @__msan_retval_tls
; CHECK: ret <8 x i8*>

; Test handling of va_copy.

declare void @llvm.va_copy(i8*, i8*) nounwind

define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory {
  call void @llvm.va_copy(i8* %p1, i8* %p2) nounwind
  ret void
}

; CHECK-LABEL: @VACopy
; CHECK: call void @llvm.memset.p0i8.i64({{.*}}, i8 0, i64 24, i1 false)
; CHECK: ret void


; Test that va_start instrumentation does not use va_arg_tls*.
; It should work with a local stack copy instead.

%struct.__va_list_tag = type { i32, i32, i8*, i8* }
declare void @llvm.va_start(i8*) nounwind

; Function Attrs: nounwind uwtable
define void @VAStart(i32 %x, ...) sanitize_memory {
entry:
  %x.addr = alloca i32, align 4
  %va = alloca [1 x %struct.__va_list_tag], align 16
  store i32 %x, i32* %x.addr, align 4
  %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i32 0, i32 0
  %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
  call void @llvm.va_start(i8* %arraydecay1)
  ret void
}

; CHECK-LABEL: @VAStart
; CHECK: call void @llvm.va_start
; CHECK-NOT: @__msan_va_arg_tls
; CHECK-NOT: @__msan_va_arg_overflow_size_tls
; CHECK: ret void


; Test handling of volatile stores.
; Check that MemorySanitizer does not add a check of the value being stored.

define void @VolatileStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store volatile i32 %x, i32* %p, align 4
  ret void
}

; CHECK-LABEL: @VolatileStore
; CHECK-NOT: @__msan_warning_with_origin
; CHECK: ret void


; Test that checks are omitted and returned value is always initialized if
; sanitize_memory attribute is missing.

define i32 @NoSanitizeMemory(i32 %x) uwtable {
entry:
  %tobool = icmp eq i32 %x, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  tail call void @bar()
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  ret i32 %x
}

declare void @bar()

; CHECK-LABEL: @NoSanitizeMemory
; CHECK-NOT: @__msan_warning_with_origin
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK-NOT: @__msan_warning_with_origin
; CHECK: ret i32


; Test that stack allocations are unpoisoned in functions missing
; sanitize_memory attribute

define i32 @NoSanitizeMemoryAlloca() {
entry:
  %p = alloca i32, align 4
  %x = call i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
  ret i32 %x
}

declare i32 @NoSanitizeMemoryAllocaHelper(i32* %p)

; CHECK-LABEL: @NoSanitizeMemoryAlloca
; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 0, i64 4, i1 false)
; CHECK: call i32 @NoSanitizeMemoryAllocaHelper(i32*
; CHECK: ret i32


; Test that undef is unpoisoned in functions missing
; sanitize_memory attribute

define i32 @NoSanitizeMemoryUndef() {
entry:
  %x = call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
  ret i32 %x
}

declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)

; CHECK-LABEL: @NoSanitizeMemoryUndef
; CHECK: store i32 0, i32* {{.*}} @__msan_param_tls
; CHECK: call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
; CHECK: ret i32


; Test PHINode instrumentation in blacklisted functions

define i32 @NoSanitizeMemoryPHI(i32 %x) {
entry:
  %tobool = icmp ne i32 %x, 0
  br i1 %tobool, label %cond.true, label %cond.false

cond.true:                                        ; preds = %entry
  br label %cond.end

cond.false:                                       ; preds = %entry
  br label %cond.end

cond.end:                                         ; preds = %cond.false, %cond.true
  %cond = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
  ret i32 %cond
}

; CHECK: [[A:%.*]] = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
; CHECK: store i32 0, i32* bitcast {{.*}} @__msan_retval_tls
; CHECK: ret i32 [[A]]


; Test that there are no __msan_param_origin_tls stores when
; argument shadow is a compile-time zero constant (which is always the case
; in functions missing sanitize_memory attribute).

define i32 @NoSanitizeMemoryParamTLS(i32* nocapture readonly %x) {
entry:
  %0 = load i32, i32* %x, align 4
  %call = tail call i32 @NoSanitizeMemoryParamTLSHelper(i32 %0)
  ret i32 %call
}

declare i32 @NoSanitizeMemoryParamTLSHelper(i32 %x)

; CHECK-LABEL: define i32 @NoSanitizeMemoryParamTLS(
; CHECK-NOT: __msan_param_origin_tls
; CHECK: ret i32


; Test argument shadow alignment

define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory {
entry:
  ret <2 x i64> %b
}

; CHECK-LABEL: @ArgumentShadowAlignment
; CHECK: load <2 x i64>, <2 x i64>* {{.*}} @__msan_param_tls {{.*}}, align 8
; CHECK: store <2 x i64> {{.*}} @__msan_retval_tls {{.*}}, align 8
; CHECK: ret <2 x i64>


; Test origin propagation for insertvalue

define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory {
entry:
  %a = insertvalue { i64, i32 } undef, i64 %x, 0
  %b = insertvalue { i64, i32 } %a, i32 %y, 1
  ret { i64, i32 } %b
}

; CHECK-ORIGINS: @make_pair_64_32
; First element shadow
; CHECK-ORIGINS: insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 {{.*}}, 0
; First element origin
; CHECK-ORIGINS: icmp ne i64
; CHECK-ORIGINS: select i1
; First element app value
; CHECK-ORIGINS: insertvalue { i64, i32 } undef, i64 {{.*}}, 0
; Second element shadow
; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
; Second element origin
; CHECK-ORIGINS: icmp ne i32
; CHECK-ORIGINS: select i1
; Second element app value
; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
; CHECK-ORIGINS: ret { i64, i32 }


; Test shadow propagation for aggregates passed through ellipsis.

%struct.StructByVal = type { i32, i32, i32, i32 }

declare void @VAArgStructFn(i32 %guard, ...)

define void @VAArgStruct(%struct.StructByVal* nocapture %s) sanitize_memory {
entry:
  %agg.tmp2 = alloca %struct.StructByVal, align 8
  %0 = bitcast %struct.StructByVal* %s to i8*
  %agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
  %agg.tmp.sroa.0.0.copyload = load i64, i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
  %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, %struct.StructByVal* %s, i64 0, i32 2
  %agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
  %agg.tmp.sroa.2.0.copyload = load i64, i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
  %1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %0, i64 16, i1 false)
  call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval(%struct.StructByVal) align 8 %agg.tmp2)
  ret void
}

; "undef" and the first 2 structs go to general purpose registers;
; the third struct goes to the overflow area byval

; CHECK-LABEL: @VAArgStruct
; undef not stored to __msan_va_arg_tls - it's a fixed argument
; first struct through general purpose registers
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 8){{.*}}, align 8
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 16){{.*}}, align 8
; second struct through general purpose registers
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 24){{.*}}, align 8
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 32){{.*}}, align 8
; third struct through the overflow area byval
; CHECK: ptrtoint %struct.StructByVal* {{.*}} to i64
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64{{.*}}@__msan_va_arg_tls {{.*}}, i64 176
; CHECK: store i64 16, i64* @__msan_va_arg_overflow_size_tls
; CHECK: call void (i32, ...) @VAArgStructFn
; CHECK: ret void

; Same code compiled without SSE (see attributes below).
; The register save area is only 48 bytes instead of 176.
define void @VAArgStructNoSSE(%struct.StructByVal* nocapture %s) sanitize_memory #0 {
entry:
  %agg.tmp2 = alloca %struct.StructByVal, align 8
  %0 = bitcast %struct.StructByVal* %s to i8*
  %agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
  %agg.tmp.sroa.0.0.copyload = load i64, i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
  %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, %struct.StructByVal* %s, i64 0, i32 2
  %agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
  %agg.tmp.sroa.2.0.copyload = load i64, i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
  %1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %0, i64 16, i1 false)
  call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval(%struct.StructByVal) align 8 %agg.tmp2)
  ret void
}

attributes #0 = { "target-features"="+fxsr,+x87,-sse" }

; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64{{.*}}@__msan_va_arg_tls {{.*}}, i64 48

declare i32 @InnerTailCall(i32 %a)

define void @MismatchedReturnTypeTailCall(i32 %a) sanitize_memory {
  %b = tail call i32 @InnerTailCall(i32 %a)
  ret void
}

; We used to strip off the 'tail' modifier, but now that we unpoison return slot
; shadow before the call, we don't need to anymore.

; CHECK-LABEL: define void @MismatchedReturnTypeTailCall
; CHECK: tail call i32 @InnerTailCall
; CHECK: ret void


declare i32 @MustTailCall(i32 %a)

define i32 @CallMustTailCall(i32 %a) sanitize_memory {
  %b = musttail call i32 @MustTailCall(i32 %a)
  ret i32 %b
}

; For "musttail" calls we can not insert any shadow manipulating code between
; call and the return instruction. And we don't need to, because everything is
; taken care of in the callee.

; CHECK-LABEL: define i32 @CallMustTailCall
; CHECK: musttail call i32 @MustTailCall
; No instrumentation between call and ret.
; CHECK-NEXT: ret i32

declare i32* @MismatchingMustTailCall(i32 %a)

define i8* @MismatchingCallMustTailCall(i32 %a) sanitize_memory {
  %b = musttail call i32* @MismatchingMustTailCall(i32 %a)
  %c = bitcast i32* %b to i8*
  ret i8* %c
}

; For "musttail" calls we can not insert any shadow manipulating code between
; call and the return instruction. And we don't need to, because everything is
; taken care of in the callee.

; CHECK-LABEL: define i8* @MismatchingCallMustTailCall
; CHECK: musttail call i32* @MismatchingMustTailCall
; No instrumentation between call and ret.
; CHECK-NEXT: bitcast i32* {{.*}} to i8*
; CHECK-NEXT: ret i8*


; CHECK-LABEL: define internal void @msan.module_ctor() {
; CHECK: call void @__msan_init()

; CHECK-CALLS: declare void @__msan_maybe_warning_1(i8 zeroext, i32 zeroext)
; CHECK-CALLS: declare void @__msan_maybe_store_origin_1(i8 zeroext, i8*, i32 zeroext)
; CHECK-CALLS: declare void @__msan_maybe_warning_2(i16 zeroext, i32 zeroext)
; CHECK-CALLS: declare void @__msan_maybe_store_origin_2(i16 zeroext, i8*, i32 zeroext)
; CHECK-CALLS: declare void @__msan_maybe_warning_4(i32 zeroext, i32 zeroext)
; CHECK-CALLS: declare void @__msan_maybe_store_origin_4(i32 zeroext, i8*, i32 zeroext)
; CHECK-CALLS: declare void @__msan_maybe_warning_8(i64 zeroext, i32 zeroext)
; CHECK-CALLS: declare void @__msan_maybe_store_origin_8(i64 zeroext, i8*, i32 zeroext)
