; MemorySanitizer instrumentation test (run through opt -msan, verified with FileCheck).
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; CHECK: @llvm.global_ctors {{.*}} { i32 0, void ()* @msan.module_ctor, i8* null }

; Check the presence and the linkage type of __msan_track_origins and
; other interface symbols.
; CHECK-NOT: @__msan_track_origins
; CHECK-ORIGINS: @__msan_track_origins = weak_odr constant i32 1
; CHECK-NOT: @__msan_keep_going = weak_odr constant i32 0
; CHECK: @__msan_retval_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_retval_origin_tls = external thread_local(initialexec) global i32
; CHECK: @__msan_param_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_param_origin_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_va_arg_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64
; CHECK: @__msan_origin_tls = external thread_local(initialexec) global i32


; Check instrumentation of stores

define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store i32 %x, i32* %p, align 4
  ret void
}

; CHECK-LABEL: @Store
; CHECK: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
; CHECK: store
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
; CHECK: store
; CHECK: ret void


; Check instrumentation of aligned stores
; Shadow store has the same alignment as the original store; origin store
; does not specify explicit alignment.

define void @AlignedStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store i32 %x, i32* %p, align 32
  ret void
}

; CHECK-LABEL: @AlignedStore
; CHECK: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
; CHECK: store {{.*}} align 32
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store {{.*}} align 32
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
; CHECK: store {{.*}} align 32
; CHECK: ret void


; load followed by cmp: check that we load the shadow and call __msan_warning.
define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32, i32* %a, align 4
  %tobool = icmp eq i32 %0, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  tail call void (...) @foo() nounwind
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  ret void
}

declare void @foo(...)

; CHECK-LABEL: @LoadAndCmp
; CHECK: = load
; CHECK: = load
; CHECK: call void @__msan_warning_noreturn()
; CHECK-NEXT: call void asm sideeffect
; CHECK-NEXT: unreachable
; CHECK: ret void

; Check that we store the shadow for the retval.
define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
entry:
  ret i32 123
}

; CHECK-LABEL: @ReturnInt
; CHECK: store i32 0,{{.*}}__msan_retval_tls
; CHECK: ret i32

; Check that we get the shadow for the retval.
define void @CopyRetVal(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
  %call = tail call i32 @ReturnInt() nounwind
  store i32 %call, i32* %a, align 4
  ret void
}

; CHECK-LABEL: @CopyRetVal
; CHECK: load{{.*}}__msan_retval_tls
; CHECK: store
; CHECK: store
; CHECK: ret void


; Check that we generate PHIs for shadow.
define void @FuncWithPhi(i32* nocapture %a, i32* %b, i32* nocapture %c) nounwind uwtable sanitize_memory {
entry:
  %tobool = icmp eq i32* %b, null
  br i1 %tobool, label %if.else, label %if.then

  if.then:                                          ; preds = %entry
  %0 = load i32, i32* %b, align 4
  br label %if.end

  if.else:                                          ; preds = %entry
  %1 = load i32, i32* %c, align 4
  br label %if.end

  if.end:                                           ; preds = %if.else, %if.then
  %t.0 = phi i32 [ %0, %if.then ], [ %1, %if.else ]
  store i32 %t.0, i32* %a, align 4
  ret void
}

; CHECK-LABEL: @FuncWithPhi
; CHECK: = phi
; CHECK-NEXT: = phi
; CHECK: store
; CHECK: store
; CHECK: ret void

; Compute shadow for "x << 10"
define void @ShlConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32, i32* %x, align 4
  %1 = shl i32 %0, 10
  store i32 %1, i32* %x, align 4
  ret void
}

; CHECK-LABEL: @ShlConst
; CHECK: = load
; CHECK: = load
; CHECK: shl
; CHECK: shl
; CHECK: store
; CHECK: store
; CHECK: ret void

; Compute shadow for "10 << x": it should have 'sext i1'.
define void @ShlNonConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32, i32* %x, align 4
  %1 = shl i32 10, %0
  store i32 %1, i32* %x, align 4
  ret void
}

; CHECK-LABEL: @ShlNonConst
; CHECK: = load
; CHECK: = load
; CHECK: = sext i1
; CHECK: store
; CHECK: store
; CHECK: ret void

; SExt
define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable sanitize_memory {
entry:
  %0 = load i16, i16* %b, align 2
  %1 = sext i16 %0 to i32
  store i32 %1, i32* %a, align 4
  ret void
}

; CHECK-LABEL: @SExt
; CHECK: = load
; CHECK: = load
; CHECK: = sext
; CHECK: = sext
; CHECK: store
; CHECK: store
; CHECK: ret void


; memset
define void @MemSet(i8* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memset.p0i8.i64(i8* %x, i8 42, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind

; CHECK-LABEL: @MemSet
; CHECK: call i8* @__msan_memset
; CHECK: ret void


; memcpy
define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

; CHECK-LABEL: @MemCpy
; CHECK: call i8* @__msan_memcpy
; CHECK: ret void


; memmove is lowered to a call
define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

; CHECK-LABEL: @MemMove
; CHECK: call i8* @__msan_memmove
; CHECK: ret void


; Check that we propagate shadow for "select"

define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select i1 %c, i32 %a, i32 %b
  ret i32 %cond
}

; CHECK-LABEL: @Select
; CHECK: select i1
; CHECK-DAG: or i32
; CHECK-DAG: xor i32
; CHECK: or i32
; CHECK-DAG: select i1
; CHECK-ORIGINS-DAG: select
; CHECK-ORIGINS-DAG: select
; CHECK-DAG: select i1
; CHECK: store i32{{.*}}@__msan_retval_tls
; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; CHECK: ret i32


; Check that we propagate origin for "select" with vector condition.
; Select condition is flattened to i1, which is then used to select one of the
; argument origins.

define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %cond
}

; CHECK-LABEL: @SelectVector
; CHECK: select <8 x i1>
; CHECK-DAG: or <8 x i16>
; CHECK-DAG: xor <8 x i16>
; CHECK: or <8 x i16>
; CHECK-DAG: select <8 x i1>
; CHECK-ORIGINS-DAG: select
; CHECK-ORIGINS-DAG: select
; CHECK-DAG: select <8 x i1>
; CHECK: store <8 x i16>{{.*}}@__msan_retval_tls
; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; CHECK: ret <8 x i16>


; Check that we propagate origin for "select" with scalar condition and vector
; arguments. Select condition shadow is sign-extended to the vector type and
; mixed into the result shadow.

define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select i1 %c, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %cond
}

; CHECK-LABEL: @SelectVector2
; CHECK: select i1
; CHECK-DAG: or <8 x i16>
; CHECK-DAG: xor <8 x i16>
; CHECK: or <8 x i16>
; CHECK-DAG: select i1
; CHECK-ORIGINS-DAG: select i1
; CHECK-ORIGINS-DAG: select i1
; CHECK-DAG: select i1
; CHECK: ret <8 x i16>


define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } %b) readnone sanitize_memory {
entry:
  %c = select i1 %x, { i64, i64 } %a, { i64, i64 } %b
  ret { i64, i64 } %c
}

; CHECK-LABEL: @SelectStruct
; CHECK: select i1 {{.*}}, { i64, i64 }
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
; CHECK-ORIGINS: select i1
; CHECK-ORIGINS: select i1
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 }
; CHECK: ret { i64, i64 }


define { i64*, double } @SelectStruct2(i1 zeroext %x, { i64*, double } %a, { i64*, double } %b) readnone sanitize_memory {
entry:
  %c = select i1 %x, { i64*, double } %a, { i64*, double } %b
  ret { i64*, double } %c
}

; CHECK-LABEL: @SelectStruct2
; CHECK: select i1 {{.*}}, { i64, i64 }
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
; CHECK-ORIGINS: select i1
; CHECK-ORIGINS: select i1
; CHECK-NEXT: select i1 {{.*}}, { i64*, double }
; CHECK: ret { i64*, double }


define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %0 = inttoptr i64 %x to i8*
  ret i8* %0
}

; CHECK-LABEL: @IntToPtr
; CHECK: load i64, i64*{{.*}}__msan_param_tls
; CHECK-ORIGINS-NEXT: load i32, i32*{{.*}}__msan_param_origin_tls
; CHECK-NEXT: inttoptr
; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
; CHECK: ret i8*


define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %0 = inttoptr i16 %x to i8*
  ret i8* %0
}

; CHECK-LABEL: @IntToPtr_ZExt
; CHECK: load i16, i16*{{.*}}__msan_param_tls
; CHECK: zext
; CHECK-NEXT: inttoptr
; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
; CHECK: ret i8*


; Check that we insert exactly one check on udiv
; (2nd arg shadow is checked, 1st arg shadow is propagated)

define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
entry:
  %div = udiv i32 %a, %b
  ret i32 %div
}

; CHECK-LABEL: @Div
; CHECK: icmp
; CHECK: call void @__msan_warning
; CHECK-NOT: icmp
; CHECK: udiv
; CHECK-NOT: icmp
; CHECK: ret i32


; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)

define zeroext i1 @ICmpSLTZero(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt i32 %x, 0
  ret i1 %1
}

; CHECK-LABEL: @ICmpSLTZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSGEZero(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sge i32 %x, 0
  ret i1 %1
}

; CHECK-LABEL: @ICmpSGEZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sge
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSGTZero(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sgt i32 0, %x
  ret i1 %1
}

; CHECK-LABEL: @ICmpSGTZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sgt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSLEZero(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sle i32 0, %x
  ret i1 %1
}

; CHECK-LABEL: @ICmpSLEZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sle
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1


; Check that we propagate shadow for x<=-1, x>-1, etc (i.e. sign bit tests)

define zeroext i1 @ICmpSLTAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt i32 -1, %x
  ret i1 %1
}

; CHECK-LABEL: @ICmpSLTAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSGEAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sge i32 -1, %x
  ret i1 %1
}

; CHECK-LABEL: @ICmpSGEAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sge
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSGTAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sgt i32 %x, -1
  ret i1 %1
}

; CHECK-LABEL: @ICmpSGTAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sgt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSLEAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sle i32 %x, -1
  ret i1 %1
}

; CHECK-LABEL: @ICmpSLEAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sle
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1


; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
; of the vector arguments.

define <2 x i1> @ICmpSLT_vector_Zero(<2 x i32*> %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt <2 x i32*> %x, zeroinitializer
  ret <2 x i1> %1
}

; CHECK-LABEL: @ICmpSLT_vector_Zero
; CHECK: icmp slt <2 x i64>
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt <2 x i32*>
; CHECK-NOT: call void @__msan_warning
; CHECK: ret <2 x i1>

; Check that we propagate shadow for x<=-1, x>0, etc (i.e. sign bit tests)
; of the vector arguments.

define <2 x i1> @ICmpSLT_vector_AllOnes(<2 x i32> %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt <2 x i32> <i32 -1, i32 -1>, %x
  ret <2 x i1> %1
}

; CHECK-LABEL: @ICmpSLT_vector_AllOnes
; CHECK: icmp slt <2 x i32>
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt <2 x i32>
; CHECK-NOT: call void @__msan_warning
; CHECK: ret <2 x i1>


; Check that we propagate shadow for unsigned relational comparisons with
; constants

define zeroext i1 @ICmpUGTConst(i32 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %cmp = icmp ugt i32 %x, 7
  ret i1 %cmp
}

; CHECK-LABEL: @ICmpUGTConst
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1


; Check that loads of shadow have the same alignment as the original loads.
; Check that loads of origin have the alignment of max(4, original alignment).

define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
  %y = alloca i32, align 64
  %1 = load volatile i32, i32* %y, align 64
  ret i32 %1
}

; CHECK-LABEL: @ShadowLoadAlignmentLarge
; CHECK: load volatile i32, i32* {{.*}} align 64
; CHECK: load i32, i32* {{.*}} align 64
; CHECK: ret i32

define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
  %y = alloca i32, align 2
  %1 = load volatile i32, i32* %y, align 2
  ret i32 %1
}

; CHECK-LABEL: @ShadowLoadAlignmentSmall
; CHECK: load volatile i32, i32* {{.*}} align 2
; CHECK: load i32, i32* {{.*}} align 2
; CHECK-ORIGINS: load i32, i32* {{.*}} align 4
; CHECK: ret i32


; Test vector manipulation instructions.
; Check that the same bit manipulation is applied to the shadow values.
; Check that there is a zero test of the shadow of %idx argument, where present.

define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
  %x = extractelement <4 x i32> %vec, i32 %idx
  ret i32 %x
}

; CHECK-LABEL: @ExtractElement
; CHECK: extractelement
; CHECK: call void @__msan_warning
; CHECK: extractelement
; CHECK: ret i32

define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
  %vec1 = insertelement <4 x i32> %vec, i32 %x, i32 %idx
  ret <4 x i32> %vec1
}

; CHECK-LABEL: @InsertElement
; CHECK: insertelement
; CHECK: call void @__msan_warning
; CHECK: insertelement
; CHECK: ret <4 x i32>

define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
  %vec2 = shufflevector <4 x i32> %vec, <4 x i32> %vec1,
                        <4 x i32> <i32 0, i32 4, i32 1, i32 5>
  ret <4 x i32> %vec2
}

; CHECK-LABEL: @ShuffleVector
; CHECK: shufflevector
; CHECK-NOT: call void @__msan_warning
; CHECK: shufflevector
; CHECK: ret <4 x i32>


; Test bswap intrinsic instrumentation
define i32 @BSwap(i32 %x) nounwind uwtable readnone sanitize_memory {
  %y = tail call i32 @llvm.bswap.i32(i32 %x)
  ret i32 %y
}

declare i32 @llvm.bswap.i32(i32) nounwind readnone

; CHECK-LABEL: @BSwap
; CHECK-NOT: call void @__msan_warning
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i32


; Store intrinsic.

define void @StoreIntrinsic(i8* %p, <4 x float> %x) nounwind uwtable sanitize_memory {
  call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
  ret void
}

declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind

; CHECK-LABEL: @StoreIntrinsic
; CHECK-NOT: br
; CHECK-NOT: = or
; CHECK: store <4 x i32> {{.*}} align 1
; CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
; CHECK: ret void


; Load intrinsic.

define <16 x i8> @LoadIntrinsic(i8* %p) nounwind uwtable sanitize_memory {
  %call = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p)
  ret <16 x i8> %call
}

declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p) nounwind

; CHECK-LABEL: @LoadIntrinsic
; CHECK: load <16 x i8>, <16 x i8>* {{.*}} align 1
; CHECK-ORIGINS: [[ORIGIN:%[01-9a-z]+]] = load i32, i32* {{.*}}
; CHECK-NOT: br
; CHECK-NOT: = or
; CHECK: call <16 x i8> @llvm.x86.sse3.ldu.dq
; CHECK: store <16 x i8> {{.*}} @__msan_retval_tls
; CHECK-ORIGINS: store i32 {{.*}}[[ORIGIN]], i32* @__msan_retval_origin_tls
; CHECK: ret <16 x i8>


; Simple NoMem intrinsic
; Check that shadow is OR'ed, and origin is Select'ed
; And no shadow checks!

define <8 x i16> @Paddsw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable sanitize_memory {
  %call = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %call
}

declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b) nounwind

; CHECK-LABEL: @Paddsw128
; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
; CHECK-NEXT: = or <8 x i16>
; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
; CHECK-ORIGINS-NEXT: = icmp ne i128 {{.*}}, 0
; CHECK-ORIGINS-NEXT: = select i1 {{.*}}, i32 {{.*}}, i32
; CHECK-NEXT: call <8 x i16> @llvm.x86.sse2.padds.w
; CHECK-NEXT: store <8 x i16> {{.*}} @__msan_retval_tls
; CHECK-ORIGINS: store i32 {{.*}} @__msan_retval_origin_tls
; CHECK-NEXT: ret <8 x i16>


; Test handling of vectors of pointers.
; Check that shadow of such vector is a vector of integers.

define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memory {
  %x = load <8 x i8*>, <8 x i8*>* %p
  ret <8 x i8*> %x
}

; CHECK-LABEL: @VectorOfPointers
; CHECK: load <8 x i8*>, <8 x i8*>*
; CHECK: load <8 x i64>, <8 x i64>*
; CHECK: store <8 x i64> {{.*}} @__msan_retval_tls
; CHECK: ret <8 x i8*>

; Test handling of va_copy.

declare void @llvm.va_copy(i8*, i8*) nounwind

define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory {
  call void @llvm.va_copy(i8* %p1, i8* %p2) nounwind
  ret void
}

; CHECK-LABEL: @VACopy
; CHECK: call void @llvm.memset.p0i8.i64({{.*}}, i8 0, i64 24, i32 8, i1 false)
; CHECK: ret void


; Test that va_start instrumentation does not use va_arg_tls*.
; It should work with a local stack copy instead.

%struct.__va_list_tag = type { i32, i32, i8*, i8* }
declare void @llvm.va_start(i8*) nounwind

; Function Attrs: nounwind uwtable
define void @VAStart(i32 %x, ...) sanitize_memory {
entry:
  %x.addr = alloca i32, align 4
  %va = alloca [1 x %struct.__va_list_tag], align 16
  store i32 %x, i32* %x.addr, align 4
  %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i32 0, i32 0
  %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
  call void @llvm.va_start(i8* %arraydecay1)
  ret void
}

; CHECK-LABEL: @VAStart
; CHECK: call void @llvm.va_start
; CHECK-NOT: @__msan_va_arg_tls
; CHECK-NOT: @__msan_va_arg_overflow_size_tls
; CHECK: ret void


; Test handling of volatile stores.
; Check that MemorySanitizer does not add a check of the value being stored.

define void @VolatileStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store volatile i32 %x, i32* %p, align 4
  ret void
}

; CHECK-LABEL: @VolatileStore
; CHECK-NOT: @__msan_warning
; CHECK: ret void


; Test that checks are omitted and returned value is always initialized if
; sanitize_memory attribute is missing.

define i32 @NoSanitizeMemory(i32 %x) uwtable {
entry:
  %tobool = icmp eq i32 %x, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  tail call void @bar()
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  ret i32 %x
}

declare void @bar()

; CHECK-LABEL: @NoSanitizeMemory
; CHECK-NOT: @__msan_warning
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK-NOT: @__msan_warning
; CHECK: ret i32


; Test that stack allocations are unpoisoned in functions missing
; sanitize_memory attribute

define i32 @NoSanitizeMemoryAlloca() {
entry:
  %p = alloca i32, align 4
  %x = call i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
  ret i32 %x
}

declare i32 @NoSanitizeMemoryAllocaHelper(i32* %p)

; CHECK-LABEL: @NoSanitizeMemoryAlloca
; CHECK: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 0, i64 4, i32 4, i1 false)
; CHECK: call i32 @NoSanitizeMemoryAllocaHelper(i32*
; CHECK: ret i32


; Test that undef is unpoisoned in functions missing
; sanitize_memory attribute

define i32 @NoSanitizeMemoryUndef() {
entry:
  %x = call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
  ret i32 %x
}

declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)

; CHECK-LABEL: @NoSanitizeMemoryUndef
; CHECK: store i32 0, i32* {{.*}} @__msan_param_tls
; CHECK: call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
; CHECK: ret i32


; Test PHINode instrumentation in blacklisted functions

define i32 @NoSanitizeMemoryPHI(i32 %x) {
entry:
  %tobool = icmp ne i32 %x, 0
  br i1 %tobool, label %cond.true, label %cond.false

cond.true:                                        ; preds = %entry
  br label %cond.end

cond.false:                                       ; preds = %entry
  br label %cond.end

cond.end:                                         ; preds = %cond.false, %cond.true
  %cond = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
  ret i32 %cond
}

; CHECK: [[A:%.*]] = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
; CHECK: store i32 0, i32* bitcast {{.*}} @__msan_retval_tls
; CHECK: ret i32 [[A]]


; Test that there are no __msan_param_origin_tls stores when
; argument shadow is a compile-time zero constant (which is always the case
; in functions missing sanitize_memory attribute).

define i32 @NoSanitizeMemoryParamTLS(i32* nocapture readonly %x) {
entry:
  %0 = load i32, i32* %x, align 4
  %call = tail call i32 @NoSanitizeMemoryParamTLSHelper(i32 %0)
  ret i32 %call
}

declare i32 @NoSanitizeMemoryParamTLSHelper(i32 %x)

; CHECK-LABEL: define i32 @NoSanitizeMemoryParamTLS(
; CHECK-NOT: __msan_param_origin_tls
; CHECK: ret i32


; Test argument shadow alignment

define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory {
entry:
  ret <2 x i64> %b
}

; CHECK-LABEL: @ArgumentShadowAlignment
; CHECK: load <2 x i64>, <2 x i64>* {{.*}} @__msan_param_tls {{.*}}, align 8
; CHECK: store <2 x i64> {{.*}} @__msan_retval_tls {{.*}}, align 8
; CHECK: ret <2 x i64>


; Test origin propagation for insertvalue

define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory {
entry:
  %a = insertvalue { i64, i32 } undef, i64 %x, 0
  %b = insertvalue { i64, i32 } %a, i32 %y, 1
  ret { i64, i32 } %b
}

; CHECK-ORIGINS: @make_pair_64_32
; First element shadow
; CHECK-ORIGINS: insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 {{.*}}, 0
; First element origin
; CHECK-ORIGINS: icmp ne i64
; CHECK-ORIGINS: select i1
; First element app value
; CHECK-ORIGINS: insertvalue { i64, i32 } undef, i64 {{.*}}, 0
; Second element shadow
; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
; Second element origin
; CHECK-ORIGINS: icmp ne i32
; CHECK-ORIGINS: select i1
; Second element app value
; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
; CHECK-ORIGINS: ret { i64, i32 }


; Test shadow propagation for aggregates passed through ellipsis.

%struct.StructByVal = type { i32, i32, i32, i32 }

declare void @VAArgStructFn(i32 %guard, ...)

define void @VAArgStruct(%struct.StructByVal* nocapture %s) sanitize_memory {
entry:
  %agg.tmp2 = alloca %struct.StructByVal, align 8
  %0 = bitcast %struct.StructByVal* %s to i8*
  %agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
  %agg.tmp.sroa.0.0.copyload = load i64, i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
  %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, %struct.StructByVal* %s, i64 0, i32 2
  %agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
  %agg.tmp.sroa.2.0.copyload = load i64, i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
  %1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %0, i64 16, i32 4, i1 false)
  call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval align 8 %agg.tmp2)
  ret void
}

; "undef" and the first 2 structs go to general purpose registers;
; the third struct goes to the overflow area byval

; CHECK-LABEL: @VAArgStruct
; undef not stored to __msan_va_arg_tls - it's a fixed argument
; first struct through general purpose registers
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 8){{.*}}, align 8
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 16){{.*}}, align 8
; second struct through general purpose registers
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 24){{.*}}, align 8
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 32){{.*}}, align 8
; third struct through the overflow area byval
; CHECK: ptrtoint %struct.StructByVal* {{.*}} to i64
; CHECK: bitcast { i32, i32, i32, i32 }* {{.*}}@__msan_va_arg_tls {{.*}}, i64 176
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
; CHECK: store i64 16, i64* @__msan_va_arg_overflow_size_tls
; CHECK: call void (i32, ...) @VAArgStructFn
; CHECK: ret void


declare i32 @InnerTailCall(i32 %a)

define void @MismatchedReturnTypeTailCall(i32 %a) sanitize_memory {
  %b = tail call i32 @InnerTailCall(i32 %a)
  ret void
}

; We used to strip off the 'tail' modifier, but now that we unpoison return slot
; shadow before the call, we don't need to anymore.

; CHECK-LABEL: define void @MismatchedReturnTypeTailCall
; CHECK: tail call i32 @InnerTailCall
; CHECK: ret void


declare i32 @MustTailCall(i32 %a)

define i32 @CallMustTailCall(i32 %a) sanitize_memory {
  %b = musttail call i32 @MustTailCall(i32 %a)
  ret i32 %b
}

; For "musttail" calls we can not insert any shadow manipulating code between
; call and the return instruction. And we don't need to, because everything is
; taken care of in the callee.

; CHECK-LABEL: define i32 @CallMustTailCall
; CHECK: musttail call i32 @MustTailCall
; No instrumentation between call and ret.
; CHECK-NEXT: ret i32

declare i32* @MismatchingMustTailCall(i32 %a)

define i8* @MismatchingCallMustTailCall(i32 %a) sanitize_memory {
  %b = musttail call i32* @MismatchingMustTailCall(i32 %a)
  %c = bitcast i32* %b to i8*
  ret i8* %c
}

; For "musttail" calls we can not insert any shadow manipulating code between
; call and the return instruction. And we don't need to, because everything is
; taken care of in the callee.

; CHECK-LABEL: define i8* @MismatchingCallMustTailCall
; CHECK: musttail call i32* @MismatchingMustTailCall
; No instrumentation between call and ret.
; CHECK-NEXT: bitcast i32* {{.*}} to i8*
; CHECK-NEXT: ret i8*


; CHECK-LABEL: define internal void @msan.module_ctor() {
; CHECK: call void @__msan_init()
