; This test checks that we are not instrumenting unwanted accesses to globals:
; - Instrumentation profiler (PGO) counters have known intended races.
; - The gcov counters array has a known intended race.
;
; RUN: opt < %s -tsan -S -enable-new-pm=0 | FileCheck %s
; RUN: opt < %s -passes='function(tsan),module(tsan-module)' -S | FileCheck %s

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9"

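; The globals below mirror what PGO (__profc_*, in the __llvm_prf_cnts section)
; and gcov (__llvm_gcov_*, __llvm_gcda_*) instrumentation emit; the accesses to
; them in the functions below are the known intended races that tsan must skip.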
@__profc_test_gep = private global [1 x i64] zeroinitializer, section "__DATA,__llvm_prf_cnts", align 8
@__profc_test_bitcast = private global [2 x i64] zeroinitializer, section "__DATA,__llvm_prf_cnts", align 8
@__profc_test_bitcast_foo = private global [1 x i64] zeroinitializer, section "__DATA,__llvm_prf_cnts", align 8

@__llvm_gcov_ctr = internal global [1 x i64] zeroinitializer
@__llvm_gcov_ctr.1 = internal global [1 x i64] zeroinitializer
@__llvm_gcov_global_state_pred = internal global i32 0
@__llvm_gcda_foo = internal global i32 0

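; PGO and gcov counter increments through constant GEPs must not be instrumented.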
define i32 @test_gep() sanitize_thread {
entry:
  %pgocount = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_test_gep, i64 0, i64 0)
  %0 = add i64 %pgocount, 1
  store i64 %0, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_test_gep, i64 0, i64 0)

  %gcovcount = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0)
  %1 = add i64 %gcovcount, 1
  store i64 %1, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0)

  %gcovcount.1 = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr.1, i64 0, i64 0)
  %2 = add i64 %gcovcount.1, 1
  store i64 %2, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr.1, i64 0, i64 0)

  ret i32 1
}

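; Vectorized counter updates that go through a bitcast of the counter array
; must be skipped as well.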
define i32 @test_bitcast() sanitize_thread {
entry:
  %0 = load <2 x i64>, <2 x i64>* bitcast ([2 x i64]* @__profc_test_bitcast to <2 x i64>*), align 8
  %.promoted5 = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_test_bitcast_foo, i64 0, i64 0), align 8
  %1 = add i64 %.promoted5, 10
  %2 = add <2 x i64> %0, <i64 1, i64 10>
  store <2 x i64> %2, <2 x i64>* bitcast ([2 x i64]* @__profc_test_bitcast to <2 x i64>*), align 8
  store i64 %1, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_test_bitcast_foo, i64 0, i64 0), align 8
  ret i32 undef
}

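; Plain loads and stores of the gcov bookkeeping globals are also left alone.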
define void @test_load() sanitize_thread {
entry:
  %0 = load i32, i32* @__llvm_gcov_global_state_pred
  store i32 1, i32* @__llvm_gcov_global_state_pred

  %1 = load i32, i32* @__llvm_gcda_foo
  store i32 1, i32* @__llvm_gcda_foo

  ret void
}

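; None of the accesses above should receive a __tsan_write* call; only the
; module constructor calling __tsan_init should be added. For contrast, a store
; to an ordinary global in a sanitize_thread function would be instrumented
; roughly like this (illustrative sketch only; @some_global is a made-up name,
; not part of this test):
;   call void @__tsan_write4(i8* bitcast (i32* @some_global to i8*))
;   store i32 1, i32* @some_global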
; CHECK-NOT: {{call void @__tsan_write}}
; CHECK: __tsan_init