; This test checks that we are not instrumenting unwanted accesses to globals:
; - Instruction profiler counter instrumentation has known intended races.
; - The gcov counters array has a known intended race.
;
; RUN: opt < %s -tsan -S | FileCheck %s

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9"

@__profc_test_gep = private global [1 x i64] zeroinitializer, section "__DATA,__llvm_prf_cnts", align 8
@__profc_test_bitcast = private global [2 x i64] zeroinitializer, section "__DATA,__llvm_prf_cnts", align 8
@__profc_test_bitcast_foo = private global [1 x i64] zeroinitializer, section "__DATA,__llvm_prf_cnts", align 8

@__llvm_gcov_ctr = internal global [1 x i64] zeroinitializer
@__llvm_gcov_ctr.1 = internal global [1 x i64] zeroinitializer
@__llvm_gcov_global_state_pred = internal global i32 0
@__llvm_gcda_foo = internal global i32 0

define i32 @test_gep() sanitize_thread {
entry:
  %pgocount = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_test_gep, i64 0, i64 0)
  %0 = add i64 %pgocount, 1
  store i64 %0, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_test_gep, i64 0, i64 0)

  %gcovcount = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0)
  %1 = add i64 %gcovcount, 1
  store i64 %1, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0)

  %gcovcount.1 = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr.1, i64 0, i64 0)
  %2 = add i64 %gcovcount.1, 1
  store i64 %2, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr.1, i64 0, i64 0)

  ret i32 1
}

define i32 @test_bitcast() sanitize_thread {
entry:
  %0 = load <2 x i64>, <2 x i64>* bitcast ([2 x i64]* @__profc_test_bitcast to <2 x i64>*), align 8
  %.promoted5 = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_test_bitcast_foo, i64 0, i64 0), align 8
  %1 = add i64 %.promoted5, 10
  %2 = add <2 x i64> %0, <i64 1, i64 10>
  store <2 x i64> %2, <2 x i64>* bitcast ([2 x i64]* @__profc_test_bitcast to <2 x i64>*), align 8
  store i64 %1, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_test_bitcast_foo, i64 0, i64 0), align 8
  ret i32 undef
}

define void @test_load() sanitize_thread {
entry:
  %0 = load i32, i32* @__llvm_gcov_global_state_pred
  store i32 1, i32* @__llvm_gcov_global_state_pred

  %1 = load i32, i32* @__llvm_gcda_foo
  store i32 1, i32* @__llvm_gcda_foo

  ret void
}

; CHECK-NOT: {{call void @__tsan_write}}
; CHECK: __tsan_init