; RUN: opt < %s -S -mcpu=z13 -passes=msan 2>&1 | FileCheck %s
; RUN: opt < %s -msan -S -mcpu=z13 | FileCheck %s

target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"
target triple = "s390x-unknown-linux-gnu"

%struct.__va_list = type { i64, i64, i8*, i8* }

define i64 @foo(i64 %guard, ...) {
  %vl = alloca %struct.__va_list, align 8
  %1 = bitcast %struct.__va_list* %vl to i8*
  call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
  call void @llvm.va_start(i8* %1)
  call void @llvm.va_end(i8* %1)
  call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
  ret i64 0
}

; First check that the variadic shadow values are saved on the stack with the
; correct size: 160 (the size of the register save area) plus the overflow arg
; area size.

; CHECK-LABEL: @foo
; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
; CHECK: [[B:%.*]] = add i64 160, [[A]]
; CHECK: alloca {{.*}} [[B]]

; We expect two memcpy operations: one for the register save area, and one for
; the overflow arg area.

; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 {{%.*}}, i8* align 8 {{%.*}}, i64 160, i1 false)
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 {{%.*}}, i8* align 8 {{%.*}}, i64 [[A]], i1 false)

declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @llvm.va_start(i8*) #2
declare void @llvm.va_end(i8*) #2
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1

declare i32 @random_i32()
declare i64 @random_i64()
declare float @random_float()
declare double @random_double()

define i64 @bar() {
  %arg2 = call i32 () @random_i32()
  %arg3 = call float () @random_float()
  %arg4 = call i32 () @random_i32()
  %arg5 = call double () @random_double()
  %arg6 = call i64 () @random_i64()
  %arg9 = call i32 () @random_i32()
  %arg11 = call float () @random_float()
  %arg12 = call i32 () @random_i32()
  %arg13 = call double () @random_double()
  %arg14 = call i64 () @random_i64()
  %1 = call i64 (i64, ...) @foo(i64 1, i32 zeroext %arg2, float %arg3,
                                i32 signext %arg4, double %arg5, i64 %arg6,
                                i64 7, double 8.0, i32 zeroext %arg9,
                                double 10.0, float %arg11, i32 signext %arg12,
                                double %arg13, i64 %arg14)
  ret i64 %1
}

; The incoming shadow values for the varargs are saved in the __msan_va_arg_tls
; array, at the offsets the ABI assigns to the corresponding registers in the
; register save area and to the corresponding arguments in the overflow arg
; area:
; - r2@16 == i64 1 - skipped, because it's fixed
; - r3@24 == i32 zext %arg2 - shadow is zero-extended
; - f0@128 == float %arg3 - left-justified, shadow is 32-bit
; - r4@32 == i32 sext %arg4 - shadow is sign-extended
; - f2@136 == double %arg5 - straightforward
; - r5@40 == i64 %arg6 - straightforward
; - r6@48 == 7 - filler
; - f4@144 == 8.0 - filler
; - overflow@160 == i32 zext %arg9 - shadow is zero-extended
; - f6@152 == 10.0 - filler
; - overflow@(168 + 4) == float %arg11 - right-justified, shadow is 32-bit
; - overflow@176 == i32 sext %arg12 - shadow is sign-extended
; - overflow@184 == double %arg13 - straightforward
; - overflow@192 == i64 %arg14 - straightforward
; Overflow arg area size is 40.
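
; A rough sketch of the arithmetic behind the offsets above (assuming the
; SystemZ ELF ABI layout this test relies on): within the 160-byte register
; save area, GPR argument register rN is saved at offset 8*N (so r2..r6 cover
; offsets 16..55), and the FPR argument registers f0/f2/f4/f6 are saved at
; offsets 128/136/144/152. Arguments that don't fit in registers start at
; offset 160, one 8-byte slot each, so the five overflow args at 160, 168,
; 176, 184 and 192 give the overflow arg area size of 40.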

; CHECK-LABEL: @bar

; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 24
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 128
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 32
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 136
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 40
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 48
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 144
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 160
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 152
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 172
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 176
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 184
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 192
; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls

; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too
; many arguments are passed to a variadic function.

define dso_local i64 @many_args() {
entry:
  %ret = call i64 (i64, ...) @sum(i64 120,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1
  )
  ret i64 %ret
}

; If the size of __msan_va_arg_tls changes, the second argument of `add` below
; must also be changed: __msan_va_arg_tls is [100 x i64], i.e. 800 bytes, so
; 792 is the last 8-byte offset that is still in bounds.
; CHECK-LABEL: @many_args
; CHECK: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 792)
; CHECK-NOT: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 800)

declare i64 @sum(i64 %n, ...)

; Test offset calculation for vector arguments.
; Regardless of whether or not the fixed args overflow, we should copy the
; shadow of a single vector vararg to offset 160.

declare void @vr_no_overflow(<4 x float> %v24, <4 x float> %v26,
                             <4 x float> %v28, <4 x float> %v30,
                             <4 x float> %v25, <4 x float> %v27,
                             <4 x float> %v29, ...)

declare <4 x float> @vr_value()

define void @vr_no_overflow_caller() {
  %1 = call <4 x float> () @vr_value()
  call void (<4 x float>, <4 x float>, <4 x float>,
             <4 x float>, <4 x float>, <4 x float>,
             <4 x float>, ...) @vr_no_overflow(
    <4 x float> %1, <4 x float> %1, <4 x float> %1, <4 x float> %1,
    <4 x float> %1, <4 x float> %1, <4 x float> %1, <4 x float> %1)
  ret void
}

; CHECK-LABEL: @vr_no_overflow_caller
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 160
; CHECK-NOT: store {{.*}} @__msan_va_arg_tls {{.*}}
; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls

declare void @vr_overflow(<4 x float> %v24, <4 x float> %v26,
                          <4 x float> %v28, <4 x float> %v30,
                          <4 x float> %v25, <4 x float> %v27,
                          <4 x float> %v29, <4 x float> %v31,
                          <4 x float> %overflow, ...)
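
; @vr_overflow above differs from @vr_no_overflow in that its eight leading
; named vector args occupy all of the vector argument registers (they are
; named here after v24-v31) and the ninth named arg (%overflow) already spills
; to the overflow arg area; the checks for its caller verify that the shadow
; of the single vector vararg is still copied to offset 160 of
; __msan_va_arg_tls.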

define void @vr_overflow_caller() {
  %1 = call <4 x float> @vr_value()
  call void (<4 x float>, <4 x float>, <4 x float>,
             <4 x float>, <4 x float>, <4 x float>,
             <4 x float>, <4 x float>, <4 x float>,
             ...) @vr_overflow(
    <4 x float> %1, <4 x float> %1, <4 x float> %1, <4 x float> %1,
    <4 x float> %1, <4 x float> %1, <4 x float> %1, <4 x float> %1,
    <4 x float> %1, <4 x float> %1)
  ret void
}

; CHECK-LABEL: @vr_overflow_caller
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 160
; CHECK-NOT: store {{.*}} @__msan_va_arg_tls {{.*}}
; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls

; Test that i128 and fp128 are passed by reference.

declare i128 @random_i128()
declare fp128 @random_fp128()

define i64 @bar_128() {
  %iarg = call i128 @random_i128()
  %fparg = call fp128 @random_fp128()
  %1 = call i64 (i64, ...) @foo(i64 1, i128 %iarg, fp128 %fparg)
  ret i64 %1
}

; CHECK-LABEL: @bar_128
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 24
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 32
; CHECK: store {{.*}} 0, {{.*}} @__msan_va_arg_overflow_size_tls
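
; Since i128 and fp128 are passed by reference on SystemZ, the pointers to
; %iarg and %fparg travel in r3 and r4 (r2 holds the fixed %guard), so the two
; stores checked above land in the 8-byte register save area slots at offsets
; 24 and 32, and nothing is copied to the overflow arg area (hence its size
; is 0).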