/external/llvm/test/CodeGen/AMDGPU/ |
D | usubo.ll |
    5   declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
    6   declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
    13  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
    14  %val = extractvalue { i64, i1 } %usub, 0
    15  %carry = extractvalue { i64, i1 } %usub, 1
    28  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b) nounwind
    29  %val = extractvalue { i32, i1 } %usub, 0
    30  %carry = extractvalue { i32, i1 } %usub, 1
    44  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b) nounwind
    45  %val = extractvalue { i32, i1 } %usub, 0
    [all …]
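These hits all follow the same shape: @llvm.usub.with.overflow returns a {result, borrow} pair, and the tests pull the two fields out with extractvalue. A minimal self-contained sketch of that pattern (the function name and the final select are illustrative, not taken from the test):

    ; Unsigned checked subtraction: yields a - b, plus an i1 that is
    ; true when the subtraction wrapped (i.e. a < b).
    declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32)

    define i32 @sub_or_zero(i32 %a, i32 %b) {
      %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
      %val = extractvalue { i32, i1 } %usub, 0
      %carry = extractvalue { i32, i1 } %usub, 1
      ; clamp to 0 on borrow, just to consume both fields
      %res = select i1 %carry, i32 0, i32 %val
      ret i32 %res
    }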
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | usubo.ll |
    14  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) #0
    15  %val = extractvalue { i64, i1 } %usub, 0
    16  %carry = extractvalue { i64, i1 } %usub, 1
    35  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
    36  %val = extractvalue { i32, i1 } %usub, 0
    37  %carry = extractvalue { i32, i1 } %usub, 1
    59  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
    60  %val = extractvalue { i32, i1 } %usub, 0
    61  %carry = extractvalue { i32, i1 } %usub, 1
    83  %uadd = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
    [all …]
|
/external/llvm-project/llvm/test/Analysis/CostModel/X86/ |
D | arith-usat.ll |
    252  declare i64 @llvm.usub.sat.i64(i64, i64)
    253  declare <2 x i64> @llvm.usub.sat.v2i64(<2 x i64>, <2 x i64>)
    254  declare <4 x i64> @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>)
    255  declare <8 x i64> @llvm.usub.sat.v8i64(<8 x i64>, <8 x i64>)
    257  declare i32 @llvm.usub.sat.i32(i32, i32)
    258  declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
    259  declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>)
    260  declare <16 x i32> @llvm.usub.sat.v16i32(<16 x i32>, <16 x i32>)
    262  declare i16 @llvm.usub.sat.i16(i16, i16)
    263  declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
    [all …]
|
/external/llvm-project/llvm/test/Analysis/CostModel/ARM/ |
D | arith-usat.ll |
    167  declare i64 @llvm.usub.sat.i64(i64, i64)
    168  declare <2 x i64> @llvm.usub.sat.v2i64(<2 x i64>, <2 x i64>)
    169  declare <4 x i64> @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>)
    170  declare <8 x i64> @llvm.usub.sat.v8i64(<8 x i64>, <8 x i64>)
    172  declare i32 @llvm.usub.sat.i32(i32, i32)
    173  declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
    174  declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>)
    175  declare <16 x i32> @llvm.usub.sat.v16i32(<16 x i32>, <16 x i32>)
    177  declare i16 @llvm.usub.sat.i16(i16, i16)
    178  declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | usub_sat_vec.ll |
    4   declare <1 x i8> @llvm.usub.sat.v1i8(<1 x i8>, <1 x i8>)
    5   declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>)
    6   declare <4 x i8> @llvm.usub.sat.v4i8(<4 x i8>, <4 x i8>)
    7   declare <8 x i8> @llvm.usub.sat.v8i8(<8 x i8>, <8 x i8>)
    8   declare <12 x i8> @llvm.usub.sat.v12i8(<12 x i8>, <12 x i8>)
    9   declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
    10  declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>)
    11  declare <64 x i8> @llvm.usub.sat.v64i8(<64 x i8>, <64 x i8>)
    13  declare <1 x i16> @llvm.usub.sat.v1i16(<1 x i16>, <1 x i16>)
    14  declare <2 x i16> @llvm.usub.sat.v2i16(<2 x i16>, <2 x i16>)
    [all …]
|
D | usub_sat.ll |
    4   declare i4 @llvm.usub.sat.i4(i4, i4)
    5   declare i8 @llvm.usub.sat.i8(i8, i8)
    6   declare i16 @llvm.usub.sat.i16(i16, i16)
    7   declare i32 @llvm.usub.sat.i32(i32, i32)
    8   declare i64 @llvm.usub.sat.i64(i64, i64)
    16  %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %y);
    26  %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %y);
    39  %tmp = call i16 @llvm.usub.sat.i16(i16 %x, i16 %y);
    52  %tmp = call i8 @llvm.usub.sat.i8(i8 %x, i8 %y);
    65  %tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %y);
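These scalar tests rely on the intrinsic's defined semantics: usub.sat(x, y) is x - y when x >= y and 0 otherwise, i.e. unsigned subtraction that clamps at zero instead of wrapping. A minimal sketch (function name illustrative):

    declare i32 @llvm.usub.sat.i32(i32, i32)

    ; usub.sat(7, 3) == 4; usub.sat(3, 7) == 0 (clamped, not wrapped)
    define i32 @sat_sub(i32 %x, i32 %y) {
      %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %y)
      ret i32 %tmp
    }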
|
D | usub_sat_plus.ll |
    4   declare i4 @llvm.usub.sat.i4(i4, i4)
    5   declare i8 @llvm.usub.sat.i8(i8, i8)
    6   declare i16 @llvm.usub.sat.i16(i16, i16)
    7   declare i32 @llvm.usub.sat.i32(i32, i32)
    8   declare i64 @llvm.usub.sat.i64(i64, i64)
    18  %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %a)
    29  %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %z)
    44  %tmp = call i16 @llvm.usub.sat.i16(i16 %x, i16 %a)
    59  %tmp = call i8 @llvm.usub.sat.i8(i8 %x, i8 %a)
    74  %tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %a)
|
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | arith-sub-usat.ll |
    23  declare i64 @llvm.usub.sat.i64(i64, i64)
    24  declare i32 @llvm.usub.sat.i32(i32, i32)
    25  declare i16 @llvm.usub.sat.i16(i16, i16)
    26  declare i8 @llvm.usub.sat.i8 (i8 , i8 )
    46  ; SSE-NEXT: [[R0:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A0]], i64 [[B0]])
    47  ; SSE-NEXT: [[R1:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A1]], i64 [[B1]])
    48  ; SSE-NEXT: [[R2:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A2]], i64 [[B2]])
    49  ; SSE-NEXT: [[R3:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A3]], i64 [[B3]])
    50  ; SSE-NEXT: [[R4:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A4]], i64 [[B4]])
    51  ; SSE-NEXT: [[R5:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A5]], i64 [[B5]])
    [all …]
|
D | arith-sub-usubo.ll |
    23  declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64)
    24  declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32)
    25  declare {i16, i1} @llvm.usub.with.overflow.i16(i16, i16)
    26  declare {i8 , i1} @llvm.usub.with.overflow.i8 (i8 , i8 )
    46  ; CHECK-NEXT: [[C0:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[A0]], i64 [[B0]])
    47  ; CHECK-NEXT: [[C1:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[A1]], i64 [[B1]])
    48  ; CHECK-NEXT: [[C2:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[A2]], i64 [[B2]])
    49  ; CHECK-NEXT: [[C3:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[A3]], i64 [[B3]])
    50  ; CHECK-NEXT: [[C4:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[A4]], i64 [[B4]])
    51  ; CHECK-NEXT: [[C5:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[A5]], i64 [[B5]])
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/SystemZ/ |
D | int-usub-10.ll |
    16   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
    32   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 128)
    50   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 129)
    66   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 -127)
    84   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 -128)
    101  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
    120  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
    137  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
    156  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
    176  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
    [all …]
|
D | int-usub-11.ll |
    16   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
    32   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 128)
    50   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 129)
    66   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 -127)
    85   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 -128)
    102  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
    121  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
    138  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
    157  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
    177  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
    [all …]
|
D | int-usub-02.ll |
    16   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
    30   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
    51   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
    75   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
    93   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
    113  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
    131  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
    149  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
    169  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
    189  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
    [all …]
|
D | int-usub-01.ll |
    16   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
    30   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
    51   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
    75   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
    93   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
    111  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
    129  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
    149  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
    167  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
    185  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
    [all …]
|
D | int-usub-03.ll |
    17   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
    32   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
    54   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
    79   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
    98   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
    119  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
    138  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
    157  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
    178  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
    199  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
    [all …]
|
D | int-usub-09.ll |
    17   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
    33   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 32768)
    49   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 32769)
    65   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 -1)
    81   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 -32767)
    94   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 -32768)
    108  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
    129  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
    144  declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
|
/external/llvm/test/CodeGen/Generic/ |
D | overflow.ll |
    113  ;; usub
    117  %usub = tail call { i8, i1 } @llvm.usub.with.overflow.i8(i8 %a, i8 %b)
    118  %cmp = extractvalue { i8, i1 } %usub, 1
    119  %usub.result = extractvalue { i8, i1 } %usub, 0
    120  %X = select i1 %cmp, i8 %usub.result, i8 42
    124  declare { i8, i1 } @llvm.usub.with.overflow.i8(i8, i8) nounwind readnone
    128  %usub = tail call { i16, i1 } @llvm.usub.with.overflow.i16(i16 %a, i16 %b)
    129  %cmp = extractvalue { i16, i1 } %usub, 1
    130  %usub.result = extractvalue { i16, i1 } %usub, 0
    131  %X = select i1 %cmp, i16 %usub.result, i16 42
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/Generic/ |
D | overflow.ll |
    113  ;; usub
    117  %usub = tail call { i8, i1 } @llvm.usub.with.overflow.i8(i8 %a, i8 %b)
    118  %cmp = extractvalue { i8, i1 } %usub, 1
    119  %usub.result = extractvalue { i8, i1 } %usub, 0
    120  %X = select i1 %cmp, i8 %usub.result, i8 42
    124  declare { i8, i1 } @llvm.usub.with.overflow.i8(i8, i8) nounwind readnone
    128  %usub = tail call { i16, i1 } @llvm.usub.with.overflow.i16(i16 %a, i16 %b)
    129  %cmp = extractvalue { i16, i1 } %usub, 1
    130  %usub.result = extractvalue { i16, i1 } %usub, 0
    131  %X = select i1 %cmp, i16 %usub.result, i16 42
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | combine-sub-usat.ll |
    10  declare i32 @llvm.usub.sat.i32 (i32, i32)
    11  declare i64 @llvm.usub.sat.i64 (i64, i64)
    12  declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
    20  %res = call i32 @llvm.usub.sat.i32(i32 %a0, i32 undef)
    34  %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> %a0)
    44  %res = call i32 @llvm.usub.sat.i32(i32 100, i32 4294967295)
    58  …%res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i1…
    72  …%res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 …
    82  %1 = call i32 @llvm.usub.sat.i32(i32 %a0, i32 0)
    90  %1 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
    [all …]
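One hit here is a pure constant fold: in unsigned 32-bit arithmetic 100 < 4294967295 (UINT32_MAX), so the saturating difference is 0 and the whole call should fold to a constant. A sketch of that test's shape (function name illustrative):

    declare i32 @llvm.usub.sat.i32(i32, i32)

    ; 100 - 4294967295 underflows, so the saturated result is 0
    define i32 @const_fold() {
      %res = call i32 @llvm.usub.sat.i32(i32 100, i32 4294967295)
      ret i32 %res
    }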
|
D | usub_sat.ll |
    5   declare i4 @llvm.usub.sat.i4(i4, i4)
    6   declare i8 @llvm.usub.sat.i8(i8, i8)
    7   declare i16 @llvm.usub.sat.i16(i16, i16)
    8   declare i32 @llvm.usub.sat.i32(i32, i32)
    9   declare i64 @llvm.usub.sat.i64(i64, i64)
    10  declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
    27  %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %y)
    49  %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %y)
    70  %tmp = call i16 @llvm.usub.sat.i16(i16 %x, i16 %y)
    93  %tmp = call i8 @llvm.usub.sat.i8(i8 %x, i8 %y)
    [all …]
|
D | combine-subo.ll |
    6    declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
    9    declare {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
    44   ; fold (usub x, 0) -> x
    55   %1 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a0, i32 zeroinitializer)
    70   …%1 = call {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitia…
    112  ; fold (usub x, x) -> x
    123  %1 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a0, i32 %a0)
    140  %1 = call {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a0)
    147  ; fold (usub -1, x) -> (xor x, -1) + no borrow
    160  %1 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 -1, i32 %a0)
    [all …]
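The comment lines name the DAG-combiner folds under test; for instance, subtracting zero can never borrow, so usub.with.overflow(x, 0) reduces to {x, false}. A sketch of an input exercising that first fold (function name illustrative; the test itself spells the zero operand as zeroinitializer):

    declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32)

    ; (usub x, 0) -> x, with the overflow bit known false
    define i32 @usub_zero(i32 %a0) {
      %pair = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a0, i32 0)
      %val = extractvalue { i32, i1 } %pair, 0
      ret i32 %val
    }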
|
D | usub_sat_plus.ll |
    5    declare i4 @llvm.usub.sat.i4(i4, i4)
    6    declare i8 @llvm.usub.sat.i8(i8, i8)
    7    declare i16 @llvm.usub.sat.i16(i16, i16)
    8    declare i32 @llvm.usub.sat.i32(i32, i32)
    9    declare i64 @llvm.usub.sat.i64(i64, i64)
    30   %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %a)
    53   %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %z)
    78   %tmp = call i16 @llvm.usub.sat.i16(i16 %x, i16 %a)
    107  %tmp = call i8 @llvm.usub.sat.i8(i8 %x, i8 %a)
    147  %tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %a)
|
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | unsigned_saturated_sub.ll |
    5   ; usub.sat() intrinsics is tested here.
    11  ; (a > b) ? a - b : 0 -> usub.sat(a, b)
    15  ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A:%.*]], i64 [[B:%.*]])
    24  ; (a >= b) ? a - b : 0 -> usub.sat(a, b)
    28  ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A:%.*]], i64 [[B:%.*]])
    40  ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A]], i64 [[B]])
    54  ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A]], i64 [[B]])
    69  ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A]], i64 [[B]])
    83  ; (a > b) ? a - b : 0 -> usub.sat(a, b)
    87  ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> [[A:%.*]], <4 x i32> …
    [all …]
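These CHECK lines verify InstCombine's canonicalization of the open-coded compare/subtract/select idiom into the saturating intrinsic, exactly as the embedded comments state. A sketch of an input function that should canonicalize this way (function name illustrative; note the compare must be unsigned for the transform to apply):

    ; (a > b) ? a - b : 0  ==>  call i64 @llvm.usub.sat.i64(i64 %a, i64 %b)
    define i64 @sub_clamp_zero(i64 %a, i64 %b) {
      %cmp = icmp ugt i64 %a, %b
      %sub = sub i64 %a, %b
      %sel = select i1 %cmp, i64 %sub, i64 0
      ret i64 %sel
    }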
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | vqsub.ll |
    44   %tmp3 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    53   %tmp3 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    62   %tmp3 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    71   %tmp3 = call <1 x i64> @llvm.usub.sat.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
    116  %tmp3 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    125  %tmp3 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    134  %tmp3 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
    143  %tmp3 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    152  declare <8 x i8> @llvm.usub.sat.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
    153  declare <4 x i16> @llvm.usub.sat.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
    [all …]
|
/external/angle/third_party/vulkan-deps/glslang/src/Test/baseResults/ |
D | spv.subpass.frag.out |
    23   Name 56 "usub"
    40   Decorate 56(usub) DescriptorSet 0
    41   Decorate 56(usub) Binding 4
    42   Decorate 56(usub) InputAttachmentIndex 5
    79   56(usub): 55(ptr) Variable UniformConstant
    101  57: 54 Load 56(usub)
|
/external/deqp-deps/glslang/Test/baseResults/ |
D | spv.subpass.frag.out |
    23   Name 56 "usub"
    40   Decorate 56(usub) DescriptorSet 0
    41   Decorate 56(usub) Binding 4
    42   Decorate 56(usub) InputAttachmentIndex 5
    79   56(usub): 55(ptr) Variable UniformConstant
    101  57: 54 Load 56(usub)
|