Searched refs:sext (Results 1 – 25 of 496) sorted by relevance
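For context, every hit below is a use of the LLVM IR sign-extension instruction (or, in the XCore tests, the @llvm.xcore.sext intrinsic) inside an .ll test file. A minimal sketch of the kind of line the query matches is shown here; the function name @widen and the values %x and %w are illustrative only and are not taken from any of the listed tests:

    define i64 @widen(i32 %x) {
      ; sign-extend the 32-bit argument to 64 bits
      %w = sext i32 %x to i64
      ret i64 %w
    }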
/external/llvm/test/CodeGen/PowerPC/

  vec_cmp.ll
    14  %sext = sext <2 x i1> %cmp to <2 x i8>
    15  ret <2 x i8> %sext
    23  %sext = sext <4 x i1> %cmp to <4 x i8>
    24  ret <4 x i8> %sext
    32  %sext = sext <8 x i1> %cmp to <8 x i8>
    33  ret <8 x i8> %sext
    43  %sext = sext <16 x i1> %cmp to <16 x i8>
    44  ret <16 x i8> %sext
    52  %sext = sext <16 x i1> %cmp to <16 x i8>
    53  ret <16 x i8> %sext
    [all …]
/external/llvm/test/CodeGen/X86/

  pmovsx-inreg.ll
    10  %sext = sext <2 x i8> %wide.load35 to <2 x i64>
    12  store <2 x i64> %sext, <2 x i64>* %out, align 8
    27  %sext = sext <4 x i8> %wide.load35 to <4 x i64>
    29  store <4 x i64> %sext, <4 x i64>* %out, align 8
    38  %sext = sext <4 x i8> %wide.load35 to <4 x i32>
    40  store <4 x i32> %sext, <4 x i32>* %out, align 8
    55  %sext = sext <8 x i8> %wide.load35 to <8 x i32>
    57  store <8 x i32> %sext, <8 x i32>* %out, align 8
    66  %sext = sext <8 x i8> %wide.load35 to <8 x i16>
    68  store <8 x i16> %sext, <8 x i16>* %out, align 8
    [all …]

  vec_sext.ll
    6  %G = sext <4 x i16> %F to <4 x i32>
    8  %Y = sext <4 x i16> %H to <4 x i32>
    16  %G = sext <4 x i16> %F to <4 x i64>
    18  %Y = sext <4 x i16> %H to <4 x i64>
    26  %G = sext <4 x i32> %F to <4 x i64>
    28  %Y = sext <4 x i32> %H to <4 x i64>
    35  %G = sext <4 x i8> %F to <4 x i16>
    37  %Y = sext <4 x i8> %H to <4 x i16>
    44  %G = sext <4 x i8> %F to <4 x i32>
    46  %Y = sext <4 x i8> %H to <4 x i32>
    [all …]

  avx2-cmp.ll
    6  %x = sext <8 x i1> %bincmp to <8 x i32>
    13  %x = sext <4 x i1> %bincmp to <4 x i64>
    20  %x = sext <16 x i1> %bincmp to <16 x i16>
    27  %x = sext <32 x i1> %bincmp to <32 x i8>
    34  %x = sext <8 x i1> %bincmp to <8 x i32>
    41  %x = sext <4 x i1> %bincmp to <4 x i64>
    48  %x = sext <16 x i1> %bincmp to <16 x i16>
    55  %x = sext <32 x i1> %bincmp to <32 x i8>

  sext-setcc-self.ll
    8  %1 = sext <4 x i1> %0 to <4 x i32>
    17  %1 = sext <4 x i1> %0 to <4 x i32>
    26  %1 = sext <4 x i1> %0 to <4 x i32>
    35  %1 = sext <4 x i1> %0 to <4 x i32>
    44  %1 = sext <4 x i1> %0 to <4 x i32>
    53  %1 = sext <4 x i1> %0 to <4 x i32>

  avx-sext.ll
    9  %B = sext <8 x i16> %A to <8 x i32>
    17  %B = sext <4 x i32> %A to <4 x i64>
    38  %Y = sext <4 x i16> %X to <4 x i32>
    58  %Y = sext <4 x i8> %X to <4 x i32>
    79  %Y = sext <2 x i8> %X to <2 x i64>
    100  %Y = sext <2 x i16> %X to <2 x i64>
    121  %Y = sext <2 x i32> %X to <2 x i64>
    142  %Y = sext <8 x i8> %X to <8 x i16>
    153  %extmask = sext <4 x i1> %mask to <4 x i64>
    164  %extmask = sext <4 x i8> %mask to <4 x i64>
/external/llvm/test/Transforms/InstCombine/

  sext.ll
    11  %s = sext i32 %t to i64
    20  %s = sext i32 %t to i64
    29  %s = sext i32 %t to i64
    38  %s = sext i32 %t to i64
    47  %s = sext i32 %t to i64
    56  %s = sext i32 %t to i64
    65  %s = sext i32 %u to i64
    75  %n = sext i16 %s to i32
    88  %t2 = sext i16 %t to i32
    109  %b = sext i8 %a to i32
    [all …]

  vector-casts.ll
    29  %sext = sext <4 x i1> %cmp to <4 x i32>
    31  %sext5 = sext <4 x i1> %cmp4 to <4 x i32>
    32  %and = and <4 x i32> %sext, %sext5
    43  %sext = sext <4 x i1> %cmp to <4 x i32>
    45  %sext5 = sext <4 x i1> %cmp4 to <4 x i32>
    46  %or = or <4 x i32> %sext, %sext5
    58  %sext = sext <4 x i1> %cmp to <4 x i32>
    60  %sext5 = sext <4 x i1> %cmp4 to <4 x i32>
    61  %and = and <4 x i32> %sext, %sext5
    66  ; CHECK: sext <4 x i1> %cmp to <4 x i32>
    [all …]

  signext.ll
    11  ; CHECK: %sext = shl i32 %x, 16
    12  ; CHECK: %tmp.3 = ashr exact i32 %sext, 16
    22  ; CHECK: %sext = shl i32 %x, 16
    23  ; CHECK: %tmp.3 = ashr exact i32 %sext, 16
    33  ; CHECK: %tmp.5 = sext i16 %P to i32
    43  ; CHECK: %tmp.5 = sext i16 %P to i32
    53  ; CHECK: %sext = shl i32 %x, 24
    54  ; CHECK: %tmp.3 = ashr exact i32 %sext, 24
    74  ; CHECK: %tmp.5 = sext i16 %P to i32

  bitcast-sext-vector.ll
    2  ; CHECK: sext
    8  %sext = sext <4 x i1> %cmp to <4 x i8>
    9  %val = bitcast <4 x i8> %sext to i32

  add-shrink.ll
    2  ; RUN: opt < %s -instcombine -S | grep sext | count 1
    4  ; Should only have one sext and the add should be i32 instead of i64.
    9  %D = sext i32 %B to i64 ; <i64> [#uses=1]
    10  %E = sext i32 %C to i64 ; <i64> [#uses=1]
/external/llvm/test/Analysis/CostModel/X86/

  cast.ll
    11  ;CHECK: cost of 2 {{.*}} sext
    12  %B = sext <4 x i1> undef to <4 x i32>
    19  ;CHECK-NOT: cost of 2 {{.*}} sext
    20  %E = sext <8 x i1> undef to <8 x i32>
    38  ;CHECK: cost of 9 {{.*}} sext
    39  %S = sext <8 x i1> %in to <8 x i32>
    41  ;CHECK: cost of 1 {{.*}} sext
    42  %A = sext <8 x i16> undef to <8 x i32>
    45  ;CHECK: cost of 1 {{.*}} sext
    46  %C = sext <4 x i32> undef to <4 x i64>
    [all …]
/external/llvm/test/CodeGen/XCore/

  sext.ll
    4  %2 = sext i1 %1 to i32
    8  ; CHECK: sext r0, 1
    12  %2 = sext i2 %1 to i32
    16  ; CHECK: sext r0, 2
    20  %2 = sext i8 %1 to i32
    24  ; CHECK: sext r0, 8
    28  %2 = sext i16 %1 to i32
    32  ; CHECK: sext r0, 16

  misc-intrinsics.ll
    8  declare i32 @llvm.xcore.sext(i32, i32)
    47  define i32 @sext(i32 %a, i32 %b) {
    48  ; CHECK: sext:
    49  ; CHECK: sext r0, r1
    50  %result = call i32 @llvm.xcore.sext(i32 %a, i32 %b)
    56  ; CHECK: sext r0, 4
    57  %result = call i32 @llvm.xcore.sext(i32 %a, i32 4)
/external/llvm/test/Transforms/IndVarSimplify/

  elim-extend.ll
    6  ; IV rewrite only removes one sext. WidenIVs removes all three.
    11  ; CHECK-NOT: sext
    16  %preofs = sext i32 %iv to i64
    20  %postofs = sext i32 %postiv to i64
    24  %postofsnsw = sext i32 %postivnsw to i64
    43  ; CHECK-NOT: sext
    48  %preofs = sext i32 %iv to i64
    52  %postofs = sext i32 %postiv to i64
    56  %postofsnsw = sext i32 %postivnsw to i64
    80  ; CHECK-NOT: sext
    [all …]

  2009-04-14-shorten_iv_vars.ll
    1  ; RUN: opt < %s -indvars -S | not grep "sext"
    20  %2 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
    24  %6 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
    28  %10 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
    35  %16 = sext i32 %15 to i64 ; <i64> [#uses=1]
    40  %21 = sext i32 %20 to i64 ; <i64> [#uses=1]
    44  %25 = sext i32 %13 to i64 ; <i64> [#uses=1]
    51  %31 = sext i32 %30 to i64 ; <i64> [#uses=1]
    56  %36 = sext i32 %35 to i64 ; <i64> [#uses=1]
    60  %40 = sext i32 %28 to i64 ; <i64> [#uses=1]
    [all …]

  preserve-signed-wrap.ll
    4  ; sext for the addressing, however it shouldn't eliminate the sext
    17  ; CHECK: sext i8
    18  ; CHECK-NOT: sext
    22  %1 = sext i8 %p.01 to i32 ; <i32> [#uses=1]
    23  %2 = sext i32 %i.02 to i64 ; <i64> [#uses=1]

  2011-09-27-hoistsext.ll
    2  ; Test indvars' ability to hoist new sext created by WidenIV.
    8  ; CHECK: sext
    9  ; CHECK: sext
    14  ; CHECK-NOT: sext
    19  %idxprom177 = sext i32 %add174 to i64
/external/llvm/test/CodeGen/Mips/

  madd-msub.ll
    6  %conv = sext i32 %a to i64
    7  %conv2 = sext i32 %b to i64
    9  %conv4 = sext i32 %c to i64
    28  %conv = sext i32 %a to i64
    29  %conv2 = sext i32 %b to i64
    38  %conv = sext i32 %c to i64
    39  %conv2 = sext i32 %a to i64
    40  %conv4 = sext i32 %b to i64
    60  %conv = sext i32 %a to i64
    61  %conv3 = sext i32 %b to i64
/external/llvm/test/CodeGen/ARM/

  2012-08-23-legalize-vmull.ll
    19  %v0 = sext <4 x i8> %0 to <4 x i32>
    32  %v0 = sext <2 x i8> %0 to <2 x i64>
    45  %v0 = sext <2 x i16> %0 to <2 x i64>
    60  %v0 = sext <4 x i8> %0 to <4 x i32>
    63  %v2 = sext <4 x i8> %1 to <4 x i32>
    76  %v0 = sext <2 x i8> %0 to <2 x i64>
    79  %v2 = sext <2 x i8> %1 to <2 x i64>
    92  %v0 = sext <2 x i16> %0 to <2 x i64>
    95  %v2 = sext <2 x i16> %1 to <2 x i64>
    110  %v0 = sext <4 x i8> %0 to <4 x i32>
    [all …]

  vcge.ll
    9  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
    19  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
    29  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
    39  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
    49  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
    59  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
    69  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
    79  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
    89  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
    99  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
    [all …]

  vceq.ll
    9  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
    19  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
    29  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
    39  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
    49  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
    59  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
    69  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
    79  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
    90  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>

  vcgt.ll
    10  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
    20  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
    30  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
    40  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
    50  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
    60  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
    70  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
    80  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
    90  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
    100  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
    [all …]
/external/llvm/test/Analysis/ScalarEvolution/

  sext-iv-1.ll
    2  ; RUN: | grep " --> (sext i. {.*,+,.*}<%bb1> to i64)" | count 5
    4  ; Don't convert (sext {...,+,...}) to {sext(...),+,sext(...)} in cases
    18  %2 = sext i9 %1 to i64 ; <i64> [#uses=1]
    22  %6 = sext i7 %0 to i64 ; <i64> [#uses=1]
    41  %2 = sext i9 %1 to i64 ; <i64> [#uses=1]
    45  %6 = sext i8 %0 to i64 ; <i64> [#uses=1]
    64  %2 = sext i9 %1 to i64 ; <i64> [#uses=1]
    68  %6 = sext i8 %0 to i64 ; <i64> [#uses=1]
    87  %2 = sext i9 %1 to i64 ; <i64> [#uses=1]
    91  %6 = sext i8 %0 to i64 ; <i64> [#uses=1]

  nsw-offset.ll
    21  ; CHECK: %1 = sext i32 %i.01 to i64
    23  %1 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
    30  %4 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
    35  ; CHECK: %8 = sext i32 %7 to i64
    37  %8 = sext i32 %7 to i64 ; <i64> [#uses=1]
    47  ; CHECK: %t8 = sext i32 %t7 to i64
    49  %t8 = sext i32 %t7 to i64 ; <i64> [#uses=1]
    59  %14 = sext i32 %i.01 to i64 ; <i64> [#uses=1]