
Search query: refs:conv4 (results 1 – 25 of 148, sorted by relevance)


/external/llvm-project/llvm/test/Transforms/AggressiveInstCombine/
trunc_select.ll
    17  %conv4 = trunc i32 %sel to i16
    18  ret i16 %conv4
    31  %conv4 = trunc i32 %sel to i8
    32  ret i8 %conv4
    47  %conv4 = trunc i32 %sel to i16
    48  ret i16 %conv4
    61  %conv4 = trunc i32 %sel to i16
    62  ret i16 %conv4
    75  %conv4 = trunc i32 %sel to i16
    76  ret i16 %conv4
    [all …]
trunc_select_cmp.ll
    18  %conv4 = trunc i32 %cond to i16
    19  ret i16 %conv4
    37  %conv4 = trunc i32 %cond to i16
    38  ret i16 %conv4
    56  %conv4 = trunc i32 %cond to i16
    57  ret i16 %conv4
    75  %conv4 = trunc i32 %cond to i16
    76  ret i16 %conv4
    94  %conv4 = trunc i32 %cond to i16
    95  ret i16 %conv4
    [all …]
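For orientation, the lines matched above all share one shape: a select computed at i32 whose result is immediately truncated to a narrower integer, which is the pattern these AggressiveInstCombine tests exercise. A minimal self-contained sketch of that IR, with illustrative function and argument names (not taken from the test files):

    define i16 @trunc_select_sketch(i32 %a, i32 %b, i1 %c) {
    entry:
      ; select in i32, then narrow the chosen value to i16
      %sel = select i1 %c, i32 %a, i32 %b
      %conv4 = trunc i32 %sel to i16
      ret i16 %conv4
    }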
/external/llvm-project/llvm/test/CodeGen/NVPTX/
sext-in-reg.ll
    13  %conv4 = ashr exact i64 %sext1, 56
    16  %add = add nsw i64 %conv4, %conv1
    31  %conv4 = ashr exact i64 %sext1, 32
    34  %add = add nsw i64 %conv4, %conv1
    49  %conv4 = ashr exact i64 %sext1, 48
    52  %add = add nsw i64 %conv4, %conv1
    67  %conv4 = ashr exact i32 %sext1, 24
    70  %add = add nsw i32 %conv4, %conv1
    85  %conv4 = ashr exact i32 %sext1, 16
    88  %add = add nsw i32 %conv4, %conv1
    [all …]
/external/llvm/test/CodeGen/NVPTX/
sext-in-reg.ll
    13  %conv4 = ashr exact i64 %sext1, 56
    16  %add = add nsw i64 %conv4, %conv1
    31  %conv4 = ashr exact i64 %sext1, 32
    34  %add = add nsw i64 %conv4, %conv1
    49  %conv4 = ashr exact i64 %sext1, 48
    52  %add = add nsw i64 %conv4, %conv1
    67  %conv4 = ashr exact i32 %sext1, 24
    70  %add = add nsw i32 %conv4, %conv1
    85  %conv4 = ashr exact i32 %sext1, 16
    88  %add = add nsw i32 %conv4, %conv1
    [all …]
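Both copies of sext-in-reg.ll above match the shift-based sign-extension idiom: a value is shifted left so the field of interest lands in the top bits, then arithmetically shifted back. Only the ashr and the following add appear in the matched lines; the shl producer below is an assumed reconstruction of the usual idiom, and the names are illustrative:

    define i64 @sext_in_reg_sketch(i64 %x) {
    entry:
      %sext1 = shl i64 %x, 56              ; move the low 8 bits into the top byte
      %conv4 = ashr exact i64 %sext1, 56   ; shift back: sign-extends bit 7 across the i64
      ret i64 %conv4
    }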
/external/llvm-project/llvm/test/Analysis/CostModel/X86/
interleaved-store-i8.ll
     9  ;CHECK: LV: Found an estimated cost of 1 for VF 1 For instruction: store i8 %conv4
    10  ;CHECK: LV: Found an estimated cost of 8 for VF 2 For instruction: store i8 %conv4
    11  ;CHECK: LV: Found an estimated cost of 9 for VF 4 For instruction: store i8 %conv4
    12  ;CHECK: LV: Found an estimated cost of 12 for VF 8 For instruction: store i8 %conv4
    13  ;CHECK: LV: Found an estimated cost of 13 for VF 16 For instruction: store i8 %conv4
    14  ;CHECK: LV: Found an estimated cost of 16 for VF 32 For instruction: store i8 %conv4
    22  %conv4 = shl i8 %conv, 2
    33  store i8 %conv4, i8* %incdec.ptr2, align 1
    60  %conv4 = shl i8 %conv, 2
    73  store i8 %conv4, i8* %incdec.ptr2, align 1
/external/llvm/test/CodeGen/Mips/
misha.ll
    15  %1 = phi i8 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ]
    23  %conv4 = trunc i32 %add to i8
    24  store i8 %conv4, i8* %to, align 1
    51  %1 = phi i16 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ]
    59  %conv4 = trunc i32 %add to i16
    60  store i16 %conv4, i16* %to, align 2
madd-msub.ll
    52  %conv4 = sext i32 %c to i64
    53  %add = add nsw i64 %mul, %conv4
    91  %conv4 = zext i32 %c to i64
    92  %add = add nsw i64 %mul, %conv4
   176  %conv4 = sext i32 %b to i64
   177  %mul = mul nsw i64 %conv4, %conv2
   216  %conv4 = zext i32 %b to i64
   217  %mul = mul nsw i64 %conv4, %conv2
/external/llvm-project/llvm/test/CodeGen/Mips/
misha.ll
    15  %1 = phi i8 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ]
    23  %conv4 = trunc i32 %add to i8
    24  store i8 %conv4, i8* %to, align 1
    51  %1 = phi i16 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ]
    59  %conv4 = trunc i32 %add to i16
    60  store i16 %conv4, i16* %to, align 2
/external/llvm-project/llvm/test/Transforms/LoopVectorize/X86/
mul_slm_16bit.ll
    40  %conv4 = zext i8 %1 to i32
    41  %mul2 = mul nsw i32 %conv4, %conv
    47  %mul3 = mul nsw i32 %conv5, %conv4
    62  %mul6 = mul nsw i32 -120, %conv4
    67  %mul7 = mul nsw i32 250, %conv4
   109  %conv4 = zext i16 %1 to i32
   110  %mul2 = mul nsw i32 %conv4, %conv
   116  %mul3 = mul nsw i32 %conv5, %conv4
   131  %mul6 = mul nsw i32 -32000, %conv4
   136  %mul7 = mul nsw i32 250, %conv4
/external/llvm-project/llvm/test/CodeGen/ARM/ParallelDSP/
aliasing.ll
    38  %conv4 = sext i16 %0 to i32
    39  %mul = mul nsw i32 %conv, %conv4
    86  %conv4 = sext i16 %0 to i32
    87  %mul = mul nsw i32 %conv, %conv4
   135  %conv4 = sext i16 %0 to i32
   136  %mul = mul nsw i32 %conv, %conv4
   179  %conv4 = sext i16 %0 to i32
   180  %mul = mul nsw i32 %conv, %conv4
   229  %conv4 = sext i16 %0 to i32
   230  %mul = mul nsw i32 %conv, %conv4
    [all …]
smlald1.ll
    36  %conv4 = sext i16 %0 to i64
    37  %mul = mul nsw i64 %conv, %conv4
    82  %conv4 = sext i8 %0 to i64
    83  %mul = mul nsw i64 %conv, %conv4
smlad1.ll
    36  %conv4 = sext i16 %0 to i32
    37  %mul = mul nsw i32 %conv, %conv4
    82  %conv4 = sext i8 %0 to i32
    83  %mul = mul nsw i32 %conv, %conv4
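The ParallelDSP matches (aliasing.ll, smlald1.ll, smlad1.ll) all show narrow values sign-extended and multiplied before being accumulated, the scalar core that this pass looks to combine into SMLAD/SMLALD-style operations, going by the test names. The real tests load the narrow values from memory inside a loop so the pass can pair adjacent loads; the sketch below keeps only the multiply-accumulate core, with illustrative names:

    define i32 @smlad_core_sketch(i16 %a, i16 %b, i32 %acc) {
    entry:
      %conv  = sext i16 %a to i32
      %conv4 = sext i16 %b to i32
      %mul   = mul nsw i32 %conv, %conv4   ; signed 16 x 16 -> 32-bit product
      %add   = add i32 %acc, %mul          ; running accumulator
      ret i32 %add
    }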
/external/llvm/test/CodeGen/ARM/
smml.ll
     6  %conv4 = zext i32 %a to i64
    11  %sub = sub nsw i64 %conv4, %shr5
/external/llvm/test/CodeGen/PowerPC/
ec-input.ll
    57  …%conv4.i65.lcssa = phi i32 [ %inp32, %if.end14 ], [ 0, %if.end14.1 ], [ %conv4.i65.2, %if.end14.2 …
    59  …lementptr inbounds ([49 x i8], [49 x i8]* @.str294, i64 0, i64 0), i32 signext %conv4.i65.lcssa) #3
   107  %conv4.i65.2 = trunc i64 %asmresult1.i64.2 to i32
   108  %cmp19.2 = icmp eq i32 %conv4.i65.2, 0
   131  %conv4.i65.3 = trunc i64 %asmresult1.i64.3 to i32
   132  %cmp19.3 = icmp eq i32 %conv4.i65.3, 0
/external/llvm-project/llvm/test/CodeGen/PowerPC/
ec-input.ll
    57  …%conv4.i65.lcssa = phi i32 [ %inp32, %if.end14 ], [ 0, %if.end14.1 ], [ %conv4.i65.2, %if.end14.2 …
    59  …lementptr inbounds ([49 x i8], [49 x i8]* @.str294, i64 0, i64 0), i32 signext %conv4.i65.lcssa) #3
   107  %conv4.i65.2 = trunc i64 %asmresult1.i64.2 to i32
   108  %cmp19.2 = icmp eq i32 %conv4.i65.2, 0
   131  %conv4.i65.3 = trunc i64 %asmresult1.i64.3 to i32
   132  %cmp19.3 = icmp eq i32 %conv4.i65.3, 0
setcclike-or-comb.ll
    20  %conv4 = zext i1 undef to i32
    21  store i32 %conv4, i32* @b, align 4
/external/llvm/test/CodeGen/X86/
alldiv-divdi3.ll
    10  %conv4 = sext i32 %argc to i64
    11  %div = sdiv i64 84, %conv4
allrem-moddi3.ll
    12  %conv4 = sext i32 %argc to i64
    13  %div = srem i64 84, %conv4
/external/llvm-project/llvm/test/CodeGen/X86/
alldiv-divdi3.ll
    10  %conv4 = sext i32 %argc to i64
    11  %div = sdiv i64 84, %conv4
allrem-moddi3.ll
    12  %conv4 = sext i32 %argc to i64
    13  %div = srem i64 84, %conv4
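The X86 alldiv-divdi3.ll and allrem-moddi3.ll matches, present in both the external/llvm and external/llvm-project trees, widen an i32 argument to i64 and then divide (or take the remainder of) a 64-bit constant by it. A sketch that combines both operations in one illustrative function, not taken from either test:

    define i64 @divdi3_moddi3_sketch(i32 %argc) {
    entry:
      %conv4 = sext i32 %argc to i64
      %div   = sdiv i64 84, %conv4    ; 64-bit signed division
      %rem   = srem i64 84, %conv4    ; 64-bit signed remainder
      %sum   = add i64 %div, %rem
      ret i64 %sum
    }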
/external/llvm-project/polly/docs/experiments/matmul/
scops.init_array.dot
    10  conv4 = fptrunc double %div to float\l %arrayidx6 = getelementptr inbounds [1536 x [1536 x float]…
/external/llvm-project/llvm/test/Transforms/LoopVectorize/AArch64/
reduction-small-size.ll
    52  %conv4 = and i32 %sum.013, 255
    53  %add = add nuw nsw i32 %conv, %conv4
   107  %conv4.13 = and i32 %sum.017, 65535
   108  %add = add nuw nsw i32 %conv.14, %conv4.13
   164  %conv4.13 = and i32 %sum.015, 65535
   165  %add = add nuw nsw i32 %conv, %conv4.13
/external/llvm-project/llvm/test/Transforms/LoopUnswitch/
pr32818.ll
    15  %conv4 = sext i8 %split to i32
    17  %call = tail call fastcc i32 @fn5(i32 %conv4)
/external/llvm/test/Transforms/SLPVectorizer/X86/
pr16628.ll
    22  %conv4 = zext i1 %cmp to i16
    23  store i16 %conv4, i16* @b, align 2
/external/llvm/test/Transforms/LoopVectorize/AArch64/
reduction-small-size.ll
    60  %conv4 = and i32 %sum.013, 255
    61  %add = add nuw nsw i32 %conv, %conv4
   121  %conv4.13 = and i32 %sum.017, 65535
   122  %add = add nuw nsw i32 %conv.14, %conv4.13
   184  %conv4.13 = and i32 %sum.015, 65535
   185  %add = add nuw nsw i32 %conv, %conv4.13
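Both versions of reduction-small-size.ll match the same reduction shape: the running sum is masked back down to 8 or 16 bits each iteration before the next element is added, presumably so the vectorizer can treat it as a narrow reduction. A self-contained loop sketch of the 8-bit case, assuming a simple load-and-accumulate body (the surrounding loop and all names are illustrative):

    define i32 @small_reduction_sketch(i8* %p, i32 %n) {
    entry:
      br label %loop

    loop:
      %i      = phi i32 [ 0, %entry ], [ %i.next, %loop ]
      %sum    = phi i32 [ 0, %entry ], [ %add, %loop ]
      %gep    = getelementptr inbounds i8, i8* %p, i32 %i
      %x      = load i8, i8* %gep, align 1
      %conv   = zext i8 %x to i32
      %conv4  = and i32 %sum, 255              ; re-narrow the accumulator to 8 bits
      %add    = add nuw nsw i32 %conv, %conv4
      %i.next = add nuw nsw i32 %i, 1
      %done   = icmp eq i32 %i.next, %n
      br i1 %done, label %exit, label %loop

    exit:
      ret i32 %add
    }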
