/external/pdfium/core/fxcodec/jbig2/
JBig2_Image.cpp — hits in composeTo_opt2():
  302  uint32_t tmp2 = JBIG2_GETDWORD(lineDst);   (local)
  306  tmp = (tmp2 & ~maskM) | ((tmp1 | tmp2) & maskM);
  309  tmp = (tmp2 & ~maskM) | ((tmp1 & tmp2) & maskM);
  312  tmp = (tmp2 & ~maskM) | ((tmp1 ^ tmp2) & maskM);
  315  tmp = (tmp2 & ~maskM) | ((~(tmp1 ^ tmp2)) & maskM);
  318  tmp = (tmp2 & ~maskM) | (tmp1 & maskM);
  332  uint32_t tmp2 = JBIG2_GETDWORD(lineDst);   (local)
  336  tmp = (tmp2 & ~maskM) | ((tmp1 | tmp2) & maskM);
  339  tmp = (tmp2 & ~maskM) | ((tmp1 & tmp2) & maskM);
  342  tmp = (tmp2 & ~maskM) | ((tmp1 ^ tmp2) & maskM);
  [all …]
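Every hit above is the same masked read-modify-write: destination bits outside maskM are preserved, and bits under the mask take src-OP-dst for the segment's compose operator (OR, AND, XOR, XNOR, replace). A minimal C sketch of that pattern — compose_word and ComposeOp are illustrative names, not pdfium's API:

    #include <stdint.h>

    typedef enum { OP_OR, OP_AND, OP_XOR, OP_XNOR, OP_REPLACE } ComposeOp;

    /* Compose one 32-bit word: bits under `mask` receive src OP dst,
       bits outside it keep the destination value, mirroring the
       (tmp2 & ~maskM) | (... & maskM) lines above. */
    static uint32_t compose_word(uint32_t src, uint32_t dst,
                                 uint32_t mask, ComposeOp op) {
      uint32_t combined;
      switch (op) {
        case OP_OR:   combined = src | dst;    break;
        case OP_AND:  combined = src & dst;    break;
        case OP_XOR:  combined = src ^ dst;    break;
        case OP_XNOR: combined = ~(src ^ dst); break;
        default:      combined = src;          break;  /* replace */
      }
      return (dst & ~mask) | (combined & mask);
    }
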
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/ |
vshift.ll:
   7  %tmp2 = load <8 x i8>* %B
   8  %tmp3 = shl <8 x i8> %tmp1, %tmp2
  16  %tmp2 = load <4 x i16>* %B
  17  %tmp3 = shl <4 x i16> %tmp1, %tmp2
  25  %tmp2 = load <2 x i32>* %B
  26  %tmp3 = shl <2 x i32> %tmp1, %tmp2
  34  %tmp2 = load <1 x i64>* %B
  35  %tmp3 = shl <1 x i64> %tmp1, %tmp2
  43  %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
  44  ret <8 x i8> %tmp2
  [all …]
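These are codegen tests for element-wise vector shifts (NEON VSHL with a register or immediate count). A scalar model of what `shl <8 x i8>` computes per lane — returning 0 for counts of 8 or more is a choice made for this sketch; in LLVM IR such a shift is poison:

    #include <stdint.h>

    /* Lane-by-lane left shift of two 8-byte vectors. */
    static void shl_v8i8(const uint8_t a[8], const uint8_t b[8],
                         uint8_t out[8]) {
      for (int i = 0; i < 8; ++i)
        out[i] = (b[i] < 8) ? (uint8_t)(a[i] << b[i]) : 0;
    }
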
vbits.ll:
   7  %tmp2 = load <8 x i8>* %B
   8  %tmp3 = and <8 x i8> %tmp1, %tmp2
  16  %tmp2 = load <4 x i16>* %B
  17  %tmp3 = and <4 x i16> %tmp1, %tmp2
  25  %tmp2 = load <2 x i32>* %B
  26  %tmp3 = and <2 x i32> %tmp1, %tmp2
  34  %tmp2 = load <1 x i64>* %B
  35  %tmp3 = and <1 x i64> %tmp1, %tmp2
  43  %tmp2 = load <16 x i8>* %B
  44  %tmp3 = and <16 x i8> %tmp1, %tmp2
  [all …]
vshl.ll:
   7  %tmp2 = load <8 x i8>* %B
   8  %tmp3 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  16  %tmp2 = load <4 x i16>* %B
  17  %tmp3 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  25  %tmp2 = load <2 x i32>* %B
  26  %tmp3 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  34  %tmp2 = load <1 x i64>* %B
  35  %tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  43  %tmp2 = load <8 x i8>* %B
  44  %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  [all …]
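Unlike the plain `shl` tests, these call the target intrinsics behind VSHL by register, whose per-lane shift count is signed: a negative count shifts right instead (arithmetically for vshifts, logically for vshiftu). A simplified one-lane model — the count clamping below is a simplification for the sketch, not the exact hardware rule:

    #include <stdint.h>

    static int8_t vshl_s8(int8_t x, int8_t count) {
      if (count >= 0)
        return (int8_t)((uint8_t)x << (count & 7));  /* shift left */
      /* Arithmetic right shift on negative x is the common-ABI behavior. */
      return (int8_t)(x >> ((-count) & 7));
    }
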
vadd.ll:
   7  %tmp2 = load <8 x i8>* %B
   8  %tmp3 = add <8 x i8> %tmp1, %tmp2
  16  %tmp2 = load <4 x i16>* %B
  17  %tmp3 = add <4 x i16> %tmp1, %tmp2
  25  %tmp2 = load <2 x i32>* %B
  26  %tmp3 = add <2 x i32> %tmp1, %tmp2
  34  %tmp2 = load <1 x i64>* %B
  35  %tmp3 = add <1 x i64> %tmp1, %tmp2
  43  %tmp2 = load <2 x float>* %B
  44  %tmp3 = fadd <2 x float> %tmp1, %tmp2
  [all …]
vsub.ll:
   7  %tmp2 = load <8 x i8>* %B
   8  %tmp3 = sub <8 x i8> %tmp1, %tmp2
  16  %tmp2 = load <4 x i16>* %B
  17  %tmp3 = sub <4 x i16> %tmp1, %tmp2
  25  %tmp2 = load <2 x i32>* %B
  26  %tmp3 = sub <2 x i32> %tmp1, %tmp2
  34  %tmp2 = load <1 x i64>* %B
  35  %tmp3 = sub <1 x i64> %tmp1, %tmp2
  43  %tmp2 = load <2 x float>* %B
  44  %tmp3 = fsub <2 x float> %tmp1, %tmp2
  [all …]
vqshl.ll:
   7  %tmp2 = load <8 x i8>* %B
   8  %tmp3 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  16  %tmp2 = load <4 x i16>* %B
  17  %tmp3 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  25  %tmp2 = load <2 x i32>* %B
  26  %tmp3 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  34  %tmp2 = load <1 x i64>* %B
  35  %tmp3 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  43  %tmp2 = load <8 x i8>* %B
  44  %tmp3 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  [all …]
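These exercise the saturating variants (VQSHL): when the shift would overflow the lane, the result clamps to the type's extremes instead of wrapping. One-lane sketch:

    #include <stdint.h>

    static int8_t qshl_s8(int8_t x, unsigned count) {
      int32_t wide = (int32_t)x << (count & 7);  /* widen so nothing is lost */
      if (wide > INT8_MAX) return INT8_MAX;
      if (wide < INT8_MIN) return INT8_MIN;
      return (int8_t)wide;
    }
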
vcvt.ll:
   7  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
   8  ret <2 x i32> %tmp2
  15  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
  16  ret <2 x i32> %tmp2
  23  %tmp2 = sitofp <2 x i32> %tmp1 to <2 x float>
  24  ret <2 x float> %tmp2
  31  %tmp2 = uitofp <2 x i32> %tmp1 to <2 x float>
  32  ret <2 x float> %tmp2
  39  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
  40  ret <4 x i32> %tmp2
  [all …]
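The four conversions under test map float lanes to integers and back (VCVT). For in-range values, plain C casts model the lane semantics: fptosi/fptoui truncate toward zero, sitofp/uitofp round to nearest under the default rounding mode:

    #include <stdint.h>

    static int32_t  f32_to_s32(float f)    { return (int32_t)f;  }
    static uint32_t f32_to_u32(float f)    { return (uint32_t)f; }
    static float    s32_to_f32(int32_t i)  { return (float)i;    }
    static float    u32_to_f32(uint32_t u) { return (float)u;    }
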
vneg.ll:
   7  %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
   8  ret <8 x i8> %tmp2
  15  %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
  16  ret <4 x i16> %tmp2
  23  %tmp2 = sub <2 x i32> zeroinitializer, %tmp1
  24  ret <2 x i32> %tmp2
  31  %tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
  32  ret <2 x float> %tmp2
  39  %tmp2 = sub <16 x i8> zeroinitializer, %tmp1
  40  ret <16 x i8> %tmp2
  [all …]
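These tests predate a dedicated fneg instruction, so negation is spelled as subtraction from zero. The float form subtracts from -0.0 rather than 0.0 because 0.0 - 0.0 is +0.0; only the -0.0 form also flips the sign of a zero input:

    static int   neg_i32(int x)   { return 0 - x; }
    static float neg_f32(float x) { return -0.0f - x; }  /* neg_f32(0.0f) == -0.0f */
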
vrev.ll:
   7  …%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3…
   8  ret <8 x i8> %tmp2
  15  %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  16  ret <4 x i16> %tmp2
  23  %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
  24  ret <2 x i32> %tmp2
  31  %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
  32  ret <2 x float> %tmp2
  39  …%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i3…
  40  ret <16 x i8> %tmp2
  [all …]
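The shuffle masks here (3,2,1,0 and so on) reverse the lane order of a 64-bit vector, which the ARM backend matches to the VREV instructions. Scalar model of the 8-byte case:

    #include <stdint.h>

    static void rev64_u8(const uint8_t in[8], uint8_t out[8]) {
      for (int i = 0; i < 8; ++i)
        out[i] = in[7 - i];  /* lane i takes lane 7-i, as in the shuffle mask */
    }
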
/external/llvm/test/CodeGen/ARM/ |
vshift.ll:
   7  %tmp2 = load <8 x i8>, <8 x i8>* %B
   8  %tmp3 = shl <8 x i8> %tmp1, %tmp2
  16  %tmp2 = load <4 x i16>, <4 x i16>* %B
  17  %tmp3 = shl <4 x i16> %tmp1, %tmp2
  25  %tmp2 = load <2 x i32>, <2 x i32>* %B
  26  %tmp3 = shl <2 x i32> %tmp1, %tmp2
  34  %tmp2 = load <1 x i64>, <1 x i64>* %B
  35  %tmp3 = shl <1 x i64> %tmp1, %tmp2
  43  %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
  44  ret <8 x i8> %tmp2
  [all …]
vbits.ll:
   7  %tmp2 = load <8 x i8>, <8 x i8>* %B
   8  %tmp3 = and <8 x i8> %tmp1, %tmp2
  16  %tmp2 = load <4 x i16>, <4 x i16>* %B
  17  %tmp3 = and <4 x i16> %tmp1, %tmp2
  25  %tmp2 = load <2 x i32>, <2 x i32>* %B
  26  %tmp3 = and <2 x i32> %tmp1, %tmp2
  34  %tmp2 = load <1 x i64>, <1 x i64>* %B
  35  %tmp3 = and <1 x i64> %tmp1, %tmp2
  43  %tmp2 = load <16 x i8>, <16 x i8>* %B
  44  %tmp3 = and <16 x i8> %tmp1, %tmp2
  [all …]
vcnt.ll:
   8  %tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)
   9  ret <8 x i8> %tmp2
  16  %tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
  17  ret <16 x i8> %tmp2
  27  %tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)
  28  ret <8 x i8> %tmp2
  35  %tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)
  36  ret <4 x i16> %tmp2
  43  %tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)
  44  ret <2 x i32> %tmp2
  [all …]
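llvm.ctpop is population count (VCNT) and llvm.ctlz counts leading zeros (VCLZ); the trailing `i1 0` marks a zero input as well defined, so ctlz(0) is the lane width. Scalar byte-lane models:

    #include <stdint.h>

    static uint8_t ctpop8(uint8_t x) {
      uint8_t n = 0;
      while (x) { x &= (uint8_t)(x - 1); ++n; }  /* clear lowest set bit */
      return n;
    }

    static uint8_t ctlz8(uint8_t x) {
      uint8_t n = 8;                             /* ctlz8(0) == 8 */
      while (x) { --n; x >>= 1; }
      return n;
    }
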
vshl.ll:
   7  %tmp2 = load <8 x i8>, <8 x i8>* %B
   8  %tmp3 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  16  %tmp2 = load <4 x i16>, <4 x i16>* %B
  17  %tmp3 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  25  %tmp2 = load <2 x i32>, <2 x i32>* %B
  26  %tmp3 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  34  %tmp2 = load <1 x i64>, <1 x i64>* %B
  35  %tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  43  %tmp2 = load <8 x i8>, <8 x i8>* %B
  44  %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  [all …]
vneg.ll:
   7  %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
   8  ret <8 x i8> %tmp2
  15  %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
  16  ret <4 x i16> %tmp2
  23  %tmp2 = sub <2 x i32> zeroinitializer, %tmp1
  24  ret <2 x i32> %tmp2
  31  %tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
  32  ret <2 x float> %tmp2
  39  %tmp2 = sub <16 x i8> zeroinitializer, %tmp1
  40  ret <16 x i8> %tmp2
  [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/ |
vshift.ll:
   7  %tmp2 = load <8 x i8>, <8 x i8>* %B
   8  %tmp3 = shl <8 x i8> %tmp1, %tmp2
  16  %tmp2 = load <4 x i16>, <4 x i16>* %B
  17  %tmp3 = shl <4 x i16> %tmp1, %tmp2
  25  %tmp2 = load <2 x i32>, <2 x i32>* %B
  26  %tmp3 = shl <2 x i32> %tmp1, %tmp2
  34  %tmp2 = load <1 x i64>, <1 x i64>* %B
  35  %tmp3 = shl <1 x i64> %tmp1, %tmp2
  43  %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
  44  ret <8 x i8> %tmp2
  [all …]
vcnt.ll:
   8  %tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)
   9  ret <8 x i8> %tmp2
  16  %tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
  17  ret <16 x i8> %tmp2
  27  %tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)
  28  ret <8 x i8> %tmp2
  35  %tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)
  36  ret <4 x i16> %tmp2
  43  %tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)
  44  ret <2 x i32> %tmp2
  [all …]
vshl.ll:
   7  %tmp2 = load <8 x i8>, <8 x i8>* %B
   8  %tmp3 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  16  %tmp2 = load <4 x i16>, <4 x i16>* %B
  17  %tmp3 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  25  %tmp2 = load <2 x i32>, <2 x i32>* %B
  26  %tmp3 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  34  %tmp2 = load <1 x i64>, <1 x i64>* %B
  35  %tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  43  %tmp2 = load <8 x i8>, <8 x i8>* %B
  44  %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  [all …]
vneg.ll:
   7  %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
   8  ret <8 x i8> %tmp2
  15  %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
  16  ret <4 x i16> %tmp2
  23  %tmp2 = sub <2 x i32> zeroinitializer, %tmp1
  24  ret <2 x i32> %tmp2
  31  %tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
  32  ret <2 x float> %tmp2
  39  %tmp2 = sub <16 x i8> zeroinitializer, %tmp1
  40  ret <16 x i8> %tmp2
  [all …]
/external/webrtc/webrtc/modules/audio_coding/codecs/isac/fix/source/ |
lpc_masking_model_mips.c — hits in WebRtcIsacfix_CalculateResidualEnergyMIPS():
   36  int32_t tmp2, tmp3;   (local)
   55  : [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),
   71  : [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)
   75  if (((!(sign_1 || sign_2)) && (0x7FFFFFFF - sum64_hi < tmp2)) ||
   76      ((sign_1 && sign_2) && (sum64_hi + tmp2 > 0))) {
   89  : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
  103  : [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)
  119  int32_t tmp2, tmp3;   (local)
  141  : [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),
  156  : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3), [sum64_hi] "+r" (sum64_hi),
  [all …]
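Lines 75-76 are a hand-rolled signed-overflow test guarding the 64-bit accumulation (sum64_hi holds the high word, sign_1/sign_2 the operand signs). A portable sketch of the same predicate — the MIPS code tests the sign of the wrapped sum for the both-negative case, which is fine in assembly but would be undefined behavior in C:

    #include <stdint.h>

    /* Would a + b overflow a signed 32-bit accumulator? */
    static int add_would_overflow(int32_t a, int32_t b) {
      if (a >= 0 && b >= 0)
        return b > INT32_MAX - a;  /* matches 0x7FFFFFFF - sum64_hi < tmp2 */
      if (a < 0 && b < 0)
        return b < INT32_MIN - a;
      return 0;                    /* mixed signs never overflow */
    }
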
/external/libunwind/src/dwarf/ |
Gexpr.c — hits in dwarf_eval_expr():
  193  unw_word_t operand1 = 0, operand2 = 0, tmp1, tmp2, tmp3, end_addr;   (local)
  368  if ((ret = dwarf_readw (as, a, &tmp1, &tmp2, arg)) < 0)
  370  push (tmp2);
  386  tmp2 = u8;
  392  tmp2 = u16;
  399  tmp2 = u32;
  403  tmp2 >>= 8;
  405  tmp2 &= 0xffffff;
  414  tmp2 = u64;
  418  tmp2 >>= 64 - 8 * operand1;
  [all …]
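The shift-and-mask hits implement DW_OP_deref_size: the evaluator reads a full machine word, then trims it to the operand's byte count — keep the low bytes on little-endian, shift the value down on big-endian (which is what `tmp2 >>= 8` and `tmp2 >>= 64 - 8 * operand1` do). A sketch with an illustrative function name:

    #include <stdint.h>

    static uint64_t trim_deref(uint64_t word, unsigned size, int big_endian) {
      if (size >= 8)
        return word;                                    /* nothing to trim */
      if (big_endian)
        return word >> (64 - 8 * size);                 /* value sits in the high bytes */
      return word & ((UINT64_C(1) << (8 * size)) - 1);  /* value sits in the low bytes */
    }
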
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/ |
arm64-neon-scalar-by-elem-mul.ll:
   7  %tmp2 = fmul float %a, %tmp1;
   8  ret float %tmp2;
  15  %tmp2 = fmul float %tmp1, %a;
  16  ret float %tmp2;
  24  %tmp2 = fmul float %a, %tmp1;
  25  ret float %tmp2;
  32  %tmp2 = fmul float %tmp1, %a;
  33  ret float %tmp2;
  41  %tmp2 = fmul double %a, %tmp1;
  42  ret double %tmp2;
  [all …]
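Going by the file name and the shape of the tests, %tmp1 is a lane extracted from a vector argument, so each function multiplies a scalar by one vector element with both operand orders covered; AArch64 folds this into FMUL (by element). A scalar model with an assumed lane parameter:

    static float fmul_by_lane(float a, const float v[4], int lane) {
      return a * v[lane];
    }
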
/external/llvm/test/CodeGen/AArch64/ |
arm64-neon-scalar-by-elem-mul.ll:
   7  %tmp2 = fmul float %a, %tmp1;
   8  ret float %tmp2;
  15  %tmp2 = fmul float %tmp1, %a;
  16  ret float %tmp2;
  24  %tmp2 = fmul float %a, %tmp1;
  25  ret float %tmp2;
  32  %tmp2 = fmul float %tmp1, %a;
  33  ret float %tmp2;
  41  %tmp2 = fmul double %a, %tmp1;
  42  ret double %tmp2;
  [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/Reassociate/ |
otherops.ll:
   7  ; CHECK-NEXT: %tmp2 = mul i32 %arg, 144
   8  ; CHECK-NEXT: ret i32 %tmp2
  11  %tmp2 = mul i32 %tmp1, 12
  12  ret i32 %tmp2
  17  ; CHECK-NEXT: %tmp2 = and i32 %arg, 14
  18  ; CHECK-NEXT: ret i32 %tmp2
  21  %tmp2 = and i32 %tmp1, 14
  22  ret i32 %tmp2
  27  ; CHECK-NEXT: %tmp2 = or i32 %arg, 14
  28  ; CHECK-NEXT: ret i32 %tmp2
  [all …]
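Reading the CHECK lines against the inputs shows what Reassociate proves here: %tmp1 is presumably the same operation already applied to %arg, so the repeated multiply by 12 folds to a single multiply by 144, and repeating and/or with 14 collapses by idempotence. The same identities in C:

    static int mul_twice(int x) { return (x * 12) * 12; }  /* == x * 144 */
    static int and_twice(int x) { return (x & 14) & 14; }  /* == x & 14 */
    static int or_twice(int x)  { return (x | 14) | 14; }  /* == x | 14 */
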
/external/llvm/test/Transforms/Reassociate/ |
otherops.ll:
   7  ; CHECK-NEXT: %tmp2 = mul i32 %arg, 144
   8  ; CHECK-NEXT: ret i32 %tmp2
  11  %tmp2 = mul i32 %tmp1, 12
  12  ret i32 %tmp2
  17  ; CHECK-NEXT: %tmp2 = and i32 %arg, 14
  18  ; CHECK-NEXT: ret i32 %tmp2
  21  %tmp2 = and i32 %tmp1, 14
  22  ret i32 %tmp2
  27  ; CHECK-NEXT: %tmp2 = or i32 %arg, 14
  28  ; CHECK-NEXT: ret i32 %tmp2
  [all …]