/external/pdfium/core/fxcodec/jbig2/

D | JBig2_Image.cpp |
    320  uint32_t tmp2 = JBIG2_GETDWORD(lineDst);  in ComposeToInternal() local
    324  tmp = (tmp2 & ~maskM) | ((tmp1 | tmp2) & maskM);  in ComposeToInternal()
    327  tmp = (tmp2 & ~maskM) | ((tmp1 & tmp2) & maskM);  in ComposeToInternal()
    330  tmp = (tmp2 & ~maskM) | ((tmp1 ^ tmp2) & maskM);  in ComposeToInternal()
    333  tmp = (tmp2 & ~maskM) | ((~(tmp1 ^ tmp2)) & maskM);  in ComposeToInternal()
    336  tmp = (tmp2 & ~maskM) | (tmp1 & maskM);  in ComposeToInternal()
    349  uint32_t tmp2 = JBIG2_GETDWORD(lineDst);  in ComposeToInternal() local
    353  tmp = (tmp2 & ~maskM) | ((tmp1 | tmp2) & maskM);  in ComposeToInternal()
    356  tmp = (tmp2 & ~maskM) | ((tmp1 & tmp2) & maskM);  in ComposeToInternal()
    359  tmp = (tmp2 & ~maskM) | ((tmp1 ^ tmp2) & maskM);  in ComposeToInternal()
    [all …]
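Every match above is the same masked read-modify-write: combine the source word with the destination word using the segment's compose operator, but only in the bit positions selected by maskM. A minimal C sketch of that pattern (the enum and function are illustrative, not PDFium's actual API):

#include <stdint.h>

/* JBIG2-style compose operators: OR, AND, XOR, XNOR, REPLACE. */
typedef enum { OP_OR, OP_AND, OP_XOR, OP_XNOR, OP_REPLACE } ComposeOp;

/* Combine src and dst words, but only where `mask` has bits set;
   positions outside the mask keep dst's original bits. */
static uint32_t compose_word(uint32_t src, uint32_t dst,
                             uint32_t mask, ComposeOp op) {
  uint32_t combined = 0;
  switch (op) {
    case OP_OR:      combined = src | dst;    break;
    case OP_AND:     combined = src & dst;    break;
    case OP_XOR:     combined = src ^ dst;    break;
    case OP_XNOR:    combined = ~(src ^ dst); break;
    case OP_REPLACE: combined = src;          break;
  }
  return (dst & ~mask) | (combined & mask);
}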

/external/arm-optimized-routines/string/aarch64/

D | strcpy.S |
    35  #define tmp2 x9  macro
    95  and tmp2, srcin, #(MIN_PAGE_SIZE - 1)
    98  cmp tmp2, #(MIN_PAGE_SIZE - 16)
    114  rev tmp2, data1
    115  sub tmp1, tmp2, zeroones
    116  orr tmp2, tmp2, #REP8_7f
    117  bics has_nul1, tmp1, tmp2
    124  orr tmp2, data1, #REP8_7f
    125  bics has_nul1, tmp1, tmp2
    139  mov tmp2, #56
    [all …]
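The sub/orr/bics sequence at file lines 114-117 is the classic word-at-a-time NUL scan: with zeroones = 0x0101010101010101 and REP8_7f = 0x7f7f7f7f7f7f7f7f, `bics` computes (data - 0x01…01) AND NOT (data | 0x7f…7f), which is nonzero exactly when some byte of data is zero. The same test as a C sketch:

#include <stdint.h>

#define REP8_01 0x0101010101010101ULL   /* "zeroones" in the asm */
#define REP8_7F 0x7f7f7f7f7f7f7f7fULL   /* REP8_7f */

/* Nonzero iff any byte of x is zero. Per byte b: (b - 1) sets bit 7
   when b is 0 or b >= 0x81; ANDing with ~b (via ~(x | 0x7f..7f))
   keeps only the b == 0 case. */
static int has_zero_byte(uint64_t x) {
  return ((x - REP8_01) & ~(x | REP8_7F)) != 0;
}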

/external/llvm-project/libc/AOR_v20.02/string/aarch64/

D | strcpy.S |
    36  #define tmp2 x9  macro
    94  and tmp2, srcin, #(MIN_PAGE_SIZE - 1)
    97  cmp tmp2, #(MIN_PAGE_SIZE - 16)
    113  rev tmp2, data1
    114  sub tmp1, tmp2, zeroones
    115  orr tmp2, tmp2, #REP8_7f
    116  bics has_nul1, tmp1, tmp2
    123  orr tmp2, data1, #REP8_7f
    124  bics has_nul1, tmp1, tmp2
    138  mov tmp2, #56
    [all …]

D | strlen-mte.S |
    27  #define tmp2 x5  macro
    82  orr tmp2, data1, REP8_7f
    85  bics has_nul1, tmp1, tmp2
    93  and tmp2, srcin, 7  /* Bytes to ignore. */
    95  neg tmp2, tmp2
    98  add tmp3, tmp2, 8
    99  csel len, tmp2, tmp3, cc
    105  orr tmp2, data2, REP8_7f
    106  bics has_nul1, tmp1, tmp2
    123  orr tmp2, tmp1, tmp3
    [all …]
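strlen-mte combines the same NUL test with an aligned first read (the -mte variant must keep loads inside the bytes the string's memory tag covers): `and tmp2, srcin, 7` counts the leading bytes of the first aligned word that precede the string, and the neg/add/csel pair folds that count into the final length. Roughly the same algorithm in C (little-endian; the aligned over-read is fine for the hardware but sits outside the strict C object model, which is one reason this lives in assembly):

#include <stddef.h>
#include <stdint.h>

#define REP8_01 0x0101010101010101ULL
#define REP8_80 0x8080808080808080ULL

/* Word-at-a-time strlen sketch (GCC/Clang builtin assumed). */
static size_t strlen_swar(const char *s) {
  const uint64_t *p = (const uint64_t *)((uintptr_t)s & ~(uintptr_t)7);
  size_t ignore = (uintptr_t)s & 7;            /* bytes to ignore */
  /* Force the ignored leading bytes to 0xff so they can't look NUL. */
  uint64_t x = *p | ~(~0ULL << (8 * ignore));
  uint64_t zeros;
  while ((zeros = (x - REP8_01) & ~x & REP8_80) == 0)
    x = *++p;
  /* Lowest set bit of `zeros` marks the first NUL byte. */
  return (size_t)((const char *)p + (__builtin_ctzll(zeros) >> 3) - s);
}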

/external/webrtc/modules/audio_coding/codecs/isac/fix/source/

D | lpc_masking_model_mips.c |
    36  int32_t tmp2, tmp3;  in WebRtcIsacfix_CalculateResidualEnergyMIPS() local
    55  : [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),  in WebRtcIsacfix_CalculateResidualEnergyMIPS()
    71  : [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)  in WebRtcIsacfix_CalculateResidualEnergyMIPS()
    75  if (((!(sign_1 || sign_2)) && (0x7FFFFFFF - sum64_hi < tmp2)) ||  in WebRtcIsacfix_CalculateResidualEnergyMIPS()
    76  ((sign_1 && sign_2) && (sum64_hi + tmp2 > 0))) {  in WebRtcIsacfix_CalculateResidualEnergyMIPS()
    89  : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),  in WebRtcIsacfix_CalculateResidualEnergyMIPS()
    103  : [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)  in WebRtcIsacfix_CalculateResidualEnergyMIPS()
    119  int32_t tmp2, tmp3;  in WebRtcIsacfix_CalculateResidualEnergyMIPS() local
    141  : [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),  in WebRtcIsacfix_CalculateResidualEnergyMIPS()
    156  : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3), [sum64_hi] "+r" (sum64_hi),  in WebRtcIsacfix_CalculateResidualEnergyMIPS()
    [all …]
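File lines 75-76 are a pre-addition overflow guard on the 64-bit accumulator's high word: a positive + positive add overflows when 0x7FFFFFFF - sum64_hi < tmp2, and a negative + negative add when the sum wraps back above zero. The same guard as a self-contained C sketch (the negative case is rewritten against INT32_MIN, since the original's `sum64_hi + tmp2 > 0` relies on wrap-around that C leaves undefined):

#include <stdint.h>

/* Would acc + add overflow a signed 32-bit accumulator? */
static int add_would_overflow(int32_t acc, int32_t add) {
  if (acc >= 0 && add >= 0)
    return add > INT32_MAX - acc;   /* would exceed INT32_MAX */
  if (acc < 0 && add < 0)
    return add < INT32_MIN - acc;   /* would fall below INT32_MIN */
  return 0;                         /* mixed signs cannot overflow */
}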

/external/libiio/src/

D | sort.c |
    41  const struct iio_channel *tmp2 = *(struct iio_channel **)p2;  in iio_channel_compare() local
    44  if (iio_channel_is_scan_element(tmp1) && !iio_channel_is_scan_element(tmp2))  in iio_channel_compare()
    46  if (!iio_channel_is_scan_element(tmp1) && iio_channel_is_scan_element(tmp2))  in iio_channel_compare()
    49  if (iio_channel_is_scan_element(tmp1) && iio_channel_is_scan_element(tmp2)){  in iio_channel_compare()
    50  if (iio_channel_get_index(tmp1) > iio_channel_get_index(tmp2))  in iio_channel_compare()
    55  if (strcmp(tmp1->id, tmp2->id) == 0)  in iio_channel_compare()
    59  return strcmp(tmp1->id, tmp2->id);  in iio_channel_compare()
    65  const struct iio_channel_attr *tmp2 = (struct iio_channel_attr *)p2;  in iio_channel_attr_compare() local
    67  return strcmp(tmp1->name, tmp2->name);  in iio_channel_attr_compare()
    73  const struct iio_device *tmp2 = *(struct iio_device **)p2;  in iio_device_compare() local
    [all …]
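iio_channel_compare() is a tiered qsort comparator: scan elements sort before non-scan elements, two scan elements order by their scan index, and everything else falls back to the id string. A self-contained sketch of the same shape (the struct is a hypothetical stand-in for struct iio_channel, not libiio's type):

#include <stdlib.h>
#include <string.h>

struct channel {
  const char *id;
  int is_scan_element;
  long index;
};

/* qsort comparator over an array of struct channel pointers. */
static int channel_compare(const void *p1, const void *p2) {
  const struct channel *a = *(const struct channel *const *)p1;
  const struct channel *b = *(const struct channel *const *)p2;

  if (a->is_scan_element != b->is_scan_element)
    return b->is_scan_element - a->is_scan_element; /* scan elements first */
  if (a->is_scan_element && a->index != b->index)
    return a->index > b->index ? 1 : -1;            /* then by scan index */
  return strcmp(a->id, b->id);                      /* then by id */
}

/* Usage: qsort(chans, n, sizeof(struct channel *), channel_compare); */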

/external/llvm/test/CodeGen/ARM/

D | vshift.ll |
    7  %tmp2 = load <8 x i8>, <8 x i8>* %B
    8  %tmp3 = shl <8 x i8> %tmp1, %tmp2
    16  %tmp2 = load <4 x i16>, <4 x i16>* %B
    17  %tmp3 = shl <4 x i16> %tmp1, %tmp2
    25  %tmp2 = load <2 x i32>, <2 x i32>* %B
    26  %tmp3 = shl <2 x i32> %tmp1, %tmp2
    34  %tmp2 = load <1 x i64>, <1 x i64>* %B
    35  %tmp3 = shl <1 x i64> %tmp1, %tmp2
    43  %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
    44  ret <8 x i8> %tmp2
    [all …]
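These tests hand the backend plain IR `shl` with per-lane variable amounts, which should select NEON vshl. Approximately the same input from C, via intrinsics (shift counts assumed in range, as in the tests):

#include <arm_neon.h>

/* Element-wise shifts by a per-lane variable amount. */
int8x8_t  shl_v8i8(int8x8_t a, int8x8_t b)    { return vshl_s8(a, b);  }
int16x4_t shl_v4i16(int16x4_t a, int16x4_t b) { return vshl_s16(a, b); }
int32x2_t shl_v2i32(int32x2_t a, int32x2_t b) { return vshl_s32(a, b); }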

D | vbits.ll |
    7  %tmp2 = load <8 x i8>, <8 x i8>* %B
    8  %tmp3 = and <8 x i8> %tmp1, %tmp2
    16  %tmp2 = load <4 x i16>, <4 x i16>* %B
    17  %tmp3 = and <4 x i16> %tmp1, %tmp2
    25  %tmp2 = load <2 x i32>, <2 x i32>* %B
    26  %tmp3 = and <2 x i32> %tmp1, %tmp2
    34  %tmp2 = load <1 x i64>, <1 x i64>* %B
    35  %tmp3 = and <1 x i64> %tmp1, %tmp2
    43  %tmp2 = load <16 x i8>, <16 x i8>* %B
    44  %tmp3 = and <16 x i8> %tmp1, %tmp2
    [all …]

D | vcnt.ll |
    8  %tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)
    9  ret <8 x i8> %tmp2
    16  %tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
    17  ret <16 x i8> %tmp2
    27  %tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)
    28  ret <8 x i8> %tmp2
    35  %tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)
    36  ret <4 x i16> %tmp2
    43  %tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)
    44  ret <2 x i32> %tmp2
    [all …]
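vcnt.ll checks that the generic ctpop/ctlz intrinsics lower to NEON's population-count and count-leading-zeros instructions. The equivalent operations from C:

#include <arm_neon.h>

/* Per-byte popcount and per-lane count-leading-zeros. */
uint8x8_t popcnt_v8u8(uint8x8_t a) { return vcnt_u8(a);  }  /* llvm.ctpop */
int32x2_t clz_v2i32(int32x2_t a)   { return vclz_s32(a); }  /* llvm.ctlz  */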

D | vneg.ll |
    7  %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
    8  ret <8 x i8> %tmp2
    15  %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
    16  ret <4 x i16> %tmp2
    23  %tmp2 = sub <2 x i32> zeroinitializer, %tmp1
    24  ret <2 x i32> %tmp2
    31  %tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
    32  ret <2 x float> %tmp2
    39  %tmp2 = sub <16 x i8> zeroinitializer, %tmp1
    40  ret <16 x i8> %tmp2
    [all …]

D | vshl.ll |
    7  %tmp2 = load <8 x i8>, <8 x i8>* %B
    8  %tmp3 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    16  %tmp2 = load <4 x i16>, <4 x i16>* %B
    17  %tmp3 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    25  %tmp2 = load <2 x i32>, <2 x i32>* %B
    26  %tmp3 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    34  %tmp2 = load <1 x i64>, <1 x i64>* %B
    35  %tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
    43  %tmp2 = load <8 x i8>, <8 x i8>* %B
    44  %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    [all …]

D | vqshl.ll |
    7  %tmp2 = load <8 x i8>, <8 x i8>* %B
    8  %tmp3 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    16  %tmp2 = load <4 x i16>, <4 x i16>* %B
    17  %tmp3 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    25  %tmp2 = load <2 x i32>, <2 x i32>* %B
    26  %tmp3 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    34  %tmp2 = load <1 x i64>, <1 x i64>* %B
    35  %tmp3 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
    43  %tmp2 = load <8 x i8>, <8 x i8>* %B
    44  %tmp3 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    [all …]
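vqshl.ll exercises the saturating variants: unlike the plain shifts above, results clamp to the element type's range instead of wrapping. The corresponding intrinsics from C:

#include <arm_neon.h>

/* Saturating shifts; the count vector is signed in both cases. */
int8x8_t  qshl_s8x8(int8x8_t a, int8x8_t b)  { return vqshl_s8(a, b); }  /* vqshifts */
uint8x8_t qshl_u8x8(uint8x8_t a, int8x8_t b) { return vqshl_u8(a, b); }  /* vqshiftu */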

/external/llvm-project/llvm/test/CodeGen/ARM/

D | vshift.ll |
    7  %tmp2 = load <8 x i8>, <8 x i8>* %B
    8  %tmp3 = shl <8 x i8> %tmp1, %tmp2
    16  %tmp2 = load <4 x i16>, <4 x i16>* %B
    17  %tmp3 = shl <4 x i16> %tmp1, %tmp2
    25  %tmp2 = load <2 x i32>, <2 x i32>* %B
    26  %tmp3 = shl <2 x i32> %tmp1, %tmp2
    34  %tmp2 = load <1 x i64>, <1 x i64>* %B
    35  %tmp3 = shl <1 x i64> %tmp1, %tmp2
    43  %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
    44  ret <8 x i8> %tmp2
    [all …]

D | vcnt.ll |
    8  %tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)
    9  ret <8 x i8> %tmp2
    16  %tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
    17  ret <16 x i8> %tmp2
    27  %tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)
    28  ret <8 x i8> %tmp2
    35  %tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)
    36  ret <4 x i16> %tmp2
    43  %tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)
    44  ret <2 x i32> %tmp2
    [all …]

D | vneg.ll |
    7  %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
    8  ret <8 x i8> %tmp2
    15  %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
    16  ret <4 x i16> %tmp2
    23  %tmp2 = sub <2 x i32> zeroinitializer, %tmp1
    24  ret <2 x i32> %tmp2
    31  %tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
    32  ret <2 x float> %tmp2
    39  %tmp2 = sub <16 x i8> zeroinitializer, %tmp1
    40  ret <16 x i8> %tmp2
    [all …]

D | vshl.ll |
    7  %tmp2 = load <8 x i8>, <8 x i8>* %B
    8  %tmp3 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    16  %tmp2 = load <4 x i16>, <4 x i16>* %B
    17  %tmp3 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    25  %tmp2 = load <2 x i32>, <2 x i32>* %B
    26  %tmp3 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    34  %tmp2 = load <1 x i64>, <1 x i64>* %B
    35  %tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
    43  %tmp2 = load <8 x i8>, <8 x i8>* %B
    44  %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    [all …]

D | vqshl.ll |
    7  %tmp2 = load <8 x i8>, <8 x i8>* %B
    8  %tmp3 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    16  %tmp2 = load <4 x i16>, <4 x i16>* %B
    17  %tmp3 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    25  %tmp2 = load <2 x i32>, <2 x i32>* %B
    26  %tmp3 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    34  %tmp2 = load <1 x i64>, <1 x i64>* %B
    35  %tmp3 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
    43  %tmp2 = load <8 x i8>, <8 x i8>* %B
    44  %tmp3 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    [all …]

/external/llvm/test/Transforms/Reassociate/

D | otherops.ll |
    7  ; CHECK-NEXT: %tmp2 = mul i32 %arg, 144
    8  ; CHECK-NEXT: ret i32 %tmp2
    11  %tmp2 = mul i32 %tmp1, 12
    12  ret i32 %tmp2
    17  ; CHECK-NEXT: %tmp2 = and i32 %arg, 14
    18  ; CHECK-NEXT: ret i32 %tmp2
    21  %tmp2 = and i32 %tmp1, 14
    22  ret i32 %tmp2
    27  ; CHECK-NEXT: %tmp2 = or i32 %arg, 14
    28  ; CHECK-NEXT: ret i32 %tmp2
    [all …]
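The CHECK lines state what the Reassociate pass must produce: a chain of one associative operator collapses into a single instruction with the constants folded, so %arg * 12 * 12 becomes %arg * 144. The mul case from the test, seen from C:

/* Reassociation + constant folding: any optimizing compiler turns
   the two multiplies into the single `arg * 144` the test expects. */
unsigned mul_chain(unsigned arg) {
  unsigned tmp1 = arg * 12;
  return tmp1 * 12;               /* folds to: arg * 144 */
}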

/external/llvm-project/llvm/test/Transforms/Reassociate/

D | otherops.ll |
    7  ; CHECK-NEXT: %tmp2 = mul i32 %arg, 144
    8  ; CHECK-NEXT: ret i32 %tmp2
    11  %tmp2 = mul i32 %tmp1, 12
    12  ret i32 %tmp2
    17  ; CHECK-NEXT: %tmp2 = and i32 %arg, 14
    18  ; CHECK-NEXT: ret i32 %tmp2
    21  %tmp2 = and i32 %tmp1, 14
    22  ret i32 %tmp2
    27  ; CHECK-NEXT: %tmp2 = or i32 %arg, 14
    28  ; CHECK-NEXT: ret i32 %tmp2
    [all …]

/external/llvm/test/CodeGen/AArch64/

D | arm64-neon-scalar-by-elem-mul.ll |
    7  %tmp2 = fmul float %a, %tmp1;
    8  ret float %tmp2;
    15  %tmp2 = fmul float %tmp1, %a;
    16  ret float %tmp2;
    24  %tmp2 = fmul float %a, %tmp1;
    25  ret float %tmp2;
    32  %tmp2 = fmul float %tmp1, %a;
    33  ret float %tmp2;
    41  %tmp2 = fmul double %a, %tmp1;
    42  ret double %tmp2;
    [all …]
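The pattern under test: a scalar fmul whose other operand is an extracted vector lane should select the by-element encoding (e.g. fmul s0, s0, v1.s[3]) rather than a separate lane move plus a plain fmul. From C:

#include <arm_neon.h>

float fmul_lane3(float a, float32x4_t v) {
  return a * vgetq_lane_f32(v, 3);   /* expected: fmul by element */
}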

D | neon-mla-mls.ll |
    7  %tmp2 = add <8 x i8> %C, %tmp1;
    8  ret <8 x i8> %tmp2
    14  %tmp2 = add <16 x i8> %C, %tmp1;
    15  ret <16 x i8> %tmp2
    21  %tmp2 = add <4 x i16> %C, %tmp1;
    22  ret <4 x i16> %tmp2
    28  %tmp2 = add <8 x i16> %C, %tmp1;
    29  ret <8 x i16> %tmp2
    35  %tmp2 = add <2 x i32> %C, %tmp1;
    36  ret <2 x i32> %tmp2
    [all …]
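Each test here multiplies two vectors and adds a third, checking that the backend fuses the pair into a single mla (multiply-accumulate); the mls cases do the same with a subtract. From C:

#include <arm_neon.h>

int8x8_t mla_v8i8(int8x8_t a, int8x8_t b, int8x8_t c) {
  return vadd_s8(c, vmul_s8(a, b));   /* expected to select mla */
}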

/external/llvm-project/llvm/test/CodeGen/AArch64/

D | arm64-neon-scalar-by-elem-mul.ll |
    7  %tmp2 = fmul float %a, %tmp1;
    8  ret float %tmp2;
    15  %tmp2 = fmul float %tmp1, %a;
    16  ret float %tmp2;
    24  %tmp2 = fmul float %a, %tmp1;
    25  ret float %tmp2;
    32  %tmp2 = fmul float %tmp1, %a;
    33  ret float %tmp2;
    41  %tmp2 = fmul double %a, %tmp1;
    42  ret double %tmp2;
    [all …]

/external/llvm-project/libc/AOR_v20.02/string/arm/

D | memcpy.S |
    66  #define tmp2 r10  macro
    213  str tmp2, [sp, #-FRAME_SIZE]!
    214  and tmp2, src, #7
    216  cmp tmp1, tmp2
    229  lsls tmp2, dst, #29
    231  rsbs tmp2, tmp2, #0
    232  sub count, count, tmp2, lsr #29
    235  lsls tmp2, tmp2, #2
    237  ldrbne tmp2, [src], #1
    239  strbne tmp2, [dst], #1
    [all …]
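The lsls/rsbs/byte-copy sequence is the head-alignment prologue: the low bits of dst are tested through the flags, count is reduced by the number of leading bytes, and single bytes are copied until dst is 8-byte aligned. The idea in C (a sketch of that step only; the real routine also special-cases small copies and src/dst co-alignment):

#include <stddef.h>
#include <stdint.h>

static void *memcpy_sketch(void *dst, const void *src, size_t count) {
  unsigned char *d = dst;
  const unsigned char *s = src;
  size_t head = (size_t)(-(uintptr_t)d & 7);   /* bytes until dst aligns */
  if (head > count)
    head = count;
  count -= head;
  while (head--)
    *d++ = *s++;      /* single bytes, as the ldrbne/strbne pair does */
  while (count--)     /* stand-in for the wide aligned-copy loop */
    *d++ = *s++;
  return dst;
}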

/external/arm-optimized-routines/string/arm/

D | memcpy.S |
    65  #define tmp2 r10  macro
    212  str tmp2, [sp, #-FRAME_SIZE]!
    213  and tmp2, src, #7
    215  cmp tmp1, tmp2
    228  lsls tmp2, dst, #29
    230  rsbs tmp2, tmp2, #0
    231  sub count, count, tmp2, lsr #29
    234  lsls tmp2, tmp2, #2
    236  ldrbne tmp2, [src], #1
    238  strbne tmp2, [dst], #1
    [all …]

/external/webrtc/common_audio/signal_processing/

D | resample_by_2_mips.c |
    153  int32_t tmp1, tmp2, diff;  in WebRtcSpl_DownsampleBy2() local
    163  tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state1);  in WebRtcSpl_DownsampleBy2()
    165  diff = tmp2 - state3;  in WebRtcSpl_DownsampleBy2()
    167  state2 = tmp2;  in WebRtcSpl_DownsampleBy2()
    175  tmp2 = MUL_ACCUM_1(kResampleAllpass1[1], diff, state5);  in WebRtcSpl_DownsampleBy2()
    177  diff = tmp2 - state7;  in WebRtcSpl_DownsampleBy2()
    179  state6 = tmp2;  in WebRtcSpl_DownsampleBy2()
    192  tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state1);  in WebRtcSpl_DownsampleBy2()
    194  diff = tmp2 - state3;  in WebRtcSpl_DownsampleBy2()
    196  state2 = tmp2;  in WebRtcSpl_DownsampleBy2()
    [all …]
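WebRtcSpl_DownsampleBy2() is a half-band decimator built from two chains of first-order allpass sections; each tmp2/diff/state triple above is one section update. One section in C (the Q16 shift is an assumption standing in for the fixed-point MUL_ACCUM macros):

#include <stdint.h>

/* One allpass section: y[n] = c*(x[n] - y[n-1]) + x[n-1].
   Three sections per branch, then the two branch outputs are
   averaged to form each decimated sample. */
static int32_t allpass_stage(int32_t in, int32_t coef,
                             int32_t *prev_in, int32_t *prev_out) {
  int32_t out = (int32_t)(((int64_t)coef * (in - *prev_out)) >> 16)
                + *prev_in;
  *prev_in = in;     /* x[n-1] for the next sample */
  *prev_out = out;   /* y[n-1] for the next sample */
  return out;
}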