/external/svox/pico_resources/tools/LingwareBuilding/PicoLingware_tools_windows/tools/ |
D | symshift.pl |
    203  $shifted = $id + $plane * 256;
    205  if ($shifted{$sym}) {
    206  $otherplane = int($shifted{$sym} / 256);
    209  $shifted{$sym} = $shifted;
    210  $sym{$shifted} = $sym;
    211  $intable{$table}{$shifted}++;
    227  $shifted = $id + $plane * 256;
    228  $shifted{$sym} = $shifted unless $shifted{$sym};
    229  $sym{$shifted} = $sym unless $sym{$shifted};
    230  $intable{$table}{$shifted}++;
    [all …]
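The arithmetic in these matches packs an 8-bit symbol id and a plane number into a single table key. A minimal C sketch of the same encoding (function names are hypothetical):

    #include <stdint.h>

    /* Pack a symbol id and its plane into one key, as in
     * $shifted = $id + $plane * 256. */
    static uint32_t shift_symbol(uint32_t id, uint32_t plane) {
        return id + plane * 256;
    }

    /* Recover the plane, as in int($shifted{$sym} / 256). */
    static uint32_t plane_of(uint32_t shifted) {
        return shifted / 256;
    }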
|
/external/llvm/test/CodeGen/SystemZ/ |
D | insert-05.ll |
    90   %shifted = lshr i64 %a, 1
    91   %and = and i64 %shifted, 18446744069414584320
    102  %shifted = lshr i64 %a, 1
    103  %and = and i64 %shifted, 18446744069414584320
    114  %shifted = lshr i64 %a, 1
    115  %and = and i64 %shifted, 9223372032559808512
    126  %shifted = lshr i64 %a, 1
    127  %and = and i64 %shifted, 9223372032559808512
    172  %shifted = shl i64 %a, 1
    173  %and = and i64 %shifted, 4294967295
    [all …]
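The decimal mask constants above are easier to read in hex: 18446744069414584320 is 0xFFFFFFFF00000000, 9223372032559808512 is 0x7FFFFFFF00000000, and 4294967295 is 0xFFFFFFFF. A C sketch of the first shift-then-mask pattern:

    #include <stdint.h>

    /* Logical shift right by 1, then keep only the high 32 bits;
     * 18446744069414584320 == 0xFFFFFFFF00000000. */
    static uint64_t shift_then_mask_high(uint64_t a) {
        uint64_t shifted = a >> 1;
        return shifted & 0xFFFFFFFF00000000ULL;
    }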
|
D | rnsbg-01.ll |
    92   ; Test a case with a left shift and OR, where the OR covers all shifted bits.
    116  ; shifted bits. We can't use RNSBG for the shift, but we can for the OR
    141  ; Test a case with a right shift and OR, where the OR covers all the shifted
    165  ; shifted bits. The shift needs to be done separately, but the OR and AND
    245  ; Test a case with a shift, OR, and rotate where the OR covers all shifted bits.
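RNSBG is the SystemZ rotate-then-AND-selected-bits instruction these tests target. A C sketch of the fused operation, assuming that semantics (mask and rotation amount chosen arbitrarily for illustration):

    #include <stdint.h>

    /* AND a rotated source into dst, but only under the selected
     * bit range; bits outside the mask are left unchanged. */
    static uint64_t rotate_then_and(uint64_t dst, uint64_t src) {
        const uint64_t mask = 0xFFFF000000000000ULL; /* selected bits */
        uint64_t rotated = (src << 8) | (src >> 56); /* rotate left 8 */
        return dst & (rotated | ~mask);
    }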
|
D | risbg-01.ll |
    6    ; Test an extraction of bit 0 from a right-shifted value.
    26   ; Test an extraction of other bits from a right-shifted value.
    46   ; Test an extraction of most bits from a right-shifted value.
    90   ; Test an extraction of bits from a left-shifted value. The range should
    257  ; Check that we use RISBG for shifted values even if the AND is a
    433  ; Try a similar thing in which no shifted sign bits are kept.
    472  ; when testing whether the shifted-in bits of the shift right were significant.
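RISBG (rotate then insert selected bits) lets the backend fold a shift and a bit-isolating AND into one instruction. The simplest case above, written out in C:

    #include <stdint.h>

    /* Extract the bit at position `amount` by shifting it down to
     * bit 0 and masking, the pattern RISBG can absorb. */
    static uint64_t bit_from_shifted(uint64_t a, unsigned amount) {
        return (a >> amount) & 1;
    }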
|
/external/llvm/test/CodeGen/AArch64/ |
D | bitfield.ll |
    200  %shifted = lshr i32 %fields, 23
    201  %masked = and i32 %shifted, 7
    209  %shifted = lshr i64 %fields, 25
    210  %masked = and i64 %shifted, 1023
    219  %shifted = shl i32 %fields, 23
    220  %extended = ashr i32 %shifted, 29
    229  %shifted = shl i64 %fields, 1
    230  %extended = ashr i64 %shifted, 1
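These are the unsigned and signed bitfield-extract idioms (AArch64 UBFX and SBFX). A C sketch of both, with the field positions taken from the snippet:

    #include <stdint.h>

    /* Unsigned extract: lshr + and, a 3-bit field at bit 23 (UBFX). */
    static uint32_t extract_unsigned(uint32_t fields) {
        return (fields >> 23) & 7;
    }

    /* Signed extract: shl + ashr, a 3-bit field at bit 6 (SBFX).
     * Right-shifting a negative int is arithmetic on mainstream
     * compilers, which is what the idiom relies on. */
    static int32_t extract_signed(uint32_t fields) {
        return (int32_t)(fields << 23) >> 29;
    }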
|
D | arm64-misched-forwarding-A53.ll | 4 ; For Cortex-A53, shiftable operands that are not actually shifted
|
D | addsub.ll |
    55  ; Add 12-bit immediates, shifted left by 12 bits
    89  ; Subtract 12-bit immediates, shifted left by 12 bits
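AArch64 ADD/SUB immediates are 12 bits wide with an optional left shift by 12, so constants up to 0xFFF000 still encode in one instruction. A C illustration (the constant is an example, not taken from the test):

    #include <stdint.h>

    /* One ADD with a shifted immediate: ADD x0, x0, #0xfff, LSL #12 */
    static uint64_t add_shifted_imm(uint64_t a) {
        return a + (0xfffULL << 12);
    }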
|
/external/mesa3d/src/gallium/auxiliary/gallivm/ |
D | lp_bld_format_aos.c |
    154  LLVMValueRef shifted, casted, scaled, masked;  in lp_build_unpack_arith_rgba_aos() local
    229  shifted = LLVMBuildLShr(builder, packed, LLVMConstVector(shifts, 4), "");  in lp_build_unpack_arith_rgba_aos()
    230  masked = LLVMBuildAnd(builder, shifted, LLVMConstVector(masks, 4), "");  in lp_build_unpack_arith_rgba_aos()
    271  LLVMValueRef shifted, casted, scaled, unswizzled;  in lp_build_pack_rgba_aos() local
    335  shifted = LLVMBuildShl(builder, casted, LLVMConstVector(shifts, 4), "");  in lp_build_pack_rgba_aos()
    340  LLVMValueRef component = LLVMBuildExtractElement(builder, shifted,  in lp_build_pack_rgba_aos()
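The unpack path shifts each channel down and masks it off; the pack path does the reverse. A scalar C model of the unpack step (the generated code does all four lanes in one vector operation, and the shift amounts here assume a plain RGBA8 layout):

    #include <stdint.h>

    /* Shift each byte-sized channel down to bit 0, then mask. */
    static void unpack_rgba8(uint32_t packed, uint8_t rgba[4]) {
        static const unsigned shifts[4] = { 0, 8, 16, 24 };
        for (int i = 0; i < 4; i++)
            rgba[i] = (uint8_t)((packed >> shifts[i]) & 0xff);
    }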
|
D | lp_bld_swizzle.c |
    403  LLVMValueRef shifted;  in lp_build_swizzle_aos() local
    411  shifted = LLVMBuildShl(builder, masked,  in lp_build_swizzle_aos()
    414  shifted = LLVMBuildLShr(builder, masked,  in lp_build_swizzle_aos()
    417  shifted = masked;  in lp_build_swizzle_aos()
    420  res = LLVMBuildOr(builder, res, shifted, "");  in lp_build_swizzle_aos()
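Each channel is moved left or right depending on where the swizzle sends it, then ORed into the accumulated result. A scalar C model of that placement step:

    #include <stdint.h>

    /* Place a masked channel at its destination: shift left for a
     * positive displacement, right for a negative one, pass through
     * when it stays put, then merge into the running result. */
    static uint32_t place_channel(uint32_t res, uint32_t masked, int shift) {
        uint32_t shifted;
        if (shift > 0)
            shifted = masked << shift;
        else if (shift < 0)
            shifted = masked >> -shift;
        else
            shifted = masked;
        return res | shifted;
    }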
|
D | lp_bld_conv.c |
    113  …LLVMValueRef shifted = LLVMBuildBitCast(builder, LLVMBuildShl(builder, expmant, i32_13, ""),…  in lp_build_half_to_float() local
    116  …LLVMValueRef scaled = LLVMBuildBitCast(builder, LLVMBuildFMul(builder, shifted, f32_magic, …  in lp_build_half_to_float()
    591  LLVMValueRef shifted;  in lp_build_conv() local
    594  shifted = LLVMBuildAShr(builder, tmp[i], shift, "");  in lp_build_conv()
    596  shifted = LLVMBuildLShr(builder, tmp[i], shift, "");  in lp_build_conv()
    598  tmp[i] = LLVMBuildSub(builder, tmp[i], shifted, "");  in lp_build_conv()
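lp_build_half_to_float uses the shift-and-scale trick visible in the matches: move the half's exponent and mantissa into float position with a shift left by 13, then fix the exponent bias with a single multiply by a magic constant. A scalar C sketch of the same trick (Inf/NaN handling omitted for brevity):

    #include <stdint.h>
    #include <string.h>

    static float half_to_float(uint16_t h) {
        uint32_t bits = (uint32_t)(h & 0x7fff) << 13; /* expmant << 13 */
        uint32_t sign = (uint32_t)(h & 0x8000) << 16;
        uint32_t magic_bits = (254 - 15) << 23;       /* bias adjust */
        float f, magic;
        memcpy(&f, &bits, sizeof f);
        memcpy(&magic, &magic_bits, sizeof magic);
        f *= magic;                   /* rescale 2^(e-15) vs 2^(e-127) */
        memcpy(&bits, &f, sizeof bits);
        bits |= sign;
        memcpy(&f, &bits, sizeof f);
        return f;
    }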
|
/external/llvm/test/CodeGen/X86/ |
D | trunc-to-bool.ll |
    15  %shifted = ashr i32 %val, %mask
    16  %anded = and i32 %shifted, 1
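The IR tests a single bit selected by a variable amount. In C:

    #include <stdint.h>

    /* Shift the wanted bit down to position 0 and mask it off. */
    static int bit_test(int32_t val, int32_t bit) {
        return (val >> bit) & 1;
    }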
|
D | sext-load.ll | 17 ; preserved even when removing shifted-out low bits.
|
/external/icu/icu4c/source/data/coll/ |
D | th.txt | 15 "[alternate shifted]"
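[alternate shifted] makes variable characters such as spaces and punctuation ignorable at the primary strength. A sketch of the equivalent runtime setting through the ICU4C API (error handling trimmed):

    #include <unicode/ucol.h>

    /* Open a Thai collator and force shifted alternate handling,
     * mirroring the [alternate shifted] line in th.txt. */
    UCollator *open_shifted_collator(void) {
        UErrorCode status = U_ZERO_ERROR;
        UCollator *coll = ucol_open("th", &status);
        if (U_SUCCESS(status))
            ucol_setAttribute(coll, UCOL_ALTERNATE_HANDLING,
                              UCOL_SHIFTED, &status);
        return coll;
    }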
|
/external/libavc/common/arm/ |
D | ih264_intra_pred_luma_8x8_a9q.s |
    468  @ q1 = q0 shifted to left once
    469  @ q2 = q1 shifted to left once
    560  @ q1 = q0 shifted to left once
    561  @ q2 = q1 shifted to left once
    650  @ q1 = q0 shifted to left once
    651  @ q2 = q1 shifted to left once
    770  @ q1 = q0 shifted to left once
    771  @ q2 = q1 shifted to left once
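The comments describe building each successive vector as the previous one shifted left by one lane. On NEON that is usually the vext idiom; a hedged C intrinsics sketch (whether the real kernel wraps around, as here, or shifts in bytes from a neighboring register depends on the surrounding assembly):

    #include <arm_neon.h>

    /* "q1 = q0 shifted to left once": take 16 bytes starting one
     * lane into the concatenation of q0 with itself. */
    static uint8x16_t shift_left_one(uint8x16_t q0) {
        return vextq_u8(q0, q0, 1);
    }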
|
/external/skia/src/opts/ |
D | SkBitmapProcState_opts_SSE2.cpp |
    104  __m128i shifted = _mm_shuffle_epi32(sum, 0xEE);  in S32_opaque_D32_filter_DX_SSE2() local
    107  sum = _mm_add_epi16(sum, shifted);  in S32_opaque_D32_filter_DX_SSE2()
    214  __m128i shifted = _mm_shuffle_epi32(sum, 0xEE);  in S32_alpha_D32_filter_DX_SSE2() local
    217  sum = _mm_add_epi16(sum, shifted);  in S32_alpha_D32_filter_DX_SSE2()
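The shuffle immediate 0xEE selects lanes (3,2,3,2), copying the high 64 bits of sum into both halves, so the following add folds the upper lanes onto the lower ones, one step of a horizontal reduction. As a C helper:

    #include <emmintrin.h>

    /* Duplicate the high 64 bits of sum into the low half, then add
     * so the upper lanes collapse onto the lower ones. */
    static __m128i fold_high_half(__m128i sum) {
        __m128i shifted = _mm_shuffle_epi32(sum, 0xEE);
        return _mm_add_epi16(sum, shifted);
    }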
|
/external/autotest/client/deps/glbench/src/ |
D | yuv2rgb_1.glslf |
    123  * and shifted down 2/3 to map from the U texels, and scaled by 1/6
    124  * and shifted down 5/6 to map from the V texels. To map from U or V
    125  * texels the 'x' coordinate is scaled by 1/2 always and shifted right
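The comment describes remapping one set of texture coordinates onto the U and V regions of a single planar texture. A hypothetical C model of the mapping as far as the match quotes it (the truncated x adjustment for the right-hand plane is omitted):

    /* U plane: y scaled by 1/6, shifted down 2/3; V plane: y scaled
     * by 1/6, shifted down 5/6; x halved for both chroma planes. */
    static void chroma_coords(float x, float y,
                              float *ux, float *uy,
                              float *vx, float *vy) {
        *ux = x * 0.5f;  *uy = y / 6.0f + 2.0f / 3.0f;
        *vx = x * 0.5f;  *vy = y / 6.0f + 5.0f / 6.0f;
    }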
|
/external/icu/icu4c/source/test/perf/collationperf/ |
D | Makefile.in | 58 … ./$(TARGET) -loop 200 -file $(top_srcdir)/extra/uconv/samples/utf8/utf-8-demo.txt -keygen -shifted
|
/external/proguard/docs/manual/ |
D | style.css | 48 .shifted li
|
/external/icu/icu4j/main/tests/collate/src/com/ibm/icu/dev/data/ |
D | collationtest.txt |
    702   % alternate=shifted
    707   = y  # alternate=shifted removes the tailoring difference on the last CE
    1102  % alternate=shifted
    1615  # ignorable when following a shifted code point.
    1617  % alternate=shifted
    1639  # ignorable when following a shifted code point.
    2189  [maxVariable space][alternate shifted]
    2201  % alternate=shifted
    2305  ** test: shifted+reordering, ICU ticket 9507
    2308  % alternate=shifted
    [all …]
|
/external/icu/icu4c/source/test/testdata/ |
D | collationtest.txt |
    702   % alternate=shifted
    707   = y  # alternate=shifted removes the tailoring difference on the last CE
    1102  % alternate=shifted
    1615  # ignorable when following a shifted code point.
    1617  % alternate=shifted
    1639  # ignorable when following a shifted code point.
    2189  [maxVariable space][alternate shifted]
    2201  % alternate=shifted
    2305  ** test: shifted+reordering, ICU ticket 9507
    2308  % alternate=shifted
    [all …]
|
/external/icu/android_icu4j/src/main/tests/android/icu/dev/data/ |
D | collationtest.txt |
    702   % alternate=shifted
    707   = y  # alternate=shifted removes the tailoring difference on the last CE
    1102  % alternate=shifted
    1615  # ignorable when following a shifted code point.
    1617  % alternate=shifted
    1639  # ignorable when following a shifted code point.
    2189  [maxVariable space][alternate shifted]
    2201  % alternate=shifted
    2305  ** test: shifted+reordering, ICU ticket 9507
    2308  % alternate=shifted
    [all …]
|
/external/llvm/test/Transforms/InstCombine/ |
D | lshr-phi.ll | 6 ; bits in the operand which might be non-zero will be shifted
|
/external/mesa3d/src/gallium/state_trackers/vega/ |
D | bezier.c |
    331  struct bezier *shifted,  in shift() argument
    384  bezier_init(shifted, l[0], l[1],  in shift()
    436  bezier_init2v(shifted,  in shift()
    440  return good_offset(orig, shifted, offset, threshold);  in shift()
|
/external/llvm/test/MC/Disassembler/AArch64/ |
D | arm64-basic-a64-undefined.txt | 16 # ADD/SUB (shifted register) are reserved if shift == '11' or sf == '0' and imm6<5> == '1'.
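That reservation rule reads directly as a decoder predicate. In C:

    #include <stdbool.h>
    #include <stdint.h>

    /* ADD/SUB (shifted register) is reserved when shift == 0b11, or
     * when sf == 0 and bit 5 of imm6 is set. */
    static bool addsub_shifted_reg_reserved(uint32_t shift, uint32_t sf,
                                            uint32_t imm6) {
        return shift == 3 || (sf == 0 && (imm6 & 0x20) != 0);
    }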
|
/external/clang/lib/CodeGen/ |
D | README.txt | 14 Bitfields accesses can be shifted to simplify masking and sign
|