/third_party/flatbuffers/kotlin/flatbuffers-kotlin/src/commonMain/kotlin/com/google/flatbuffers/kotlin/ |
D | Utf8.kt |
     43  utf8Length += 0x7f - c.toInt() ushr 31 // branch free!  in computeEncodedLength()
     64  utf8Length += 0x7f - c.toInt() ushr 31 // branch free!  in encodedLengthGeneral()
    192  Char.MIN_HIGH_SURROGATE - (MIN_SUPPLEMENTARY_CODE_POINT ushr 10) +  in isNotTrailingByte()
    193  (codePoint ushr 10)  in isNotTrailingByte()
    218  out[0] = (0xC0 or (c.toInt() ushr 6)).toByte()  in isNotTrailingByte()
    224  out[0] = (0xE0 or (c.toInt() ushr 12)).toByte()  in isNotTrailingByte()
    225  out[1] = (0x80 or (0x3F and (c.toInt() ushr 6))).toByte()  in isNotTrailingByte()
    237  out[0] = (0xF shl 4 or (codePoint ushr 18)).toByte()  in isNotTrailingByte()
    238  out[1] = (0x80 or (0x3F and (codePoint ushr 12))).toByte()  in isNotTrailingByte()
    239  out[2] = (0x80 or (0x3F and (codePoint ushr 6))).toByte()  in isNotTrailingByte()
    [all …]
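The two "branch free!" hits rely on a sign-bit trick: `0x7f - c.toInt()` goes negative exactly when the char is non-ASCII, and since infix `ushr` binds looser than `-`, the shift by 31 pulls that sign bit down to 0 or 1 without a compare. A minimal Kotlin sketch of the counting step (`utf8EncodedLengthAsciiFast` is a hypothetical name; it covers only 1- and 2-byte chars, not the surrogate handling Utf8.kt does elsewhere):

```kotlin
fun utf8EncodedLengthAsciiFast(s: CharSequence): Int {
    var utf8Length = s.length          // start by assuming 1 byte per char
    for (c in s) {
        // (0x7f - code) is negative iff code > 0x7f; ushr 31 yields the
        // sign bit as 0 or 1, adding the extra byte with no branch.
        utf8Length += (0x7f - c.code) ushr 31
    }
    return utf8Length
}
```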
|
D | FlexBuffers.kt | 834 val mid = low + high ushr 1 in toString()
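This FlexBuffers hit is the classic overflow-safe binary-search midpoint: because infix `ushr` binds looser than `+`, the line computes `(low + high) ushr 1`, and the unsigned shift keeps the result correct even when the sum wraps past `Int.MAX_VALUE` into the sign bit. A sketch:

```kotlin
// If low + high overflows, the sum is negative as a signed Int, but its
// bit pattern is still the true sum; ushr halves it as an unsigned value.
fun midpoint(low: Int, high: Int): Int = (low + high) ushr 1

fun main() {
    println(midpoint(1_500_000_000, 2_000_000_000)) // 1750000000, not negative
}
```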
|
/third_party/openssl/crypto/chacha/asm/arm64/ |
D | chacha-armv8.S |
    417  ushr v1.4s,v20.4s,#20
    419  ushr v5.4s,v21.4s,#20
    421  ushr v17.4s,v22.4s,#20
    441  ushr v3.4s,v20.4s,#24
    443  ushr v7.4s,v21.4s,#24
    445  ushr v19.4s,v22.4s,#24
    465  ushr v1.4s,v20.4s,#25
    467  ushr v5.4s,v21.4s,#25
    469  ushr v17.4s,v22.4s,#25
    519  ushr v1.4s,v20.4s,#20
    [all …]
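These shifts come in shl/ushr/orr triples because NEON has no vector rotate: a left-rotate by n is composed from a left shift by n and an unsigned right shift by 32 - n, so the #20, #24, and #25 counts correspond to ChaCha's quarter-round rotations by 12, 8, and 7. The same composition in scalar Kotlin, as a sketch:

```kotlin
// Rotate a 32-bit lane left by n (assumed 1..31) using only shifts and
// an OR, mirroring the shl/ushr/orr pattern in the assembly.
fun rotl32(x: Int, n: Int): Int = (x shl n) or (x ushr (32 - n))
```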
|
/third_party/openssl/crypto/poly1305/asm/arm64/ |
D | poly1305-armv8.S |
    467  ushr v31.2d,v31.2d,#38
    626  ushr v29.2d,v22.2d,#26
    628  ushr v30.2d,v19.2d,#26
    634  ushr v29.2d,v23.2d,#26
    636  ushr v30.2d,v20.2d,#26
    652  ushr v30.2s,v27.2s,#26
    767  ushr v29.2d,v22.2d,#26
    769  ushr v30.2d,v19.2d,#26
    775  ushr v29.2d,v23.2d,#26
    777  ushr v30.2d,v20.2d,#26
    [all …]
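The repeated `ushr ...,#26` steps are carry propagation: this Poly1305 keeps its 130-bit accumulator in 26-bit limbs, and after each multiply the excess above bit 25 is shifted out and folded into the next limb. One carry pass, sketched with a hypothetical `carry26` helper (the real code also folds the top carry back in modulo 2^130 - 5, which this omits):

```kotlin
// Keep the low 26 bits of each limb and add the shifted-out excess
// into its neighbor, as the ushr #26 / add pairs do per vector lane.
fun carry26(limbs: LongArray) {
    for (i in 0 until limbs.size - 1) {
        val carry = limbs[i] ushr 26
        limbs[i] = limbs[i] and 0x3ffffffL
        limbs[i + 1] += carry
    }
}
```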
|
/third_party/openssl/crypto/sha/asm/arm64/ |
D | sha256-armv8.S |
    1276  ushr v6.4s,v4.4s,#7
    1278  ushr v5.4s,v4.4s,#3
    1285  ushr v7.4s,v4.4s,#18
    1294  ushr v16.4s,v19.4s,#17
    1304  ushr v17.4s,v19.4s,#10
    1307  ushr v7.4s,v19.4s,#19
    1329  ushr v18.4s,v0.4s,#17
    1331  ushr v19.4s,v0.4s,#10
    1336  ushr v17.4s,v0.4s,#19
    1385  ushr v6.4s,v4.4s,#7
    [all …]
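The shift counts are the SHA-256 message-schedule sigmas: sigma0 xors rotations by 7 and 18 with a plain shift by 3, and sigma1 uses 17, 19, and 10; the rotations themselves are built from ushr/shl pairs since NEON lacks a vector rotate. A scalar sketch of the same functions:

```kotlin
// Rotate right composed from shifts, as the vector code does per lane.
fun rotr(x: Int, n: Int): Int = (x ushr n) or (x shl (32 - n))

// FIPS 180-4 message-schedule sigmas, matching the #7/#18/#3 and
// #17/#19/#10 counts in the listing above.
fun sigma0(x: Int): Int = rotr(x, 7) xor rotr(x, 18) xor (x ushr 3)
fun sigma1(x: Int): Int = rotr(x, 17) xor rotr(x, 19) xor (x ushr 10)
```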
|
/third_party/openssl/crypto/aes/asm/arm64/ |
D | vpaes-armv8.S |
    134  ushr v0.16b, v7.16b, #4   // vpsrlb $4, %xmm0, %xmm0
    167  ushr v0.16b, v0.16b, #4   // vpsrlb $4, %xmm0, %xmm0  # 1 = i
    221  ushr v0.16b, v14.16b, #4  // vpsrlb $4, %xmm0, %xmm0
    223  ushr v8.16b, v15.16b, #4
    273  ushr v0.16b, v0.16b, #4   // vpsrlb $4, %xmm0, %xmm0  # 1 = i
    275  ushr v8.16b, v8.16b, #4
    351  ushr v0.16b, v7.16b, #4   // vpsrlb $4, %xmm0, %xmm0
    401  ushr v0.16b, v0.16b, #4   // vpsrlb $4, %xmm0, %xmm0  # 1 = i
    462  ushr v0.16b, v14.16b, #4  // vpsrlb $4, %xmm0, %xmm0
    464  ushr v8.16b, v15.16b, #4
    [all …]
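vpaes uses `ushr #4` to split each byte into nibbles so the AES S-box can be evaluated with small table lookups (`tbl`) instead of a cache-timing-sensitive in-memory table. The byte split, sketched in Kotlin (names are illustrative, not from vpaes):

```kotlin
// Low and high nibble of a byte: the assembly's and-with-0x0F mask
// plus the ushr #4 per lane shown above.
fun loNibble(b: Int): Int = b and 0x0F
fun hiNibble(b: Int): Int = (b and 0xFF) ushr 4
```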
|
/third_party/ltp/tools/sparse/sparse-src/validation/backend/ |
D | bitwise-ops.c | 16 static unsigned int ushr(unsigned int x, unsigned int y) in ushr() function
|
/third_party/mesa3d/src/freedreno/.gitlab-ci/reference/ |
D | afuc_test.asm |
    19  ushr $04, $04, 0x0006
    89  ushr $02, $02, $regdata
|
/third_party/ffmpeg/libavcodec/aarch64/ |
D | hevcdsp_sao_neon.S | 63 ushr v2.8h, v0.8h, #3 // BIT_DEPTH - 3
|
D | vp9lpf_16bpp_neon.S |
     61  ushr v5.8h, v5.8h, #1
    164  ushr \tmp2\().8h, \tmp6\().8h, #1  // (1 << (BIT_DEPTH - 1)) - 1
|
D | vp8dsp_neon.S |
    291  ushr v18.16b, v23.16b, #1  // abs(P1-Q1) / 2
    317  ushr v18.16b, v23.16b, #1  // abs(P1-Q1) / 2
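In both loop filters above, `ushr #1` halves a known-non-negative quantity: the VP8 edge test uses abs(P1-Q1) / 2, and the VP9 16bpp path halves a bit-depth-derived constant. The VP8-style term in scalar Kotlin, as a sketch:

```kotlin
import kotlin.math.abs

// One term of the loop-filter edge test: |p1 - q1| / 2, halved with an
// unsigned shift since the absolute difference has no sign bit to keep.
fun halfAbsDiff(p1: Int, q1: Int): Int = abs(p1 - q1) ushr 1
```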
|
/third_party/openh264/codec/processing/src/arm64/ |
D | down_sample_aarch64_neon.S |
    299  ushr v6.4h, v6.4h, #1
    312  ushr v7.4h, v7.4h, #1
|
/third_party/icu/icu4c/source/test/intltest/ |
D | itrbnf.cpp |
    726  LLAssert(llong(0x7fffa0a0, 0xbcbcdfdf).ushr(16) == llong(0x7fff,0xa0a0bcbc));
    727  LLAssert(llong(0x8000789a, 0xbcde0000).ushr(16) == llong(0x00008000,0x789abcde));
    728  LLAssert(llong(0x80000000, 0).ushr(63) == llong(0, 1));
    729  LLAssert(llong(0x80000000, 0).ushr(47) == llong(0, 0x10000));
    730  …LLAssert(llong(0x80000000, 0x80000000).ushr(64) == llong(0x80000000, 0x80000000)); // only lower 6…
    731  LLAssert(llong(0x80000000, 0).ushr(-1) == llong(0, 1)); // only lower 6 bits are used
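These ICU assertions document that `llong::ushr` masks its shift count to the low six bits, so shifting a 64-bit value by 64 is a no-op and -1 behaves like 63. Kotlin's `Long.ushr` follows the same Java-style rule, which these checks mirror:

```kotlin
fun main() {
    val x = 1L shl 63         // only the top bit set
    check(x ushr 64 == x)     // 64 and 63 == 0: shift count masks to zero
    check(x ushr -1 == 1L)    // -1 and 63 == 63: top bit lands at bit 0
}
```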
|
/third_party/mesa3d/docs/relnotes/ |
D | 10.5.4.rst | 77 - nir: Fix typo in "ushr by 0" algebraic replacement
|
D | 20.0.7.rst | 102 - nir/algebraic: Optimize ushr of pack_half, not ishr
|
D | 18.1.8.rst | 124 - nir/algebraic: Be more careful converting ushr to extract_u8/16
|
/third_party/openssl/crypto/chacha/asm/ |
D | chacha-armv8.pl |
    360  "&ushr ('$b','$t',20)",
    365  "&ushr ('$d','$t',24)",
    370  "&ushr ('$b','$t',25)",
|
/third_party/mesa3d/src/gallium/drivers/etnaviv/ |
D | etnaviv_compiler_nir_emit.c | 122 UOP(ushr, RSHIFT, 0_X_1),
|
/third_party/openssl/crypto/modes/asm/arm64/ |
D | ghashv8-armx.S |
    14  ushr v18.2d,v19.2d,#63
    17  ushr v18.2d,v3.2d,#63
|
/third_party/mesa3d/src/compiler/nir/ |
D | nir_opt_algebraic.py |
    381  ushr = "ushr@{}".format(s)  variable
    387  ((ushr, (ushr, a, '#b'), '#c'), ('bcsel', in_bounds, (ushr, a, ('iadd', b, c)), 0)),
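This nir_opt_algebraic rule folds a chain of two constant unsigned shifts into one, selecting 0 when the combined count would shift out the whole value. The same transform for 32-bit values, sketched (b and c assumed non-negative constants, as the rule's `#b`/`#c` markers require):

```kotlin
// (a ushr b) ushr c  ==  a ushr (b + c)   when b + c < 32
//                    ==  0                otherwise (Kotlin's ushr would
//                                          wrap the count, hence the test)
fun foldUshrChain(a: Int, b: Int, c: Int): Int =
    if (b + c < 32) a ushr (b + c) else 0
```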
|
D | nir_lower_int64.c | 718 COND_LOWER_CAST(b, u2u32, COND_LOWER_OP(b, ushr, x, discard)); in lower_2f()
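nir_lower_int64.c rewrites 64-bit operations, `ushr` among them, in terms of 32-bit ones for hardware without native int64. The standard word split for an unsigned 64-bit shift, sketched for counts strictly between 0 and 32 (a hypothetical helper, not the nir code itself):

```kotlin
// lo/hi are the 32-bit halves of the 64-bit value; the bits shifted
// out of hi refill the top of lo.
fun ushr64(lo: Int, hi: Int, n: Int): Pair<Int, Int> {
    require(n in 1..31)
    return Pair((lo ushr n) or (hi shl (32 - n)), hi ushr n)
}
```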
|
/third_party/mesa3d/src/freedreno/afuc/ |
D | README.rst | 76 - ``ushr`` - unsigned shift-right
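As the afuc README says, `ushr` is the unsigned (zero-filling) shift-right, in contrast to the arithmetic shift that sign-extends. The difference only shows on values with the top bit set:

```kotlin
fun main() {
    check(-8 ushr 1 == 0x7FFFFFFC)  // zero-fills the vacated sign bit
    check(-8 shr 1 == -4)           // arithmetic shift sign-extends
}
```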
|
/third_party/skia/third_party/externals/swiftshader/src/Shader/ |
D | ShaderCore.hpp | 264 void ushr(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
|
/third_party/mesa3d/src/intel/common/tests/ |
D | mi_builder_test.cpp | 733 TEST_F(mi_builder_test, ushr) in TEST_F() argument
|
/third_party/openh264/codec/encoder/core/arm64/ |
D | reconstruct_aarch64_neon.S | 41 ushr \arg0\().16b, \arg0\().16b, 7
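The reconstruct macro's `ushr ...,16b, 7` keeps only bit 7 of each byte lane, turning every element into 0 or 1; a common way to materialize a per-element sign flag. The per-byte equivalent in Kotlin, as a sketch:

```kotlin
// Extract a byte's sign bit as 0 or 1, matching the per-lane ushr #7.
fun signBitOf(b: Byte): Int = (b.toInt() and 0xFF) ushr 7
```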
|