/external/okio/okio/src/hashFunctions/kotlin/okio/internal/ |
D | Sha512.kt |
    183 (a shr 56).toByte(), in digest()
    184 (a shr 48).toByte(), in digest()
    185 (a shr 40).toByte(), in digest()
    186 (a shr 32).toByte(), in digest()
    187 (a shr 24).toByte(), in digest()
    188 (a shr 16).toByte(), in digest()
    189 (a shr 8).toByte(), in digest()
    191 (b shr 56).toByte(), in digest()
    192 (b shr 48).toByte(), in digest()
    193 (b shr 40).toByte(), in digest()
    [all …]
|
D | Sha256.kt |
    188 (a shr 24).toByte(), in digest()
    189 (a shr 16).toByte(), in digest()
    190 (a shr 8).toByte(), in digest()
    192 (b shr 24).toByte(), in digest()
    193 (b shr 16).toByte(), in digest()
    194 (b shr 8).toByte(), in digest()
    196 (c shr 24).toByte(), in digest()
    197 (c shr 16).toByte(), in digest()
    198 (c shr 8).toByte(), in digest()
    200 (d shr 24).toByte(), in digest()
    [all …]
|
D | Sha1.kt |
    168 (a shr 24).toByte(), in digest()
    169 (a shr 16).toByte(), in digest()
    170 (a shr 8).toByte(), in digest()
    172 (b shr 24).toByte(), in digest()
    173 (b shr 16).toByte(), in digest()
    174 (b shr 8).toByte(), in digest()
    176 (c shr 24).toByte(), in digest()
    177 (c shr 16).toByte(), in digest()
    178 (c shr 8).toByte(), in digest()
    180 (d shr 24).toByte(), in digest()
    [all …]
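These okio matches are the tail of each digest() function, which serializes the hash state words into the output byte array most-significant byte first. A minimal Kotlin sketch of that big-endian pattern (illustrative only; writeIntBe is a hypothetical helper, not okio's actual code):

    // Big-endian serialization of one 32-bit state word, as in the digest() matches above.
    // Hypothetical helper for illustration, not part of okio.
    fun writeIntBe(word: Int): ByteArray = byteArrayOf(
      (word shr 24).toByte(),  // most significant byte
      (word shr 16).toByte(),
      (word shr 8).toByte(),
      word.toByte(),           // least significant byte
    )

Sha512.kt does the same with 64-bit words, which is why its shifts run from 56 down to 8.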
|
/external/llvm/test/Transforms/InstSimplify/ |
D | shr-nop.ll |
    20 %shr = lshr exact i8 0, %a
    21 %cmp = icmp eq i8 %shr, 0
    29 %shr = ashr exact i8 0, %a
    30 %cmp = icmp eq i8 %shr, 0
    38 %shr = ashr i8 0, %a
    39 %cmp = icmp eq i8 %shr, 0
    47 %shr = lshr exact i8 0, %a
    48 %cmp = icmp ne i8 %shr, 0
    56 %shr = ashr exact i8 0, %a
    57 %cmp = icmp ne i8 %shr, 0
    [all …]
|
/external/llvm-project/llvm/test/Transforms/InstSimplify/ |
D | shr-nop.ll |
    20 %shr = lshr exact i8 0, %a
    21 %cmp = icmp eq i8 %shr, 0
    29 %shr = ashr exact i8 0, %a
    30 %cmp = icmp eq i8 %shr, 0
    38 %shr = ashr i8 0, %a
    39 %cmp = icmp eq i8 %shr, 0
    47 %shr = lshr exact i8 0, %a
    48 %cmp = icmp ne i8 %shr, 0
    56 %shr = ashr exact i8 0, %a
    57 %cmp = icmp ne i8 %shr, 0
    [all …]
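Both copies of shr-nop.ll exercise InstSimplify on shifts of a zero constant: zero shifted right by any amount is still zero, so the following icmp folds to a constant. A trivial Kotlin check of the same property (illustrative, not the LLVM test itself):

    // Zero shifted right by any amount stays zero, for both arithmetic (shr)
    // and logical (ushr) shifts; Kotlin masks Int shift amounts to 0..31.
    fun shiftOfZeroIsZero(a: Int): Boolean = (0 shr a) == 0 && (0 ushr a) == 0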
|
/external/llvm/test/Transforms/InstCombine/ |
D | icmp-shr.ll |
    8 %shr = lshr i8 127, %a
    9 %cmp = icmp eq i8 %shr, 0
    16 %shr = ashr i8 127, %a
    17 %cmp = icmp eq i8 %shr, 0
    24 %shr = lshr i8 127, %a
    25 %cmp = icmp ne i8 %shr, 0
    32 %shr = ashr i8 127, %a
    33 %cmp = icmp ne i8 %shr, 0
    40 %shr = ashr i8 128, %a
    41 %cmp = icmp eq i8 %shr, 128
    [all …]
|
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | icmp-shr.ll |
    11 %shr = lshr i8 127, %a
    12 %cmp = icmp eq i8 %shr, 0
    21 %shr = lshr <2 x i8> <i8 127, i8 127>, %a
    22 %cmp = icmp eq <2 x i8> %shr, zeroinitializer
    31 %shr = ashr i8 127, %a
    32 %cmp = icmp eq i8 %shr, 0
    41 %shr = lshr i8 127, %a
    42 %cmp = icmp ne i8 %shr, 0
    51 %shr = ashr i8 127, %a
    52 %cmp = icmp ne i8 %shr, 0
    [all …]
|
D | sub-ashr-and-to-icmp-select.ll |
    20 %shr = ashr i8 %sub, 7
    21 %and = and i8 %shr, %x
    33 %shr = ashr i16 %sub, 15
    34 %and = and i16 %shr, %x
    45 %shr = ashr i32 %sub, 31
    46 %and = and i32 %shr, %x
    57 %shr = ashr i64 %sub, 63
    58 %and = and i64 %shr, %x
    71 %shr = ashr i32 %sub, 31
    72 %and = and i32 %shr, %x
    [all …]
|
D | sub-ashr-or-to-icmp-select.ll |
    21 %shr = ashr i32 %sub, 31
    22 %or = or i32 %shr, %x
    34 %shr = ashr i8 %sub, 7
    35 %or = or i8 %shr, %x
    46 %shr = ashr i16 %sub, 15
    47 %or = or i16 %shr, %x
    58 %shr = ashr i32 %sub, 31
    59 %or = or i32 %shr, %x
    70 %shr = ashr i64 %sub, 63
    71 %or = or i64 %shr, %x
    [all …]
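These two files test InstCombine's rewrite of "subtract, arithmetic-shift by the sign-bit position, then and/or with x" into a compare plus select. The underlying identity: an arithmetic right shift by bit-width minus one yields all ones for a negative value and zero otherwise. A hedged Kotlin illustration of the and-variant (variable names are illustrative):

    // For 32-bit Int, (d shr 31) is -1 (all ones) when d < 0 and 0 otherwise,
    // so masking with x yields either x or 0 -- the same result as the select form.
    fun maskBySign(d: Int, x: Int): Int = (d shr 31) and x
    fun maskBySignAsSelect(d: Int, x: Int): Int = if (d < 0) x else 0
    // maskBySign(d, x) == maskBySignAsSelect(d, x) for all d and x.

The or-variant works the same way, producing either all ones or x.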
|
/external/okio/okio/src/commonMain/kotlin/okio/ |
D | -Util.kt |
    68 internal inline infix fun Byte.shr(other: Int): Int = toInt() shr other in shr() method
    106 result[0] = HEX_DIGIT_CHARS[this shr 4 and 0xf] in toHexString()
    115 result[0] = HEX_DIGIT_CHARS[this shr 28 and 0xf] in toHexString()
    116 result[1] = HEX_DIGIT_CHARS[this shr 24 and 0xf] in toHexString()
    117 result[2] = HEX_DIGIT_CHARS[this shr 20 and 0xf] in toHexString()
    118 result[3] = HEX_DIGIT_CHARS[this shr 16 and 0xf] in toHexString()
    119 result[4] = HEX_DIGIT_CHARS[this shr 12 and 0xf] in toHexString()
    120 result[5] = HEX_DIGIT_CHARS[this shr 8 and 0xf] // ktlint-disable no-multi-spaces in toHexString()
    121 result[6] = HEX_DIGIT_CHARS[this shr 4 and 0xf] // ktlint-disable no-multi-spaces in toHexString()
    138 result[ 0] = HEX_DIGIT_CHARS[(this shr 60 and 0xf).toInt()] // ktlint-disable no-multi-spaces in toHexString()
    [all …]
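The toHexString() matches in -Util.kt emit one hex digit per 4-bit nibble, walking from the most significant nibble down with shr and a 0xf mask. A small sketch of the same nibble extraction (HEX_DIGITS is an illustrative stand-in for okio's HEX_DIGIT_CHARS table, and byteToHex is not an okio function):

    private val HEX_DIGITS = "0123456789abcdef".toCharArray()

    // Extract the high and low nibble of a byte value with shr/and, as in the matches above.
    fun byteToHex(b: Int): String {
      val hi = HEX_DIGITS[b shr 4 and 0xf]  // high nibble
      val lo = HEX_DIGITS[b and 0xf]        // low nibble
      return "$hi$lo"
    }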
|
D | -Base64.kt |
    83 out[outCount++] = (word shr 16).toByte() in decodeBase64ToArray()
    84 out[outCount++] = (word shr 8).toByte() in decodeBase64ToArray()
    98 out[outCount++] = (word shr 16).toByte() in decodeBase64ToArray()
    103 out[outCount++] = (word shr 16).toByte() in decodeBase64ToArray()
    104 out[outCount++] = (word shr 8).toByte() in decodeBase64ToArray()
    125 out[index++] = map[(b0 and 0xff shr 2)] in encodeBase64()
    126 out[index++] = map[(b0 and 0x03 shl 4) or (b1 and 0xff shr 4)] in encodeBase64()
    127 out[index++] = map[(b1 and 0x0f shl 2) or (b2 and 0xff shr 6)] in encodeBase64()
    133 out[index++] = map[b0 and 0xff shr 2] in encodeBase64()
    141 out[index++] = map[(b0 and 0xff shr 2)] in encodeBase64()
    [all …]
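The encodeBase64() matches pack three input bytes into a 24-bit word and peel off four 6-bit indices with shl/shr; decodeBase64ToArray() reverses this, emitting bytes with shr 16 and shr 8. A minimal sketch of the encoding step, assuming the standard 64-character alphabet (not okio's actual implementation):

    private const val BASE64_ALPHABET =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"

    // Pack three bytes into one 24-bit word, then slice it into four 6-bit indices.
    fun encodeGroup(b0: Int, b1: Int, b2: Int): String {
      val word = (b0 and 0xff shl 16) or (b1 and 0xff shl 8) or (b2 and 0xff)
      return buildString {
        append(BASE64_ALPHABET[word shr 18 and 0x3f])
        append(BASE64_ALPHABET[word shr 12 and 0x3f])
        append(BASE64_ALPHABET[word shr 6 and 0x3f])
        append(BASE64_ALPHABET[word and 0x3f])
      }
    }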
|
/external/llvm-project/llvm/test/CodeGen/Mips/ |
D | mips64shift.ll |
    13 %shr = ashr i64 %a0, %a1
    14 ret i64 %shr
    20 %shr = lshr i64 %a0, %a1
    21 ret i64 %shr
    34 %shr = ashr i64 %a0, 10
    35 ret i64 %shr
    41 %shr = lshr i64 %a0, 10
    42 ret i64 %shr
    55 %shr = ashr i64 %a0, 40
    56 ret i64 %shr
    [all …]
|
/external/llvm/test/CodeGen/X86/ |
D | dagcombine-shifts.ll |
    14 %shr = lshr i8 %v, 4
    15 %ext = zext i8 %shr to i16
    22 ; CHECK-NOT: shr
    28 %shr = lshr i8 %v, 4
    29 %ext = zext i8 %shr to i32
    36 ; CHECK-NOT: shr
    42 %shr = lshr i16 %v, 4
    43 %ext = zext i16 %shr to i32
    50 ; CHECK-NOT: shr
    56 %shr = lshr i8 %v, 4
    [all …]
|
D | shift-combine.ll |
    25 %shr = ashr exact i32 %sub, 3
    26 %gep = getelementptr inbounds i32, i32* %x, i32 %shr
    35 %shr = ashr exact i32 %sub, 3
    36 %gep = getelementptr inbounds i32, i32* %x, i32 %shr
    45 %shr = ashr exact i32 %sub, 2
    46 %gep = getelementptr inbounds i32, i32* %x, i32 %shr
    55 %shr = lshr exact i32 %sub, 3
    56 %gep = getelementptr inbounds i32, i32* %x, i32 %shr
    65 %shr = lshr exact i32 %sub, 3
    66 %gep = getelementptr inbounds i32, i32* %x, i32 %shr
    [all …]
|
/external/llvm/test/CodeGen/Mips/ |
D | mips64shift.ll |
    14 %shr = ashr i64 %a0, %a1
    15 ret i64 %shr
    21 %shr = lshr i64 %a0, %a1
    22 ret i64 %shr
    35 %shr = ashr i64 %a0, 10
    36 ret i64 %shr
    42 %shr = lshr i64 %a0, 10
    43 ret i64 %shr
    56 %shr = ashr i64 %a0, 40
    57 ret i64 %shr
    [all …]
|
/external/mesa3d/src/intel/tools/tests/gen7.5/ |
D | shr.asm |
    1 shr(1) g11<1>UD g11<0,1,0>UD 0x00000010UD { align1 1N };
    2 shr(8) g20<1>UD g19<8,8,1>UD 0x00000001UD { align1 1Q };
    3 shr(16) g88<1>UD g86<8,8,1>UD 0x00000001UD { align1 1H };
    4 shr(8) g10<1>.xyzUD g1<0>.xyzzUD g1.4<0>.xyzzUD { align16 1Q };
    5 shr(8) g3<1>UD g2<0,1,0>UD g2.2<0,1,0>UD { align1 1Q };
    6 shr(16) g3<1>UD g2<0,1,0>UD g2.2<0,1,0>UD { align1 1H };
    7 shr(8) g4<1>.yUD g1<0>.xUD 0x00000010UD { align16 NoDDChk 1Q };
    8 shr(1) g29<1>UD g29<0,1,0>UD 5D { align1 WE_all 1N };
    9 shr(8) g8<1>.xUD g7<4>.xUD 0x00000001UD { align16 1Q };
    10 shr(8) g19<2>UW g5<8,8,1>UD g4<8,8,1>UW { align1 1Q };
    [all …]
|
/external/mesa3d/src/intel/tools/tests/gen6/ |
D | shr.asm |
    1 shr(8) m18<1>D g25<4>.xUD 4D { align16 1Q };
    2 shr(8) g13<1>UD g12<8,8,1>UD 0x00000001UD { align1 1Q };
    3 shr(16) g19<1>UD g17<8,8,1>UD 0x00000001UD { align1 1H };
    4 shr(1) g22<1>UD g22<0,1,0>UD 5D { align1 WE_all 1N };
    5 shr(8) g34<1>UD g3<0>UD g1<0>.yUD { align16 1Q };
    6 shr(8) g3<1>.xUD g3<4>.xUD 0x00000001UD { align16 1Q };
    7 shr(8) g28<1>UD g3.5<0,1,0>UD g4.1<0,1,0>UD { align1 1Q };
    8 shr(16) g48<1>UD g3.5<0,1,0>UD g4.1<0,1,0>UD { align1 1H };
    9 shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | acle-intrinsics-rot.ll |
    11 %shr.i = lshr i32 %a, 8
    13 %or.i = or i32 %shl.i, %shr.i
    22 %shr.i = lshr i32 %a, 16
    24 %or.i = or i32 %shl.i, %shr.i
    33 %shr.i = lshr i32 %a, 24
    35 %or.i = or i32 %shl.i, %shr.i
    44 %shr.i = lshr i32 %a, 8
    46 %or.i = or i32 %shl.i, %shr.i
    55 %shr.i = lshr i32 %a, 16
    57 %or.i = or i32 %shl.i, %shr.i
    [all …]
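acle-intrinsics-rot.ll builds each rotate from a logical shift right, a shift left, and an or. A hedged Kotlin equivalent of a 32-bit rotate right (assumes 0 < n < 32; Kotlin's standard library also offers Int.rotateRight for the same result):

    // Rotate right: the bits shifted out on the right re-enter on the left.
    fun rotateRight32(a: Int, n: Int): Int = (a ushr n) or (a shl (32 - n))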
|
/external/mesa3d/src/intel/tools/tests/gen9/ |
D | shr.asm |
    1 shr(8) g20<1>UD g19<8,8,1>UD 0x00000001UD { align1 1Q };
    2 shr(16) g43<1>UD g41<8,8,1>UD 0x00000001UD { align1 1H };
    3 shr.z.f0.0(8) g3<1>UD g1<8,8,1>UD 0x0000001bUD { align1 1Q };
    4 shr(16) g8<1>UW g1<1,8,0>UB 0x44440000V { align1 1H };
    5 shr.z.f0.0(8) null<1>UD g1<8,8,1>UD 0x0000001bUD { align1 1Q };
    6 shr(8) g3<1>UW g1.28<1,8,0>UB 0x76543210V { align1 1Q };
    7 shr(8) g3<2>UW g5<8,8,1>UD g4<8,8,1>UW { align1 1Q };
    8 shr(16) g20<2>UW g15<8,8,1>UD g13<8,8,1>UW { align1 1H };
|
/external/mesa3d/src/intel/tools/tests/gen7/ |
D | shr.asm |
    1 shr(1) g11<1>UD g11<0,1,0>UD 0x0000000fUD { align1 1N };
    2 shr(8) g13<1>.xUD g5.4<0>.zUD g5.4<0>.wUD { align16 1Q };
    3 shr(8) g13<1>UD g12<8,8,1>UD 0x00000001UD { align1 1Q };
    4 shr(16) g27<1>UD g25<8,8,1>UD 0x00000001UD { align1 1H };
    5 shr(8) g35<1>UD g31<8,8,1>UD g5.5<0,1,0>UD { align1 1Q };
    6 shr(16) g23<1>UD g56<8,8,1>UD g7.5<0,1,0>UD { align1 1H };
    7 shr(1) g9<1>UD g9<0,1,0>UD 5D { align1 WE_all 1N };
    8 shr(8) g54<1>.xUD g55<4>.xUD 0x00000005UD { align16 1Q };
|
/external/mesa3d/src/intel/tools/tests/gen8/ |
D | shr.asm |
    1 shr(8) g20<1>UD g19<8,8,1>UD 0x00000001UD { align1 1Q };
    2 shr(16) g51<1>UD g49<8,8,1>UD 0x00000001UD { align1 1H };
    3 shr(16) g4<1>UW g1<1,8,0>UB 0x44440000V { align1 1H };
    4 shr.z.f0.0(8) g3<1>UD g1<8,8,1>UD 0x0000001bUD { align1 1Q };
    5 shr.z.f0.0(8) null<1>UD g1<8,8,1>UD 0x0000001bUD { align1 1Q };
    6 shr(8) g3<1>UW g1.28<1,8,0>UB 0x76543210V { align1 1Q };
    7 shr(8) g3<2>UW g5<8,8,1>UD g4<8,8,1>UW { align1 1Q };
    8 shr(16) g20<2>UW g15<8,8,1>UD g13<8,8,1>UW { align1 1H };
|
/external/llvm-project/llvm/test/CodeGen/Thumb/ |
D | shift-and.ll |
    12 %shr = and i32 %0, 1023
    13 ret i32 %shr
    29 %shr = and i32 %0, 1022
    30 ret i32 %shr
    41 %shr = and i32 %0, 255
    42 ret i32 %shr
    53 %shr = and i32 %0, -128
    54 ret i32 %shr
    65 %shr = and i32 %0, 536870912
    66 ret i32 %shr
    [all …]
|
/external/llvm-project/llvm/test/Transforms/IndVarSimplify/ |
D | iv-fold.ll |
    5 ; Indvars should be able to fold IV increments into shr when low bits are zero.
    8 ; CHECK: shr.1 = lshr i32 %0, 5
    15 %shr = lshr i32 %0, 5
    16 %arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
    19 %shr.1 = lshr i32 %inc.1, 5
    20 %arrayidx.1 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.1
    31 ; Indvars should not fold an increment into shr unless 2^shiftBits is
    35 ; CHECK: shr.1 = lshr i32 %inc.1, 5
    42 %shr = lshr i32 %0, 5
    43 %arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
    [all …]
|
/external/llvm/test/Transforms/IndVarSimplify/ |
D | iv-fold.ll |
    5 ; Indvars should be able to fold IV increments into shr when low bits are zero.
    8 ; CHECK: shr.1 = lshr i32 %0, 5
    15 %shr = lshr i32 %0, 5
    16 %arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
    19 %shr.1 = lshr i32 %inc.1, 5
    20 %arrayidx.1 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.1
    31 ; Indvars should not fold an increment into shr unless 2^shiftBits is
    35 ; CHECK: shr.1 = lshr i32 %inc.1, 5
    42 %shr = lshr i32 %0, 5
    43 %arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
    [all …]
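The comments in iv-fold.ll state the condition for the fold: an induction-variable increment can only be folded through a later lshr when the step is a multiple of 2^shiftBits, so no carry crosses the shift boundary. A small numeric illustration of that general condition (not the exact IR in the test):

    // With shiftBits = 5, folding the increment through the shift holds
    // when the step is a multiple of 32.
    fun foldHolds(i: Int, step: Int): Boolean =
      ((i + step) ushr 5) == ((i ushr 5) + (step ushr 5))

    // foldHolds(i, 32) is true for any non-negative i (barring overflow),
    // but foldHolds(31, 1) is false: (31 + 1) ushr 5 == 1 while 31 ushr 5 == 0.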
|
/external/llvm-project/llvm/test/CodeGen/PowerPC/ |
D | combine-to-mulh-shift-amount.ll |
    22 %shr = lshr i64 %mul, 33
    23 %tr = trunc i64 %shr to i32
    35 %shr = lshr i64 %mul, 33
    36 %tr = trunc i64 %shr to i32
    48 %shr = lshr i128 %mul, 63
    49 %tr = trunc i128 %shr to i64
    61 %shr = lshr i128 %mul, 63
    62 %tr = trunc i128 %shr to i64
    74 %shr = lshr i64 %mul, 33
    75 %tr = trunc i64 %shr to i32
    [all …]
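combine-to-mulh-shift-amount.ll checks that a widening multiply followed by an lshr of the upper bits can be matched to PowerPC's high-multiply instructions. A hedged Kotlin sketch of the underlying pattern, extracting the high 32 bits of a 32x32-bit unsigned product via a 64-bit multiply (the tests use shift amounts of 33 and 63 rather than exactly the half-width, to exercise how the shift amount interacts with the combine):

    // Widen to 64 bits, multiply, then take the upper half with an unsigned shift.
    fun mulHighUnsigned(a: Int, b: Int): Int {
      val product = (a.toLong() and 0xffffffffL) * (b.toLong() and 0xffffffffL)
      return (product ushr 32).toInt()
    }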
|