/external/webrtc/rtc_base/numerics/safe_compare_unittest.cc
     21: constexpr std::uintmax_t umax = std::numeric_limits<std::uintmax_t>::max();  variable
     27: static_assert(static_cast<std::uintmax_t>(m1) == umax, "");
     28: static_assert(m1 == static_cast<std::intmax_t>(umax), "");
     72: bool TestLessThanConst2() { return SafeLt( m1, umax); }  in TestLessThanConst2()
     73: bool TestLessThanConst3() { return SafeLt(umax, imin); }  in TestLessThanConst3()
    101: static_assert(!SafeEq(imin, umax), "");  in TEST()
    102: static_assert(!SafeEq(umax, imin), "");  in TEST()
    103: static_assert(SafeEq(umax, umax), "");  in TEST()
    106: static_assert(!SafeEq(m1, umax), "");  in TEST()
    107: static_assert(!SafeEq(umax, m1), "");  in TEST()
    [all …]

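Context for readers: the SafeEq/SafeLt helpers exercised above exist because C++'s usual arithmetic conversions make naive mixed-sign comparisons lie (-1 == UINTMAX_MAX evaluates to true once the -1 is converted to unsigned). A minimal sketch of the idea, assuming nothing about WebRTC's actual implementation; C++20 standardizes the same behavior as std::cmp_equal in <utility>:

    #include <cstdint>
    #include <limits>
    #include <type_traits>

    // Mixed-sign-safe equality, in the spirit of WebRTC's SafeEq.
    template <typename A, typename B>
    constexpr bool SafeEq(A a, B b) {
      if constexpr (std::is_signed_v<A> == std::is_signed_v<B>) {
        return a == b;  // same signedness: built-in == is already safe
      } else if constexpr (std::is_signed_v<A>) {
        // a signed, b unsigned: a negative a can never equal b
        return a >= 0 && static_cast<std::make_unsigned_t<A>>(a) == b;
      } else {
        return b >= 0 && a == static_cast<std::make_unsigned_t<B>>(b);
      }
    }

    // The trap the helpers avoid: plain == converts -1 to the unsigned max.
    static_assert(!SafeEq(-1, std::numeric_limits<std::uintmax_t>::max()));
    static_assert(SafeEq(-1, -1) && SafeEq(0u, 0));
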
/external/llvm/test/CodeGen/AMDGPU/umed3.ll
    125: define internal i32 @umax(i32 %x, i32 %y) #2 {
    177: %tmp1 = call i32 @umax(i32 %x, i32 %y)
    179: %tmp3 = call i32 @umax(i32 %tmp0, i32 %tmp2)
    189: %tmp1 = call i32 @umax(i32 %y, i32 %x)
    191: %tmp3 = call i32 @umax(i32 %tmp0, i32 %tmp2)
    201: %tmp1 = call i32 @umax(i32 %x, i32 %y)
    203: %tmp3 = call i32 @umax(i32 %tmp0, i32 %tmp2)
    213: %tmp1 = call i32 @umax(i32 %y, i32 %x)
    215: %tmp3 = call i32 @umax(i32 %tmp0, i32 %tmp2)
    225: %tmp1 = call i32 @umax(i32 %x, i32 %y)
    [all …]

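The matches above come from a test that composes @umax and @umin calls into a median-of-three so the AMDGPU backend can fuse the tree into a single v_med3_u32. The min/max identity the pattern relies on, sketched in C++ (illustrative only; the .ll file is the authority on the exact shape matched):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Median-of-three from unsigned min/max only:
    // med3(x, y, z) = max(min(x, y), min(max(x, y), z)).
    uint32_t umed3(uint32_t x, uint32_t y, uint32_t z) {
      return std::max(std::min(x, y), std::min(std::max(x, y), z));
    }

    int main() {
      assert(umed3(1u, 5u, 3u) == 3u);  // z between the other two
      assert(umed3(5u, 1u, 0u) == 1u);  // clamped from below
      assert(umed3(5u, 1u, 9u) == 5u);  // clamped from above
    }
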
/external/capstone/suite/MC/AArch64/neon-max-min.s.cs
     8: 0x20,0x64,0x22,0x2e = umax v0.8b, v1.8b, v2.8b
     9: 0x20,0x64,0x22,0x6e = umax v0.16b, v1.16b, v2.16b
    10: 0x20,0x64,0x62,0x2e = umax v0.4h, v1.4h, v2.4h
    11: 0x20,0x64,0x62,0x6e = umax v0.8h, v1.8h, v2.8h
    12: 0x20,0x64,0xa2,0x2e = umax v0.2s, v1.2s, v2.2s
    13: 0x20,0x64,0xa2,0x6e = umax v0.4s, v1.4s, v2.4s

/external/llvm/test/MC/AArch64/neon-max-min.s
    22: umax v0.8b, v1.8b, v2.8b
    23: umax v0.16b, v1.16b, v2.16b
    24: umax v0.4h, v1.4h, v2.4h
    25: umax v0.8h, v1.8h, v2.8h
    26: umax v0.2s, v1.2s, v2.2s
    27: umax v0.4s, v1.4s, v2.4s

/external/llvm/test/Transforms/IndVarSimplify/backedge-on-min-max.ll
    347: %umax.cmp = icmp ugt i32 %a_len, %n
    348: %umax = select i1 %umax.cmp, i32 %a_len, i32 %n
    349: %entry.cond = icmp ugt i32 5, %umax
    365: %be.cond = icmp ugt i32 %idx.inc, %umax
    375: %umax.cmp = icmp ugt i32 %a_len, %n
    376: %umax = select i1 %umax.cmp, i32 %a_len, i32 %n
    377: %entry.cond = icmp ugt i32 5, %umax
    393: %be.cond = icmp ugt i32 %idx.inc, %umax
    403: %umax.cmp = icmp ugt i32 42, %n
    404: %umax = select i1 %umax.cmp, i32 42, i32 %n
    [all …]

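The recurring icmp ugt + select pair above is the canonical spelling of unsigned max in LLVM IR of this era, and ScalarEvolution re-recognizes the pair as a umax expression when computing backedge-taken counts. A hedged sketch of the source-level shape that lowers to this pattern (hypothetical function, for illustration only):

    #include <cstdint>

    // The unsigned-max loop bound becomes the icmp ugt + select pair,
    // which SCEV then models as a umax expression for the trip count.
    uint32_t count_up_to_umax(uint32_t a_len, uint32_t n) {
      uint32_t bound = a_len > n ? a_len : n;  // -> %umax.cmp and %umax
      uint32_t sum = 0;
      for (uint32_t i = 0; i < bound; ++i)     // backedge test against %umax
        sum += i;
      return sum;
    }
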
/external/llvm/test/CodeGen/X86/optimize-max-1.ll
     3: ; LSR should be able to eliminate both smax and umax expressions
    47: %umax = select i1 %tmp, i64 1, i64 %n ; <i64> [#uses=1]
    55: %exitcond = icmp eq i64 %0, %umax ; <i1> [#uses=1]
    65: %umax = select i1 %tmp, i64 %n, i64 1 ; <i64> [#uses=1]
    73: %exitcond = icmp eq i64 %0, %umax ; <i1> [#uses=1]

/external/llvm/test/CodeGen/X86/optimize-max-2.ll
    15: %umax = select i1 %tmp, i64 1, i64 %y ; <i64> [#uses=2]
    16: %tmp8 = icmp ugt i64 %umax, %x ; <i1> [#uses=1]
    17: %umax9 = select i1 %tmp8, i64 %umax, i64 %x ; <i64> [#uses=1]

/external/llvm/test/CodeGen/X86/coalescer-commute4.ll
    10: %umax = select i1 %tmp2132, i32 1, i32 %c ; <i32> [#uses=1]
    24: %exitcond = icmp eq i32 %indvar.next, %umax ; <i1> [#uses=1]

/external/iproute2/tc/q_hfsc.c
    332: unsigned int umax = 0, dmax = 0, rate = 0;  in hfsc_get_sc2() local
    336: if (get_size(&umax, *argv) < 0) {  in hfsc_get_sc2()
    361: if (umax != 0 && dmax == 0) {  in hfsc_get_sc2()
    366: if (dmax != 0 && ceil(1.0 * umax * TIME_UNITS_PER_SEC / dmax) > rate) {  in hfsc_get_sc2()
    371: sc->m1 = ceil(1.0 * umax * TIME_UNITS_PER_SEC / dmax); /* in bps */  in hfsc_get_sc2()
    380: sc->d = tc_core_time2ktime(ceil(dmax - umax * TIME_UNITS_PER_SEC / rate));  in hfsc_get_sc2()

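The snippet implements hfsc's umax/dmax/rate shorthand for a two-piece service curve: if the burst umax cannot be sent within dmax at the steady rate, the first segment gets a steeper slope m1; otherwise the curve is delayed by d. A worked sketch of that arithmetic, with units as in q_hfsc.c (umax in bytes, dmax in tc time units, rate in bytes/s); the tc_core_time2ktime() conversion is omitted and the struct is a stand-in, not iproute2's tc_service_curve:

    #include <cmath>
    #include <cstdio>

    constexpr double TIME_UNITS_PER_SEC = 1000000.0;  // tc time unit = 1 us

    struct ServiceCurve { double m1, d, m2; };

    ServiceCurve sc_from_umax_dmax_rate(double umax, double dmax, double rate) {
      ServiceCurve sc{};
      sc.m2 = rate;
      if (dmax != 0 && std::ceil(umax * TIME_UNITS_PER_SEC / dmax) > rate) {
        // Concave: the burst needs a first slope steeper than the steady rate.
        sc.m1 = std::ceil(umax * TIME_UNITS_PER_SEC / dmax);
        sc.d = dmax;
      } else {
        // Convex: delay the curve, then serve the burst at the steady rate.
        sc.m1 = 0;
        sc.d = std::ceil(dmax - umax * TIME_UNITS_PER_SEC / rate);
      }
      return sc;
    }

    int main() {
      // 1500-byte burst within 10 ms against a 125 kB/s (1 Mbit/s) rate:
      ServiceCurve sc = sc_from_umax_dmax_rate(1500, 10000, 125000);
      std::printf("m1=%.0f d=%.0f m2=%.0f\n", sc.m1, sc.d, sc.m2);
      // prints: m1=150000 d=10000 m2=125000 (concave branch)
    }
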
/external/llvm/unittests/IR/ConstantRangeTest.cpp
    414: EXPECT_EQ(Full.umax(Full), Full);  in TEST_F()
    415: EXPECT_EQ(Full.umax(Empty), Empty);  in TEST_F()
    416: EXPECT_EQ(Full.umax(Some), ConstantRange(APInt(16, 0xa), APInt(16, 0)));  in TEST_F()
    417: EXPECT_EQ(Full.umax(Wrap), Full);  in TEST_F()
    418: EXPECT_EQ(Full.umax(Some), ConstantRange(APInt(16, 0xa), APInt(16, 0)));  in TEST_F()
    419: EXPECT_EQ(Empty.umax(Empty), Empty);  in TEST_F()
    420: EXPECT_EQ(Empty.umax(Some), Empty);  in TEST_F()
    421: EXPECT_EQ(Empty.umax(Wrap), Empty);  in TEST_F()
    422: EXPECT_EQ(Empty.umax(One), Empty);  in TEST_F()
    423: EXPECT_EQ(Some.umax(Some), Some);  in TEST_F()
    [all …]

/external/llvm/test/Transforms/LoopStrengthReduce/X86/2008-08-14-ShadowIV.ll
    13: %umax = select i1 %0, i32 1, i32 %n ; <i32> [#uses=1]
    22: %exitcond = icmp eq i32 %indvar.next, %umax ; <i1> [#uses=1]
    37: %umax = select i1 %0, i64 1, i64 %n ; <i64> [#uses=1]
    47: %exitcond = icmp eq i64 %indvar.next, %umax ; <i1> [#uses=1]

/external/llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
    52: ; When the stride is not constant, we are forced to do umin/umax to get
    61: ; CHECK: Low: (-1 + (-1 * ((-60001 + (-1 * %a)) umax (-60001 + (40000 * %step) + (-1 * %a)))))
    62: ; CHECK: High: ((60000 + %a)<nsw> umax (60000 + (-40000 * %step) + %a))

/external/llvm/test/Transforms/ObjCARC/nested.ll
     55: %umax = select i1 %tmp7, i64 %forcoll.count.ph, i64 1
     75: %exitcond = icmp eq i64 %4, %umax
    120: %umax = select i1 %tmp8, i64 %forcoll.count.ph, i64 1
    140: %exitcond = icmp eq i64 %4, %umax
    185: %umax = select i1 %tmp9, i64 %forcoll.count.ph, i64 1
    205: %exitcond = icmp eq i64 %4, %umax
    250: %umax = select i1 %tmp8, i64 %forcoll.count.ph, i64 1
    270: %exitcond = icmp eq i64 %4, %umax
    316: %umax = select i1 %tmp8, i64 %forcoll.count.ph, i64 1
    336: %exitcond = icmp eq i64 %4, %umax
    [all …]

/external/angle/third_party/vulkan-deps/glslang/src/Test/300BuiltIns.frag
     4: uint umax, umin;
    43: uvec4 uv10 = clamp(uv4y, umin, umax);

/external/deqp-deps/glslang/Test/300BuiltIns.frag
     4: uint umax, umin;
    43: uvec4 uv10 = clamp(uv4y, umin, umax);

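Both 300BuiltIns.frag copies above exercise GLSL ES 3.00's unsigned clamp, which is defined componentwise as min(max(x, minVal), maxVal). A scalar C++ sketch of that definition (the uvec4 call simply applies it to each of the four lanes):

    #include <algorithm>
    #include <cstdint>

    // GLSL's clamp(x, lo, hi) on unsigned operands, one component at a time.
    constexpr uint32_t uclamp(uint32_t x, uint32_t lo, uint32_t hi) {
      return std::min(std::max(x, lo), hi);
    }

    static_assert(uclamp(5u, 2u, 9u) == 5u, "in range: unchanged");
    static_assert(uclamp(1u, 2u, 9u) == 2u, "raised to umin");
    static_assert(uclamp(12u, 2u, 9u) == 9u, "capped at umax");
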
/external/llvm/test/CodeGen/AArch64/arm64-vmax.ll
     66: ;CHECK: umax.8b
     69: %tmp3 = call <8 x i8> @llvm.aarch64.neon.umax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
     75: ;CHECK: umax.16b
     78: %tmp3 = call <16 x i8> @llvm.aarch64.neon.umax.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
     84: ;CHECK: umax.4h
     87: %tmp3 = call <4 x i16> @llvm.aarch64.neon.umax.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
     93: ;CHECK: umax.8h
     96: %tmp3 = call <8 x i16> @llvm.aarch64.neon.umax.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    102: ;CHECK: umax.2s
    105: %tmp3 = call <2 x i32> @llvm.aarch64.neon.umax.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    [all …]

/external/llvm/lib/IR/ConstantRange.cpp
    757: ConstantRange::umax(const ConstantRange &Other) const {  in umax() function in ConstantRange
    762: APInt NewL = APIntOps::umax(getUnsignedMin(), Other.getUnsignedMin());  in umax()
    763: APInt NewU = APIntOps::umax(getUnsignedMax(), Other.getUnsignedMax()) + 1;  in umax()
    844: APInt umax = APIntOps::umax(getUnsignedMin(), Other.getUnsignedMin());  in binaryOr() local
    845: if (umax.isMinValue())  in binaryOr()
    847: return ConstantRange(umax, APInt::getNullValue(getBitWidth()));  in binaryOr()

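The interval rule visible in the snippet: for non-wrapping unsigned ranges, the umax of [a, b] and [c, d] is [max(a, c), max(b, d)] (the "+ 1" in the source converts to LLVM's half-open upper bound). A sketch on plain closed uint64_t intervals, assuming away the empty, full, and wrapped sets the real class handles:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    struct URange { uint64_t lo, hi; };  // closed interval, lo <= hi

    // umax of two value sets: the minimum is forced up by the larger of
    // the two minima, and the maximum is the larger of the two maxima.
    URange umax(URange a, URange b) {
      return {std::max(a.lo, b.lo), std::max(a.hi, b.hi)};
    }

    int main() {
      URange r = umax({0, 10}, {5, 7});
      assert(r.lo == 5 && r.hi == 10);  // umax(0,5) = 5, umax(10,7) = 10
    }
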
/external/llvm/test/CodeGen/ARM/atomic-op.ll
    141: %13 = atomicrmw umax i32* %val2, i32 1 monotonic
    149: %14 = atomicrmw umax i32* %val2, i32 0 monotonic
    183: %2 = atomicrmw umax i16* %val, i16 1 monotonic
    191: %3 = atomicrmw umax i16* %val, i16 0 monotonic
    224: %2 = atomicrmw umax i8* %val, i8 1 monotonic
    232: %3 = atomicrmw umax i8* %val, i8 0 monotonic

/external/llvm/test/CodeGen/ARM/atomicrmw_minmax.ll
     9: %old = atomicrmw umax i32* %ptr, i32 %val monotonic

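`atomicrmw umax` atomically stores max(*ptr, val) and yields the old value; `monotonic` corresponds to C++'s memory_order_relaxed. A sketch of the same operation as a C++ compare-exchange loop, which is also how targets without a native instruction lower it (std::atomic only gains a fetch_max member with C++26):

    #include <atomic>
    #include <cstdint>

    // Fetch-umax via CAS: store max(current, arg), return the prior value.
    uint32_t atomic_fetch_umax(std::atomic<uint32_t>& v, uint32_t arg) {
      uint32_t old = v.load(std::memory_order_relaxed);
      // Retry until the stored value is already >= arg (nothing to do)
      // or our larger value wins the race; compare_exchange_weak reloads
      // `old` with the current value whenever it fails.
      while (old < arg &&
             !v.compare_exchange_weak(old, arg, std::memory_order_relaxed))
        ;
      return old;  // like atomicrmw, this is the pre-operation value
    }
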
/external/libhevc/common/arm64/ihevc_deblk_luma_horz.s
    239: umax v4.8b, v18.8b , v31.8b
    255: umax v5.8b, v18.8b , v17.8b
    287: umax v3.8b, v18.8b , v31.8b
    313: umax v2.8b, v18.8b , v31.8b
    414: umax v5.8b, v18.8b , v17.8b
    425: umax v3.8b, v18.8b , v31.8b

/external/libhevc/common/arm64/ihevc_deblk_luma_vert.s
    230: umax v22.8b, v21.8b , v30.8b
    262: umax v20.8b, v26.8b , v25.8b
    267: umax v21.8b, v19.8b , v31.8b
    299: umax v26.8b, v16.8b , v31.8b
    430: umax v5.8b, v16.8b , v28.8b
    437: umax v0.8b, v1.8b , v31.8b

/external/llvm/test/Analysis/ScalarEvolution/trip-count13.ll
    16: ; CHECK-NEXT: Loop %loop: backedge-taken count is (-100 + (-1 * %rhs) + ((100 + %rhs) umax %rhs))
    36: ; CHECK-NEXT: Loop %loop: backedge-taken count is ((-1 * %start) + ((-100 + %start) umax %start))

/external/llvm/test/Analysis/ScalarEvolution/2008-02-15-UMax.ll
     4: ; CHECK: umax

/external/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/llvm.amdgcn.buffer.atomic.ll
    51: ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.umax(
    54: %orig = call i32 @llvm.amdgcn.buffer.atomic.umax(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
    97: declare i32 @llvm.amdgcn.buffer.atomic.umax(i32, <4 x i32>, i32, i32, i1) #0

/external/mesa3d/src/intel/tools/tests/gen9/sends.asm
     30: …dp data 1 MsgDesc: ( DC typed atomic, Surface = 1, SIMD16, umax) mlen 2 ex_mlen 1 rlen 0 { align1 …
     32: …dp data 1 MsgDesc: ( DC typed atomic, Surface = 1, SIMD8, umax) mlen 2 ex_mlen 1 rlen 0 { align1 2…
     46: …dp data 1 MsgDesc: ( DC untyped atomic op, Surface = 254, SIMD16, umax) mlen 2 ex_mlen 2 rlen 0 { …
    144: …dp data 1 MsgDesc: ( DC typed atomic, Surface = 0, SIMD16, umax) mlen 1 ex_mlen 1 rlen 1 { align1 …
    186: …dp data 1 MsgDesc: ( DC untyped atomic op, Surface = 1, SIMD8, umax) mlen 1 ex_mlen 1 rlen 1 { ali…
    200: …dp data 1 MsgDesc: ( DC untyped atomic op, Surface = 1, SIMD16, umax) mlen 2 ex_mlen 2 rlen 2 { al…
    216: …dp data 1 MsgDesc: ( DC untyped atomic op, Surface = 1, SIMD8, umax) mlen 1 ex_mlen 1 rlen 0 { ali…
    228: …dp data 1 MsgDesc: ( DC untyped atomic op, Surface = 1, SIMD16, umax) mlen 2 ex_mlen 2 rlen 0 { al…