
Searched refs:umin (Results 1 – 25 of 71) sorted by relevance


/external/llvm/test/MC/AArch64/
neon-max-min.s
53 umin v0.8b, v1.8b, v2.8b
54 umin v0.16b, v1.16b, v2.16b
55 umin v0.4h, v1.4h, v2.4h
56 umin v0.8h, v1.8h, v2.8h
57 umin v0.2s, v1.2s, v2.2s
58 umin v0.4s, v1.4s, v2.4s
/external/llvm/test/Transforms/IndVarSimplify/
backedge-on-min-max.ll
234 %umin.cmp = icmp ult i32 %a_len, %n
235 %umin = select i1 %umin.cmp, i32 %a_len, i32 %n
236 %entry.cond = icmp ult i32 5, %umin
252 %be.cond = icmp ult i32 %idx.inc, %umin
262 %umin.cmp = icmp ult i32 %a_len, %n
263 %umin = select i1 %umin.cmp, i32 %a_len, i32 %n
264 %entry.cond = icmp ult i32 5, %umin
280 %be.cond = icmp ult i32 %idx.inc, %umin
290 %umin.cmp = icmp ult i32 42, %n
291 %umin = select i1 %umin.cmp, i32 42, i32 %n
[all …]
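These IndVarSimplify hits exercise loops whose backedge condition compares the induction variable against an unsigned minimum built from the icmp ult / select idiom shown above. A minimal sketch of that loop shape, using hypothetical function and label names (reconstructed for illustration, not copied from the test file):

    define void @loop_bounded_by_umin(i32 %a_len, i32 %n) {
    entry:
      ; umin(%a_len, %n) expressed as compare-and-select, as in the hits above
      %umin.cmp = icmp ult i32 %a_len, %n
      %umin = select i1 %umin.cmp, i32 %a_len, i32 %n
      %entry.cond = icmp ult i32 5, %umin
      br i1 %entry.cond, label %loop, label %exit

    loop:
      %idx = phi i32 [ 5, %entry ], [ %idx.inc, %loop ]
      %idx.inc = add i32 %idx, 1
      ; the backedge is taken while the induction variable stays below the umin bound
      %be.cond = icmp ult i32 %idx.inc, %umin
      br i1 %be.cond, label %loop, label %exit

    exit:
      ret void
    }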
/external/llvm/test/CodeGen/AMDGPU/
llvm.AMDGPU.umin.ll
9 %min = call i32 @llvm.AMDGPU.umin(i32 %p0, i32 %load)
19 %min = call i32 @llvm.AMDGPU.umin(i32 %p0, i32 %p1)
33 %tmp3 = tail call i32 @llvm.AMDGPU.umin(i32 %tmp2, i32 0) nounwind readnone
41 declare i32 @llvm.AMDGPU.umin(i32, i32) #1
global_atomics.ll
563 %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
573 %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
587 %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
601 %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
610 %0 = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
619 %0 = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
632 %0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
645 %0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
local-atomics64.ll
218 %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
228 %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
442 %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
451 %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
local-atomics.ll
270 %result = atomicrmw umin i32 addrspace(3)* %ptr, i32 4 seq_cst
281 %result = atomicrmw umin i32 addrspace(3)* %gep, i32 4 seq_cst
523 %result = atomicrmw umin i32 addrspace(3)* %ptr, i32 4 seq_cst
532 %result = atomicrmw umin i32 addrspace(3)* %gep, i32 4 seq_cst
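Every hit in these AMDGPU tests uses the atomicrmw umin instruction, which atomically replaces the value in memory with the unsigned minimum of that value and the operand, and yields the old value; addrspace(1) and addrspace(3) are the global and local address spaces in these tests. A minimal sketch in the typed-pointer IR the tests use (the function name is illustrative):

    define i32 @atomic_umin_example(i32 addrspace(1)* %ptr, i32 %in) {
      ; atomically: *%ptr = umin(*%ptr, %in); the previous value is returned
      %old = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
      ret i32 %old
    }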
/external/llvm/test/CodeGen/AArch64/
minmax.ll
28 ; CHECK: umin
52 ; CHECK: umin
68 ; CHECK: umin
99 ; CHECK-NOT: umin
100 ; The icmp is used by two instructions, so don't produce a umin node.
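minmax.ll checks that the AArch64 backend folds the compare-and-select idiom into a single vector umin, and the CHECK-NOT hit verifies that it refrains from doing so when the icmp has additional users. A minimal sketch of the pattern being matched, with illustrative names:

    define <4 x i32> @umin_v4i32(<4 x i32> %a, <4 x i32> %b) {
      ; selected as a single NEON "umin v.4s" when the icmp has no other users
      %cmp = icmp ult <4 x i32> %a, %b
      %min = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
      ret <4 x i32> %min
    }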
arm64-vmax.ll
188 ;CHECK: umin.8b
191 %tmp3 = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
197 ;CHECK: umin.16b
200 %tmp3 = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
206 ;CHECK: umin.4h
209 %tmp3 = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
215 ;CHECK: umin.8h
218 %tmp3 = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
224 ;CHECK: umin.2s
227 %tmp3 = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
[all …]
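arm64-vmax.ll calls the target-specific NEON intrinsics directly instead of relying on pattern matching. A minimal sketch of one such call, reusing the intrinsic declaration visible in the hits above (the function name is illustrative):

    declare <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8>, <8 x i8>)

    define <8 x i8> @umin_8b(<8 x i8> %a, <8 x i8> %b) {
      ; lowers to the "umin.8b" instruction checked by the test
      %min = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> %a, <8 x i8> %b)
      ret <8 x i8> %min
    }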
/external/opencv3/modules/shape/src/
sc_dis.cpp
510 float min=0, h=0, umin=0, usubmin=0, v2=0; in hungarian() local
576 umin = costMatrix.at<float>(i,0)-v[0]; in hungarian()
584 if (h >= umin) in hungarian()
591 usubmin = umin; in hungarian()
592 umin = h; in hungarian()
600 if (fabs(umin-usubmin) > LOWV) //if( umin < usubmin ) in hungarian()
602 v[j1] = v[j1] - (usubmin - umin); in hungarian()
619 if (fabs(umin-usubmin) > LOWV) in hungarian()
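Reading the visible snippet, umin and usubmin track the smallest and second-smallest reduced cost found while scanning a row of the assignment-problem cost matrix in hungarian(), and the fabs(umin-usubmin) > LOWV check is a tolerance-based stand-in for the commented-out umin < usubmin comparison. (Interpretation of the excerpt only; these variables are plain floats and unrelated to LLVM's umin operation.)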
/external/llvm/test/CodeGen/ARM/
atomic-op.ll
136 %11 = atomicrmw umin i32* %val2, i32 16 monotonic
146 %12 = atomicrmw umin i32* %val2, i32 %uneg monotonic
182 %0 = atomicrmw umin i16* %val, i16 16 monotonic
192 %1 = atomicrmw umin i16* %val, i16 %uneg monotonic
227 %0 = atomicrmw umin i8* %val, i8 16 monotonic
237 %1 = atomicrmw umin i8* %val, i8 %uneg monotonic
atomicrmw_minmax.ll
19 %old = atomicrmw umin i32* %ptr, i32 %val monotonic
/external/libhevc/common/arm64/
ihevc_deblk_luma_horz.s
232 umin v18.8b, v20.8b , v30.8b
248 umin v18.8b, v21.8b , v16.8b
283 umin v18.8b, v19.8b , v30.8b
309 umin v18.8b, v20.8b , v30.8b
397 umin v18.8b, v21.8b , v16.8b
423 umin v18.8b, v19.8b , v30.8b
ihevc_deblk_luma_vert.s
228 umin v21.8b, v22.8b , v31.8b
259 umin v26.8b, v20.8b , v21.8b
265 umin v19.8b, v0.8b , v30.8b
295 umin v16.8b, v26.8b , v30.8b
425 umin v16.8b, v2.8b , v27.8b
436 umin v1.8b, v0.8b , v30.8b
/external/llvm/test/CodeGen/X86/
pr5145.ll
24 %4 = atomicrmw umin i8* @sc8, i8 8 acquire
atomic_op.ll
91 %13 = atomicrmw umin i32* %val2, i32 1 monotonic
96 %14 = atomicrmw umin i32* %val2, i32 10 monotonic
atomic-minmax-i6432.ll
32 %4 = atomicrmw umin i64* @sc64, i64 8 acquire
/external/llvm/lib/IR/
ConstantRange.cpp
752 APInt umin = APIntOps::umin(Other.getUnsignedMax(), getUnsignedMax()); in binaryAnd() local
753 if (umin.isAllOnesValue()) in binaryAnd()
755 return ConstantRange(APInt::getNullValue(getBitWidth()), umin + 1); in binaryAnd()
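The reasoning in binaryAnd(): for unsigned values, x & y can never exceed either operand, so it can never exceed the unsigned minimum of the two ranges' unsigned maxima; the result is therefore conservatively bounded by the half-open range [0, umin + 1). For example, if one range has unsigned maximum 15 and the other 200, any AND of their elements fits in [0, 16).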
/external/mesa3d/src/gallium/state_trackers/d3d1x/d3d1xshader/defs/
opcodes.txt
85 umin
/external/llvm/test/CodeGen/NVPTX/
atomics.ll
132 %ret = atomicrmw umin i32* %subr, i32 %val seq_cst
139 %ret = atomicrmw umin i64* %subr, i64 %val seq_cst
/external/llvm/test/CodeGen/SystemZ/
atomicrmw-minmax-01.ll
118 %res = atomicrmw umin i8 *%src, i8 %b seq_cst
205 %res = atomicrmw umin i8 *%src, i8 1 seq_cst
atomicrmw-minmax-02.ll
118 %res = atomicrmw umin i16 *%src, i16 %b seq_cst
205 %res = atomicrmw umin i16 *%src, i16 1 seq_cst
atomicrmw-minmax-04.ll
47 %res = atomicrmw umin i64 *%src, i64 %b seq_cst
/external/llvm/test/Analysis/LoopAccessAnalysis/
reverse-memcheck-bounds.ll
51 ; When the stride is not constant, we are forced to do umin/umax to get
/external/llvm/test/CodeGen/SPARC/
atomics.ll
153 %0 = atomicrmw umin i32* %p, i32 %v seq_cst
/external/llvm/test/CodeGen/CPP/
atomic.ll
54 %inst10 = atomicrmw umin i32* %addr, i32 %inc singlethread release
