/external/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/ |
D | lsr-postinc-pos-addrspace.ll |
     10: ; CHECK: %lsr.iv1 = phi i32 [ %lsr.iv.next2, %bb ], [ 2, %entry ]
     13: ; CHECK: %lsr.iv.next2 = add i32 %lsr.iv1, -2
     41: ; CHECK: %lsr.iv1 = phi i64
     44: ; CHECK: %lsr.iv.next2 = add i64 %lsr.iv1, -2
     72: ; CHECK: %lsr.iv1 = phi i32 [ %lsr.iv.next2, %bb ], [ 0, %entry ]
     75: ; CHECK: %lsr.iv.next2 = add i32 %lsr.iv1, 2
     79: ; CHECK: %idxprom = sext i32 %lsr.iv1 to i64
    103: ; CHECK: %lsr.iv1 = phi i32 [ %lsr.iv.next2, %bb ], [ 0, %entry ]
    106: ; CHECK: %lsr.iv.next2 = add i32 %lsr.iv1, 2
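These CHECK lines pin down LSR's post-increment rewrite: the phi carries the current counter value and the exit test keys off the already-updated %lsr.iv.next2. A minimal hand-written sketch of that shape follows; the function name, loop bound, and control flow are illustrative assumptions, not taken from the test file.

    ; Sketch only: a down-counting post-increment IV of the kind LSR emits.
    define void @postinc_shape() {
    entry:
      br label %bb

    bb:
      ; current value lives in the phi; it starts at 2 and steps by -2
      %lsr.iv1 = phi i32 [ %lsr.iv.next2, %bb ], [ 2, %entry ]
      %lsr.iv.next2 = add i32 %lsr.iv1, -2
      ; the exit compare uses the post-incremented value (assumed here)
      %done = icmp eq i32 %lsr.iv.next2, 0
      br i1 %done, label %exit, label %bb

    exit:
      ret void
    }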
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/ |
D | lsr-postinc-pos-addrspace.ll |
     10: ; CHECK: %lsr.iv1 = phi i32 [ %lsr.iv.next2, %bb ], [ 2, %entry ]
     13: ; CHECK: %lsr.iv.next2 = add i32 %lsr.iv1, -2
     41: ; CHECK: %lsr.iv1 = phi i64
     44: ; CHECK: %lsr.iv.next2 = add i64 %lsr.iv1, -2
     72: ; CHECK: %lsr.iv1 = phi i32 [ %lsr.iv.next2, %bb ], [ 0, %entry ]
     75: ; CHECK: %lsr.iv.next2 = add i32 %lsr.iv1, 2
     79: ; CHECK: %idxprom = sext i32 %lsr.iv1 to i64
    103: ; CHECK: %lsr.iv1 = phi i32 [ %lsr.iv.next2, %bb ], [ 0, %entry ]
    106: ; CHECK: %lsr.iv.next2 = add i32 %lsr.iv1, 2
|
D | atomics.ll |
     13: ; OPT: %lsr.iv1 = phi i32 addrspace(3)* [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
     17: ; OPT: %tmp7 = atomicrmw add i32 addrspace(3)* %lsr.iv1, i32 undef seq_cst
     18: ; OPT: %0 = atomicrmw add i32 addrspace(3)* %lsr.iv1, i32 %tmp8 seq_cst
     53: ; OPT: %lsr.iv1 = phi i32 addrspace(3)* [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
     92: ; OPT: %lsr.iv1 = phi i32 addrspace(3)* [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
     96: ; OPT: %tmp7 = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %lsr.iv1, i32 undef, i3…
    130: ; OPT: %lsr.iv1 = phi i32 addrspace(3)* [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
    134: ; OPT: %tmp7 = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %lsr.iv1, i32 undef, i3…
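Here LSR has turned the address computation itself into a pointer-typed IV: %lsr.iv1 walks local-memory (addrspace(3)) elements directly, and each atomic operates on the phi rather than on a recomputed GEP. A self-contained sketch of that shape, in the LLVM 7-era typed-pointer syntax the test uses; the operand value 1 and the bound %n are assumptions for illustration.

    ; Sketch only: a pointer IV stepping through LDS, one i32 per iteration.
    define void @lds_atomic_shape(i32 addrspace(3)* %arg0, i32 %n) {
    .lr.ph.preheader:
      br label %.lr.ph

    .lr.ph:
      ; the address IV: advanced by one element each trip around the loop
      %lsr.iv1 = phi i32 addrspace(3)* [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
      %i = phi i32 [ %i.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
      ; the atomic addresses the IV directly instead of a separate GEP
      %old = atomicrmw add i32 addrspace(3)* %lsr.iv1, i32 1 seq_cst
      %scevgep = getelementptr i32, i32 addrspace(3)* %lsr.iv1, i32 1
      %i.next = add nuw i32 %i, 1
      %done = icmp eq i32 %i.next, %n
      br i1 %done, label %exit, label %.lr.ph

    exit:
      ret void
    }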
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/ScalarEvolution/ |
D | different-loops-recs.ll |
    518: ; CHECK: %iv1 = phi i64 [ %iv1.next, %guarded ], [ 0, %outer.loop ]
    520: ; CHECK: %iv1.trunc = trunc i64 %iv1 to i32
    522: ; CHECK: %iv1.next = add nuw nsw i64 %iv1, 1
    530: ; CHECK: %ret = mul i64 %iv1, %iv2.ext
    540: %iv1 = phi i64 [ %iv1.next, %guarded ], [ 0, %outer.loop ]
    541: %iv1.trunc = trunc i64 %iv1 to i32
    542: %cond1 = icmp ult i64 %iv1, 100
    546: %iv1.next = add nuw nsw i64 %iv1, 1
    547: %tmp16 = icmp slt i32 %iv1.trunc, 2
    559: %cond2 = icmp slt i32 %iv2, %iv1.trunc
    [all …]
|
D | pr22856.ll |
     20: %1 = add nsw i32 %iv1, 1
     26: %iv1 = phi i32 [ %1, %block_E ], [ undef, %block_A ]
|
/external/ltp/testcases/kernel/io/direct_io/ |
D | diotest_routines.c |
     93: int vbufcmp(struct iovec *iv1, struct iovec *iv2, int vcnt) in vbufcmp() argument
     97: for (i = 0; i < vcnt; iv1++, iv2++, i++) { in vbufcmp()
     98: if (bufcmp(iv1->iov_base, iv2->iov_base, iv1->iov_len) < 0) { in vbufcmp()
    100: i, (char *)iv1->iov_base, in vbufcmp()
|
/external/swiftshader/third_party/LLVM/test/Transforms/LoopUnroll/ |
D | scevunroll.ll |
     80: %iv1 = phi i32 [ 0, %entry ], [ %inc1, %l2 ]
     82: %inc1 = add i32 %iv1, 1
     84: %adr = getelementptr i32* %base, i32 %iv1
     86: %cmp1 = icmp slt i32 %iv1, 5
    111: %iv1 = phi i32 [ 0, %entry ], [ %inc1, %l3 ]
    113: %inc1 = add i32 %iv1, 1
    115: %adr = getelementptr i32* %base, i32 %iv1
    117: %cmp1 = icmp slt i32 %iv1, 5
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopUnroll/ |
D | runtime-loop4.ll |
     25: %iv1 = phi i32 [ 0, %entry ], [ %inc1, %loop1.latch ]
     26: %offset1 = getelementptr i32, i32* %addr1, i32 %iv1
     27: store i32 %iv1, i32* %offset1, align 4
     46: %inc1 = add i32 %iv1, 1
|
D | scevunroll.ll |
     80: %iv1 = phi i32 [ 0, %entry ], [ %inc1, %l2 ]
     82: %inc1 = add i32 %iv1, 1
     84: %adr = getelementptr i32, i32* %base, i32 %iv1
     86: %cmp1 = icmp slt i32 %iv1, 5
    111: %iv1 = phi i32 [ 0, %entry ], [ %inc1, %l3 ]
    113: %inc1 = add i32 %iv1, 1
    115: %adr = getelementptr i32, i32* %base, i32 %iv1
    117: %cmp1 = icmp slt i32 %iv1, 5
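The scevunroll excerpts show the IV whose trip count SCEV computes before unrolling: a counter from 0, compared with slt 5 and used directly as a GEP index. A compilable sketch of one such loop; the store and the block structure are filled in as assumptions, since the excerpt elides them.

    ; Sketch only: the loop shape the unroller sees, with an assumed body.
    define void @scev_unroll_shape(i32* %base) {
    entry:
      br label %l1

    l1:
      %iv1 = phi i32 [ 0, %entry ], [ %inc1, %l2 ]
      %inc1 = add i32 %iv1, 1
      %adr = getelementptr i32, i32* %base, i32 %iv1
      store i32 %iv1, i32* %adr, align 4
      %cmp1 = icmp slt i32 %iv1, 5
      br i1 %cmp1, label %l2, label %exit

    l2:
      br label %l1

    exit:
      ret void
    }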
|
/external/llvm/test/Transforms/LoopUnroll/ |
D | runtime-loop4.ll |
     25: %iv1 = phi i32 [ 0, %entry ], [ %inc1, %loop1.latch ]
     26: %offset1 = getelementptr i32, i32* %addr1, i32 %iv1
     27: store i32 %iv1, i32* %offset1, align 4
     46: %inc1 = add i32 %iv1, 1
|
D | scevunroll.ll |
     80: %iv1 = phi i32 [ 0, %entry ], [ %inc1, %l2 ]
     82: %inc1 = add i32 %iv1, 1
     84: %adr = getelementptr i32, i32* %base, i32 %iv1
     86: %cmp1 = icmp slt i32 %iv1, 5
    111: %iv1 = phi i32 [ 0, %entry ], [ %inc1, %l3 ]
    113: %inc1 = add i32 %iv1, 1
    115: %adr = getelementptr i32, i32* %base, i32 %iv1
    117: %cmp1 = icmp slt i32 %iv1, 5
|
/external/clang/test/CodeGenObjC/ |
D | interface.m |
      9: int iv1; field
     19: return a0[2].iv1;
     29: a[2].iv1 = 7;
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopVectorize/AMDGPU/ |
D | unroll-in-loop-vectorizer.ll |
     16: %iv = phi i32 [ %iv1, %loop ], [ 0, %entry ]
     21: %iv1 = add i32 %iv, 1
     23: %cond = icmp eq i32 %iv1, %size
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/ |
D | hwloop2.ll |
     21: %lsr.iv1 = phi i32* [ %scevgep, %for.body ], [ %a, %for.body.lr.ph ]
     22: %1 = load i32, i32* %lsr.iv1, align 4
     27: %scevgep = getelementptr i32, i32* %lsr.iv1, i32 1
|
/external/llvm/test/CodeGen/Hexagon/ |
D | hwloop2.ll |
     21: %lsr.iv1 = phi i32* [ %scevgep, %for.body ], [ %a, %for.body.lr.ph ]
     22: %1 = load i32, i32* %lsr.iv1, align 4
     27: %scevgep = getelementptr i32, i32* %lsr.iv1, i32 1
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/IRCE/ |
D | clamp.ll |
     81: %iv1 = phi i64 [ 3, %entry ], [ %iv1.next, %in_bounds ]
     91: %iv1.next = add nuw nsw i64 %iv1, 2
     93: %cond = icmp ugt i64 %iv1, 204
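In the IRCE test the IV starts at 3 and strides by 2, and the loop exits once %iv1 exceeds 204; the phi's %in_bounds predecessor is the block reached after the range check passes. A compilable sketch of that skeleton, with the range check reduced to an unconditional branch (an assumption; the real test branches out of bounds there):

    ; Sketch only: IRCE-style loop skeleton with the range check stubbed out.
    define void @irce_clamp_shape() {
    entry:
      br label %loop

    loop:
      %iv1 = phi i64 [ 3, %entry ], [ %iv1.next, %in_bounds ]
      br label %in_bounds          ; the real test range-checks here

    in_bounds:
      %iv1.next = add nuw nsw i64 %iv1, 2
      %cond = icmp ugt i64 %iv1, 204
      br i1 %cond, label %exit, label %loop

    exit:
      ret void
    }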
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/BasicAA/ |
D | phi-spec-order.ll |
     19: %lsr.iv1 = phi [16000 x double]* [ %i10, %for.body4 ], [ @X, %for.cond2.preheader ]
     21: ; CHECK: NoAlias:{{[ \t]+}}[16000 x double]* %lsr.iv1, [16000 x double]* %lsr.iv4
     25: %lsr.iv12 = bitcast [16000 x double]* %lsr.iv1 to <4 x double>*
     53: %scevgep = getelementptr [16000 x double], [16000 x double]* %lsr.iv1, i64 0, i64 16
|
/external/llvm/test/Analysis/BasicAA/ |
D | phi-spec-order.ll |
     19: %lsr.iv1 = phi [16000 x double]* [ %i10, %for.body4 ], [ @X, %for.cond2.preheader ]
     21: ; CHECK: NoAlias:{{[ \t]+}}[16000 x double]* %lsr.iv1, [16000 x double]* %lsr.iv4
     25: %lsr.iv12 = bitcast [16000 x double]* %lsr.iv1 to <4 x double>*
     53: %scevgep = getelementptr [16000 x double], [16000 x double]* %lsr.iv1, i64 0, i64 16
|
/external/llvm/test/Analysis/ScalarEvolution/ |
D | pr22856.ll |
     20: %1 = add nsw i32 %iv1, 1
     26: %iv1 = phi i32 [ %1, %block_E ], [ undef, %block_A ]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/ |
D | cortex-a57-misched-vstm-wrback.ll |
     19: define i32 @bar(double* %vptr, i32 %iv1, i32* %iptr) minsize {
     39: %mul1 = mul i32 %ptr_new_ival, %iv1
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/ |
D | falkor-hwpf.ll |
     47: %iv1 = phi i32 [ 0, %entry ], [ %inc1, %loop1.latch ]
     65: %gep2 = getelementptr inbounds i32, i32* %p, i32 %iv1
     70: %inc1 = add i32 %iv1, 1
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Thumb2/ |
D | t2sizereduction.mir |
     22: %lsr.iv1 = phi i32 [ %lsr.iv.next2, %for.body ], [ %x, %for.body.preheader ]
     25: %mul = mul nsw i32 %lsr.iv1, %sum.07
     27: %lsr.iv.next2 = add i32 %lsr.iv1, 1
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | fast-isel-trunc-kill-subreg.ll |
     22: %lsr.iv1 = phi i32 [ %lsr.iv.next2, %bb241 ], [ 0, %bb ]
     23: %lsr.iv.next2 = add nuw nsw i32 %lsr.iv1, 1
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopStrengthReduce/ |
D | 2011-12-19-PostincQuadratic.ll |
     14: ; CHECK: %lsr.iv1 = phi [121 x i32]*
     20: ; CHECK: %lsr.iv3 = phi [121 x i32]* [ %lsr.iv1, %for.body43.preheader ]
|
/external/llvm/test/Transforms/LoopUnroll/AArch64/ |
D | partial.ll |
     34: %iv1 = phi i32 [ 0, %entry ], [ %inc1, %loop1.latch ]
     50: %inc1 = add i32 %iv1, 1
|