Home
last modified time | relevance | path

Searched refs:LSR (Results 1 – 25 of 134) sorted by relevance

123456

/external/llvm/test/Transforms/LoopStrengthReduce/ARM/
D2012-06-15-lsr-noaddrmode.ll3 ; LSR should only check for valid address modes when the IV user is a
11 ; LSR before the fix:
13 ; LSR Use: Kind=Special, Offsets={0}, all-fixups-outside-loop, widest fixup type: i32
15 ; LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i32
17 ; LSR Use: Kind=Address of i32, Offsets={0}, widest fixup type: i32*
19 ; LSR Use: Kind=Address of i32, Offsets={0}, widest fixup type: i32*
21 ; LSR Use: Kind=Special, Offsets={0}, all-fixups-outside-loop, widest fixup type: i32
24 ; LSR after the fix:
26 ; LSR Use: Kind=Special, Offsets={0}, all-fixups-outside-loop, widest fixup type: i32
28 ; LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i32
[all …]
/external/libhevc/decoder/arm/
Dihevcd_fmt_conv_420sp_to_420p.s147 MOV r9,r9,LSR #1 @// height/2
148 @ MOV r8,r8,LSR #1 @// Width/2
151 MOV r11,r8,LSR #1
184 SUB r3,r3,r6,LSR #1
185 SUB r5,r5,r6,LSR #1
Dihevcd_fmt_conv_420sp_to_420sp.s151 MOV r9,r9,LSR #1 @// height/2
152 @ MOV r8,r8,LSR #1 @// Width/2
Dihevcd_fmt_conv_420sp_to_rgba8888.s144 @, LSR #1 @// u offset
145 @SUB R12,R8,R3, LSR #1 @// v offset
149 MOV R5,R5, LSR #1 @// height_cnt = height / 16
162 MOV R6,R3, LSR #4 @// width_cnt = width / 16
/external/libhevc/decoder/arm64/
Dihevcd_fmt_conv_420sp_to_420p.s148 LSR x9, x9, #1 //// height/2
152 LSR x11, x8, #1
187 SUB x3,x3,x6,LSR #1
188 SUB x5,x5,x6,LSR #1
/external/llvm/test/Transforms/LoopStrengthReduce/X86/
D2011-11-29-postincphi.ll4 ; LSR first expands %t3 to %t2 in %phi
5 ; LSR then expands %t2 in %phi into two decrements, one on each loop exit.
12 ; Check that LSR did something close to the behavior at the time of the bug.
D2011-12-04-loserreg.ll3 ; Test LSR's ability to prune formulae that refer to nonexistent
6 ; Unable to reduce this case further because it requires LSR to exceed
9 ; We really just want to ensure that LSR can process this loop without
12 ; verify that LSR removes it.
D2011-07-20-DoubleIV.ll3 ; Test LSR's OptimizeShadowIV. Handle a floating-point IV with a
11 ; First, make sure LSR doesn't crash on an empty IVUsers list.
/external/tremolo/Tremolo/
Ddpen.s99 MOVS r0, r0, LSR #1 @ r0 = lok>>1 C = bottom bit
124 MOVS r0, r0, LSR #1 @ r0 = lok>>1 C = bottom bit
143 LDRB r14,[r12,r14,LSR #7] @ r14= t[chase+bit+1+(!bit || t[chase]&0x80)]
154 MOV r6, r6, LSR #1
156 MOVS r0, r0, LSR #1 @ r0 = lok>>1 C = bottom bit
181 MOVS r0, r0, LSR #1 @ r0 = lok>>1 C = bottom bit
201 ADC r12,r8, r14,LSR #15 @ r12= 1+((chase+bit)<<1)+(!bit || t[chase]&0x8000)
202 ADC r12,r12,r14,LSR #15 @ r12= t + (1+chase+bit+(!bit || t[chase]&0x8000))<<1
214 MOVS r0, r0, LSR #1 @ r0 = lok>>1 C = bottom bit
299 MOV r8, r8, LSR r2 @ r8 = entry>>s->q_bits
[all …]
DbitwiseARM.s57 MOV r10,r10,LSR r14 @ r10= ptr[0]>>(32-bitsLeftInWord)
81 MOV r10,r10,LSR r14 @ r10= first bitsLeftInWord bits
155 ADD r6,r10,r10,LSR #3 @ r6 = pointer to data
248 MOV r10,r10,LSR r14 @ r10= ptr[0]>>(32-bitsLeftInWord)
273 MOV r10,r10,LSR r14 @ r10= first bitsLeftInWord bits
395 ADD r6,r10,r10,LSR #3 @ r6 = pointer to data
/external/llvm/test/Transforms/LoopStrengthReduce/
D2012-01-02-nopreheader.ll9 ; LSR should convert the inner loop (bb7.us) IV (j.01.us) into float*.
13 ; Currently, LSR won't kick in on such loops.
55 ; that LSR picks. We must detect that %bb8.preheader does not have a
56 ; preheader and avoid performing LSR on %bb7.
Dphi_node_update_multiple_preds.ll2 ; LSR should not crash on this.
Dephemeral.ll7 ; // i * a + b is ephemeral and shouldn't be promoted by LSR
Dshare_code_in_preheader.ll2 ; LSR should not make two copies of the Q*L expression in the preheader!
/external/llvm/lib/Target/AArch64/MCTargetDesc/
DAArch64AddressingModes.h35 LSR, enumerator
56 case AArch64_AM::LSR: return "lsr"; in getShiftExtendName()
77 case 1: return AArch64_AM::LSR; in getShiftType()
105 case AArch64_AM::LSR: STEnc = 1; break; in getShifterImm()
/external/v8/src/ic/arm/
Dstub-cache-arm.cc149 __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift)); in GenerateProbe()
161 __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift)); in GenerateProbe()
/external/llvm/test/CodeGen/X86/
Dnegative-stride-fptosi-user.ll3 ; LSR previously eliminated the sitofp by introducing an induction
Dloop-hoist.ll1 ; LSR should hoist the load from the "Arr" stub out of the loop.
Dlsr-nonaffine.ll3 ; LSR should leave non-affine expressions alone because it currently
Dlsr-delayed-fold.ll4 ; but LSR should tolerate this.
53 ; LSR ends up going into conservative pruning mode; don't prune the solution
136 ; LSR needs to remember inserted instructions even in postinc mode, because
Doptimize-max-2.ll7 ; LSR's OptimizeMax function shouldn't try to eliminate this max, because
Dlsr-wrap.ll3 ; LSR would like to use a single IV for both of these, however it's
Dlsr-overflow.ll4 ; The comparison uses the pre-inc value, which could lead LSR to
/external/llvm/test/Transforms/LoopStrengthReduce/AArch64/
Dlsr-memcpy.ll3 ; Prevent LSR of doing poor choice that cannot be folded in addressing mode
/external/v8/test/cctest/
Dtest-disasm-arm64.cc337 COMPARE(add(w12, w13, Operand(w14, LSR, 3)), "add w12, w13, w14, lsr #3"); in TEST_()
338 COMPARE(add(x15, x16, Operand(x17, LSR, 4)), "add x15, x16, x17, lsr #4"); in TEST_()
363 COMPARE(sub(w12, w13, Operand(w14, LSR, 3)), "sub w12, w13, w14, lsr #3"); in TEST_()
364 COMPARE(sub(x15, x16, Operand(x17, LSR, 4)), "sub x15, x16, x17, lsr #4"); in TEST_()
370 COMPARE(neg(lr, Operand(x0, LSR, 62)), "neg lr, x0, lsr #62"); in TEST_()
714 COMPARE(and_(w6, w7, Operand(w8, LSR, 2)), "and w6, w7, w8, lsr #2"); in TEST_()
720 COMPARE(bic(w21, w22, Operand(w23, LSR, 6)), "bic w21, w22, w23, lsr #6"); in TEST_()
726 COMPARE(orr(w6, w7, Operand(w8, LSR, 10)), "orr w6, w7, w8, lsr #10"); in TEST_()
732 COMPARE(orn(w21, w22, Operand(w23, LSR, 14)), "orn w21, w22, w23, lsr #14"); in TEST_()
738 COMPARE(eor(w6, w7, Operand(w8, LSR, 18)), "eor w6, w7, w8, lsr #18"); in TEST_()
[all …]

123456