1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "bounds_check_elimination.h"
18 
19 #include <limits>
20 
21 #include "base/arena_containers.h"
22 #include "induction_var_range.h"
23 #include "side_effects_analysis.h"
24 #include "nodes.h"
25 
26 namespace art {
27 
28 class MonotonicValueRange;
29 
30 /**
31  * A value bound is represented as a pair of value and constant,
32  * e.g. array.length - 1.
33  */
34 class ValueBound : public ValueObject {
35  public:
36   ValueBound(HInstruction* instruction, int32_t constant) {
37     if (instruction != nullptr && instruction->IsIntConstant()) {
38       // Normalize ValueBound with constant instruction.
39       int32_t instr_const = instruction->AsIntConstant()->GetValue();
40       if (!WouldAddOverflowOrUnderflow(instr_const, constant)) {
41         instruction_ = nullptr;
42         constant_ = instr_const + constant;
43         return;
44       }
45     }
46     instruction_ = instruction;
47     constant_ = constant;
48   }
49 
50   // Return whether (left + right) overflows or underflows.
51   static bool WouldAddOverflowOrUnderflow(int32_t left, int32_t right) {
52     if (right == 0) {
53       return false;
54     }
55     if ((right > 0) && (left <= (std::numeric_limits<int32_t>::max() - right))) {
56       // No overflow.
57       return false;
58     }
59     if ((right < 0) && (left >= (std::numeric_limits<int32_t>::min() - right))) {
60       // No underflow.
61       return false;
62     }
63     return true;
64   }
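  // For illustration (not part of the pass): the test above is arranged so the
  // sum itself is never computed in int32 arithmetic. For example:
  //
  //   WouldAddOverflowOrUnderflow(std::numeric_limits<int32_t>::max(), 1)   // true, overflow
  //   WouldAddOverflowOrUnderflow(std::numeric_limits<int32_t>::min(), -1)  // true, underflow
  //   WouldAddOverflowOrUnderflow(-5, 3)                                    // false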
65 
66   // Return true if instruction can be expressed as "left_instruction + right_constant".
67   static bool IsAddOrSubAConstant(HInstruction* instruction,
68                                   /* out */ HInstruction** left_instruction,
69                                   /* out */ int32_t* right_constant) {
70     HInstruction* left_so_far = nullptr;
71     int32_t right_so_far = 0;
72     while (instruction->IsAdd() || instruction->IsSub()) {
73       HBinaryOperation* bin_op = instruction->AsBinaryOperation();
74       HInstruction* left = bin_op->GetLeft();
75       HInstruction* right = bin_op->GetRight();
76       if (right->IsIntConstant()) {
77         int32_t v = right->AsIntConstant()->GetValue();
78         int32_t c = instruction->IsAdd() ? v : -v;
79         if (!WouldAddOverflowOrUnderflow(right_so_far, c)) {
80           instruction = left;
81           left_so_far = left;
82           right_so_far += c;
83           continue;
84         }
85       }
86       break;
87     }
88     // Return result: either false and "null+0" or true and "instr+constant".
89     *left_instruction = left_so_far;
90     *right_constant = right_so_far;
91     return left_so_far != nullptr;
92   }
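  // Illustrative sketch (hypothetical operand name `i`): for an expression tree
  // such as HSub(HAdd(i, 2), 3), the loop above peels the Sub and then the Add,
  // accumulating the constants, so the call reports
  //
  //   *left_instruction = i;    // the non-constant operand
  //   *right_constant   = -1;   // 2 + (-3)
  //
  // and returns true; with no constant right-hand side anywhere it returns
  // false together with "null + 0".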
93 
94   // Expresses any instruction as a value bound.
95   static ValueBound AsValueBound(HInstruction* instruction) {
96     if (instruction->IsIntConstant()) {
97       return ValueBound(nullptr, instruction->AsIntConstant()->GetValue());
98     }
99     HInstruction *left;
100     int32_t right;
101     if (IsAddOrSubAConstant(instruction, &left, &right)) {
102       return ValueBound(left, right);
103     }
104     return ValueBound(instruction, 0);
105   }
106 
107   // Try to detect useful value bound format from an instruction, e.g.
108   // a constant or array length related value.
109   static ValueBound DetectValueBoundFromValue(HInstruction* instruction, /* out */ bool* found) {
110     DCHECK(instruction != nullptr);
111     if (instruction->IsIntConstant()) {
112       *found = true;
113       return ValueBound(nullptr, instruction->AsIntConstant()->GetValue());
114     }
115 
116     if (instruction->IsArrayLength()) {
117       *found = true;
118       return ValueBound(instruction, 0);
119     }
120     // Try to detect (array.length + c) format.
121     HInstruction *left;
122     int32_t right;
123     if (IsAddOrSubAConstant(instruction, &left, &right)) {
124       if (left->IsArrayLength()) {
125         *found = true;
126         return ValueBound(left, right);
127       }
128     }
129 
130     // No useful bound detected.
131     *found = false;
132     return ValueBound::Max();
133   }
134 
135   HInstruction* GetInstruction() const { return instruction_; }
136   int32_t GetConstant() const { return constant_; }
137 
138   bool IsRelatedToArrayLength() const {
139     // Some bounds are created with HNewArray* as the instruction instead
140     // of HArrayLength*. They are treated the same.
141     return (instruction_ != nullptr) &&
142            (instruction_->IsArrayLength() || instruction_->IsNewArray());
143   }
144 
145   bool IsConstant() const {
146     return instruction_ == nullptr;
147   }
148 
149   static ValueBound Min() { return ValueBound(nullptr, std::numeric_limits<int32_t>::min()); }
150   static ValueBound Max() { return ValueBound(nullptr, std::numeric_limits<int32_t>::max()); }
151 
152   bool Equals(ValueBound bound) const {
153     return instruction_ == bound.instruction_ && constant_ == bound.constant_;
154   }
155 
156   /*
157    * Hunt "under the hood" of array lengths (leading to array references),
158    * null checks (also leading to array references), and new arrays
159    * (leading to the actual length). This makes it more likely related
160    * instructions become actually comparable.
161    */
162   static HInstruction* HuntForDeclaration(HInstruction* instruction) {
163     while (instruction->IsArrayLength() ||
164            instruction->IsNullCheck() ||
165            instruction->IsNewArray()) {
166       instruction = instruction->InputAt(0);
167     }
168     return instruction;
169   }
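  // For example (illustration only), HArrayLength(HNullCheck(a)) and
  // HNullCheck(a) both hunt down to the same array reference `a`, which lets
  // Equal() below treat their lengths as lengths of the same array.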
170 
171   static bool Equal(HInstruction* instruction1, HInstruction* instruction2) {
172     if (instruction1 == instruction2) {
173       return true;
174     }
175     if (instruction1 == nullptr || instruction2 == nullptr) {
176       return false;
177     }
178     instruction1 = HuntForDeclaration(instruction1);
179     instruction2 = HuntForDeclaration(instruction2);
180     return instruction1 == instruction2;
181   }
182 
183   // Returns whether it's certain that this->bound >= `bound`.
184   bool GreaterThanOrEqualTo(ValueBound bound) const {
185     if (Equal(instruction_, bound.instruction_)) {
186       return constant_ >= bound.constant_;
187     }
188     // Not comparable. Just return false.
189     return false;
190   }
191 
192   // Returns whether it's certain that this->bound <= `bound`.
193   bool LessThanOrEqualTo(ValueBound bound) const {
194     if (Equal(instruction_, bound.instruction_)) {
195       return constant_ <= bound.constant_;
196     }
197     // Not comparable. Just return false.
198     return false;
199   }
200 
201   // Returns whether it's certain that this->bound > `bound`.
202   bool GreaterThan(ValueBound bound) const {
203     if (Equal(instruction_, bound.instruction_)) {
204       return constant_ > bound.constant_;
205     }
206     // Not comparable. Just return false.
207     return false;
208   }
209 
210   // Returns whether it's certain that this->bound < `bound`.
211   bool LessThan(ValueBound bound) const {
212     if (Equal(instruction_, bound.instruction_)) {
213       return constant_ < bound.constant_;
214     }
215     // Not comparable. Just return false.
216     return false;
217   }
218 
219   // Try to narrow the lower bound. Returns the greater of the two if possible.
220   // Pick one if they are not comparable.
221   static ValueBound NarrowLowerBound(ValueBound bound1, ValueBound bound2) {
222     if (bound1.GreaterThanOrEqualTo(bound2)) {
223       return bound1;
224     }
225     if (bound2.GreaterThanOrEqualTo(bound1)) {
226       return bound2;
227     }
228 
229     // Not comparable. Just pick one. We may lose some info, but that's ok.
230     // Favor constant as lower bound.
231     return bound1.IsConstant() ? bound1 : bound2;
232   }
233 
234   // Try to narrow the upper bound. Returns the lesser of the two if possible.
235   // Pick one if they are not comparable.
236   static ValueBound NarrowUpperBound(ValueBound bound1, ValueBound bound2) {
237     if (bound1.LessThanOrEqualTo(bound2)) {
238       return bound1;
239     }
240     if (bound2.LessThanOrEqualTo(bound1)) {
241       return bound2;
242     }
243 
244     // Not comparable. Just pick one. We may lose some info, but that's ok.
245     // Favor array length as upper bound.
246     return bound1.IsRelatedToArrayLength() ? bound1 : bound2;
247   }
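  // A worked case for the two helpers above (illustration only): narrowing the
  // lower bounds {0} and {i} keeps the constant 0 (constants are favored as
  // lower bounds), while narrowing the upper bounds {array.length - 1} and
  // {Max()} keeps array.length - 1 (array lengths are favored as upper bounds).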
248 
249   // Add a constant to a ValueBound.
250   // `overflow` or `underflow` will return whether the resulting bound may
251   // overflow or underflow an int.
252   ValueBound Add(int32_t c, /* out */ bool* overflow, /* out */ bool* underflow) const {
253     *overflow = *underflow = false;
254     if (c == 0) {
255       return *this;
256     }
257 
258     int32_t new_constant;
259     if (c > 0) {
260       if (constant_ > (std::numeric_limits<int32_t>::max() - c)) {
261         *overflow = true;
262         return Max();
263       }
264 
265       new_constant = constant_ + c;
266       // (array.length + non-positive-constant) won't overflow an int.
267       if (IsConstant() || (IsRelatedToArrayLength() && new_constant <= 0)) {
268         return ValueBound(instruction_, new_constant);
269       }
270       // Be conservative.
271       *overflow = true;
272       return Max();
273     } else {
274       if (constant_ < (std::numeric_limits<int32_t>::min() - c)) {
275         *underflow = true;
276         return Min();
277       }
278 
279       new_constant = constant_ + c;
280       // Regardless of the value new_constant, (array.length+new_constant) will
281       // never underflow since array.length is no less than 0.
282       if (IsConstant() || IsRelatedToArrayLength()) {
283         return ValueBound(instruction_, new_constant);
284       }
285       // Be conservative.
286       *underflow = true;
287       return Min();
288     }
289   }
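  // A worked case of the conservatism above (illustration only): adding +1 to
  // the bound (array.length - 1) yields (array.length + 0), which is safe, but
  // adding +1 to (array.length + 0) reports *overflow and returns Max(), since
  // array.length itself may already be the maximum int value.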
290 
291  private:
292   HInstruction* instruction_;
293   int32_t constant_;
294 };
295 
296 /**
297  * Represent a range of lower bound and upper bound, both being inclusive.
298  * Currently a ValueRange may be generated as a result of the following:
299  * comparisons related to array bounds, array bounds check, add/sub on top
300  * of an existing value range, NewArray or a loop phi corresponding to an
301  * incrementing/decrementing array index (MonotonicValueRange).
302  */
303 class ValueRange : public ArenaObject<kArenaAllocBoundsCheckElimination> {
304  public:
305   ValueRange(ArenaAllocator* allocator, ValueBound lower, ValueBound upper)
306       : allocator_(allocator), lower_(lower), upper_(upper) {}
307 
308   virtual ~ValueRange() {}
309 
310   virtual MonotonicValueRange* AsMonotonicValueRange() { return nullptr; }
311   bool IsMonotonicValueRange() {
312     return AsMonotonicValueRange() != nullptr;
313   }
314 
315   ArenaAllocator* GetAllocator() const { return allocator_; }
316   ValueBound GetLower() const { return lower_; }
317   ValueBound GetUpper() const { return upper_; }
318 
319   bool IsConstantValueRange() { return lower_.IsConstant() && upper_.IsConstant(); }
320 
321   // If it's certain that this value range fits in other_range.
322   virtual bool FitsIn(ValueRange* other_range) const {
323     if (other_range == nullptr) {
324       return true;
325     }
326     DCHECK(!other_range->IsMonotonicValueRange());
327     return lower_.GreaterThanOrEqualTo(other_range->lower_) &&
328            upper_.LessThanOrEqualTo(other_range->upper_);
329   }
330 
331   // Returns the intersection of this and range.
332   // If it's not possible to do intersection because some
333   // bounds are not comparable, it's ok to pick either bound.
334   virtual ValueRange* Narrow(ValueRange* range) {
335     if (range == nullptr) {
336       return this;
337     }
338 
339     if (range->IsMonotonicValueRange()) {
340       return this;
341     }
342 
343     return new (allocator_) ValueRange(
344         allocator_,
345         ValueBound::NarrowLowerBound(lower_, range->lower_),
346         ValueBound::NarrowUpperBound(upper_, range->upper_));
347   }
348 
349   // Shift a range by a constant.
350   ValueRange* Add(int32_t constant) const {
351     bool overflow, underflow;
352     ValueBound lower = lower_.Add(constant, &overflow, &underflow);
353     if (underflow) {
354       // Lower bound underflow will wrap around to positive values
355       // and invalidate the upper bound.
356       return nullptr;
357     }
358     ValueBound upper = upper_.Add(constant, &overflow, &underflow);
359     if (overflow) {
360       // Upper bound overflow will wrap around to negative values
361       // and invalidate the lower bound.
362       return nullptr;
363     }
364     return new (allocator_) ValueRange(allocator_, lower, upper);
365   }
366 
367  private:
368   ArenaAllocator* const allocator_;
369   const ValueBound lower_;  // inclusive
370   const ValueBound upper_;  // inclusive
371 
372   DISALLOW_COPY_AND_ASSIGN(ValueRange);
373 };
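// A small usage sketch (illustration only; `allocator` and `array_length` are
// assumed to be the arena allocator and an HArrayLength instruction):
//
//   // i >= 0, proven e.g. on the true branch of "if (i >= 0)".
//   ValueRange* r1 = new (allocator) ValueRange(
//       allocator, ValueBound(nullptr, 0), ValueBound::Max());
//   // i <= array.length - 1, proven e.g. on the true branch of "if (i < a.length)".
//   ValueRange* r2 = new (allocator) ValueRange(
//       allocator, ValueBound::Min(), ValueBound(array_length, -1));
//   ValueRange* r = r1->Narrow(r2);  // [0, array.length - 1]
//
// A range like `r` is exactly what VisitBoundsCheck() below compares against
// the array's [0, array.length - 1] requirement.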
374 
375 /**
376  * A monotonically incrementing/decrementing value range, e.g.
377  * the variable i in "for (int i=0; i<array.length; i++)".
378  * Special care needs to be taken to account for overflow/underflow
379  * of such value ranges.
380  */
381 class MonotonicValueRange : public ValueRange {
382  public:
383   MonotonicValueRange(ArenaAllocator* allocator,
384                       HPhi* induction_variable,
385                       HInstruction* initial,
386                       int32_t increment,
387                       ValueBound bound)
388       // To be conservative, give it full range [Min(), Max()] in case it's
389       // used as a regular value range, due to possible overflow/underflow.
390       : ValueRange(allocator, ValueBound::Min(), ValueBound::Max()),
391         induction_variable_(induction_variable),
392         initial_(initial),
393         increment_(increment),
394         bound_(bound) {}
395 
396   virtual ~MonotonicValueRange() {}
397 
398   int32_t GetIncrement() const { return increment_; }
399   ValueBound GetBound() const { return bound_; }
400   HBasicBlock* GetLoopHeader() const {
401     DCHECK(induction_variable_->GetBlock()->IsLoopHeader());
402     return induction_variable_->GetBlock();
403   }
404 
405   MonotonicValueRange* AsMonotonicValueRange() OVERRIDE { return this; }
406 
407   // If it's certain that this value range fits in other_range.
408   bool FitsIn(ValueRange* other_range) const OVERRIDE {
409     if (other_range == nullptr) {
410       return true;
411     }
412     DCHECK(!other_range->IsMonotonicValueRange());
413     return false;
414   }
415 
416   // Try to narrow this MonotonicValueRange given another range.
417   // Ideally it will return a normal ValueRange. But due to
418   // possible overflow/underflow, that may not be possible.
419   ValueRange* Narrow(ValueRange* range) OVERRIDE {
420     if (range == nullptr) {
421       return this;
422     }
423     DCHECK(!range->IsMonotonicValueRange());
424 
425     if (increment_ > 0) {
426       // Monotonically increasing.
427       ValueBound lower = ValueBound::NarrowLowerBound(bound_, range->GetLower());
428       if (!lower.IsConstant() || lower.GetConstant() == std::numeric_limits<int32_t>::min()) {
429         // Lower bound isn't useful. Leave it to deoptimization.
430         return this;
431       }
432 
433       // We currently conservatively assume max array length is Max().
434       // If we can make assumptions about the max array length, e.g. due to the max heap size,
435       // divided by the element size (such as 4 bytes for each integer array), we can
436       // lower this number and rule out some possible overflows.
437       int32_t max_array_len = std::numeric_limits<int32_t>::max();
438 
439       // Maximum possible integer value of range's upper bound.
440       int32_t upper = std::numeric_limits<int32_t>::max();
441       // Try to lower upper.
442       ValueBound upper_bound = range->GetUpper();
443       if (upper_bound.IsConstant()) {
444         upper = upper_bound.GetConstant();
445       } else if (upper_bound.IsRelatedToArrayLength() && upper_bound.GetConstant() <= 0) {
446         // Normal case. e.g. <= array.length - 1.
447         upper = max_array_len + upper_bound.GetConstant();
448       }
449 
450     // If, for the last number in the sequence initial_, initial_ + increment_,
451     // initial_ + 2 x increment_, ... that is <= upper, we can prove that
452     // (last_num_in_sequence + increment_) does not trigger overflow, then this
453     // MonotonicValueRange is narrowed to a normal value range.
454 
455       // Be conservative first, assume last number in the sequence hits upper.
456       int32_t last_num_in_sequence = upper;
457       if (initial_->IsIntConstant()) {
458         int32_t initial_constant = initial_->AsIntConstant()->GetValue();
459         if (upper <= initial_constant) {
460           last_num_in_sequence = upper;
461         } else {
462         // Cast to int64_t for the subtraction part to avoid int32_t overflow.
463           last_num_in_sequence = initial_constant +
464               ((int64_t)upper - (int64_t)initial_constant) / increment_ * increment_;
465         }
466       }
467       if (last_num_in_sequence <= (std::numeric_limits<int32_t>::max() - increment_)) {
468         // No overflow. The sequence will be stopped by the upper bound test as expected.
469         return new (GetAllocator()) ValueRange(GetAllocator(), lower, range->GetUpper());
470       }
471 
472       // There might be overflow. Give up narrowing.
473       return this;
474     } else {
475       DCHECK_NE(increment_, 0);
476       // Monotonically decreasing.
477       ValueBound upper = ValueBound::NarrowUpperBound(bound_, range->GetUpper());
478       if ((!upper.IsConstant() || upper.GetConstant() == std::numeric_limits<int32_t>::max()) &&
479           !upper.IsRelatedToArrayLength()) {
480         // Upper bound isn't useful. Leave it to deoptimization.
481         return this;
482       }
483 
484       // Need to take care of underflow. Try to prove underflow won't happen
485       // for common cases.
486       if (range->GetLower().IsConstant()) {
487         int32_t constant = range->GetLower().GetConstant();
488         if (constant >= (std::numeric_limits<int32_t>::min() - increment_)) {
489           return new (GetAllocator()) ValueRange(GetAllocator(), range->GetLower(), upper);
490         }
491       }
492 
493       // For non-constant lower bound, just assume might be underflow. Give up narrowing.
494       return this;
495     }
496   }
497 
498  private:
499   HPhi* const induction_variable_;  // Induction variable for this monotonic value range.
500   HInstruction* const initial_;     // Initial value.
501   const int32_t increment_;         // Increment for each loop iteration.
502   const ValueBound bound_;          // Additional value bound info for initial_.
503 
504   DISALLOW_COPY_AND_ASSIGN(MonotonicValueRange);
505 };
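// A note on the overflow concern handled in Narrow() above (illustration only):
// for a loop like "for (int i = 0; i <= n; i += 2)" where n is only known to be
// at most Max(), the last in-range value of i could be Max() - 1, and adding
// the increment would wrap around instead of terminating the loop, so the
// monotonic range is left as-is rather than narrowed to [0, n].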
506 
507 class BCEVisitor : public HGraphVisitor {
508  public:
509   // The least number of bounds checks that should be eliminated by triggering
510   // the deoptimization technique.
511   static constexpr size_t kThresholdForAddingDeoptimize = 2;
512 
513   // Very large lengths are considered an anomaly. This is a threshold beyond which we don't
514   // bother to apply the deoptimization technique since it's likely, or sometimes certain,
515   // an AIOOBE will be thrown.
516   static constexpr uint32_t kMaxLengthForAddingDeoptimize =
517       std::numeric_limits<int32_t>::max() - 1024 * 1024;
518 
519   // Added blocks for loop body entry test.
520   bool IsAddedBlock(HBasicBlock* block) const {
521     return block->GetBlockId() >= initial_block_size_;
522   }
523 
524   BCEVisitor(HGraph* graph,
525              const SideEffectsAnalysis& side_effects,
526              HInductionVarAnalysis* induction_analysis)
527       : HGraphVisitor(graph),
528         maps_(graph->GetBlocks().size(),
529               ArenaSafeMap<int, ValueRange*>(
530                   std::less<int>(),
531                   graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
532               graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
533         first_index_bounds_check_map_(
534             std::less<int>(),
535             graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
536         dynamic_bce_standby_(
537             graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
538         record_dynamic_bce_standby_(true),
539         early_exit_loop_(
540             std::less<uint32_t>(),
541             graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
542         taken_test_loop_(
543             std::less<uint32_t>(),
544             graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
545         finite_loop_(graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
546         has_dom_based_dynamic_bce_(false),
547         initial_block_size_(graph->GetBlocks().size()),
548         side_effects_(side_effects),
549         induction_range_(induction_analysis) {}
550 
551   void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
552     DCHECK(!IsAddedBlock(block));
553     first_index_bounds_check_map_.clear();
554     HGraphVisitor::VisitBasicBlock(block);
555     // We should never deoptimize from an osr method, otherwise we might wrongly optimize
556     // code dominated by the deoptimization.
557     if (!GetGraph()->IsCompilingOsr()) {
558       AddComparesWithDeoptimization(block);
559     }
560   }
561 
562   void Finish() {
563     // Retry dynamic bce candidates on standby that are still in the graph.
564     record_dynamic_bce_standby_ = false;
565     for (HBoundsCheck* bounds_check : dynamic_bce_standby_) {
566       if (bounds_check->IsInBlock()) {
567         TryDynamicBCE(bounds_check);
568       }
569     }
570 
571     // Preserve SSA structure which may have been broken by adding one or more
572     // new taken-test structures (see TransformLoopForDeoptimizationIfNeeded()).
573     InsertPhiNodes();
574 
575     // Clear the loop data structures.
576     early_exit_loop_.clear();
577     taken_test_loop_.clear();
578     finite_loop_.clear();
579     dynamic_bce_standby_.clear();
580   }
581 
582  private:
583   // Return the map of proven value ranges at the beginning of a basic block.
584   ArenaSafeMap<int, ValueRange*>* GetValueRangeMap(HBasicBlock* basic_block) {
585     if (IsAddedBlock(basic_block)) {
586       // Added blocks don't keep value ranges.
587       return nullptr;
588     }
589     return &maps_[basic_block->GetBlockId()];
590   }
591 
592   // Traverse up the dominator tree to look for value range info.
593   ValueRange* LookupValueRange(HInstruction* instruction, HBasicBlock* basic_block) {
594     while (basic_block != nullptr) {
595       ArenaSafeMap<int, ValueRange*>* map = GetValueRangeMap(basic_block);
596       if (map != nullptr) {
597         if (map->find(instruction->GetId()) != map->end()) {
598           return map->Get(instruction->GetId());
599         }
600       } else {
601         DCHECK(IsAddedBlock(basic_block));
602       }
603       basic_block = basic_block->GetDominator();
604     }
605     // Didn't find any.
606     return nullptr;
607   }
608 
609   // Helper method to assign a new range to an instruction in given basic block.
610   void AssignRange(HBasicBlock* basic_block, HInstruction* instruction, ValueRange* range) {
611     GetValueRangeMap(basic_block)->Overwrite(instruction->GetId(), range);
612   }
613 
614   // Narrow the value range of `instruction` at the end of `basic_block` with `range`,
615   // and push the narrowed value range to `successor`.
616   void ApplyRangeFromComparison(HInstruction* instruction, HBasicBlock* basic_block,
617                                 HBasicBlock* successor, ValueRange* range) {
618     ValueRange* existing_range = LookupValueRange(instruction, basic_block);
619     if (existing_range == nullptr) {
620       if (range != nullptr) {
621         AssignRange(successor, instruction, range);
622       }
623       return;
624     }
625     if (existing_range->IsMonotonicValueRange()) {
626       DCHECK(instruction->IsLoopHeaderPhi());
627       // Make sure the comparison is in the loop header so each increment is
628       // checked with a comparison.
629       if (instruction->GetBlock() != basic_block) {
630         return;
631       }
632     }
633     AssignRange(successor, instruction, existing_range->Narrow(range));
634   }
635 
636   // Special case that we may simultaneously narrow two MonotonicValueRange's to
637   // regular value ranges.
638   void HandleIfBetweenTwoMonotonicValueRanges(HIf* instruction,
639                                               HInstruction* left,
640                                               HInstruction* right,
641                                               IfCondition cond,
642                                               MonotonicValueRange* left_range,
643                                               MonotonicValueRange* right_range) {
644     DCHECK(left->IsLoopHeaderPhi());
645     DCHECK(right->IsLoopHeaderPhi());
646     if (instruction->GetBlock() != left->GetBlock()) {
647       // Comparison needs to be in loop header to make sure it's done after each
648       // increment/decrement.
649       return;
650     }
651 
652     // Handle common cases which also don't have overflow/underflow concerns.
653     if (left_range->GetIncrement() == 1 &&
654         left_range->GetBound().IsConstant() &&
655         right_range->GetIncrement() == -1 &&
656         right_range->GetBound().IsRelatedToArrayLength() &&
657         right_range->GetBound().GetConstant() < 0) {
658       HBasicBlock* successor = nullptr;
659       int32_t left_compensation = 0;
660       int32_t right_compensation = 0;
661       if (cond == kCondLT) {
662         left_compensation = -1;
663         right_compensation = 1;
664         successor = instruction->IfTrueSuccessor();
665       } else if (cond == kCondLE) {
666         successor = instruction->IfTrueSuccessor();
667       } else if (cond == kCondGT) {
668         successor = instruction->IfFalseSuccessor();
669       } else if (cond == kCondGE) {
670         left_compensation = -1;
671         right_compensation = 1;
672         successor = instruction->IfFalseSuccessor();
673       } else {
674         // We don't handle '=='/'!=' test in case left and right can cross and
675         // miss each other.
676         return;
677       }
678 
679       if (successor != nullptr) {
680         bool overflow;
681         bool underflow;
682         ValueRange* new_left_range = new (GetGraph()->GetArena()) ValueRange(
683             GetGraph()->GetArena(),
684             left_range->GetBound(),
685             right_range->GetBound().Add(left_compensation, &overflow, &underflow));
686         if (!overflow && !underflow) {
687           ApplyRangeFromComparison(left, instruction->GetBlock(), successor,
688                                    new_left_range);
689         }
690 
691         ValueRange* new_right_range = new (GetGraph()->GetArena()) ValueRange(
692             GetGraph()->GetArena(),
693             left_range->GetBound().Add(right_compensation, &overflow, &underflow),
694             right_range->GetBound());
695         if (!overflow && !underflow) {
696           ApplyRangeFromComparison(right, instruction->GetBlock(), successor,
697                                    new_right_range);
698         }
699       }
700     }
701   }
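  // The typical shape the special case above targets (illustration only) is a
  // two-pointer loop such as
  //
  //   for (int i = 0, j = a.length - 1; i < j; i++, j--) { ... }
  //
  // where i increases by 1 from a constant and j decreases by 1 from
  // (a.length - 1); on the taken branch both then receive regular ranges,
  // [0, a.length - 2] for i and [1, a.length - 1] for j.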
702 
703   // Handle "if (left cmp_cond right)".
704   void HandleIf(HIf* instruction, HInstruction* left, HInstruction* right, IfCondition cond) {
705     HBasicBlock* block = instruction->GetBlock();
706 
707     HBasicBlock* true_successor = instruction->IfTrueSuccessor();
708     // There should be no critical edge at this point.
709     DCHECK_EQ(true_successor->GetPredecessors().size(), 1u);
710 
711     HBasicBlock* false_successor = instruction->IfFalseSuccessor();
712     // There should be no critical edge at this point.
713     DCHECK_EQ(false_successor->GetPredecessors().size(), 1u);
714 
715     ValueRange* left_range = LookupValueRange(left, block);
716     MonotonicValueRange* left_monotonic_range = nullptr;
717     if (left_range != nullptr) {
718       left_monotonic_range = left_range->AsMonotonicValueRange();
719       if (left_monotonic_range != nullptr) {
720         HBasicBlock* loop_head = left_monotonic_range->GetLoopHeader();
721         if (instruction->GetBlock() != loop_head) {
722           // For monotonic value range, don't handle `instruction`
723           // if it's not defined in the loop header.
724           return;
725         }
726       }
727     }
728 
729     bool found;
730     ValueBound bound = ValueBound::DetectValueBoundFromValue(right, &found);
731     // Each comparison can establish a lower bound and an upper bound
732     // for the left hand side.
733     ValueBound lower = bound;
734     ValueBound upper = bound;
735     if (!found) {
736       // No constant or array.length+c format bound found.
737       // For i<j, we can still use j's upper bound as i's upper bound. Same for lower.
738       ValueRange* right_range = LookupValueRange(right, block);
739       if (right_range != nullptr) {
740         if (right_range->IsMonotonicValueRange()) {
741           if (left_range != nullptr && left_range->IsMonotonicValueRange()) {
742             HandleIfBetweenTwoMonotonicValueRanges(instruction, left, right, cond,
743                                                    left_range->AsMonotonicValueRange(),
744                                                    right_range->AsMonotonicValueRange());
745             return;
746           }
747         }
748         lower = right_range->GetLower();
749         upper = right_range->GetUpper();
750       } else {
751         lower = ValueBound::Min();
752         upper = ValueBound::Max();
753       }
754     }
755 
756     bool overflow, underflow;
757     if (cond == kCondLT || cond == kCondLE) {
758       if (!upper.Equals(ValueBound::Max())) {
759         int32_t compensation = (cond == kCondLT) ? -1 : 0;  // upper bound is inclusive
760         ValueBound new_upper = upper.Add(compensation, &overflow, &underflow);
761         if (overflow || underflow) {
762           return;
763         }
764         ValueRange* new_range = new (GetGraph()->GetArena())
765             ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
766         ApplyRangeFromComparison(left, block, true_successor, new_range);
767       }
768 
769       // array.length as a lower bound isn't considered useful.
770       if (!lower.Equals(ValueBound::Min()) && !lower.IsRelatedToArrayLength()) {
771         int32_t compensation = (cond == kCondLE) ? 1 : 0;  // lower bound is inclusive
772         ValueBound new_lower = lower.Add(compensation, &overflow, &underflow);
773         if (overflow || underflow) {
774           return;
775         }
776         ValueRange* new_range = new (GetGraph()->GetArena())
777             ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
778         ApplyRangeFromComparison(left, block, false_successor, new_range);
779       }
780     } else if (cond == kCondGT || cond == kCondGE) {
781       // array.length as a lower bound isn't considered useful.
782       if (!lower.Equals(ValueBound::Min()) && !lower.IsRelatedToArrayLength()) {
783         int32_t compensation = (cond == kCondGT) ? 1 : 0;  // lower bound is inclusive
784         ValueBound new_lower = lower.Add(compensation, &overflow, &underflow);
785         if (overflow || underflow) {
786           return;
787         }
788         ValueRange* new_range = new (GetGraph()->GetArena())
789             ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
790         ApplyRangeFromComparison(left, block, true_successor, new_range);
791       }
792 
793       if (!upper.Equals(ValueBound::Max())) {
794         int32_t compensation = (cond == kCondGE) ? -1 : 0;  // upper bound is inclusive
795         ValueBound new_upper = upper.Add(compensation, &overflow, &underflow);
796         if (overflow || underflow) {
797           return;
798         }
799         ValueRange* new_range = new (GetGraph()->GetArena())
800             ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
801         ApplyRangeFromComparison(left, block, false_successor, new_range);
802       }
803     }
804   }
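  // A worked case of the handling above (illustration only): for
  // "if (i < a.length)", the true successor learns i <= a.length - 1
  // (compensation -1 since the recorded upper bound is inclusive), while the
  // lower bound i >= a.length on the false successor is deliberately dropped,
  // because an array-length lower bound is not considered useful.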
805 
806   void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE {
807     HBasicBlock* block = bounds_check->GetBlock();
808     HInstruction* index = bounds_check->InputAt(0);
809     HInstruction* array_length = bounds_check->InputAt(1);
810     DCHECK(array_length->IsIntConstant() ||
811            array_length->IsArrayLength() ||
812            array_length->IsPhi());
813     bool try_dynamic_bce = true;
814 
815     // Analyze index range.
816     if (!index->IsIntConstant()) {
817       // Non-constant index.
818       ValueBound lower = ValueBound(nullptr, 0);        // constant 0
819       ValueBound upper = ValueBound(array_length, -1);  // array_length - 1
820       ValueRange array_range(GetGraph()->GetArena(), lower, upper);
821       // Try index range obtained by dominator-based analysis.
822       ValueRange* index_range = LookupValueRange(index, block);
823       if (index_range != nullptr && index_range->FitsIn(&array_range)) {
824         ReplaceInstruction(bounds_check, index);
825         return;
826       }
827       // Try index range obtained by induction variable analysis.
828       // Disables dynamic bce if OOB is certain.
829       if (InductionRangeFitsIn(&array_range, bounds_check, index, &try_dynamic_bce)) {
830         ReplaceInstruction(bounds_check, index);
831         return;
832       }
833     } else {
834       // Constant index.
835       int32_t constant = index->AsIntConstant()->GetValue();
836       if (constant < 0) {
837         // Will always throw exception.
838         return;
839       } else if (array_length->IsIntConstant()) {
840         if (constant < array_length->AsIntConstant()->GetValue()) {
841           ReplaceInstruction(bounds_check, index);
842         }
843         return;
844       }
845       // Analyze array length range.
846       DCHECK(array_length->IsArrayLength());
847       ValueRange* existing_range = LookupValueRange(array_length, block);
848       if (existing_range != nullptr) {
849         ValueBound lower = existing_range->GetLower();
850         DCHECK(lower.IsConstant());
851         if (constant < lower.GetConstant()) {
852           ReplaceInstruction(bounds_check, index);
853           return;
854         } else {
855           // Existing range isn't strong enough to eliminate the bounds check.
856           // Fall through to update the array_length range with info from this
857           // bounds check.
858         }
859       }
860       // Once we have an array access like 'array[5] = 1', we record array.length >= 6.
861       // We currently don't do it for non-constant index since a valid array[i] can't prove
862       // a valid array[i-1] yet due to the lower bound side.
863       if (constant == std::numeric_limits<int32_t>::max()) {
864         // Max() as an index will definitely throw AIOOBE.
865         return;
866       } else {
867         ValueBound lower = ValueBound(nullptr, constant + 1);
868         ValueBound upper = ValueBound::Max();
869         ValueRange* range = new (GetGraph()->GetArena())
870             ValueRange(GetGraph()->GetArena(), lower, upper);
871         AssignRange(block, array_length, range);
872       }
873     }
874 
875     // If static analysis fails, and OOB is not certain, try dynamic elimination.
876     if (try_dynamic_bce) {
877       // Try loop-based dynamic elimination.
878       if (TryDynamicBCE(bounds_check)) {
879         return;
880       }
881       // Prepare dominator-based dynamic elimination.
882       if (first_index_bounds_check_map_.find(array_length->GetId()) ==
883           first_index_bounds_check_map_.end()) {
884         // Remember the first bounds check against each array_length. That bounds check
885         // instruction has an associated HEnvironment where we may add an HDeoptimize
886         // to eliminate subsequent bounds checks against the same array_length.
887         first_index_bounds_check_map_.Put(array_length->GetId(), bounds_check);
888       }
889     }
890   }
891 
892   static bool HasSameInputAtBackEdges(HPhi* phi) {
893     DCHECK(phi->IsLoopHeaderPhi());
894     // Start with input 1. Input 0 is from the incoming block.
895     HInstruction* input1 = phi->InputAt(1);
896     DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
897         *phi->GetBlock()->GetPredecessors()[1]));
898     for (size_t i = 2, e = phi->InputCount(); i < e; ++i) {
899       DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
900           *phi->GetBlock()->GetPredecessors()[i]));
901       if (input1 != phi->InputAt(i)) {
902         return false;
903       }
904     }
905     return true;
906   }
907 
908   void VisitPhi(HPhi* phi) OVERRIDE {
909     if (phi->IsLoopHeaderPhi()
910         && (phi->GetType() == Primitive::kPrimInt)
911         && HasSameInputAtBackEdges(phi)) {
912       HInstruction* instruction = phi->InputAt(1);
913       HInstruction *left;
914       int32_t increment;
915       if (ValueBound::IsAddOrSubAConstant(instruction, &left, &increment)) {
916         if (left == phi) {
917           HInstruction* initial_value = phi->InputAt(0);
918           ValueRange* range = nullptr;
919           if (increment == 0) {
920             // Add constant 0. It's really a fixed value.
921             range = new (GetGraph()->GetArena()) ValueRange(
922                 GetGraph()->GetArena(),
923                 ValueBound(initial_value, 0),
924                 ValueBound(initial_value, 0));
925           } else {
926             // Monotonically increasing/decreasing.
927             bool found;
928             ValueBound bound = ValueBound::DetectValueBoundFromValue(
929                 initial_value, &found);
930             if (!found) {
931               // No constant or array.length+c bound found.
932               // For i=j, we can still use j's upper bound as i's upper bound.
933               // Same for lower.
934               ValueRange* initial_range = LookupValueRange(initial_value, phi->GetBlock());
935               if (initial_range != nullptr) {
936                 bound = increment > 0 ? initial_range->GetLower() :
937                                         initial_range->GetUpper();
938               } else {
939                 bound = increment > 0 ? ValueBound::Min() : ValueBound::Max();
940               }
941             }
942             range = new (GetGraph()->GetArena()) MonotonicValueRange(
943                 GetGraph()->GetArena(),
944                 phi,
945                 initial_value,
946                 increment,
947                 bound);
948           }
949           AssignRange(phi->GetBlock(), phi, range);
950         }
951       }
952     }
953   }
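  // A sketch of what the phi handling above produces (illustration only): for
  // the header phi of "for (int i = j; ...; i += 2)", the back-edge input is
  // HAdd(phi, 2), so `left` equals the phi and `increment` is 2, and the phi is
  // assigned a MonotonicValueRange with initial value j, increment 2, and
  // (for a plain variable j) j's lower bound or Min() as the additional bound.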
954 
955   void VisitIf(HIf* instruction) OVERRIDE {
956     if (instruction->InputAt(0)->IsCondition()) {
957       HCondition* cond = instruction->InputAt(0)->AsCondition();
958       IfCondition cmp = cond->GetCondition();
959       if (cmp == kCondGT || cmp == kCondGE ||
960           cmp == kCondLT || cmp == kCondLE) {
961         HInstruction* left = cond->GetLeft();
962         HInstruction* right = cond->GetRight();
963         HandleIf(instruction, left, right, cmp);
964       }
965     }
966   }
967 
968   void VisitAdd(HAdd* add) OVERRIDE {
969     HInstruction* right = add->GetRight();
970     if (right->IsIntConstant()) {
971       ValueRange* left_range = LookupValueRange(add->GetLeft(), add->GetBlock());
972       if (left_range == nullptr) {
973         return;
974       }
975       ValueRange* range = left_range->Add(right->AsIntConstant()->GetValue());
976       if (range != nullptr) {
977         AssignRange(add->GetBlock(), add, range);
978       }
979     }
980   }
981 
982   void VisitSub(HSub* sub) OVERRIDE {
983     HInstruction* left = sub->GetLeft();
984     HInstruction* right = sub->GetRight();
985     if (right->IsIntConstant()) {
986       ValueRange* left_range = LookupValueRange(left, sub->GetBlock());
987       if (left_range == nullptr) {
988         return;
989       }
990       ValueRange* range = left_range->Add(-right->AsIntConstant()->GetValue());
991       if (range != nullptr) {
992         AssignRange(sub->GetBlock(), sub, range);
993         return;
994       }
995     }
996 
997     // Here we are interested in the typical triangular case of nested loops,
998     // such as the inner loop 'for (int j=0; j<array.length-i; j++)' where i
999     // is the index of the outer loop. In this case, we know j is bounded by array.length-1.
1000 
1001     // Try to handle (array.length - i) or (array.length + c - i) format.
1002     HInstruction* left_of_left;  // left input of left.
1003     int32_t right_const = 0;
1004     if (ValueBound::IsAddOrSubAConstant(left, &left_of_left, &right_const)) {
1005       left = left_of_left;
1006     }
1007     // The value of left input of the sub equals (left + right_const).
1008 
1009     if (left->IsArrayLength()) {
1010       HInstruction* array_length = left->AsArrayLength();
1011       ValueRange* right_range = LookupValueRange(right, sub->GetBlock());
1012       if (right_range != nullptr) {
1013         ValueBound lower = right_range->GetLower();
1014         ValueBound upper = right_range->GetUpper();
1015         if (lower.IsConstant() && upper.IsRelatedToArrayLength()) {
1016           HInstruction* upper_inst = upper.GetInstruction();
1017           // Make sure it's the same array.
1018           if (ValueBound::Equal(array_length, upper_inst)) {
1019             int32_t c0 = right_const;
1020             int32_t c1 = lower.GetConstant();
1021             int32_t c2 = upper.GetConstant();
1022             // (array.length + c0 - v) where v is in [c1, array.length + c2]
1023             // gets [c0 - c2, array.length + c0 - c1] as its value range.
1024             if (!ValueBound::WouldAddOverflowOrUnderflow(c0, -c2) &&
1025                 !ValueBound::WouldAddOverflowOrUnderflow(c0, -c1)) {
1026               if ((c0 - c1) <= 0) {
1027                 // array.length + (c0 - c1) won't overflow/underflow.
1028                 ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
1029                     GetGraph()->GetArena(),
1030                     ValueBound(nullptr, right_const - upper.GetConstant()),
1031                     ValueBound(array_length, right_const - lower.GetConstant()));
1032                 AssignRange(sub->GetBlock(), sub, range);
1033               }
1034             }
1035           }
1036         }
1037       }
1038     }
1039   }
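  // A worked instance of the rule above (illustration only): for "a.length - j"
  // with j proven to lie in [0, a.length - 1] (so c0 = 0, c1 = 0, c2 = -1), the
  // subtraction is assigned the range [1, a.length], which is exactly the bound
  // needed for the triangular inner loop mentioned in the comment.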
1040 
1041   void FindAndHandlePartialArrayLength(HBinaryOperation* instruction) {
1042     DCHECK(instruction->IsDiv() || instruction->IsShr() || instruction->IsUShr());
1043     HInstruction* right = instruction->GetRight();
1044     int32_t right_const;
1045     if (right->IsIntConstant()) {
1046       right_const = right->AsIntConstant()->GetValue();
1047       // Detect division by two or more.
1048       if ((instruction->IsDiv() && right_const <= 1) ||
1049           (instruction->IsShr() && right_const < 1) ||
1050           (instruction->IsUShr() && right_const < 1)) {
1051         return;
1052       }
1053     } else {
1054       return;
1055     }
1056 
1057     // Try to handle array.length/2 or (array.length-1)/2 format.
1058     HInstruction* left = instruction->GetLeft();
1059     HInstruction* left_of_left;  // left input of left.
1060     int32_t c = 0;
1061     if (ValueBound::IsAddOrSubAConstant(left, &left_of_left, &c)) {
1062       left = left_of_left;
1063     }
1064     // The value of left input of instruction equals (left + c).
1065 
1066     // (array_length + 1) or smaller divided by two or more
1067     // always generate a value in [Min(), array_length].
1068     // This is true even if array_length is Max().
1069     if (left->IsArrayLength() && c <= 1) {
1070       if (instruction->IsUShr() && c < 0) {
1071         // Make sure for unsigned shift, left side is not negative.
1072         // e.g. if array_length is 2, ((array_length - 3) >>> 2) is way bigger
1073         // than array_length.
1074         return;
1075       }
1076       ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
1077           GetGraph()->GetArena(),
1078           ValueBound(nullptr, std::numeric_limits<int32_t>::min()),
1079           ValueBound(left, 0));
1080       AssignRange(instruction->GetBlock(), instruction, range);
1081     }
1082   }
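  // For illustration: "a.length / 2" and "(a.length - 1) >> 1" both match here
  // with c <= 1 and receive the conservative range [Min(), a.length]. Combined
  // with, say, a dominating "mid >= 1" test, that is already enough to prove an
  // access like a[mid - 1] in bounds.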
1083 
1084   void VisitDiv(HDiv* div) OVERRIDE {
1085     FindAndHandlePartialArrayLength(div);
1086   }
1087 
1088   void VisitShr(HShr* shr) OVERRIDE {
1089     FindAndHandlePartialArrayLength(shr);
1090   }
1091 
1092   void VisitUShr(HUShr* ushr) OVERRIDE {
1093     FindAndHandlePartialArrayLength(ushr);
1094   }
1095 
1096   void VisitAnd(HAnd* instruction) OVERRIDE {
1097     if (instruction->GetRight()->IsIntConstant()) {
1098       int32_t constant = instruction->GetRight()->AsIntConstant()->GetValue();
1099       if (constant > 0) {
1100         // constant serves as a mask so any number masked with it
1101         // gets a [0, constant] value range.
1102         ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
1103             GetGraph()->GetArena(),
1104             ValueBound(nullptr, 0),
1105             ValueBound(nullptr, constant));
1106         AssignRange(instruction->GetBlock(), instruction, range);
1107       }
1108     }
1109   }
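  // For example, "i & 0xff" is assigned the range [0, 255] no matter what is
  // known about i, since a positive mask bounds the result from both sides.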
1110 
1111   void VisitNewArray(HNewArray* new_array) OVERRIDE {
1112     HInstruction* len = new_array->InputAt(0);
1113     if (!len->IsIntConstant()) {
1114       HInstruction *left;
1115       int32_t right_const;
1116       if (ValueBound::IsAddOrSubAConstant(len, &left, &right_const)) {
1117         // (left + right_const) is used as the size of the new array.
1118         // We record "-right_const <= left <= new_array - right_const".
1119         ValueBound lower = ValueBound(nullptr, -right_const);
1120         // We use new_array for the bound instead of new_array.length,
1121         // which isn't available as an instruction yet. new_array will
1122         // be treated the same as new_array.length when it's used in a ValueBound.
1123         ValueBound upper = ValueBound(new_array, -right_const);
1124         ValueRange* range = new (GetGraph()->GetArena())
1125             ValueRange(GetGraph()->GetArena(), lower, upper);
1126         ValueRange* existing_range = LookupValueRange(left, new_array->GetBlock());
1127         if (existing_range != nullptr) {
1128           range = existing_range->Narrow(range);
1129         }
1130         AssignRange(new_array->GetBlock(), left, range);
1131       }
1132     }
1133   }
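  // A worked case (illustration only): for "new int[n + 1]" the code above
  // records -1 <= n <= (length of the new array) - 1; together with a
  // dominating "n >= 0" test this makes a subsequent access a[n] on the new
  // array provably in bounds.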
1134 
1135   /**
1136     * After null/bounds checks are eliminated, some invariant array references
1137     * may be exposed underneath which can be hoisted out of the loop to the
1138     * preheader or, in combination with dynamic bce, the deoptimization block.
1139     *
1140     * for (int i = 0; i < n; i++) {
1141     *                                <-------+
1142     *   for (int j = 0; j < n; j++)          |
1143     *     a[i][j] = 0;               --a[i]--+
1144     * }
1145     *
1146     * Note: this optimization is no longer applied after dominator-based dynamic deoptimization
1147     * has occurred (see AddCompareWithDeoptimization()), since in those cases it would be
1148     * unsafe to hoist array references across their deoptimization instruction inside a loop.
1149     */
1150   void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
1151     if (!has_dom_based_dynamic_bce_ && array_get->IsInLoop()) {
1152       HLoopInformation* loop = array_get->GetBlock()->GetLoopInformation();
1153       if (loop->IsDefinedOutOfTheLoop(array_get->InputAt(0)) &&
1154           loop->IsDefinedOutOfTheLoop(array_get->InputAt(1))) {
1155         SideEffects loop_effects = side_effects_.GetLoopEffects(loop->GetHeader());
1156         if (!array_get->GetSideEffects().MayDependOn(loop_effects)) {
1157           // We can hoist ArrayGet only if its execution is guaranteed on every iteration.
1158           // In other words only if array_get_bb dominates all back branches.
1159           if (loop->DominatesAllBackEdges(array_get->GetBlock())) {
1160             HoistToPreHeaderOrDeoptBlock(loop, array_get);
1161           }
1162         }
1163       }
1164     }
1165   }
1166 
1167   // Perform dominator-based dynamic elimination on suitable set of bounds checks.
1168   void AddCompareWithDeoptimization(HBasicBlock* block,
1169                                     HInstruction* array_length,
1170                                     HInstruction* base,
1171                                     int32_t min_c, int32_t max_c) {
1172     HBoundsCheck* bounds_check =
1173         first_index_bounds_check_map_.Get(array_length->GetId())->AsBoundsCheck();
1174     // Construct deoptimization on single or double bounds on range [base-min_c,base+max_c],
1175     // for example either for a[0]..a[3] just 3 or for a[base-1]..a[base+3] both base-1
1176     // and base+3, since we made the assumption any in between value may occur too.
1177     static_assert(kMaxLengthForAddingDeoptimize < std::numeric_limits<int32_t>::max(),
1178                   "Incorrect max length may be subject to arithmetic wrap-around");
1179     HInstruction* upper = GetGraph()->GetIntConstant(max_c);
1180     if (base == nullptr) {
1181       DCHECK_GE(min_c, 0);
1182     } else {
1183       HInstruction* lower = new (GetGraph()->GetArena())
1184           HAdd(Primitive::kPrimInt, base, GetGraph()->GetIntConstant(min_c));
1185       upper = new (GetGraph()->GetArena()) HAdd(Primitive::kPrimInt, base, upper);
1186       block->InsertInstructionBefore(lower, bounds_check);
1187       block->InsertInstructionBefore(upper, bounds_check);
1188       InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAbove(lower, upper));
1189     }
1190     InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAboveOrEqual(upper, array_length));
1191     // Flag that this kind of deoptimization has occurred.
1192     has_dom_based_dynamic_bce_ = true;
1193   }
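  // A sketch of the code shape this emits (illustration only): for accesses
  // a[base - 1] .. a[base + 3] (min_c = -1, max_c = 3) it materializes, right
  // before the first bounds check,
  //
  //   lower = base + (-1);  upper = base + 3;
  //   deoptimize if lower u>  upper        // HAbove: catches a negative lower
  //   deoptimize if upper u>= a.length     // HAboveOrEqual: catches upper too large
  //
  // after which the individual bounds checks in the covered range are removed.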
1194 
1195   // Attempt dominator-based dynamic elimination on remaining candidates.
1196   void AddComparesWithDeoptimization(HBasicBlock* block) {
1197     for (const auto& entry : first_index_bounds_check_map_) {
1198       HBoundsCheck* bounds_check = entry.second;
1199       HInstruction* index = bounds_check->InputAt(0);
1200       HInstruction* array_length = bounds_check->InputAt(1);
1201       if (!array_length->IsArrayLength()) {
1202         continue;  // disregard phis and constants
1203       }
1204       // Collect all bounds checks that are still there and that are related as "a[base + constant]"
1205       // for a base instruction (possibly absent) and various constants. Note that no attempt
1206       // is made to partition the set into matching subsets (viz. a[0], a[1] and a[base+1] and
1207       // a[base+2] are considered as one set).
1208       // TODO: would such a partitioning be worthwhile?
1209       ValueBound value = ValueBound::AsValueBound(index);
1210       HInstruction* base = value.GetInstruction();
1211       int32_t min_c = base == nullptr ? 0 : value.GetConstant();
1212       int32_t max_c = value.GetConstant();
1213       ArenaVector<HBoundsCheck*> candidates(
1214           GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
1215       ArenaVector<HBoundsCheck*> standby(
1216           GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
1217       for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) {
1218         // Another bounds check in same or dominated block?
1219         HInstruction* user = use.GetUser();
1220         HBasicBlock* other_block = user->GetBlock();
1221         if (user->IsBoundsCheck() && block->Dominates(other_block)) {
1222           HBoundsCheck* other_bounds_check = user->AsBoundsCheck();
1223           HInstruction* other_index = other_bounds_check->InputAt(0);
1224           HInstruction* other_array_length = other_bounds_check->InputAt(1);
1225           ValueBound other_value = ValueBound::AsValueBound(other_index);
1226           if (array_length == other_array_length && base == other_value.GetInstruction()) {
1227             // Reject certain OOB if BoundsCheck(l, l) occurs on considered subset.
1228             if (array_length == other_index) {
1229               candidates.clear();
1230               standby.clear();
1231               break;
1232             }
1233             // Since a subsequent dominated block could be under a conditional, only accept
1234             // the other bounds check if it is in same block or both blocks dominate the exit.
1235             // TODO: we could improve this by testing proper post-dominance, or even if this
1236             //       constant is seen along *all* conditional paths that follow.
1237             HBasicBlock* exit = GetGraph()->GetExitBlock();
1238             if (block == user->GetBlock() ||
1239                 (block->Dominates(exit) && other_block->Dominates(exit))) {
1240               int32_t other_c = other_value.GetConstant();
1241               min_c = std::min(min_c, other_c);
1242               max_c = std::max(max_c, other_c);
1243               candidates.push_back(other_bounds_check);
1244             } else {
1245               // Add this candidate later only if it falls into the range.
1246               standby.push_back(other_bounds_check);
1247             }
1248           }
1249         }
1250       }
1251       // Add standby candidates that fall in selected range.
1252       for (HBoundsCheck* other_bounds_check : standby) {
1253         HInstruction* other_index = other_bounds_check->InputAt(0);
1254         int32_t other_c = ValueBound::AsValueBound(other_index).GetConstant();
1255         if (min_c <= other_c && other_c <= max_c) {
1256           candidates.push_back(other_bounds_check);
1257         }
1258       }
1259       // Perform dominator-based deoptimization if it seems profitable. Note that we reject cases
1260       // where the min_c:max_c distance gets close to the maximum possible array length,
1261       // since those cases are likely to always deopt (such situations do not necessarily go
1262       // OOB, though, since the programmer could rely on wrap-around from max to min).
1263       size_t threshold = kThresholdForAddingDeoptimize + (base == nullptr ? 0 : 1);  // extra test?
1264       uint32_t distance = static_cast<uint32_t>(max_c) - static_cast<uint32_t>(min_c);
1265       if (candidates.size() >= threshold &&
1266           (base != nullptr || min_c >= 0) &&  // reject certain OOB
1267            distance <= kMaxLengthForAddingDeoptimize) {  // reject likely/certain deopt
1268         AddCompareWithDeoptimization(block, array_length, base, min_c, max_c);
1269         for (HInstruction* other_bounds_check : candidates) {
1270           // Only replace if still in the graph. This avoids visiting the same
1271           // bounds check twice if it occurred multiple times in the use list.
1272           if (other_bounds_check->IsInBlock()) {
1273             ReplaceInstruction(other_bounds_check, other_bounds_check->InputAt(0));
1274           }
1275         }
1276       }
1277     }
1278   }
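
  // Example of the grouping above (an assumed Java-level snippet, for intuition only):
  //     a[i] = 1; a[i + 2] = 2; a[i + 1] = 3;
  // All three checks share the same array length and base i, giving min_c = 0 and max_c = 2.
  // Once enough candidates accumulate (kThresholdForAddingDeoptimize, plus one extra when a
  // base is involved) and the min_c:max_c distance stays within kMaxLengthForAddingDeoptimize,
  // one pair of deopt tests replaces all of the individual bounds checks.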
1279 
1280   /**
1281    * Returns true if static range analysis based on induction variables can determine that the
1282    * bounds check on the given array range is always satisfied by the computed index range. The output
1283    * parameter try_dynamic_bce is set to false if OOB is certain.
1284    */
1285   bool InductionRangeFitsIn(ValueRange* array_range,
1286                             HInstruction* context,
1287                             HInstruction* index,
1288                             bool* try_dynamic_bce) {
1289     InductionVarRange::Value v1;
1290     InductionVarRange::Value v2;
1291     bool needs_finite_test = false;
1292     if (induction_range_.GetInductionRange(context, index, &v1, &v2, &needs_finite_test)) {
1293       do {
1294         if (v1.is_known && (v1.a_constant == 0 || v1.a_constant == 1) &&
1295             v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) {
1296           DCHECK(v1.a_constant == 1 || v1.instruction == nullptr);
1297           DCHECK(v2.a_constant == 1 || v2.instruction == nullptr);
1298           ValueRange index_range(GetGraph()->GetArena(),
1299                                  ValueBound(v1.instruction, v1.b_constant),
1300                                  ValueBound(v2.instruction, v2.b_constant));
1301           // If analysis reveals a certain OOB, disable dynamic BCE.
1302           if (index_range.GetLower().LessThan(array_range->GetLower()) ||
1303               index_range.GetUpper().GreaterThan(array_range->GetUpper())) {
1304             *try_dynamic_bce = false;
1305             return false;
1306           }
1307           // Use analysis for static bce only if loop is finite.
1308           if (!needs_finite_test && index_range.FitsIn(array_range)) {
1309             return true;
1310           }
1311         }
1312       } while (induction_range_.RefineOuter(&v1, &v2));
1313     }
1314     return false;
1315   }
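
  // A minimal sketch of the static case handled above (assumed Java-level source):
  //     for (int i = 0; i < a.length; i++) { a[i] = 0; }
  // Induction analysis yields the index range [0, a.length - 1], which fits in the array
  // range, so the bounds check is proven redundant without any runtime test. Conversely,
  // if the computed index range already exceeds the array range, OOB is certain and
  // *try_dynamic_bce is cleared so no deoptimization is attempted either.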
1316 
1317   /**
1318    * When the compiler fails to remove a bounds check statically, we try to remove the bounds
1319    * check dynamically by adding runtime tests that trigger a deoptimization in case bounds
1320    * will go out of range (we want to be rather certain of that given the slowdown of
1321    * deoptimization). If no deoptimization occurs, the loop is executed with all corresponding
1322    * bounds checks and related null checks removed.
1323    */
1324   bool TryDynamicBCE(HBoundsCheck* instruction) {
1325     HLoopInformation* loop = instruction->GetBlock()->GetLoopInformation();
1326     HInstruction* index = instruction->InputAt(0);
1327     HInstruction* length = instruction->InputAt(1);
1328     // If dynamic bounds check elimination seems profitable and is possible, then proceed.
1329     bool needs_finite_test = false;
1330     bool needs_taken_test = false;
1331     if (DynamicBCESeemsProfitable(loop, instruction->GetBlock()) &&
1332         induction_range_.CanGenerateCode(
1333             instruction, index, &needs_finite_test, &needs_taken_test) &&
1334         CanHandleInfiniteLoop(loop, instruction, index, needs_finite_test) &&
1335         CanHandleLength(loop, length, needs_taken_test)) {  // do this test last (may code gen)
1336       HInstruction* lower = nullptr;
1337       HInstruction* upper = nullptr;
1338       // Generate the following unsigned comparisons
1339       //     if (lower > upper)   deoptimize;
1340       //     if (upper >= length) deoptimize;
1341       // or, for a non-induction index, just the unsigned comparison on its 'upper' value
1342       //     if (upper >= length) deoptimize;
1343       // as runtime test. By restricting dynamic bce to unit strides (with a maximum of 32-bit
1344       // iterations) and by not combining accesses (e.g. a[i], a[i-3], a[i+5], etc.), these tests
1345       // correctly guard against any possible OOB (including arithmetic wrap-around cases).
1346       TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
1347       HBasicBlock* block = GetPreHeader(loop, instruction);
1348       induction_range_.GenerateRangeCode(instruction, index, GetGraph(), block, &lower, &upper);
1349       if (lower != nullptr) {
1350         InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(lower, upper));
1351       }
1352       InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAboveOrEqual(upper, length));
1353       ReplaceInstruction(instruction, index);
1354       return true;
1355     }
1356     return false;
1357   }
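
  // Dynamic bce sketch (assumed Java-level source; lo and hi are illustrative names):
  //     for (int i = lo; i < hi; i++) { a[i] = 0; }
  // When hi is not statically known to be bounded by a.length, the code above emits in the
  // preheader (possibly behind a taken-test, see TransformLoopForDeoptimizationIfNeeded)
  //     if (lo > hi - 1)         deoptimize;  // unsigned; guards wrap-around of hi - 1
  //     if (hi - 1 >= a.length)  deoptimize;  // unsigned
  // and then replaces the in-loop bounds check with its index.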
1358 
1359   /**
1360    * Returns true if heuristics indicate that dynamic bce may be profitable.
1361    */
1362   bool DynamicBCESeemsProfitable(HLoopInformation* loop, HBasicBlock* block) {
1363     if (loop != nullptr) {
1364       // The loop preheader of an irreducible loop does not dominate all the blocks in
1365       // the loop. We would need to find the common dominator of all blocks in the loop.
1366       if (loop->IsIrreducible()) {
1367         return false;
1368       }
1369       // We should never deoptimize from an osr method, otherwise we might wrongly optimize
1370       // code dominated by the deoptimization.
1371       if (GetGraph()->IsCompilingOsr()) {
1372         return false;
1373       }
1374       // A try boundary preheader is hard to handle.
1375       // TODO: remove this restriction.
1376       if (loop->GetPreHeader()->GetLastInstruction()->IsTryBoundary()) {
1377         return false;
1378       }
1379       // Does the loop have early exits? If so, the full range may not be covered by the loop
1380       // at runtime and testing the range may apply deoptimization unnecessarily.
1381       if (IsEarlyExitLoop(loop)) {
1382         return false;
1383       }
1384       // Does the current basic block dominate all back edges? If not,
1385       // don't apply dynamic bce to something that may not be executed.
1386       return loop->DominatesAllBackEdges(block);
1387     }
1388     return false;
1389   }
1390 
1391   /**
1392    * Returns true if the loop has early exits, which implies it may not cover
1393    * the full range computed by range analysis based on induction variables.
1394    */
1395   bool IsEarlyExitLoop(HLoopInformation* loop) {
1396     const uint32_t loop_id = loop->GetHeader()->GetBlockId();
1397     // If the loop has already been analyzed for early exits, don't repeat the analysis.
1398     auto it = early_exit_loop_.find(loop_id);
1399     if (it != early_exit_loop_.end()) {
1400       return it->second;
1401     }
1402     // First-time early-exit analysis for this loop. Since the analysis requires scanning
1403     // the full loop body, its result is stored for subsequent queries.
1404     HBlocksInLoopReversePostOrderIterator it_loop(*loop);
1405     for (it_loop.Advance(); !it_loop.Done(); it_loop.Advance()) {
1406       for (HBasicBlock* successor : it_loop.Current()->GetSuccessors()) {
1407         if (!loop->Contains(*successor)) {
1408           early_exit_loop_.Put(loop_id, true);
1409           return true;
1410         }
1411       }
1412     }
1413     early_exit_loop_.Put(loop_id, false);
1414     return false;
1415   }
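
  // For instance (assumed Java-level source; done is just an illustrative flag):
  //     for (int i = 0; i < n; i++) { a[i] = 0; if (done) break; }
  // The break is a side exit, so at runtime the loop may never come close to index n - 1.
  // Deoptimizing up front on the full range [0, n - 1] could then be a net loss, which is
  // why DynamicBCESeemsProfitable rejects early-exit loops.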
1416 
1417   /**
1418    * Returns true if the array length is already loop invariant, or can be made so
1419    * by handling the null check under the hood of the array length operation.
1420    */
1421   bool CanHandleLength(HLoopInformation* loop, HInstruction* length, bool needs_taken_test) {
1422     if (loop->IsDefinedOutOfTheLoop(length)) {
1423       return true;
1424     } else if (length->IsArrayLength() && length->GetBlock()->GetLoopInformation() == loop) {
1425       if (CanHandleNullCheck(loop, length->InputAt(0), needs_taken_test)) {
1426         HoistToPreHeaderOrDeoptBlock(loop, length);
1427         return true;
1428       }
1429     }
1430     return false;
1431   }
1432 
1433   /**
1434    * Returns true if the null check is already loop invariant, or can be made so
1435    * by generating a deoptimization test.
1436    */
1437   bool CanHandleNullCheck(HLoopInformation* loop, HInstruction* check, bool needs_taken_test) {
1438     if (loop->IsDefinedOutOfTheLoop(check)) {
1439       return true;
1440     } else if (check->IsNullCheck() && check->GetBlock()->GetLoopInformation() == loop) {
1441       HInstruction* array = check->InputAt(0);
1442       if (loop->IsDefinedOutOfTheLoop(array)) {
1443         // Generate: if (array == null) deoptimize;
1444         TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
1445         HBasicBlock* block = GetPreHeader(loop, check);
1446         HInstruction* cond =
1447             new (GetGraph()->GetArena()) HEqual(array, GetGraph()->GetNullConstant());
1448         InsertDeoptInLoop(loop, block, cond);
1449         ReplaceInstruction(check, array);
1450         return true;
1451       }
1452     }
1453     return false;
1454   }
1455 
1456   /**
1457    * Returns true if the compiler can apply dynamic bce to loops that may be infinite
1458    * (e.g. for (int i = 0; i <= U; i++) with U = MAX_INT), which would invalidate
1459    * the range analysis evaluation code by "overshooting" the computed range.
1460    * Since deoptimization would be a bad choice, and there is no other version
1461    * of the loop to use, dynamic bce in such cases is only allowed if other tests
1462    * ensure the loop is finite.
1463    */
1464   bool CanHandleInfiniteLoop(
1465       HLoopInformation* loop, HBoundsCheck* check, HInstruction* index, bool needs_infinite_test) {
1466     if (needs_infinite_test) {
1467       // If we already forced the loop to be finite, allow directly.
1468       const uint32_t loop_id = loop->GetHeader()->GetBlockId();
1469       if (finite_loop_.find(loop_id) != finite_loop_.end()) {
1470         return true;
1471       }
1472       // Otherwise, allow dynamic bce if the index (which is necessarily an induction at
1473       // this point) is the direct loop index (viz. a[i]), since then the runtime tests
1474       // ensure the upper bound cannot cause an infinite loop.
1475       HInstruction* control = loop->GetHeader()->GetLastInstruction();
1476       if (control->IsIf()) {
1477         HInstruction* if_expr = control->AsIf()->InputAt(0);
1478         if (if_expr->IsCondition()) {
1479           HCondition* condition = if_expr->AsCondition();
1480           if (index == condition->InputAt(0) ||
1481               index == condition->InputAt(1)) {
1482             finite_loop_.insert(loop_id);
1483             return true;
1484           }
1485         }
1486       }
1487       // If bounds check made it this far, it is worthwhile to check later if
1488       // the loop was forced finite by another candidate.
1489       if (record_dynamic_bce_standby_) {
1490         dynamic_bce_standby_.push_back(check);
1491       }
1492       return false;
1493     }
1494     return true;
1495   }
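
  // Potentially infinite case handled above (assumed Java-level source):
  //     for (int i = 0; i <= u; i++) { a[i] = 0; }   // never terminates if u == Integer.MAX_VALUE
  // Here i itself appears in the loop condition, so the generated runtime test
  //     if (u >= a.length) deoptimize;
  // also bounds u below the (finite) array length, which keeps the loop finite and makes
  // the range evaluation code safe to rely on.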
1496 
1497   /**
1498    * Returns the appropriate preheader for the loop, depending on whether the
1499    * instruction appears in the loop header or proper loop-body.
1500    */
1501   HBasicBlock* GetPreHeader(HLoopInformation* loop, HInstruction* instruction) {
1502     // Use the preheader unless there is an earlier generated deoptimization block, since
1503     // hoisted expressions may depend on and/or be used by the deoptimization tests.
1504     HBasicBlock* header = loop->GetHeader();
1505     const uint32_t loop_id = header->GetBlockId();
1506     auto it = taken_test_loop_.find(loop_id);
1507     if (it != taken_test_loop_.end()) {
1508       HBasicBlock* block = it->second;
1509       // If always taken, keep it that way by returning the original preheader,
1510       // which can be found by following the predecessor of the true-block twice.
1511       if (instruction->GetBlock() == header) {
1512         return block->GetSinglePredecessor()->GetSinglePredecessor();
1513       }
1514       return block;
1515     }
1516     return loop->GetPreHeader();
1517   }
1518 
1519   /** Inserts a deoptimization test in a loop preheader. */
1520   void InsertDeoptInLoop(HLoopInformation* loop, HBasicBlock* block, HInstruction* condition) {
1521     HInstruction* suspend = loop->GetSuspendCheck();
1522     block->InsertInstructionBefore(condition, block->GetLastInstruction());
1523     HDeoptimize* deoptimize =
1524         new (GetGraph()->GetArena()) HDeoptimize(condition, suspend->GetDexPc());
1525     block->InsertInstructionBefore(deoptimize, block->GetLastInstruction());
1526     if (suspend->HasEnvironment()) {
1527       deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
1528           suspend->GetEnvironment(), loop->GetHeader());
1529     }
1530   }
1531 
1532   /** Inserts a deoptimization test right before a bounds check. */
1533   void InsertDeoptInBlock(HBoundsCheck* bounds_check, HInstruction* condition) {
1534     HBasicBlock* block = bounds_check->GetBlock();
1535     block->InsertInstructionBefore(condition, bounds_check);
1536     HDeoptimize* deoptimize =
1537         new (GetGraph()->GetArena()) HDeoptimize(condition, bounds_check->GetDexPc());
1538     block->InsertInstructionBefore(deoptimize, bounds_check);
1539     deoptimize->CopyEnvironmentFrom(bounds_check->GetEnvironment());
1540   }
1541 
1542   /** Hoists instruction out of the loop to preheader or deoptimization block. */
1543   void HoistToPreHeaderOrDeoptBlock(HLoopInformation* loop, HInstruction* instruction) {
1544     HBasicBlock* block = GetPreHeader(loop, instruction);
1545     DCHECK(!instruction->HasEnvironment());
1546     instruction->MoveBefore(block->GetLastInstruction());
1547   }
1548 
1549   /**
1550    * Adds a new taken-test structure to a loop if needed and not already done.
1551    * The taken-test protects range analysis evaluation code to avoid any
1552    * deoptimization caused by incorrect trip-count evaluation in non-taken loops.
1553    *
1554    *          old_preheader
1555    *               |
1556    *            if_block          <- taken-test protects deoptimization block
1557    *            /      \
1558    *     true_block  false_block  <- deoptimizations/invariants are placed in true_block
1559    *            \       /
1560    *          new_preheader       <- may require phi nodes to preserve SSA structure
1561    *                |
1562    *             header
1563    *
1564    * For example, this loop:
1565    *
1566    *   for (int i = lower; i < upper; i++) {
1567    *     array[i] = 0;
1568    *   }
1569    *
1570    * will be transformed to:
1571    *
1572    *   if (lower < upper) {
1573    *     if (array == null) deoptimize;
1574    *     array_length = array.length;
1575    *     if (lower > upper)         deoptimize;  // unsigned
1576    *     if (upper >= array_length) deoptimize;  // unsigned
1577    *   } else {
1578    *     array_length = 0;
1579    *   }
1580    *   for (int i = lower; i < upper; i++) {
1581    *     // Loop without null check and bounds check, and any array.length replaced with array_length.
1582    *     array[i] = 0;
1583    *   }
1584    */
1585   void TransformLoopForDeoptimizationIfNeeded(HLoopInformation* loop, bool needs_taken_test) {
1586     // Not needed (can use preheader) or already done (can reuse)?
1587     const uint32_t loop_id = loop->GetHeader()->GetBlockId();
1588     if (!needs_taken_test || taken_test_loop_.find(loop_id) != taken_test_loop_.end()) {
1589       return;
1590     }
1591 
1592     // Generate top test structure.
1593     HBasicBlock* header = loop->GetHeader();
1594     GetGraph()->TransformLoopHeaderForBCE(header);
1595     HBasicBlock* new_preheader = loop->GetPreHeader();
1596     HBasicBlock* if_block = new_preheader->GetDominator();
1597     HBasicBlock* true_block = if_block->GetSuccessors()[0];  // True successor.
1598     HBasicBlock* false_block = if_block->GetSuccessors()[1];  // False successor.
1599 
1600     // Goto instructions.
1601     true_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());
1602     false_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());
1603     new_preheader->AddInstruction(new (GetGraph()->GetArena()) HGoto());
1604 
1605     // Insert the taken-test to see if the loop body is entered. If the
1606     // loop isn't entered at all, it jumps around the deoptimization block.
1607     if_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());  // placeholder
1608     HInstruction* condition = nullptr;
1609     induction_range_.GenerateTakenTest(header->GetLastInstruction(),
1610                                        GetGraph(),
1611                                        if_block,
1612                                        &condition);
1613     DCHECK(condition != nullptr);
1614     if_block->RemoveInstruction(if_block->GetLastInstruction());
1615     if_block->AddInstruction(new (GetGraph()->GetArena()) HIf(condition));
1616 
1617     taken_test_loop_.Put(loop_id, true_block);
1618   }
1619 
1620   /**
1621    * Inserts phi nodes that preserve SSA structure in generated top test structures.
1622    * All uses of instructions in the deoptimization block that reach the loop need
1623    * a phi node in the new loop preheader to fix the dominance relation.
1624    *
1625    * Example:
1626    *           if_block
1627    *            /      \
1628    *         x_0 = ..  false_block
1629    *            \       /
1630    *           x_1 = phi(x_0, null)   <- synthetic phi
1631    *               |
1632    *          new_preheader
1633    */
1634   void InsertPhiNodes() {
1635     // Scan all new deoptimization blocks.
1636     for (auto it1 = taken_test_loop_.begin(); it1 != taken_test_loop_.end(); ++it1) {
1637       HBasicBlock* true_block = it1->second;
1638       HBasicBlock* new_preheader = true_block->GetSingleSuccessor();
1639       // Scan all instructions in a new deoptimization block.
1640       for (HInstructionIterator it(true_block->GetInstructions()); !it.Done(); it.Advance()) {
1641         HInstruction* instruction = it.Current();
1642         Primitive::Type type = instruction->GetType();
1643         HPhi* phi = nullptr;
1644         // Scan all uses of an instruction and replace each later use with a phi node.
1645         const HUseList<HInstruction*>& uses = instruction->GetUses();
1646         for (auto it2 = uses.begin(), end2 = uses.end(); it2 != end2; /* ++it2 below */) {
1647           HInstruction* user = it2->GetUser();
1648           size_t index = it2->GetIndex();
1649           // Increment `it2` now because `*it2` may disappear thanks to user->ReplaceInput().
1650           ++it2;
1651           if (user->GetBlock() != true_block) {
1652             if (phi == nullptr) {
1653               phi = NewPhi(new_preheader, instruction, type);
1654             }
1655             user->ReplaceInput(phi, index);  // Removes the use node from the list.
1656           }
1657         }
1658         // Scan all environment uses of an instruction and replace each later use with a phi node.
1659         const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
1660         for (auto it2 = env_uses.begin(), end2 = env_uses.end(); it2 != end2; /* ++it2 below */) {
1661           HEnvironment* user = it2->GetUser();
1662           size_t index = it2->GetIndex();
1663           // Increment `it2` now because `*it2` may disappear thanks to user->RemoveAsUserOfInput().
1664           ++it2;
1665           if (user->GetHolder()->GetBlock() != true_block) {
1666             if (phi == nullptr) {
1667               phi = NewPhi(new_preheader, instruction, type);
1668             }
1669             user->RemoveAsUserOfInput(index);
1670             user->SetRawEnvAt(index, phi);
1671             phi->AddEnvUseAt(user, index);
1672           }
1673         }
1674       }
1675     }
1676   }
1677 
1678   /**
1679    * Construct a phi(instruction, 0) in the new preheader to fix the dominance relation.
1680    * These are synthetic phi nodes without a virtual register.
1681    */
1682   HPhi* NewPhi(HBasicBlock* new_preheader,
1683                HInstruction* instruction,
1684                Primitive::Type type) {
1685     HGraph* graph = GetGraph();
1686     HInstruction* zero;
1687     switch (type) {
1688       case Primitive::kPrimNot: zero = graph->GetNullConstant(); break;
1689       case Primitive::kPrimFloat: zero = graph->GetFloatConstant(0); break;
1690       case Primitive::kPrimDouble: zero = graph->GetDoubleConstant(0); break;
1691       default: zero = graph->GetConstant(type, 0); break;
1692     }
1693     HPhi* phi = new (graph->GetArena())
1694         HPhi(graph->GetArena(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type));
1695     phi->SetRawInputAt(0, instruction);
1696     phi->SetRawInputAt(1, zero);
1697     if (type == Primitive::kPrimNot) {
1698       phi->SetReferenceTypeInfo(instruction->GetReferenceTypeInfo());
1699     }
1700     new_preheader->AddPhi(phi);
1701     return phi;
1702   }
1703 
1704   /** Helper method to replace an instruction with another instruction. */
1705   static void ReplaceInstruction(HInstruction* instruction, HInstruction* replacement) {
1706     instruction->ReplaceWith(replacement);
1707     instruction->GetBlock()->RemoveInstruction(instruction);
1708   }
1709 
1710   // A set of maps, one per basic block, from instruction to range.
1711   ArenaVector<ArenaSafeMap<int, ValueRange*>> maps_;
1712 
1713   // Map an HArrayLength instruction's id to the first HBoundsCheck instruction
1714   // in a block that checks an index against that HArrayLength.
1715   ArenaSafeMap<int, HBoundsCheck*> first_index_bounds_check_map_;
1716 
1717   // Stand by list for dynamic bce.
1718   ArenaVector<HBoundsCheck*> dynamic_bce_standby_;
1719   bool record_dynamic_bce_standby_;
1720 
1721   // Early-exit loop bookkeeping.
1722   ArenaSafeMap<uint32_t, bool> early_exit_loop_;
1723 
1724   // Taken-test loop bookkeeping.
1725   ArenaSafeMap<uint32_t, HBasicBlock*> taken_test_loop_;
1726 
1727   // Finite loop bookkeeping.
1728   ArenaSet<uint32_t> finite_loop_;
1729 
1730   // Flag that denotes whether dominator-based dynamic elimination has occurred.
1731   bool has_dom_based_dynamic_bce_;
1732 
1733   // Initial number of blocks.
1734   uint32_t initial_block_size_;
1735 
1736   // Side effects.
1737   const SideEffectsAnalysis& side_effects_;
1738 
1739   // Range analysis based on induction variables.
1740   InductionVarRange induction_range_;
1741 
1742   DISALLOW_COPY_AND_ASSIGN(BCEVisitor);
1743 };
1744 
1745 void BoundsCheckElimination::Run() {
1746   if (!graph_->HasBoundsChecks()) {
1747     return;
1748   }
1749 
1750   // Reverse post order guarantees a node's dominators are visited first.
1751   // We want to visit in the dominator-based order since if a value is known to
1752   // be bounded by a range at one instruction, it must be true that all uses of
1753   // that value dominated by that instruction fit in that range. The range of that
1754   // value can be narrowed further down in the dominator tree.
1755   BCEVisitor visitor(graph_, side_effects_, induction_analysis_);
1756   for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
1757     HBasicBlock* current = it.Current();
1758     if (visitor.IsAddedBlock(current)) {
1759       // Skip added blocks. Their effects are already taken care of.
1760       continue;
1761     }
1762     visitor.VisitBasicBlock(current);
1763     // Skip forward to the current block in case new basic blocks were inserted
1764     // (which always appear earlier in reverse post order) to avoid visiting the
1765     // same basic block twice.
1766     for ( ; !it.Done() && it.Current() != current; it.Advance()) {
1767     }
1768   }
1769 
1770   // Perform cleanup.
1771   visitor.Finish();
1772 }
1773 
1774 }  // namespace art
1775