/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "gvn.h"

#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
#include "base/scoped_arena_allocator.h"
#include "base/scoped_arena_containers.h"
#include "base/utils.h"
#include "side_effects_analysis.h"

namespace art HIDDEN {

/**
 * A ValueSet holds instructions that can replace other instructions. It is
 * updated through the `Add` and `Kill` methods. The `Kill` method removes
 * instructions that are affected by the given side effects.
 *
 * The `Lookup` method returns an instruction equivalent to the given one
 * if there is one in the set. In GVN, we would say those instructions have the
 * same "number".
 */
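// A minimal usage sketch (instruction names `add1`/`add2` and `side_effects`
// are hypothetical, for illustration only):
//
//   ValueSet set(allocator);
//   set.Add(add1);                          // Give `add1` a "number".
//   HInstruction* equiv = set.Lookup(add2); // Returns `add1` iff add2->Equals(add1).
//   set.Kill(side_effects);                 // Drop entries that may depend on these effects.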
class ValueSet : public ArenaObject<kArenaAllocGvn> {
 public:
  // Constructs an empty ValueSet which owns all its buckets.
  explicit ValueSet(ScopedArenaAllocator* allocator)
      : allocator_(allocator),
        num_buckets_(kMinimumNumberOfBuckets),
        buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
        buckets_owned_(ArenaBitVector::CreateFixedSize(allocator, num_buckets_, kArenaAllocGvn)),
        num_entries_(0u) {
    DCHECK(IsPowerOfTwo(num_buckets_));
    std::fill_n(buckets_, num_buckets_, nullptr);
    buckets_owned_.SetInitialBits(num_buckets_);
  }

  // Copy constructor. Depending on the load factor, it will either make a deep
  // copy (all buckets owned) or a shallow one (buckets pointing to the parent).
  ValueSet(ScopedArenaAllocator* allocator, const ValueSet& other)
      : allocator_(allocator),
        num_buckets_(other.IdealBucketCount()),
        buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
        buckets_owned_(ArenaBitVector::CreateFixedSize(allocator, num_buckets_, kArenaAllocGvn)),
        num_entries_(0u) {
    DCHECK(IsPowerOfTwo(num_buckets_));
    PopulateFromInternal(other);
  }

  // Erases all values in this set and populates it with values from `other`.
  void PopulateFrom(const ValueSet& other) {
    if (this == &other) {
      return;
    }
    PopulateFromInternal(other);
  }

  // Returns true if `this` has enough buckets so that if `other` is copied into
  // it, the load factor will not cross the upper threshold.
  // If `exact_match` is set, true is returned only if `this` has the ideal
  // number of buckets. Otherwise, a larger number of buckets is also allowed.
  bool CanHoldCopyOf(const ValueSet& other, bool exact_match) {
    if (exact_match) {
      return other.IdealBucketCount() == num_buckets_;
    } else {
      return other.IdealBucketCount() <= num_buckets_;
    }
  }

  // Adds an instruction to the set.
  void Add(HInstruction* instruction) {
    DCHECK(Lookup(instruction) == nullptr);
    size_t hash_code = HashCode(instruction);
    size_t index = BucketIndex(hash_code);

    if (!buckets_owned_.IsBitSet(index)) {
      CloneBucket(index);
    }
    buckets_[index] = new (allocator_) Node(instruction, hash_code, buckets_[index]);
    ++num_entries_;
  }

  // If in the set, returns an equivalent instruction to the given instruction.
  // Returns null otherwise.
  HInstruction* Lookup(HInstruction* instruction) const {
    size_t hash_code = HashCode(instruction);
    size_t index = BucketIndex(hash_code);

    for (Node* node = buckets_[index]; node != nullptr; node = node->GetNext()) {
      if (node->GetHashCode() == hash_code) {
        HInstruction* existing = node->GetInstruction();
        if (existing->Equals(instruction)) {
          return existing;
        }
      }
    }
    return nullptr;
  }

  // Returns whether `instruction` is in the set.
  bool Contains(HInstruction* instruction) const {
    size_t hash_code = HashCode(instruction);
    size_t index = BucketIndex(hash_code);

    for (Node* node = buckets_[index]; node != nullptr; node = node->GetNext()) {
      if (node->GetInstruction() == instruction) {
        return true;
      }
    }
    return false;
  }

  // Removes all instructions in the set affected by the given side effects.
  void Kill(SideEffects side_effects) {
    // Nothing to do if the side effects don't have any change bit set, as MayDependOn will always
    // return false.
    if (side_effects.HasSideEffects()) {
      DeleteAllImpureWhich([side_effects](Node* node) {
        return node->GetSideEffects().MayDependOn(side_effects);
      });
    }
  }

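  // Empties the set and reclaims ownership of all buckets.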
  void Clear() {
    num_entries_ = 0;
    for (size_t i = 0; i < num_buckets_; ++i) {
      buckets_[i] = nullptr;
    }
    buckets_owned_.SetInitialBits(num_buckets_);
  }

  // Updates this set by intersecting with instructions in a predecessor's set.
  void IntersectWith(ValueSet* predecessor) {
    if (IsEmpty()) {
      return;
    } else if (predecessor->IsEmpty()) {
      Clear();
    } else {
      // Pure instructions do not need to be tested because only impure
      // instructions can be killed.
      DeleteAllImpureWhich([predecessor](Node* node) {
        return !predecessor->Contains(node->GetInstruction());
      });
    }
  }

  bool IsEmpty() const { return num_entries_ == 0; }
  size_t GetNumberOfEntries() const { return num_entries_; }

 private:
  // Copies all entries from `other` to `this`.
  void PopulateFromInternal(const ValueSet& other) {
    DCHECK_NE(this, &other);
    DCHECK_GE(num_buckets_, other.IdealBucketCount());

    if (num_buckets_ == other.num_buckets_) {
      // Hash table remains the same size. We copy the bucket pointers and leave
      // all buckets_owned_ bits false.
      buckets_owned_.ClearAllBits();
      memcpy(buckets_, other.buckets_, num_buckets_ * sizeof(Node*));
    } else {
      // Hash table size changes. We copy and rehash all entries, and set all
      // buckets_owned_ bits to true.
      std::fill_n(buckets_, num_buckets_, nullptr);
      for (size_t i = 0; i < other.num_buckets_; ++i) {
        for (Node* node = other.buckets_[i]; node != nullptr; node = node->GetNext()) {
          size_t new_index = BucketIndex(node->GetHashCode());
          buckets_[new_index] = node->Dup(allocator_, buckets_[new_index]);
        }
      }
      buckets_owned_.SetInitialBits(num_buckets_);
    }

    num_entries_ = other.num_entries_;
  }

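  // A node in a bucket's singly-linked list, pairing an instruction with its
  // cached hash code.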
  class Node : public ArenaObject<kArenaAllocGvn> {
   public:
    Node(HInstruction* instruction, size_t hash_code, Node* next)
        : instruction_(instruction), hash_code_(hash_code), next_(next) {}

    size_t GetHashCode() const { return hash_code_; }
    HInstruction* GetInstruction() const { return instruction_; }
    Node* GetNext() const { return next_; }
    void SetNext(Node* node) { next_ = node; }

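    // Allocates a copy of this node, optionally chained to `new_next`.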
    Node* Dup(ScopedArenaAllocator* allocator, Node* new_next = nullptr) {
      return new (allocator) Node(instruction_, hash_code_, new_next);
    }

    SideEffects GetSideEffects() const {
      // Deoptimize is an unusual instruction: it is predicated and never
      // returns. Its side effects exist only to prevent the splitting of dex
      // instructions across it (which could cause inconsistencies once we begin
      // interpreting again). In the context of GVN the 'perform-deopt' branch is
      // not relevant; we only need to care about the no-op case, which has no
      // side effects. Treating it this way lets us eliminate redundant
      // deoptimizations (i.e. dominated deopts whose conditions have been GVN'd).
      if (instruction_->IsDeoptimize()) {
        return SideEffects::None();
      } else {
        return instruction_->GetSideEffects();
      }
    }

   private:
    HInstruction* const instruction_;
    const size_t hash_code_;
    Node* next_;

    DISALLOW_COPY_AND_ASSIGN(Node);
  };

  // Creates our own copy of a bucket that is currently pointing to a parent.
  // This method can be called while iterating over the bucket because it
  // preserves the order of entries in the bucket and will return the clone of
  // the given `iterator`.
  Node* CloneBucket(size_t index, Node* iterator = nullptr) {
    DCHECK(!buckets_owned_.IsBitSet(index));
    Node* clone_current = nullptr;
    Node* clone_previous = nullptr;
    Node* clone_iterator = nullptr;
    for (Node* node = buckets_[index]; node != nullptr; node = node->GetNext()) {
      clone_current = node->Dup(allocator_, nullptr);
      if (node == iterator) {
        clone_iterator = clone_current;
      }
      if (clone_previous == nullptr) {
        buckets_[index] = clone_current;
      } else {
        clone_previous->SetNext(clone_current);
      }
      clone_previous = clone_current;
    }
    buckets_owned_.SetBit(index);
    return clone_iterator;
  }

  // Iterates over buckets with impure instructions (even indices) and deletes
  // the ones on which `cond` returns true.
  template<typename Functor>
  void DeleteAllImpureWhich(Functor&& cond) {
    for (size_t i = 0; i < num_buckets_; i += 2) {
      Node* node = buckets_[i];
      Node* previous = nullptr;

      if (node == nullptr) {
        continue;
      }

      if (!buckets_owned_.IsBitSet(i)) {
        // Bucket is not owned but maybe we won't need to change it at all.
        // Iterate as long as the entries don't satisfy `cond`.
        while (node != nullptr) {
          if (cond(node)) {
            // We do need to delete an entry but we do not own the bucket.
            // Clone the bucket, make sure `previous` and `node` point to
            // the cloned entries and break.
            previous = CloneBucket(i, previous);
            node = (previous == nullptr) ? buckets_[i] : previous->GetNext();
            break;
          }
          previous = node;
          node = node->GetNext();
        }
      }

      // By this point we either own the bucket and can start deleting entries,
      // or we do not own it but no entries matched `cond`.
      DCHECK(buckets_owned_.IsBitSet(i) || node == nullptr);

      // We iterate over the remainder of entries and delete those that match
      // the given condition.
      while (node != nullptr) {
        Node* next = node->GetNext();
        if (cond(node)) {
          if (previous == nullptr) {
            buckets_[i] = next;
          } else {
            previous->SetNext(next);
          }
        } else {
          previous = node;
        }
        node = next;
      }
    }
  }

  // Computes a bucket count such that the load factor is reasonable.
  // This is estimated as (num_entries_ * 1.5) rounded up to the nearest
  // power of two.
  size_t IdealBucketCount() const {
    size_t bucket_count = RoundUpToPowerOfTwo(num_entries_ + (num_entries_ >> 1));
    if (bucket_count > kMinimumNumberOfBuckets) {
      return bucket_count;
    } else {
      return kMinimumNumberOfBuckets;
    }
  }

  // Generates a hash code for an instruction.
  size_t HashCode(HInstruction* instruction) const {
    size_t hash_code = instruction->ComputeHashCode();
    // Pure instructions are put into odd buckets to speed up deletion. Note that in the
    // case of irreducible loops, we don't put pure instructions in odd buckets, as we
    // need to delete them when entering the loop.
    // ClinitCheck is treated as a pure instruction since it's only executed
    // once.
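    // For example, with num_buckets_ == 8, an impure instruction whose
    // ComputeHashCode() is 3 maps to bucket (3 << 1) & 7 == 6 (even), while a
    // pure one maps to ((3 << 1) | 1) & 7 == 7 (odd).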
    bool pure = !instruction->GetSideEffects().HasDependencies() ||
                instruction->IsClinitCheck();
    if (!pure || instruction->GetBlock()->GetGraph()->HasIrreducibleLoops()) {
      return (hash_code << 1) | 0;
    } else {
      return (hash_code << 1) | 1;
    }
  }

  // Converts a hash code to a bucket index.
  size_t BucketIndex(size_t hash_code) const {
    return hash_code & (num_buckets_ - 1);
  }

  ScopedArenaAllocator* const allocator_;

  // The internal bucket implementation of the set.
  size_t const num_buckets_;
  Node** const buckets_;

  // Flags specifying which buckets were copied into the set from its parent.
  // If a flag is not set, the corresponding bucket points to entries in the
  // parent and must be cloned prior to making changes.
  BitVectorView<size_t> buckets_owned_;

  // The number of entries in the set.
  size_t num_entries_;

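  // Must be a power of two; see the DCHECKs in the constructors and the
  // masking in BucketIndex().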
  static constexpr size_t kMinimumNumberOfBuckets = 8;

  DISALLOW_COPY_AND_ASSIGN(ValueSet);
};

/**
 * Optimization phase that removes redundant instructions.
 */
class GlobalValueNumberer : public ValueObject {
 public:
  GlobalValueNumberer(HGraph* graph, const SideEffectsAnalysis& side_effects)
      : graph_(graph),
        allocator_(graph->GetArenaStack()),
        side_effects_(side_effects),
        sets_(graph->GetBlocks().size(), nullptr, allocator_.Adapter(kArenaAllocGvn)),
        dominated_to_visit_(graph->GetBlocks().size(), allocator_.Adapter(kArenaAllocGvn)),
        successors_to_visit_(graph->GetBlocks().size(), allocator_.Adapter(kArenaAllocGvn)),
        free_sets_(ArenaBitVector::CreateFixedSize(
            &allocator_, graph->GetBlocks().size(), kArenaAllocGvn)),
        visited_blocks_(ArenaBitVector::CreateFixedSize(
            &allocator_, graph->GetBlocks().size(), kArenaAllocGvn)),
        did_optimization_(false) {
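    // Seed the per-block counters with the number of dominated blocks and
    // successors still to visit; VisitBasicBlock decrements them to decide
    // when a block's ValueSet can be recycled.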
    for (HBasicBlock* block : graph->GetReversePostOrder()) {
      dominated_to_visit_[block->GetBlockId()] = block->GetDominatedBlocks().size();
      successors_to_visit_[block->GetBlockId()] = block->GetSuccessors().size();
    }
  }

  bool Run();

 private:
  // Per-block GVN. Will also update the ValueSet of the dominated and
  // successor blocks.
  void VisitBasicBlock(HBasicBlock* block);

  HGraph* graph_;
  ScopedArenaAllocator allocator_;
  const SideEffectsAnalysis& side_effects_;

  ValueSet* FindSetFor(HBasicBlock* block) const {
    ValueSet* result = sets_[block->GetBlockId()];
    DCHECK(result != nullptr) << "Could not find set for block B" << block->GetBlockId();
    return result;
  }

  void AbandonSetFor(HBasicBlock* block) {
    DCHECK(sets_[block->GetBlockId()] != nullptr)
        << "Block B" << block->GetBlockId() << " expected to have a set";
    sets_[block->GetBlockId()] = nullptr;
    free_sets_.ClearBit(block->GetBlockId());
  }

  // Returns false if the GlobalValueNumberer has already visited all blocks
  // which may reference the block with id `block_id`.
  bool WillBeReferencedAgain(uint32_t block_id) const;

  // Iterates over visited blocks and finds one which has a ValueSet such that:
  // (a) it will not be referenced in the future, and
  // (b) it can hold a copy of `reference_set` with a reasonable load factor.
  HBasicBlock* FindVisitedBlockWithRecyclableSet(const ValueSet& reference_set) const;

  // ValueSets for blocks, indexed by block id. Initially null; a block's set
  // is allocated and populated by its dominator, and updated by all blocks
  // on the path from the dominator to the block.
  ScopedArenaVector<ValueSet*> sets_;

  // Extra bookkeeping to speed up GVN, indexed by block id.
  // Number of dominated blocks left to visit.
  ScopedArenaVector<uint32_t> dominated_to_visit_;
  // Number of successor blocks left to visit.
  ScopedArenaVector<uint32_t> successors_to_visit_;
  // True iff the block's ValueSet is free to be reused by another block.
  BitVectorView<size_t> free_sets_;

  // BitVector which serves as a fast-access map from block id to
  // visited/unvisited Boolean.
  BitVectorView<size_t> visited_blocks_;

  // True if GVN did at least one removal.
  bool did_optimization_;

  DISALLOW_COPY_AND_ASSIGN(GlobalValueNumberer);
};

bool GlobalValueNumberer::Run() {
  DCHECK(side_effects_.HasRun());
  sets_[graph_->GetEntryBlock()->GetBlockId()] = new (&allocator_) ValueSet(&allocator_);

  // Use the reverse post order to ensure the non-back-edge predecessors of a block are
  // visited before the block itself.
  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
    VisitBasicBlock(block);
  }
  return did_optimization_;
}

void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
  ValueSet* set = nullptr;

  const ArenaVector<HBasicBlock*>& predecessors = block->GetPredecessors();
  if (predecessors.size() == 0 || predecessors[0]->IsEntryBlock()) {
    // The entry block should only accumulate constant instructions, and
    // the builder puts constants only in the entry block.
    // Therefore, there is no need to propagate the value set to the next block.
    set = new (&allocator_) ValueSet(&allocator_);
  } else {
    HBasicBlock* dominator = block->GetDominator();
    ValueSet* dominator_set = FindSetFor(dominator);

    if (dominator->GetSuccessors().size() == 1) {
      // `block` is a direct successor of its dominator. No need to clone the
      // dominator's set; `block` can take over its ownership, including its buckets.
      DCHECK_EQ(dominator->GetSingleSuccessor(), block);
      AbandonSetFor(dominator);
      set = dominator_set;
    } else {
      // Try to find a basic block which will never be referenced again and whose
      // ValueSet can therefore be recycled. We will need to copy `dominator_set`
      // into the recycled set, so we pass `dominator_set` as a reference for size.
      HBasicBlock* recyclable = FindVisitedBlockWithRecyclableSet(*dominator_set);
      if (recyclable == nullptr) {
        // No block with a suitable ValueSet found. Allocate a new one and
        // copy `dominator_set` into it.
        set = new (&allocator_) ValueSet(&allocator_, *dominator_set);
      } else {
        // Block with a recyclable ValueSet found. Clone `dominator_set` into it.
        set = FindSetFor(recyclable);
        AbandonSetFor(recyclable);
        set->PopulateFrom(*dominator_set);
      }
    }

    if (!set->IsEmpty()) {
      if (block->IsLoopHeader()) {
        if (block->GetLoopInformation()->ContainsIrreducibleLoop()) {
          // To satisfy our linear scan algorithm, no instruction should flow in an irreducible
          // loop header. We clear the set at entry of irreducible loops and any loop containing
          // an irreducible loop, as in both cases GVN can extend the liveness of an instruction
          // across the irreducible loop.
          // Note that, if we're not compiling OSR, we could still do GVN and introduce
          // phis at irreducible loop headers. We decided it was not worth the complexity.
          set->Clear();
        } else {
          DCHECK(!block->GetLoopInformation()->IsIrreducible());
          DCHECK_EQ(block->GetDominator(), block->GetLoopInformation()->GetPreHeader());
          set->Kill(side_effects_.GetLoopEffects(block));
        }
      } else if (predecessors.size() > 1) {
        for (HBasicBlock* predecessor : predecessors) {
          set->IntersectWith(FindSetFor(predecessor));
          if (set->IsEmpty()) {
            break;
          }
        }
      }
    }
  }

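  // Publish the set so that dominated blocks and successors can look it up
  // through FindSetFor().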
  sets_[block->GetBlockId()] = set;

  HInstruction* current = block->GetFirstInstruction();
  while (current != nullptr) {
    // Save the next instruction in case `current` is removed from the graph.
    HInstruction* next = current->GetNext();
    // Do not kill the set with the side effects of the instruction just now: if
    // the instruction is GVN'ed, we don't need to kill.
    //
    // BoundType is a special case example of an instruction which shouldn't be moved but can be
    // GVN'ed.
    //
    // Deoptimize is a special case since even though we don't want to move it, we can still remove
    // it for GVN.
    if (current->CanBeMoved() || current->IsBoundType() || current->IsDeoptimize()) {
      if (current->IsBinaryOperation() && current->AsBinaryOperation()->IsCommutative()) {
        // For commutative ops, (x op y) will be treated the same as (y op x)
        // after fixed ordering.
        current->AsBinaryOperation()->OrderInputs();
      }
      HInstruction* existing = set->Lookup(current);
      if (existing != nullptr) {
        // This replacement doesn't make more OrderInputs() calls necessary, since
        // `current` is either used by an instruction that it dominates (which hasn't
        // been visited yet due to the order we visit instructions), or by a phi, and
        // we don't do OrderInputs() on a phi anyway.
        current->ReplaceWith(existing);
        current->GetBlock()->RemoveInstruction(current);
        did_optimization_ = true;
      } else {
        set->Kill(current->GetSideEffects());
        set->Add(current);
      }
    } else {
      set->Kill(current->GetSideEffects());
    }
    current = next;
  }

  visited_blocks_.SetBit(block->GetBlockId());

  // Bookkeeping to mark ValueSets to be reused. We mark them as free if the dominator /
  // predecessor will never be referenced again, and if it hasn't been reused already by this
  // method (see the `dominator->GetSuccessors().size() == 1` case).
  if (block->GetDominator() != nullptr) {
    const uint32_t id = block->GetDominator()->GetBlockId();
    dominated_to_visit_[id]--;
    if (!WillBeReferencedAgain(id) && sets_[id] != nullptr) {
      free_sets_.SetBit(id);
    }
  }
  for (HBasicBlock* pred : predecessors) {
    const uint32_t id = pred->GetBlockId();
    successors_to_visit_[id]--;
    if (!WillBeReferencedAgain(id) && sets_[id] != nullptr) {
      free_sets_.SetBit(id);
    }
  }
}

bool GlobalValueNumberer::WillBeReferencedAgain(uint32_t block_id) const {
  // The block itself hasn't been visited, or
  // a dominated block has yet to be visited, or
  // a successor has yet to be visited.
  return !visited_blocks_.IsBitSet(block_id) ||
         dominated_to_visit_[block_id] != 0 ||
         successors_to_visit_[block_id] != 0;
}

HBasicBlock* GlobalValueNumberer::FindVisitedBlockWithRecyclableSet(
    const ValueSet& reference_set) const {
  HBasicBlock* secondary_match = nullptr;

  // TODO(solanes): If we keep `free_sets_` sorted by size we could do a binary search instead of a
  // linear one. Note that while a HashMap<size, free_sets> is better for knowing if there's an
  // exact match, that data structure is worse for the exact_match=false case.
  for (size_t block_id : free_sets_.Indexes()) {
    DCHECK(!WillBeReferencedAgain(block_id));
    ValueSet* current_set = sets_[block_id];
    DCHECK_NE(current_set, nullptr);

    // We test if `current_set` has enough buckets to store a copy of
    // `reference_set` with a reasonable load factor. If we find a set whose
    // number of buckets matches perfectly, we return right away. If we find one
    // that is larger, we return it if no perfectly-matching set is found.
    if (current_set->CanHoldCopyOf(reference_set, /* exact_match= */ true)) {
      return graph_->GetBlocks()[block_id];
    } else if (secondary_match == nullptr &&
               current_set->CanHoldCopyOf(reference_set, /* exact_match= */ false)) {
      secondary_match = graph_->GetBlocks()[block_id];
    }
  }

  return secondary_match;
}

bool GVNOptimization::Run() {
  GlobalValueNumberer gvn(graph_, side_effects_);
  return gvn.Run();
}

}  // namespace art