/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "load_store_analysis.h"

#include "base/scoped_arena_allocator.h"
#include "optimizing/escape.h"

namespace art {

// A cap for the number of heap locations to prevent pathological time/space consumption.
// The number of heap locations for most methods stays below this threshold.
constexpr size_t kMaxNumberOfHeapLocations = 32;

// Test if two integer ranges [l1,h1] and [l2,h2] overlap.
// Note that the ranges are inclusive on both ends.
//       l1|------|h1
//  l2|------|h2
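// For example, [0,3] and [2,5] overlap since max(0,2) = 2 <= min(3,5) = 3,
// whereas [0,1] and [3,4] do not since max(0,3) = 3 > min(1,4) = 1.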
static bool CanIntegerRangesOverlap(int64_t l1, int64_t h1, int64_t l2, int64_t h2) {
  return std::max(l1, l2) <= std::min(h1, h2);
}

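// Test whether an access at index [i +/- CONST] (idx1) may alias an access at
// index [i] (idx2), where each access covers `vector_length` consecutive
// elements. For example, [i+1] with vector length 1 may alias [i] with vector
// length 4, since the ranges [1,1] and [0,3] overlap.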
static bool CanBinaryOpAndIndexAlias(const HBinaryOperation* idx1,
                                     const size_t vector_length1,
                                     const HInstruction* idx2,
                                     const size_t vector_length2) {
  if (!IsAddOrSub(idx1)) {
    // We currently only support Add and Sub operations.
    return true;
  }
  if (idx1->AsBinaryOperation()->GetLeastConstantLeft() != idx2) {
    // Cannot analyze [i+CONST1] and [j].
    return true;
  }
  if (!idx1->GetConstantRight()->IsIntConstant()) {
    return true;
  }

  // Since 'i' is the same in [i+CONST] and [i],
  // further compare [CONST] and [0].
  int64_t l1 = idx1->IsAdd() ?
               idx1->GetConstantRight()->AsIntConstant()->GetValue() :
               -idx1->GetConstantRight()->AsIntConstant()->GetValue();
  int64_t l2 = 0;
  int64_t h1 = l1 + (vector_length1 - 1);
  int64_t h2 = l2 + (vector_length2 - 1);
  return CanIntegerRangesOverlap(l1, h1, l2, h2);
}

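// Test whether accesses at indices [i +/- CONST1] (idx1) and [i +/- CONST2]
// (idx2) may alias, where each access covers `vector_length` consecutive
// elements. For example, [i+1] and [i+4] with vector length 4 each may alias,
// since the ranges [1,4] and [4,7] overlap; with vector length 1 they do not.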
static bool CanBinaryOpsAlias(const HBinaryOperation* idx1,
                              const size_t vector_length1,
                              const HBinaryOperation* idx2,
                              const size_t vector_length2) {
  if (!IsAddOrSub(idx1) || !IsAddOrSub(idx2)) {
    // We currently only support Add and Sub operations.
    return true;
  }
  if (idx1->AsBinaryOperation()->GetLeastConstantLeft() !=
      idx2->AsBinaryOperation()->GetLeastConstantLeft()) {
    // Cannot analyze [i+CONST1] and [j+CONST2].
    return true;
  }
  if (!idx1->GetConstantRight()->IsIntConstant() ||
      !idx2->GetConstantRight()->IsIntConstant()) {
    return true;
  }

  // Since 'i' is the same in [i+CONST1] and [i+CONST2],
  // further compare [CONST1] and [CONST2].
  int64_t l1 = idx1->IsAdd() ?
               idx1->GetConstantRight()->AsIntConstant()->GetValue() :
               -idx1->GetConstantRight()->AsIntConstant()->GetValue();
  int64_t l2 = idx2->IsAdd() ?
               idx2->GetConstantRight()->AsIntConstant()->GetValue() :
               -idx2->GetConstantRight()->AsIntConstant()->GetValue();
  int64_t h1 = l1 + (vector_length1 - 1);
  int64_t h2 = l2 + (vector_length2 - 1);
  return CanIntegerRangesOverlap(l1, h1, l2, h2);
}

// Make sure we mark any writes/potential writes to heap-locations within partially
// escaped values as escaping.
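// For example, if the reference escapes in block B1 (so B1 is excluded from the
// no-escape subgraph) and a block B2 reachable from B1 stores into a field of
// the reference, then B2 must be excluded as well.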
void ReferenceInfo::PrunePartialEscapeWrites() {
  DCHECK(subgraph_ != nullptr);
  if (!subgraph_->IsValid()) {
    // All paths escape.
    return;
  }
  HGraph* graph = reference_->GetBlock()->GetGraph();
  ArenaBitVector additional_exclusions(
      allocator_, graph->GetBlocks().size(), false, kArenaAllocLSA);
  for (const HUseListNode<HInstruction*>& use : reference_->GetUses()) {
    const HInstruction* user = use.GetUser();
    if (!additional_exclusions.IsBitSet(user->GetBlock()->GetBlockId()) &&
        subgraph_->ContainsBlock(user->GetBlock()) &&
        (user->IsUnresolvedInstanceFieldSet() || user->IsUnresolvedStaticFieldSet() ||
         user->IsInstanceFieldSet() || user->IsStaticFieldSet() || user->IsArraySet()) &&
        (reference_ == user->InputAt(0)) &&
        std::any_of(subgraph_->UnreachableBlocks().begin(),
                    subgraph_->UnreachableBlocks().end(),
                    [&](const HBasicBlock* excluded) -> bool {
                      return reference_->GetBlock()->GetGraph()->PathBetween(excluded,
                                                                             user->GetBlock());
                    })) {
      // This object had memory written to it somewhere; if it escaped along
      // some path prior to the current block, this write also counts as an
      // escape.
      additional_exclusions.SetBit(user->GetBlock()->GetBlockId());
    }
  }
  if (UNLIKELY(additional_exclusions.IsAnyBitSet())) {
    for (uint32_t exc : additional_exclusions.Indexes()) {
      subgraph_->RemoveBlock(graph->GetBlocks()[exc]);
    }
  }
}

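// Whether `inst` is an allocation that LSE could potentially eliminate: a
// NewInstance that needs no checks, or a NewArray with a known non-negative
// constant length that is only indexed with constants.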
bool HeapLocationCollector::InstructionEligibleForLSERemoval(HInstruction* inst) const {
  if (inst->IsNewInstance()) {
    return !inst->AsNewInstance()->NeedsChecks();
  } else if (inst->IsNewArray()) {
    HInstruction* array_length = inst->AsNewArray()->GetLength();
    bool known_array_length =
        array_length->IsIntConstant() && array_length->AsIntConstant()->GetValue() >= 0;
    return known_array_length &&
           std::all_of(inst->GetUses().cbegin(),
                       inst->GetUses().cend(),
                       [&](const HUseListNode<HInstruction*>& user) {
                         if (user.GetUser()->IsArrayGet() || user.GetUser()->IsArraySet()) {
                           return user.GetUser()->InputAt(1)->IsIntConstant();
                         }
                         return true;
                       });
  } else {
    return false;
  }
}

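// Record the points at which `reference_` escapes, also following values merged
// through Phi and Select instructions, and conservatively treating irreducible
// loop headers as escape points.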
void ReferenceInfo::CollectPartialEscapes(HGraph* graph) {
  ScopedArenaAllocator saa(graph->GetArenaStack());
  ArenaBitVector seen_instructions(&saa, graph->GetCurrentInstructionId(), false, kArenaAllocLSA);
  // Get regular escapes.
  ScopedArenaVector<HInstruction*> additional_escape_vectors(saa.Adapter(kArenaAllocLSA));
  LambdaEscapeVisitor scan_instructions([&](HInstruction* escape) -> bool {
    HandleEscape(escape);
    // LSE can't track heap-locations through Phi and Select instructions, so we
    // need to assume all escapes from these are escapes for the base reference.
    if ((escape->IsPhi() || escape->IsSelect()) && !seen_instructions.IsBitSet(escape->GetId())) {
      seen_instructions.SetBit(escape->GetId());
      additional_escape_vectors.push_back(escape);
    }
    return true;
  });
  additional_escape_vectors.push_back(reference_);
  while (!additional_escape_vectors.empty()) {
    HInstruction* ref = additional_escape_vectors.back();
    additional_escape_vectors.pop_back();
    DCHECK(ref == reference_ || ref->IsPhi() || ref->IsSelect()) << *ref;
    VisitEscapes(ref, scan_instructions);
  }

  // Mark irreducible loop headers as escaping since they cannot be tracked through.
  for (HBasicBlock* blk : graph->GetActiveBlocks()) {
    if (blk->IsLoopHeader() && blk->GetLoopInformation()->IsIrreducible()) {
      HandleEscape(blk);
    }
  }
}

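// Record compilation statistics about how many allocations look removable by
// full or partial load-store elimination.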
void HeapLocationCollector::DumpReferenceStats(OptimizingCompilerStats* stats) {
  if (stats == nullptr) {
    return;
  }
  std::vector<bool> seen_instructions(GetGraph()->GetCurrentInstructionId(), false);
  for (auto hl : heap_locations_) {
    auto ri = hl->GetReferenceInfo();
    if (ri == nullptr || seen_instructions[ri->GetReference()->GetId()]) {
      continue;
    }
    auto instruction = ri->GetReference();
    seen_instructions[instruction->GetId()] = true;
    if (ri->IsSingletonAndRemovable()) {
      if (InstructionEligibleForLSERemoval(instruction)) {
        MaybeRecordStat(stats, MethodCompilationStat::kFullLSEPossible);
      }
    }
    // TODO This is an estimate of the number of allocations we will be able
    // to (partially) remove. As additional work is done this can be refined.
    if (ri->IsPartialSingleton() && instruction->IsNewInstance() &&
        ri->GetNoEscapeSubgraph()->ContainsBlock(instruction->GetBlock()) &&
        !ri->GetNoEscapeSubgraph()->GetExcludedCohorts().empty() &&
        InstructionEligibleForLSERemoval(instruction)) {
      MaybeRecordStat(stats, MethodCompilationStat::kPartialLSEPossible);
    }
  }
}

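// Test whether two array accesses, given by their index instructions and
// vector lengths (the number of consecutive elements accessed, kScalar for a
// non-vector access), may refer to overlapping elements. For example, a[i] and
// a[i+2] do not alias as scalar accesses, but may alias when either access
// covers 4 consecutive elements.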
bool HeapLocationCollector::CanArrayElementsAlias(const HInstruction* idx1,
                                                  const size_t vector_length1,
                                                  const HInstruction* idx2,
                                                  const size_t vector_length2) const {
  DCHECK(idx1 != nullptr);
  DCHECK(idx2 != nullptr);
  DCHECK_GE(vector_length1, HeapLocation::kScalar);
  DCHECK_GE(vector_length2, HeapLocation::kScalar);

  // [i] and [i].
  if (idx1 == idx2) {
    return true;
  }

  // [CONST1] and [CONST2].
  if (idx1->IsIntConstant() && idx2->IsIntConstant()) {
    int64_t l1 = idx1->AsIntConstant()->GetValue();
    int64_t l2 = idx2->AsIntConstant()->GetValue();
    // To avoid any overflow in the following CONST+vector_length calculation,
    // use int64_t instead of int32_t.
    int64_t h1 = l1 + (vector_length1 - 1);
    int64_t h2 = l2 + (vector_length2 - 1);
    return CanIntegerRangesOverlap(l1, h1, l2, h2);
  }

  // [i+CONST] and [i].
  if (idx1->IsBinaryOperation() &&
      idx1->AsBinaryOperation()->GetConstantRight() != nullptr &&
      idx1->AsBinaryOperation()->GetLeastConstantLeft() == idx2) {
    return CanBinaryOpAndIndexAlias(idx1->AsBinaryOperation(),
                                    vector_length1,
                                    idx2,
                                    vector_length2);
  }

  // [i] and [i+CONST].
  if (idx2->IsBinaryOperation() &&
      idx2->AsBinaryOperation()->GetConstantRight() != nullptr &&
      idx2->AsBinaryOperation()->GetLeastConstantLeft() == idx1) {
    return CanBinaryOpAndIndexAlias(idx2->AsBinaryOperation(),
                                    vector_length2,
                                    idx1,
                                    vector_length1);
  }

  // [i+CONST1] and [i+CONST2].
  if (idx1->IsBinaryOperation() &&
      idx1->AsBinaryOperation()->GetConstantRight() != nullptr &&
      idx2->IsBinaryOperation() &&
      idx2->AsBinaryOperation()->GetConstantRight() != nullptr) {
    return CanBinaryOpsAlias(idx1->AsBinaryOperation(),
                             vector_length1,
                             idx2->AsBinaryOperation(),
                             vector_length2);
  }

  // By default, MAY alias.
  return true;
}

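// Run the analysis: collect heap locations over the graph in reverse post order,
// bail out when the result would not be useful to LSE, and otherwise build the
// aliasing matrix.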
bool LoadStoreAnalysis::Run() {
  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
    heap_location_collector_.VisitBasicBlock(block);
  }

  if (heap_location_collector_.GetNumberOfHeapLocations() > kMaxNumberOfHeapLocations) {
    // Bail out if there are too many heap locations to deal with.
    heap_location_collector_.CleanUp();
    return false;
  }
  if (!heap_location_collector_.HasHeapStores()) {
    // Without heap stores, this pass would act mostly as GVN on heap accesses.
    heap_location_collector_.CleanUp();
    return false;
  }
  if (heap_location_collector_.HasVolatile() || heap_location_collector_.HasMonitorOps()) {
    // Don't do load/store elimination if the method has volatile field accesses or
    // monitor operations, for now.
    // TODO: do it right.
    heap_location_collector_.CleanUp();
    return false;
  }

  heap_location_collector_.BuildAliasingMatrix();
  heap_location_collector_.DumpReferenceStats(stats_);
  return true;
}

}  // namespace art