/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "execution_subgraph.h"

#include <algorithm>
#include <unordered_set>

#include "android-base/macros.h"
#include "base/arena_allocator.h"
#include "base/arena_bit_vector.h"
#include "base/globals.h"
#include "base/scoped_arena_allocator.h"
#include "nodes.h"

namespace art {

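// The subgraph initially allows every successor edge (each block's filter
// bitset starts with all bits set); blocks are excluded later via
// RemoveBlock. The DCHECK below verifies that every block's successor count
// fits in the kMaxFilterableSuccessors-wide bitsets used as filters.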
ExecutionSubgraph::ExecutionSubgraph(HGraph* graph, ScopedArenaAllocator* allocator)
    : graph_(graph),
      allocator_(allocator),
      allowed_successors_(graph_->GetBlocks().size(),
                          ~(std::bitset<kMaxFilterableSuccessors> {}),
                          allocator_->Adapter(kArenaAllocLSA)),
      unreachable_blocks_(
          allocator_, graph_->GetBlocks().size(), /*expandable=*/ false, kArenaAllocLSA),
      valid_(true),
      needs_prune_(false),
      finalized_(false) {
  if (valid_) {
    DCHECK(std::all_of(graph->GetBlocks().begin(), graph->GetBlocks().end(), [](HBasicBlock* it) {
      return it == nullptr || it->GetSuccessors().size() <= kMaxFilterableSuccessors;
    }));
  }
}

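// Excludes 'to_remove' from the subgraph by clearing its bit in every
// predecessor's allowed-successor bitset. Prune() must be called afterwards
// to propagate the removal through the graph. A rough usage sketch (the
// graph, allocator, and block names here are hypothetical):
//
//   ExecutionSubgraph esg(graph, &allocator);
//   esg.RemoveBlock(blk);  // Exclude 'blk' from the subgraph.
//   esg.Prune();           // Drop edges that can no longer reach the exit.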
void ExecutionSubgraph::RemoveBlock(const HBasicBlock* to_remove) {
  if (!valid_) {
    return;
  }
  uint32_t id = to_remove->GetBlockId();
  if (unreachable_blocks_.IsBitSet(id)) {
    if (kIsDebugBuild) {
      // Setting this isn't strictly needed here, but it lets the DCHECKs
      // verify that we always call Prune after removing any block.
      needs_prune_ = true;
    }
    return;
  }
  unreachable_blocks_.SetBit(id);
  for (HBasicBlock* pred : to_remove->GetPredecessors()) {
    std::bitset<kMaxFilterableSuccessors> allowed_successors {};
    // ZipCount iterates over the successors and their indices at the same time.
    for (auto [succ, i] : ZipCount(MakeIterationRange(pred->GetSuccessors()))) {
      if (succ != to_remove) {
        allowed_successors.set(i);
      }
    }
    LimitBlockSuccessors(pred, allowed_successors);
  }
}

// Removes sink nodes.
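// A sink is a block that can no longer reach the exit using only allowed
// successor edges. The DFS below records, for each block, which successor
// edges lie on some entry-to-exit path (or feed a loop on such a path);
// every other edge is dropped from allowed_successors_, and blocks whose
// 'results' entry stays empty are marked unreachable.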
void ExecutionSubgraph::Prune() {
  if (UNLIKELY(!valid_)) {
    return;
  }
  needs_prune_ = false;
  // 'results' (below) is the record of the edges that were both (1) explored
  // and (2) reached the exit node.
  {
    // Allocator for temporary values.
    ScopedArenaAllocator temporaries(graph_->GetArenaStack());
    ScopedArenaVector<std::bitset<kMaxFilterableSuccessors>> results(
        graph_->GetBlocks().size(), temporaries.Adapter(kArenaAllocLSA));
    unreachable_blocks_.ClearAllBits();
    // TODO: We should support infinite loops as well.
    if (UNLIKELY(graph_->GetExitBlock() == nullptr)) {
      // Infinite loop.
      valid_ = false;
      return;
    }
    // Fill the 'results' vector with the successor bits we need to set in
    // allowed_successors_ in order to prune sink nodes.
    bool start_reaches_end = false;
    // This is basically a DFS of the graph with some edges skipped.
    {
      const size_t num_blocks = graph_->GetBlocks().size();
      constexpr ssize_t kUnvisitedSuccIdx = -1;
      ArenaBitVector visiting(&temporaries, num_blocks, false, kArenaAllocLSA);
      // How many of the successors of each block we have already examined.
      // This has three states.
      // (1) kUnvisitedSuccIdx: we have not examined any edges,
      // (2) 0 <= val < # of successors: we have examined 'val' successors and
      //     are currently examining successors_[val],
      // (3) kMaxFilterableSuccessors: we have examined all of the successors
      //     of the block (the 'result' is final).
      ScopedArenaVector<ssize_t> last_succ_seen(
          num_blocks, kUnvisitedSuccIdx, temporaries.Adapter(kArenaAllocLSA));
      // A stack of which blocks we are visiting in this DFS traversal. Does
      // not include the current block. Used with last_succ_seen to figure out
      // which bits to set if we find a path to the end/loop.
      ScopedArenaVector<uint32_t> current_path(temporaries.Adapter(kArenaAllocLSA));
      // Just ensure we have enough space. The allocator will be cleared
      // shortly anyway so this is fast.
      current_path.reserve(num_blocks);
      // Current block we are examining. Modified only by 'push_block' and
      // 'pop_block'.
      const HBasicBlock* cur_block = graph_->GetEntryBlock();
      // Used to recurse: saves where we are in the current block and starts
      // iterating on 'blk'. We must 'continue' immediately after calling this.
      auto push_block = [&](const HBasicBlock* blk) {
        DCHECK(std::find(current_path.cbegin(), current_path.cend(), cur_block->GetBlockId()) ==
               current_path.end());
        if (kIsDebugBuild) {
          std::for_each(current_path.cbegin(), current_path.cend(), [&](auto id) {
            DCHECK_GT(last_succ_seen[id], kUnvisitedSuccIdx) << id;
            DCHECK_LT(last_succ_seen[id], static_cast<ssize_t>(kMaxFilterableSuccessors)) << id;
          });
        }
        current_path.push_back(cur_block->GetBlockId());
        visiting.SetBit(cur_block->GetBlockId());
        cur_block = blk;
      };
      // Used to note that we have fully explored a block and should return
      // back up. Sets cur_block appropriately. We must 'continue' immediately
      // after calling this.
      auto pop_block = [&]() {
        if (UNLIKELY(current_path.empty())) {
          // Should only happen if the entry block's successors are exhausted.
          DCHECK_GE(last_succ_seen[graph_->GetEntryBlock()->GetBlockId()],
                    static_cast<ssize_t>(graph_->GetEntryBlock()->GetSuccessors().size()));
          cur_block = nullptr;
        } else {
          const HBasicBlock* last = graph_->GetBlocks()[current_path.back()];
          visiting.ClearBit(current_path.back());
          current_path.pop_back();
          cur_block = last;
        }
      };
      // Mark the current path as a path to the end. This is in contrast to
      // paths that end in (e.g.) removed blocks.
      auto propagate_true = [&]() {
        for (uint32_t id : current_path) {
          DCHECK_GT(last_succ_seen[id], kUnvisitedSuccIdx);
          DCHECK_LT(last_succ_seen[id], static_cast<ssize_t>(kMaxFilterableSuccessors));
          results[id].set(last_succ_seen[id]);
        }
      };
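      // The main DFS loop. Each iteration either advances cur_block to its
      // next allowed successor, recurs into an unvisited block, or pops back
      // up once a block's successors are exhausted.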
      ssize_t num_entry_succ = graph_->GetEntryBlock()->GetSuccessors().size();
      // As long as the entry block has not explored all of its successors we
      // still have work to do.
      const uint32_t entry_block_id = graph_->GetEntryBlock()->GetBlockId();
      while (num_entry_succ > last_succ_seen[entry_block_id]) {
        DCHECK(cur_block != nullptr);
        uint32_t id = cur_block->GetBlockId();
        DCHECK((current_path.empty() && cur_block == graph_->GetEntryBlock()) ||
               current_path.front() == graph_->GetEntryBlock()->GetBlockId())
            << "current path size: " << current_path.size()
            << " cur_block id: " << cur_block->GetBlockId() << " entry id "
            << graph_->GetEntryBlock()->GetBlockId();
        DCHECK(!visiting.IsBitSet(id))
            << "Somehow ended up in a loop! This should have been caught before now! " << id;
        std::bitset<kMaxFilterableSuccessors>& result = results[id];
        if (cur_block == graph_->GetExitBlock()) {
          start_reaches_end = true;
          propagate_true();
          pop_block();
          continue;
        } else if (last_succ_seen[id] == kMaxFilterableSuccessors) {
          // Already fully explored.
          if (result.any()) {
            propagate_true();
          }
          pop_block();
          continue;
        }
        // NB: This is a pointer; modifications update last_succ_seen.
        ssize_t* cur_succ = &last_succ_seen[id];
        std::bitset<kMaxFilterableSuccessors> succ_bitmap = GetAllowedSuccessors(cur_block);
        // Get the next allowed successor.
        while (++(*cur_succ) < static_cast<ssize_t>(kMaxFilterableSuccessors) &&
               !succ_bitmap.test(*cur_succ)) {
          DCHECK_GE(*cur_succ, 0);
        }
        if (*cur_succ >= static_cast<ssize_t>(cur_block->GetSuccessors().size())) {
          // No more successors. Mark that we've checked everything. Later
          // visits to this node can use the existing data.
          DCHECK_LE(*cur_succ, static_cast<ssize_t>(kMaxFilterableSuccessors));
          *cur_succ = kMaxFilterableSuccessors;
          pop_block();
          continue;
        }
        const HBasicBlock* nxt = cur_block->GetSuccessors()[*cur_succ];
        DCHECK(nxt != nullptr) << "id: " << *cur_succ
                               << " max: " << cur_block->GetSuccessors().size();
        if (visiting.IsBitSet(nxt->GetBlockId())) {
          // We found a loop back to a block on the current path. Record the
          // allowed successor in this block's results and mark the whole path
          // as reaching the end.
          result.set(*cur_succ);
          propagate_true();
        } else {
          // Not a loop yet. Recur.
          push_block(nxt);
        }
      }
    }
    // If we can't reach the end then there is no path through the graph
    // without hitting excluded blocks.
    if (UNLIKELY(!start_reaches_end)) {
      valid_ = false;
      return;
    }
    // Mark blocks we didn't see in the reaches-end DFS.
    for (const HBasicBlock* blk : graph_->GetBlocks()) {
      if (blk != nullptr &&
          results[blk->GetBlockId()].none() &&
          blk != graph_->GetExitBlock() &&
          blk != graph_->GetEntryBlock()) {
        // We never visited this block; it must be unreachable.
        unreachable_blocks_.SetBit(blk->GetBlockId());
      }
    }
    // Write the new data.
    memcpy(allowed_successors_.data(),
           results.data(),
           results.size() * sizeof(std::bitset<kMaxFilterableSuccessors>));
  }
  RecalculateExcludedCohort();
}

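// A 'concavity' is a reachable block that both is reached by and reaches
// excluded blocks, i.e. it sits on some path from one removed block to
// another. For example, if A and C are removed and A -> B -> C, then B is
// removed as well. Removing concavities keeps each excluded region
// contiguous, and the final Prune() propagates the new removals.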
void ExecutionSubgraph::RemoveConcavity() {
  if (UNLIKELY(!valid_)) {
    return;
  }
  DCHECK(!needs_prune_);
  for (const HBasicBlock* blk : graph_->GetBlocks()) {
    if (blk == nullptr || unreachable_blocks_.IsBitSet(blk->GetBlockId())) {
      continue;
    }
    uint32_t blkid = blk->GetBlockId();
    if (std::any_of(unreachable_blocks_.Indexes().begin(),
                    unreachable_blocks_.Indexes().end(),
                    [&](uint32_t skipped) { return graph_->PathBetween(skipped, blkid); }) &&
        std::any_of(unreachable_blocks_.Indexes().begin(),
                    unreachable_blocks_.Indexes().end(),
                    [&](uint32_t skipped) { return graph_->PathBetween(blkid, skipped); })) {
      RemoveBlock(blk);
    }
  }
  Prune();
}

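// Splits the unreachable blocks into connected components ('cohorts') by
// flood-filling across both predecessor and successor edges, then records
// each cohort's entry blocks (those with a predecessor outside the cohort)
// and exit blocks (those with a successor outside it).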
void ExecutionSubgraph::RecalculateExcludedCohort() {
  DCHECK(!needs_prune_);
  excluded_list_.emplace(allocator_->Adapter(kArenaAllocLSA));
  ScopedArenaVector<ExcludedCohort>& res = excluded_list_.value();
  // Make a copy of unreachable_blocks_.
  ArenaBitVector unreachable(allocator_, graph_->GetBlocks().size(), false, kArenaAllocLSA);
  unreachable.Copy(&unreachable_blocks_);
  // Split the unreachable blocks into cohorts with a flood fill.
  while (unreachable.IsAnyBitSet()) {
    res.emplace_back(allocator_, graph_);
    ExcludedCohort& cohort = res.back();
    // Beyond this point we only allocate for the worklist queue, so create
    // another arena to save memory.
    ScopedArenaAllocator alloc(graph_->GetArenaStack());
    ScopedArenaQueue<const HBasicBlock*> worklist(alloc.Adapter(kArenaAllocLSA));
    // Select an arbitrary node.
    const HBasicBlock* first = graph_->GetBlocks()[unreachable.GetHighestBitSet()];
    worklist.push(first);
    do {
      // Flood-fill both forwards and backwards.
      const HBasicBlock* cur = worklist.front();
      worklist.pop();
      if (!unreachable.IsBitSet(cur->GetBlockId())) {
        // Already visited or reachable somewhere else.
        continue;
      }
      unreachable.ClearBit(cur->GetBlockId());
      cohort.blocks_.SetBit(cur->GetBlockId());
      // Don't bother filtering here; it's done on the next go-around.
      for (const HBasicBlock* pred : cur->GetPredecessors()) {
        worklist.push(pred);
      }
      for (const HBasicBlock* succ : cur->GetSuccessors()) {
        worklist.push(succ);
      }
    } while (!worklist.empty());
  }
  // Figure out entry & exit nodes.
  for (ExcludedCohort& cohort : res) {
    DCHECK(cohort.blocks_.IsAnyBitSet());
    auto is_external = [&](const HBasicBlock* ext) -> bool {
      return !cohort.blocks_.IsBitSet(ext->GetBlockId());
    };
    for (const HBasicBlock* blk : cohort.Blocks()) {
      const auto& preds = blk->GetPredecessors();
      const auto& succs = blk->GetSuccessors();
      if (std::any_of(preds.cbegin(), preds.cend(), is_external)) {
        cohort.entry_blocks_.SetBit(blk->GetBlockId());
      }
      if (std::any_of(succs.cbegin(), succs.cend(), is_external)) {
        cohort.exit_blocks_.SetBit(blk->GetBlockId());
      }
    }
  }
}

std::ostream& operator<<(std::ostream& os, const ExecutionSubgraph::ExcludedCohort& ex) {
  ex.Dump(os);
  return os;
}

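// Prints the cohort as '{ entry: [...], interior: [...], exit: [...] }',
// where each list holds block ids. A block that is both an entry and an exit
// block appears in both of those lists and not in the interior one.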
void ExecutionSubgraph::ExcludedCohort::Dump(std::ostream& os) const {
  auto dump = [&](BitVecBlockRange arr) {
    os << "[";
    bool first = true;
    for (const HBasicBlock* b : arr) {
      if (!first) {
        os << ", ";
      }
      first = false;
      os << b->GetBlockId();
    }
    os << "]";
  };
  auto dump_blocks = [&]() {
    os << "[";
    bool first = true;
    for (const HBasicBlock* b : Blocks()) {
      if (!entry_blocks_.IsBitSet(b->GetBlockId()) && !exit_blocks_.IsBitSet(b->GetBlockId())) {
        if (!first) {
          os << ", ";
        }
        first = false;
        os << b->GetBlockId();
      }
    }
    os << "]";
  };

  os << "{ entry: ";
  dump(EntryBlocks());
  os << ", interior: ";
  dump_blocks();
  os << ", exit: ";
  dump(ExitBlocks());
  os << "}";
}

}  // namespace art