/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "loop_optimization.h"

#include "arch/arm/instruction_set_features_arm.h"
#include "arch/arm64/instruction_set_features_arm64.h"
#include "arch/instruction_set.h"
#include "arch/x86/instruction_set_features_x86.h"
#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "driver/compiler_options.h"
#include "linear_order.h"
#include "mirror/array-inl.h"
#include "mirror/string.h"

namespace art {

// Enables vectorization (SIMDization) in the loop optimizer.
static constexpr bool kEnableVectorization = true;

//
// Static helpers.
//

// Base alignment for arrays/strings guaranteed by the Android runtime.
static uint32_t BaseAlignment() {
  return kObjectAlignment;
}

// Hidden offset for arrays/strings guaranteed by the Android runtime.
static uint32_t HiddenOffset(DataType::Type type, bool is_string_char_at) {
  return is_string_char_at
      ? mirror::String::ValueOffset().Uint32Value()
      : mirror::Array::DataOffset(DataType::Size(type)).Uint32Value();
}
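
// Illustrative example (actual values depend on the runtime's object layout):
// for a regular kUint16 array element, the hidden offset is
// mirror::Array::DataOffset(2), while a String.charAt() access starts at
// mirror::String::ValueOffset() instead.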

// Remove the instruction from the graph. A bit more elaborate than the usual
// instruction removal, since there may be a cycle in the use structure.
static void RemoveFromCycle(HInstruction* instruction) {
  instruction->RemoveAsUserOfAllInputs();
  instruction->RemoveEnvironmentUsers();
  instruction->GetBlock()->RemoveInstructionOrPhi(instruction, /*ensure_safety=*/ false);
  RemoveEnvironmentUses(instruction);
  ResetEnvironmentInputRecords(instruction);
}

// Detects a goto block and sets succ to the single successor.
static bool IsGotoBlock(HBasicBlock* block, /*out*/ HBasicBlock** succ) {
  if (block->GetPredecessors().size() == 1 &&
      block->GetSuccessors().size() == 1 &&
      block->IsSingleGoto()) {
    *succ = block->GetSingleSuccessor();
    return true;
  }
  return false;
}

// Detect an early exit loop.
static bool IsEarlyExit(HLoopInformation* loop_info) {
  HBlocksInLoopReversePostOrderIterator it_loop(*loop_info);
  for (it_loop.Advance(); !it_loop.Done(); it_loop.Advance()) {
    for (HBasicBlock* successor : it_loop.Current()->GetSuccessors()) {
      if (!loop_info->Contains(*successor)) {
        return true;
      }
    }
  }
  return false;
}

// Forward declaration.
static bool IsZeroExtensionAndGet(HInstruction* instruction,
                                  DataType::Type type,
                                  /*out*/ HInstruction** operand);

// Detect a sign extension in instruction from the given type.
// Returns the promoted operand on success.
static bool IsSignExtensionAndGet(HInstruction* instruction,
                                  DataType::Type type,
                                  /*out*/ HInstruction** operand) {
  // Accept any already wider constant that would be handled properly by sign
  // extension when represented in the *width* of the given narrower data type
  // (the fact that Uint8/Uint16 normally zero extend does not matter here).
  int64_t value = 0;
  if (IsInt64AndGet(instruction, /*out*/ &value)) {
    switch (type) {
      case DataType::Type::kUint8:
      case DataType::Type::kInt8:
        if (IsInt<8>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      case DataType::Type::kUint16:
      case DataType::Type::kInt16:
        if (IsInt<16>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      default:
        return false;
    }
  }
  // An implicit widening conversion of any signed expression sign-extends.
  if (instruction->GetType() == type) {
    switch (type) {
      case DataType::Type::kInt8:
      case DataType::Type::kInt16:
        *operand = instruction;
        return true;
      default:
        return false;
    }
  }
  // An explicit widening conversion of a signed expression sign-extends.
  if (instruction->IsTypeConversion()) {
    HInstruction* conv = instruction->InputAt(0);
    DataType::Type from = conv->GetType();
    switch (instruction->GetType()) {
      case DataType::Type::kInt32:
      case DataType::Type::kInt64:
        if (type == from && (from == DataType::Type::kInt8 ||
                             from == DataType::Type::kInt16 ||
                             from == DataType::Type::kInt32)) {
          *operand = conv;
          return true;
        }
        return false;
      case DataType::Type::kInt16:
        return type == DataType::Type::kUint16 &&
               from == DataType::Type::kUint16 &&
               IsZeroExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand);
      default:
        return false;
    }
  }
  return false;
}
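
// Illustrative example (operand names are hypothetical): if `conv` is an
// HTypeConversion widening a kInt8 value `x` to kInt32, then
// IsSignExtensionAndGet(conv, kInt8, &op) succeeds with op set to `x`,
// so later stages can operate on the narrow value directly.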

// Detect a zero extension in instruction from the given type.
// Returns the promoted operand on success.
static bool IsZeroExtensionAndGet(HInstruction* instruction,
                                  DataType::Type type,
                                  /*out*/ HInstruction** operand) {
  // Accept any already wider constant that would be handled properly by zero
  // extension when represented in the *width* of the given narrower data type
  // (the fact that Int8/Int16 normally sign extend does not matter here).
  int64_t value = 0;
  if (IsInt64AndGet(instruction, /*out*/ &value)) {
    switch (type) {
      case DataType::Type::kUint8:
      case DataType::Type::kInt8:
        if (IsUint<8>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      case DataType::Type::kUint16:
      case DataType::Type::kInt16:
        if (IsUint<16>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      default:
        return false;
    }
  }
  // An implicit widening conversion of any unsigned expression zero-extends.
  if (instruction->GetType() == type) {
    switch (type) {
      case DataType::Type::kUint8:
      case DataType::Type::kUint16:
        *operand = instruction;
        return true;
      default:
        return false;
    }
  }
  // An explicit widening conversion of an unsigned expression zero-extends.
  if (instruction->IsTypeConversion()) {
    HInstruction* conv = instruction->InputAt(0);
    DataType::Type from = conv->GetType();
    switch (instruction->GetType()) {
      case DataType::Type::kInt32:
      case DataType::Type::kInt64:
        if (type == from && from == DataType::Type::kUint16) {
          *operand = conv;
          return true;
        }
        return false;
      case DataType::Type::kUint16:
        return type == DataType::Type::kInt16 &&
               from == DataType::Type::kInt16 &&
               IsSignExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand);
      default:
        return false;
    }
  }
  return false;
}
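
// Illustrative example: the dual of the sign-extension case above. A constant
// such as 200 is accepted for kUint8, since IsUint<8>(200) holds, even though
// 200 is not a valid 8-bit signed value; only the low 8 bits matter here.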

// Detect situations with same-extension narrower operands.
// Returns true on success and sets is_unsigned accordingly.
static bool IsNarrowerOperands(HInstruction* a,
                               HInstruction* b,
                               DataType::Type type,
                               /*out*/ HInstruction** r,
                               /*out*/ HInstruction** s,
                               /*out*/ bool* is_unsigned) {
  DCHECK(a != nullptr && b != nullptr);
  // Look for a matching sign extension.
  DataType::Type stype = HVecOperation::ToSignedType(type);
  if (IsSignExtensionAndGet(a, stype, r) && IsSignExtensionAndGet(b, stype, s)) {
    *is_unsigned = false;
    return true;
  }
  // Look for a matching zero extension.
  DataType::Type utype = HVecOperation::ToUnsignedType(type);
  if (IsZeroExtensionAndGet(a, utype, r) && IsZeroExtensionAndGet(b, utype, s)) {
    *is_unsigned = true;
    return true;
  }
  return false;
}

// As above, single operand.
static bool IsNarrowerOperand(HInstruction* a,
                              DataType::Type type,
                              /*out*/ HInstruction** r,
                              /*out*/ bool* is_unsigned) {
  DCHECK(a != nullptr);
  // Look for a matching sign extension.
  DataType::Type stype = HVecOperation::ToSignedType(type);
  if (IsSignExtensionAndGet(a, stype, r)) {
    *is_unsigned = false;
    return true;
  }
  // Look for a matching zero extension.
  DataType::Type utype = HVecOperation::ToUnsignedType(type);
  if (IsZeroExtensionAndGet(a, utype, r)) {
    *is_unsigned = true;
    return true;
  }
  return false;
}

// Compute relative vector length based on type difference.
static uint32_t GetOtherVL(DataType::Type other_type, DataType::Type vector_type, uint32_t vl) {
  DCHECK(DataType::IsIntegralType(other_type));
  DCHECK(DataType::IsIntegralType(vector_type));
  DCHECK_GE(DataType::SizeShift(other_type), DataType::SizeShift(vector_type));
  return vl >> (DataType::SizeShift(other_type) - DataType::SizeShift(vector_type));
}
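
// For example, with a 16-byte SIMD width, a kInt8 vector has vl = 16 lanes,
// so the matching kInt32 vector length is
// GetOtherVL(kInt32, kInt8, 16) = 16 >> (2 - 0) = 4 lanes.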

// Detect up to two added operands a and b and an accumulated constant c.
static bool IsAddConst(HInstruction* instruction,
                       /*out*/ HInstruction** a,
                       /*out*/ HInstruction** b,
                       /*out*/ int64_t* c,
                       int32_t depth = 8) {  // don't search too deep
  int64_t value = 0;
  // Enter add/sub while still within reasonable depth.
  if (depth > 0) {
    if (instruction->IsAdd()) {
      return IsAddConst(instruction->InputAt(0), a, b, c, depth - 1) &&
             IsAddConst(instruction->InputAt(1), a, b, c, depth - 1);
    } else if (instruction->IsSub() &&
               IsInt64AndGet(instruction->InputAt(1), &value)) {
      *c -= value;
      return IsAddConst(instruction->InputAt(0), a, b, c, depth - 1);
    }
  }
  // Otherwise, deal with leaf nodes.
  if (IsInt64AndGet(instruction, &value)) {
    *c += value;
    return true;
  } else if (*a == nullptr) {
    *a = instruction;
    return true;
  } else if (*b == nullptr) {
    *b = instruction;
    return true;
  }
  return false;  // too many operands
}
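
// For example, an expression (x + 3) + (y - 2) is flattened by the recursion
// into a = x, b = y, c = 1; any third non-constant operand makes it fail.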

// Detect a + b + c with optional constant c.
static bool IsAddConst2(HGraph* graph,
                        HInstruction* instruction,
                        /*out*/ HInstruction** a,
                        /*out*/ HInstruction** b,
                        /*out*/ int64_t* c) {
  if (IsAddConst(instruction, a, b, c) && *a != nullptr) {
    if (*b == nullptr) {
      // Constant is usually already present, unless accumulated.
      *b = graph->GetConstant(instruction->GetType(), (*c));
      *c = 0;
    }
    return true;
  }
  return false;
}

// Detect a direct a - b or a hidden a - (-c).
static bool IsSubConst2(HGraph* graph,
                        HInstruction* instruction,
                        /*out*/ HInstruction** a,
                        /*out*/ HInstruction** b) {
  int64_t c = 0;
  if (instruction->IsSub()) {
    *a = instruction->InputAt(0);
    *b = instruction->InputAt(1);
    return true;
  } else if (IsAddConst(instruction, a, b, &c) && *a != nullptr && *b == nullptr) {
    // Constant for the hidden subtraction.
    *b = graph->GetConstant(instruction->GetType(), -c);
    return true;
  }
  return false;
}
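
// For example, x + (-5) is recognized as the hidden subtraction x - 5:
// IsAddConst yields a = x, b = nullptr, c = -5, so b is materialized here
// as the constant 5 to complete the subtraction form.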

// Detect reductions of the following forms,
//   x = x_phi + ..
//   x = x_phi - ..
static bool HasReductionFormat(HInstruction* reduction, HInstruction* phi) {
  if (reduction->IsAdd()) {
    return (reduction->InputAt(0) == phi && reduction->InputAt(1) != phi) ||
           (reduction->InputAt(0) != phi && reduction->InputAt(1) == phi);
  } else if (reduction->IsSub()) {
    return (reduction->InputAt(0) == phi && reduction->InputAt(1) != phi);
  }
  return false;
}

// Translates vector operation to reduction kind.
static HVecReduce::ReductionKind GetReductionKind(HVecOperation* reduction) {
  if (reduction->IsVecAdd() ||
      reduction->IsVecSub() ||
      reduction->IsVecSADAccumulate() ||
      reduction->IsVecDotProd()) {
    return HVecReduce::kSum;
  }
  LOG(FATAL) << "Unsupported SIMD reduction " << reduction->GetId();
  UNREACHABLE();
}

// Test vector restrictions.
static bool HasVectorRestrictions(uint64_t restrictions, uint64_t tested) {
  return (restrictions & tested) != 0;
}

// Insert an instruction.
static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) {
  DCHECK(block != nullptr);
  DCHECK(instruction != nullptr);
  block->InsertInstructionBefore(instruction, block->GetLastInstruction());
  return instruction;
}

// Check that instructions from the induction sets are fully removed: have no uses
// and no other instructions use them.
static bool CheckInductionSetFullyRemoved(ScopedArenaSet<HInstruction*>* iset) {
  for (HInstruction* instr : *iset) {
    if (instr->GetBlock() != nullptr ||
        !instr->GetUses().empty() ||
        !instr->GetEnvUses().empty() ||
        HasEnvironmentUsedByOthers(instr)) {
      return false;
    }
  }
  return true;
}

// Tries to statically evaluate condition of the specified "HIf" for other condition checks.
static void TryToEvaluateIfCondition(HIf* instruction, HGraph* graph) {
  HInstruction* cond = instruction->InputAt(0);

  // If a condition 'cond' is evaluated in an HIf instruction then in the successors of the
  // IF_BLOCK we statically know the value of the condition 'cond' (TRUE in TRUE_SUCC, FALSE in
  // FALSE_SUCC). Using that we can replace another evaluation (use) EVAL of the same 'cond'
  // with TRUE value (FALSE value) if every path from the ENTRY_BLOCK to EVAL_BLOCK contains the
  // edge HIF_BLOCK->TRUE_SUCC (HIF_BLOCK->FALSE_SUCC).
  //     if (cond) {               if(cond) {
  //       if (cond) {}              if (1) {}
  //     } else {        =======>  } else {
  //       if (cond) {}              if (0) {}
  //     }                         }
  if (!cond->IsConstant()) {
    HBasicBlock* true_succ = instruction->IfTrueSuccessor();
    HBasicBlock* false_succ = instruction->IfFalseSuccessor();

    DCHECK_EQ(true_succ->GetPredecessors().size(), 1u);
    DCHECK_EQ(false_succ->GetPredecessors().size(), 1u);

    const HUseList<HInstruction*>& uses = cond->GetUses();
    for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
      HInstruction* user = it->GetUser();
      size_t index = it->GetIndex();
      HBasicBlock* user_block = user->GetBlock();
      // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
      ++it;
      if (true_succ->Dominates(user_block)) {
        user->ReplaceInput(graph->GetIntConstant(1), index);
      } else if (false_succ->Dominates(user_block)) {
        user->ReplaceInput(graph->GetIntConstant(0), index);
      }
    }
  }
}

// Peel the first 'count' iterations of the loop.
static void PeelByCount(HLoopInformation* loop_info,
                        int count,
                        InductionVarRange* induction_range) {
  for (int i = 0; i < count; i++) {
    // Perform peeling.
    PeelUnrollSimpleHelper helper(loop_info, induction_range);
    helper.DoPeeling();
  }
}

// Returns the narrower type out of instructions a and b types.
static DataType::Type GetNarrowerType(HInstruction* a, HInstruction* b) {
  DataType::Type type = a->GetType();
  if (DataType::Size(b->GetType()) < DataType::Size(type)) {
    type = b->GetType();
  }
  if (a->IsTypeConversion() &&
      DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(type)) {
    type = a->InputAt(0)->GetType();
  }
  if (b->IsTypeConversion() &&
      DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(type)) {
    type = b->InputAt(0)->GetType();
  }
  return type;
}
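
// For example, if a has type kInt32 and b is a type conversion producing
// kInt32 from a kInt16 input, the result is kInt16: conversion inputs are
// inspected so that such widenings do not hide the narrower source type.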

//
// Public methods.
//

HLoopOptimization::HLoopOptimization(HGraph* graph,
                                     const CompilerOptions* compiler_options,
                                     HInductionVarAnalysis* induction_analysis,
                                     OptimizingCompilerStats* stats,
                                     const char* name)
    : HOptimization(graph, name, stats),
      compiler_options_(compiler_options),
      induction_range_(induction_analysis),
      loop_allocator_(nullptr),
      global_allocator_(graph_->GetAllocator()),
      top_loop_(nullptr),
      last_loop_(nullptr),
      iset_(nullptr),
      reductions_(nullptr),
      simplified_(false),
      vector_length_(0),
      vector_refs_(nullptr),
      vector_static_peeling_factor_(0),
      vector_dynamic_peeling_candidate_(nullptr),
      vector_runtime_test_a_(nullptr),
      vector_runtime_test_b_(nullptr),
      vector_map_(nullptr),
      vector_permanent_map_(nullptr),
      vector_mode_(kSequential),
      vector_preheader_(nullptr),
      vector_header_(nullptr),
      vector_body_(nullptr),
      vector_index_(nullptr),
      arch_loop_helper_(ArchNoOptsLoopHelper::Create(compiler_options_ != nullptr
                                                         ? compiler_options_->GetInstructionSet()
                                                         : InstructionSet::kNone,
                                                     global_allocator_)) {
}

bool HLoopOptimization::Run() {
  // Skip if there is no loop or the graph has try-catch/irreducible loops.
  // TODO: make this less of a sledgehammer.
  if (!graph_->HasLoops() || graph_->HasTryCatch() || graph_->HasIrreducibleLoops()) {
    return false;
  }

  // Phase-local allocator.
  ScopedArenaAllocator allocator(graph_->GetArenaStack());
  loop_allocator_ = &allocator;

  // Perform loop optimizations.
  bool didLoopOpt = LocalRun();
  if (top_loop_ == nullptr) {
    graph_->SetHasLoops(false);  // no more loops
  }

  // Detach.
  loop_allocator_ = nullptr;
  last_loop_ = top_loop_ = nullptr;

  return didLoopOpt;
}

//
// Loop setup and traversal.
//

bool HLoopOptimization::LocalRun() {
  bool didLoopOpt = false;
  // Build the linear order using the phase-local allocator. This step enables building
  // a loop hierarchy that properly reflects the outer-inner and previous-next relation.
  ScopedArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
  LinearizeGraph(graph_, &linear_order);

  // Build the loop hierarchy.
  for (HBasicBlock* block : linear_order) {
    if (block->IsLoopHeader()) {
      AddLoop(block->GetLoopInformation());
    }
  }

  // Traverse the loop hierarchy inner-to-outer and optimize. Traversal can use
  // temporary data structures using the phase-local allocator. All new HIR
  // should use the global allocator.
  if (top_loop_ != nullptr) {
    ScopedArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSafeMap<HInstruction*, HInstruction*> reds(
        std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSafeMap<HInstruction*, HInstruction*> map(
        std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSafeMap<HInstruction*, HInstruction*> perm(
        std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    // Attach.
    iset_ = &iset;
    reductions_ = &reds;
    vector_refs_ = &refs;
    vector_map_ = &map;
    vector_permanent_map_ = &perm;
    // Traverse.
    didLoopOpt = TraverseLoopsInnerToOuter(top_loop_);
    // Detach.
    iset_ = nullptr;
    reductions_ = nullptr;
    vector_refs_ = nullptr;
    vector_map_ = nullptr;
    vector_permanent_map_ = nullptr;
  }
  return didLoopOpt;
}

void HLoopOptimization::AddLoop(HLoopInformation* loop_info) {
  DCHECK(loop_info != nullptr);
  LoopNode* node = new (loop_allocator_) LoopNode(loop_info);
  if (last_loop_ == nullptr) {
    // First loop.
    DCHECK(top_loop_ == nullptr);
    last_loop_ = top_loop_ = node;
  } else if (loop_info->IsIn(*last_loop_->loop_info)) {
    // Inner loop.
    node->outer = last_loop_;
    DCHECK(last_loop_->inner == nullptr);
    last_loop_ = last_loop_->inner = node;
  } else {
    // Subsequent loop.
    while (last_loop_->outer != nullptr && !loop_info->IsIn(*last_loop_->outer->loop_info)) {
      last_loop_ = last_loop_->outer;
    }
    node->outer = last_loop_->outer;
    node->previous = last_loop_;
    DCHECK(last_loop_->next == nullptr);
    last_loop_ = last_loop_->next = node;
  }
}

void HLoopOptimization::RemoveLoop(LoopNode* node) {
  DCHECK(node != nullptr);
  DCHECK(node->inner == nullptr);
  if (node->previous != nullptr) {
    // Within sequence.
    node->previous->next = node->next;
    if (node->next != nullptr) {
      node->next->previous = node->previous;
    }
  } else {
    // First of sequence.
    if (node->outer != nullptr) {
      node->outer->inner = node->next;
    } else {
      top_loop_ = node->next;
    }
    if (node->next != nullptr) {
      node->next->outer = node->outer;
      node->next->previous = nullptr;
    }
  }
}

bool HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) {
  bool changed = false;
  for ( ; node != nullptr; node = node->next) {
    // Visit inner loops first. Recompute induction information for this
    // loop if the induction of any inner loop has changed.
    if (TraverseLoopsInnerToOuter(node->inner)) {
      induction_range_.ReVisit(node->loop_info);
      changed = true;
    }
    // Repeat simplifications in the loop-body until no more changes occur.
    // Note that since each simplification consists of eliminating code (without
    // introducing new code), this process is always finite.
    do {
      simplified_ = false;
      SimplifyInduction(node);
      SimplifyBlocks(node);
      changed = simplified_ || changed;
    } while (simplified_);
    // Optimize inner loop.
    if (node->inner == nullptr) {
      changed = OptimizeInnerLoop(node) || changed;
    }
  }
  return changed;
}

//
// Optimization.
//

void HLoopOptimization::SimplifyInduction(LoopNode* node) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();
  // Scan the phis in the header to find opportunities to simplify an induction
  // cycle that is only used outside the loop. Replace these uses, if any, with
  // the last value and remove the induction cycle.
  // Examples: for (int i = 0; x != null;   i++) { .... no i .... }
  //           for (int i = 0; i < 10; i++, k++) { .... no k .... } return k;
  for (HInstructionIterator it(header->GetPhis()); !it.Done(); it.Advance()) {
    HPhi* phi = it.Current()->AsPhi();
    if (TrySetPhiInduction(phi, /*restrict_uses*/ true) &&
        TryAssignLastValue(node->loop_info, phi, preheader, /*collect_loop_uses*/ false)) {
      // Note that it's ok to have replaced uses after the loop with the last value, without
      // being able to remove the cycle. Environment uses (which are the reason we may not be
      // able to remove the cycle) within the loop will still hold the right value. We must
      // have tried first, however, to replace outside uses.
      if (CanRemoveCycle()) {
        simplified_ = true;
        for (HInstruction* i : *iset_) {
          RemoveFromCycle(i);
        }
        DCHECK(CheckInductionSetFullyRemoved(iset_));
      }
    }
  }
}

void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
  // Iterate over all basic blocks in the loop-body.
  for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
    HBasicBlock* block = it.Current();
    // Remove dead instructions from the loop-body.
    RemoveDeadInstructions(block->GetPhis());
    RemoveDeadInstructions(block->GetInstructions());
    // Remove trivial control flow blocks from the loop-body.
    if (block->GetPredecessors().size() == 1 &&
        block->GetSuccessors().size() == 1 &&
        block->GetSingleSuccessor()->GetPredecessors().size() == 1) {
      simplified_ = true;
      block->MergeWith(block->GetSingleSuccessor());
    } else if (block->GetSuccessors().size() == 2) {
      // Trivial if block can be bypassed to either branch.
      HBasicBlock* succ0 = block->GetSuccessors()[0];
      HBasicBlock* succ1 = block->GetSuccessors()[1];
      HBasicBlock* meet0 = nullptr;
      HBasicBlock* meet1 = nullptr;
      if (succ0 != succ1 &&
          IsGotoBlock(succ0, &meet0) &&
          IsGotoBlock(succ1, &meet1) &&
          meet0 == meet1 &&  // meets again
          meet0 != block &&  // no self-loop
          meet0->GetPhis().IsEmpty()) {  // not used for merging
        simplified_ = true;
        succ0->DisconnectAndDelete();
        if (block->Dominates(meet0)) {
          block->RemoveDominatedBlock(meet0);
          succ1->AddDominatedBlock(meet0);
          meet0->SetDominator(succ1);
        }
      }
    }
  }
}

bool HLoopOptimization::TryOptimizeInnerLoopFinite(LoopNode* node) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();
  // Ensure loop header logic is finite.
  int64_t trip_count = 0;
  if (!induction_range_.IsFinite(node->loop_info, &trip_count)) {
    return false;
  }
  // Ensure there is only a single loop-body (besides the header).
  HBasicBlock* body = nullptr;
  for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
    if (it.Current() != header) {
      if (body != nullptr) {
        return false;
      }
      body = it.Current();
    }
  }
  CHECK(body != nullptr);
  // Ensure there is only a single exit point.
  if (header->GetSuccessors().size() != 2) {
    return false;
  }
  HBasicBlock* exit = (header->GetSuccessors()[0] == body)
      ? header->GetSuccessors()[1]
      : header->GetSuccessors()[0];
  // Ensure exit can only be reached by exiting loop.
  if (exit->GetPredecessors().size() != 1) {
    return false;
  }
  // Detect either an empty loop (no side effects other than plain iteration) or
  // a trivial loop (just iterating once). Replace subsequent index uses, if any,
  // with the last value and remove the loop, possibly after unrolling its body.
  HPhi* main_phi = nullptr;
  if (TrySetSimpleLoopHeader(header, &main_phi)) {
    bool is_empty = IsEmptyBody(body);
    if (reductions_->empty() &&  // TODO: possible with some effort
        (is_empty || trip_count == 1) &&
        TryAssignLastValue(node->loop_info, main_phi, preheader, /*collect_loop_uses*/ true)) {
      if (!is_empty) {
        // Unroll the loop-body, which sees initial value of the index.
        main_phi->ReplaceWith(main_phi->InputAt(0));
        preheader->MergeInstructionsWith(body);
      }
      body->DisconnectAndDelete();
      exit->RemovePredecessor(header);
      header->RemoveSuccessor(exit);
      header->RemoveDominatedBlock(exit);
      header->DisconnectAndDelete();
      preheader->AddSuccessor(exit);
      preheader->AddInstruction(new (global_allocator_) HGoto());
      preheader->AddDominatedBlock(exit);
      exit->SetDominator(preheader);
      RemoveLoop(node);  // update hierarchy
      return true;
    }
  }
  // Vectorize loop, if possible and valid.
  if (kEnableVectorization &&
      // Disable vectorization for debuggable graphs: this is a workaround for the bug
      // in 'GenerateNewLoop' which caused the SuspendCheck environment to be invalid.
      // TODO: b/138601207, investigate other possible cases with wrong environment values and
      // possibly switch back vectorization on for debuggable graphs.
      !graph_->IsDebuggable() &&
      TrySetSimpleLoopHeader(header, &main_phi) &&
      ShouldVectorize(node, body, trip_count) &&
      TryAssignLastValue(node->loop_info, main_phi, preheader, /*collect_loop_uses*/ true)) {
    Vectorize(node, body, exit, trip_count);
    graph_->SetHasSIMD(true);  // flag SIMD usage
    MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorized);
    return true;
  }
  return false;
}

bool HLoopOptimization::OptimizeInnerLoop(LoopNode* node) {
  return TryOptimizeInnerLoopFinite(node) || TryPeelingAndUnrolling(node);
}



//
// Scalar loop peeling and unrolling: generic part methods.
//

bool HLoopOptimization::TryUnrollingForBranchPenaltyReduction(LoopAnalysisInfo* analysis_info,
                                                              bool generate_code) {
  if (analysis_info->GetNumberOfExits() > 1) {
    return false;
  }

  uint32_t unrolling_factor = arch_loop_helper_->GetScalarUnrollingFactor(analysis_info);
  if (unrolling_factor == LoopAnalysisInfo::kNoUnrollingFactor) {
    return false;
  }

  if (generate_code) {
    // TODO: support other unrolling factors.
    DCHECK_EQ(unrolling_factor, 2u);

    // Perform unrolling.
    HLoopInformation* loop_info = analysis_info->GetLoopInfo();
    PeelUnrollSimpleHelper helper(loop_info, &induction_range_);
    helper.DoUnrolling();

    // Remove the redundant loop check after unrolling.
    HIf* copy_hif =
        helper.GetBasicBlockMap()->Get(loop_info->GetHeader())->GetLastInstruction()->AsIf();
    int32_t constant = loop_info->Contains(*copy_hif->IfTrueSuccessor()) ? 1 : 0;
    copy_hif->ReplaceInput(graph_->GetIntConstant(constant), 0u);
  }
  return true;
}

bool HLoopOptimization::TryPeelingForLoopInvariantExitsElimination(LoopAnalysisInfo* analysis_info,
                                                                   bool generate_code) {
  HLoopInformation* loop_info = analysis_info->GetLoopInfo();
  if (!arch_loop_helper_->IsLoopPeelingEnabled()) {
    return false;
  }

  if (analysis_info->GetNumberOfInvariantExits() == 0) {
    return false;
  }

  if (generate_code) {
    // Perform peeling.
    PeelUnrollSimpleHelper helper(loop_info, &induction_range_);
    helper.DoPeeling();

    // Statically evaluate loop check after peeling for loop invariant condition.
    const SuperblockCloner::HInstructionMap* hir_map = helper.GetInstructionMap();
    for (auto entry : *hir_map) {
      HInstruction* copy = entry.second;
      if (copy->IsIf()) {
        TryToEvaluateIfCondition(copy->AsIf(), graph_);
      }
    }
  }

  return true;
}

bool HLoopOptimization::TryFullUnrolling(LoopAnalysisInfo* analysis_info, bool generate_code) {
  // Fully unroll loops with a known and small trip count.
  int64_t trip_count = analysis_info->GetTripCount();
  if (!arch_loop_helper_->IsLoopPeelingEnabled() ||
      trip_count == LoopAnalysisInfo::kUnknownTripCount ||
      !arch_loop_helper_->IsFullUnrollingBeneficial(analysis_info)) {
    return false;
  }

  if (generate_code) {
    // Peeling the first N iterations (where N equals the trip count) will effectively
    // eliminate the loop: after peeling we will have N sequential iterations copied into the loop
    // preheader and the original loop. The trip count of this loop will be 0 as the sequential
    // iterations are executed first and there are exactly N of them. Thus we can statically
    // evaluate the loop exit condition to 'false' and fully eliminate it.
    //
    // Here is an example of full unrolling of a loop with a trip count 2:
    //
    //                                           loop_cond_1
    //                                           loop_body_1        <- First iteration.
    //                                               |
    //                             \                 v
    //                            ==\            loop_cond_2
    //                            ==/            loop_body_2        <- Second iteration.
    //                             /                 |
    //               <-                              v     <-
    //     loop_cond \                           loop_cond \        <- This cond is always false.
    //     loop_body _/                          loop_body _/
    //
    HLoopInformation* loop_info = analysis_info->GetLoopInfo();
    PeelByCount(loop_info, trip_count, &induction_range_);
    HIf* loop_hif = loop_info->GetHeader()->GetLastInstruction()->AsIf();
    int32_t constant = loop_info->Contains(*loop_hif->IfTrueSuccessor()) ? 0 : 1;
    loop_hif->ReplaceInput(graph_->GetIntConstant(constant), 0u);
  }

  return true;
}

bool HLoopOptimization::TryPeelingAndUnrolling(LoopNode* node) {
  // Don't run peeling/unrolling if compiler_options_ is nullptr (i.e., running under tests)
  // as InstructionSet is needed.
  if (compiler_options_ == nullptr) {
    return false;
  }

  HLoopInformation* loop_info = node->loop_info;
  int64_t trip_count = LoopAnalysis::GetLoopTripCount(loop_info, &induction_range_);
  LoopAnalysisInfo analysis_info(loop_info);
  LoopAnalysis::CalculateLoopBasicProperties(loop_info, &analysis_info, trip_count);

  if (analysis_info.HasInstructionsPreventingScalarOpts() ||
      arch_loop_helper_->IsLoopNonBeneficialForScalarOpts(&analysis_info)) {
    return false;
  }

  if (!TryFullUnrolling(&analysis_info, /*generate_code*/ false) &&
      !TryPeelingForLoopInvariantExitsElimination(&analysis_info, /*generate_code*/ false) &&
      !TryUnrollingForBranchPenaltyReduction(&analysis_info, /*generate_code*/ false)) {
    return false;
  }

  // Run 'IsLoopClonable' last as it might be time-consuming.
  if (!PeelUnrollHelper::IsLoopClonable(loop_info)) {
    return false;
  }

  return TryFullUnrolling(&analysis_info) ||
         TryPeelingForLoopInvariantExitsElimination(&analysis_info) ||
         TryUnrollingForBranchPenaltyReduction(&analysis_info);
}

//
// Loop vectorization. The implementation is based on the book by Aart J.C. Bik:
// "The Software Vectorization Handbook. Applying Multimedia Extensions for Maximum Performance."
// Intel Press, June, 2004 (http://www.aartbik.com/).
//

bool HLoopOptimization::ShouldVectorize(LoopNode* node, HBasicBlock* block, int64_t trip_count) {
  // Reset vector bookkeeping.
  vector_length_ = 0;
  vector_refs_->clear();
  vector_static_peeling_factor_ = 0;
  vector_dynamic_peeling_candidate_ = nullptr;
  vector_runtime_test_a_ =
  vector_runtime_test_b_ = nullptr;

  // Phis in the loop-body prevent vectorization.
  if (!block->GetPhis().IsEmpty()) {
    return false;
  }

  // Scan the loop-body, starting a right-hand-side tree traversal at each left-hand-side
  // occurrence, which allows passing attributes down the use tree.
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    if (!VectorizeDef(node, it.Current(), /*generate_code*/ false)) {
      return false;  // failure to vectorize a left-hand-side
    }
  }

  // Prepare alignment analysis:
  // (1) find desired alignment (SIMD vector size in bytes).
  // (2) initialize static loop peeling votes (peeling factor that will
  //     make one particular reference aligned), never to exceed (1).
  // (3) variable to record how many references share same alignment.
  // (4) variable to record suitable candidate for dynamic loop peeling.
  uint32_t desired_alignment = GetVectorSizeInBytes();
  DCHECK_LE(desired_alignment, 16u);
  uint32_t peeling_votes[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  uint32_t max_num_same_alignment = 0;
  const ArrayReference* peeling_candidate = nullptr;

  // Data dependence analysis. Find each pair of references with same type, where
  // at least one is a write. Each such pair denotes a possible data dependence.
  // This analysis exploits the property that differently typed arrays cannot be
  // aliased, as well as the property that references either point to the same
  // array or to two completely disjoint arrays, i.e., no partial aliasing.
  // Other than a few simple heuristics, no detailed subscript analysis is done.
  // The scan over references also prepares finding a suitable alignment strategy.
  for (auto i = vector_refs_->begin(); i != vector_refs_->end(); ++i) {
    uint32_t num_same_alignment = 0;
    // Scan over all next references.
    for (auto j = i; ++j != vector_refs_->end(); ) {
      if (i->type == j->type && (i->lhs || j->lhs)) {
        // Found same-typed a[i+x] vs. b[i+y], where at least one is a write.
        HInstruction* a = i->base;
        HInstruction* b = j->base;
        HInstruction* x = i->offset;
        HInstruction* y = j->offset;
        if (a == b) {
          // Found a[i+x] vs. a[i+y]. Accept if x == y (loop-independent data dependence).
          // Conservatively assume a loop-carried data dependence otherwise, and reject.
          if (x != y) {
            return false;
          }
          // Count the number of references that have the same alignment (since
          // base and offset are the same) and where at least one is a write, so
          // e.g. a[i] = a[i] + b[i] counts a[i] but not b[i].
          num_same_alignment++;
        } else {
          // Found a[i+x] vs. b[i+y]. Accept if x == y (at worst loop-independent data dependence).
          // Conservatively assume a potential loop-carried data dependence otherwise, avoided by
          // generating an explicit a != b disambiguation runtime test on the two references.
          if (x != y) {
            // To avoid excessive overhead, we only accept one a != b test.
            if (vector_runtime_test_a_ == nullptr) {
              // First test found.
              vector_runtime_test_a_ = a;
              vector_runtime_test_b_ = b;
            } else if ((vector_runtime_test_a_ != a || vector_runtime_test_b_ != b) &&
                       (vector_runtime_test_a_ != b || vector_runtime_test_b_ != a)) {
              return false;  // second test would be needed
            }
          }
        }
      }
    }
    // Update information for finding suitable alignment strategy:
    // (1) update votes for static loop peeling,
    // (2) update suitable candidate for dynamic loop peeling.
    Alignment alignment = ComputeAlignment(i->offset, i->type, i->is_string_char_at);
    if (alignment.Base() >= desired_alignment) {
      // If the array/string object has a known, sufficient alignment, use the
      // initial offset to compute the static loop peeling vote (this always
      // works, since elements have natural alignment).
      uint32_t offset = alignment.Offset() & (desired_alignment - 1u);
      uint32_t vote = (offset == 0)
          ? 0
          : ((desired_alignment - offset) >> DataType::SizeShift(i->type));
      DCHECK_LT(vote, 16u);
      ++peeling_votes[vote];
    } else if (BaseAlignment() >= desired_alignment &&
               num_same_alignment > max_num_same_alignment) {
      // Otherwise, if the array/string object has a known, sufficient alignment
      // for just the base but with an unknown offset, record the candidate with
      // the most occurrences for dynamic loop peeling (again, the peeling always
      // works, since elements have natural alignment).
      max_num_same_alignment = num_same_alignment;
      peeling_candidate = &(*i);
    }
  }  // for i
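
  // For example, with 16-byte SIMD and a kInt32 reference whose element data
  // starts 4 bytes past a 16-byte boundary, the vote is (16 - 4) >> 2 = 3:
  // peeling three scalar iterations makes that reference fully aligned.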

  // Find a suitable alignment strategy.
  SetAlignmentStrategy(peeling_votes, peeling_candidate);

  // Does vectorization seem profitable?
  if (!IsVectorizationProfitable(trip_count)) {
    return false;
  }

  // Success!
  return true;
}

void HLoopOptimization::Vectorize(LoopNode* node,
                                  HBasicBlock* block,
                                  HBasicBlock* exit,
                                  int64_t trip_count) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();

  // Pick a loop unrolling factor for the vector loop.
  uint32_t unroll = arch_loop_helper_->GetSIMDUnrollingFactor(
      block, trip_count, MaxNumberPeeled(), vector_length_);
  uint32_t chunk = vector_length_ * unroll;

  DCHECK(trip_count == 0 || (trip_count >= MaxNumberPeeled() + chunk));

  // A cleanup loop is needed, at least, for any unknown trip count or
  // for a known trip count with remainder iterations after vectorization.
  bool needs_cleanup = trip_count == 0 ||
      ((trip_count - vector_static_peeling_factor_) % chunk) != 0;

  // Adjust vector bookkeeping.
  HPhi* main_phi = nullptr;
  bool is_simple_loop_header = TrySetSimpleLoopHeader(header, &main_phi);  // refills sets
  DCHECK(is_simple_loop_header);
  vector_header_ = header;
  vector_body_ = block;

  // Loop induction type.
  DataType::Type induc_type = main_phi->GetType();
  DCHECK(induc_type == DataType::Type::kInt32 || induc_type == DataType::Type::kInt64)
      << induc_type;

  // Generate the trip count for static or dynamic loop peeling, if needed:
  //  ptc = <peeling factor>;
  HInstruction* ptc = nullptr;
  if (vector_static_peeling_factor_ != 0) {
    // Static loop peeling for SIMD alignment (using the most suitable
    // fixed peeling factor found during prior alignment analysis).
    DCHECK(vector_dynamic_peeling_candidate_ == nullptr);
    ptc = graph_->GetConstant(induc_type, vector_static_peeling_factor_);
  } else if (vector_dynamic_peeling_candidate_ != nullptr) {
    // Dynamic loop peeling for SIMD alignment (using the most suitable
    // candidate found during prior alignment analysis):
    // rem = offset % ALIGN;    // adjusted as #elements
    // ptc = rem == 0 ? 0 : (ALIGN - rem);
    uint32_t shift = DataType::SizeShift(vector_dynamic_peeling_candidate_->type);
    uint32_t align = GetVectorSizeInBytes() >> shift;
    uint32_t hidden_offset = HiddenOffset(vector_dynamic_peeling_candidate_->type,
                                          vector_dynamic_peeling_candidate_->is_string_char_at);
    HInstruction* adjusted_offset = graph_->GetConstant(induc_type, hidden_offset >> shift);
    HInstruction* offset = Insert(preheader, new (global_allocator_) HAdd(
        induc_type, vector_dynamic_peeling_candidate_->offset, adjusted_offset));
    HInstruction* rem = Insert(preheader, new (global_allocator_) HAnd(
        induc_type, offset, graph_->GetConstant(induc_type, align - 1u)));
    HInstruction* sub = Insert(preheader, new (global_allocator_) HSub(
        induc_type, graph_->GetConstant(induc_type, align), rem));
    HInstruction* cond = Insert(preheader, new (global_allocator_) HEqual(
        rem, graph_->GetConstant(induc_type, 0)));
    ptc = Insert(preheader, new (global_allocator_) HSelect(
        cond, graph_->GetConstant(induc_type, 0), sub, kNoDexPc));
    needs_cleanup = true;  // don't know the exact amount
  }
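
  // For example (illustrative numbers): with 16-byte SIMD over kInt32 elements,
  // align = 4 elements, so a runtime offset of 3 elements yields rem = 3 and
  // ptc = 4 - 3 = 1 peeled iteration, while rem == 0 selects ptc = 0.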

  // Generate loop control:
  // stc = <trip-count>;
  // ptc = min(stc, ptc);
  // vtc = stc - (stc - ptc) % chunk;
  // i = 0;
  HInstruction* stc = induction_range_.GenerateTripCount(node->loop_info, graph_, preheader);
  HInstruction* vtc = stc;
  if (needs_cleanup) {
    DCHECK(IsPowerOfTwo(chunk));
    HInstruction* diff = stc;
    if (ptc != nullptr) {
      if (trip_count == 0) {
        HInstruction* cond = Insert(preheader, new (global_allocator_) HAboveOrEqual(stc, ptc));
        ptc = Insert(preheader, new (global_allocator_) HSelect(cond, ptc, stc, kNoDexPc));
      }
      diff = Insert(preheader, new (global_allocator_) HSub(induc_type, stc, ptc));
    }
    HInstruction* rem = Insert(
        preheader, new (global_allocator_) HAnd(induc_type,
                                                diff,
                                                graph_->GetConstant(induc_type, chunk - 1)));
    vtc = Insert(preheader, new (global_allocator_) HSub(induc_type, stc, rem));
  }
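
  // For example (illustrative numbers): stc = 103, ptc = 1 and chunk = 8 give
  // vtc = 103 - ((103 - 1) & 7) = 97, so the vector loop covers iterations
  // [1, 97) and the cleanup loop handles the remaining [97, 103).
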
  vector_index_ = graph_->GetConstant(induc_type, 0);

  // Generate runtime disambiguation test:
  // vtc = a != b ? vtc : 0;
  if (vector_runtime_test_a_ != nullptr) {
    HInstruction* rt = Insert(
        preheader,
        new (global_allocator_) HNotEqual(vector_runtime_test_a_, vector_runtime_test_b_));
    vtc = Insert(preheader,
                 new (global_allocator_)
                 HSelect(rt, vtc, graph_->GetConstant(induc_type, 0), kNoDexPc));
    needs_cleanup = true;
  }

  // Generate alignment peeling loop, if needed:
  // for ( ; i < ptc; i += 1)
  //    <loop-body>
  //
  // NOTE: The alignment forced by the peeling loop is preserved even if data is
  //       moved around during suspend checks, since all analysis was based on
  //       nothing more than the Android runtime alignment conventions.
  if (ptc != nullptr) {
    vector_mode_ = kSequential;
    GenerateNewLoop(node,
                    block,
                    graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
                    vector_index_,
                    ptc,
                    graph_->GetConstant(induc_type, 1),
                    LoopAnalysisInfo::kNoUnrollingFactor);
  }

  // Generate vector loop, possibly further unrolled:
  // for ( ; i < vtc; i += chunk)
  //    <vectorized-loop-body>
  vector_mode_ = kVector;
  GenerateNewLoop(node,
                  block,
                  graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
                  vector_index_,
                  vtc,
                  graph_->GetConstant(induc_type, vector_length_),  // increment per unroll
                  unroll);
  HLoopInformation* vloop = vector_header_->GetLoopInformation();

  // Generate cleanup loop, if needed:
  // for ( ; i < stc; i += 1)
  //    <loop-body>
  if (needs_cleanup) {
    vector_mode_ = kSequential;
    GenerateNewLoop(node,
                    block,
                    graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
                    vector_index_,
                    stc,
                    graph_->GetConstant(induc_type, 1),
                    LoopAnalysisInfo::kNoUnrollingFactor);
  }

  // Link reductions to their final uses.
  for (auto i = reductions_->begin(); i != reductions_->end(); ++i) {
    if (i->first->IsPhi()) {
      HInstruction* phi = i->first;
      HInstruction* repl = ReduceAndExtractIfNeeded(i->second);
      // Deal with regular uses.
      for (const HUseListNode<HInstruction*>& use : phi->GetUses()) {
        induction_range_.Replace(use.GetUser(), phi, repl);  // update induction use
      }
      phi->ReplaceWith(repl);
    }
  }

  // Remove the original loop by disconnecting the body block
  // and removing all instructions from the header.
  block->DisconnectAndDelete();
  while (!header->GetFirstInstruction()->IsGoto()) {
    header->RemoveInstruction(header->GetFirstInstruction());
  }

  // Update loop hierarchy: the old header now resides in the same outer loop
  // as the old preheader. Note that we don't bother putting sequential
  // loops back in the hierarchy at this point.
  header->SetLoopInformation(preheader->GetLoopInformation());  // outward
  node->loop_info = vloop;
}

void HLoopOptimization::GenerateNewLoop(LoopNode* node,
                                        HBasicBlock* block,
                                        HBasicBlock* new_preheader,
                                        HInstruction* lo,
                                        HInstruction* hi,
                                        HInstruction* step,
                                        uint32_t unroll) {
  DCHECK(unroll == 1 || vector_mode_ == kVector);
  DataType::Type induc_type = lo->GetType();
  // Prepare new loop.
  vector_preheader_ = new_preheader;
  vector_header_ = vector_preheader_->GetSingleSuccessor();
  vector_body_ = vector_header_->GetSuccessors()[1];
  HPhi* phi = new (global_allocator_) HPhi(global_allocator_,
                                           kNoRegNumber,
                                           0,
                                           HPhi::ToPhiType(induc_type));
  // Generate header and prepare body.
  // for (i = lo; i < hi; i += step)
  //    <loop-body>
  HInstruction* cond = new (global_allocator_) HAboveOrEqual(phi, hi);
  vector_header_->AddPhi(phi);
  vector_header_->AddInstruction(cond);
  vector_header_->AddInstruction(new (global_allocator_) HIf(cond));
  vector_index_ = phi;
  vector_permanent_map_->clear();  // preserved over unrolling
  for (uint32_t u = 0; u < unroll; u++) {
    // Generate instruction map.
    vector_map_->clear();
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      bool vectorized_def = VectorizeDef(node, it.Current(), /*generate_code*/ true);
      DCHECK(vectorized_def);
    }
    // Generate body from the instruction map, but in original program order.
    HEnvironment* env = vector_header_->GetFirstInstruction()->GetEnvironment();
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      auto i = vector_map_->find(it.Current());
      if (i != vector_map_->end() && !i->second->IsInBlock()) {
        Insert(vector_body_, i->second);
        // Deal with instructions that need an environment, such as the scalar intrinsics.
        if (i->second->NeedsEnvironment()) {
          i->second->CopyEnvironmentFromWithLoopPhiAdjustment(env, vector_header_);
        }
      }
    }
    // Generate the induction.
    vector_index_ = new (global_allocator_) HAdd(induc_type, vector_index_, step);
    Insert(vector_body_, vector_index_);
  }
  // Finalize phi inputs for the reductions (if any).
  for (auto i = reductions_->begin(); i != reductions_->end(); ++i) {
    if (!i->first->IsPhi()) {
      DCHECK(i->second->IsPhi());
      GenerateVecReductionPhiInputs(i->second->AsPhi(), i->first);
    }
  }
  // Finalize phi inputs for the loop index.
  phi->AddInput(lo);
  phi->AddInput(vector_index_);
  vector_index_ = phi;
}

bool HLoopOptimization::VectorizeDef(LoopNode* node,
                                     HInstruction* instruction,
                                     bool generate_code) {
  // Accept a left-hand-side array base[index] for
  // (1) supported vector type,
  // (2) loop-invariant base,
  // (3) unit stride index,
  // (4) vectorizable right-hand-side value.
  uint64_t restrictions = kNone;
  // Don't accept expressions that can throw.
  if (instruction->CanThrow()) {
    return false;
  }
  if (instruction->IsArraySet()) {
    DataType::Type type = instruction->AsArraySet()->GetComponentType();
    HInstruction* base = instruction->InputAt(0);
    HInstruction* index = instruction->InputAt(1);
    HInstruction* value = instruction->InputAt(2);
    HInstruction* offset = nullptr;
    // For narrow types, explicit type conversion may have been
    // optimized away, so set the no hi bits restriction here.
    if (DataType::Size(type) <= 2) {
      restrictions |= kNoHiBits;
    }
    if (TrySetVectorType(type, &restrictions) &&
        node->loop_info->IsDefinedOutOfTheLoop(base) &&
        induction_range_.IsUnitStride(instruction, index, graph_, &offset) &&
        VectorizeUse(node, value, generate_code, type, restrictions)) {
      if (generate_code) {
        GenerateVecSub(index, offset);
        GenerateVecMem(instruction, vector_map_->Get(index), vector_map_->Get(value), offset, type);
      } else {
        vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ true));
      }
      return true;
    }
    return false;
  }
  // Accept a left-hand-side reduction for
  // (1) supported vector type,
  // (2) vectorizable right-hand-side value.
  auto redit = reductions_->find(instruction);
  if (redit != reductions_->end()) {
    DataType::Type type = instruction->GetType();
    // Recognize SAD idiom or direct reduction.
    if (VectorizeSADIdiom(node, instruction, generate_code, type, restrictions) ||
        VectorizeDotProdIdiom(node, instruction, generate_code, type, restrictions) ||
        (TrySetVectorType(type, &restrictions) &&
         VectorizeUse(node, instruction, generate_code, type, restrictions))) {
      if (generate_code) {
        HInstruction* new_red = vector_map_->Get(instruction);
        vector_permanent_map_->Put(new_red, vector_map_->Get(redit->second));
        vector_permanent_map_->Overwrite(redit->second, new_red);
      }
      return true;
    }
    return false;
  }
  // Branch back okay.
  if (instruction->IsGoto()) {
    return true;
  }
  // Otherwise accept only expressions with no effects outside the immediate loop-body.
  // Note that actual uses are inspected during right-hand-side tree traversal.
  return !IsUsedOutsideLoop(node->loop_info, instruction)
      && !instruction->DoesAnyWrite();
}

bool HLoopOptimization::VectorizeUse(LoopNode* node,
                                     HInstruction* instruction,
                                     bool generate_code,
                                     DataType::Type type,
                                     uint64_t restrictions) {
  // Accept anything for which code has already been generated.
  if (generate_code) {
    if (vector_map_->find(instruction) != vector_map_->end()) {
      return true;
    }
  }
  // Continue the right-hand-side tree traversal, passing in proper
  // types and vector restrictions along the way. During code generation,
  // all new nodes are drawn from the global allocator.
  if (node->loop_info->IsDefinedOutOfTheLoop(instruction)) {
    // Accept invariant use, using scalar expansion.
    if (generate_code) {
      GenerateVecInv(instruction, type);
    }
    return true;
  } else if (instruction->IsArrayGet()) {
    // Deal with vector restrictions.
    bool is_string_char_at = instruction->AsArrayGet()->IsStringCharAt();
    if (is_string_char_at && HasVectorRestrictions(restrictions, kNoStringCharAt)) {
      return false;
    }
    // Accept a right-hand-side array base[index] for
    // (1) matching vector type (exact match or signed/unsigned integral type of the same size),
    // (2) loop-invariant base,
    // (3) unit stride index,
    // (4) vectorizable right-hand-side value.
    HInstruction* base = instruction->InputAt(0);
    HInstruction* index = instruction->InputAt(1);
    HInstruction* offset = nullptr;
    if (HVecOperation::ToSignedType(type) == HVecOperation::ToSignedType(instruction->GetType()) &&
        node->loop_info->IsDefinedOutOfTheLoop(base) &&
        induction_range_.IsUnitStride(instruction, index, graph_, &offset)) {
      if (generate_code) {
        GenerateVecSub(index, offset);
        GenerateVecMem(instruction, vector_map_->Get(index), nullptr, offset, type);
      } else {
        vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ false, is_string_char_at));
      }
      return true;
    }
  } else if (instruction->IsPhi()) {
    // Accept particular phi operations.
    if (reductions_->find(instruction) != reductions_->end()) {
      // Deal with vector restrictions.
      if (HasVectorRestrictions(restrictions, kNoReduction)) {
        return false;
      }
      // Accept a reduction.
      if (generate_code) {
        GenerateVecReductionPhi(instruction->AsPhi());
      }
      return true;
    }
    // TODO: accept right-hand-side induction?
    return false;
  } else if (instruction->IsTypeConversion()) {
    // Accept particular type conversions.
    HTypeConversion* conversion = instruction->AsTypeConversion();
    HInstruction* opa = conversion->InputAt(0);
    DataType::Type from = conversion->GetInputType();
    DataType::Type to = conversion->GetResultType();
    if (DataType::IsIntegralType(from) && DataType::IsIntegralType(to)) {
      uint32_t size_vec = DataType::Size(type);
      uint32_t size_from = DataType::Size(from);
      uint32_t size_to = DataType::Size(to);
      // Accept an integral conversion
      // (1a) narrowing into vector type, "wider" operations cannot bring in higher order bits, or
      // (1b) widening from at least vector type, and
      // (2) vectorizable operand.
      if ((size_to < size_from &&
           size_to == size_vec &&
           VectorizeUse(node, opa, generate_code, type, restrictions | kNoHiBits)) ||
          (size_to >= size_from &&
           size_from >= size_vec &&
           VectorizeUse(node, opa, generate_code, type, restrictions))) {
        if (generate_code) {
          if (vector_mode_ == kVector) {
            vector_map_->Put(instruction, vector_map_->Get(opa));  // operand pass-through
          } else {
            GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
          }
        }
        return true;
      }
    } else if (to == DataType::Type::kFloat32 && from == DataType::Type::kInt32) {
      DCHECK_EQ(to, type);
      // Accept int to float conversion for
      // (1) supported int,
      // (2) vectorizable operand.
      if (TrySetVectorType(from, &restrictions) &&
          VectorizeUse(node, opa, generate_code, from, restrictions)) {
        if (generate_code) {
          GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
        }
        return true;
      }
    }
    return false;
  } else if (instruction->IsNeg() || instruction->IsNot() || instruction->IsBooleanNot()) {
    // Accept unary operator for vectorizable operand.
    HInstruction* opa = instruction->InputAt(0);
    if (VectorizeUse(node, opa, generate_code, type, restrictions)) {
      if (generate_code) {
        GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
      }
      return true;
    }
  } else if (instruction->IsAdd() || instruction->IsSub() ||
             instruction->IsMul() || instruction->IsDiv() ||
             instruction->IsAnd() || instruction->IsOr()  || instruction->IsXor()) {
    // Deal with vector restrictions.
    if ((instruction->IsMul() && HasVectorRestrictions(restrictions, kNoMul)) ||
        (instruction->IsDiv() && HasVectorRestrictions(restrictions, kNoDiv))) {
      return false;
    }
    // Accept binary operator for vectorizable operands.
    HInstruction* opa = instruction->InputAt(0);
    HInstruction* opb = instruction->InputAt(1);
    if (VectorizeUse(node, opa, generate_code, type, restrictions) &&
        VectorizeUse(node, opb, generate_code, type, restrictions)) {
      if (generate_code) {
        GenerateVecOp(instruction, vector_map_->Get(opa), vector_map_->Get(opb), type);
      }
      return true;
    }
  } else if (instruction->IsShl() || instruction->IsShr() || instruction->IsUShr()) {
    // Recognize halving add idiom.
    if (VectorizeHalvingAddIdiom(node, instruction, generate_code, type, restrictions)) {
      return true;
    }
    // Deal with vector restrictions.
    HInstruction* opa = instruction->InputAt(0);
    HInstruction* opb = instruction->InputAt(1);
    HInstruction* r = opa;
    bool is_unsigned = false;
    if ((HasVectorRestrictions(restrictions, kNoShift)) ||
        (instruction->IsShr() && HasVectorRestrictions(restrictions, kNoShr))) {
      return false;  // unsupported instruction
    } else if (HasVectorRestrictions(restrictions, kNoHiBits)) {
      // Shifts right need extra care to account for higher order bits.
      // TODO: less likely shr/unsigned and ushr/signed can be handled by flipping signedness.
      if (instruction->IsShr() &&
          (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || is_unsigned)) {
        return false;  // reject, unless all operands are sign-extension narrower
      } else if (instruction->IsUShr() &&
                 (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || !is_unsigned)) {
        return false;  // reject, unless all operands are zero-extension narrower
      }
    }
    // Accept shift operator for vectorizable/invariant operands.
    // TODO: accept symbolic, albeit loop invariant shift factors.
    DCHECK(r != nullptr);
    if (generate_code && vector_mode_ != kVector) {  // de-idiom
      r = opa;
    }
    int64_t distance = 0;
    if (VectorizeUse(node, r, generate_code, type, restrictions) &&
1505 IsInt64AndGet(opb, /*out*/ &distance)) {
1506 // Restrict shift distance to packed data type width.
1507 int64_t max_distance = DataType::Size(type) * 8;
1508 if (0 <= distance && distance < max_distance) {
1509 if (generate_code) {
1510 GenerateVecOp(instruction, vector_map_->Get(r), opb, type);
1511 }
1512 return true;
1513 }
1514 }
1515 } else if (instruction->IsAbs()) {
1516 // Deal with vector restrictions.
1517 HInstruction* opa = instruction->InputAt(0);
1518 HInstruction* r = opa;
1519 bool is_unsigned = false;
1520 if (HasVectorRestrictions(restrictions, kNoAbs)) {
1521 return false;
1522 } else if (HasVectorRestrictions(restrictions, kNoHiBits) &&
1523 (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || is_unsigned)) {
1524 return false; // reject, unless operand is sign-extension narrower
1525 }
1526 // Accept ABS(x) for vectorizable operand.
1527 DCHECK(r != nullptr);
1528 if (generate_code && vector_mode_ != kVector) { // de-idiom
1529 r = opa;
1530 }
1531 if (VectorizeUse(node, r, generate_code, type, restrictions)) {
1532 if (generate_code) {
1533 GenerateVecOp(instruction,
1534 vector_map_->Get(r),
1535 nullptr,
1536 HVecOperation::ToProperType(type, is_unsigned));
1537 }
1538 return true;
1539 }
1540 }
1541 return false;
1542 }

uint32_t HLoopOptimization::GetVectorSizeInBytes() {
  switch (compiler_options_->GetInstructionSet()) {
    case InstructionSet::kArm:
    case InstructionSet::kThumb2:
      return 8;  // 64-bit SIMD
    default:
      return 16;  // 128-bit SIMD
  }
}

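// Attempts to set the vector type (and corresponding vector length) for the
// given packed data type on the current target ISA. On success, operations
// the target cannot vectorize for this type are recorded as restriction bits.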
bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrictions) {
  const InstructionSetFeatures* features = compiler_options_->GetInstructionSetFeatures();
  switch (compiler_options_->GetInstructionSet()) {
    case InstructionSet::kArm:
    case InstructionSet::kThumb2:
      // Allow vectorization for all ARM devices, because Android assumes that
      // 32-bit ARM always supports advanced SIMD (64-bit SIMD).
      switch (type) {
        case DataType::Type::kBool:
        case DataType::Type::kUint8:
        case DataType::Type::kInt8:
          *restrictions |= kNoDiv | kNoReduction | kNoDotProd;
          return TrySetVectorLength(8);
        case DataType::Type::kUint16:
        case DataType::Type::kInt16:
          *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoDotProd;
          return TrySetVectorLength(4);
        case DataType::Type::kInt32:
          *restrictions |= kNoDiv | kNoWideSAD;
          return TrySetVectorLength(2);
        default:
          break;
      }
      return false;
    case InstructionSet::kArm64:
      // Allow vectorization for all ARM64 devices, because Android assumes that
      // ARMv8 AArch64 always supports advanced SIMD (128-bit SIMD).
      switch (type) {
        case DataType::Type::kBool:
        case DataType::Type::kUint8:
        case DataType::Type::kInt8:
          *restrictions |= kNoDiv;
          return TrySetVectorLength(16);
        case DataType::Type::kUint16:
        case DataType::Type::kInt16:
          *restrictions |= kNoDiv;
          return TrySetVectorLength(8);
        case DataType::Type::kInt32:
          *restrictions |= kNoDiv;
          return TrySetVectorLength(4);
        case DataType::Type::kInt64:
          *restrictions |= kNoDiv | kNoMul;
          return TrySetVectorLength(2);
        case DataType::Type::kFloat32:
          *restrictions |= kNoReduction;
          return TrySetVectorLength(4);
        case DataType::Type::kFloat64:
          *restrictions |= kNoReduction;
          return TrySetVectorLength(2);
        default:
          return false;
      }
    case InstructionSet::kX86:
    case InstructionSet::kX86_64:
      // Allow vectorization for SSE4.1-enabled X86 devices only (128-bit SIMD).
      if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) {
        switch (type) {
          case DataType::Type::kBool:
          case DataType::Type::kUint8:
          case DataType::Type::kInt8:
            *restrictions |= kNoMul |
                             kNoDiv |
                             kNoShift |
                             kNoAbs |
                             kNoSignedHAdd |
                             kNoUnroundedHAdd |
                             kNoSAD |
                             kNoDotProd;
            return TrySetVectorLength(16);
          case DataType::Type::kUint16:
            *restrictions |= kNoDiv |
                             kNoAbs |
                             kNoSignedHAdd |
                             kNoUnroundedHAdd |
                             kNoSAD |
                             kNoDotProd;
            return TrySetVectorLength(8);
          case DataType::Type::kInt16:
            *restrictions |= kNoDiv |
                             kNoAbs |
                             kNoSignedHAdd |
                             kNoUnroundedHAdd |
                             kNoSAD;
            return TrySetVectorLength(8);
          case DataType::Type::kInt32:
            *restrictions |= kNoDiv | kNoSAD;
            return TrySetVectorLength(4);
          case DataType::Type::kInt64:
            *restrictions |= kNoMul | kNoDiv | kNoShr | kNoAbs | kNoSAD;
            return TrySetVectorLength(2);
          case DataType::Type::kFloat32:
            *restrictions |= kNoReduction;
            return TrySetVectorLength(4);
          case DataType::Type::kFloat64:
            *restrictions |= kNoReduction;
            return TrySetVectorLength(2);
          default:
            break;
        }  // switch type
      }
      return false;
    default:
      return false;
  }  // switch instruction set
}

bool HLoopOptimization::TrySetVectorLength(uint32_t length) {
  DCHECK(IsPowerOfTwo(length) && length >= 2u);
  // First time set?
  if (vector_length_ == 0) {
    vector_length_ = length;
  }
  // Different types are acceptable within a loop-body, as long as all the corresponding vector
  // lengths match exactly to obtain a uniform traversal through the vector iteration space
  // (idiomatic exceptions to this rule can be handled by further unrolling sub-expressions).
  return vector_length_ == length;
}

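// Generates code for a loop invariant: in sequential (scalar) mode the
// original expression is simply passed through, while in vector mode the
// invariant is scalar-expanded into a vector in the loop preheader.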
void HLoopOptimization::GenerateVecInv(HInstruction* org, DataType::Type type) {
  if (vector_map_->find(org) == vector_map_->end()) {
    // In scalar code, just use a self pass-through for scalar invariants
    // (viz. expression remains itself).
    if (vector_mode_ == kSequential) {
      vector_map_->Put(org, org);
      return;
    }
    // In vector code, explicit scalar expansion is needed.
    HInstruction* vector = nullptr;
    auto it = vector_permanent_map_->find(org);
    if (it != vector_permanent_map_->end()) {
      vector = it->second;  // reuse during unrolling
    } else {
      // Generates ReplicateScalar( (optional_type_conv) org ).
      HInstruction* input = org;
      DataType::Type input_type = input->GetType();
      if (type != input_type && (type == DataType::Type::kInt64 ||
                                 input_type == DataType::Type::kInt64)) {
        input = Insert(vector_preheader_,
                       new (global_allocator_) HTypeConversion(type, input, kNoDexPc));
      }
      vector = new (global_allocator_)
          HVecReplicateScalar(global_allocator_, input, type, vector_length_, kNoDexPc);
      vector_permanent_map_->Put(org, Insert(vector_preheader_, vector));
    }
    vector_map_->Put(org, vector);
  }
}

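// Generates the subscript for a unit-stride memory reference, i.e. the
// current vector index plus a possibly non-zero loop-invariant offset.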
void HLoopOptimization::GenerateVecSub(HInstruction* org, HInstruction* offset) {
  if (vector_map_->find(org) == vector_map_->end()) {
    HInstruction* subscript = vector_index_;
    int64_t value = 0;
    if (!IsInt64AndGet(offset, &value) || value != 0) {
      subscript = new (global_allocator_) HAdd(DataType::Type::kInt32, subscript, offset);
      if (org->IsPhi()) {
        Insert(vector_body_, subscript);  // lacks layout placeholder
      }
    }
    vector_map_->Put(org, subscript);
  }
}

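// Generates a memory operation for org: a vector load/store (with alignment
// information attached) in vector mode, or the original scalar array get/set
// in sequential mode. A non-null opb denotes a store of that value.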
void HLoopOptimization::GenerateVecMem(HInstruction* org,
                                       HInstruction* opa,
                                       HInstruction* opb,
                                       HInstruction* offset,
                                       DataType::Type type) {
  uint32_t dex_pc = org->GetDexPc();
  HInstruction* vector = nullptr;
  if (vector_mode_ == kVector) {
    // Vector store or load.
    bool is_string_char_at = false;
    HInstruction* base = org->InputAt(0);
    if (opb != nullptr) {
      vector = new (global_allocator_) HVecStore(
          global_allocator_, base, opa, opb, type, org->GetSideEffects(), vector_length_, dex_pc);
    } else {
      is_string_char_at = org->AsArrayGet()->IsStringCharAt();
      vector = new (global_allocator_) HVecLoad(global_allocator_,
                                                base,
                                                opa,
                                                type,
                                                org->GetSideEffects(),
                                                vector_length_,
                                                is_string_char_at,
                                                dex_pc);
    }
    // Known (forced/adjusted/original) alignment?
    if (vector_dynamic_peeling_candidate_ != nullptr) {
      if (vector_dynamic_peeling_candidate_->offset == offset &&  // TODO: diffs too?
          DataType::Size(vector_dynamic_peeling_candidate_->type) == DataType::Size(type) &&
          vector_dynamic_peeling_candidate_->is_string_char_at == is_string_char_at) {
        vector->AsVecMemoryOperation()->SetAlignment(  // forced
            Alignment(GetVectorSizeInBytes(), 0));
      }
    } else {
      vector->AsVecMemoryOperation()->SetAlignment(  // adjusted/original
          ComputeAlignment(offset, type, is_string_char_at, vector_static_peeling_factor_));
    }
  } else {
    // Scalar store or load.
    DCHECK(vector_mode_ == kSequential);
    if (opb != nullptr) {
      DataType::Type component_type = org->AsArraySet()->GetComponentType();
      vector = new (global_allocator_) HArraySet(
          org->InputAt(0), opa, opb, component_type, org->GetSideEffects(), dex_pc);
    } else {
      bool is_string_char_at = org->AsArrayGet()->IsStringCharAt();
      vector = new (global_allocator_) HArrayGet(
          org->InputAt(0), opa, org->GetType(), org->GetSideEffects(), dex_pc, is_string_char_at);
    }
  }
  vector_map_->Put(org, vector);
}

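// Generates the phi for a reduction: a scalar phi of the original type in
// sequential mode, or a SIMD-typed phi in vector mode (reusing a prior
// unrolled update when one exists in the permanent map).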
void HLoopOptimization::GenerateVecReductionPhi(HPhi* phi) {
  DCHECK(reductions_->find(phi) != reductions_->end());
  DCHECK(reductions_->Get(phi->InputAt(1)) == phi);
  HInstruction* vector = nullptr;
  if (vector_mode_ == kSequential) {
    HPhi* new_phi = new (global_allocator_) HPhi(
        global_allocator_, kNoRegNumber, 0, phi->GetType());
    vector_header_->AddPhi(new_phi);
    vector = new_phi;
  } else {
    // Link vector reduction back to prior unrolled update, or a first phi.
    auto it = vector_permanent_map_->find(phi);
    if (it != vector_permanent_map_->end()) {
      vector = it->second;
    } else {
      HPhi* new_phi = new (global_allocator_) HPhi(
          global_allocator_, kNoRegNumber, 0, HVecOperation::kSIMDType);
      vector_header_->AddPhi(new_phi);
      vector = new_phi;
    }
  }
  vector_map_->Put(phi, vector);
}

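// Completes the reduction phi generated above by wiring up its two inputs:
// a suitably expanded initial value from before the loop and the reduction
// update computed inside the loop.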
void HLoopOptimization::GenerateVecReductionPhiInputs(HPhi* phi, HInstruction* reduction) {
  HInstruction* new_phi = vector_map_->Get(phi);
  HInstruction* new_init = reductions_->Get(phi);
  HInstruction* new_red = vector_map_->Get(reduction);
  // Link unrolled vector loop back to new phi.
  for (; !new_phi->IsPhi(); new_phi = vector_permanent_map_->Get(new_phi)) {
    DCHECK(new_phi->IsVecOperation());
  }
  // Prepare the new initialization.
  if (vector_mode_ == kVector) {
    // Generate a [initial, 0, .., 0] vector for add or
    // a [initial, initial, .., initial] vector for min/max.
    HVecOperation* red_vector = new_red->AsVecOperation();
    HVecReduce::ReductionKind kind = GetReductionKind(red_vector);
    uint32_t vector_length = red_vector->GetVectorLength();
    DataType::Type type = red_vector->GetPackedType();
    if (kind == HVecReduce::ReductionKind::kSum) {
      new_init = Insert(vector_preheader_,
                        new (global_allocator_) HVecSetScalars(global_allocator_,
                                                               &new_init,
                                                               type,
                                                               vector_length,
                                                               1,
                                                               kNoDexPc));
    } else {
      new_init = Insert(vector_preheader_,
                        new (global_allocator_) HVecReplicateScalar(global_allocator_,
                                                                    new_init,
                                                                    type,
                                                                    vector_length,
                                                                    kNoDexPc));
    }
  } else {
    new_init = ReduceAndExtractIfNeeded(new_init);
  }
  // Set the phi inputs.
  DCHECK(new_phi->IsPhi());
  new_phi->AsPhi()->AddInput(new_init);
  new_phi->AsPhi()->AddInput(new_red);
  // New feed value for next phi (safe mutation in iteration).
  reductions_->find(phi)->second = new_phi;
}

HInstruction* HLoopOptimization::ReduceAndExtractIfNeeded(HInstruction* instruction) {
  if (instruction->IsPhi()) {
    HInstruction* input = instruction->InputAt(1);
    if (HVecOperation::ReturnsSIMDValue(input)) {
      DCHECK(!input->IsPhi());
      HVecOperation* input_vector = input->AsVecOperation();
      uint32_t vector_length = input_vector->GetVectorLength();
      DataType::Type type = input_vector->GetPackedType();
      HVecReduce::ReductionKind kind = GetReductionKind(input_vector);
      HBasicBlock* exit = instruction->GetBlock()->GetSuccessors()[0];
      // Generate a vector reduction and scalar extract
      //    x = REDUCE( [x_1, .., x_n] )
      //    y = x_1
      // along the exit of the defining loop.
      HInstruction* reduce = new (global_allocator_) HVecReduce(
          global_allocator_, instruction, type, vector_length, kind, kNoDexPc);
      exit->InsertInstructionBefore(reduce, exit->GetFirstInstruction());
      instruction = new (global_allocator_) HVecExtractScalar(
          global_allocator_, reduce, type, vector_length, 0, kNoDexPc);
      exit->InsertInstructionAfter(instruction, reduce);
    }
  }
  return instruction;
}

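// Generates a vector operation (x) in vector mode and the corresponding
// original scalar operation (y) in sequential mode, then breaks out of the
// enclosing switch statement.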
#define GENERATE_VEC(x, y) \
  if (vector_mode_ == kVector) { \
    vector = (x); \
  } else { \
    DCHECK(vector_mode_ == kSequential); \
    vector = (y); \
  } \
  break;

void HLoopOptimization::GenerateVecOp(HInstruction* org,
                                      HInstruction* opa,
                                      HInstruction* opb,
                                      DataType::Type type) {
  uint32_t dex_pc = org->GetDexPc();
  HInstruction* vector = nullptr;
  DataType::Type org_type = org->GetType();
  switch (org->GetKind()) {
    case HInstruction::kNeg:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
          new (global_allocator_) HVecNeg(global_allocator_, opa, type, vector_length_, dex_pc),
          new (global_allocator_) HNeg(org_type, opa, dex_pc));
    case HInstruction::kNot:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
          new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_, dex_pc),
          new (global_allocator_) HNot(org_type, opa, dex_pc));
    case HInstruction::kBooleanNot:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
          new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_, dex_pc),
          new (global_allocator_) HBooleanNot(opa, dex_pc));
    case HInstruction::kTypeConversion:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
          new (global_allocator_) HVecCnv(global_allocator_, opa, type, vector_length_, dex_pc),
          new (global_allocator_) HTypeConversion(org_type, opa, dex_pc));
    case HInstruction::kAdd:
      GENERATE_VEC(
          new (global_allocator_) HVecAdd(global_allocator_, opa, opb, type, vector_length_, dex_pc),
          new (global_allocator_) HAdd(org_type, opa, opb, dex_pc));
    case HInstruction::kSub:
      GENERATE_VEC(
          new (global_allocator_) HVecSub(global_allocator_, opa, opb, type, vector_length_, dex_pc),
          new (global_allocator_) HSub(org_type, opa, opb, dex_pc));
    case HInstruction::kMul:
      GENERATE_VEC(
          new (global_allocator_) HVecMul(global_allocator_, opa, opb, type, vector_length_, dex_pc),
          new (global_allocator_) HMul(org_type, opa, opb, dex_pc));
    case HInstruction::kDiv:
      GENERATE_VEC(
          new (global_allocator_) HVecDiv(global_allocator_, opa, opb, type, vector_length_, dex_pc),
          new (global_allocator_) HDiv(org_type, opa, opb, dex_pc));
    case HInstruction::kAnd:
      GENERATE_VEC(
          new (global_allocator_) HVecAnd(global_allocator_, opa, opb, type, vector_length_, dex_pc),
          new (global_allocator_) HAnd(org_type, opa, opb, dex_pc));
    case HInstruction::kOr:
      GENERATE_VEC(
          new (global_allocator_) HVecOr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
          new (global_allocator_) HOr(org_type, opa, opb, dex_pc));
    case HInstruction::kXor:
      GENERATE_VEC(
          new (global_allocator_) HVecXor(global_allocator_, opa, opb, type, vector_length_, dex_pc),
          new (global_allocator_) HXor(org_type, opa, opb, dex_pc));
    case HInstruction::kShl:
      GENERATE_VEC(
          new (global_allocator_) HVecShl(global_allocator_, opa, opb, type, vector_length_, dex_pc),
          new (global_allocator_) HShl(org_type, opa, opb, dex_pc));
    case HInstruction::kShr:
      GENERATE_VEC(
          new (global_allocator_) HVecShr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
          new (global_allocator_) HShr(org_type, opa, opb, dex_pc));
    case HInstruction::kUShr:
      GENERATE_VEC(
          new (global_allocator_) HVecUShr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
          new (global_allocator_) HUShr(org_type, opa, opb, dex_pc));
    case HInstruction::kAbs:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
          new (global_allocator_) HVecAbs(global_allocator_, opa, type, vector_length_, dex_pc),
          new (global_allocator_) HAbs(org_type, opa, dex_pc));
    default:
      break;
  }  // switch
  CHECK(vector != nullptr) << "Unsupported SIMD operator";
  vector_map_->Put(org, vector);
}

#undef GENERATE_VEC

//
// Vectorization idioms.
//

// Method recognizes the following idioms:
//   rounding  halving add (a + b + 1) >> 1 for unsigned/signed operands a, b
//   truncated halving add (a + b)     >> 1 for unsigned/signed operands a, b
// Provided that the operands are promoted to a wider form to do the arithmetic and
// then cast back to narrower form, the idioms can be mapped into an efficient SIMD
// implementation that operates directly in narrower form (plus one extra bit).
// TODO: current version recognizes implicit byte/short/char widening only;
// explicit widening from int to long could be added later.
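// For example (hypothetical Java-level source), a loop body such as
//   c[i] = (byte) ((a[i] + b[i] + 1) >> 1);  // rounding halving add
// over byte arrays a and b matches the first pattern: the byte operands are
// implicitly widened to int, added with the rounding constant, shifted, and
// narrowed back, which maps to a single rounded HVecHalvingAdd per vector.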
bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
                                                 HInstruction* instruction,
                                                 bool generate_code,
                                                 DataType::Type type,
                                                 uint64_t restrictions) {
  // Test for top level arithmetic shift right x >> 1 or logical shift right x >>> 1
  // (note that whether the sign bit in wider precision is shifted in has no effect
  // on the narrow precision computed by the idiom).
  if ((instruction->IsShr() ||
       instruction->IsUShr()) &&
      IsInt64Value(instruction->InputAt(1), 1)) {
    // Test for (a + b + c) >> 1 for optional constant c.
    HInstruction* a = nullptr;
    HInstruction* b = nullptr;
    int64_t c = 0;
    if (IsAddConst2(graph_, instruction->InputAt(0), /*out*/ &a, /*out*/ &b, /*out*/ &c)) {
      // Accept c == 1 (rounded) or c == 0 (not rounded).
      bool is_rounded = false;
      if (c == 1) {
        is_rounded = true;
      } else if (c != 0) {
        return false;
      }
      // Accept consistent zero or sign extension on operands a and b.
      HInstruction* r = nullptr;
      HInstruction* s = nullptr;
      bool is_unsigned = false;
      if (!IsNarrowerOperands(a, b, type, &r, &s, &is_unsigned)) {
        return false;
      }
      // Deal with vector restrictions.
      if ((!is_unsigned && HasVectorRestrictions(restrictions, kNoSignedHAdd)) ||
          (!is_rounded && HasVectorRestrictions(restrictions, kNoUnroundedHAdd))) {
        return false;
      }
      // Accept recognized halving add for vectorizable operands. Vectorized code uses the
      // shorthand idiomatic operation. Sequential code uses the original scalar expressions.
      DCHECK(r != nullptr && s != nullptr);
      if (generate_code && vector_mode_ != kVector) {  // de-idiom
        r = instruction->InputAt(0);
        s = instruction->InputAt(1);
      }
      if (VectorizeUse(node, r, generate_code, type, restrictions) &&
          VectorizeUse(node, s, generate_code, type, restrictions)) {
        if (generate_code) {
          if (vector_mode_ == kVector) {
            vector_map_->Put(instruction, new (global_allocator_) HVecHalvingAdd(
                global_allocator_,
                vector_map_->Get(r),
                vector_map_->Get(s),
                HVecOperation::ToProperType(type, is_unsigned),
                vector_length_,
                is_rounded,
                kNoDexPc));
            MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
          } else {
            GenerateVecOp(instruction, vector_map_->Get(r), vector_map_->Get(s), type);
          }
        }
        return true;
      }
    }
  }
  return false;
}

// Method recognizes the following idiom:
//   q += ABS(a - b) for signed operands a, b
// Provided that the operands have the same type or are promoted to a wider form.
// Since this may involve a vector length change, the idiom is handled by going directly
// to a sad-accumulate node (rather than relying on combining finer-grained nodes later).
// TODO: unsigned SAD too?
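// For example (hypothetical Java-level source), the reduction loop
//   for (int i = 0; i < n; i++) { q += Math.abs(a[i] - b[i]); }
// over byte arrays a and b with an int accumulator q is a typical match:
// the byte subtraction is done in int precision, yet the whole update maps
// to a single HVecSADAccumulate over vectors of bytes.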
bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
                                          HInstruction* instruction,
                                          bool generate_code,
                                          DataType::Type reduction_type,
                                          uint64_t restrictions) {
  // Filter integral "q += ABS(a - b);" reduction, where ABS and SUB
  // are done in the same precision (either int or long).
  if (!instruction->IsAdd() ||
      (reduction_type != DataType::Type::kInt32 && reduction_type != DataType::Type::kInt64)) {
    return false;
  }
  HInstruction* q = instruction->InputAt(0);
  HInstruction* v = instruction->InputAt(1);
  HInstruction* a = nullptr;
  HInstruction* b = nullptr;
  if (v->IsAbs() &&
      v->GetType() == reduction_type &&
      IsSubConst2(graph_, v->InputAt(0), /*out*/ &a, /*out*/ &b)) {
    DCHECK(a != nullptr && b != nullptr);
  } else {
    return false;
  }
  // Accept same-type or consistent sign extension for narrower-type on operands a and b.
  // The same-type or narrower operands are called r (a or lower) and s (b or lower).
  // We inspect the operands carefully to pick the most suited type.
  HInstruction* r = a;
  HInstruction* s = b;
  bool is_unsigned = false;
  DataType::Type sub_type = GetNarrowerType(a, b);
  if (reduction_type != sub_type &&
      (!IsNarrowerOperands(a, b, sub_type, &r, &s, &is_unsigned) || is_unsigned)) {
    return false;
  }
  // Try same/narrower type and deal with vector restrictions.
  if (!TrySetVectorType(sub_type, &restrictions) ||
      HasVectorRestrictions(restrictions, kNoSAD) ||
      (reduction_type != sub_type && HasVectorRestrictions(restrictions, kNoWideSAD))) {
    return false;
  }
  // Accept SAD idiom for vectorizable operands. Vectorized code uses the shorthand
  // idiomatic operation. Sequential code uses the original scalar expressions.
  DCHECK(r != nullptr && s != nullptr);
  if (generate_code && vector_mode_ != kVector) {  // de-idiom
    r = s = v->InputAt(0);
  }
  if (VectorizeUse(node, q, generate_code, sub_type, restrictions) &&
      VectorizeUse(node, r, generate_code, sub_type, restrictions) &&
      VectorizeUse(node, s, generate_code, sub_type, restrictions)) {
    if (generate_code) {
      if (vector_mode_ == kVector) {
        vector_map_->Put(instruction, new (global_allocator_) HVecSADAccumulate(
            global_allocator_,
            vector_map_->Get(q),
            vector_map_->Get(r),
            vector_map_->Get(s),
            HVecOperation::ToProperType(reduction_type, is_unsigned),
            GetOtherVL(reduction_type, sub_type, vector_length_),
            kNoDexPc));
        MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
      } else {
        GenerateVecOp(v, vector_map_->Get(r), nullptr, reduction_type);
        GenerateVecOp(instruction, vector_map_->Get(q), vector_map_->Get(v), reduction_type);
      }
    }
    return true;
  }
  return false;
}

// Method recognizes the following dot product idiom:
//   q += a * b for operands a, b whose type is narrower than the reduction one.
// Provided that the operands have the same type or are promoted to a wider form.
// Since this may involve a vector length change, the idiom is handled by going directly
// to a dot product node (rather than relying on combining finer-grained nodes later).
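// For example (hypothetical Java-level source), the reduction loop
//   for (int i = 0; i < n; i++) { q += a[i] * b[i]; }
// over byte or short arrays a and b with an int accumulator q matches: the
// narrow operands are multiplied and accumulated in int precision by a single
// HVecDotProd per vector of narrow elements.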
bool HLoopOptimization::VectorizeDotProdIdiom(LoopNode* node,
                                              HInstruction* instruction,
                                              bool generate_code,
                                              DataType::Type reduction_type,
                                              uint64_t restrictions) {
  if (!instruction->IsAdd() || reduction_type != DataType::Type::kInt32) {
    return false;
  }

  HInstruction* q = instruction->InputAt(0);
  HInstruction* v = instruction->InputAt(1);
  if (!v->IsMul() || v->GetType() != reduction_type) {
    return false;
  }

  HInstruction* a = v->InputAt(0);
  HInstruction* b = v->InputAt(1);
  HInstruction* r = a;
  HInstruction* s = b;
  DataType::Type op_type = GetNarrowerType(a, b);
  bool is_unsigned = false;

  if (!IsNarrowerOperands(a, b, op_type, &r, &s, &is_unsigned)) {
    return false;
  }
  op_type = HVecOperation::ToProperType(op_type, is_unsigned);

  if (!TrySetVectorType(op_type, &restrictions) ||
      HasVectorRestrictions(restrictions, kNoDotProd)) {
    return false;
  }

  DCHECK(r != nullptr && s != nullptr);
  // Accept dot product idiom for vectorizable operands. Vectorized code uses the shorthand
  // idiomatic operation. Sequential code uses the original scalar expressions.
  if (generate_code && vector_mode_ != kVector) {  // de-idiom
    r = a;
    s = b;
  }
  if (VectorizeUse(node, q, generate_code, op_type, restrictions) &&
      VectorizeUse(node, r, generate_code, op_type, restrictions) &&
      VectorizeUse(node, s, generate_code, op_type, restrictions)) {
    if (generate_code) {
      if (vector_mode_ == kVector) {
        vector_map_->Put(instruction, new (global_allocator_) HVecDotProd(
            global_allocator_,
            vector_map_->Get(q),
            vector_map_->Get(r),
            vector_map_->Get(s),
            reduction_type,
            is_unsigned,
            GetOtherVL(reduction_type, op_type, vector_length_),
            kNoDexPc));
        MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
      } else {
        GenerateVecOp(v, vector_map_->Get(r), vector_map_->Get(s), reduction_type);
        GenerateVecOp(instruction, vector_map_->Get(q), vector_map_->Get(v), reduction_type);
      }
    }
    return true;
  }
  return false;
}

//
// Vectorization heuristics.
//

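// Computes the known alignment of the given memory reference, if any. As a
// worked example (hypothetical numbers): with BaseAlignment() == 8 and a
// combined start offset (hidden offset plus scaled index) of 20 bytes, the
// result is Alignment(8, 20 & 7) == Alignment(8, 4).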
Alignment HLoopOptimization::ComputeAlignment(HInstruction* offset,
                                              DataType::Type type,
                                              bool is_string_char_at,
                                              uint32_t peeling) {
  // Combine the alignment and hidden offset that is guaranteed by
  // the Android runtime with a known starting index, expressed in bytes.
  int64_t value = 0;
  if (IsInt64AndGet(offset, /*out*/ &value)) {
    uint32_t start_offset =
        HiddenOffset(type, is_string_char_at) + (value + peeling) * DataType::Size(type);
    return Alignment(BaseAlignment(), start_offset & (BaseAlignment() - 1u));
  }
  // Otherwise, the Android runtime guarantees at least natural alignment.
  return Alignment(DataType::Size(type), 0);
}

void HLoopOptimization::SetAlignmentStrategy(uint32_t peeling_votes[],
                                             const ArrayReference* peeling_candidate) {
  // Current heuristic: pick the best static loop peeling factor, if any,
  // or otherwise use dynamic loop peeling on the suggested peeling candidate.
  uint32_t max_vote = 0;
  for (int32_t i = 0; i < 16; i++) {
    if (peeling_votes[i] > max_vote) {
      max_vote = peeling_votes[i];
      vector_static_peeling_factor_ = i;
    }
  }
  if (max_vote == 0) {
    vector_dynamic_peeling_candidate_ = peeling_candidate;
  }
}

uint32_t HLoopOptimization::MaxNumberPeeled() {
  if (vector_dynamic_peeling_candidate_ != nullptr) {
    return vector_length_ - 1u;  // worst-case
  }
  return vector_static_peeling_factor_;  // known exactly
}

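// Illustration of the profitability check below (hypothetical numbers): with
// vector_length_ == 4 and a dynamic peeling candidate, MaxNumberPeeled()
// reports a worst case of 3 peeled iterations, so any known trip count below
// 4 + 3 = 7 is rejected as providing insufficient iterations.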
bool HLoopOptimization::IsVectorizationProfitable(int64_t trip_count) {
  // Current heuristic: non-empty body with sufficient number of iterations (if known).
  // TODO: refine by looking at e.g. operation count, alignment, etc.
  // TODO: trip count is really an unsigned entity, provided the guarding test
  //       is satisfied; deal with this more carefully later.
  uint32_t max_peel = MaxNumberPeeled();
  if (vector_length_ == 0) {
    return false;  // nothing found
  } else if (trip_count < 0) {
    return false;  // guard against non-taken/large
  } else if ((0 < trip_count) && (trip_count < (vector_length_ + max_peel))) {
    return false;  // insufficient iterations
  }
  return true;
}

//
// Helpers.
//

bool HLoopOptimization::TrySetPhiInduction(HPhi* phi, bool restrict_uses) {
  // Start with empty phi induction.
  iset_->clear();

  // Special case Phis that have an equivalent in a debuggable setup. Our graph checker isn't
  // smart enough to follow strongly connected components (and it's probably not worth
  // it to make it so). See b/33775412.
  if (graph_->IsDebuggable() && phi->HasEquivalentPhi()) {
    return false;
  }

  // Lookup phi induction cycle.
  ArenaSet<HInstruction*>* set = induction_range_.LookupCycle(phi);
  if (set != nullptr) {
    for (HInstruction* i : *set) {
      // Check that, other than instructions that are no longer in the graph (removed earlier),
      // each instruction is removable and, when restricted uses are requested, that all uses
      // other than the phi's are contained within the cycle.
      if (!i->IsInBlock()) {
        continue;
      } else if (!i->IsRemovable()) {
        return false;
      } else if (i != phi && restrict_uses) {
        // Deal with regular uses.
        for (const HUseListNode<HInstruction*>& use : i->GetUses()) {
          if (set->find(use.GetUser()) == set->end()) {
            return false;
          }
        }
      }
      iset_->insert(i);  // copy
    }
    return true;
  }
  return false;
}

bool HLoopOptimization::TrySetPhiReduction(HPhi* phi) {
  DCHECK(iset_->empty());
  // Only unclassified phi cycles are candidates for reductions.
  if (induction_range_.IsClassified(phi)) {
    return false;
  }
  // Accept operations like x = x + .., provided that the phi and the reduction are
  // used exactly once inside the loop, and by each other.
  HInputsRef inputs = phi->GetInputs();
  if (inputs.size() == 2) {
    HInstruction* reduction = inputs[1];
    if (HasReductionFormat(reduction, phi)) {
      HLoopInformation* loop_info = phi->GetBlock()->GetLoopInformation();
      uint32_t use_count = 0;
      bool single_use_inside_loop =
          // Reduction update only used by phi.
          reduction->GetUses().HasExactlyOneElement() &&
          !reduction->HasEnvironmentUses() &&
          // Reduction update is only use of phi inside the loop.
          IsOnlyUsedAfterLoop(loop_info, phi, /*collect_loop_uses*/ true, &use_count) &&
          iset_->size() == 1;
      iset_->clear();  // leave the way you found it
      if (single_use_inside_loop) {
        // Link reduction back, and start recording feed value.
        reductions_->Put(reduction, phi);
        reductions_->Put(phi, phi->InputAt(0));
        return true;
      }
    }
  }
  return false;
}

bool HLoopOptimization::TrySetSimpleLoopHeader(HBasicBlock* block, /*out*/ HPhi** main_phi) {
  // Start with empty phi induction and reductions.
  iset_->clear();
  reductions_->clear();

  // Scan the phis to find the following (the induction structure has already
  // been optimized, so we don't need to worry about trivial cases):
  // (1) optional reductions in loop,
  // (2) the main induction, used in loop control.
  HPhi* phi = nullptr;
  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
    if (TrySetPhiReduction(it.Current()->AsPhi())) {
      continue;
    } else if (phi == nullptr) {
      // Found the first candidate for main induction.
      phi = it.Current()->AsPhi();
    } else {
      return false;
    }
  }

  // Then test for a typical loop header:
  //   s:  SuspendCheck
  //   c:  Condition(phi, bound)
  //   i:  If(c)
  if (phi != nullptr && TrySetPhiInduction(phi, /*restrict_uses*/ false)) {
    HInstruction* s = block->GetFirstInstruction();
    if (s != nullptr && s->IsSuspendCheck()) {
      HInstruction* c = s->GetNext();
      if (c != nullptr &&
          c->IsCondition() &&
          c->GetUses().HasExactlyOneElement() &&  // only used for termination
          !c->HasEnvironmentUses()) {  // unlikely, but not impossible
        HInstruction* i = c->GetNext();
        if (i != nullptr && i->IsIf() && i->InputAt(0) == c) {
          iset_->insert(c);
          iset_->insert(s);
          *main_phi = phi;
          return true;
        }
      }
    }
  }
  return false;
}

bool HLoopOptimization::IsEmptyBody(HBasicBlock* block) {
  if (!block->GetPhis().IsEmpty()) {
    return false;
  }
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    HInstruction* instruction = it.Current();
    if (!instruction->IsGoto() && iset_->find(instruction) == iset_->end()) {
      return false;
    }
  }
  return true;
}

bool HLoopOptimization::IsUsedOutsideLoop(HLoopInformation* loop_info,
                                          HInstruction* instruction) {
  // Deal with regular uses.
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    if (use.GetUser()->GetBlock()->GetLoopInformation() != loop_info) {
      return true;
    }
  }
  return false;
}

bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
                                            HInstruction* instruction,
                                            bool collect_loop_uses,
                                            /*out*/ uint32_t* use_count) {
  // Deal with regular uses.
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    HInstruction* user = use.GetUser();
    if (iset_->find(user) == iset_->end()) {  // not excluded?
      HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
      if (other_loop_info != nullptr && other_loop_info->IsIn(*loop_info)) {
        // If collect_loop_uses is set, simply keep adding those uses to the set.
        // Otherwise, reject uses inside the loop that were not already in the set.
        if (collect_loop_uses) {
          iset_->insert(user);
          continue;
        }
        return false;
      }
      ++*use_count;
    }
  }
  return true;
}

bool HLoopOptimization::TryReplaceWithLastValue(HLoopInformation* loop_info,
                                                HInstruction* instruction,
                                                HBasicBlock* block) {
  // Try to replace outside uses with the last value.
  if (induction_range_.CanGenerateLastValue(instruction)) {
    HInstruction* replacement = induction_range_.GenerateLastValue(instruction, graph_, block);
    // Deal with regular uses.
    const HUseList<HInstruction*>& uses = instruction->GetUses();
    for (auto it = uses.begin(), end = uses.end(); it != end;) {
      HInstruction* user = it->GetUser();
      size_t index = it->GetIndex();
      ++it;  // increment before replacing
      if (iset_->find(user) == iset_->end()) {  // not excluded?
        if (kIsDebugBuild) {
          // We have checked earlier in 'IsOnlyUsedAfterLoop' that the use is after the loop.
          HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
          CHECK(other_loop_info == nullptr || !other_loop_info->IsIn(*loop_info));
        }
        user->ReplaceInput(replacement, index);
        induction_range_.Replace(user, instruction, replacement);  // update induction
      }
    }
    // Deal with environment uses.
    const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
    for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
      HEnvironment* user = it->GetUser();
      size_t index = it->GetIndex();
      ++it;  // increment before replacing
      if (iset_->find(user->GetHolder()) == iset_->end()) {  // not excluded?
        // Only update environment uses after the loop.
        HLoopInformation* other_loop_info = user->GetHolder()->GetBlock()->GetLoopInformation();
        if (other_loop_info == nullptr || !other_loop_info->IsIn(*loop_info)) {
          user->RemoveAsUserOfInput(index);
          user->SetRawEnvAt(index, replacement);
          replacement->AddEnvUseAt(user, index);
        }
      }
    }
    return true;
  }
  return false;
}

bool HLoopOptimization::TryAssignLastValue(HLoopInformation* loop_info,
                                           HInstruction* instruction,
                                           HBasicBlock* block,
                                           bool collect_loop_uses) {
  // Assigning the last value is always successful if there are no uses.
  // Otherwise, it succeeds in a loop with no early exits by generating the
  // proper last value assignment.
  uint32_t use_count = 0;
  return IsOnlyUsedAfterLoop(loop_info, instruction, collect_loop_uses, &use_count) &&
      (use_count == 0 ||
       (!IsEarlyExit(loop_info) && TryReplaceWithLastValue(loop_info, instruction, block)));
}

void HLoopOptimization::RemoveDeadInstructions(const HInstructionList& list) {
  for (HBackwardInstructionIterator i(list); !i.Done(); i.Advance()) {
    HInstruction* instruction = i.Current();
    if (instruction->IsDeadAndRemovable()) {
      simplified_ = true;
      instruction->GetBlock()->RemoveInstructionOrPhi(instruction);
    }
  }
}

bool HLoopOptimization::CanRemoveCycle() {
  for (HInstruction* i : *iset_) {
    // We can never remove instructions that have environment
    // uses when we compile 'debuggable'.
    if (i->HasEnvironmentUses() && graph_->IsDebuggable()) {
      return false;
    }
    // A deoptimization should never have an environment input removed.
    for (const HUseListNode<HEnvironment*>& use : i->GetEnvUses()) {
      if (use.GetUser()->GetHolder()->IsDeoptimize()) {
        return false;
      }
    }
  }
  return true;
}

}  // namespace art