// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/move-optimizer.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {

typedef std::pair<InstructionOperand, InstructionOperand> MoveKey;

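// Orders MoveKeys by their canonicalized source operand, breaking ties on the
// canonicalized destination operand.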
struct MoveKeyCompare {
  bool operator()(const MoveKey& a, const MoveKey& b) const {
    if (a.first.EqualsCanonicalized(b.first)) {
      return a.second.CompareCanonicalized(b.second);
    }
    return a.first.CompareCanonicalized(b.first);
  }
};

struct OperandCompare {
  bool operator()(const InstructionOperand& a,
                  const InstructionOperand& b) const {
    return a.CompareCanonicalized(b);
  }
};

typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;


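// Returns true when gap moves can safely be hoisted across |instr|: |instr|
// must be a nop, or an ArchNop that clobbers nothing and whose own gap moves
// do not reference any of its inputs, outputs, or temps.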
bool GapsCanMoveOver(Instruction* instr, Zone* zone) {
  if (instr->IsNop()) return true;
  if (instr->ClobbersTemps() || instr->ClobbersRegisters() ||
      instr->ClobbersDoubleRegisters()) {
    return false;
  }
  if (instr->arch_opcode() != ArchOpcode::kArchNop) return false;

  ZoneSet<InstructionOperand, OperandCompare> operands(zone);
  for (size_t i = 0; i < instr->InputCount(); ++i) {
    operands.insert(*instr->InputAt(i));
  }
  for (size_t i = 0; i < instr->OutputCount(); ++i) {
    operands.insert(*instr->OutputAt(i));
  }
  for (size_t i = 0; i < instr->TempCount(); ++i) {
    operands.insert(*instr->TempAt(i));
  }
  for (int i = Instruction::GapPosition::FIRST_GAP_POSITION;
       i <= Instruction::GapPosition::LAST_GAP_POSITION; ++i) {
    ParallelMove* moves = instr->parallel_moves()[i];
    if (moves == nullptr) continue;
    for (MoveOperands* move : *moves) {
      if (operands.count(move->source()) > 0 ||
          operands.count(move->destination()) > 0) {
        return false;
      }
    }
  }
  return true;
}


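// Returns the first gap position of |instr| that still holds a non-redundant
// move, eliminating redundant moves (and clearing fully redundant slots) on
// the way. Returns LAST_GAP_POSITION + 1 if no such slot exists.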
int FindFirstNonEmptySlot(const Instruction* instr) {
  int i = Instruction::FIRST_GAP_POSITION;
  for (; i <= Instruction::LAST_GAP_POSITION; i++) {
    ParallelMove* moves = instr->parallel_moves()[i];
    if (moves == nullptr) continue;
    for (MoveOperands* move : *moves) {
      if (!move->IsRedundant()) return i;
      move->Eliminate();
    }
    moves->clear();  // All moves in this slot were redundant; clear it.
  }
  return i;
}

}  // namespace


MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
    : local_zone_(local_zone),
      code_(code),
      to_finalize_(local_zone),
      local_vector_(local_zone) {}


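// Compresses gap moves within each block, hoists moves shared by all
// predecessors of a merge block into that block, and finally splits the
// collected gaps via FinalizeMoves().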
void MoveOptimizer::Run() {
  for (InstructionBlock* block : code()->instruction_blocks()) {
    CompressBlock(block);
  }
  for (InstructionBlock* block : code()->instruction_blocks()) {
    if (block->PredecessorCount() <= 1) continue;
    if (!block->IsDeferred()) {
      bool has_only_deferred = true;
      for (RpoNumber& pred_id : block->predecessors()) {
        if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) {
          has_only_deferred = false;
          break;
        }
      }
      // OptimizeMerge would pull common moves down into this block. If those
      // moves occur only in deferred predecessors and this block is not
      // deferred, pulling them down defeats the optimization of keeping
      // spills/fills confined to deferred blocks, so skip the merge.
      if (has_only_deferred) continue;
    }
    OptimizeMerge(block);
  }
  for (Instruction* gap : to_finalize_) {
    FinalizeMoves(gap);
  }
}


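// Merges the moves of |right| into |left| and clears |right|, eliminating any
// moves in |left| that the merge makes redundant.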
void MoveOptimizer::CompressMoves(ParallelMove* left, ParallelMove* right) {
  if (right == nullptr) return;

  MoveOpVector& eliminated = local_vector();
  DCHECK(eliminated.empty());

  if (!left->empty()) {
    // Modify the right moves in place and collect moves that will be killed by
    // merging the two gaps.
    for (MoveOperands* move : *right) {
      if (move->IsRedundant()) continue;
      MoveOperands* to_eliminate = left->PrepareInsertAfter(move);
      if (to_eliminate != nullptr) eliminated.push_back(to_eliminate);
    }
    // Eliminate dead moves.
    for (MoveOperands* to_eliminate : eliminated) {
      to_eliminate->Eliminate();
    }
    eliminated.clear();
  }
  // Add all possibly modified moves from right side.
  for (MoveOperands* move : *right) {
    if (move->IsRedundant()) continue;
    left->push_back(move);
  }
  // Nuke right.
  right->clear();
  DCHECK(eliminated.empty());
}


// Smash all consecutive moves into the leftmost move slot and accumulate them
// as much as possible across instructions.
void MoveOptimizer::CompressBlock(InstructionBlock* block) {
  Instruction* prev_instr = nullptr;
  for (int index = block->code_start(); index < block->code_end(); ++index) {
    Instruction* instr = code()->instructions()[index];
    int i = FindFirstNonEmptySlot(instr);
    bool has_moves = i <= Instruction::LAST_GAP_POSITION;

    if (i == Instruction::LAST_GAP_POSITION) {
      std::swap(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
                instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
    } else if (i == Instruction::FIRST_GAP_POSITION) {
      CompressMoves(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
                    instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
    }
    // We either have no moves, or, after swapping or compressing, we have
    // all the moves in the first gap position, and none in the second/end gap
    // position.
    ParallelMove* first =
        instr->parallel_moves()[Instruction::FIRST_GAP_POSITION];
    ParallelMove* last =
        instr->parallel_moves()[Instruction::LAST_GAP_POSITION];
    USE(last);

    DCHECK(!has_moves ||
           (first != nullptr && (last == nullptr || last->empty())));

    if (prev_instr != nullptr) {
      if (has_moves) {
        // Smash first into prev_instr's gap; CompressMoves empties first.
        ParallelMove* pred_moves = prev_instr->parallel_moves()[0];
        CompressMoves(pred_moves, first);
      }
      // Slide prev_instr down so we always know where to look for it.
      std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]);
    }

    prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr;
    if (GapsCanMoveOver(instr, local_zone())) continue;
    if (prev_instr != nullptr) {
      to_finalize_.push_back(prev_instr);
      prev_instr = nullptr;
    }
  }
  if (prev_instr != nullptr) {
    to_finalize_.push_back(prev_instr);
  }
}


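// Returns the last instruction of |block|.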
const Instruction* MoveOptimizer::LastInstruction(
    const InstructionBlock* block) const {
  return code()->instructions()[block->last_instruction_index()];
}


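// For a block with multiple predecessors, if the gap of the last instruction
// of every predecessor ends with the same set of moves, remove those moves
// from the predecessors and insert them once at the top of this block.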
void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
  DCHECK(block->PredecessorCount() > 1);
  // Ensure that the last instruction of each incoming block doesn't contain
  // anything that would prevent moving gap moves across it.
  for (RpoNumber& pred_index : block->predecessors()) {
    const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
    const Instruction* last_instr =
        code()->instructions()[pred->last_instruction_index()];
    if (last_instr->IsCall()) return;
    if (last_instr->TempCount() != 0) return;
    if (last_instr->OutputCount() != 0) return;
    for (size_t i = 0; i < last_instr->InputCount(); ++i) {
      const InstructionOperand* op = last_instr->InputAt(i);
      if (!op->IsConstant() && !op->IsImmediate()) return;
    }
  }
  // TODO(dcarney): pass a ZonePool down for this?
  MoveMap move_map(local_zone());
  size_t correct_counts = 0;
  // Accumulate set of shared moves.
  for (RpoNumber& pred_index : block->predecessors()) {
    const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
    const Instruction* instr = LastInstruction(pred);
    if (instr->parallel_moves()[0] == nullptr ||
        instr->parallel_moves()[0]->empty()) {
      return;
    }
    for (const MoveOperands* move : *instr->parallel_moves()[0]) {
      if (move->IsRedundant()) continue;
      InstructionOperand src = move->source();
      InstructionOperand dst = move->destination();
      MoveKey key = {src, dst};
      auto res = move_map.insert(std::make_pair(key, 1));
      if (!res.second) {
        res.first->second++;
        if (res.first->second == block->PredecessorCount()) {
          correct_counts++;
        }
      }
    }
  }
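  // Only hoist when every accumulated move occurs in all predecessors.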
  if (move_map.empty() || correct_counts != move_map.size()) return;
  // Find insertion point.
  Instruction* instr = nullptr;
  for (int i = block->first_instruction_index();
       i <= block->last_instruction_index(); ++i) {
    instr = code()->instructions()[i];
    if (!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant())
      break;
  }
  DCHECK_NOT_NULL(instr);
  bool gap_initialized = true;
  if (instr->parallel_moves()[0] == nullptr ||
      instr->parallel_moves()[0]->empty()) {
    to_finalize_.push_back(instr);
  } else {
    // Will compress after insertion.
    gap_initialized = false;
    std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]);
  }
  ParallelMove* moves = instr->GetOrCreateParallelMove(
      static_cast<Instruction::GapPosition>(0), code_zone());
  // Delete relevant entries in predecessors and move everything to block.
  bool first_iteration = true;
  for (RpoNumber& pred_index : block->predecessors()) {
    const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
    for (MoveOperands* move : *LastInstruction(pred)->parallel_moves()[0]) {
      if (move->IsRedundant()) continue;
      MoveKey key = {move->source(), move->destination()};
      auto it = move_map.find(key);
      USE(it);
      DCHECK(it != move_map.end());
      if (first_iteration) {
        moves->AddMove(move->source(), move->destination());
      }
      move->Eliminate();
    }
    first_iteration = false;
  }
  // Compress.
  if (!gap_initialized) {
    CompressMoves(instr->parallel_moves()[0], instr->parallel_moves()[1]);
  }
}


namespace {

bool IsSlot(const InstructionOperand& op) {
  return op.IsStackSlot() || op.IsDoubleStackSlot();
}


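// Orders loads by canonicalized source; within a group of loads from the same
// source, moves whose destination is not a stack slot come first.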
bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
  if (!a->source().EqualsCanonicalized(b->source())) {
    return a->source().CompareCanonicalized(b->source());
  }
  if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false;
  if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true;
  return a->destination().CompareCanonicalized(b->destination());
}

}  // namespace


// Split multiple loads of the same constant or stack slot off into the second
// slot and keep remaining moves in the first slot.
void MoveOptimizer::FinalizeMoves(Instruction* instr) {
  MoveOpVector& loads = local_vector();
  DCHECK(loads.empty());

  // Find all the loads.
  for (MoveOperands* move : *instr->parallel_moves()[0]) {
    if (move->IsRedundant()) continue;
    if (move->source().IsConstant() || IsSlot(move->source())) {
      loads.push_back(move);
    }
  }
  if (loads.empty()) return;
  // Group the loads by source, moving the preferred destination to the
  // beginning of the group.
  std::sort(loads.begin(), loads.end(), LoadCompare);
  MoveOperands* group_begin = nullptr;
  for (MoveOperands* load : loads) {
    // New group.
    if (group_begin == nullptr ||
        !load->source().EqualsCanonicalized(group_begin->source())) {
      group_begin = load;
      continue;
    }
    // Nothing to be gained from splitting here.
    if (IsSlot(group_begin->destination())) continue;
    // Insert new move into slot 1.
    ParallelMove* slot_1 = instr->GetOrCreateParallelMove(
        static_cast<Instruction::GapPosition>(1), code_zone());
    slot_1->AddMove(group_begin->destination(), load->destination());
    load->Eliminate();
  }
  loads.clear();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8