// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_IA32

#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h"
#include "src/register-configuration.h"

namespace v8 {
namespace internal {

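// source_uses_ and destination_uses_ count, per general register code, how
// many pending moves read or write that register (see AddMove/RemoveMove);
// spilled_register_ is the code of a register that has been pushed to the
// stack to serve as a temporary, or -1 if none.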
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  DCHECK(HasBeenReset());
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.  We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved on the side.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any).  Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B.  Then A and B must be involved in the same cycle (or they would
      // not be swapped).  Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle.  In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle.  If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on at most one pending move, in which case
  // we have a cycle.  Search for such a blocking move and perform a swap
  // to resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      DCHECK(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move, cgen_->zone());
}


void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    DCHECK(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    DCHECK(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : reg.code();
  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
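    // A register read by no pending move but written by at least one holds
    // a dead value (it will be overwritten anyway), so it is free to use as
    // a temporary without spilling.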
    if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
        code != skip_index) {
      return Register::from_code(code);
    }
  }
  return no_reg;
}


bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;
  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    if (source_uses_[code] != 0) return false;
    if (destination_uses_[code] != 0) return false;
  }
  return true;
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::from_code(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}


void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::from_code(spilled_register_));
    spilled_register_ = -1;
  }
}


Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::from_code(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
      Register scratch = Register::from_code(code);
      __ push(scratch);
      spilled_register_ = code;
      return scratch;
    }
  }

  // 4. Use an arbitrary register.  Register 0 is as arbitrary as any other.
  spilled_register_ = config->GetAllocatableGeneralCode(0);
  Register scratch = Register::from_code(spilled_register_);
  __ push(scratch);
  return scratch;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = bit_cast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      if (int_val == 0) {
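        // The bit pattern of +0.0 is all zeros, so just clear the register.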
        __ xorps(dst, dst);
      } else {
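        // Materialize the 64-bit pattern via the stack: push the high and
        // low words, load them into the XMM register, then free the slot.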
        __ push(Immediate(upper));
        __ push(Immediate(lower));
        __ movsd(dst, Operand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      }
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        Register tmp = EnsureTempRegister();
        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
        __ mov(dst, tmp);
      }
    }

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      __ movaps(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = cgen_->ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ push(src);
    __ mov(src, dst);
    __ pop(dst);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory.  Use a free register as a temp if possible.  Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
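      // No free register: swap the register and the memory slot in place
      // with the classic three-XOR trick.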
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.  Spill on demand to use a temporary.  If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us.
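      // Copy dst into tmp0, XOR-swap tmp0 with src (leaving the old dst
      // value in src), then write tmp0 (the old src value) back to dst.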
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = cgen_->ToDoubleRegister(source);
    XMMRegister dst = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);
  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // XMM register-memory swap.  We rely on having xmm0
    // available as a fixed scratch register.
    DCHECK(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                              ? source
                                              : destination);
    Operand other =
        cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
    __ movsd(xmm0, other);
    __ movsd(other, reg);
    __ movaps(reg, xmm0);
  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    // Double-width memory-to-memory.  Spill on demand to use a general
    // purpose temporary register and also rely on having xmm0 available as
    // a fixed scratch register.
    Register tmp = EnsureTempRegister();
    Operand src0 = cgen_->ToOperand(source);
    Operand src1 = cgen_->HighOperand(source);
    Operand dst0 = cgen_->ToOperand(destination);
    Operand dst1 = cgen_->HighOperand(destination);
    __ movsd(xmm0, dst0);  // Save destination in xmm0.
    __ mov(tmp, src0);  // Then use tmp to copy source to destination.
    __ mov(dst0, tmp);
    __ mov(tmp, src1);
    __ mov(dst1, tmp);
    __ movsd(src0, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We don't have use counts for non-register operands like destination.
    // Compute those counts now.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32