// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X87

#include "src/crankshaft/x87/lithium-gap-resolver-x87.h"
#include "src/register-configuration.h"

#include "src/crankshaft/x87/lithium-codegen-x87.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}


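// Example (illustrative): resolving the parallel move
// {eax -> ebx, ebx -> [esp + 4]} performs ebx -> [esp + 4] first, because it
// blocks the destination of eax -> ebx, and only then emits eax -> ebx.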
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  DCHECK(HasBeenReset());
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}


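// Example (illustrative): for the cycle {eax -> ebx, ebx -> eax},
// PerformMove(0) marks the first move pending and recurses into the second,
// which finds itself blocked on the pending move and emits an xchg of eax
// and ebx; back in the outer call the first move's source has become ebx,
// so it is removed as the last move of the cycle.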
void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.  We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved on the side.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any).  Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B.  Then A and B must be involved in the same cycle (or they would
      // not be swapped).  Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle.  In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle.  If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on a pending move (there can be at most one),
  // in which case we have a cycle.  Search for such a blocking move and
  // perform a swap to resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      DCHECK(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move, cgen_->zone());
}


void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    DCHECK(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    DCHECK(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


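// Explanatory note: a register is known to be free only if some remaining
// move writes it (so its current value is dead) and no remaining move still
// reads it.  A register with zero counts in both arrays is not part of the
// parallel move at all and may be live across the gap, so it cannot be
// handed out without spilling (see EnsureTempRegister below).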
Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : reg.code();
  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
        code != skip_index) {
      return Register::from_code(code);
    }
  }
  return no_reg;
}


bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;
  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    if (source_uses_[code] != 0) return false;
    if (destination_uses_[code] != 0) return false;
  }
  return true;
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::from_code(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}


void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::from_code(spilled_register_));
    spilled_register_ = -1;
  }
}


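// Explanatory note: a register spilled here with a push stays spilled until
// either EnsureRestored pops it (because a later move uses it as an operand)
// or Finish pops it at the end of the parallel move.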
Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::from_code(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
      Register scratch = Register::from_code(code);
      __ push(scratch);
      spilled_register_ = code;
      return scratch;
    }
  }

  // 4. Use an arbitrary register.  Register 0 is as arbitrary as any other.
  spilled_register_ = config->GetAllocatableGeneralCode(0);
  Register scratch = Register::from_code(spilled_register_);
  __ push(scratch);
  return scratch;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = bit_cast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
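      // Explanatory note: pushing the upper half first and the lower half
      // second leaves the 64-bit double in little-endian order at [esp], so
      // the X87Mov below reads the complete value from the top of the stack.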
      __ push(Immediate(upper));
      __ push(Immediate(lower));
      X87Register dst = cgen_->ToX87Register(destination);
      cgen_->X87Mov(dst, MemOperand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        Register tmp = EnsureTempRegister();
        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
        __ mov(dst, tmp);
      }
    }

  } else if (source->IsDoubleRegister()) {
    // Load from the source register and store to the destination: either a
    // double stack slot or another X87 register in the non-SSE2 case.
    if (destination->IsDoubleStackSlot()) {
      Operand dst = cgen_->ToOperand(destination);
      X87Register src = cgen_->ToX87Register(source);
      cgen_->X87Mov(dst, src);
    } else {
      X87Register dst = cgen_->ToX87Register(destination);
      X87Register src = cgen_->ToX87Register(source);
      cgen_->X87Mov(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    // Load the stack slot onto the floating-point stack and store to the
    // destination.  If the destination is a double register, it represents
    // the top of the stack, so loading the value is all that is needed.
    if (destination->IsDoubleStackSlot()) {
      Register tmp = EnsureTempRegister();
      Operand src0 = cgen_->ToOperand(source);
      Operand src1 = cgen_->HighOperand(source);
      Operand dst0 = cgen_->ToOperand(destination);
      Operand dst1 = cgen_->HighOperand(destination);
      __ mov(tmp, src0);  // Use tmp to copy source to destination word by word.
      __ mov(dst0, tmp);
      __ mov(tmp, src1);
      __ mov(dst1, tmp);
    } else {
      Operand src = cgen_->ToOperand(source);
      X87Register dst = cgen_->ToX87Register(destination);
      cgen_->X87Mov(dst, src);
    }
  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory.  Use a free register as a temp if possible.  Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
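      // No free temporary available: fall back to the classic three-XOR
      // in-place swap (a ^= b; b ^= a; a ^= b), which exchanges reg and mem
      // without a scratch register.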
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.  Spill on demand to use a temporary.  If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us.
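      // Stage dst in tmp0, XOR-swap tmp0 with src in place (leaving the old
      // dst value in src), then store the old src value from tmp0 into dst.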
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We don't have use counts for non-register operands like the
    // destination.  Compute those counts now.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X87