// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "arm/lithium-gap-resolver-arm.h"
#include "arm/lithium-codegen-arm.h"

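// The gap resolver lowers an LParallelMove (a set of moves that are
// semantically simultaneous) into an equivalent sequence of individual
// ARM moves, using scratch registers to break cycles.  For example, the
// cyclic parallel move {r0 -> r1, r1 -> r0} can be emitted as:
//
//   mov r9, r1    ; spill one source to the scratch register
//   mov r1, r0    ; the remaining move is now unblocked
//   mov r0, r9    ; restore the spilled value to its destination
//
// (A sketch of the idea only; the exact sequence depends on the operands.)
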
namespace v8 {
namespace internal {

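// Core scratch register used to carry a value while a cycle in the move
// graph is broken.  Register code 9 is r9 on ARM; it is assumed not to be
// handed out by the register allocator, so the resolver may clobber it.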
static const Register kSavedValueRegister = { 9 };

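// moves_ is created with room for 32 moves up front; the list grows on
// demand if a parallel move is larger than that.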
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
      saved_destination_(NULL) { }


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found by reaching this move again.
      PerformMove(i);
      if (in_cycle_) {
        RestoreValue();
      }
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

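  // All moves have been performed or eliminated; empty the worklist so the
  // resolver can be reused for the next gap.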
  moves_.Rewind(0);
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move);
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.

  // We can only find a cycle, when doing a depth-first traversal of moves,
  // by encountering the starting move again.  So by spilling the source of
  // the starting move, we break the cycle.  All moves are then unblocked,
  // and the starting move is completed by writing the spilled value to
  // its destination.  All other moves from the spilled source have been
  // completed prior to breaking the cycle.
  // An additional complication is that moves to MemOperands with large
  // offsets (more than 1K or 4K) require us to spill this spilled value to
  // the stack, to free up the register.
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack allocated local.  Multiple moves can
  // be pending because this function is recursive.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_]
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting move.
  // In this case, we have a cycle, and we save the source of this move to
  // a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    ASSERT(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

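// Shorthand for emitting instructions through the owning code generator's
// macro assembler.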
#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::BreakCycle(int index) {
  // We save in a register the value that should end up in the source of
  // moves_[root_index_].  After performing all moves in the tree rooted
  // in that move, we restore the value to that source.
  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
  ASSERT(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
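  // Copy the value out of the cycle into a scratch register before later
  // moves can clobber it: the core scratch for general-purpose values, the
  // VFP scratch for doubles.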
  if (source->IsRegister()) {
    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}


void LGapResolver::RestoreValue() {
  ASSERT(in_cycle_);
  ASSERT(saved_destination_ != NULL);

  // The spilled value is in kSavedValueRegister or kScratchDoubleReg.
  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
  } else if (saved_destination_->IsStackSlot()) {
    __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      ASSERT(destination->IsStackSlot());
      __ str(source_register, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(cgen_->ToRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        if (!destination_operand.OffsetIsUint12Encodable()) {
          // ip is overwritten while saving the value to the destination.
          // Therefore we can't use ip.  It is OK if the read from the source
          // destroys ip, since that happens before the value is read.
          __ vldr(kScratchDoubleReg.low(), source_operand);
          __ vstr(kScratchDoubleReg.low(), destination_operand);
        } else {
          __ ldr(ip, source_operand);
          __ str(ip, destination_operand);
        }
      } else {
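        // Not in a cycle, so kSavedValueRegister is free to serve as the
        // memory-to-memory temporary.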
        __ ldr(kSavedValueRegister, source_operand);
        __ str(kSavedValueRegister, destination_operand);
      }
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsInteger32(constant_source)) {
        __ mov(dst, Operand(cgen_->ToInteger32(constant_source)));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else {
      ASSERT(destination->IsStackSlot());
      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
      if (cgen_->IsInteger32(constant_source)) {
        __ mov(kSavedValueRegister,
               Operand(cgen_->ToInteger32(constant_source)));
      } else {
        __ LoadObject(kSavedValueRegister,
                      cgen_->ToHandle(constant_source));
      }
      __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleRegister()) {
    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ vmov(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ vstr(source_register, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kScratchDoubleReg was used to break the cycle,
        // but kSavedValueRegister is free.
        MemOperand source_high_operand =
            cgen_->ToHighMemOperand(source);
        MemOperand destination_high_operand =
            cgen_->ToHighMemOperand(destination);
        __ ldr(kSavedValueRegister, source_operand);
        __ str(kSavedValueRegister, destination_operand);
        __ ldr(kSavedValueRegister, source_high_operand);
        __ str(kSavedValueRegister, destination_high_operand);
      } else {
        __ vldr(kScratchDoubleReg, source_operand);
        __ vstr(kScratchDoubleReg, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}


#undef __

} }  // namespace v8::internal