// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_X64

#include "src/x64/lithium-codegen-x64.h"
#include "src/x64/lithium-gap-resolver-x64.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()) {}

void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}

void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}

void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.
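  // For example, a two-move cycle such as { A := B; B := A } is resolved by
  // recursing from the first move into the second; the recursive call sees
  // that the first move is pending, emits a swap, and the outer move then
  // becomes trivial and is eliminated.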

  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack-allocated local. Recursion may allow
  // multiple moves to be pending.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    moves_[index].Eliminate();
    return;
  }

  // The move may be blocked on at most one pending move, in which case we
  // have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      DCHECK(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}

void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    Register src = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movp(dst, src);
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movp(dst, src);
    }

  } else if (source->IsStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movp(dst, src);
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
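      // There is no memory-to-memory move on x64, so stage the value in
      // kScratchRegister.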
      __ movp(kScratchRegister, src);
      __ movp(dst, kScratchRegister);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsSmiConstant(constant_source)) {
        __ Move(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        int32_t constant = cgen_->ToInteger32(constant_source);
        // Do sign extension only for constants used as de-hoisted array keys.
        // Everything else only needs zero extension, which saves 2 bytes.
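        // (Writing the low 32 bits of a register zero-extends on x64, so the
        // unsigned value can be loaded with a shorter 32-bit move.)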
        if (cgen_->IsDehoistedKeyConstant(constant_source)) {
          __ Set(dst, constant);
        } else {
          __ Set(dst, static_cast<uint32_t>(constant));
        }
      } else {
        __ Move(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = bit_cast<uint64_t, double>(v);
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
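      // Zeroing the register with xorps avoids materializing a 64-bit
      // immediate through kScratchRegister.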
      if (int_val == 0) {
        __ xorps(dst, dst);
      } else {
        __ Set(kScratchRegister, int_val);
        __ movq(dst, kScratchRegister);
      }
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      if (cgen_->IsSmiConstant(constant_source)) {
        __ Move(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        // Do sign extension to 64 bits when stored into the stack slot.
        __ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
      } else {
        __ Move(kScratchRegister, cgen_->ToHandle(constant_source));
        __ movp(dst, kScratchRegister);
      }
    }

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ movaps(cgen_->ToDoubleRegister(destination), src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ movsd(cgen_->ToOperand(destination), src);
    }
  } else if (source->IsDoubleStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      __ movsd(cgen_->ToDoubleRegister(destination), src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ movsd(xmm0, src);
      __ movsd(cgen_->ToOperand(destination), xmm0);
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}

void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Swap two general-purpose registers.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchgq(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Swap a general-purpose register and a stack slot.
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
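    // The old memory value is parked in kScratchRegister while the register
    // is stored to memory.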
    __ movp(kScratchRegister, mem);
    __ movp(mem, reg);
    __ movp(reg, kScratchRegister);

  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
    // Swap two stack slots or two double stack slots.
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
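    // A memory-to-memory swap needs two temporaries: xmm0 holds the source
    // slot and kScratchRegister holds the destination slot.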
    __ movsd(xmm0, src);
    __ movp(kScratchRegister, dst);
    __ movsd(dst, xmm0);
    __ movp(src, kScratchRegister);

  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // Swap two double registers.
    XMMRegister source_reg = cgen_->ToDoubleRegister(source);
    XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, source_reg);
    __ movaps(source_reg, destination_reg);
    __ movaps(destination_reg, xmm0);

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // Swap a double register and a double stack slot.
    DCHECK((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
           (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    LOperand* other = source->IsDoubleRegister() ? destination : source;
    DCHECK(other->IsDoubleStackSlot());
    Operand other_operand = cgen_->ToOperand(other);
    __ movsd(xmm0, other_operand);
    __ movsd(other_operand, reg);
    __ movaps(reg, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  moves_[index].Eliminate();

  // Any unperformed (including pending) move with a source of either this
  // move's source or destination needs to have its source changed to
  // reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64