// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/change-lowering.h"

#include "src/address-map.h"
#include "src/code-factory.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"

namespace v8 {
namespace internal {
namespace compiler {

ChangeLowering::~ChangeLowering() {}


Reduction ChangeLowering::Reduce(Node* node) {
  Node* control = graph()->start();
  switch (node->opcode()) {
    case IrOpcode::kChangeBitToBool:
      return ChangeBitToBool(node->InputAt(0), control);
    case IrOpcode::kChangeBoolToBit:
      return ChangeBoolToBit(node->InputAt(0));
    case IrOpcode::kChangeFloat64ToTagged:
      return ChangeFloat64ToTagged(node->InputAt(0), control);
    case IrOpcode::kChangeInt32ToTagged:
      return ChangeInt32ToTagged(node->InputAt(0), control);
    case IrOpcode::kChangeTaggedToFloat64:
      return ChangeTaggedToFloat64(node->InputAt(0), control);
    case IrOpcode::kChangeTaggedToInt32:
      return ChangeTaggedToUI32(node->InputAt(0), control, kSigned);
    case IrOpcode::kChangeTaggedToUint32:
      return ChangeTaggedToUI32(node->InputAt(0), control, kUnsigned);
    case IrOpcode::kChangeUint32ToTagged:
      return ChangeUint32ToTagged(node->InputAt(0), control);
    case IrOpcode::kLoadField:
      return LoadField(node);
    case IrOpcode::kStoreField:
      return StoreField(node);
    case IrOpcode::kLoadElement:
      return LoadElement(node);
    case IrOpcode::kStoreElement:
      return StoreElement(node);
    case IrOpcode::kAllocate:
      return Allocate(node);
    default:
      return NoChange();
  }
  UNREACHABLE();
  return NoChange();
}


Node* ChangeLowering::HeapNumberValueIndexConstant() {
  return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
}


Node* ChangeLowering::SmiMaxValueConstant() {
  return jsgraph()->Int32Constant(Smi::kMaxValue);
}

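// A Smi ("small integer") stores its value in the upper bits of a tagged
// word, leaving the tag bit(s) zero:
//
//   tagged = value << (kSmiShiftSize + kSmiTagSize)
//
// For example, with the usual 32-bit layout (kSmiShiftSize == 0,
// kSmiTagSize == 1) the integer 42 is encoded as 84; on 64-bit targets
// (kSmiShiftSize == 31) the payload occupies the upper 32 bits of the word.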
Node* ChangeLowering::SmiShiftBitsConstant() {
  return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}

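// Allocates a HeapNumber and initializes its value field. The allocation and
// the initializing store are bracketed by BeginRegion/FinishRegion so that
// later passes treat them as one atomic group and the not-yet-initialized
// HeapNumber cannot be observed in between. The store writes a raw float64,
// so it needs no write barrier.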
Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
  // The AllocateHeapNumberStub does not use the context, so we can safely pass
  // in Smi zero here.
  Callable callable = CodeFactory::AllocateHeapNumber(isolate());
  Node* target = jsgraph()->HeapConstant(callable.code());
  Node* context = jsgraph()->NoContextConstant();
  Node* effect = graph()->NewNode(common()->BeginRegion(), graph()->start());
  if (!allocate_heap_number_operator_.is_set()) {
    CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
        isolate(), jsgraph()->zone(), callable.descriptor(), 0,
        CallDescriptor::kNoFlags, Operator::kNoThrow);
    allocate_heap_number_operator_.set(common()->Call(descriptor));
  }
  Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
                                       target, context, effect, control);
  Node* store = graph()->NewNode(
      machine()->Store(StoreRepresentation(MachineRepresentation::kFloat64,
                                           kNoWriteBarrier)),
      heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
  return graph()->NewNode(common()->FinishRegion(), heap_number, store);
}


Node* ChangeLowering::ChangeInt32ToFloat64(Node* value) {
  return graph()->NewNode(machine()->ChangeInt32ToFloat64(), value);
}

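// Tags a word32 value as a Smi: widen to word64 on 64-bit targets, then shift
// the payload into place. For example, 42 becomes 84 (42 << 1) on 32-bit
// targets and 42 << 32 on 64-bit targets.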
Node* ChangeLowering::ChangeInt32ToSmi(Node* value) {
  if (machine()->Is64()) {
    value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
  }
  return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
}


Node* ChangeLowering::ChangeSmiToFloat64(Node* value) {
  return ChangeInt32ToFloat64(ChangeSmiToInt32(value));
}

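// Untags a Smi: the arithmetic shift right undoes the tagging shift while
// preserving the sign, and on 64-bit targets the result is then truncated
// from word64 back to word32.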
Node* ChangeLowering::ChangeSmiToInt32(Node* value) {
  value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
  if (machine()->Is64()) {
    value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
  }
  return value;
}


Node* ChangeLowering::ChangeUint32ToFloat64(Node* value) {
  return graph()->NewNode(machine()->ChangeUint32ToFloat64(), value);
}


Node* ChangeLowering::ChangeUint32ToSmi(Node* value) {
  if (machine()->Is64()) {
    value = graph()->NewNode(machine()->ChangeUint32ToUint64(), value);
  }
  return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
}


Node* ChangeLowering::LoadHeapNumberValue(Node* value, Node* control) {
  return graph()->NewNode(machine()->Load(MachineType::Float64()), value,
                          HeapNumberValueIndexConstant(), graph()->start(),
                          control);
}

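// Produces a value that is zero iff {value} is a Smi: since the Smi tag is 0
// and the tag mask is 1, masking out the low bit leaves 0 for Smis and a
// nonzero result for heap object pointers.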
Node* ChangeLowering::TestNotSmi(Node* value) {
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagMask == 1);
  return graph()->NewNode(machine()->WordAnd(), value,
                          jsgraph()->IntPtrConstant(kSmiTagMask));
}


Reduction ChangeLowering::ChangeBitToBool(Node* value, Node* control) {
  return Replace(
      graph()->NewNode(common()->Select(MachineRepresentation::kTagged), value,
                       jsgraph()->TrueConstant(), jsgraph()->FalseConstant()));
}

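// Lowers a boolean to a bit by comparing against the canonical true value;
// this works because true and false are singleton oddballs, so reference
// equality decides the question.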
Reduction ChangeLowering::ChangeBoolToBit(Node* value) {
  return Replace(graph()->NewNode(machine()->WordEqual(), value,
                                  jsgraph()->TrueConstant()));
}


Reduction ChangeLowering::ChangeFloat64ToTagged(Node* value, Node* control) {
  Type* const value_type = NodeProperties::GetType(value);
  Node* const value32 = graph()->NewNode(
      machine()->TruncateFloat64ToInt32(TruncationMode::kRoundToZero), value);
  // TODO(bmeurer): This fast case must be disabled until we kill the asm.js
  // support in the generic JavaScript pipeline, because LoadBuffer is lying
  // about its result.
  // if (value_type->Is(Type::Signed32())) {
  //   return ChangeInt32ToTagged(value32, control);
  // }
  Node* check_same = graph()->NewNode(
      machine()->Float64Equal(), value,
      graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
  Node* branch_same = graph()->NewNode(common()->Branch(), check_same, control);

  Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_same);
  Node* vsmi;
  Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
  Node* vbox;

  // We only need to check for -0 if the {value} can potentially contain -0.
  if (value_type->Maybe(Type::MinusZero())) {
    Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
                                        jsgraph()->Int32Constant(0));
    Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
                                         check_zero, if_smi);

    Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
    Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);

    // In case of 0, we need to check the high bits for the IEEE -0 pattern.
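    // (-0.0 differs from +0.0 only in the sign bit: its high word is
    // 0x80000000, which is negative when interpreted as int32, hence the
    // Int32LessThan against zero below.)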
    Node* check_negative = graph()->NewNode(
        machine()->Int32LessThan(),
        graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
        jsgraph()->Int32Constant(0));
    Node* branch_negative = graph()->NewNode(
        common()->Branch(BranchHint::kFalse), check_negative, if_zero);

    Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
    Node* if_notnegative =
        graph()->NewNode(common()->IfFalse(), branch_negative);

    // We need to create a box for negative 0.
    if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
    if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
  }

  // On 64-bit machines we can just wrap the 32-bit integer in a Smi; on 32-bit
  // machines we need to deal with potential overflow and fall back to boxing.
  if (machine()->Is64() || value_type->Is(Type::SignedSmall())) {
    vsmi = ChangeInt32ToSmi(value32);
  } else {
    Node* smi_tag =
        graph()->NewNode(machine()->Int32AddWithOverflow(), value32, value32);

    Node* check_ovf = graph()->NewNode(common()->Projection(1), smi_tag);
    Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
                                        check_ovf, if_smi);

    Node* if_ovf = graph()->NewNode(common()->IfTrue(), branch_ovf);
    if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);

    if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
    vsmi = graph()->NewNode(common()->Projection(0), smi_tag);
  }

  // Allocate the box for the {value}.
  vbox = AllocateHeapNumberWithValue(value, if_box);

  control = graph()->NewNode(common()->Merge(2), if_smi, if_box);
  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
                           vsmi, vbox, control);
  return Replace(value);
}


Reduction ChangeLowering::ChangeInt32ToTagged(Node* value, Node* control) {
  if (machine()->Is64() ||
      NodeProperties::GetType(value)->Is(Type::SignedSmall())) {
    return Replace(ChangeInt32ToSmi(value));
  }

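  // Adding the value to itself computes value << 1, i.e. the Smi tagging
  // shift, while also producing an overflow bit for values outside the Smi
  // range.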
  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value);

  Node* ovf = graph()->NewNode(common()->Projection(1), add);
  Node* branch =
      graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);

  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
  Node* vtrue =
      AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), if_true);

  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
  Node* vfalse = graph()->NewNode(common()->Projection(0), add);

  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
  Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
                               vtrue, vfalse, merge);

  return Replace(phi);
}


Reduction ChangeLowering::ChangeTaggedToUI32(Node* value, Node* control,
                                             Signedness signedness) {
  if (NodeProperties::GetType(value)->Is(Type::TaggedSigned())) {
    return Replace(ChangeSmiToInt32(value));
  }

  const Operator* op = (signedness == kSigned)
                           ? machine()->ChangeFloat64ToInt32()
                           : machine()->ChangeFloat64ToUint32();

  if (NodeProperties::GetType(value)->Is(Type::TaggedPointer())) {
    return Replace(graph()->NewNode(op, LoadHeapNumberValue(value, control)));
  }

  Node* check = TestNotSmi(value);
  Node* branch =
      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);

  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
  Node* vtrue = graph()->NewNode(op, LoadHeapNumberValue(value, if_true));

  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
  Node* vfalse = ChangeSmiToInt32(value);

  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
  Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
                               vtrue, vfalse, merge);

  return Replace(phi);
}


namespace {

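// Returns true if {value} has the given opcode and at most one value use, so
// a lowering may "cover" (fuse with) it without duplicating the computation
// for other users.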
bool CanCover(Node* value, IrOpcode::Value opcode) {
  if (value->opcode() != opcode) return false;
  bool first = true;
  for (Edge const edge : value->use_edges()) {
    if (NodeProperties::IsControlEdge(edge)) continue;
    if (NodeProperties::IsEffectEdge(edge)) continue;
    DCHECK(NodeProperties::IsValueEdge(edge));
    if (!first) return false;
    first = false;
  }
  return true;
}

}  // namespace


Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
  if (CanCover(value, IrOpcode::kJSToNumber)) {
    // ChangeTaggedToFloat64(JSToNumber(x)) =>
    //   if IsSmi(x) then ChangeSmiToFloat64(x)
    //   else let y = JSToNumber(x) in
    //     if IsSmi(y) then ChangeSmiToFloat64(y)
    //     else LoadHeapNumberValue(y)
    Node* const object = NodeProperties::GetValueInput(value, 0);
    Node* const context = NodeProperties::GetContextInput(value);
    Node* const frame_state = NodeProperties::GetFrameStateInput(value, 0);
    Node* const effect = NodeProperties::GetEffectInput(value);
    Node* const control = NodeProperties::GetControlInput(value);

    const Operator* merge_op = common()->Merge(2);
    const Operator* ephi_op = common()->EffectPhi(2);
    const Operator* phi_op = common()->Phi(MachineRepresentation::kFloat64, 2);

    Node* check1 = TestNotSmi(object);
    Node* branch1 =
        graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);

    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
    Node* vtrue1 = graph()->NewNode(value->op(), object, context, frame_state,
                                    effect, if_true1);
    Node* etrue1 = vtrue1;

    Node* check2 = TestNotSmi(vtrue1);
    Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_true1);

    Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
    Node* vtrue2 = LoadHeapNumberValue(vtrue1, if_true2);

    Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
    Node* vfalse2 = ChangeSmiToFloat64(vtrue1);

    if_true1 = graph()->NewNode(merge_op, if_true2, if_false2);
    vtrue1 = graph()->NewNode(phi_op, vtrue2, vfalse2, if_true1);

    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
    Node* vfalse1 = ChangeSmiToFloat64(object);
    Node* efalse1 = effect;

    Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
    Node* ephi1 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
    Node* phi1 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);

    // Wire the new diamond into the graph; {JSToNumber} can still throw.
    NodeProperties::ReplaceUses(value, phi1, ephi1, etrue1, etrue1);

    // TODO(mstarzinger): This iteration cuts out the IfSuccess projection from
    // the node and places it inside the diamond. Come up with a helper method!
    for (Node* use : etrue1->uses()) {
      if (use->opcode() == IrOpcode::kIfSuccess) {
        use->ReplaceUses(merge1);
        NodeProperties::ReplaceControlInput(branch2, use);
      }
    }

    return Replace(phi1);
  }

  Node* check = TestNotSmi(value);
  Node* branch =
      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);

  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
  Node* vtrue = LoadHeapNumberValue(value, if_true);

  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
  Node* vfalse = ChangeSmiToFloat64(value);

  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
  Node* phi = graph()->NewNode(
      common()->Phi(MachineRepresentation::kFloat64, 2), vtrue, vfalse, merge);

  return Replace(phi);
}


Reduction ChangeLowering::ChangeUint32ToTagged(Node* value, Node* control) {
  if (NodeProperties::GetType(value)->Is(Type::UnsignedSmall())) {
    return Replace(ChangeUint32ToSmi(value));
  }

  Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
                                 SmiMaxValueConstant());
  Node* branch =
      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);

  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
  Node* vtrue = ChangeUint32ToSmi(value);

  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
  Node* vfalse =
      AllocateHeapNumberWithValue(ChangeUint32ToFloat64(value), if_false);

  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
  Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
                               vtrue, vfalse, merge);

  return Replace(phi);
}


namespace {

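// Picks the cheapest write barrier that is still sound for a store, based on
// what is known statically about the stored value and the field. For example,
// storing a value known to be a Smi needs no barrier at all, while storing an
// arbitrary tagged value into a tagged field requires the full barrier with
// its Smi check.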
WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
                                         MachineRepresentation representation,
                                         Type* field_type, Type* input_type) {
  if (field_type->Is(Type::TaggedSigned()) ||
      input_type->Is(Type::TaggedSigned())) {
    // Write barriers are only for writes of heap objects.
    return kNoWriteBarrier;
  }
  if (input_type->Is(Type::BooleanOrNullOrUndefined())) {
    // Write barriers are not necessary when storing true, false, null or
    // undefined, because these special oddballs are always in the root set.
    return kNoWriteBarrier;
  }
  if (base_is_tagged == kTaggedBase &&
      representation == MachineRepresentation::kTagged) {
    if (input_type->IsConstant() &&
        input_type->AsConstant()->Value()->IsHeapObject()) {
      Handle<HeapObject> input =
          Handle<HeapObject>::cast(input_type->AsConstant()->Value());
      if (input->IsMap()) {
        // Write barriers for storing maps are cheaper.
        return kMapWriteBarrier;
      }
      Isolate* const isolate = input->GetIsolate();
      RootIndexMap root_index_map(isolate);
      int root_index = root_index_map.Lookup(*input);
      if (root_index != RootIndexMap::kInvalidRootIndex &&
          isolate->heap()->RootIsImmortalImmovable(root_index)) {
        // Write barriers are unnecessary for immortal immovable roots.
        return kNoWriteBarrier;
      }
    }
    if (field_type->Is(Type::TaggedPointer()) ||
        input_type->Is(Type::TaggedPointer())) {
      // Write barriers for heap objects don't need a Smi check.
      return kPointerWriteBarrier;
    }
    // Write barriers are only for writes into heap objects (i.e. tagged base).
    return kFullWriteBarrier;
  }
  return kNoWriteBarrier;
}


WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
                                         MachineRepresentation representation,
                                         int field_offset, Type* field_type,
                                         Type* input_type) {
  if (base_is_tagged == kTaggedBase && field_offset == HeapObject::kMapOffset) {
    // Write barriers for storing maps are cheaper.
    return kMapWriteBarrier;
  }
  return ComputeWriteBarrierKind(base_is_tagged, representation, field_type,
                                 input_type);
}

}  // namespace


Reduction ChangeLowering::LoadField(Node* node) {
  const FieldAccess& access = FieldAccessOf(node->op());
  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph()->zone(), 1, offset);
  NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  return Changed(node);
}


Reduction ChangeLowering::StoreField(Node* node) {
  const FieldAccess& access = FieldAccessOf(node->op());
  Type* type = NodeProperties::GetType(node->InputAt(1));
  WriteBarrierKind kind = ComputeWriteBarrierKind(
      access.base_is_tagged, access.machine_type.representation(),
      access.offset, access.type, type);
  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph()->zone(), 1, offset);
  NodeProperties::ChangeOp(node,
                           machine()->Store(StoreRepresentation(
                               access.machine_type.representation(), kind)));
  return Changed(node);
}

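// Turns an element {key} into a byte offset:
//
//   index = (key << ElementSizeLog2) + header_size - tag
//
// E.g., for float64 elements (shift 3), assuming a 16-byte header and the
// usual tag of 1, key 2 yields 2 * 8 + 15 = 31.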
Node* ChangeLowering::ComputeIndex(const ElementAccess& access,
                                   Node* const key) {
  Node* index = key;
  const int element_size_shift =
      ElementSizeLog2Of(access.machine_type.representation());
  if (element_size_shift) {
    index = graph()->NewNode(machine()->Word32Shl(), index,
                             jsgraph()->Int32Constant(element_size_shift));
  }
  const int fixed_offset = access.header_size - access.tag();
  if (fixed_offset) {
    index = graph()->NewNode(machine()->Int32Add(), index,
                             jsgraph()->Int32Constant(fixed_offset));
  }
  if (machine()->Is64()) {
    // TODO(turbofan): This is probably only correct for typed arrays, and only
    // if the typed arrays are at most 2GiB in size, which happens to match
    // exactly our current situation.
    index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
  }
  return index;
}


Reduction ChangeLowering::LoadElement(Node* node) {
  const ElementAccess& access = ElementAccessOf(node->op());
  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
  NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  return Changed(node);
}


Reduction ChangeLowering::StoreElement(Node* node) {
  const ElementAccess& access = ElementAccessOf(node->op());
  Type* type = NodeProperties::GetType(node->InputAt(2));
  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(),
                ComputeWriteBarrierKind(access.base_is_tagged,
                                        access.machine_type.representation(),
                                        access.type, type))));
  return Changed(node);
}


Reduction ChangeLowering::Allocate(Node* node) {
  PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
  if (pretenure == NOT_TENURED) {
    Callable callable = CodeFactory::AllocateInNewSpace(isolate());
    Node* target = jsgraph()->HeapConstant(callable.code());
    CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
        isolate(), jsgraph()->zone(), callable.descriptor(), 0,
        CallDescriptor::kNoFlags, Operator::kNoThrow);
    const Operator* op = common()->Call(descriptor);
    node->InsertInput(graph()->zone(), 0, target);
    node->InsertInput(graph()->zone(), 2, jsgraph()->NoContextConstant());
    NodeProperties::ChangeOp(node, op);
  } else {
    DCHECK_EQ(TENURED, pretenure);
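    // Tenured allocations become a call to the kAllocateInTargetSpace runtime
    // function. The inputs inserted below follow the runtime call convention:
    // the CEntryStub as code target, then the size (already input 1), the
    // Smi-encoded target-space flags, the external reference of the runtime
    // function, the argument count (2), and the context.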
    AllocationSpace space = OLD_SPACE;
    Runtime::FunctionId f = Runtime::kAllocateInTargetSpace;
    Operator::Properties props = node->op()->properties();
    CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
        jsgraph()->zone(), f, 2, props, CallDescriptor::kNeedsFrameState);
    ExternalReference ref(f, jsgraph()->isolate());
    int32_t flags = AllocateTargetSpace::encode(space);
    node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1));
    node->InsertInput(graph()->zone(), 2, jsgraph()->SmiConstant(flags));
    node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref));
    node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2));
    node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant());
    NodeProperties::ChangeOp(node, common()->Call(desc));
  }
  return Changed(node);
}


Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }


Graph* ChangeLowering::graph() const { return jsgraph()->graph(); }


CommonOperatorBuilder* ChangeLowering::common() const {
  return jsgraph()->common();
}


MachineOperatorBuilder* ChangeLowering::machine() const {
  return jsgraph()->machine();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8