// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/memory-lowering.h"

#include "src/codegen/interface-descriptors-inl.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/roots/roots-inl.h"
#include "src/sandbox/external-pointer.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#endif

namespace v8 {
namespace internal {
namespace compiler {

// An allocation group represents a set of allocations that have been folded
// together.
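// Folded allocations share a single bump-pointer reservation: the group's
// {size} node holds the (patched) reservation size and {node_ids_} records
// which values were allocated from it. Tracking membership also lets
// ComputeWriteBarrierKind() elide write barriers for stores into objects that
// this group just allocated in the young generation.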
class MemoryLowering::AllocationGroup final : public ZoneObject {
 public:
  AllocationGroup(Node* node, AllocationType allocation, Zone* zone);
  AllocationGroup(Node* node, AllocationType allocation, Node* size,
                  Zone* zone);
  ~AllocationGroup() = default;

  void Add(Node* object);
  bool Contains(Node* object) const;
  bool IsYoungGenerationAllocation() const {
    return allocation() == AllocationType::kYoung;
  }

  AllocationType allocation() const { return allocation_; }
  Node* size() const { return size_; }

 private:
  ZoneSet<NodeId> node_ids_;
  AllocationType const allocation_;
  Node* const size_;

  static inline AllocationType CheckAllocationType(AllocationType allocation) {
    // For a non-generational heap, all young allocations are redirected to
    // old space.
    if (FLAG_single_generation && allocation == AllocationType::kYoung) {
      return AllocationType::kOld;
    }
    return allocation;
  }

  DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
};

MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
                               JSGraphAssembler* graph_assembler,
                               AllocationFolding allocation_folding,
                               WriteBarrierAssertFailedCallback callback,
                               const char* function_debug_name)
    : isolate_(jsgraph->isolate()),
      zone_(zone),
      graph_(jsgraph->graph()),
      common_(jsgraph->common()),
      machine_(jsgraph->machine()),
      graph_assembler_(graph_assembler),
      allocation_folding_(allocation_folding),
      write_barrier_assert_failed_(callback),
      function_debug_name_(function_debug_name) {}

Zone* MemoryLowering::graph_zone() const { return graph()->zone(); }

Reduction MemoryLowering::Reduce(Node* node) {
  switch (node->opcode()) {
    case IrOpcode::kAllocate:
      // Allocate nodes were purged from the graph in effect-control
      // linearization.
      UNREACHABLE();
    case IrOpcode::kAllocateRaw:
      return ReduceAllocateRaw(node);
    case IrOpcode::kLoadFromObject:
    case IrOpcode::kLoadImmutableFromObject:
      return ReduceLoadFromObject(node);
    case IrOpcode::kLoadElement:
      return ReduceLoadElement(node);
    case IrOpcode::kLoadField:
      return ReduceLoadField(node);
    case IrOpcode::kStoreToObject:
    case IrOpcode::kInitializeImmutableInObject:
      return ReduceStoreToObject(node);
    case IrOpcode::kStoreElement:
      return ReduceStoreElement(node);
    case IrOpcode::kStoreField:
      return ReduceStoreField(node);
    case IrOpcode::kStore:
      return ReduceStore(node);
    default:
      return NoChange();
  }
}

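// Lazily builds and caches the Call operator used to invoke the Allocate
// builtin, so the call descriptor is only constructed once a lowered
// allocation actually needs the slow (stub call) path.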
void MemoryLowering::EnsureAllocateOperator() {
  if (allocate_operator_.is_set()) return;

  auto descriptor = AllocateDescriptor{};
  StubCallMode mode = isolate_ != nullptr ? StubCallMode::kCallCodeObject
                                          : StubCallMode::kCallBuiltinPointer;
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph_zone(), descriptor, descriptor.GetStackParameterCount(),
      CallDescriptor::kCanUseRoots, Operator::kNoThrow, mode);
  allocate_operator_.set(common()->Call(call_descriptor));
}

#if V8_ENABLE_WEBASSEMBLY
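// The Wasm instance is passed as a fixed parameter to every Wasm function;
// find the corresponding Parameter node among the uses of the Start node and
// cache it.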
Node* MemoryLowering::GetWasmInstanceNode() {
  if (wasm_instance_node_.is_set()) return wasm_instance_node_.get();
  for (Node* use : graph()->start()->uses()) {
    if (use->opcode() == IrOpcode::kParameter &&
        ParameterIndexOf(use->op()) == wasm::kWasmInstanceParameterIndex) {
      wasm_instance_node_.set(use);
      return use;
    }
  }
  UNREACHABLE();  // The instance node must have been created before.
}
#endif  // V8_ENABLE_WEBASSEMBLY

#define __ gasm()->

Reduction MemoryLowering::ReduceAllocateRaw(
    Node* node, AllocationType allocation_type,
    AllowLargeObjects allow_large_objects, AllocationState const** state_ptr) {
  DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
  DCHECK_IMPLIES(allocation_folding_ == AllocationFolding::kDoAllocationFolding,
                 state_ptr != nullptr);
  if (FLAG_single_generation && allocation_type == AllocationType::kYoung) {
    allocation_type = AllocationType::kOld;
  }
  // Code objects may have a maximum size smaller than kMaxHeapObjectSize due
  // to guard pages. If we need to support allocating code here we would need
  // to call MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
  DCHECK_NE(allocation_type, AllocationType::kCode);
  Node* value;
  Node* size = node->InputAt(0);
  Node* effect = node->InputAt(1);
  Node* control = node->InputAt(2);

  gasm()->InitializeEffectControl(effect, control);

  Node* allocate_builtin;
  if (isolate_ != nullptr) {
    if (allocation_type == AllocationType::kYoung) {
      if (allow_large_objects == AllowLargeObjects::kTrue) {
        allocate_builtin = __ AllocateInYoungGenerationStubConstant();
      } else {
        allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
      }
    } else {
      if (allow_large_objects == AllowLargeObjects::kTrue) {
        allocate_builtin = __ AllocateInOldGenerationStubConstant();
      } else {
        allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
      }
    }
  } else {
    // This lowering is used by Wasm, where we compile isolate-independent
    // code. Builtin calls simply encode the target builtin ID, which will
    // be patched to the builtin's address later.
#if V8_ENABLE_WEBASSEMBLY
    Builtin builtin;
    if (allocation_type == AllocationType::kYoung) {
      if (allow_large_objects == AllowLargeObjects::kTrue) {
        builtin = Builtin::kAllocateInYoungGeneration;
      } else {
        builtin = Builtin::kAllocateRegularInYoungGeneration;
      }
    } else {
      if (allow_large_objects == AllowLargeObjects::kTrue) {
        builtin = Builtin::kAllocateInOldGeneration;
      } else {
        builtin = Builtin::kAllocateRegularInOldGeneration;
      }
    }
    static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi");
    allocate_builtin =
        graph()->NewNode(common()->NumberConstant(static_cast<int>(builtin)));
#else
    UNREACHABLE();
#endif
  }

  // Determine the top/limit addresses.
  Node* top_address;
  Node* limit_address;
  if (isolate_ != nullptr) {
    top_address = __ ExternalConstant(
        allocation_type == AllocationType::kYoung
            ? ExternalReference::new_space_allocation_top_address(isolate())
            : ExternalReference::old_space_allocation_top_address(isolate()));
    limit_address = __ ExternalConstant(
        allocation_type == AllocationType::kYoung
            ? ExternalReference::new_space_allocation_limit_address(isolate())
            : ExternalReference::old_space_allocation_limit_address(isolate()));
  } else {
    // Wasm mode: we compile isolate-independent code, so the allocation
    // top/limit addresses are loaded from the Wasm instance at runtime.
#if V8_ENABLE_WEBASSEMBLY
    Node* instance_node = GetWasmInstanceNode();
    int top_address_offset =
        allocation_type == AllocationType::kYoung
            ? WasmInstanceObject::kNewAllocationTopAddressOffset
            : WasmInstanceObject::kOldAllocationTopAddressOffset;
    int limit_address_offset =
        allocation_type == AllocationType::kYoung
            ? WasmInstanceObject::kNewAllocationLimitAddressOffset
            : WasmInstanceObject::kOldAllocationLimitAddressOffset;
    top_address =
        __ Load(MachineType::Pointer(), instance_node,
                __ IntPtrConstant(top_address_offset - kHeapObjectTag));
    limit_address =
        __ Load(MachineType::Pointer(), instance_node,
                __ IntPtrConstant(limit_address_offset - kHeapObjectTag));
#else
    UNREACHABLE();
#endif  // V8_ENABLE_WEBASSEMBLY
  }

  // Check if we can fold this allocation into a previous allocation
  // represented by the incoming {state}.
  IntPtrMatcher m(size);
  if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new &&
      allocation_folding_ == AllocationFolding::kDoAllocationFolding) {
    intptr_t const object_size = m.ResolvedValue();
    AllocationState const* state = *state_ptr;
    if (state->size() <= kMaxRegularHeapObjectSize - object_size &&
        state->group()->allocation() == allocation_type) {
      // We can fold this Allocate {node} into the allocation {group}
      // represented by the given {state}. Compute the upper bound for
      // the new {state}.
      intptr_t const state_size = state->size() + object_size;

      // Update the reservation check to the actual maximum upper bound.
      AllocationGroup* const group = state->group();
      if (machine()->Is64()) {
        if (OpParameter<int64_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(group->size(),
                                   common()->Int64Constant(state_size));
        }
      } else {
        if (OpParameter<int32_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(
              group->size(),
              common()->Int32Constant(static_cast<int32_t>(state_size)));
        }
      }

      // Update the allocation top with the new object allocation.
      // TODO(bmeurer): Defer writing back top as much as possible.
      Node* top = __ IntAdd(state->top(), size);
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the effective inner allocated address.
      value = __ BitcastWordToTagged(
          __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
      effect = gasm()->effect();
      control = gasm()->control();

      // Extend the allocation {group}.
      group->Add(value);
      *state_ptr =
          AllocationState::Open(group, state_size, top, effect, zone());
    } else {
      auto call_runtime = __ MakeDeferredLabel();
      auto done = __ MakeLabel(MachineType::PointerRepresentation());

      // Set up a mutable reservation size node; it will be patched as we fold
      // additional allocations into this new group.
      Node* reservation_size = __ UniqueIntPtrConstant(object_size);

      // Load allocation top and limit.
      Node* top =
          __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
      Node* limit =
          __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

      // Check if we need to collect garbage before we can start bump pointer
      // allocation (always done for folded allocations).
      Node* check = __ UintLessThan(__ IntAdd(top, reservation_size), limit);

      __ GotoIfNot(check, &call_runtime);
      __ Goto(&done, top);

      __ Bind(&call_runtime);
      {
        EnsureAllocateOperator();
        Node* vfalse = __ BitcastTaggedToWord(__ Call(
            allocate_operator_.get(), allocate_builtin, reservation_size));
        vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
        __ Goto(&done, vfalse);
      }

      __ Bind(&done);

      // Compute the new top and write it back.
      top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the initial object address.
      value = __ BitcastWordToTagged(
          __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
      effect = gasm()->effect();
      control = gasm()->control();

      // Start a new allocation group.
      AllocationGroup* group = zone()->New<AllocationGroup>(
          value, allocation_type, reservation_size, zone());
      *state_ptr =
          AllocationState::Open(group, object_size, top, effect, zone());
    }
  } else {
    auto call_runtime = __ MakeDeferredLabel();
    auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);

    // Load allocation top and limit.
    Node* top =
        __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
    Node* limit =
        __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

    // Compute the new top.
    Node* new_top = __ IntAdd(top, size);

    // Check if we can do bump pointer allocation here.
    Node* check = __ UintLessThan(new_top, limit);
    __ GotoIfNot(check, &call_runtime);
    if (allow_large_objects == AllowLargeObjects::kTrue) {
      __ GotoIfNot(
          __ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
          &call_runtime);
    }
    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                 kNoWriteBarrier),
             top_address, __ IntPtrConstant(0), new_top);
    __ Goto(&done, __ BitcastWordToTagged(
                       __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));

    __ Bind(&call_runtime);
    EnsureAllocateOperator();
    __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));

    __ Bind(&done);
    value = done.PhiAt(0);
    effect = gasm()->effect();
    control = gasm()->control();

    if (state_ptr) {
      // Create an unfoldable allocation group.
      AllocationGroup* group =
          zone()->New<AllocationGroup>(value, allocation_type, zone());
      *state_ptr = AllocationState::Closed(group, effect, zone());
    }
  }

  return Replace(value);
}

Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
  DCHECK(node->opcode() == IrOpcode::kLoadFromObject ||
         node->opcode() == IrOpcode::kLoadImmutableFromObject);
  ObjectAccess const& access = ObjectAccessOf(node->op());

  MachineType machine_type = access.machine_type;

  if (machine_type.IsMapWord()) {
    CHECK_EQ(machine_type.semantic(), MachineSemantic::kAny);
    return ReduceLoadMap(node);
  }

  MachineRepresentation rep = machine_type.representation();
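  // Fields wider than a tagged word are only guaranteed to be kTaggedSize
  // aligned within the object, so use an UnalignedLoad when the machine does
  // not support unaligned accesses of this representation directly.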
  const Operator* load_op =
      ElementSizeInBytes(rep) > kTaggedSize &&
              !machine()->UnalignedLoadSupported(machine_type.representation())
          ? machine()->UnalignedLoad(machine_type)
          : machine()->Load(machine_type);
  NodeProperties::ChangeOp(node, load_op);
  return Changed(node);
}

Reduction MemoryLowering::ReduceLoadElement(Node* node) {
  DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* index = node->InputAt(1);
  node->ReplaceInput(1, ComputeIndex(access, index));
  MachineType type = access.machine_type;
  DCHECK(!type.IsMapWord());
  NodeProperties::ChangeOp(node, machine()->Load(type));
  return Changed(node);
}

Node* MemoryLowering::DecodeExternalPointer(
    Node* node, ExternalPointerTag external_pointer_tag) {
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
  DCHECK(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL);
  DCHECK(node->opcode() == IrOpcode::kLoad);
  DCHECK_EQ(kExternalPointerSize, kUInt32Size);
  DCHECK_NE(kExternalPointerNullTag, external_pointer_tag);
  Node* effect = NodeProperties::GetEffectInput(node);
  Node* control = NodeProperties::GetControlInput(node);
  __ InitializeEffectControl(effect, control);

  // Clone the load node and put it here.
  // TODO(turbofan): consider adding GraphAssembler::Clone() suitable for
  // cloning nodes from arbitrary locations in effect/control chains.
  STATIC_ASSERT(kExternalPointerIndexShift > kSystemPointerSizeLog2);
  Node* shifted_index = __ AddNode(graph()->CloneNode(node));
  Node* shift_amount =
      __ Int32Constant(kExternalPointerIndexShift - kSystemPointerSizeLog2);
  Node* offset = __ Word32Shr(shifted_index, shift_amount);

  // Uncomment this to generate a breakpoint for debugging purposes.
  // __ DebugBreak();

  // Decode loaded external pointer.
  //
  // Here we access the external pointer table through an ExternalReference.
  // Alternatively, we could also hardcode the address of the table since it is
  // never reallocated. However, in that case we must be able to guarantee that
  // the generated code is never executed under a different Isolate, as that
  // would allow access to external objects from different Isolates. It also
  // would break if the code is serialized/deserialized at some point.
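  // Schematically: the loaded 32-bit value holds the table index shifted left
  // by kExternalPointerIndexShift, so
  //   offset      = index * kSystemPointerSize
  //   decoded_ptr = table[index] & ~external_pointer_tag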
  Node* table_address = __ ExternalConstant(
      ExternalReference::external_pointer_table_address(isolate()));
  Node* table = __ Load(MachineType::Pointer(), table_address,
                        Internals::kExternalPointerTableBufferOffset);
  Node* decoded_ptr =
      __ Load(MachineType::Pointer(), table, __ ChangeUint32ToUint64(offset));
  Node* tag = __ IntPtrConstant(~external_pointer_tag);
  decoded_ptr = __ WordAnd(decoded_ptr, tag);
  return decoded_ptr;
#else
  return node;
#endif  // V8_SANDBOXED_EXTERNAL_POINTERS
}

Reduction MemoryLowering::ReduceLoadMap(Node* node) {
#ifdef V8_MAP_PACKING
  NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));

  Node* effect = NodeProperties::GetEffectInput(node);
  Node* control = NodeProperties::GetControlInput(node);
  __ InitializeEffectControl(effect, control);

  node = __ AddNode(graph()->CloneNode(node));
  return Replace(__ UnpackMapWord(node));
#else
  NodeProperties::ChangeOp(node, machine()->Load(MachineType::TaggedPointer()));
  return Changed(node);
#endif
}

Reduction MemoryLowering::ReduceLoadField(Node* node) {
  DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  Node* offset = __ IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph_zone(), 1, offset);
  MachineType type = access.machine_type;
  if (V8_SANDBOXED_EXTERNAL_POINTERS_BOOL &&
      access.type.Is(Type::ExternalPointer())) {
    // External pointer table indices are stored as 32-bit numbers.
    type = MachineType::Uint32();
  }

  if (type.IsMapWord()) {
    DCHECK(!access.type.Is(Type::ExternalPointer()));
    return ReduceLoadMap(node);
  }

  NodeProperties::ChangeOp(node, machine()->Load(type));

#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
  if (access.type.Is(Type::ExternalPointer())) {
    ExternalPointerTag tag = access.external_pointer_tag;
    DCHECK_NE(kExternalPointerNullTag, tag);
    node = DecodeExternalPointer(node, tag);
    return Replace(node);
  }
#endif

  return Changed(node);
}

Reduction MemoryLowering::ReduceStoreToObject(Node* node,
                                              AllocationState const* state) {
  DCHECK(node->opcode() == IrOpcode::kStoreToObject ||
         node->opcode() == IrOpcode::kInitializeImmutableInObject);
  ObjectAccess const& access = ObjectAccessOf(node->op());
  Node* object = node->InputAt(0);
  Node* value = node->InputAt(2);

  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      node, object, value, state, access.write_barrier_kind);
  DCHECK(!access.machine_type.IsMapWord());
  MachineRepresentation rep = access.machine_type.representation();
  StoreRepresentation store_rep(rep, write_barrier_kind);
  const Operator* store_op = ElementSizeInBytes(rep) > kTaggedSize &&
                                     !machine()->UnalignedStoreSupported(rep)
                                 ? machine()->UnalignedStore(rep)
                                 : machine()->Store(store_rep);
  NodeProperties::ChangeOp(node, store_op);
  return Changed(node);
}

Reduction MemoryLowering::ReduceStoreElement(Node* node,
                                             AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* object = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  node->ReplaceInput(1, ComputeIndex(access, index));
  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      node, object, value, state, access.write_barrier_kind);
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  return Changed(node);
}

Reduction MemoryLowering::ReduceStoreField(Node* node,
                                           AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  // External pointers must never be stored by optimized code.
  DCHECK_IMPLIES(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL,
                 !access.type.Is(Type::ExternalPointer()));
  // SandboxedPointers are not currently stored by optimized code.
  DCHECK(!access.type.Is(Type::SandboxedPointer()));
  MachineType machine_type = access.machine_type;
  Node* object = node->InputAt(0);
  Node* value = node->InputAt(1);

  Node* effect = NodeProperties::GetEffectInput(node);
  Node* control = NodeProperties::GetControlInput(node);
  __ InitializeEffectControl(effect, control);

  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      node, object, value, state, access.write_barrier_kind);
  Node* offset = __ IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph_zone(), 1, offset);

  if (machine_type.IsMapWord()) {
    machine_type = MachineType::TaggedPointer();
#ifdef V8_MAP_PACKING
    Node* mapword = __ PackMapWord(TNode<Map>::UncheckedCast(value));
    node->ReplaceInput(2, mapword);
#endif
  }
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(machine_type.representation(),
                                                 write_barrier_kind)));
  return Changed(node);
}

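// For raw Store nodes the store representation is already final; only the
// write barrier kind may need to be updated based on the allocation {state}.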
Reduction MemoryLowering::ReduceStore(Node* node,
                                      AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStore, node->opcode());
  StoreRepresentation representation = StoreRepresentationOf(node->op());
  Node* object = node->InputAt(0);
  Node* value = node->InputAt(2);
  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      node, object, value, state, representation.write_barrier_kind());
  if (write_barrier_kind != representation.write_barrier_kind()) {
    NodeProperties::ChangeOp(
        node, machine()->Store(StoreRepresentation(
                  representation.representation(), write_barrier_kind)));
    return Changed(node);
  }
  return NoChange();
}

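// Computes the machine-level index for an element access: the element index
// scaled by the element size, plus the header size, minus the heap object tag.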
Node* MemoryLowering::ComputeIndex(ElementAccess const& access, Node* index) {
  int const element_size_shift =
      ElementSizeLog2Of(access.machine_type.representation());
  if (element_size_shift) {
    index = __ WordShl(index, __ IntPtrConstant(element_size_shift));
  }
  int const fixed_offset = access.header_size - access.tag();
  if (fixed_offset) {
    index = __ IntAdd(index, __ IntPtrConstant(fixed_offset));
  }
  return index;
}

#undef __

namespace {

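// Returns false for values that can never require a write barrier: words
// bitcast to tagged Smis and heap constants that refer to immortal immovable
// roots. Everything else is conservatively assumed to need one.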
bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
  while (true) {
    switch (value->opcode()) {
      case IrOpcode::kBitcastWordToTaggedSigned:
        return false;
      case IrOpcode::kHeapConstant: {
        RootIndex root_index;
        if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
                                                &root_index) &&
            RootsTable::IsImmortalImmovable(root_index)) {
          return false;
        }
        break;
      }
      default:
        break;
    }
    return true;
  }
}

}  // namespace

Reduction MemoryLowering::ReduceAllocateRaw(Node* node) {
  DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
  const AllocateParameters& allocation = AllocateParametersOf(node->op());
  return ReduceAllocateRaw(node, allocation.allocation_type(),
                           allocation.allow_large_objects(), nullptr);
}

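// The requested write barrier can be skipped if {object} was just allocated in
// the young generation by the current allocation group, if {value} can never
// require a barrier, or if write barriers are disabled altogether. If a store
// was marked kAssertNoWriteBarrier but the barrier cannot be removed, the
// failure is reported through the {write_barrier_assert_failed_} callback.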
WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind(
    Node* node, Node* object, Node* value, AllocationState const* state,
    WriteBarrierKind write_barrier_kind) {
  if (state && state->IsYoungGenerationAllocation() &&
      state->group()->Contains(object)) {
    write_barrier_kind = kNoWriteBarrier;
  }
  if (!ValueNeedsWriteBarrier(value, isolate())) {
    write_barrier_kind = kNoWriteBarrier;
  }
  if (FLAG_disable_write_barriers) {
    write_barrier_kind = kNoWriteBarrier;
  }
  if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
    write_barrier_assert_failed_(node, object, function_debug_name_, zone());
  }
  return write_barrier_kind;
}

MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
                                                 AllocationType allocation,
                                                 Zone* zone)
    : node_ids_(zone),
      allocation_(CheckAllocationType(allocation)),
      size_(nullptr) {
  node_ids_.insert(node->id());
}

MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
                                                 AllocationType allocation,
                                                 Node* size, Zone* zone)
    : node_ids_(zone),
      allocation_(CheckAllocationType(allocation)),
      size_(size) {
  node_ids_.insert(node->id());
}

void MemoryLowering::AllocationGroup::Add(Node* node) {
  node_ids_.insert(node->id());
}

bool MemoryLowering::AllocationGroup::Contains(Node* node) const {
  // Additions should stay within the same allocated object, so it's safe to
  // ignore them.
  while (node_ids_.find(node->id()) == node_ids_.end()) {
    switch (node->opcode()) {
      case IrOpcode::kBitcastTaggedToWord:
      case IrOpcode::kBitcastWordToTagged:
      case IrOpcode::kInt32Add:
      case IrOpcode::kInt64Add:
        node = NodeProperties::GetValueInput(node, 0);
        break;
      default:
        return false;
    }
  }
  return true;
}

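// Empty allocation state: no allocation group is currently being tracked.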
MemoryLowering::AllocationState::AllocationState()
    : group_(nullptr),
      size_(std::numeric_limits<int>::max()),
      top_(nullptr),
      effect_(nullptr) {}

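// Closed allocation state: {group} exists but cannot accept further folded
// allocations (the size is saturated, so the folding check always fails).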
MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
                                                 Node* effect)
    : group_(group),
      size_(std::numeric_limits<int>::max()),
      top_(nullptr),
      effect_(effect) {}

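// Open allocation state: {size} bytes of {group}'s reservation are in use and
// {top} is the current allocation top, so subsequent allocations may still be
// folded into the group.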
MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
                                                 intptr_t size, Node* top,
                                                 Node* effect)
    : group_(group), size_(size), top_(top), effect_(effect) {}

bool MemoryLowering::AllocationState::IsYoungGenerationAllocation() const {
  return group() && group()->IsYoungGenerationAllocation();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8