// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/memory-lowering.h"

#include "src/codegen/interface-descriptors.h"
#include "src/common/external-pointer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/roots/roots-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

// An allocation group represents a set of allocations that have been folded
// together.
class MemoryLowering::AllocationGroup final : public ZoneObject {
 public:
  AllocationGroup(Node* node, AllocationType allocation, Zone* zone);
  AllocationGroup(Node* node, AllocationType allocation, Node* size,
                  Zone* zone);
  ~AllocationGroup() = default;

  void Add(Node* object);
  bool Contains(Node* object) const;
  bool IsYoungGenerationAllocation() const {
    return allocation() == AllocationType::kYoung;
  }

  AllocationType allocation() const { return allocation_; }
  Node* size() const { return size_; }

 private:
  ZoneSet<NodeId> node_ids_;
  AllocationType const allocation_;
  Node* const size_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
};

MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
                               JSGraphAssembler* graph_assembler,
                               PoisoningMitigationLevel poisoning_level,
                               AllocationFolding allocation_folding,
                               WriteBarrierAssertFailedCallback callback,
                               const char* function_debug_name)
    : isolate_(jsgraph->isolate()),
      zone_(zone),
      graph_(jsgraph->graph()),
      common_(jsgraph->common()),
      machine_(jsgraph->machine()),
      graph_assembler_(graph_assembler),
      allocation_folding_(allocation_folding),
      poisoning_level_(poisoning_level),
      write_barrier_assert_failed_(callback),
      function_debug_name_(function_debug_name) {}

Zone* MemoryLowering::graph_zone() const { return graph()->zone(); }

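// Dispatches a single node to the matching lowering routine based on its
// opcode. Nodes that are not memory operations are left unchanged.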
Reduction MemoryLowering::Reduce(Node* node) {
  switch (node->opcode()) {
    case IrOpcode::kAllocate:
      // Allocate nodes were purged from the graph in effect-control
      // linearization.
      UNREACHABLE();
    case IrOpcode::kAllocateRaw:
      return ReduceAllocateRaw(node);
    case IrOpcode::kLoadFromObject:
      return ReduceLoadFromObject(node);
    case IrOpcode::kLoadElement:
      return ReduceLoadElement(node);
    case IrOpcode::kLoadField:
      return ReduceLoadField(node);
    case IrOpcode::kStoreToObject:
      return ReduceStoreToObject(node);
    case IrOpcode::kStoreElement:
      return ReduceStoreElement(node);
    case IrOpcode::kStoreField:
      return ReduceStoreField(node);
    case IrOpcode::kStore:
      return ReduceStore(node);
    default:
      return NoChange();
  }
}

#define __ gasm()->

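// Lowers a kAllocateRaw node to an inline bump-pointer allocation with a
// stub-call fallback. When allocation folding is enabled and the size is a
// small constant, the allocation is folded into the allocation group tracked
// by {state_ptr}, so that several objects share a single reservation check.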
Reduction MemoryLowering::ReduceAllocateRaw(
    Node* node, AllocationType allocation_type,
    AllowLargeObjects allow_large_objects, AllocationState const** state_ptr) {
  DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
  DCHECK_IMPLIES(allocation_folding_ == AllocationFolding::kDoAllocationFolding,
                 state_ptr != nullptr);
  // Code objects may have a maximum size smaller than kMaxHeapObjectSize due
  // to guard pages. If we need to support allocating code here we would need
  // to call MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
  DCHECK_NE(allocation_type, AllocationType::kCode);
  Node* value;
  Node* size = node->InputAt(0);
  Node* effect = node->InputAt(1);
  Node* control = node->InputAt(2);

  gasm()->InitializeEffectControl(effect, control);

  Node* allocate_builtin;
  if (allocation_type == AllocationType::kYoung) {
    if (allow_large_objects == AllowLargeObjects::kTrue) {
      allocate_builtin = __ AllocateInYoungGenerationStubConstant();
    } else {
      allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
    }
  } else {
    if (allow_large_objects == AllowLargeObjects::kTrue) {
      allocate_builtin = __ AllocateInOldGenerationStubConstant();
    } else {
      allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
    }
  }

  // Determine the top/limit addresses.
  Node* top_address = __ ExternalConstant(
      allocation_type == AllocationType::kYoung
          ? ExternalReference::new_space_allocation_top_address(isolate())
          : ExternalReference::old_space_allocation_top_address(isolate()));
  Node* limit_address = __ ExternalConstant(
      allocation_type == AllocationType::kYoung
          ? ExternalReference::new_space_allocation_limit_address(isolate())
          : ExternalReference::old_space_allocation_limit_address(isolate()));

  // Check if we can fold this allocation into a previous allocation
  // represented by the incoming {state}.
  IntPtrMatcher m(size);
  if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new &&
      allocation_folding_ == AllocationFolding::kDoAllocationFolding) {
    intptr_t const object_size = m.ResolvedValue();
    AllocationState const* state = *state_ptr;
    if (state->size() <= kMaxRegularHeapObjectSize - object_size &&
        state->group()->allocation() == allocation_type) {
      // We can fold this Allocate {node} into the allocation {group}
      // represented by the given {state}. Compute the upper bound for
      // the new {state}.
      intptr_t const state_size = state->size() + object_size;

      // Update the reservation check to the actual maximum upper bound.
      AllocationGroup* const group = state->group();
      if (machine()->Is64()) {
        if (OpParameter<int64_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(group->size(),
                                   common()->Int64Constant(state_size));
        }
      } else {
        if (OpParameter<int32_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(
              group->size(),
              common()->Int32Constant(static_cast<int32_t>(state_size)));
        }
      }

      // Update the allocation top with the new object allocation.
      // TODO(bmeurer): Defer writing back top as much as possible.
      Node* top = __ IntAdd(state->top(), size);
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the effective inner allocated address.
      value = __ BitcastWordToTagged(
          __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
      effect = gasm()->effect();
      control = gasm()->control();

      // Extend the allocation {group}.
      group->Add(value);
      *state_ptr =
          AllocationState::Open(group, state_size, top, effect, zone());
    } else {
      auto call_runtime = __ MakeDeferredLabel();
      auto done = __ MakeLabel(MachineType::PointerRepresentation());

      // Set up a mutable reservation size node; it will be patched as we fold
      // additional allocations into this new group.
      Node* size = __ UniqueIntPtrConstant(object_size);

      // Load allocation top and limit.
      Node* top =
          __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
      Node* limit =
          __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

      // Check if we need to collect garbage before we can start bump pointer
      // allocation (always done for folded allocations).
      Node* check = __ UintLessThan(__ IntAdd(top, size), limit);

      __ GotoIfNot(check, &call_runtime);
      __ Goto(&done, top);

      __ Bind(&call_runtime);
      {
        if (!allocate_operator_.is_set()) {
          auto descriptor = AllocateDescriptor{};
          auto call_descriptor = Linkage::GetStubCallDescriptor(
              graph_zone(), descriptor, descriptor.GetStackParameterCount(),
              CallDescriptor::kCanUseRoots, Operator::kNoThrow);
          allocate_operator_.set(common()->Call(call_descriptor));
        }
        Node* vfalse = __ BitcastTaggedToWord(
            __ Call(allocate_operator_.get(), allocate_builtin, size));
        vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
        __ Goto(&done, vfalse);
      }

      __ Bind(&done);

      // Compute the new top and write it back.
      top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the initial object address.
      value = __ BitcastWordToTagged(
          __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
      effect = gasm()->effect();
      control = gasm()->control();

      // Start a new allocation group.
      AllocationGroup* group =
          zone()->New<AllocationGroup>(value, allocation_type, size, zone());
      *state_ptr =
          AllocationState::Open(group, object_size, top, effect, zone());
    }
  } else {
    auto call_runtime = __ MakeDeferredLabel();
    auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);

    // Load allocation top and limit.
    Node* top =
        __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
    Node* limit =
        __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

    // Compute the new top.
    Node* new_top = __ IntAdd(top, size);

    // Check if we can do bump pointer allocation here.
    Node* check = __ UintLessThan(new_top, limit);
    __ GotoIfNot(check, &call_runtime);
    if (allow_large_objects == AllowLargeObjects::kTrue) {
      __ GotoIfNot(
          __ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
          &call_runtime);
    }
    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                 kNoWriteBarrier),
             top_address, __ IntPtrConstant(0), new_top);
    __ Goto(&done, __ BitcastWordToTagged(
                       __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));

    __ Bind(&call_runtime);
    if (!allocate_operator_.is_set()) {
      auto descriptor = AllocateDescriptor{};
      auto call_descriptor = Linkage::GetStubCallDescriptor(
          graph_zone(), descriptor, descriptor.GetStackParameterCount(),
          CallDescriptor::kCanUseRoots, Operator::kNoThrow);
      allocate_operator_.set(common()->Call(call_descriptor));
    }
    __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));

    __ Bind(&done);
    value = done.PhiAt(0);
    effect = gasm()->effect();
    control = gasm()->control();

    if (state_ptr) {
      // Create an unfoldable allocation group.
      AllocationGroup* group =
          zone()->New<AllocationGroup>(value, allocation_type, zone());
      *state_ptr = AllocationState::Closed(group, effect, zone());
    }
  }

  return Replace(value);
}

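// Lowers a kLoadFromObject node by replacing its operator with a raw machine
// Load of the access's machine type.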
Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
  DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
  ObjectAccess const& access = ObjectAccessOf(node->op());
  NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  return Changed(node);
}

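// Lowers a kLoadElement node: the element index is turned into a byte offset
// via ComputeIndex() and the operator becomes a (possibly poisoned) machine
// Load.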
Reduction MemoryLowering::ReduceLoadElement(Node* node) {
  DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* index = node->InputAt(1);
  node->ReplaceInput(1, ComputeIndex(access, index));
  MachineType type = access.machine_type;
  if (NeedsPoisoning(access.load_sensitivity)) {
    NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
  } else {
    NodeProperties::ChangeOp(node, machine()->Load(type));
  }
  return Changed(node);
}

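// With the heap sandbox, external pointers are stored as 32-bit indices into
// the external pointer table. This translates the loaded index into the
// actual pointer (untagging it via XOR if a tag is provided). Without
// V8_HEAP_SANDBOX this is a no-op.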
Node* MemoryLowering::DecodeExternalPointer(
    Node* node, ExternalPointerTag external_pointer_tag) {
#ifdef V8_HEAP_SANDBOX
  DCHECK(V8_HEAP_SANDBOX_BOOL);
  DCHECK(node->opcode() == IrOpcode::kLoad ||
         node->opcode() == IrOpcode::kPoisonedLoad);
  Node* effect = NodeProperties::GetEffectInput(node);
  Node* control = NodeProperties::GetControlInput(node);
  __ InitializeEffectControl(effect, control);

  // Clone the load node and put it here.
  // TODO(turbofan): consider adding GraphAssembler::Clone() suitable for
  // cloning nodes from arbitrary locations in effect/control chains.
  Node* index = __ AddNode(graph()->CloneNode(node));

  // Uncomment this to generate a breakpoint for debugging purposes.
  // __ DebugBreak();

  // Decode the loaded external pointer.
  STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
  Node* external_pointer_table_address = __ ExternalConstant(
      ExternalReference::external_pointer_table_address(isolate()));
  Node* table = __ Load(MachineType::Pointer(), external_pointer_table_address,
                        Internals::kExternalPointerTableBufferOffset);
  // TODO(v8:10391, saelo): bounds check if table is not caged
  Node* offset = __ Int32Mul(index, __ Int32Constant(8));
  Node* decoded_ptr =
      __ Load(MachineType::Pointer(), table, __ ChangeUint32ToUint64(offset));
  if (external_pointer_tag != 0) {
    Node* tag = __ IntPtrConstant(external_pointer_tag);
    decoded_ptr = __ WordXor(decoded_ptr, tag);
  }
  return decoded_ptr;
#else
  return node;
#endif  // V8_HEAP_SANDBOX
}

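// Lowers a kLoadField node: the field offset (minus the heap object tag) is
// inserted as an explicit index input and the operator becomes a machine
// Load. Sandboxed external pointer fields are loaded as 32-bit table indices
// and then decoded via DecodeExternalPointer().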
Reduction MemoryLowering::ReduceLoadField(Node* node) {
  DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  Node* offset = __ IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph_zone(), 1, offset);
  MachineType type = access.machine_type;
  if (V8_HEAP_SANDBOX_BOOL &&
      access.type.Is(Type::SandboxedExternalPointer())) {
    // External pointer table indices are 32-bit numbers.
    type = MachineType::Uint32();
  }
  if (NeedsPoisoning(access.load_sensitivity)) {
    NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
  } else {
    NodeProperties::ChangeOp(node, machine()->Load(type));
  }
  if (V8_HEAP_SANDBOX_BOOL &&
      access.type.Is(Type::SandboxedExternalPointer())) {
#ifdef V8_HEAP_SANDBOX
    ExternalPointerTag tag = access.external_pointer_tag;
#else
    ExternalPointerTag tag = kExternalPointerNullTag;
#endif
    node = DecodeExternalPointer(node, tag);
    return Replace(node);
  } else {
    DCHECK(!access.type.Is(Type::SandboxedExternalPointer()));
  }
  return Changed(node);
}

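// Lowers a kStoreToObject node: computes the required write barrier kind for
// the stored value and replaces the operator with a machine Store.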
Reduction MemoryLowering::ReduceStoreToObject(Node* node,
                                              AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
  ObjectAccess const& access = ObjectAccessOf(node->op());
  Node* object = node->InputAt(0);
  Node* value = node->InputAt(2);
  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      node, object, value, state, access.write_barrier_kind);
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  return Changed(node);
}

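// Lowers a kStoreElement node: the element index is turned into a byte offset
// via ComputeIndex() and the operator becomes a machine Store with the
// computed write barrier kind.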
Reduction MemoryLowering::ReduceStoreElement(Node* node,
                                             AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* object = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  node->ReplaceInput(1, ComputeIndex(access, index));
  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      node, object, value, state, access.write_barrier_kind);
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  return Changed(node);
}

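// Lowers a kStoreField node: the field offset (minus the heap object tag) is
// inserted as an explicit index input and the operator becomes a machine
// Store with the computed write barrier kind.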
Reduction MemoryLowering::ReduceStoreField(Node* node,
                                           AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  // External pointers must never be stored by optimized code.
  DCHECK_IMPLIES(V8_HEAP_SANDBOX_BOOL,
                 !access.type.Is(Type::ExternalPointer()) &&
                     !access.type.Is(Type::SandboxedExternalPointer()));
  Node* object = node->InputAt(0);
  Node* value = node->InputAt(1);
  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      node, object, value, state, access.write_barrier_kind);
  Node* offset = __ IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph_zone(), 1, offset);
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  return Changed(node);
}

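// Lowers a kStore node: only the write barrier kind may change here; if the
// recomputed kind matches the one already on the operator, nothing changes.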
Reduction MemoryLowering::ReduceStore(Node* node,
                                      AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStore, node->opcode());
  StoreRepresentation representation = StoreRepresentationOf(node->op());
  Node* object = node->InputAt(0);
  Node* value = node->InputAt(2);
  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      node, object, value, state, representation.write_barrier_kind());
  if (write_barrier_kind != representation.write_barrier_kind()) {
    NodeProperties::ChangeOp(
        node, machine()->Store(StoreRepresentation(
                  representation.representation(), write_barrier_kind)));
    return Changed(node);
  }
  return NoChange();
}

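// Converts an element index into a byte offset relative to the tagged object
// pointer: (index << log2(element size)) + header size - heap object tag.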
Node* MemoryLowering::ComputeIndex(ElementAccess const& access, Node* index) {
  int const element_size_shift =
      ElementSizeLog2Of(access.machine_type.representation());
  if (element_size_shift) {
    index = __ WordShl(index, __ IntPtrConstant(element_size_shift));
  }
  int const fixed_offset = access.header_size - access.tag();
  if (fixed_offset) {
    index = __ IntAdd(index, __ IntPtrConstant(fixed_offset));
  }
  return index;
}

#undef __

namespace {

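// Returns false for values that can never require a write barrier: Smis
// (tagged-signed bitcasts) and immortal immovable roots; everything else is
// conservatively assumed to need one.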
bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
  while (true) {
    switch (value->opcode()) {
      case IrOpcode::kBitcastWordToTaggedSigned:
        return false;
      case IrOpcode::kHeapConstant: {
        RootIndex root_index;
        if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
                                                &root_index) &&
            RootsTable::IsImmortalImmovable(root_index)) {
          return false;
        }
        break;
      }
      default:
        break;
    }
    return true;
  }
}

}  // namespace

Reduction MemoryLowering::ReduceAllocateRaw(Node* node) {
  DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
  const AllocateParameters& allocation = AllocateParametersOf(node->op());
  return ReduceAllocateRaw(node, allocation.allocation_type(),
                           allocation.allow_large_objects(), nullptr);
}

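// Determines the write barrier kind actually needed for a store. The barrier
// can be elided when the object was just allocated in the young generation
// within the current allocation group, or when the stored value can never
// require one. A kAssertNoWriteBarrier that survives these checks triggers
// the configured assert-failed callback.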
WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind(
    Node* node, Node* object, Node* value, AllocationState const* state,
    WriteBarrierKind write_barrier_kind) {
  if (state && state->IsYoungGenerationAllocation() &&
      state->group()->Contains(object)) {
    write_barrier_kind = kNoWriteBarrier;
  }
  if (!ValueNeedsWriteBarrier(value, isolate())) {
    write_barrier_kind = kNoWriteBarrier;
  }
  if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
    write_barrier_assert_failed_(node, object, function_debug_name_, zone());
  }
  return write_barrier_kind;
}

bool MemoryLowering::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
  // Safe loads do not need poisoning.
  if (load_sensitivity == LoadSensitivity::kSafe) return false;

  switch (poisoning_level_) {
    case PoisoningMitigationLevel::kDontPoison:
      return false;
    case PoisoningMitigationLevel::kPoisonAll:
      return true;
    case PoisoningMitigationLevel::kPoisonCriticalOnly:
      return load_sensitivity == LoadSensitivity::kCritical;
  }
  UNREACHABLE();
}

MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
                                                 AllocationType allocation,
                                                 Zone* zone)
    : node_ids_(zone), allocation_(allocation), size_(nullptr) {
  node_ids_.insert(node->id());
}

MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
                                                 AllocationType allocation,
                                                 Node* size, Zone* zone)
    : node_ids_(zone), allocation_(allocation), size_(size) {
  node_ids_.insert(node->id());
}

void MemoryLowering::AllocationGroup::Add(Node* node) {
  node_ids_.insert(node->id());
}

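// Checks whether {node} refers to an object allocated in this group, looking
// through bitcasts and additions back to the originating allocation.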
bool MemoryLowering::AllocationGroup::Contains(Node* node) const {
  // Additions should stay within the same allocated object, so it's safe to
  // ignore them.
  while (node_ids_.find(node->id()) == node_ids_.end()) {
    switch (node->opcode()) {
      case IrOpcode::kBitcastTaggedToWord:
      case IrOpcode::kBitcastWordToTagged:
      case IrOpcode::kInt32Add:
      case IrOpcode::kInt64Add:
        node = NodeProperties::GetValueInput(node, 0);
        break;
      default:
        return false;
    }
  }
  return true;
}

MemoryLowering::AllocationState::AllocationState()
    : group_(nullptr),
      size_(std::numeric_limits<int>::max()),
      top_(nullptr),
      effect_(nullptr) {}

MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
                                                 Node* effect)
    : group_(group),
      size_(std::numeric_limits<int>::max()),
      top_(nullptr),
      effect_(effect) {}

MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
                                                 intptr_t size, Node* top,
                                                 Node* effect)
    : group_(group), size_(size), top_(top), effect_(effect) {}

bool MemoryLowering::AllocationState::IsYoungGenerationAllocation() const {
  return group() && group()->IsYoungGenerationAllocation();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8