// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/memory-optimizer.h"

#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"

namespace v8 {
namespace internal {
namespace compiler {

MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
                                 PoisoningMitigationLevel poisoning_level,
                                 AllocationFolding allocation_folding)
    : jsgraph_(jsgraph),
      empty_state_(AllocationState::Empty(zone)),
      pending_(zone),
      tokens_(zone),
      zone_(zone),
      graph_assembler_(jsgraph, nullptr, nullptr, zone),
      poisoning_level_(poisoning_level),
      allocation_folding_(allocation_folding) {}

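// Drains the worklist of (node, state) tokens, seeded with the effect uses
// of the graph's start node, visiting each node with the allocation state
// that reaches it along the effect chain.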
void MemoryOptimizer::Optimize() {
  EnqueueUses(graph()->start(), empty_state());
  while (!tokens_.empty()) {
    Token const token = tokens_.front();
    tokens_.pop();
    VisitNode(token.node, token.state);
  }
  DCHECK(pending_.empty());
  DCHECK(tokens_.empty());
}

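// An AllocationGroup records the ids of all allocations that share a single
// reservation, their common pretenuring decision, and (for foldable groups)
// the node holding the reservation size.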
MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
                                                  PretenureFlag pretenure,
                                                  Zone* zone)
    : node_ids_(zone), pretenure_(pretenure), size_(nullptr) {
  node_ids_.insert(node->id());
}

MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
                                                  PretenureFlag pretenure,
                                                  Node* size, Zone* zone)
    : node_ids_(zone), pretenure_(pretenure), size_(size) {
  node_ids_.insert(node->id());
}

void MemoryOptimizer::AllocationGroup::Add(Node* node) {
  node_ids_.insert(node->id());
}

bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
  return node_ids_.find(node->id()) != node_ids_.end();
}

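// An AllocationState captures the folding status at a point on the effect
// chain: the current allocation group (if any), the number of bytes reserved
// so far, and the node holding the current allocation top. A null {top_}
// means the state is closed and no further allocations can be folded into it.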
MemoryOptimizer::AllocationState::AllocationState()
    : group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}

MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
    : group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}

MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
                                                  int size, Node* top)
    : group_(group), size_(size), top_(top) {}

bool MemoryOptimizer::AllocationState::IsNewSpaceAllocation() const {
  return group() && group()->IsNewSpaceAllocation();
}

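// Dispatches on the opcode of {node}; any opcode not handled explicitly here
// must not produce an effect output.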
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
  DCHECK(!node->IsDead());
  DCHECK_LT(0, node->op()->EffectInputCount());
  switch (node->opcode()) {
    case IrOpcode::kAllocate:
      // Allocate nodes were purged from the graph in effect-control
      // linearization.
      UNREACHABLE();
    case IrOpcode::kAllocateRaw:
      return VisitAllocateRaw(node, state);
    case IrOpcode::kCall:
      return VisitCall(node, state);
    case IrOpcode::kCallWithCallerSavedRegisters:
      return VisitCallWithCallerSavedRegisters(node, state);
    case IrOpcode::kLoadElement:
      return VisitLoadElement(node, state);
    case IrOpcode::kLoadField:
      return VisitLoadField(node, state);
    case IrOpcode::kStoreElement:
      return VisitStoreElement(node, state);
    case IrOpcode::kStoreField:
      return VisitStoreField(node, state);
    case IrOpcode::kDeoptimizeIf:
    case IrOpcode::kDeoptimizeUnless:
    case IrOpcode::kIfException:
    case IrOpcode::kLoad:
    case IrOpcode::kProtectedLoad:
    case IrOpcode::kUnalignedLoad:
    case IrOpcode::kStore:
    case IrOpcode::kProtectedStore:
    case IrOpcode::kUnalignedStore:
    case IrOpcode::kRetain:
    case IrOpcode::kUnsafePointerAdd:
    case IrOpcode::kDebugBreak:
    case IrOpcode::kUnreachable:
    case IrOpcode::kWord32PoisonOnSpeculation:
    case IrOpcode::kWord64PoisonOnSpeculation:
      return VisitOtherEffect(node, state);
    default:
      break;
  }
  DCHECK_EQ(0, node->op()->EffectOutputCount());
}

#define __ gasm()->

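// Lowers an AllocateRaw node to an inline bump-pointer allocation with an
// out-of-line allocation stub call as fallback. When allocation folding is
// enabled, small constant-size allocations are folded into the current
// allocation group so that a single reservation check covers several objects.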
void MemoryOptimizer::VisitAllocateRaw(Node* node,
                                       AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
  Node* value;
  Node* size = node->InputAt(0);
  Node* effect = node->InputAt(1);
  Node* control = node->InputAt(2);

  gasm()->Reset(effect, control);

  PretenureFlag pretenure = PretenureFlagOf(node->op());

  // Propagate tenuring from outer allocations to inner allocations, i.e.
  // when we allocate an object in old space and store a newly allocated
  // child object into the pretenured object, then the newly allocated
  // child object also should get pretenured to old space.
  if (pretenure == TENURED) {
    for (Edge const edge : node->use_edges()) {
      Node* const user = edge.from();
      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
        Node* const child = user->InputAt(1);
        if (child->opcode() == IrOpcode::kAllocateRaw &&
            PretenureFlagOf(child->op()) == NOT_TENURED) {
          NodeProperties::ChangeOp(child, node->op());
          break;
        }
      }
    }
  } else {
    DCHECK_EQ(NOT_TENURED, pretenure);
    for (Edge const edge : node->use_edges()) {
      Node* const user = edge.from();
      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
        Node* const parent = user->InputAt(0);
        if (parent->opcode() == IrOpcode::kAllocateRaw &&
            PretenureFlagOf(parent->op()) == TENURED) {
          pretenure = TENURED;
          break;
        }
      }
    }
  }

  // Determine the top/limit addresses.
  Node* top_address = __ ExternalConstant(
      pretenure == NOT_TENURED
          ? ExternalReference::new_space_allocation_top_address(isolate())
          : ExternalReference::old_space_allocation_top_address(isolate()));
  Node* limit_address = __ ExternalConstant(
      pretenure == NOT_TENURED
          ? ExternalReference::new_space_allocation_limit_address(isolate())
          : ExternalReference::old_space_allocation_limit_address(isolate()));

  // Check if we can fold this allocation into a previous allocation
  // represented by the incoming {state}.
  Int32Matcher m(size);
  if (m.HasValue() && m.Value() < kMaxRegularHeapObjectSize) {
    int32_t const object_size = m.Value();
    if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
        state->size() <= kMaxRegularHeapObjectSize - object_size &&
        state->group()->pretenure() == pretenure) {
      // We can fold this Allocate {node} into the allocation {group}
      // represented by the given {state}. Compute the upper bound for
      // the new {state}.
      int32_t const state_size = state->size() + object_size;

      // Update the reservation check to the actual maximum upper bound.
      AllocationGroup* const group = state->group();
      if (OpParameter<int32_t>(group->size()->op()) < state_size) {
        NodeProperties::ChangeOp(group->size(),
                                 common()->Int32Constant(state_size));
      }

      // Update the allocation top with the new object allocation.
      // TODO(bmeurer): Defer writing back top as much as possible.
      Node* top = __ IntAdd(state->top(), __ IntPtrConstant(object_size));
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the effective inner allocated address.
      value = __ BitcastWordToTagged(
          __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));

      // Extend the allocation {group}.
      group->Add(value);
      state = AllocationState::Open(group, state_size, top, zone());
    } else {
      auto call_runtime = __ MakeDeferredLabel();
      auto done = __ MakeLabel(MachineType::PointerRepresentation());

      // Setup a mutable reservation size node; will be patched as we fold
      // additional allocations into this new group.
      Node* size = __ UniqueInt32Constant(object_size);

      // Load allocation top and limit.
      Node* top =
          __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
      Node* limit =
          __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

      // Check if we need to collect garbage before we can start bump pointer
      // allocation (always done for folded allocations).
      Node* check = __ UintLessThan(
          __ IntAdd(top,
                    machine()->Is64() ? __ ChangeInt32ToInt64(size) : size),
          limit);

      __ GotoIfNot(check, &call_runtime);
      __ Goto(&done, top);

      __ Bind(&call_runtime);
      {
        Node* target = pretenure == NOT_TENURED
                           ? __ AllocateInNewSpaceStubConstant()
                           : __ AllocateInOldSpaceStubConstant();
        if (!allocate_operator_.is_set()) {
          auto call_descriptor = Linkage::GetStubCallDescriptor(
              graph()->zone(), AllocateDescriptor{}, 0,
              CallDescriptor::kCanUseRoots, Operator::kNoThrow);
          allocate_operator_.set(common()->Call(call_descriptor));
        }
        Node* vfalse = __ Call(allocate_operator_.get(), target, size);
        vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
        __ Goto(&done, vfalse);
      }

      __ Bind(&done);

      // Compute the new top and write it back.
      top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the initial object address.
      value = __ BitcastWordToTagged(
          __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));

      // Start a new allocation group.
      AllocationGroup* group =
          new (zone()) AllocationGroup(value, pretenure, size, zone());
      state = AllocationState::Open(group, object_size, top, zone());
    }
  } else {
    auto call_runtime = __ MakeDeferredLabel();
    auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);

    // Load allocation top and limit.
    Node* top =
        __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
    Node* limit =
        __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

    // Compute the new top.
    Node* new_top =
        __ IntAdd(top, machine()->Is64() ? __ ChangeInt32ToInt64(size) : size);

    // Check if we can do bump pointer allocation here.
    Node* check = __ UintLessThan(new_top, limit);
    __ GotoIfNot(check, &call_runtime);
    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                 kNoWriteBarrier),
             top_address, __ IntPtrConstant(0), new_top);
    __ Goto(&done, __ BitcastWordToTagged(
                       __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));

    __ Bind(&call_runtime);
    Node* target = pretenure == NOT_TENURED
                       ? __ AllocateInNewSpaceStubConstant()
                       : __ AllocateInOldSpaceStubConstant();
    if (!allocate_operator_.is_set()) {
      auto call_descriptor = Linkage::GetStubCallDescriptor(
          graph()->zone(), AllocateDescriptor{}, 0,
          CallDescriptor::kCanUseRoots, Operator::kNoThrow);
      allocate_operator_.set(common()->Call(call_descriptor));
    }
    __ Goto(&done, __ Call(allocate_operator_.get(), target, size));

    __ Bind(&done);
    value = done.PhiAt(0);

    // Create an unfoldable allocation group.
    AllocationGroup* group =
        new (zone()) AllocationGroup(value, pretenure, zone());
    state = AllocationState::Closed(group, zone());
  }

  effect = __ ExtractCurrentEffect();
  control = __ ExtractCurrentControl();

  // Replace all effect uses of {node} with the {effect}, enqueue the
  // effect uses for further processing, and replace all value uses of
  // {node} with the {value}.
  for (Edge edge : node->use_edges()) {
    if (NodeProperties::IsEffectEdge(edge)) {
      EnqueueUse(edge.from(), edge.index(), state);
      edge.UpdateTo(effect);
    } else if (NodeProperties::IsValueEdge(edge)) {
      edge.UpdateTo(value);
    } else {
      DCHECK(NodeProperties::IsControlEdge(edge));
      edge.UpdateTo(control);
    }
  }

  // Kill the {node} to make sure we don't leave dangling dead uses.
  node->Kill();
}

#undef __

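// A call that may allocate invalidates any open allocation group, so the
// state is reset unless the call descriptor is marked kNoAllocate.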
void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kCall, node->opcode());
  // If the call can allocate, we start with a fresh state.
  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
    state = empty_state();
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitCallWithCallerSavedRegisters(
    Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kCallWithCallerSavedRegisters, node->opcode());
  // If the call can allocate, we start with a fresh state.
  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
    state = empty_state();
  }
  EnqueueUses(node, state);
}

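// Lowers LoadElement to a machine-level Load at a computed byte offset, using
// a poisoned load where speculative-execution mitigations require it.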
void MemoryOptimizer::VisitLoadElement(Node* node,
                                       AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* index = node->InputAt(1);
  node->ReplaceInput(1, ComputeIndex(access, index));
  if (NeedsPoisoning(access.load_sensitivity) &&
      access.machine_type.representation() !=
          MachineRepresentation::kTaggedPointer) {
    NodeProperties::ChangeOp(node,
                             machine()->PoisonedLoad(access.machine_type));
  } else {
    NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph()->zone(), 1, offset);
  if (NeedsPoisoning(access.load_sensitivity) &&
      access.machine_type.representation() !=
          MachineRepresentation::kTaggedPointer) {
    NodeProperties::ChangeOp(node,
                             machine()->PoisonedLoad(access.machine_type));
  } else {
    NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  }
  EnqueueUses(node, state);
}

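// Lowers StoreElement to a machine-level Store, eliding the write barrier
// when the store targets an object in the current new-space allocation group.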
void MemoryOptimizer::VisitStoreElement(Node* node,
                                        AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* object = node->InputAt(0);
  Node* index = node->InputAt(1);
  WriteBarrierKind write_barrier_kind =
      ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
  node->ReplaceInput(1, ComputeIndex(access, index));
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitStoreField(Node* node,
                                      AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  Node* object = node->InputAt(0);
  WriteBarrierKind write_barrier_kind =
      ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph()->zone(), 1, offset);
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitOtherEffect(Node* node,
                                       AllocationState const* state) {
  EnqueueUses(node, state);
}

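// Converts an element index into a byte offset: widen it to the machine word
// size if necessary, scale by the element size, and add the header size minus
// the tag adjustment.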
Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* key) {
  Node* index;
  if (machine()->Is64()) {
    // On 64-bit platforms, we need to feed a Word64 index to the Load and
    // Store operators. Since LoadElement or StoreElement don't do any bounds
    // checking themselves, we can be sure that the {key} was already checked
    // and is in valid range, so we can do the further address computation on
    // Word64 below, which ideally allows us to fuse the address computation
    // with the actual memory access operation on Intel platforms.
    index = graph()->NewNode(machine()->ChangeUint32ToUint64(), key);
  } else {
    index = key;
  }
  int const element_size_shift =
      ElementSizeLog2Of(access.machine_type.representation());
  if (element_size_shift) {
    index = graph()->NewNode(machine()->WordShl(), index,
                             jsgraph()->IntPtrConstant(element_size_shift));
  }
  int const fixed_offset = access.header_size - access.tag();
  if (fixed_offset) {
    index = graph()->NewNode(machine()->IntAdd(), index,
                             jsgraph()->IntPtrConstant(fixed_offset));
  }
  return index;
}

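// Stores into an object that the current state knows was just allocated in
// new space do not need a write barrier.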
WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
    Node* object, AllocationState const* state,
    WriteBarrierKind write_barrier_kind) {
  if (state->IsNewSpaceAllocation() && state->group()->Contains(object)) {
    write_barrier_kind = kNoWriteBarrier;
  }
  return write_barrier_kind;
}

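// Merges the allocation states flowing into an EffectPhi: identical states
// pass through unchanged, states from the same group close that group, and
// anything else falls back to the empty state.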
MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
    AllocationStates const& states) {
  // Check if all states are the same; or at least if all allocation
  // states belong to the same allocation group.
  AllocationState const* state = states.front();
  AllocationGroup* group = state->group();
  for (size_t i = 1; i < states.size(); ++i) {
    if (states[i] != state) state = nullptr;
    if (states[i]->group() != group) group = nullptr;
  }
  if (state == nullptr) {
    if (group != nullptr) {
      // We cannot fold any more allocations into this group, but we can still
      // eliminate write barriers on stores to this group.
      // TODO(bmeurer): We could potentially just create a Phi here to merge
      // the various tops; but we need to pay special attention not to create
      // an unschedulable graph.
      state = AllocationState::Closed(group, zone());
    } else {
      // The states are from different allocation groups.
      state = empty_state();
    }
  }
  return state;
}

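// Collects the allocation states arriving at an EffectPhi. Loop headers are
// processed immediately with an empty state; ordinary merges wait until the
// states for all inputs have arrived and are then combined via MergeStates().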
void MemoryOptimizer::EnqueueMerge(Node* node, int index,
                                   AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
  int const input_count = node->InputCount() - 1;
  DCHECK_LT(0, input_count);
  Node* const control = node->InputAt(input_count);
  if (control->opcode() == IrOpcode::kLoop) {
    // For loops we always start with an empty state at the beginning.
    if (index == 0) EnqueueUses(node, empty_state());
  } else {
    DCHECK_EQ(IrOpcode::kMerge, control->opcode());
    // Check if we already know about this pending merge.
    NodeId const id = node->id();
    auto it = pending_.find(id);
    if (it == pending_.end()) {
      // Insert a new pending merge.
      it = pending_.insert(std::make_pair(id, AllocationStates(zone()))).first;
    }
    // Add the next input state.
    it->second.push_back(state);
    // Check if states for all inputs are available by now.
    if (it->second.size() == static_cast<size_t>(input_count)) {
      // All inputs to this effect merge are done, merge the states given all
      // input constraints, drop the pending merge and enqueue uses of the
      // EffectPhi {node}.
      state = MergeStates(it->second);
      EnqueueUses(node, state);
      pending_.erase(it);
    }
  }
}

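// Enqueues all effect uses of {node}, propagating the given {state} to them.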
void MemoryOptimizer::EnqueueUses(Node* node, AllocationState const* state) {
  for (Edge const edge : node->use_edges()) {
    if (NodeProperties::IsEffectEdge(edge)) {
      EnqueueUse(edge.from(), edge.index(), state);
    }
  }
}

void MemoryOptimizer::EnqueueUse(Node* node, int index,
                                 AllocationState const* state) {
  if (node->opcode() == IrOpcode::kEffectPhi) {
    // An EffectPhi represents a merge of different effect chains, which
    // needs special handling depending on whether the merge is part of a
    // loop or just a normal control join.
    EnqueueMerge(node, index, state);
  } else {
    Token token = {node, state};
    tokens_.push(token);
  }
}

Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }

Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }

CommonOperatorBuilder* MemoryOptimizer::common() const {
  return jsgraph()->common();
}

MachineOperatorBuilder* MemoryOptimizer::machine() const {
  return jsgraph()->machine();
}

bool MemoryOptimizer::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
  // Safe loads do not need poisoning.
  if (load_sensitivity == LoadSensitivity::kSafe) return false;

  switch (poisoning_level_) {
    case PoisoningMitigationLevel::kDontPoison:
      return false;
    case PoisoningMitigationLevel::kPoisonAll:
      return true;
    case PoisoningMitigationLevel::kPoisonCriticalOnly:
      return load_sensitivity == LoadSensitivity::kCritical;
  }
  UNREACHABLE();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8