diff --git a/v8/BUILD.gn b/v8/BUILD.gn
index 159ea937fa..7aa9c61544 100644
--- a/v8/BUILD.gn
+++ b/v8/BUILD.gn
@@ -210,7 +210,7 @@ declare_args() {
   # site-isolation in Chrome and on simulator builds which test code generation
   # on these platforms.
   v8_untrusted_code_mitigations =
-      v8_current_cpu != "x86" && (is_android || target_is_simulator)
+      v8_current_cpu != "x86" && (is_android || target_is_simulator || is_ohos)

   # Enable minor mark compact.
   v8_enable_minor_mc = true
@@ -398,7 +398,7 @@ assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity,

 if (v8_enable_shared_ro_heap && v8_enable_pointer_compression) {
   assert(
-      is_linux || is_chromeos || is_android,
+      is_linux || is_chromeos || is_android || is_ohos,
       "Sharing read-only heap with pointer compression is only supported on Linux or Android")
 }

@@ -989,7 +989,7 @@ config("toolchain") {
   # mksnapshot. We additionally set V8_HAVE_TARGET_OS to determine that a
   # target OS has in fact been set; otherwise we internally assume that target
   # OS == host OS (see v8config.h).
-  if (target_os == "android") {
+  if (target_os == "android" || target_os == "ohos") {
     defines += [ "V8_HAVE_TARGET_OS" ]
     defines += [ "V8_TARGET_OS_ANDROID" ]
   } else if (target_os == "fuchsia") {
@@ -1114,7 +1114,7 @@ config("always_optimize") {

   # TODO(crbug.com/621335) Rework this so that we don't have the confusion
   # between "optimize_speed" and "optimize_max".
-  if (((is_posix && !is_android) || is_fuchsia) && !using_sanitizer) {
+  if (((is_posix && !is_android && !is_ohos) || is_fuchsia) && !using_sanitizer) {
     configs += [ "//build/config/compiler:optimize_speed" ]
   } else {
     configs += [ "//build/config/compiler:optimize_max" ]
@@ -4059,7 +4059,7 @@ v8_source_set("v8_base_without_compiler") {

     # iOS Xcode simulator builds run on an x64 target. iOS and macOS are both
     # based on Darwin and thus POSIX-compliant to a similar degree.
-    if (is_linux || is_chromeos || is_mac || is_ios || target_os == "freebsd") {
+    if (is_linux || is_chromeos || is_mac || is_ios || target_os == "freebsd" || is_ohos) {
       sources += [
         "src/trap-handler/handler-inside-posix.cc",
         "src/trap-handler/handler-outside-posix.cc",
@@ -4602,7 +4602,7 @@ v8_component("v8_libbase") {
     ]

     libs = [ "dl" ]
-  } else if (is_android) {
+  } else if (is_android || is_ohos) {
     if (current_toolchain == host_toolchain) {
       libs = [
         "dl",
diff --git a/v8/include/v8config.h b/v8/include/v8config.h
index acd34d7a1f..2eb2d802d6 100644
--- a/v8/include/v8config.h
+++ b/v8/include/v8config.h
@@ -82,7 +82,7 @@ path. Add it with -I<path> to the command line
 //  V8_OS_AIX           - AIX
 //  V8_OS_WIN           - Microsoft Windows

-#if defined(__ANDROID__)
+#if defined(__ANDROID__) || defined(OS_OHOS) || defined(__OHOS__)
 # define V8_OS_ANDROID 1
 # define V8_OS_LINUX 1
 # define V8_OS_POSIX 1
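
The porting strategy above hinges on the fact that the rest of V8 tests only the V8_OS_* macros, never the OHOS names directly. A minimal self-contained sketch of the effect (illustrative, not part of the patch; OS_OHOS/__OHOS__ are assumed to come from the build system):

#if defined(__ANDROID__) || defined(OS_OHOS) || defined(__OHOS__)
#define V8_OS_ANDROID 1
#endif

const char* DescribeTarget() {
#if V8_OS_ANDROID
  // Reached on Android *and* OHOS builds after this patch, so every
  // Android-specific code path applies to OHOS with no per-file changes.
  return "android-like target";
#else
  return "other target";
#endif
}
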
diff --git a/v8/src/builtins/builtins-collections-gen.cc b/v8/src/builtins/builtins-collections-gen.cc
index 785a1af90a..8e343d9d2d 100644
--- a/v8/src/builtins/builtins-collections-gen.cc
+++ b/v8/src/builtins/builtins-collections-gen.cc
@@ -1752,6 +1752,9 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
   ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
                          "Map.prototype.delete");

+  // This check breaks a known exploitation technique. See crbug.com/1263462
+  CSA_CHECK(this, TaggedNotEqual(key, TheHoleConstant()));
+
   const TNode<OrderedHashMap> table =
       LoadObjectField<OrderedHashMap>(CAST(receiver), JSMap::kTableOffset);

@@ -1920,6 +1923,9 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
   ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
                          "Set.prototype.delete");

+  // This check breaks a known exploitation technique. See crbug.com/1263462
+  CSA_CHECK(this, TaggedNotEqual(key, TheHoleConstant()));
+
   const TNode<OrderedHashSet> table =
       LoadObjectField<OrderedHashSet>(CAST(receiver), JSMap::kTableOffset);

@@ -2866,6 +2872,9 @@ TF_BUILTIN(WeakMapPrototypeDelete, CodeStubAssembler) {
   ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
                          "WeakMap.prototype.delete");

+  // This check breaks a known exploitation technique. See crbug.com/1263462
+  CSA_CHECK(this, TaggedNotEqual(key, TheHoleConstant()));
+
   Return(CallBuiltin(Builtins::kWeakCollectionDelete, context, receiver, key));
 }

@@ -2914,6 +2923,9 @@ TF_BUILTIN(WeakSetPrototypeDelete, CodeStubAssembler) {
   ThrowIfNotInstanceType(context, receiver, JS_WEAK_SET_TYPE,
                          "WeakSet.prototype.delete");

+  // This check breaks a known exploitation technique. See crbug.com/1263462
+  CSA_CHECK(this, TaggedNotEqual(value, TheHoleConstant()));
+
   Return(
       CallBuiltin(Builtins::kWeakCollectionDelete, context, receiver, value));
 }
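
For context: V8's hash tables use a private "hole" sentinel to mark deleted entries, and known exploitation techniques smuggle that sentinel back into delete() to corrupt table bookkeeping. A rough C++ analogue of the invariant the four CSA_CHECKs enforce (names are illustrative, not V8's):

#include <cstdlib>

struct Tagged { const void* ptr; };

// Stand-in for V8's the_hole sentinel; only its identity matters.
static const Tagged kTheHole{&kTheHole};

void CollectionDelete(Tagged key) {
  // The sentinel must never arrive from script. If it does, state is already
  // attacker-influenced, so crash instead of performing the hash-table
  // update the exploitation technique depends on.
  if (key.ptr == kTheHole.ptr) std::abort();  // mirrors the CSA_CHECK
  // ... regular lookup and entry removal would follow here ...
}
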
diff --git a/v8/src/builtins/finalization-registry.tq b/v8/src/builtins/finalization-registry.tq
index 84499e19e1..389b9a5ce0 100644
--- a/v8/src/builtins/finalization-registry.tq
+++ b/v8/src/builtins/finalization-registry.tq
@@ -143,21 +143,22 @@ FinalizationRegistryRegister(
     ThrowTypeError(
         MessageTemplate::kWeakRefsRegisterTargetAndHoldingsMustNotBeSame);
   }
-  const unregisterToken = arguments[2];
   // 5. If Type(unregisterToken) is not Object,
   //   a. If unregisterToken is not undefined, throw a TypeError exception.
   //   b. Set unregisterToken to empty.
-  let hasUnregisterToken: bool = false;
-  typeswitch (unregisterToken) {
+  const unregisterTokenRaw = arguments[2];
+  let unregisterToken: JSReceiver|Undefined;
+  typeswitch (unregisterTokenRaw) {
     case (Undefined): {
+      unregisterToken = Undefined;
     }
-    case (JSReceiver): {
-      hasUnregisterToken = true;
+    case (unregisterTokenObj: JSReceiver): {
+      unregisterToken = unregisterTokenObj;
     }
     case (JSAny): deferred {
       ThrowTypeError(
           MessageTemplate::kWeakRefsUnregisterTokenMustBeObject,
-          unregisterToken);
+          unregisterTokenRaw);
     }
   }
   // 6. Let cell be the Record { [[WeakRefTarget]] : target, [[HeldValue]]:
@@ -178,7 +179,7 @@ FinalizationRegistryRegister(
   };
   // 7. Append cell to finalizationRegistry.[[Cells]].
   PushCell(finalizationRegistry, cell);
-  if (hasUnregisterToken) {
+  if (unregisterToken != Undefined) {
     // If an unregister token is provided, a runtime call is needed to
     // do some OrderedHashTable operations and register the mapping.
     // See v8:10705.
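
The Torque rewrite is type-driven hardening: instead of a bool flag alongside the raw JSAny argument, the validated token lives in a variable whose type (JSReceiver|Undefined) makes the unchecked value unreachable after the typeswitch. A loose C++ analogue of the pattern, with illustrative names:

#include <variant>

struct Undefined {};
struct JSReceiverRef {};  // stands in for a validated object reference

// Once validation produces this type, the raw unchecked argument is out of
// scope; later code cannot accidentally consult it.
using UnregisterToken = std::variant<Undefined, JSReceiverRef>;

bool HasUnregisterToken(const UnregisterToken& token) {
  return std::holds_alternative<JSReceiverRef>(token);
}
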
diff --git a/v8/src/codegen/external-reference-table.cc b/v8/src/codegen/external-reference-table.cc
index 2741bd8ec2..4c544f16d5 100644
--- a/v8/src/codegen/external-reference-table.cc
+++ b/v8/src/codegen/external-reference-table.cc
@@ -9,7 +9,7 @@
 #include "src/ic/stub-cache.h"
 #include "src/logging/counters.h"

-#if defined(DEBUG) && defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID)
+#if defined(DEBUG) && defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID) && !defined(__MUSL__)
 #define SYMBOLIZE_FUNCTION
 #include <execinfo.h>

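
The extra guard is needed because <execinfo.h> (backtrace and friends) is a glibc extension that musl does not ship. Note that musl deliberately defines no identifying macro, so __MUSL__ here must be supplied by the build configuration (e.g. -D__MUSL__), an assumption this patch makes. A self-contained sketch of the guarded usage:

#if !defined(__MUSL__)
#include <execinfo.h>

void DumpStackToStderr() {
  void* frames[64];
  int count = backtrace(frames, 64);
  backtrace_symbols_fd(frames, count, 2);  // fd 2 == stderr
}
#else
void DumpStackToStderr() {}  // no execinfo.h on musl-based libc
#endif
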
diff --git a/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 6e51346c50..7c07a701bb 100644
--- a/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -98,11 +98,14 @@ class IA32OperandGenerator final : public OperandGenerator {
   bool CanBeImmediate(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kInt32Constant:
-      case IrOpcode::kNumberConstant:
       case IrOpcode::kExternalConstant:
       case IrOpcode::kRelocatableInt32Constant:
       case IrOpcode::kRelocatableInt64Constant:
         return true;
+      case IrOpcode::kNumberConstant: {
+        const double value = OpParameter<double>(node->op());
+        return bit_cast<int64_t>(value) == 0;
+      }
       case IrOpcode::kHeapConstant: {
 // TODO(bmeurer): We must not dereference handles concurrently. If we
 // really have to this here, then we need to find a way to put this
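
The rule the new case encodes: a NumberConstant may be used as a raw immediate only when its 64-bit pattern is all zeros, i.e. exactly +0.0; every other double, including -0.0, has nonzero bits that must not be emitted as an untagged immediate. A standalone check (illustrative, not V8 code):

#include <cstdint>
#include <cstring>
#include <iostream>

int64_t BitCast(double v) {
  int64_t r;
  std::memcpy(&r, &v, sizeof r);  // portable bit cast
  return r;
}

int main() {
  std::cout << (BitCast(0.0) == 0) << "\n";   // 1: +0.0 qualifies
  std::cout << (BitCast(-0.0) == 0) << "\n";  // 0: sign bit is set
  std::cout << (BitCast(1.0) == 0) << "\n";   // 0: exponent bits are set
}
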
diff --git a/v8/src/compiler/backend/instruction-selector.cc b/v8/src/compiler/backend/instruction-selector.cc
index 4c40835634..0abf01e4d6 100644
--- a/v8/src/compiler/backend/instruction-selector.cc
+++ b/v8/src/compiler/backend/instruction-selector.cc
@@ -274,7 +274,7 @@ Instruction* InstructionSelector::Emit(Instruction* instr) {

 bool InstructionSelector::CanCover(Node* user, Node* node) const {
   // 1. Both {user} and {node} must be in the same basic block.
-  if (schedule()->block(node) != schedule()->block(user)) {
+  if (schedule()->block(node) != current_block_) {
     return false;
   }
   // 2. Pure {node}s must be owned by the {user}.
@@ -282,7 +282,7 @@ bool InstructionSelector::CanCover(Node* user, Node* node) const {
     return node->OwnedBy(user);
   }
   // 3. Impure {node}s must match the effect level of {user}.
-  if (GetEffectLevel(node) != GetEffectLevel(user)) {
+  if (GetEffectLevel(node) != current_effect_level_) {
     return false;
   }
   // 4. Only {node} must have value edges pointing to {user}.
@@ -294,21 +294,6 @@ bool InstructionSelector::CanCover(Node* user, Node* node) const {
   return true;
 }

-bool InstructionSelector::CanCoverTransitively(Node* user, Node* node,
-                                               Node* node_input) const {
-  if (CanCover(user, node) && CanCover(node, node_input)) {
-    // If {node} is pure, transitivity might not hold.
-    if (node->op()->HasProperty(Operator::kPure)) {
-      // If {node_input} is pure, the effect levels do not matter.
-      if (node_input->op()->HasProperty(Operator::kPure)) return true;
-      // Otherwise, {user} and {node_input} must have the same effect level.
-      return GetEffectLevel(user) == GetEffectLevel(node_input);
-    }
-    return true;
-  }
-  return false;
-}
-
 bool InstructionSelector::IsOnlyUserOfNodeInSameBlock(Node* user,
                                                       Node* node) const {
   BasicBlock* bb_user = schedule()->block(user);
@@ -1212,6 +1197,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
   int effect_level = 0;
   for (Node* const node : *block) {
     SetEffectLevel(node, effect_level);
+    current_effect_level_ = effect_level;
     if (node->opcode() == IrOpcode::kStore ||
         node->opcode() == IrOpcode::kUnalignedStore ||
         node->opcode() == IrOpcode::kCall ||
@@ -1231,6 +1217,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
   // control input should be on the same effect level as the last node.
   if (block->control_input() != nullptr) {
     SetEffectLevel(block->control_input(), effect_level);
+    current_effect_level_ = effect_level;
   }

   auto FinishEmittedInstructions = [&](Node* node, int instruction_start) {
diff --git a/v8/src/compiler/backend/instruction-selector.h b/v8/src/compiler/backend/instruction-selector.h
index c7bc99005d..51aafc36b5 100644
--- a/v8/src/compiler/backend/instruction-selector.h
+++ b/v8/src/compiler/backend/instruction-selector.h
@@ -417,12 +417,12 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
   // Used in pattern matching during code generation.
   // Check if {node} can be covered while generating code for the current
   // instruction. A node can be covered if the {user} of the node has the only
-  // edge and the two are in the same basic block.
+  // edge, the two are in the same basic block, and there are no side-effects
+  // in-between. The last check is crucial for soundness.
+  // For pure nodes, CanCover(a,b) is checked to avoid duplicated execution:
+  // If this is not the case, code for b must still be generated for other
+  // users, and fusing is unlikely to improve performance.
   bool CanCover(Node* user, Node* node) const;
-  // CanCover is not transitive.  The counter example are Nodes A,B,C such that
-  // CanCover(A, B) and CanCover(B,C) and B is pure: The the effect level of A
-  // and B might differ. CanCoverTransitively does the additional checks.
-  bool CanCoverTransitively(Node* user, Node* node, Node* node_input) const;

   // Used in pattern matching during code generation.
   // This function checks that {node} and {user} are in the same basic block,
@@ -741,6 +741,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
   BoolVector defined_;
   BoolVector used_;
   IntVector effect_level_;
+  int current_effect_level_;
   IntVector virtual_registers_;
   IntVector virtual_register_rename_;
   InstructionScheduler* scheduler_;
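
The fix compares against the effect level of the instruction currently being visited (current_effect_level_) instead of re-deriving {user}'s level, and drops the transitive variant entirely: with the tracked level, stepwise CanCover(value, input) is already sound. A toy model of how effect levels are assigned while walking a block (illustrative, not V8 code):

#include <vector>

enum class Op { kAdd, kLoad, kStore, kCall };

std::vector<int> AssignEffectLevels(const std::vector<Op>& block) {
  std::vector<int> levels;
  int level = 0;
  for (Op op : block) {
    levels.push_back(level);
    if (op == Op::kStore || op == Op::kCall) ++level;  // side-effect boundary
  }
  return levels;
}

// A load assigned level N may only be fused into an instruction that is
// itself being emitted at level N; after a store, the levels differ, and
// fusing would illegally move the load across the store.
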
diff --git a/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 1fa6ce8f3d..d43a8a56e0 100644
--- a/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -1523,7 +1523,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   if (CanCover(node, value)) {
     switch (value->opcode()) {
       case IrOpcode::kWord64Sar: {
-        if (CanCoverTransitively(node, value, value->InputAt(0)) &&
+        if (CanCover(value, value->InputAt(0)) &&
             TryEmitExtendingLoad(this, value, node)) {
           return;
         } else {
diff --git a/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
index 1d6b506685..7d2e5e7853 100644
--- a/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
+++ b/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -1277,7 +1277,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   if (CanCover(node, value)) {
     switch (value->opcode()) {
       case IrOpcode::kWord64Sar: {
-        if (CanCoverTransitively(node, value, value->InputAt(0)) &&
+        if (CanCover(value, value->InputAt(0)) &&
             TryEmitExtendingLoad(this, value, node)) {
           return;
         } else {
diff --git a/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 3f005475b8..ca33549e3a 100644
--- a/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -1685,7 +1685,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
       case IrOpcode::kWord64Shr: {
         Int64BinopMatcher m(value);
         if (m.right().Is(32)) {
-          if (CanCoverTransitively(node, value, value->InputAt(0)) &&
+          if (CanCover(value, value->InputAt(0)) &&
               TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) {
             return EmitIdentity(node);
           }
diff --git a/v8/src/compiler/escape-analysis.cc b/v8/src/compiler/escape-analysis.cc
index 7ff6ab684f..23dfb00184 100644
--- a/v8/src/compiler/escape-analysis.cc
+++ b/v8/src/compiler/escape-analysis.cc
@@ -5,10 +5,12 @@
 #include "src/compiler/escape-analysis.h"

 #include "src/codegen/tick-counter.h"
+#include "src/compiler/frame-states.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/compiler/state-values-utils.h"
 #include "src/handles/handles-inl.h"
 #include "src/init/bootstrapper.h"
 #include "src/objects/map-inl.h"
@@ -224,6 +226,11 @@ class EscapeAnalysisTracker : public ZoneObject {
       return tracker_->ResolveReplacement(
           NodeProperties::GetContextInput(current_node()));
     }
+    // Accessing the current node is fine for `FrameState` nodes.
+    Node* CurrentNode() {
+      DCHECK_EQ(current_node()->opcode(), IrOpcode::kFrameState);
+      return current_node();
+    }

     void SetReplacement(Node* replacement) {
       replacement_ = replacement;
@@ -796,9 +803,30 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
       break;
     }
     case IrOpcode::kStateValues:
-    case IrOpcode::kFrameState:
       // These uses are always safe.
       break;
+    case IrOpcode::kFrameState: {
+      // We mark the receiver as escaping due to the non-standard `.getThis`
+      // API.
+      FrameState frame_state{current->CurrentNode()};
+      FrameStateType type = frame_state.frame_state_info().type();
+      // This needs to be kept in sync with the frame types supported in
+      // `OptimizedFrame::Summarize`.
+      if (type != FrameStateType::kUnoptimizedFunction &&
+          type != FrameStateType::kJavaScriptBuiltinContinuation &&
+          type != FrameStateType::kJavaScriptBuiltinContinuationWithCatch) {
+        break;
+      }
+      StateValuesAccess::iterator it =
+          StateValuesAccess(frame_state.parameters()).begin();
+      if (!it.done()) {
+        if (Node* receiver = it.node()) {
+          current->SetEscaped(receiver);
+        }
+        current->SetEscaped(frame_state.function());
+      }
+      break;
+    }
     default: {
       // For unknown nodes, treat all value inputs as escaping.
       int value_input_count = op->ValueInputCount();
diff --git a/v8/src/compiler/js-call-reducer.cc b/v8/src/compiler/js-call-reducer.cc
index 43aa4a5990..46cd5a71a6 100644
--- a/v8/src/compiler/js-call-reducer.cc
+++ b/v8/src/compiler/js-call-reducer.cc
@@ -5924,11 +5924,12 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
   Node* etrue = effect;
   Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
   {
-    // We know that the {index} is range of the {length} now.
+    // This extra check exists to refine the type of {index} but also to break
+    // an exploitation technique that abuses typer mismatches.
     index = etrue = graph()->NewNode(
-        common()->TypeGuard(
-            Type::Range(0.0, length_access.type.Max() - 1.0, graph()->zone())),
-        index, etrue, if_true);
+        simplified()->CheckBounds(p.feedback(),
+                                  CheckBoundsFlag::kAbortOnOutOfBounds),
+        index, length, etrue, if_true);

     done_true = jsgraph()->FalseConstant();
     if (iteration_kind == IterationKind::kKeys) {
diff --git a/v8/src/compiler/js-heap-broker.cc b/v8/src/compiler/js-heap-broker.cc
index 6cfd6d87c0..21b44fdeaf 100644
--- a/v8/src/compiler/js-heap-broker.cc
+++ b/v8/src/compiler/js-heap-broker.cc
@@ -335,13 +335,9 @@ bool PropertyCellData::Serialize(JSHeapBroker* broker) {
     }
   }

-  if (property_details.cell_type() == PropertyCellType::kConstant) {
-    Handle<Object> value_again =
-        broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
-    if (*value != *value_again) {
-      DCHECK(!broker->IsMainThread());
-      return false;
-    }
+  if (property_details.cell_type() == PropertyCellType::kInTransition) {
+    DCHECK(!broker->IsMainThread());
+    return false;
   }

   ObjectData* value_data = broker->TryGetOrCreateData(value, false);
diff --git a/v8/src/compiler/js-native-context-specialization.cc b/v8/src/compiler/js-native-context-specialization.cc
index 7e96f0c9d7..747b4facf8 100644
--- a/v8/src/compiler/js-native-context-specialization.cc
+++ b/v8/src/compiler/js-native-context-specialization.cc
@@ -834,6 +834,12 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
       return NoChange();
     } else if (property_cell_type == PropertyCellType::kUndefined) {
       return NoChange();
+    } else if (property_cell_type == PropertyCellType::kConstantType) {
+      // We rely on stability further below.
+      if (property_cell_value.IsHeapObject() &&
+          !property_cell_value.AsHeapObject().map().is_stable()) {
+        return NoChange();
+      }
     }
   } else if (access_mode == AccessMode::kHas) {
     DCHECK_EQ(receiver, lookup_start_object);
@@ -950,17 +956,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
         if (property_cell_value.IsHeapObject()) {
           MapRef property_cell_value_map =
               property_cell_value.AsHeapObject().map();
-          if (property_cell_value_map.is_stable()) {
-            dependencies()->DependOnStableMap(property_cell_value_map);
-          } else {
-            // The value's map is already unstable. If this store were to go
-            // through the C++ runtime, it would transition the PropertyCell to
-            // kMutable. We don't want to change the cell type from generated
-            // code (to simplify concurrent heap access), however, so we keep
-            // it as kConstantType and do the store anyways (if the new value's
-            // map matches). This is safe because it merely prolongs the limbo
-            // state that we are in already.
-          }
+          dependencies()->DependOnStableMap(property_cell_value_map);

           // Check that the {value} is a HeapObject.
           value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
@@ -999,6 +995,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
         break;
       }
       case PropertyCellType::kUndefined:
+      case PropertyCellType::kInTransition:
         UNREACHABLE();
     }
   }
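
The two hunks make ReduceGlobalAccess self-consistent: specializing on a kConstantType cell is only sound under a map-stability dependency, so an unstable map now causes an early NoChange(), and the later branch can register the dependency unconditionally. The tightened precondition as a standalone predicate (hedged sketch, illustrative names):

bool CanSpecializeConstantTypeCell(bool value_is_heap_object,
                                   bool value_map_is_stable) {
  if (!value_is_heap_object) return true;  // e.g. Smi values: no map involved
  // Unstable map: bail out rather than specialize without a dependency.
  return value_map_is_stable;
}
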
diff --git a/v8/src/compiler/machine-operator-reducer.cc b/v8/src/compiler/machine-operator-reducer.cc
index 2220cdb82f..8bf33d53a0 100644
--- a/v8/src/compiler/machine-operator-reducer.cc
+++ b/v8/src/compiler/machine-operator-reducer.cc
@@ -1697,11 +1697,21 @@ Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
 namespace {

 // Represents an operation of the form `(source & mask) == masked_value`.
+// where each bit set in masked_value also has to be set in mask.
 struct BitfieldCheck {
-  Node* source;
-  uint32_t mask;
-  uint32_t masked_value;
-  bool truncate_from_64_bit;
+  Node* const source;
+  uint32_t const mask;
+  uint32_t const masked_value;
+  bool const truncate_from_64_bit;
+
+  BitfieldCheck(Node* source, uint32_t mask, uint32_t masked_value,
+                bool truncate_from_64_bit)
+      : source(source),
+        mask(mask),
+        masked_value(masked_value),
+        truncate_from_64_bit(truncate_from_64_bit) {
+    CHECK_EQ(masked_value & ~mask, 0);
+  }

   static base::Optional<BitfieldCheck> Detect(Node* node) {
     // There are two patterns to check for here:
@@ -1716,14 +1726,16 @@ struct BitfieldCheck {
       if (eq.left().IsWord32And()) {
         Uint32BinopMatcher mand(eq.left().node());
         if (mand.right().HasResolvedValue() && eq.right().HasResolvedValue()) {
-          BitfieldCheck result{mand.left().node(), mand.right().ResolvedValue(),
-                               eq.right().ResolvedValue(), false};
+          uint32_t mask = mand.right().ResolvedValue();
+          uint32_t masked_value = eq.right().ResolvedValue();
+          if ((masked_value & ~mask) != 0) return {};
           if (mand.left().IsTruncateInt64ToInt32()) {
-            result.truncate_from_64_bit = true;
-            result.source =
-                NodeProperties::GetValueInput(mand.left().node(), 0);
+            return BitfieldCheck(
+                NodeProperties::GetValueInput(mand.left().node(), 0), mask,
+                masked_value, true);
+          } else {
+            return BitfieldCheck(mand.left().node(), mask, masked_value, false);
           }
-          return result;
         }
       }
     } else {
@@ -1815,17 +1827,20 @@ Reduction MachineOperatorReducer::ReduceWord64And(Node* node) {
 }

 Reduction MachineOperatorReducer::TryMatchWord32Ror(Node* node) {
+  // Recognize rotation; we are matching and transforming as follows:
+  //   x << y         |  x >>> (32 - y)    =>  x ror (32 - y)
+  //   x << (32 - y)  |  x >>> y           =>  x ror y
+  //   x << y         ^  x >>> (32 - y)    =>  x ror (32 - y)   if y & 31 != 0
+  //   x << (32 - y)  ^  x >>> y           =>  x ror y          if y & 31 != 0
+  // (As well as the commuted forms.)
+  // Note the side condition for XOR: the optimization doesn't hold for
+  // multiples of 32.
+
   DCHECK(IrOpcode::kWord32Or == node->opcode() ||
          IrOpcode::kWord32Xor == node->opcode());
   Int32BinopMatcher m(node);
   Node* shl = nullptr;
   Node* shr = nullptr;
-  // Recognize rotation, we are matching:
-  //  * x << y | x >>> (32 - y) => x ror (32 - y), i.e  x rol y
-  //  * x << (32 - y) | x >>> y => x ror y
-  //  * x << y ^ x >>> (32 - y) => x ror (32 - y), i.e. x rol y
-  //  * x << (32 - y) ^ x >>> y => x ror y
-  // as well as their commuted form.
   if (m.left().IsWord32Shl() && m.right().IsWord32Shr()) {
     shl = m.left().node();
     shr = m.right().node();
@@ -1842,8 +1857,13 @@ Reduction MachineOperatorReducer::TryMatchWord32Ror(Node* node) {

   if (mshl.right().HasResolvedValue() && mshr.right().HasResolvedValue()) {
     // Case where y is a constant.
-    if (mshl.right().ResolvedValue() + mshr.right().ResolvedValue() != 32)
+    if (mshl.right().ResolvedValue() + mshr.right().ResolvedValue() != 32) {
       return NoChange();
+    }
+    if (node->opcode() == IrOpcode::kWord32Xor &&
+        (mshl.right().ResolvedValue() & 31) == 0) {
+      return NoChange();
+    }
   } else {
     Node* sub = nullptr;
     Node* y = nullptr;
@@ -1859,6 +1879,9 @@ Reduction MachineOperatorReducer::TryMatchWord32Ror(Node* node) {

     Int32BinopMatcher msub(sub);
     if (!msub.left().Is(32) || msub.right().node() != y) return NoChange();
+    if (node->opcode() == IrOpcode::kWord32Xor) {
+      return NoChange();  // Can't guarantee y & 31 != 0.
+    }
   }

   node->ReplaceInput(0, mshl.left().node());
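
The XOR side condition is easiest to see with y a multiple of 32: both shift counts collapse to zero mod 32, so the OR pattern still yields x (equal to x ror 0) while the XOR pattern yields 0, and the rewrite would miscompile. A small standalone check:

#include <cstdint>
#include <iostream>

uint32_t Ror(uint32_t x, uint32_t y) {  // 32-bit rotate right, y taken mod 32
  y &= 31;
  return (x >> y) | (x << ((32 - y) & 31));
}

int main() {
  uint32_t x = 0xDEADBEEF;
  uint32_t y = 32;  // multiple of 32: both shift counts are 0 mod 32
  uint32_t shl = x << (y & 31);         // x << 0 == x
  uint32_t shr = x >> ((32 - y) & 31);  // x >>> 0 == x
  std::cout << std::hex << (shl | shr) << "\n";  // deadbeef == Ror(x, 0): OK
  std::cout << std::hex << (shl ^ shr) << "\n";  // 0 != Ror(x, 0): wrong
}
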
diff --git a/v8/src/compiler/simplified-operator-reducer.cc b/v8/src/compiler/simplified-operator-reducer.cc
index b1d3f8b2f3..f31a6c9a03 100644
--- a/v8/src/compiler/simplified-operator-reducer.cc
+++ b/v8/src/compiler/simplified-operator-reducer.cc
@@ -75,7 +75,7 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
     case IrOpcode::kChangeInt32ToTagged: {
       Int32Matcher m(node->InputAt(0));
       if (m.HasResolvedValue()) return ReplaceNumber(m.ResolvedValue());
-      if (m.IsChangeTaggedToInt32() || m.IsChangeTaggedSignedToInt32()) {
+      if (m.IsChangeTaggedSignedToInt32()) {
         return Replace(m.InputAt(0));
       }
       break;
diff --git a/v8/src/compiler/wasm-compiler.cc b/v8/src/compiler/wasm-compiler.cc
index 8956e5e887..42b6911307 100644
--- a/v8/src/compiler/wasm-compiler.cc
+++ b/v8/src/compiler/wasm-compiler.cc
@@ -6568,9 +6568,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
         Node* jump_table_slot =
             gasm_->IntAdd(jump_table_start, jump_table_offset);
         args[0] = jump_table_slot;
+        Node* instance_node = gasm_->LoadFromObject(
+            MachineType::TaggedPointer(), function_data,
+            wasm::ObjectAccess::ToTagged(WasmExportedFunctionData::kInstanceOffset));

         BuildWasmCall(sig_, VectorOf(args), VectorOf(rets),
-                      wasm::kNoCodePosition, nullptr, kNoRetpoline,
+                      wasm::kNoCodePosition, instance_node, kNoRetpoline,
                       frame_state);
       }
     }
diff --git a/v8/src/execution/arguments-inl.h b/v8/src/execution/arguments-inl.h
index 0be2325837..2f69cd7adc 100644
--- a/v8/src/execution/arguments-inl.h
+++ b/v8/src/execution/arguments-inl.h
@@ -14,6 +14,15 @@
 namespace v8 {
 namespace internal {

+template <ArgumentsType T>
+Arguments<T>::ChangeValueScope::ChangeValueScope(Isolate* isolate,
+                                                 Arguments* args, int index,
+                                                 Object value)
+    : location_(args->address_of_arg_at(index)) {
+  old_value_ = handle(Object(*location_), isolate);
+  *location_ = value.ptr();
+}
+
 template <ArgumentsType T>
 int Arguments<T>::smi_at(int index) const {
   return Smi::ToInt(Object(*address_of_arg_at(index)));
diff --git a/v8/src/execution/arguments.h b/v8/src/execution/arguments.h
index 39877cf4d2..de70619d69 100644
--- a/v8/src/execution/arguments.h
+++ b/v8/src/execution/arguments.h
@@ -33,6 +33,18 @@ namespace internal {
 template <ArgumentsType arguments_type>
 class Arguments {
  public:
+  // Scope to temporarily change the value of an argument.
+  class ChangeValueScope {
+   public:
+    inline ChangeValueScope(Isolate* isolate, Arguments* args, int index,
+                            Object value);
+    ~ChangeValueScope() { *location_ = old_value_->ptr(); }
+
+   private:
+    Address* location_;
+    Handle<Object> old_value_;
+  };
+
   Arguments(int length, Address* arguments)
       : length_(length), arguments_(arguments) {
     DCHECK_GE(length_, 0);
@@ -51,10 +63,6 @@ class Arguments {

   inline double number_at(int index) const;

-  inline void set_at(int index, Object value) {
-    *address_of_arg_at(index) = value.ptr();
-  }
-
   inline FullObjectSlot slot_at(int index) const {
     return FullObjectSlot(address_of_arg_at(index));
   }
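
The header change replaces the unrestricted set_at() with an RAII scope, so a runtime function that rewrites an argument slot restores it on every exit path; V8 additionally keeps the old value in a Handle so it stays valid if GC moves the object. A minimal sketch of the same pattern with plain pointers (illustrative, not V8 code):

#include <cstdint>

class SlotRestoreScope {  // loose analogue of Arguments::ChangeValueScope
 public:
  SlotRestoreScope(uintptr_t* location, uintptr_t new_value)
      : location_(location), old_value_(*location) {
    *location_ = new_value;  // overwrite for the duration of the scope
  }
  ~SlotRestoreScope() { *location_ = old_value_; }  // restore on any exit

 private:
  uintptr_t* location_;
  uintptr_t old_value_;
};
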
diff --git a/v8/src/execution/isolate-inl.h b/v8/src/execution/isolate-inl.h
index 96ea770e65..3486925aed 100644
--- a/v8/src/execution/isolate-inl.h
+++ b/v8/src/execution/isolate-inl.h
@@ -35,7 +35,7 @@ NativeContext Isolate::raw_native_context() {
 }

 Object Isolate::pending_exception() {
-  DCHECK(has_pending_exception());
+  CHECK(has_pending_exception());
   DCHECK(!thread_local_top()->pending_exception_.IsException(this));
   return thread_local_top()->pending_exception_;
 }
diff --git a/v8/src/heap/concurrent-marking.cc b/v8/src/heap/concurrent-marking.cc
index eb1511f71d..085af90436 100644
--- a/v8/src/heap/concurrent-marking.cc
+++ b/v8/src/heap/concurrent-marking.cc
@@ -433,7 +433,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
     isolate->PrintWithTimestamp("Starting concurrent marking task %d\n",
                                 task_id);
   }
-  bool ephemeron_marked = false;
+  bool another_ephemeron_iteration = false;

   {
     TimedScope scope(&time_ms);
@@ -443,7 +443,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,

       while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
         if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
-          ephemeron_marked = true;
+          another_ephemeron_iteration = true;
         }
       }
     }
@@ -484,6 +484,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
           current_marked_bytes += visited_size;
         }
       }
+      if (objects_processed > 0) another_ephemeron_iteration = true;
       marked_bytes += current_marked_bytes;
       base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                 marked_bytes);
@@ -499,7 +500,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,

       while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
         if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
-          ephemeron_marked = true;
+          another_ephemeron_iteration = true;
         }
       }
     }
@@ -519,8 +520,8 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
     base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
     total_marked_bytes_ += marked_bytes;

-    if (ephemeron_marked) {
-      set_ephemeron_marked(true);
+    if (another_ephemeron_iteration) {
+      set_another_ephemeron_iteration(true);
     }
   }
   if (FLAG_trace_concurrent_marking) {
diff --git a/v8/src/heap/concurrent-marking.h b/v8/src/heap/concurrent-marking.h
index c685f5cca6..54f6057f58 100644
--- a/v8/src/heap/concurrent-marking.h
+++ b/v8/src/heap/concurrent-marking.h
@@ -91,10 +91,12 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {

   size_t TotalMarkedBytes();

-  void set_ephemeron_marked(bool ephemeron_marked) {
-    ephemeron_marked_.store(ephemeron_marked);
+  void set_another_ephemeron_iteration(bool another_ephemeron_iteration) {
+    another_ephemeron_iteration_.store(another_ephemeron_iteration);
+  }
+  bool another_ephemeron_iteration() {
+    return another_ephemeron_iteration_.load();
   }
-  bool ephemeron_marked() { return ephemeron_marked_.load(); }

  private:
   struct TaskState {
@@ -115,7 +117,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
   WeakObjects* const weak_objects_;
   TaskState task_state_[kMaxTasks + 1];
   std::atomic<size_t> total_marked_bytes_{0};
-  std::atomic<bool> ephemeron_marked_{false};
+  std::atomic<bool> another_ephemeron_iteration_{false};
 };

 }  // namespace internal
diff --git a/v8/src/heap/cppgc/marker.cc b/v8/src/heap/cppgc/marker.cc
index d30bb0a8ec..304244502a 100644
--- a/v8/src/heap/cppgc/marker.cc
+++ b/v8/src/heap/cppgc/marker.cc
@@ -241,6 +241,7 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
   }
   config_.stack_state = stack_state;
   config_.marking_type = MarkingConfig::MarkingType::kAtomic;
+  mutator_marking_state_.set_in_atomic_pause();

   // Lock guards against changes to {Weak}CrossThreadPersistent handles, that
   // may conflict with marking. E.g., a WeakCrossThreadPersistent may be
@@ -407,7 +408,9 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
     size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
   StatsCollector::EnabledScope stats_scope(
       heap().stats_collector(), StatsCollector::kMarkTransitiveClosure);
+  bool saved_did_discover_new_ephemeron_pairs;
   do {
+    mutator_marking_state_.ResetDidDiscoverNewEphemeronPairs();
     if ((config_.marking_type == MarkingConfig::MarkingType::kAtomic) ||
         schedule_.ShouldFlushEphemeronPairs()) {
       mutator_marking_state_.FlushDiscoveredEphemeronPairs();
@@ -484,6 +487,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
       }
     }

+    saved_did_discover_new_ephemeron_pairs =
+        mutator_marking_state_.DidDiscoverNewEphemeronPairs();
     {
       StatsCollector::EnabledScope stats_scope(
           heap().stats_collector(), StatsCollector::kMarkProcessEphemerons);
@@ -497,7 +502,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
         return false;
       }
     }
-  } while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty());
+  } while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty() ||
+           saved_did_discover_new_ephemeron_pairs);
   return true;
 }

diff --git a/v8/src/heap/cppgc/marking-state.h b/v8/src/heap/cppgc/marking-state.h
index 6e08fc3e10..ca43372a96 100644
--- a/v8/src/heap/cppgc/marking-state.h
+++ b/v8/src/heap/cppgc/marking-state.h
@@ -9,6 +9,7 @@

 #include "include/cppgc/trace-trait.h"
 #include "include/cppgc/visitor.h"
+#include "src/base/logging.h"
 #include "src/heap/cppgc/compaction-worklists.h"
 #include "src/heap/cppgc/globals.h"
 #include "src/heap/cppgc/heap-object-header.h"
@@ -111,6 +112,16 @@ class MarkingStateBase {
     movable_slots_worklist_.reset();
   }

+  bool DidDiscoverNewEphemeronPairs() const {
+    return discovered_new_ephemeron_pairs_;
+  }
+
+  void ResetDidDiscoverNewEphemeronPairs() {
+    discovered_new_ephemeron_pairs_ = false;
+  }
+
+  void set_in_atomic_pause() { in_atomic_pause_ = true; }
+
  protected:
   inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);

@@ -144,6 +155,9 @@ class MarkingStateBase {
       movable_slots_worklist_;

   size_t marked_bytes_ = 0;
+  bool in_ephemeron_processing_ = false;
+  bool discovered_new_ephemeron_pairs_ = false;
+  bool in_atomic_pause_ = false;
 };

 MarkingStateBase::MarkingStateBase(HeapBase& heap,
@@ -270,10 +284,23 @@ void MarkingStateBase::ProcessWeakContainer(const void* object,
 void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
                                         TraceDescriptor value_desc,
                                         Visitor& visitor) {
-  // Filter out already marked keys. The write barrier for WeakMember
-  // ensures that any newly set value after this point is kept alive and does
-  // not require the callback.
-  if (HeapObjectHeader::FromPayload(key).IsMarked<AccessMode::kAtomic>()) {
+  // ProcessEphemeron is not expected to find new ephemerons recursively, which
+  // would break the main marking loop.
+  DCHECK(!in_ephemeron_processing_);
+  in_ephemeron_processing_ = true;
+  // Keys are considered live even in incremental/concurrent marking settings
+  // because the write barrier for WeakMember ensures that any newly set value
+  // after this point is kept alive and does not require the callback.
+  const bool key_in_construction = HeapObjectHeader::FromPayload(key)
+                                       .IsInConstruction<AccessMode::kAtomic>();
+  const bool key_considered_as_live =
+      key_in_construction
+          ? in_atomic_pause_
+          : HeapObjectHeader::FromPayload(key).IsMarked<AccessMode::kAtomic>();
+  DCHECK_IMPLIES(
+      key_in_construction && in_atomic_pause_,
+      HeapObjectHeader::FromPayload(key).IsMarked<AccessMode::kAtomic>());
+  if (key_considered_as_live) {
     if (value_desc.base_object_payload) {
       MarkAndPush(value_desc.base_object_payload, value_desc);
     } else {
@@ -281,9 +308,11 @@ void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
       // should be immediately traced.
       value_desc.callback(&visitor, value);
     }
-    return;
+  } else {
+    discovered_ephemeron_pairs_worklist_.Push({key, value, value_desc});
+    discovered_new_ephemeron_pairs_ = true;
   }
-  discovered_ephemeron_pairs_worklist_.Push({key, value, value_desc});
+  in_ephemeron_processing_ = false;
 }

 void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
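
The key subtlety in ProcessEphemeron: an object still in construction has no dependable mark bit, so its liveness can only be assumed once the atomic pause guarantees construction has finished; otherwise the pair is re-queued rather than guessed at. The rule as a standalone predicate (illustrative, not cppgc code):

bool KeyConsideredLive(bool key_in_construction, bool key_is_marked,
                       bool in_atomic_pause) {
  // In-construction keys are conservatively treated as live only during the
  // atomic pause; outside it, the pair returns to the discovered worklist.
  return key_in_construction ? in_atomic_pause : key_is_marked;
}
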
diff --git a/v8/src/heap/incremental-marking.cc b/v8/src/heap/incremental-marking.cc
index a093835981..efedcdb32b 100644
--- a/v8/src/heap/incremental-marking.cc
+++ b/v8/src/heap/incremental-marking.cc
@@ -921,7 +921,8 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
     // This ignores that case where the embedder finds new V8-side objects. The
     // assumption is that large graphs are well connected and can mostly be
     // processed on their own. For small graphs, helping is not necessary.
-    v8_bytes_processed = collector_->ProcessMarkingWorklist(bytes_to_process);
+    std::tie(v8_bytes_processed, std::ignore) =
+        collector_->ProcessMarkingWorklist(bytes_to_process);
     StepResult v8_result = local_marking_worklists()->IsEmpty()
                                ? StepResult::kNoImmediateWork
                                : StepResult::kMoreWorkRemaining;
diff --git a/v8/src/heap/mark-compact.cc b/v8/src/heap/mark-compact.cc
index 756bf6e5e1..9e4f36d35c 100644
--- a/v8/src/heap/mark-compact.cc
+++ b/v8/src/heap/mark-compact.cc
@@ -1603,24 +1603,24 @@ void MarkCompactCollector::MarkDescriptorArrayFromWriteBarrier(
       descriptors, number_of_own_descriptors);
 }

-void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
-  bool work_to_do = true;
+bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
   int iterations = 0;
   int max_iterations = FLAG_ephemeron_fixpoint_iterations;

-  while (work_to_do) {
+  bool another_ephemeron_iteration_main_thread;
+
+  do {
     PerformWrapperTracing();

     if (iterations >= max_iterations) {
       // Give up fixpoint iteration and switch to linear algorithm.
-      ProcessEphemeronsLinear();
-      break;
+      return false;
     }

     // Move ephemerons from next_ephemerons into current_ephemerons to
     // drain them in this iteration.
     weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
-    heap()->concurrent_marking()->set_ephemeron_marked(false);
+    heap()->concurrent_marking()->set_another_ephemeron_iteration(false);

     {
       TRACE_GC(heap()->tracer(),
@@ -1631,47 +1631,54 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
             TaskPriority::kUserBlocking);
       }

-      work_to_do = ProcessEphemerons();
+      another_ephemeron_iteration_main_thread = ProcessEphemerons();
       FinishConcurrentMarking();
     }

     CHECK(weak_objects_.current_ephemerons.IsEmpty());
     CHECK(weak_objects_.discovered_ephemerons.IsEmpty());

-    work_to_do = work_to_do || !local_marking_worklists()->IsEmpty() ||
-                 heap()->concurrent_marking()->ephemeron_marked() ||
-                 !local_marking_worklists()->IsEmbedderEmpty() ||
-                 !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
     ++iterations;
-  }
+  } while (another_ephemeron_iteration_main_thread ||
+           heap()->concurrent_marking()->another_ephemeron_iteration() ||
+           !local_marking_worklists()->IsEmpty() ||
+           !local_marking_worklists()->IsEmbedderEmpty() ||
+           !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());

   CHECK(local_marking_worklists()->IsEmpty());
   CHECK(weak_objects_.current_ephemerons.IsEmpty());
   CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+  return true;
 }

 bool MarkCompactCollector::ProcessEphemerons() {
   Ephemeron ephemeron;
-  bool ephemeron_marked = false;
+  bool another_ephemeron_iteration = false;

   // Drain current_ephemerons and push ephemerons where key and value are still
   // unreachable into next_ephemerons.
   while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
     if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
-      ephemeron_marked = true;
+      another_ephemeron_iteration = true;
     }
   }

   // Drain marking worklist and push discovered ephemerons into
   // discovered_ephemerons.
-  DrainMarkingWorklist();
+  size_t objects_processed;
+  std::tie(std::ignore, objects_processed) = ProcessMarkingWorklist(0);
+
+  // As soon as a single object was processed and potentially marked another
+  // object, we need another iteration. Otherwise we might fail to apply
+  // ephemeron semantics to it.
+  if (objects_processed > 0) another_ephemeron_iteration = true;

   // Drain discovered_ephemerons (filled in the drain MarkingWorklist-phase
   // before) and push ephemerons where key and value are still unreachable into
   // next_ephemerons.
   while (weak_objects_.discovered_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
     if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
-      ephemeron_marked = true;
+      another_ephemeron_iteration = true;
     }
   }

@@ -1679,7 +1686,7 @@ bool MarkCompactCollector::ProcessEphemerons() {
   weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThreadTask);
   weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);

-  return ephemeron_marked;
+  return another_ephemeron_iteration;
 }

 void MarkCompactCollector::ProcessEphemeronsLinear() {
@@ -1765,6 +1772,12 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
   ephemeron_marking_.newly_discovered.shrink_to_fit();

   CHECK(local_marking_worklists()->IsEmpty());
+  CHECK(weak_objects_.current_ephemerons.IsEmpty());
+  CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+
+  // Flush local ephemerons for main task to global pool.
+  weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThreadTask);
+  weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
 }

 void MarkCompactCollector::PerformWrapperTracing() {
@@ -1786,9 +1799,11 @@ void MarkCompactCollector::PerformWrapperTracing() {
 void MarkCompactCollector::DrainMarkingWorklist() { ProcessMarkingWorklist(0); }

 template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
-size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
+std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
+    size_t bytes_to_process) {
   HeapObject object;
   size_t bytes_processed = 0;
+  size_t objects_processed = 0;
   bool is_per_context_mode = local_marking_worklists()->IsPerContextMode();
   Isolate* isolate = heap()->isolate();
   while (local_marking_worklists()->Pop(&object) ||
@@ -1828,18 +1843,19 @@ size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
                                           map, object, visited_size);
     }
     bytes_processed += visited_size;
+    objects_processed++;
     if (bytes_to_process && bytes_processed >= bytes_to_process) {
       break;
     }
   }
-  return bytes_processed;
+  return std::make_pair(bytes_processed, objects_processed);
 }

 // Generate definitions for use in other files.
-template size_t MarkCompactCollector::ProcessMarkingWorklist<
+template std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist<
     MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>(
     size_t bytes_to_process);
-template size_t MarkCompactCollector::ProcessMarkingWorklist<
+template std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist<
     MarkCompactCollector::MarkingWorklistProcessingMode::
         kTrackNewlyDiscoveredObjects>(size_t bytes_to_process);

@@ -1864,7 +1880,23 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
   // buffer, flush it into global pool.
   weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);

-  ProcessEphemeronsUntilFixpoint();
+  if (!ProcessEphemeronsUntilFixpoint()) {
+    // Fixpoint iteration needed too many iterations and was cancelled. Use the
+    // guaranteed linear algorithm.
+    ProcessEphemeronsLinear();
+  }
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    Ephemeron ephemeron;
+
+    weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
+
+    while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
+      CHECK(!ProcessEphemeron(ephemeron.key, ephemeron.value));
+    }
+  }
+#endif

   CHECK(local_marking_worklists()->IsEmpty());
   CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
@@ -2525,28 +2557,19 @@ void MarkCompactCollector::ClearJSWeakRefs() {
       RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
     }

-    HeapObject unregister_token =
-        HeapObject::cast(weak_cell.unregister_token());
+    HeapObject unregister_token = weak_cell.unregister_token();
     if (!non_atomic_marking_state()->IsBlackOrGrey(unregister_token)) {
       // The unregister token is dead. Remove any corresponding entries in the
       // key map. Multiple WeakCell with the same token will have all their
       // unregister_token field set to undefined when processing the first
       // WeakCell. Like above, we're modifying pointers during GC, so record the
       // slots.
-      HeapObject undefined = ReadOnlyRoots(isolate()).undefined_value();
       JSFinalizationRegistry finalization_registry =
           JSFinalizationRegistry::cast(weak_cell.finalization_registry());
       finalization_registry.RemoveUnregisterToken(
           JSReceiver::cast(unregister_token), isolate(),
-          [undefined](WeakCell matched_cell) {
-            matched_cell.set_unregister_token(undefined);
-          },
+          JSFinalizationRegistry::kKeepMatchedCellsInRegistry,
           gc_notify_updated_slot);
-      // The following is necessary because in the case that weak_cell has
-      // already been popped and removed from the FinalizationRegistry, the call
-      // to JSFinalizationRegistry::RemoveUnregisterToken above will not find
-      // weak_cell itself to clear its unregister token.
-      weak_cell.set_unregister_token(undefined);
     } else {
       // The unregister_token is alive.
       ObjectSlot slot = weak_cell.RawField(WeakCell::kUnregisterTokenOffset);
diff --git a/v8/src/heap/mark-compact.h b/v8/src/heap/mark-compact.h
index 733588ae80..0674ce674f 100644
--- a/v8/src/heap/mark-compact.h
+++ b/v8/src/heap/mark-compact.h
@@ -588,7 +588,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // is drained until it is empty.
   template <MarkingWorklistProcessingMode mode =
                 MarkingWorklistProcessingMode::kDefault>
-  size_t ProcessMarkingWorklist(size_t bytes_to_process);
+  std::pair<size_t, size_t> ProcessMarkingWorklist(size_t bytes_to_process);

  private:
   void ComputeEvacuationHeuristics(size_t area_size,
@@ -634,8 +634,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   bool ProcessEphemeron(HeapObject key, HeapObject value);

   // Marks ephemerons and drains marking worklist iteratively
-  // until a fixpoint is reached.
-  void ProcessEphemeronsUntilFixpoint();
+  // until a fixpoint is reached. Returns false if too many iterations have been
+  // tried and the linear approach should be used.
+  bool ProcessEphemeronsUntilFixpoint();

   // Drains ephemeron and marking worklists. Single iteration of the
   // fixpoint iteration.
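
Shape of the reworked driver: the caller, not the fixpoint loop, now decides to fall back to the linear algorithm, and a pass counts as progress whenever it processed any object at all, since every processed object can newly mark an ephemeron key. A condensed sketch with assumed helper names (not V8 code):

bool ProcessEphemerons();  // one main-thread pass; true if more work was found

bool ProcessEphemeronsUntilFixpoint(int max_iterations) {
  int iterations = 0;
  bool another_iteration;
  do {
    if (iterations++ >= max_iterations) {
      return false;  // caller falls back to the linear algorithm
    }
    another_iteration = ProcessEphemerons();
  } while (another_iteration);
  return true;  // fixpoint reached: no pass discovered further work
}
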
diff --git a/v8/src/heap/marking-visitor-inl.h b/v8/src/heap/marking-visitor-inl.h
index 55c37e535b..557d6248fc 100644
--- a/v8/src/heap/marking-visitor-inl.h
+++ b/v8/src/heap/marking-visitor-inl.h
@@ -326,7 +326,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitWeakCell(
   this->VisitMapPointer(weak_cell);
   WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
   HeapObject target = weak_cell.relaxed_target();
-  HeapObject unregister_token = HeapObject::cast(weak_cell.unregister_token());
+  HeapObject unregister_token = weak_cell.relaxed_unregister_token();
   concrete_visitor()->SynchronizePageAccess(target);
   concrete_visitor()->SynchronizePageAccess(unregister_token);
   if (concrete_visitor()->marking_state()->IsBlackOrGrey(target) &&
diff --git a/v8/src/ic/accessor-assembler.cc b/v8/src/ic/accessor-assembler.cc
index 59a950b48f..2a9dc032a8 100644
--- a/v8/src/ic/accessor-assembler.cc
+++ b/v8/src/ic/accessor-assembler.cc
@@ -654,8 +654,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
     Comment("module export");
     TNode<UintPtrT> index =
         DecodeWord<LoadHandler::ExportsIndexBits>(handler_word);
-    TNode<Module> module = LoadObjectField<Module>(
-        CAST(p->receiver()), JSModuleNamespace::kModuleOffset);
+    TNode<Module> module =
+        LoadObjectField<Module>(CAST(holder), JSModuleNamespace::kModuleOffset);
     TNode<ObjectHashTable> exports =
         LoadObjectField<ObjectHashTable>(module, Module::kExportsOffset);
     TNode<Cell> cell = CAST(LoadFixedArrayElement(exports, index));
diff --git a/v8/src/ic/ic.cc b/v8/src/ic/ic.cc
index e607bb53a8..1fadcc1ead 100644
--- a/v8/src/ic/ic.cc
+++ b/v8/src/ic/ic.cc
@@ -857,8 +857,14 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
                                Smi::ToInt(lookup->name()->GetHash()));
         // We found the accessor, so the entry must exist.
         DCHECK(entry.is_found());
-        int index = ObjectHashTable::EntryToValueIndex(entry);
-        return LoadHandler::LoadModuleExport(isolate(), index);
+        int value_index = ObjectHashTable::EntryToValueIndex(entry);
+        Handle<Smi> smi_handler =
+            LoadHandler::LoadModuleExport(isolate(), value_index);
+        if (holder_is_lookup_start_object) {
+          return smi_handler;
+        }
+        return LoadHandler::LoadFromPrototype(isolate(), map, holder,
+                                              smi_handler);
       }

       Handle<Object> accessors = lookup->GetAccessors();
diff --git a/v8/src/libsampler/sampler.cc b/v8/src/libsampler/sampler.cc
index e933c61864..f187aeaca6 100644
--- a/v8/src/libsampler/sampler.cc
+++ b/v8/src/libsampler/sampler.cc
@@ -60,7 +60,7 @@ using zx_thread_state_general_regs_t = zx_arm64_general_regs_t;
 #include "src/base/atomic-utils.h"
 #include "src/base/platform/platform.h"

-#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
+#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && !defined(OS_OHOS)
1183
1184 // Not all versions of Android's C library provide ucontext_t.
1185 // Detect this and provide custom but compatible definitions. Note that these
diff --git a/v8/src/objects/js-weak-refs-inl.h b/v8/src/objects/js-weak-refs-inl.h
index aa51ee18c8..0e39b00d13 100644
--- a/v8/src/objects/js-weak-refs-inl.h
+++ b/v8/src/objects/js-weak-refs-inl.h
@@ -71,16 +71,14 @@ bool JSFinalizationRegistry::Unregister(
   // key. Each WeakCell will be in the "active_cells" or "cleared_cells" list of
   // its FinalizationRegistry; remove it from there.
   return finalization_registry->RemoveUnregisterToken(
-      *unregister_token, isolate,
-      [isolate](WeakCell matched_cell) {
-        matched_cell.RemoveFromFinalizationRegistryCells(isolate);
-      },
+      *unregister_token, isolate, kRemoveMatchedCellsFromRegistry,
       [](HeapObject, ObjectSlot, Object) {});
 }

-template <typename MatchCallback, typename GCNotifyUpdatedSlotCallback>
+template <typename GCNotifyUpdatedSlotCallback>
 bool JSFinalizationRegistry::RemoveUnregisterToken(
-    JSReceiver unregister_token, Isolate* isolate, MatchCallback match_callback,
+    JSReceiver unregister_token, Isolate* isolate,
+    RemoveUnregisterTokenMode removal_mode,
     GCNotifyUpdatedSlotCallback gc_notify_updated_slot) {
   // This method is called from both FinalizationRegistry#unregister and for
   // removing weakly-held dead unregister tokens. The latter is during GC so
@@ -118,7 +116,16 @@ bool JSFinalizationRegistry::RemoveUnregisterToken(
     value = weak_cell.key_list_next();
     if (weak_cell.unregister_token() == unregister_token) {
       // weak_cell has the same unregister token; remove it from the key list.
-      match_callback(weak_cell);
+      switch (removal_mode) {
+        case kRemoveMatchedCellsFromRegistry:
+          weak_cell.RemoveFromFinalizationRegistryCells(isolate);
+          break;
+        case kKeepMatchedCellsInRegistry:
+          // Do nothing.
+          break;
+      }
+      // Clear unregister token-related fields.
+      weak_cell.set_unregister_token(undefined);
       weak_cell.set_key_list_prev(undefined);
       weak_cell.set_key_list_next(undefined);
       was_present = true;
@@ -163,6 +170,10 @@ HeapObject WeakCell::relaxed_target() const {
   return TaggedField<HeapObject>::Relaxed_Load(*this, kTargetOffset);
 }

+HeapObject WeakCell::relaxed_unregister_token() const {
+  return TaggedField<HeapObject>::Relaxed_Load(*this, kUnregisterTokenOffset);
+}
+
 template <typename GCNotifyUpdatedSlotCallback>
 void WeakCell::Nullify(Isolate* isolate,
                        GCNotifyUpdatedSlotCallback gc_notify_updated_slot) {
diff --git a/v8/src/objects/js-weak-refs.h b/v8/src/objects/js-weak-refs.h
index 300673381a..88361ad1c0 100644
--- a/v8/src/objects/js-weak-refs.h
+++ b/v8/src/objects/js-weak-refs.h
@@ -53,10 +53,14 @@ class JSFinalizationRegistry : public JSObject {
   // it modifies slots in key_map and WeakCells and the normal write barrier is
   // disabled during GC, we need to tell the GC about the modified slots via the
   // gc_notify_updated_slot function.
-  template <typename MatchCallback, typename GCNotifyUpdatedSlotCallback>
+  enum RemoveUnregisterTokenMode {
+    kRemoveMatchedCellsFromRegistry,
+    kKeepMatchedCellsInRegistry
+  };
+  template <typename GCNotifyUpdatedSlotCallback>
   inline bool RemoveUnregisterToken(
       JSReceiver unregister_token, Isolate* isolate,
-      MatchCallback match_callback,
+      RemoveUnregisterTokenMode removal_mode,
       GCNotifyUpdatedSlotCallback gc_notify_updated_slot);

   // Returns true if the cleared_cells list is non-empty.
@@ -93,6 +97,9 @@ class WeakCell : public TorqueGeneratedWeakCell<WeakCell, HeapObject> {
   // Provide relaxed load access to target field.
   inline HeapObject relaxed_target() const;

+  // Provide relaxed load access to the unregister token field.
+  inline HeapObject relaxed_unregister_token() const;
+
   // Nullify is called during GC and it modifies the pointers in WeakCell and
   // JSFinalizationRegistry. Thus we need to tell the GC about the modified
   // slots via the gc_notify_updated_slot function. The normal write barrier is
diff --git a/v8/src/objects/js-weak-refs.tq b/v8/src/objects/js-weak-refs.tq
index 9008f64290..3447e31b71 100644
--- a/v8/src/objects/js-weak-refs.tq
+++ b/v8/src/objects/js-weak-refs.tq
@@ -22,7 +22,7 @@ extern class JSFinalizationRegistry extends JSObject {
 extern class WeakCell extends HeapObject {
   finalization_registry: Undefined|JSFinalizationRegistry;
   target: Undefined|JSReceiver;
-  unregister_token: JSAny;
+  unregister_token: Undefined|JSReceiver;
   holdings: JSAny;

   // For storing doubly linked lists of WeakCells in JSFinalizationRegistry's
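
The RemoveUnregisterTokenMode enum replaces the ad-hoc match callback with the two behaviors the callers actually need: FinalizationRegistry.prototype.unregister drops the matched cells from the registry, while the GC path for dead tokens keeps them. A hedged JS sketch of the user-facing path (the cleanup callback and holdings values are illustrative):

const registry = new FinalizationRegistry(held => console.log("cleanup", held));
let token = {};
registry.register({}, "a", token);
registry.register({}, "b", token);  // two WeakCells share one unregister token
registry.unregister(token);         // kRemoveMatchedCellsFromRegistry: both
                                    // cells leave the registry; no cleanup runs
token = null;                       // the token itself may now be collected
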
diff --git a/v8/src/objects/objects.cc b/v8/src/objects/objects.cc
index 96c84dab5f..0c1cc482de 100644
--- a/v8/src/objects/objects.cc
+++ b/v8/src/objects/objects.cc
@@ -196,6 +196,8 @@ std::ostream& operator<<(std::ostream& os, PropertyCellType type) {
       return os << "ConstantType";
     case PropertyCellType::kMutable:
       return os << "Mutable";
+    case PropertyCellType::kInTransition:
+      return os << "InTransition";
   }
   UNREACHABLE();
 }
@@ -2527,6 +2529,12 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
           Maybe<bool> result =
               JSObject::SetPropertyWithInterceptor(it, should_throw, value);
           if (result.IsNothing() || result.FromJust()) return result;
+          // Assuming that the callback has side effects, we use
+          // Object::SetSuperProperty() which works properly regardless of
+          // whether the property was present on the receiver or not when
+          // storing to the receiver.
+          // Proceed with the lookup from the next state.
+          it->Next();
         } else {
           Maybe<PropertyAttributes> maybe_attributes =
               JSObject::GetPropertyAttributesWithInterceptor(it);
@@ -2534,11 +2542,21 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
           if ((maybe_attributes.FromJust() & READ_ONLY) != 0) {
             return WriteToReadOnlyProperty(it, value, should_throw);
           }
-          if (maybe_attributes.FromJust() == ABSENT) break;
-          *found = false;
-          return Nothing<bool>();
+          // At this point we might have called the interceptor's query or
+          // getter callback. Assuming that the callbacks have side effects,
+          // we use Object::SetSuperProperty() which works properly regardless
+          // of whether the property was present on the receiver or not when
+          // storing to the receiver.
+          if (maybe_attributes.FromJust() == ABSENT) {
+            // Proceed with the lookup from the next state.
+            it->Next();
+          } else {
+            // Finish the lookup in order to make Object::SetSuperProperty()
+            // store the property to the receiver.
+            it->NotFound();
+          }
         }
-        break;
+        return Object::SetSuperProperty(it, value, store_origin, should_throw);
       }

       case LookupIterator::ACCESSOR: {
@@ -2601,6 +2619,26 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
   return Nothing<bool>();
 }

+bool Object::CheckContextualStoreToJSGlobalObject(
+    LookupIterator* it, Maybe<ShouldThrow> should_throw) {
+  Isolate* isolate = it->isolate();
+
+  if (it->GetReceiver()->IsJSGlobalObject(isolate) &&
+      (GetShouldThrow(isolate, should_throw) == ShouldThrow::kThrowOnError)) {
+    if (it->state() == LookupIterator::TRANSITION) {
+      // The property cell that we have created is garbage because we are going
+      // to throw now instead of putting it into the global dictionary. However,
+      // the cell might already have been stored into the feedback vector, so
+      // we must invalidate it nevertheless.
+      it->transition_cell()->ClearAndInvalidate(ReadOnlyRoots(isolate));
+    }
+    isolate->Throw(*isolate->factory()->NewReferenceError(
+        MessageTemplate::kNotDefined, it->GetName()));
+    return false;
+  }
+  return true;
+}
+
 Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
                                 StoreOrigin store_origin,
                                 Maybe<ShouldThrow> should_throw) {
@@ -2611,24 +2649,9 @@ Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
     if (found) return result;
   }

-  // If the receiver is the JSGlobalObject, the store was contextual. In case
-  // the property did not exist yet on the global object itself, we have to
-  // throw a reference error in strict mode.  In sloppy mode, we continue.
-  if (it->GetReceiver()->IsJSGlobalObject() &&
-      (GetShouldThrow(it->isolate(), should_throw) ==
-       ShouldThrow::kThrowOnError)) {
-    if (it->state() == LookupIterator::TRANSITION) {
-      // The property cell that we have created is garbage because we are going
-      // to throw now instead of putting it into the global dictionary. However,
-      // the cell might already have been stored into the feedback vector, so
-      // we must invalidate it nevertheless.
-      it->transition_cell()->ClearAndInvalidate(ReadOnlyRoots(it->isolate()));
-    }
-    it->isolate()->Throw(*it->isolate()->factory()->NewReferenceError(
-        MessageTemplate::kNotDefined, it->GetName()));
+  if (!CheckContextualStoreToJSGlobalObject(it, should_throw)) {
     return Nothing<bool>();
   }
-
   return AddDataProperty(it, value, NONE, should_throw, store_origin);
 }

@@ -2695,6 +2718,9 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
             JSReceiver::GetOwnPropertyDescriptor(&own_lookup, &desc);
         MAYBE_RETURN(owned, Nothing<bool>());
         if (!owned.FromJust()) {
+          if (!CheckContextualStoreToJSGlobalObject(&own_lookup, should_throw)) {
+            return Nothing<bool>();
+          }
           return JSReceiver::CreateDataProperty(&own_lookup, value,
                                                 should_throw);
         }
@@ -6395,6 +6421,8 @@ PropertyCellType PropertyCell::UpdatedType(Isolate* isolate,
       V8_FALLTHROUGH;
     case PropertyCellType::kMutable:
       return PropertyCellType::kMutable;
+    case PropertyCellType::kInTransition:
+      UNREACHABLE();
   }
 }

@@ -6450,6 +6478,7 @@ bool PropertyCell::CheckDataIsCompatible(PropertyDetails details,
                                          Object value) {
   DisallowGarbageCollection no_gc;
   PropertyCellType cell_type = details.cell_type();
+  CHECK_NE(cell_type, PropertyCellType::kInTransition);
   if (value.IsTheHole()) {
     CHECK_EQ(cell_type, PropertyCellType::kConstant);
   } else {
@@ -6483,8 +6512,9 @@ bool PropertyCell::CanTransitionTo(PropertyDetails new_details,
       return new_details.cell_type() == PropertyCellType::kMutable ||
              (new_details.cell_type() == PropertyCellType::kConstant &&
              new_value.IsTheHole());
+    case PropertyCellType::kInTransition:
+      UNREACHABLE();
   }
-  UNREACHABLE();
 }
 #endif  // DEBUG

@@ -6710,6 +6740,7 @@ void JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap(
       JSFinalizationRegistry::cast(Object(raw_finalization_registry));
   WeakCell weak_cell = WeakCell::cast(Object(raw_weak_cell));
   DCHECK(!weak_cell.unregister_token().IsUndefined(isolate));
+  HeapObject undefined = ReadOnlyRoots(isolate).undefined_value();

   // Remove weak_cell from the linked list of other WeakCells with the same
   // unregister token and remove its unregister token from key_map if necessary
@@ -6718,7 +6749,7 @@ void JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap(
   if (weak_cell.key_list_prev().IsUndefined(isolate)) {
     SimpleNumberDictionary key_map =
         SimpleNumberDictionary::cast(finalization_registry.key_map());
-    Object unregister_token = weak_cell.unregister_token();
+    HeapObject unregister_token = weak_cell.unregister_token();
     uint32_t key = Smi::ToInt(unregister_token.GetHash());
     InternalIndex entry = key_map.FindEntry(isolate, key);
     DCHECK(entry.is_found());
@@ -6733,8 +6764,7 @@ void JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap(
       // of the key in the hash table.
       WeakCell next = WeakCell::cast(weak_cell.key_list_next());
       DCHECK_EQ(next.key_list_prev(), weak_cell);
-      next.set_key_list_prev(ReadOnlyRoots(isolate).undefined_value());
-      weak_cell.set_key_list_next(ReadOnlyRoots(isolate).undefined_value());
+      next.set_key_list_prev(undefined);
       key_map.ValueAtPut(entry, next);
     }
   } else {
@@ -6746,6 +6776,12 @@ void JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap(
       next.set_key_list_prev(weak_cell.key_list_prev());
     }
   }
+
+  // weak_cell is now removed from the unregister token map, so clear its
+  // unregister token-related fields.
+  weak_cell.set_unregister_token(undefined);
+  weak_cell.set_key_list_prev(undefined);
+  weak_cell.set_key_list_next(undefined);
 }

 }  // namespace internal
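
CheckContextualStoreToJSGlobalObject factors out the strict-mode check that Object::SetProperty already performed and lets Object::SetSuperProperty apply it too. In JS terms the rule it enforces is:

// A contextual store to an undeclared name must throw a ReferenceError in
// strict mode instead of silently creating a global property.
"use strict";
try {
  someUndeclaredName = 42;  // contextual store to the global object
} catch (e) {
  console.log(e instanceof ReferenceError);  // true
}
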
diff --git a/v8/src/objects/objects.h b/v8/src/objects/objects.h
index c68445597f..93624b7b39 100644
--- a/v8/src/objects/objects.h
+++ b/v8/src/objects/objects.h
@@ -726,6 +726,8 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
   ConvertToIndex(Isolate* isolate, Handle<Object> input,
                  MessageTemplate error_index);
+  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static bool CheckContextualStoreToJSGlobalObject(
+      LookupIterator* it, Maybe<ShouldThrow> should_throw = Nothing<ShouldThrow>());
 };

 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const Object& obj);
diff --git a/v8/src/objects/property-cell-inl.h b/v8/src/objects/property-cell-inl.h
index 154dcab41f..266f1d1e03 100644
--- a/v8/src/objects/property-cell-inl.h
+++ b/v8/src/objects/property-cell-inl.h
@@ -57,6 +57,9 @@ void PropertyCell::Transition(PropertyDetails new_details,
   DCHECK(CanTransitionTo(new_details, *new_value));
   // This code must be in sync with its counterpart in
   // PropertyCellData::Serialize.
+  PropertyDetails transition_marker = new_details;
+  transition_marker.set_cell_type(PropertyCellType::kInTransition);
+  set_property_details_raw(transition_marker.AsSmi(), kReleaseStore);
   set_value(*new_value, kReleaseStore);
   set_property_details_raw(new_details.AsSmi(), kReleaseStore);
 }
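
The kInTransition marker turns PropertyCell::Transition into a three-step publication, so a concurrent background thread never pairs the final details with a stale value. A loose JS sketch of the ordering only (JS assignments stand in for the release stores, and the cell object and helper are illustrative):

const cell = { details: "Constant", value: 1 };
function transition(newDetails, newValue) {
  cell.details = "InTransition";  // 1: advertise the in-progress transition
  cell.value = newValue;          // 2: store the new value
  cell.details = newDetails;      // 3: publish the final details
}
transition("Mutable", 2);
// A reader that observes details !== "InTransition" also observes a value
// consistent with those details.
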
diff --git a/v8/src/objects/property-details.h b/v8/src/objects/property-details.h
index bab6e297e4..cdea8883d1 100644
--- a/v8/src/objects/property-details.h
+++ b/v8/src/objects/property-details.h
@@ -208,6 +208,9 @@ enum class PropertyCellType {
   kUndefined,     // The PREMONOMORPHIC of property cells.
   kConstant,      // Cell has been assigned only once.
   kConstantType,  // Cell has been assigned only one type.
+  // Temporary value indicating an ongoing property cell state transition. Only
+  // observable by a background thread.
+  kInTransition,
   // Value for dictionaries not holding cells, must be 0:
   kNoCell = kMutable,
 };
@@ -347,8 +350,7 @@ class PropertyDetails {
   // Bit fields in value_ (type, shift, size). Must be public so the
   // constants can be embedded in generated code.
   using KindField = base::BitField<PropertyKind, 0, 1>;
-  using LocationField = KindField::Next<PropertyLocation, 1>;
-  using ConstnessField = LocationField::Next<PropertyConstness, 1>;
+  using ConstnessField = KindField::Next<PropertyConstness, 1>;
   using AttributesField = ConstnessField::Next<PropertyAttributes, 3>;
   static const int kAttributesReadOnlyMask =
       (READ_ONLY << AttributesField::kShift);
@@ -358,11 +360,12 @@ class PropertyDetails {
       (DONT_ENUM << AttributesField::kShift);

   // Bit fields for normalized/dictionary mode objects.
-  using PropertyCellTypeField = AttributesField::Next<PropertyCellType, 2>;
+  using PropertyCellTypeField = AttributesField::Next<PropertyCellType, 3>;
   using DictionaryStorageField = PropertyCellTypeField::Next<uint32_t, 23>;

   // Bit fields for fast objects.
-  using RepresentationField = AttributesField::Next<uint32_t, 3>;
+  using LocationField = AttributesField::Next<PropertyLocation, 1>;
+  using RepresentationField = LocationField::Next<uint32_t, 3>;
   using DescriptorPointer =
       RepresentationField::Next<uint32_t, kDescriptorIndexBitCount>;
   using FieldIndexField =
@@ -381,7 +384,6 @@ class PropertyDetails {
   STATIC_ASSERT(KindField::kLastUsedBit < 8);
   STATIC_ASSERT(ConstnessField::kLastUsedBit < 8);
   STATIC_ASSERT(AttributesField::kLastUsedBit < 8);
-  STATIC_ASSERT(LocationField::kLastUsedBit < 8);

   static const int kInitialIndex = 1;

@@ -411,12 +413,12 @@ class PropertyDetails {
   // with an enumeration index of 0 as a single byte.
   uint8_t ToByte() {
     // We only care about the value of KindField, ConstnessField, and
-    // AttributesField. LocationField is also stored, but it will always be
-    // kField. We've statically asserted earlier that all those fields fit into
-    // a byte together.
+    // AttributesField. We've statically asserted earlier that these fields fit
+    // into a byte together.
+
+    DCHECK_EQ(PropertyLocation::kField, location());
+    STATIC_ASSERT(static_cast<int>(PropertyLocation::kField) == 0);

-    // PropertyCellTypeField comes next, its value must be kNoCell == 0 for
-    // dictionary mode PropertyDetails anyway.
     DCHECK_EQ(PropertyCellType::kNoCell, cell_type());
     STATIC_ASSERT(static_cast<int>(PropertyCellType::kNoCell) == 0);

@@ -430,16 +432,13 @@ class PropertyDetails {
   // Only to be used for bytes obtained by ToByte. In particular, only used for
   // non-global dictionary properties.
   static PropertyDetails FromByte(uint8_t encoded_details) {
-    // The 0-extension to 32bit sets PropertyCellType to kNoCell and
-    // enumeration index to 0, as intended. Everything else is obtained from
-    // |encoded_details|.
-
+    // The 0-extension to 32bit sets PropertyLocation to kField,
+    // PropertyCellType to kNoCell, and enumeration index to 0, as intended.
+    // Everything else is obtained from |encoded_details|.
     PropertyDetails details(encoded_details);
-
-    DCHECK_EQ(0, details.dictionary_index());
     DCHECK_EQ(PropertyLocation::kField, details.location());
     DCHECK_EQ(PropertyCellType::kNoCell, details.cell_type());
-
+
+    DCHECK_EQ(0, details.dictionary_index());
     return details;
   }
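
ToByte/FromByte can squeeze a dictionary-mode PropertyDetails into one byte because every field other than kind, constness and attributes is statically zero there. An illustrative sketch with hypothetical helpers mirroring the bit widths above (kind 1 bit, constness 1 bit, attributes 3 bits); location and cell type are omitted since zero-extension restores kField/kNoCell for free:

const toByte = (kind, constness, attrs) =>
    (kind & 1) | ((constness & 1) << 1) | ((attrs & 7) << 2);
const fromByte = b =>
    ({ kind: b & 1, constness: (b >> 1) & 1, attrs: (b >> 2) & 7 });
console.log(fromByte(toByte(1, 0, 5)));  // { kind: 1, constness: 0, attrs: 5 }
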
diff --git a/v8/src/regexp/regexp-utils.cc b/v8/src/regexp/regexp-utils.cc
index 8bb243d611..bf8479c5ec 100644
--- a/v8/src/regexp/regexp-utils.cc
+++ b/v8/src/regexp/regexp-utils.cc
@@ -49,7 +49,8 @@ MaybeHandle<Object> RegExpUtils::SetLastIndex(Isolate* isolate,
   Handle<Object> value_as_object =
       isolate->factory()->NewNumberFromInt64(value);
   if (HasInitialRegExpMap(isolate, *recv)) {
-    JSRegExp::cast(*recv).set_last_index(*value_as_object, SKIP_WRITE_BARRIER);
+    JSRegExp::cast(*recv).set_last_index(*value_as_object,
+                                         UPDATE_WRITE_BARRIER);
     return recv;
   } else {
     return Object::SetProperty(
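
SetLastIndex writes the lastIndex slot that global and sticky regexps advance between matches; the fix only swaps the write-barrier mode, and the JS-visible behavior is unchanged:

const re = /a/g;
re.exec("banana");          // matches the 'a' at index 1
console.log(re.lastIndex);  // 2 -- the stored position for the next search
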
diff --git a/v8/src/runtime/runtime-classes.cc b/v8/src/runtime/runtime-classes.cc
index 088ae3d73d..6c3501334c 100644
--- a/v8/src/runtime/runtime-classes.cc
+++ b/v8/src/runtime/runtime-classes.cc
@@ -650,7 +650,12 @@ MaybeHandle<Object> DefineClass(

   Handle<JSObject> prototype = CreateClassPrototype(isolate);
   DCHECK_EQ(*constructor, args[ClassBoilerplate::kConstructorArgumentIndex]);
-  args.set_at(ClassBoilerplate::kPrototypeArgumentIndex, *prototype);
+  // Temporarily replace the argument at ClassBoilerplate::kPrototypeArgumentIndex
+  // for the subsequent calls, but use a scope to make sure to change it back
+  // before returning, to not corrupt the caller's argument frame (in particular,
+  // for the interpreter, to not clobber the register frame).
+  RuntimeArguments::ChangeValueScope set_prototype_value_scope(
+      isolate, &args, ClassBoilerplate::kPrototypeArgumentIndex, *prototype);

   if (!InitClassConstructor(isolate, class_boilerplate, constructor_parent,
                             constructor, args) ||
diff --git a/v8/test/cctest/test-api-interceptors.cc b/v8/test/cctest/test-api-interceptors.cc
index af5858eaef..36ae3a4838 100644
--- a/v8/test/cctest/test-api-interceptors.cc
+++ b/v8/test/cctest/test-api-interceptors.cc
@@ -5490,10 +5490,10 @@ void DatabaseGetter(Local<Name> name,
                     const v8::PropertyCallbackInfo<Value>& info) {
   ApiTestFuzzer::Fuzz();
   auto context = info.GetIsolate()->GetCurrentContext();
-  Local<v8::Object> db = info.Holder()
-                             ->GetRealNamedProperty(context, v8_str("db"))
-                             .ToLocalChecked()
-                             .As<v8::Object>();
+  v8::MaybeLocal<Value> maybe_db =
+      info.Holder()->GetRealNamedProperty(context, v8_str("db"));
+  if (maybe_db.IsEmpty()) return;
+  Local<v8::Object> db = maybe_db.ToLocalChecked().As<v8::Object>();
   if (!db->Has(context, name).FromJust()) return;
   info.GetReturnValue().Set(db->Get(context, name).ToLocalChecked());
 }
diff --git a/v8/test/cctest/test-js-weak-refs.cc b/v8/test/cctest/test-js-weak-refs.cc
index 1291283515..52cf8a529e 100644
--- a/v8/test/cctest/test-js-weak-refs.cc
+++ b/v8/test/cctest/test-js-weak-refs.cc
@@ -831,7 +831,7 @@ TEST(TestRemoveUnregisterToken) {

   Handle<JSObject> token1 = CreateKey("token1", isolate);
   Handle<JSObject> token2 = CreateKey("token2", isolate);
-  Handle<Object> undefined =
+  Handle<HeapObject> undefined =
       handle(ReadOnlyRoots(isolate).undefined_value(), isolate);

   Handle<WeakCell> weak_cell1a = FinalizationRegistryRegister(
@@ -861,9 +861,7 @@ TEST(TestRemoveUnregisterToken) {

   finalization_registry->RemoveUnregisterToken(
       JSReceiver::cast(*token2), isolate,
-      [undefined](WeakCell matched_cell) {
-        matched_cell.set_unregister_token(*undefined);
-      },
+      JSFinalizationRegistry::kKeepMatchedCellsInRegistry,
       [](HeapObject, ObjectSlot, Object) {});

   // Both weak_cell2a and weak_cell2b remain on the weak cell chains.
@@ -989,15 +987,17 @@ TEST(UnregisterTokenHeapVerifier) {
   v8::HandleScope outer_scope(isolate);

   {
-    // Make a new FinalizationRegistry and register an object with an unregister
-    // token that's unreachable after the IIFE returns.
+    // Make a new FinalizationRegistry and register two objects with the same
+    // unregister token that's unreachable after the IIFE returns.
     v8::HandleScope scope(isolate);
     CompileRun(
         "var token = {}; "
         "var registry = new FinalizationRegistry(function ()  {}); "
         "(function () { "
-        "  let o = {}; "
-        "  registry.register(o, {}, token); "
+        "  let o1 = {}; "
+        "  let o2 = {}; "
+        "  registry.register(o1, {}, token); "
+        "  registry.register(o2, {}, token); "
         "})();");
   }

@@ -1022,5 +1022,52 @@ TEST(UnregisterTokenHeapVerifier) {
   EmptyMessageQueues(isolate);
 }

+TEST(UnregisteredAndUnclearedCellHeapVerifier) {
+  if (!FLAG_incremental_marking) return;
+  ManualGCScope manual_gc_scope;
+#ifdef VERIFY_HEAP
+  FLAG_verify_heap = true;
+#endif
+
+  CcTest::InitializeVM();
+  v8::Isolate* isolate = CcTest::isolate();
+  Heap* heap = CcTest::heap();
+  v8::HandleScope outer_scope(isolate);
+
+  {
+    // Make a new FinalizationRegistry and register an object with a token.
+    v8::HandleScope scope(isolate);
+    CompileRun(
+        "var token = {}; "
+        "var registry = new FinalizationRegistry(function () {}); "
+        "registry.register({}, undefined, token);");
+  }
+
+  // Start incremental marking to activate the marking barrier.
+  heap::SimulateIncrementalMarking(heap, false);
+
+  {
+    // Make a WeakCell list with length >1, then use the token to unregister
+    // one WeakCell from the registry. The linked list manipulation keeps the
+    // unregistered WeakCell alive (i.e. not put into cleared_cells) due to the
+    // marking barrier from incremental marking. Then make the original token
+    // collectible.
+    v8::HandleScope scope(isolate);
+    CompileRun(
+        "registry.register({}); "
+        "registry.unregister(token); "
+        "token = 0;");
+  }
+
+  // Trigger GC.
+  CcTest::CollectAllGarbage();
+  CcTest::CollectAllGarbage();
+
+  // Pump message loop to run the finalizer task, then the incremental marking
+  // task. The verifier will verify that live WeakCells don't point to dead
+  // unregister tokens.
+  EmptyMessageQueues(isolate);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/v8/test/mjsunit/compiler/regress-crbug-1228407.js b/v8/test/mjsunit/compiler/regress-crbug-1228407.js
new file mode 100644
index 0000000000..f01eafb80e
--- /dev/null
+++ b/v8/test/mjsunit/compiler/regress-crbug-1228407.js
@@ -0,0 +1,24 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --interrupt-budget=100
+
+function foo() {
+  return function bar() {
+    a.p = 42;
+    for (let i = 0; i < 100; i++) this.p();
+    this.p = a;
+  };
+}
+
+var a = foo();
+var b = foo();
+
+a.prototype = { p() {} };
+b.prototype = { p() {
+  this.q = new a();
+  for (let i = 0; i < 200; i++) ;
+}};
+
+new b();
diff --git a/v8/test/mjsunit/compiler/regress-crbug-1234764.js b/v8/test/mjsunit/compiler/regress-crbug-1234764.js
new file mode 100644
index 0000000000..eca9346d17
--- /dev/null
+++ b/v8/test/mjsunit/compiler/regress-crbug-1234764.js
@@ -0,0 +1,21 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(arg_true) {
+  let o = {c0: 0};
+  let c0a = arg_true ? 0 : "x";
+  let c0 = Math.max(c0a, 0) + c0a;
+  let v01 = 2**32 + (o.c0 & 1);
+  let ra = ((2**32 - 1) >>> c0) - v01;
+  let rb = (-1) << (32 - c0);
+  return (ra^rb) >> 31;
+}
+
+%PrepareFunctionForOptimization(foo);
+assertEquals(0, foo(true));
+assertEquals(0, foo(true));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0, foo(true));
diff --git a/v8/test/mjsunit/compiler/regress-crbug-1234770.js b/v8/test/mjsunit/compiler/regress-crbug-1234770.js
new file mode 100644
index 0000000000..22f68db902
--- /dev/null
+++ b/v8/test/mjsunit/compiler/regress-crbug-1234770.js
@@ -0,0 +1,14 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(a) {
+  return ((a & 1) == 1) & ((a & 2) == 1);
+}
+
+%PrepareFunctionForOptimization(foo);
+assertEquals(0, foo(1));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0, foo(1));
diff --git a/v8/test/mjsunit/compiler/regress-crbug-1247763.js b/v8/test/mjsunit/compiler/regress-crbug-1247763.js
new file mode 100644
index 0000000000..760fb92d08
--- /dev/null
+++ b/v8/test/mjsunit/compiler/regress-crbug-1247763.js
@@ -0,0 +1,30 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+class C extends Array {};
+%NeverOptimizeFunction(C);
+
+for (let i = 0; i < 3; i++) {
+
+  function store_global() { global = new C(); };
+  store_global();
+  %PrepareFunctionForOptimization(store_global);
+  store_global();
+  %OptimizeFunctionOnNextCall(store_global);
+  store_global();
+
+  new C(42);
+
+  function load_global() { global.p1 = {}; global.p2 = {}; }
+  if (i) {
+    load_global();
+    %PrepareFunctionForOptimization(load_global);
+    load_global();
+    %OptimizeFunctionOnNextCall(load_global);
+    load_global();
+  }
+
+}
diff --git a/v8/test/unittests/api/interceptor-unittest.cc b/v8/test/unittests/api/interceptor-unittest.cc
index 8a1db3f823..bc00462a29 100644
--- a/v8/test/unittests/api/interceptor-unittest.cc
+++ b/v8/test/unittests/api/interceptor-unittest.cc
@@ -170,8 +170,8 @@ TEST_F(InterceptorLoggingTest, DispatchTest) {
   EXPECT_EQ(Run("obj.foo"), "named getter");
   EXPECT_EQ(Run("obj[42]"), "indexed getter");

-  EXPECT_EQ(Run("obj.foo = null"), "named setter");
-  EXPECT_EQ(Run("obj[42] = null"), "indexed setter");
+  EXPECT_EQ(Run("obj.foo = null"), "named setter, named descriptor");
+  EXPECT_EQ(Run("obj[42] = null"), "indexed setter, indexed descriptor");

   EXPECT_EQ(Run("Object.getOwnPropertyDescriptor(obj, 'foo')"),
             "named descriptor");
diff --git a/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index d54e927e83..ba9de255f1 100644
--- a/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -956,16 +956,12 @@ TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithParameters) {
   // (x << y) ^ (x >>> (32 - y)) => x ror (32 - y)
   Node* node3 = graph()->NewNode(machine()->Word32Xor(), shl_l, shr_l);
   Reduction reduction3 = Reduce(node3);
-  EXPECT_TRUE(reduction3.Changed());
-  EXPECT_EQ(reduction3.replacement(), node3);
-  EXPECT_THAT(reduction3.replacement(), IsWord32Ror(value, sub));
+  EXPECT_FALSE(reduction3.Changed());

   // (x >>> (32 - y)) ^ (x << y) => x ror (32 - y)
   Node* node4 = graph()->NewNode(machine()->Word32Xor(), shr_l, shl_l);
   Reduction reduction4 = Reduce(node4);
-  EXPECT_TRUE(reduction4.Changed());
-  EXPECT_EQ(reduction4.replacement(), node4);
-  EXPECT_THAT(reduction4.replacement(), IsWord32Ror(value, sub));
+  EXPECT_FALSE(reduction4.Changed());

   // Testing rotate right.
   Node* shl_r = graph()->NewNode(machine()->Word32Shl(), value, sub);
@@ -988,16 +984,12 @@ TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithParameters) {
   // (x << (32 - y)) ^ (x >>> y) => x ror y
   Node* node7 = graph()->NewNode(machine()->Word32Xor(), shl_r, shr_r);
   Reduction reduction7 = Reduce(node7);
-  EXPECT_TRUE(reduction7.Changed());
-  EXPECT_EQ(reduction7.replacement(), node7);
-  EXPECT_THAT(reduction7.replacement(), IsWord32Ror(value, shift));
+  EXPECT_FALSE(reduction7.Changed());

   // (x >>> y) ^ (x << (32 - y)) => x ror y
   Node* node8 = graph()->NewNode(machine()->Word32Xor(), shr_r, shl_r);
   Reduction reduction8 = Reduce(node8);
-  EXPECT_TRUE(reduction8.Changed());
-  EXPECT_EQ(reduction8.replacement(), node8);
-  EXPECT_THAT(reduction8.replacement(), IsWord32Ror(value, shift));
+  EXPECT_FALSE(reduction8.Changed());
 }

 TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithConstant) {
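
The four xor-based patterns are no longer reduced to Word32Ror because the identity breaks at a shift of zero. A quick JS counterexample (JS masks shift counts to five bits, matching the machine semantics the reducer models):

const x = 0x12345678 | 0;
const y = 0;
const xorForm = ((x << y) ^ (x >>> (32 - y))) | 0;  // x ^ x === 0
const orForm = ((x << y) | (x >>> (32 - y))) | 0;   // x | x === x, a real rotate
console.log(xorForm === orForm);  // false -- the xor reduction was unsound
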
diff --git a/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc b/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
index 32a5929fe4..f5d07c7fd5 100644
--- a/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
+++ b/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
@@ -239,5 +239,50 @@ TEST_F(EphemeronPairTest, EphemeronPairWithEmptyMixinValue) {
   FinishMarking();
 }

+namespace {
+
+class KeyWithCallback final : public GarbageCollected<KeyWithCallback> {
+ public:
+  template <typename Callback>
+  explicit KeyWithCallback(Callback callback) {
+    callback(this);
+  }
+  void Trace(Visitor*) const {}
+};
+
+class EphemeronHolderForKeyWithCallback final
+    : public GarbageCollected<EphemeronHolderForKeyWithCallback> {
+ public:
+  EphemeronHolderForKeyWithCallback(KeyWithCallback* key, GCed* value)
+      : ephemeron_pair_(key, value) {}
+  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(ephemeron_pair_); }
+
+ private:
+  const EphemeronPair<KeyWithCallback, GCed> ephemeron_pair_;
+};
+
+}  // namespace
+
+TEST_F(EphemeronPairTest, EphemeronPairWithKeyInConstruction) {
+  GCed* value = MakeGarbageCollected<GCed>(GetAllocationHandle());
+  Persistent<EphemeronHolderForKeyWithCallback> holder;
+  InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get());
+  FinishSteps();
+  MakeGarbageCollected<KeyWithCallback>(
+      GetAllocationHandle(), [this, &holder, value](KeyWithCallback* thiz) {
+        // The test doesn't use conservative stack scanning to retain the key,
+        // to avoid retaining the value as a side effect.
+        EXPECT_TRUE(HeapObjectHeader::FromObject(thiz).TryMarkAtomic());
+        holder = MakeGarbageCollected<EphemeronHolderForKeyWithCallback>(
+            GetAllocationHandle(), thiz, value);
+        // Finishing marking at this point will leave an ephemeron pair
+        // reachable where the key is still in construction. The GC needs to
+        // mark the value for such pairs as live in the atomic pause as the key
+        // is considered live.
+        FinishMarking();
+      });
+  EXPECT_TRUE(HeapObjectHeader::FromObject(value).IsMarked());
+}
+
 }  // namespace internal
 }  // namespace cppgc
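
In JS terms an ephemeron pair is a WeakMap entry: the value is kept alive through the key, never the other way around, which is the invariant the in-construction-key test above protects. A small illustration:

const wm = new WeakMap();
let key = {};
wm.set(key, { payload: "reachable only via key" });
console.log(wm.get(key).payload);  // available while key is alive
key = null;  // once the key is unreachable, the GC may drop the value too
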
diff --git a/v8/test/unittests/heap/cppgc/marker-unittest.cc b/v8/test/unittests/heap/cppgc/marker-unittest.cc
index 8f8191c6d0..3a9d56eb67 100644
--- a/v8/test/unittests/heap/cppgc/marker-unittest.cc
+++ b/v8/test/unittests/heap/cppgc/marker-unittest.cc
@@ -7,12 +7,14 @@
 #include <memory>

 #include "include/cppgc/allocation.h"
+#include "include/cppgc/ephemeron-pair.h"
 #include "include/cppgc/internal/pointer-policies.h"
 #include "include/cppgc/member.h"
 #include "include/cppgc/persistent.h"
 #include "include/cppgc/trace-trait.h"
 #include "src/heap/cppgc/heap-object-header.h"
 #include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/object-allocator.h"
 #include "src/heap/cppgc/stats-collector.h"
 #include "test/unittests/heap/cppgc/tests.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -44,6 +46,8 @@ class MarkerTest : public testing::TestWithHeap {

   Marker* marker() const { return marker_.get(); }

+  void ResetMarker() { marker_.reset(); }
+
  private:
   std::unique_ptr<Marker> marker_;
 };
@@ -346,6 +350,50 @@ TEST_F(MarkerTest, SentinelNotClearedOnWeakPersistentHandling) {
   EXPECT_EQ(kSentinelPointer, root->weak_child());
 }

+namespace {
+
+class SimpleObject final : public GarbageCollected<SimpleObject> {
+ public:
+  void Trace(Visitor*) const {}
+};
+
+class ObjectWithEphemeronPair final
+    : public GarbageCollected<ObjectWithEphemeronPair> {
+ public:
+  explicit ObjectWithEphemeronPair(AllocationHandle& handle)
+      : ephemeron_pair_(MakeGarbageCollected<SimpleObject>(handle),
+                        MakeGarbageCollected<SimpleObject>(handle)) {}
+
+  void Trace(Visitor* visitor) const {
+    // First trace the ephemeron pair. The key is not yet marked as live, so the
+    // pair should be recorded for later processing. Then strongly mark the key.
+    // Marking the key will not trigger another worklist processing iteration,
+    // as it merely continues the same loop for regular objects and will leave
+    // the main marking worklist empty. If recording the ephemeron pair doesn't
+    // trigger one either, we will get a crash when destroying the marker.
+    visitor->Trace(ephemeron_pair_);
+    visitor->Trace(const_cast<const SimpleObject*>(ephemeron_pair_.key.Get()));
+  }
+
+ private:
+  const EphemeronPair<SimpleObject, SimpleObject> ephemeron_pair_;
+};
+
+}  // namespace
+
+TEST_F(MarkerTest, MarkerProcessesAllEphemeronPairs) {
+  static const Marker::MarkingConfig config = {
+      MarkingConfig::CollectionType::kMajor,
+      MarkingConfig::StackState::kNoHeapPointers,
+      MarkingConfig::MarkingType::kAtomic};
+  Persistent<ObjectWithEphemeronPair> obj =
+      MakeGarbageCollected<ObjectWithEphemeronPair>(GetAllocationHandle(),
+                                                    GetAllocationHandle());
+  InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get(), config);
+  marker()->FinishMarking(MarkingConfig::StackState::kNoHeapPointers);
+  ResetMarker();
+}
+
 // Incremental Marking

 class IncrementalMarkingTest : public testing::TestWithHeap {
