// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/api/api.h"
#include "src/baseline/baseline.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/common/globals.h"
#include "src/execution/frame-constants.h"
#include "src/heap/memory-chunk.h"
#include "src/ic/accessor-assembler.h"
#include "src/ic/keyed-store-generic.h"
#include "src/logging/counters.h"
#include "src/objects/debug-objects.h"
#include "src/objects/shared-function-info.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Stack checks.

void Builtins::Generate_StackCheck(MacroAssembler* masm) {
  masm->TailCallRuntime(Runtime::kStackGuard);
}

// -----------------------------------------------------------------------------
// TurboFan support builtins.

TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
  auto js_object = Parameter<JSObject>(Descriptor::kObject);

  // Load the {object}'s elements.
  TNode<FixedArrayBase> source =
      CAST(LoadObjectField(js_object, JSObject::kElementsOffset));
  TNode<FixedArrayBase> target =
      CloneFixedArray(source, ExtractFixedArrayFlag::kFixedArrays);
  StoreObjectField(js_object, JSObject::kElementsOffset, target);
  Return(target);
}

TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
  auto object = Parameter<JSObject>(Descriptor::kObject);
  auto key = Parameter<Smi>(Descriptor::kKey);

  Label runtime(this, Label::kDeferred);
  TNode<FixedArrayBase> elements = LoadElements(object);
  elements = TryGrowElementsCapacity(object, elements, PACKED_DOUBLE_ELEMENTS,
                                     key, &runtime);
  Return(elements);

  BIND(&runtime);
  TailCallRuntime(Runtime::kGrowArrayElements, NoContextConstant(), object,
                  key);
}

TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
  auto object = Parameter<JSObject>(Descriptor::kObject);
  auto key = Parameter<Smi>(Descriptor::kKey);

  Label runtime(this, Label::kDeferred);
  TNode<FixedArrayBase> elements = LoadElements(object);
  elements = TryGrowElementsCapacity(object, elements, PACKED_ELEMENTS, key,
                                     &runtime);
  Return(elements);

  BIND(&runtime);
  TailCallRuntime(Runtime::kGrowArrayElements, NoContextConstant(), object,
                  key);
}

TF_BUILTIN(ReturnReceiver, CodeStubAssembler) {
  auto receiver = Parameter<Object>(Descriptor::kReceiver);
  Return(receiver);
}

TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
  Label tailcall_to_shared(this);
  auto context = Parameter<Context>(Descriptor::kContext);
  auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
  auto arg_count =
      UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
  auto function = Parameter<JSFunction>(Descriptor::kJSTarget);

  // Check break-at-entry flag on the debug info.
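  // Roughly: if the SharedFunctionInfo carries a DebugInfo whose
  // kBreakAtEntry flag is set, enter the debugger via
  // Runtime::kDebugBreakAtEntry before running the function; in every case,
  // execution then continues by tail-calling the SharedFunctionInfo's code
  // below (descriptive summary of the control flow that follows).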
  TNode<SharedFunctionInfo> shared =
      CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
  TNode<Object> maybe_heap_object_or_smi =
      LoadObjectField(shared, SharedFunctionInfo::kScriptOrDebugInfoOffset);
  TNode<HeapObject> maybe_debug_info =
      TaggedToHeapObject(maybe_heap_object_or_smi, &tailcall_to_shared);
  GotoIfNot(HasInstanceType(maybe_debug_info, InstanceType::DEBUG_INFO_TYPE),
            &tailcall_to_shared);

  {
    TNode<DebugInfo> debug_info = CAST(maybe_debug_info);
    TNode<Smi> flags =
        CAST(LoadObjectField(debug_info, DebugInfo::kFlagsOffset));
    GotoIfNot(SmiToInt32(SmiAnd(flags, SmiConstant(DebugInfo::kBreakAtEntry))),
              &tailcall_to_shared);

    CallRuntime(Runtime::kDebugBreakAtEntry, context, function);
    Goto(&tailcall_to_shared);
  }

  BIND(&tailcall_to_shared);
  // Tail call into code object on the SharedFunctionInfo.
  TNode<CodeT> code = GetSharedFunctionInfoCode(shared);
  TailCallJSCode(code, context, function, new_target, arg_count);
}

class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
 public:
  explicit WriteBarrierCodeStubAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

  TNode<BoolT> IsMarking() {
    TNode<ExternalReference> is_marking_addr = ExternalConstant(
        ExternalReference::heap_is_marking_flag_address(this->isolate()));
    return Word32NotEqual(Load<Uint8T>(is_marking_addr), Int32Constant(0));
  }

  TNode<BoolT> IsPageFlagSet(TNode<IntPtrT> object, int mask) {
    TNode<IntPtrT> page = PageFromAddress(object);
    TNode<IntPtrT> flags = UncheckedCast<IntPtrT>(
        Load(MachineType::Pointer(), page,
             IntPtrConstant(BasicMemoryChunk::kFlagsOffset)));
    return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)),
                        IntPtrConstant(0));
  }

  TNode<BoolT> IsWhite(TNode<IntPtrT> object) {
    DCHECK_EQ(strcmp(Marking::kWhiteBitPattern, "00"), 0);
    TNode<IntPtrT> cell;
    TNode<IntPtrT> mask;
    GetMarkBit(object, &cell, &mask);
    TNode<Int32T> mask32 = TruncateIntPtrToInt32(mask);
    // Non-white has 1 for the first bit, so we only need to check for the
    // first bit.
    return Word32Equal(Word32And(Load<Int32T>(cell), mask32),
                       Int32Constant(0));
  }

  void GetMarkBit(TNode<IntPtrT> object, TNode<IntPtrT>* cell,
                  TNode<IntPtrT>* mask) {
    TNode<IntPtrT> page = PageFromAddress(object);
    TNode<IntPtrT> bitmap =
        IntPtrAdd(page, IntPtrConstant(MemoryChunk::kMarkingBitmapOffset));

    {
      // Temp variable to calculate cell offset in bitmap.
      TNode<WordT> r0;
      int shift = Bitmap::kBitsPerCellLog2 + kTaggedSizeLog2 -
                  Bitmap::kBytesPerCellLog2;
      r0 = WordShr(object, IntPtrConstant(shift));
      r0 = WordAnd(r0, IntPtrConstant((kPageAlignmentMask >> shift) &
                                      ~(Bitmap::kBytesPerCell - 1)));
      *cell = IntPtrAdd(bitmap, Signed(r0));
    }
    {
      // Temp variable to calculate bit offset in cell.
      TNode<WordT> r1;
      r1 = WordShr(object, IntPtrConstant(kTaggedSizeLog2));
      r1 = WordAnd(r1, IntPtrConstant((1 << Bitmap::kBitsPerCellLog2) - 1));
      // It seems that LSB (e.g. cl) is automatically used, so no manual
      // masking is needed. Uncomment the following line otherwise.
      // WordAnd(r1, IntPtrConstant((1 << kBitsPerByte) - 1)));
      *mask = WordShl(IntPtrConstant(1), r1);
    }
  }

  void InsertIntoRememberedSet(TNode<IntPtrT> object, TNode<IntPtrT> slot,
                               SaveFPRegsMode fp_mode) {
    Label slow_path(this), next(this);
    TNode<IntPtrT> page = PageFromAddress(object);

    // Load address of SlotSet
    TNode<IntPtrT> slot_set = LoadSlotSet(page, &slow_path);
    TNode<IntPtrT> slot_offset = IntPtrSub(slot, page);

    // Load bucket
    TNode<IntPtrT> bucket = LoadBucket(slot_set, slot_offset, &slow_path);

    // Update cell
    SetBitInCell(bucket, slot_offset);
    Goto(&next);

    BIND(&slow_path);
    {
      TNode<ExternalReference> function =
          ExternalConstant(ExternalReference::insert_remembered_set_function());
      CallCFunctionWithCallerSavedRegisters(
          function, MachineTypeOf<Int32T>::value, fp_mode,
          std::make_pair(MachineTypeOf<IntPtrT>::value, page),
          std::make_pair(MachineTypeOf<IntPtrT>::value, slot));
      Goto(&next);
    }

    BIND(&next);
  }

  TNode<IntPtrT> LoadSlotSet(TNode<IntPtrT> page, Label* slow_path) {
    TNode<IntPtrT> slot_set = UncheckedCast<IntPtrT>(
        Load(MachineType::Pointer(), page,
             IntPtrConstant(MemoryChunk::kOldToNewSlotSetOffset)));
    GotoIf(WordEqual(slot_set, IntPtrConstant(0)), slow_path);
    return slot_set;
  }

  TNode<IntPtrT> LoadBucket(TNode<IntPtrT> slot_set, TNode<WordT> slot_offset,
                            Label* slow_path) {
    TNode<WordT> bucket_index =
        WordShr(slot_offset, SlotSet::kBitsPerBucketLog2 + kTaggedSizeLog2);
    TNode<IntPtrT> bucket = UncheckedCast<IntPtrT>(
        Load(MachineType::Pointer(), slot_set,
             WordShl(bucket_index, kSystemPointerSizeLog2)));
    GotoIf(WordEqual(bucket, IntPtrConstant(0)), slow_path);
    return bucket;
  }

  void SetBitInCell(TNode<IntPtrT> bucket, TNode<WordT> slot_offset) {
    // Load cell value
    TNode<WordT> cell_offset = WordAnd(
        WordShr(slot_offset, SlotSet::kBitsPerCellLog2 + kTaggedSizeLog2 -
                                 SlotSet::kCellSizeBytesLog2),
        IntPtrConstant((SlotSet::kCellsPerBucket - 1)
                       << SlotSet::kCellSizeBytesLog2));
    TNode<IntPtrT> cell_address =
        UncheckedCast<IntPtrT>(IntPtrAdd(bucket, cell_offset));
    TNode<IntPtrT> old_cell_value =
        ChangeInt32ToIntPtr(Load<Int32T>(cell_address));

    // Calculate new cell value
    TNode<WordT> bit_index = WordAnd(WordShr(slot_offset, kTaggedSizeLog2),
                                     IntPtrConstant(SlotSet::kBitsPerCell - 1));
    TNode<IntPtrT> new_cell_value = UncheckedCast<IntPtrT>(
        WordOr(old_cell_value, WordShl(IntPtrConstant(1), bit_index)));

    // Update cell value
    StoreNoWriteBarrier(MachineRepresentation::kWord32, cell_address,
                        TruncateIntPtrToInt32(new_cell_value));
  }

  void GenerationalWriteBarrier(SaveFPRegsMode fp_mode) {
    Label incremental_wb(this), test_old_to_young_flags(this),
        remembered_set_only(this), remembered_set_and_incremental_wb(this),
        next(this);

    // When incremental marking is not on, we skip cross generation pointer
    // checking here, because there are checks for
    // `kPointersFromHereAreInterestingMask` and
    // `kPointersToHereAreInterestingMask` in
    // `src/compiler/<arch>/code-generator-<arch>.cc` before calling this
    // stub, which serves as the cross generation checking.
    auto slot =
        UncheckedParameter<IntPtrT>(WriteBarrierDescriptor::kSlotAddress);
    Branch(IsMarking(), &test_old_to_young_flags, &remembered_set_only);

    BIND(&test_old_to_young_flags);
    {
      // TODO(ishell): do a new-space range check instead.
      TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));

      // TODO(albertnetymk): Try to cache the page flag for value and
      // object, instead of calling IsPageFlagSet each time.
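      // Summary of the dispatch below (a descriptive comment mirroring the
      // branches, not a specification):
      //   value old                 -> incremental marking barrier only
      //   value young, object young -> incremental marking barrier only
      //   value young, object old   -> remembered set insertion, then the
      //                                incremental marking barrier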
      TNode<BoolT> value_is_young =
          IsPageFlagSet(value, MemoryChunk::kIsInYoungGenerationMask);
      GotoIfNot(value_is_young, &incremental_wb);

      TNode<IntPtrT> object = BitcastTaggedToWord(
          UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
      TNode<BoolT> object_is_young =
          IsPageFlagSet(object, MemoryChunk::kIsInYoungGenerationMask);
      Branch(object_is_young, &incremental_wb,
             &remembered_set_and_incremental_wb);
    }

    BIND(&remembered_set_only);
    {
      TNode<IntPtrT> object = BitcastTaggedToWord(
          UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
      InsertIntoRememberedSet(object, slot, fp_mode);
      Goto(&next);
    }

    BIND(&remembered_set_and_incremental_wb);
    {
      TNode<IntPtrT> object = BitcastTaggedToWord(
          UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
      InsertIntoRememberedSet(object, slot, fp_mode);
      Goto(&incremental_wb);
    }

    BIND(&incremental_wb);
    {
      TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
      IncrementalWriteBarrier(slot, value, fp_mode);
      Goto(&next);
    }

    BIND(&next);
  }

  void IncrementalWriteBarrier(SaveFPRegsMode fp_mode) {
    auto slot =
        UncheckedParameter<IntPtrT>(WriteBarrierDescriptor::kSlotAddress);
    TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
    IncrementalWriteBarrier(slot, value, fp_mode);
  }

  void IncrementalWriteBarrier(TNode<IntPtrT> slot, TNode<IntPtrT> value,
                               SaveFPRegsMode fp_mode) {
    Label call_incremental_wb(this), next(this);

    // There are two cases we need to call incremental write barrier.
    // 1) value_is_white
    GotoIf(IsWhite(value), &call_incremental_wb);

    // 2) is_compacting && value_in_EC && obj_isnt_skip
    // is_compacting = true when is_marking = true
    GotoIfNot(IsPageFlagSet(value, MemoryChunk::kEvacuationCandidateMask),
              &next);

    {
      TNode<IntPtrT> object = BitcastTaggedToWord(
          UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
      Branch(
          IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
          &next, &call_incremental_wb);
    }

    BIND(&call_incremental_wb);
    {
      TNode<ExternalReference> function = ExternalConstant(
          ExternalReference::write_barrier_marking_from_code_function());
      TNode<IntPtrT> object = BitcastTaggedToWord(
          UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
      CallCFunctionWithCallerSavedRegisters(
          function, MachineTypeOf<Int32T>::value, fp_mode,
          std::make_pair(MachineTypeOf<IntPtrT>::value, object),
          std::make_pair(MachineTypeOf<IntPtrT>::value, slot));
      Goto(&next);
    }

    BIND(&next);
  }

  void GenerateRecordWrite(RememberedSetAction rs_mode,
                           SaveFPRegsMode fp_mode) {
    if (V8_DISABLE_WRITE_BARRIERS_BOOL) {
      Return(TrueConstant());
      return;
    }
    switch (rs_mode) {
      case RememberedSetAction::kEmit:
        GenerationalWriteBarrier(fp_mode);
        break;
      case RememberedSetAction::kOmit:
        IncrementalWriteBarrier(fp_mode);
        break;
    }
    IncrementCounter(isolate()->counters()->write_barriers(), 1);
    Return(TrueConstant());
  }

  void GenerateEphemeronKeyBarrier(SaveFPRegsMode fp_mode) {
    TNode<ExternalReference> function = ExternalConstant(
        ExternalReference::ephemeron_key_write_barrier_function());
    TNode<ExternalReference> isolate_constant =
        ExternalConstant(ExternalReference::isolate_address(isolate()));
    // In this method we limit the allocatable registers so we have to use
    // UncheckedParameter. Parameter does not work because the checked cast
    // needs more registers.
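    // Background (descriptive, not load-bearing for the code below): an
    // ephemeron is a (key, value) entry in an EphemeronHashTable whose value
    // is kept alive only if the key is. A plain marking barrier cannot
    // express that conditional liveness, so writes of ephemeron keys are
    // reported to the C++ side through this dedicated barrier instead.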
    auto address =
        UncheckedParameter<IntPtrT>(WriteBarrierDescriptor::kSlotAddress);
    TNode<IntPtrT> object = BitcastTaggedToWord(
        UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));

    CallCFunctionWithCallerSavedRegisters(
        function, MachineTypeOf<Int32T>::value, fp_mode,
        std::make_pair(MachineTypeOf<IntPtrT>::value, object),
        std::make_pair(MachineTypeOf<IntPtrT>::value, address),
        std::make_pair(MachineTypeOf<ExternalReference>::value,
                       isolate_constant));

    IncrementCounter(isolate()->counters()->write_barriers(), 1);
    Return(TrueConstant());
  }
};

TF_BUILTIN(RecordWriteEmitRememberedSetSaveFP, WriteBarrierCodeStubAssembler) {
  GenerateRecordWrite(RememberedSetAction::kEmit, SaveFPRegsMode::kSave);
}

TF_BUILTIN(RecordWriteOmitRememberedSetSaveFP, WriteBarrierCodeStubAssembler) {
  GenerateRecordWrite(RememberedSetAction::kOmit, SaveFPRegsMode::kSave);
}

TF_BUILTIN(RecordWriteEmitRememberedSetIgnoreFP,
           WriteBarrierCodeStubAssembler) {
  GenerateRecordWrite(RememberedSetAction::kEmit, SaveFPRegsMode::kIgnore);
}

TF_BUILTIN(RecordWriteOmitRememberedSetIgnoreFP,
           WriteBarrierCodeStubAssembler) {
  GenerateRecordWrite(RememberedSetAction::kOmit, SaveFPRegsMode::kIgnore);
}

TF_BUILTIN(EphemeronKeyBarrierSaveFP, WriteBarrierCodeStubAssembler) {
  GenerateEphemeronKeyBarrier(SaveFPRegsMode::kSave);
}

TF_BUILTIN(EphemeronKeyBarrierIgnoreFP, WriteBarrierCodeStubAssembler) {
  GenerateEphemeronKeyBarrier(SaveFPRegsMode::kIgnore);
}

#ifdef V8_IS_TSAN
class TSANRelaxedStoreCodeStubAssembler : public CodeStubAssembler {
 public:
  explicit TSANRelaxedStoreCodeStubAssembler(
      compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

  TNode<ExternalReference> GetExternalReference(int size) {
    if (size == kInt8Size) {
      return ExternalConstant(
          ExternalReference::tsan_relaxed_store_function_8_bits());
    } else if (size == kInt16Size) {
      return ExternalConstant(
          ExternalReference::tsan_relaxed_store_function_16_bits());
    } else if (size == kInt32Size) {
      return ExternalConstant(
          ExternalReference::tsan_relaxed_store_function_32_bits());
    } else {
      CHECK_EQ(size, kInt64Size);
      return ExternalConstant(
          ExternalReference::tsan_relaxed_store_function_64_bits());
    }
  }

  void GenerateTSANRelaxedStore(SaveFPRegsMode fp_mode, int size) {
    TNode<ExternalReference> function = GetExternalReference(size);
    auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
    TNode<IntPtrT> value = BitcastTaggedToWord(
        UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
    CallCFunctionWithCallerSavedRegisters(
        function, MachineType::Int32(), fp_mode,
        std::make_pair(MachineType::IntPtr(), address),
        std::make_pair(MachineType::IntPtr(), value));
    Return(UndefinedConstant());
  }
};

TF_BUILTIN(TSANRelaxedStore8IgnoreFP, TSANRelaxedStoreCodeStubAssembler) {
  GenerateTSANRelaxedStore(SaveFPRegsMode::kIgnore, kInt8Size);
}

TF_BUILTIN(TSANRelaxedStore8SaveFP, TSANRelaxedStoreCodeStubAssembler) {
  GenerateTSANRelaxedStore(SaveFPRegsMode::kSave, kInt8Size);
}

TF_BUILTIN(TSANRelaxedStore16IgnoreFP, TSANRelaxedStoreCodeStubAssembler) {
  GenerateTSANRelaxedStore(SaveFPRegsMode::kIgnore, kInt16Size);
}

TF_BUILTIN(TSANRelaxedStore16SaveFP, TSANRelaxedStoreCodeStubAssembler) {
  GenerateTSANRelaxedStore(SaveFPRegsMode::kSave, kInt16Size);
}

TF_BUILTIN(TSANRelaxedStore32IgnoreFP, TSANRelaxedStoreCodeStubAssembler) {
  GenerateTSANRelaxedStore(SaveFPRegsMode::kIgnore, kInt32Size);
}

TF_BUILTIN(TSANRelaxedStore32SaveFP, TSANRelaxedStoreCodeStubAssembler) {
  GenerateTSANRelaxedStore(SaveFPRegsMode::kSave, kInt32Size);
}

TF_BUILTIN(TSANRelaxedStore64IgnoreFP, TSANRelaxedStoreCodeStubAssembler) {
  GenerateTSANRelaxedStore(SaveFPRegsMode::kIgnore, kInt64Size);
}

TF_BUILTIN(TSANRelaxedStore64SaveFP,
           TSANRelaxedStoreCodeStubAssembler) {
  GenerateTSANRelaxedStore(SaveFPRegsMode::kSave, kInt64Size);
}

class TSANSeqCstStoreCodeStubAssembler : public CodeStubAssembler {
 public:
  explicit TSANSeqCstStoreCodeStubAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

  TNode<ExternalReference> GetExternalReference(int size) {
    if (size == kInt8Size) {
      return ExternalConstant(
          ExternalReference::tsan_seq_cst_store_function_8_bits());
    } else if (size == kInt16Size) {
      return ExternalConstant(
          ExternalReference::tsan_seq_cst_store_function_16_bits());
    } else if (size == kInt32Size) {
      return ExternalConstant(
          ExternalReference::tsan_seq_cst_store_function_32_bits());
    } else {
      CHECK_EQ(size, kInt64Size);
      return ExternalConstant(
          ExternalReference::tsan_seq_cst_store_function_64_bits());
    }
  }

  void GenerateTSANSeqCstStore(SaveFPRegsMode fp_mode, int size) {
    TNode<ExternalReference> function = GetExternalReference(size);
    auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
    TNode<IntPtrT> value = BitcastTaggedToWord(
        UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
    CallCFunctionWithCallerSavedRegisters(
        function, MachineType::Int32(), fp_mode,
        std::make_pair(MachineType::IntPtr(), address),
        std::make_pair(MachineType::IntPtr(), value));
    Return(UndefinedConstant());
  }
};

TF_BUILTIN(TSANSeqCstStore8IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
  GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt8Size);
}

TF_BUILTIN(TSANSeqCstStore8SaveFP, TSANSeqCstStoreCodeStubAssembler) {
  GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt8Size);
}

TF_BUILTIN(TSANSeqCstStore16IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
  GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt16Size);
}

TF_BUILTIN(TSANSeqCstStore16SaveFP, TSANSeqCstStoreCodeStubAssembler) {
  GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt16Size);
}

TF_BUILTIN(TSANSeqCstStore32IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
  GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt32Size);
}

TF_BUILTIN(TSANSeqCstStore32SaveFP, TSANSeqCstStoreCodeStubAssembler) {
  GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt32Size);
}

TF_BUILTIN(TSANSeqCstStore64IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
  GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt64Size);
}

TF_BUILTIN(TSANSeqCstStore64SaveFP, TSANSeqCstStoreCodeStubAssembler) {
  GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt64Size);
}

class TSANRelaxedLoadCodeStubAssembler : public CodeStubAssembler {
 public:
  explicit TSANRelaxedLoadCodeStubAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

  TNode<ExternalReference> GetExternalReference(int size) {
    if (size == kInt32Size) {
      return ExternalConstant(
          ExternalReference::tsan_relaxed_load_function_32_bits());
    } else {
      CHECK_EQ(size, kInt64Size);
      return ExternalConstant(
          ExternalReference::tsan_relaxed_load_function_64_bits());
    }
  }

  void GenerateTSANRelaxedLoad(SaveFPRegsMode fp_mode, int size) {
    TNode<ExternalReference> function = GetExternalReference(size);
    auto address = UncheckedParameter<IntPtrT>(TSANLoadDescriptor::kAddress);
    CallCFunctionWithCallerSavedRegisters(
        function, MachineType::Int32(), fp_mode,
        std::make_pair(MachineType::IntPtr(), address));
    Return(UndefinedConstant());
  }
};

TF_BUILTIN(TSANRelaxedLoad32IgnoreFP, TSANRelaxedLoadCodeStubAssembler) {
  GenerateTSANRelaxedLoad(SaveFPRegsMode::kIgnore, kInt32Size);
}

TF_BUILTIN(TSANRelaxedLoad32SaveFP, TSANRelaxedLoadCodeStubAssembler) {
  GenerateTSANRelaxedLoad(SaveFPRegsMode::kSave, kInt32Size);
}

TF_BUILTIN(TSANRelaxedLoad64IgnoreFP, TSANRelaxedLoadCodeStubAssembler) {
  GenerateTSANRelaxedLoad(SaveFPRegsMode::kIgnore, kInt64Size);
}
TF_BUILTIN(TSANRelaxedLoad64SaveFP, TSANRelaxedLoadCodeStubAssembler) {
  GenerateTSANRelaxedLoad(SaveFPRegsMode::kSave, kInt64Size);
}
#endif  // V8_IS_TSAN

class DeletePropertyBaseAssembler : public AccessorAssembler {
 public:
  explicit DeletePropertyBaseAssembler(compiler::CodeAssemblerState* state)
      : AccessorAssembler(state) {}

  void DictionarySpecificDelete(TNode<JSReceiver> receiver,
                                TNode<NameDictionary> properties,
                                TNode<IntPtrT> key_index,
                                TNode<Context> context) {
    // Overwrite the entry itself (see NameDictionary::SetEntry).
    TNode<Oddball> filler = TheHoleConstant();
    DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kTheHoleValue));
    StoreFixedArrayElement(properties, key_index, filler, SKIP_WRITE_BARRIER);
    StoreValueByKeyIndex<NameDictionary>(properties, key_index, filler,
                                         SKIP_WRITE_BARRIER);
    StoreDetailsByKeyIndex<NameDictionary>(properties, key_index,
                                           SmiConstant(0));

    // Update bookkeeping information (see NameDictionary::ElementRemoved).
    TNode<Smi> nof = GetNumberOfElements<NameDictionary>(properties);
    TNode<Smi> new_nof = SmiSub(nof, SmiConstant(1));
    SetNumberOfElements<NameDictionary>(properties, new_nof);
    TNode<Smi> num_deleted =
        GetNumberOfDeletedElements<NameDictionary>(properties);
    TNode<Smi> new_deleted = SmiAdd(num_deleted, SmiConstant(1));
    SetNumberOfDeletedElements<NameDictionary>(properties, new_deleted);

    // Shrink the dictionary if necessary (see NameDictionary::Shrink).
    Label shrinking_done(this);
    TNode<Smi> capacity = GetCapacity<NameDictionary>(properties);
    GotoIf(SmiGreaterThan(new_nof, SmiShr(capacity, 2)), &shrinking_done);
    GotoIf(SmiLessThan(new_nof, SmiConstant(16)), &shrinking_done);

    TNode<NameDictionary> new_properties =
        CAST(CallRuntime(Runtime::kShrinkNameDictionary, context, properties));

    StoreJSReceiverPropertiesOrHash(receiver, new_properties);

    Goto(&shrinking_done);
    BIND(&shrinking_done);
  }

  void DictionarySpecificDelete(TNode<JSReceiver> receiver,
                                TNode<SwissNameDictionary> properties,
                                TNode<IntPtrT> key_index,
                                TNode<Context> context) {
    Label shrunk(this), done(this);
    TVARIABLE(SwissNameDictionary, shrunk_table);

    SwissNameDictionaryDelete(properties, key_index, &shrunk, &shrunk_table);
    Goto(&done);
    BIND(&shrunk);
    StoreJSReceiverPropertiesOrHash(receiver, shrunk_table.value());
    Goto(&done);

    BIND(&done);
  }

  template <typename Dictionary>
  void DeleteDictionaryProperty(TNode<JSReceiver> receiver,
                                TNode<Dictionary> properties, TNode<Name> name,
                                TNode<Context> context, Label* dont_delete,
                                Label* notfound) {
    TVARIABLE(IntPtrT, var_name_index);
    Label dictionary_found(this, &var_name_index);
    NameDictionaryLookup<Dictionary>(properties, name, &dictionary_found,
                                     &var_name_index, notfound);

    BIND(&dictionary_found);
    TNode<IntPtrT> key_index = var_name_index.value();
    TNode<Uint32T> details = LoadDetailsByKeyIndex(properties, key_index);
    GotoIf(IsSetWord32(details, PropertyDetails::kAttributesDontDeleteMask),
           dont_delete);

    DictionarySpecificDelete(receiver, properties, key_index, context);

    Return(TrueConstant());
  }
};

TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
  auto receiver = Parameter<Object>(Descriptor::kObject);
  auto key = Parameter<Object>(Descriptor::kKey);
  auto language_mode = Parameter<Smi>(Descriptor::kLanguageMode);
  auto context = Parameter<Context>(Descriptor::kContext);

  TVARIABLE(IntPtrT, var_index);
  TVARIABLE(Name, var_unique);
  Label if_index(this, &var_index), if_unique_name(this), if_notunique(this),
      if_notfound(this), slow(this), if_proxy(this);

  GotoIf(TaggedIsSmi(receiver), &slow);
  TNode<Map> receiver_map = LoadMap(CAST(receiver));
  TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
  GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), &if_proxy);
  GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &slow);
  TryToName(key, &if_index, &var_index, &if_unique_name, &var_unique, &slow,
            &if_notunique);

  BIND(&if_index);
  {
    Comment("integer index");
    Goto(&slow);  // TODO(jkummerow): Implement more smarts here.
  }

  BIND(&if_unique_name);
  {
    Comment("key is unique name");
    CheckForAssociatedProtector(var_unique.value(), &slow);

    Label dictionary(this), dont_delete(this);
    GotoIf(IsDictionaryMap(receiver_map), &dictionary);

    // Fast properties need to clear recorded slots and mark the deleted
    // property as mutable, which can only be done in C++.
    Goto(&slow);

    BIND(&dictionary);
    {
      InvalidateValidityCellIfPrototype(receiver_map);

      TNode<PropertyDictionary> properties =
          CAST(LoadSlowProperties(CAST(receiver)));
      DeleteDictionaryProperty(CAST(receiver), properties, var_unique.value(),
                               context, &dont_delete, &if_notfound);
    }

    BIND(&dont_delete);
    {
      STATIC_ASSERT(LanguageModeSize == 2);
      GotoIf(SmiNotEqual(language_mode, SmiConstant(LanguageMode::kSloppy)),
             &slow);
      Return(FalseConstant());
    }
  }

  BIND(&if_notunique);
  {
    // If the string was not found in the string table, then no object can
    // have a property with that name.
    TryInternalizeString(CAST(key), &if_index, &var_index, &if_unique_name,
                         &var_unique, &if_notfound, &slow);
  }

  BIND(&if_notfound);
  Return(TrueConstant());

  BIND(&if_proxy);
  {
    TNode<Name> name = CAST(CallBuiltin(Builtin::kToName, context, key));
    GotoIf(IsPrivateSymbol(name), &slow);
    TailCallBuiltin(Builtin::kProxyDeleteProperty, context, receiver, name,
                    language_mode);
  }

  BIND(&slow);
  {
    TailCallRuntime(Runtime::kDeleteProperty, context, receiver, key,
                    language_mode);
  }
}

namespace {

class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
 public:
  explicit SetOrCopyDataPropertiesAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

 protected:
  TNode<JSObject> AllocateJsObjectTarget(TNode<Context> context) {
    const TNode<NativeContext> native_context = LoadNativeContext(context);
    const TNode<JSFunction> object_function = Cast(
        LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX));
    const TNode<Map> map =
        Cast(LoadJSFunctionPrototypeOrInitialMap(object_function));
    const TNode<JSObject> target = AllocateJSObjectFromMap(map);
    return target;
  }

  TNode<Object> SetOrCopyDataProperties(
      TNode<Context> context, TNode<JSReceiver> target, TNode<Object> source,
      Label* if_runtime,
      base::Optional<TNode<IntPtrT>> excluded_property_count = base::nullopt,
      base::Optional<TNode<IntPtrT>> excluded_property_base = base::nullopt,
      bool use_set = true) {
    Label if_done(this), if_noelements(this),
        if_sourcenotjsobject(this, Label::kDeferred);

    // JSPrimitiveWrapper wrappers for numbers don't have any enumerable own
    // properties, so we can immediately skip the whole operation if {source}
    // is a Smi.
    GotoIf(TaggedIsSmi(source), &if_done);

    // Otherwise check if {source} is a proper JSObject, and if not, defer
    // to testing for non-empty strings below.
    TNode<Map> source_map = LoadMap(CAST(source));
    TNode<Uint16T> source_instance_type = LoadMapInstanceType(source_map);
    GotoIfNot(IsJSObjectInstanceType(source_instance_type),
              &if_sourcenotjsobject);

    TNode<FixedArrayBase> source_elements = LoadElements(CAST(source));
    GotoIf(IsEmptyFixedArray(source_elements), &if_noelements);
    Branch(IsEmptySlowElementDictionary(source_elements), &if_noelements,
           if_runtime);

    BIND(&if_noelements);
    {
      // If the target is deprecated, the object will be updated on first
      // store. If the source for that store equals the target, this will
      // invalidate the cached representation of the source. Handle this case
      // in runtime.
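      // The two loops below implement the same traversal with different
      // store semantics: with {use_set}, properties are written through
      // SetProperty, as Object.assign() requires; without it, they are
      // defined with CreateDataProperty semantics (as CopyDataProperties
      // requires), optionally skipping any key that compares SameValue-equal
      // to an entry in {excluded_property_base}.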
      TNode<Map> target_map = LoadMap(target);
      GotoIf(IsDeprecatedMap(target_map), if_runtime);

      if (use_set) {
        TNode<BoolT> target_is_simple_receiver = IsSimpleObjectMap(target_map);
        ForEachEnumerableOwnProperty(
            context, source_map, CAST(source), kEnumerationOrder,
            [=](TNode<Name> key, TNode<Object> value) {
              KeyedStoreGenericGenerator::SetProperty(
                  state(), context, target, target_is_simple_receiver, key,
                  value, LanguageMode::kStrict);
            },
            if_runtime);
      } else {
        ForEachEnumerableOwnProperty(
            context, source_map, CAST(source), kEnumerationOrder,
            [=](TNode<Name> key, TNode<Object> value) {
              Label skip(this);
              if (excluded_property_count.has_value()) {
                BuildFastLoop<IntPtrT>(
                    IntPtrConstant(0), excluded_property_count.value(),
                    [&](TNode<IntPtrT> index) {
                      auto offset = Signed(TimesSystemPointerSize(index));
                      TNode<IntPtrT> location = Signed(
                          IntPtrSub(excluded_property_base.value(), offset));
                      auto property = LoadFullTagged(location);

                      Label continue_label(this);
                      BranchIfSameValue(key, property, &skip, &continue_label);
                      Bind(&continue_label);
                    },
                    1, IndexAdvanceMode::kPost);
              }

              CallBuiltin(Builtin::kCreateDataProperty, context, target, key,
                          value);
              Goto(&skip);
              Bind(&skip);
            },
            if_runtime);
      }
      Goto(&if_done);
    }

    BIND(&if_sourcenotjsobject);
    {
      // Handle other JSReceivers in the runtime.
      GotoIf(IsJSReceiverInstanceType(source_instance_type), if_runtime);

      // Non-empty strings are the only non-JSReceivers that need to be
      // handled explicitly by Object.assign() and CopyDataProperties.
      GotoIfNot(IsStringInstanceType(source_instance_type), &if_done);
      TNode<IntPtrT> source_length = LoadStringLengthAsWord(CAST(source));
      Branch(IntPtrEqual(source_length, IntPtrConstant(0)), &if_done,
             if_runtime);
    }

    BIND(&if_done);
    return target;
  }
};

}  // namespace

TF_BUILTIN(CopyDataPropertiesWithExcludedPropertiesOnStack,
           SetOrCopyDataPropertiesAssembler) {
  auto source = UncheckedParameter<Object>(Descriptor::kSource);
  auto excluded_property_count =
      UncheckedParameter<IntPtrT>(Descriptor::kExcludedPropertyCount);
  auto excluded_properties =
      UncheckedParameter<IntPtrT>(Descriptor::kExcludedPropertyBase);
  auto context = Parameter<Context>(Descriptor::kContext);

  // First, check for undefined or null.
  Label if_runtime(this, Label::kDeferred);
  GotoIf(IsNullOrUndefined(source), &if_runtime);

  TNode<JSObject> target = AllocateJsObjectTarget(context);
  Return(SetOrCopyDataProperties(context, target, source, &if_runtime,
                                 excluded_property_count, excluded_properties,
                                 false));

  BIND(&if_runtime);
  // The excluded_property_base is passed as a raw stack pointer, but is
  // bitcast to a Smi. This is safe because the stack pointer is aligned, so
  // it looks like a Smi to the GC.
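  // This works because kSmiTag == 0 lives in the low bit(s) of a tagged
  // value, and a system-stack address is at least pointer-aligned, so its
  // low bit is always clear; the CSA_DCHECK below verifies exactly that.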
  CSA_DCHECK(this, IntPtrEqual(WordAnd(excluded_properties,
                                       IntPtrConstant(kSmiTagMask)),
                               IntPtrConstant(kSmiTag)));
  TailCallRuntime(Runtime::kCopyDataPropertiesWithExcludedPropertiesOnStack,
                  context, source, SmiTag(excluded_property_count),
                  BitcastWordToTaggedSigned(excluded_properties));
}

TF_BUILTIN(CopyDataPropertiesWithExcludedProperties,
           SetOrCopyDataPropertiesAssembler) {
  auto source = UncheckedParameter<Object>(Descriptor::kSource);
  auto excluded_property_count_smi =
      UncheckedParameter<Smi>(Descriptor::kExcludedPropertyCount);
  auto context = Parameter<Context>(Descriptor::kContext);

  auto excluded_property_count = SmiToIntPtr(excluded_property_count_smi);
  CodeStubArguments arguments(this, excluded_property_count);

  TNode<IntPtrT> excluded_properties =
      ReinterpretCast<IntPtrT>(arguments.AtIndexPtr(
          IntPtrSub(excluded_property_count, IntPtrConstant(2))));

  arguments.PopAndReturn(CallBuiltin(
      Builtin::kCopyDataPropertiesWithExcludedPropertiesOnStack, context,
      source, excluded_property_count, excluded_properties));
}

// ES #sec-copydataproperties
TF_BUILTIN(CopyDataProperties, SetOrCopyDataPropertiesAssembler) {
  auto target = Parameter<JSObject>(Descriptor::kTarget);
  auto source = Parameter<Object>(Descriptor::kSource);
  auto context = Parameter<Context>(Descriptor::kContext);

  CSA_DCHECK(this, TaggedNotEqual(target, source));

  Label if_runtime(this, Label::kDeferred);
  SetOrCopyDataProperties(context, target, source, &if_runtime, base::nullopt,
                          base::nullopt, false);
  Return(UndefinedConstant());

  BIND(&if_runtime);
  TailCallRuntime(Runtime::kCopyDataProperties, context, target, source);
}

TF_BUILTIN(SetDataProperties, SetOrCopyDataPropertiesAssembler) {
  auto target = Parameter<JSReceiver>(Descriptor::kTarget);
  auto source = Parameter<Object>(Descriptor::kSource);
  auto context = Parameter<Context>(Descriptor::kContext);

  Label if_runtime(this, Label::kDeferred);
  GotoIfForceSlowPath(&if_runtime);
  SetOrCopyDataProperties(context, target, source, &if_runtime, base::nullopt,
                          base::nullopt, true);
  Return(UndefinedConstant());

  BIND(&if_runtime);
  TailCallRuntime(Runtime::kSetDataProperties, context, target, source);
}

TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
  auto receiver = Parameter<JSReceiver>(Descriptor::kReceiver);
  auto context = Parameter<Context>(Descriptor::kContext);

  Label if_empty(this), if_runtime(this, Label::kDeferred);
  TNode<Map> receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
  Return(receiver_map);

  BIND(&if_empty);
  Return(EmptyFixedArrayConstant());

  BIND(&if_runtime);
  TailCallRuntime(Runtime::kForInEnumerate, context, receiver);
}

TF_BUILTIN(ForInPrepare, CodeStubAssembler) {
  // The {enumerator} is either a Map or a FixedArray.
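  // If it is a Map, the receiver's enum cache (reachable through that map's
  // descriptors) supplies the keys; if it is a FixedArray, it already holds
  // the collected keys. ForInPrepare below normalizes both cases into a
  // {cache_array, cache_length} pair (descriptive note on the call below).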
  auto enumerator = Parameter<HeapObject>(Descriptor::kEnumerator);
  auto index = Parameter<TaggedIndex>(Descriptor::kVectorIndex);
  auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
  TNode<UintPtrT> vector_index = Unsigned(TaggedIndexToIntPtr(index));

  TNode<FixedArray> cache_array;
  TNode<Smi> cache_length;
  ForInPrepare(enumerator, vector_index, feedback_vector, &cache_array,
               &cache_length, UpdateFeedbackMode::kGuaranteedFeedback);

  Return(cache_array, cache_length);
}

TF_BUILTIN(ForInFilter, CodeStubAssembler) {
  auto key = Parameter<String>(Descriptor::kKey);
  auto object = Parameter<HeapObject>(Descriptor::kObject);
  auto context = Parameter<Context>(Descriptor::kContext);

  Label if_true(this), if_false(this);
  TNode<Oddball> result = HasProperty(context, object, key, kForInHasProperty);
  Branch(IsTrue(result), &if_true, &if_false);

  BIND(&if_true);
  Return(key);

  BIND(&if_false);
  Return(UndefinedConstant());
}

TF_BUILTIN(SameValue, CodeStubAssembler) {
  auto lhs = Parameter<Object>(Descriptor::kLeft);
  auto rhs = Parameter<Object>(Descriptor::kRight);

  Label if_true(this), if_false(this);
  BranchIfSameValue(lhs, rhs, &if_true, &if_false);

  BIND(&if_true);
  Return(TrueConstant());

  BIND(&if_false);
  Return(FalseConstant());
}

TF_BUILTIN(SameValueNumbersOnly, CodeStubAssembler) {
  auto lhs = Parameter<Object>(Descriptor::kLeft);
  auto rhs = Parameter<Object>(Descriptor::kRight);

  Label if_true(this), if_false(this);
  BranchIfSameValue(lhs, rhs, &if_true, &if_false, SameValueMode::kNumbersOnly);

  BIND(&if_true);
  Return(TrueConstant());

  BIND(&if_false);
  Return(FalseConstant());
}

TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
  auto target = Parameter<JSFunction>(Descriptor::kTarget);
  auto new_target = Parameter<Object>(Descriptor::kNewTarget);
  auto c_function = UncheckedParameter<WordT>(Descriptor::kCFunction);

  // The logic contained here is mirrored for TurboFan inlining in
  // JSTypedLowering::ReduceJSCall{Function,Construct}. Keep these in sync.

  // Make sure we operate in the context of the called function (for example
  // ConstructStubs implemented in C++ will be run in the context of the caller
  // instead of the callee, due to the way that [[Construct]] is defined for
  // ordinary functions).
  TNode<Context> context = LoadJSFunctionContext(target);

  auto actual_argc =
      UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
  CodeStubArguments args(this, actual_argc);

  TVARIABLE(Int32T, pushed_argc,
            TruncateIntPtrToInt32(args.GetLengthWithReceiver()));

  TNode<SharedFunctionInfo> shared = LoadJSFunctionSharedFunctionInfo(target);

  TNode<Int32T> formal_count = UncheckedCast<Int32T>(
      LoadSharedFunctionInfoFormalParameterCountWithReceiver(shared));

  // The number of arguments pushed is the maximum of actual arguments count
  // and formal parameters count. Except when the formal parameters count is
  // the sentinel.
  Label check_argc(this), update_argc(this), done_argc(this);

  Branch(IsSharedFunctionInfoDontAdaptArguments(shared), &done_argc,
         &check_argc);
  BIND(&check_argc);
  Branch(Int32GreaterThan(formal_count, pushed_argc.value()), &update_argc,
         &done_argc);
  BIND(&update_argc);
  pushed_argc = formal_count;
  Goto(&done_argc);
  BIND(&done_argc);

  // Update arguments count for CEntry to contain the number of arguments
  // including the receiver and the extra arguments.
  TNode<Int32T> argc = Int32Add(
      pushed_argc.value(),
      Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithoutReceiver));

  const bool builtin_exit_frame = true;
  TNode<CodeT> code =
      HeapConstant(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
                                       ArgvMode::kStack, builtin_exit_frame));

  // Unconditionally push argc, target and new target as extra stack arguments.
  // They will be used by stack frame iterators when constructing stack trace.
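  // Presumably kNumExtraArgsWithoutReceiver above accounts for precisely
  // these four slots (padding, argc, target, new_target); see
  // BuiltinExitFrameConstants in src/execution/frame-constants.h.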
  TailCallStub(CEntry1ArgvOnStackDescriptor{},  // descriptor
               code, context,       // standard arguments for TailCallStub
               argc, c_function,    // register arguments
               TheHoleConstant(),   // additional stack argument 1 (padding)
               SmiFromInt32(argc),  // additional stack argument 2
               target,              // additional stack argument 3
               new_target);         // additional stack argument 4
}

TF_BUILTIN(AllocateInYoungGeneration, CodeStubAssembler) {
  auto requested_size = UncheckedParameter<IntPtrT>(Descriptor::kRequestedSize);
  CSA_CHECK(this, IsValidPositiveSmi(requested_size));

  TNode<Smi> allocation_flags =
      SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
                               AllowLargeObjectAllocationFlag::encode(true)));
  TailCallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
                  SmiFromIntPtr(requested_size), allocation_flags);
}

TF_BUILTIN(AllocateRegularInYoungGeneration, CodeStubAssembler) {
  auto requested_size = UncheckedParameter<IntPtrT>(Descriptor::kRequestedSize);
  CSA_CHECK(this, IsValidPositiveSmi(requested_size));

  TNode<Smi> allocation_flags =
      SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
                               AllowLargeObjectAllocationFlag::encode(false)));
  TailCallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
                  SmiFromIntPtr(requested_size), allocation_flags);
}

TF_BUILTIN(AllocateInOldGeneration, CodeStubAssembler) {
  auto requested_size = UncheckedParameter<IntPtrT>(Descriptor::kRequestedSize);
  CSA_CHECK(this, IsValidPositiveSmi(requested_size));

  TNode<Smi> runtime_flags =
      SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
                               AllowLargeObjectAllocationFlag::encode(true)));
  TailCallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
                  SmiFromIntPtr(requested_size), runtime_flags);
}

TF_BUILTIN(AllocateRegularInOldGeneration, CodeStubAssembler) {
  auto requested_size = UncheckedParameter<IntPtrT>(Descriptor::kRequestedSize);
  CSA_CHECK(this, IsValidPositiveSmi(requested_size));

  TNode<Smi> runtime_flags =
      SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
                               AllowLargeObjectAllocationFlag::encode(false)));
  TailCallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
                  SmiFromIntPtr(requested_size), runtime_flags);
}

TF_BUILTIN(Abort, CodeStubAssembler) {
  auto message_id = Parameter<Smi>(Descriptor::kMessageOrMessageId);
  TailCallRuntime(Runtime::kAbort, NoContextConstant(), message_id);
}

TF_BUILTIN(AbortCSADcheck, CodeStubAssembler) {
  auto message = Parameter<String>(Descriptor::kMessageOrMessageId);
  TailCallRuntime(Runtime::kAbortCSADcheck, NoContextConstant(), message);
}

void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false);
}

void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true);
}

void Builtins::
    Generate_CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
        MacroAssembler* masm) {
  Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false);
}

void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, false);
}

void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, true);
}

void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore,
                  ArgvMode::kStack, false);
}

void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true);
}

void Builtins::
    Generate_CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
        MacroAssembler* masm) {
  Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false);
}

void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, false);
}

void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, true);
}

#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
  masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
}
#endif  // !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)

#ifndef V8_TARGET_ARCH_IA32
void Builtins::Generate_MemMove(MacroAssembler* masm) {
  masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
}
#endif  // V8_TARGET_ARCH_IA32

// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if ENABLE_SPARKPLUG
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
  EmitReturnBaseline(masm);
}
#else
// Stub out implementations of arch-specific baseline builtins.
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  masm->Trap();
}
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
  masm->Trap();
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
  masm->Trap();
}
#endif

// ES6 [[Get]] operation.
TF_BUILTIN(GetProperty, CodeStubAssembler) {
  auto object = Parameter<Object>(Descriptor::kObject);
  auto key = Parameter<Object>(Descriptor::kKey);
  auto context = Parameter<Context>(Descriptor::kContext);
  // TODO(duongn): consider tailcalling to GetPropertyWithReceiver(object,
  // object, key, OnNonExistent::kReturnUndefined).
  Label if_notfound(this), if_proxy(this, Label::kDeferred),
      if_slow(this, Label::kDeferred);

  CodeStubAssembler::LookupPropertyInHolder lookup_property_in_holder =
      [=](TNode<HeapObject> receiver, TNode<HeapObject> holder,
          TNode<Map> holder_map, TNode<Int32T> holder_instance_type,
          TNode<Name> unique_name, Label* next_holder, Label* if_bailout) {
        TVARIABLE(Object, var_value);
        Label if_found(this);
        TryGetOwnProperty(context, receiver, CAST(holder), holder_map,
                          holder_instance_type, unique_name, &if_found,
                          &var_value, next_holder, if_bailout);
        BIND(&if_found);
        Return(var_value.value());
      };

  CodeStubAssembler::LookupElementInHolder lookup_element_in_holder =
      [=](TNode<HeapObject> receiver, TNode<HeapObject> holder,
          TNode<Map> holder_map, TNode<Int32T> holder_instance_type,
          TNode<IntPtrT> index, Label* next_holder, Label* if_bailout) {
        // Not supported yet.
        Use(next_holder);
        Goto(if_bailout);
      };

  TryPrototypeChainLookup(object, object, key, lookup_property_in_holder,
                          lookup_element_in_holder, &if_notfound, &if_slow,
                          &if_proxy);

  BIND(&if_notfound);
  Return(UndefinedConstant());

  BIND(&if_slow);
  TailCallRuntime(Runtime::kGetProperty, context, object, key);

  BIND(&if_proxy);
  {
    // Convert the {key} to a Name first.
    TNode<Object> name = CallBuiltin(Builtin::kToName, context, key);

    // The {object} is a JSProxy instance, look up the {name} on it, passing
    // {object} both as receiver and holder. If {name} is absent we can safely
    // return undefined from here.
    TailCallBuiltin(Builtin::kProxyGetProperty, context, object, name, object,
                    SmiConstant(OnNonExistent::kReturnUndefined));
  }
}

// ES6 [[Get]] operation with Receiver.
TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
  auto object = Parameter<Object>(Descriptor::kObject);
  auto key = Parameter<Object>(Descriptor::kKey);
  auto context = Parameter<Context>(Descriptor::kContext);
  auto receiver = Parameter<Object>(Descriptor::kReceiver);
  auto on_non_existent = Parameter<Object>(Descriptor::kOnNonExistent);
  Label if_notfound(this), if_proxy(this, Label::kDeferred),
      if_slow(this, Label::kDeferred);

  CodeStubAssembler::LookupPropertyInHolder lookup_property_in_holder =
      [=](TNode<HeapObject> receiver, TNode<HeapObject> holder,
          TNode<Map> holder_map, TNode<Int32T> holder_instance_type,
          TNode<Name> unique_name, Label* next_holder, Label* if_bailout) {
        TVARIABLE(Object, var_value);
        Label if_found(this);
        TryGetOwnProperty(context, receiver, CAST(holder), holder_map,
                          holder_instance_type, unique_name, &if_found,
                          &var_value, next_holder, if_bailout);
        BIND(&if_found);
        Return(var_value.value());
      };

  CodeStubAssembler::LookupElementInHolder lookup_element_in_holder =
      [=](TNode<HeapObject> receiver, TNode<HeapObject> holder,
          TNode<Map> holder_map, TNode<Int32T> holder_instance_type,
          TNode<IntPtrT> index, Label* next_holder, Label* if_bailout) {
        // Not supported yet.
        Use(next_holder);
        Goto(if_bailout);
      };

  TryPrototypeChainLookup(receiver, object, key, lookup_property_in_holder,
                          lookup_element_in_holder, &if_notfound, &if_slow,
                          &if_proxy);

  BIND(&if_notfound);
  Label throw_reference_error(this);
  GotoIf(TaggedEqual(on_non_existent,
                     SmiConstant(OnNonExistent::kThrowReferenceError)),
         &throw_reference_error);
  CSA_DCHECK(this, TaggedEqual(on_non_existent,
                               SmiConstant(OnNonExistent::kReturnUndefined)));
  Return(UndefinedConstant());

  BIND(&throw_reference_error);
  Return(CallRuntime(Runtime::kThrowReferenceError, context, key));

  BIND(&if_slow);
  TailCallRuntime(Runtime::kGetPropertyWithReceiver, context, object, key,
                  receiver, on_non_existent);

  BIND(&if_proxy);
  {
    // Convert the {key} to a Name first.
    TNode<Name> name = CAST(CallBuiltin(Builtin::kToName, context, key));

    // Proxies cannot handle private symbols, so bail out.
    GotoIf(IsPrivateSymbol(name), &if_slow);

    // The {object} is a JSProxy instance, look up the {name} on it, passing
    // {object} both as receiver and holder. If {name} is absent we can safely
    // return undefined from here.
    TailCallBuiltin(Builtin::kProxyGetProperty, context, object, name,
                    receiver, on_non_existent);
  }
}

// ES6 [[Set]] operation.
TF_BUILTIN(SetProperty, CodeStubAssembler) {
  auto context = Parameter<Context>(Descriptor::kContext);
  auto receiver = Parameter<Object>(Descriptor::kReceiver);
  auto key = Parameter<Object>(Descriptor::kKey);
  auto value = Parameter<Object>(Descriptor::kValue);

  KeyedStoreGenericGenerator::SetProperty(state(), context, receiver, key,
                                          value, LanguageMode::kStrict);
}

// ES6 CreateDataProperty(), specialized for the case where objects are still
// being initialized, and have not yet been made accessible to the user. Thus,
// any operation here should be unobservable until after the object has been
// returned.
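// For example, cloning via object spread ({...source}) funnels through
// CopyDataProperties and ends up here: each copied property is defined as a
// fresh own data property on the literal under construction, rather than
// being written through any setter, which is only valid because that object
// cannot be observed yet.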
TF_BUILTIN(CreateDataProperty, CodeStubAssembler) {
  auto context = Parameter<Context>(Descriptor::kContext);
  auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
  auto key = Parameter<Object>(Descriptor::kKey);
  auto value = Parameter<Object>(Descriptor::kValue);

  KeyedStoreGenericGenerator::CreateDataProperty(state(), context, receiver,
                                                 key, value);
}

TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
  Label tailcall_to_function(this);
  auto context = Parameter<Context>(Descriptor::kContext);
  auto new_target = Parameter<Object>(Descriptor::kNewTarget);
  auto arg_count =
      UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
  auto function = Parameter<JSFunction>(Descriptor::kTarget);

  // Retrieve arguments from caller (stdlib, foreign, heap).
  CodeStubArguments args(this, arg_count);
  TNode<Object> stdlib = args.GetOptionalArgumentValue(0);
  TNode<Object> foreign = args.GetOptionalArgumentValue(1);
  TNode<Object> heap = args.GetOptionalArgumentValue(2);

  // Call runtime; on success just pass the result to the caller and pop all
  // arguments. A Smi 0 is returned on failure, an object on success.
  TNode<Object> maybe_result_or_smi_zero = CallRuntime(
      Runtime::kInstantiateAsmJs, context, function, stdlib, foreign, heap);
  GotoIf(TaggedIsSmi(maybe_result_or_smi_zero), &tailcall_to_function);

  TNode<SharedFunctionInfo> shared = LoadJSFunctionSharedFunctionInfo(function);
  TNode<Int32T> parameter_count = UncheckedCast<Int32T>(
      LoadSharedFunctionInfoFormalParameterCountWithReceiver(shared));
  // This builtin intercepts a call to {function}, where the number of
  // arguments pushed is the maximum of actual arguments count and formal
  // parameters count.
  Label argc_lt_param_count(this), argc_ge_param_count(this);
  Branch(IntPtrLessThan(args.GetLengthWithReceiver(),
                        ChangeInt32ToIntPtr(parameter_count)),
         &argc_lt_param_count, &argc_ge_param_count);
  BIND(&argc_lt_param_count);
  PopAndReturn(parameter_count, maybe_result_or_smi_zero);
  BIND(&argc_ge_param_count);
  args.PopAndReturn(maybe_result_or_smi_zero);

  BIND(&tailcall_to_function);
  // On failure, tail call back to regular JavaScript by re-calling the given
  // function which has been reset to the compile lazy builtin.
  TNode<CodeT> code = LoadJSFunctionCode(function);
  TailCallJSCode(code, context, function, new_target, arg_count);
}

}  // namespace internal
}  // namespace v8