// Copyright 2016 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef V8_CODEGEN_CODE_STUB_ASSEMBLER_H_ #define V8_CODEGEN_CODE_STUB_ASSEMBLER_H_ #include #include "src/base/macros.h" #include "src/codegen/bailout-reason.h" #include "src/codegen/tnode.h" #include "src/common/globals.h" #include "src/common/message-template.h" #include "src/compiler/code-assembler.h" #include "src/numbers/integer-literal.h" #include "src/objects/arguments.h" #include "src/objects/bigint.h" #include "src/objects/cell.h" #include "src/objects/feedback-vector.h" #include "src/objects/js-function.h" #include "src/objects/js-generator.h" #include "src/objects/js-promise.h" #include "src/objects/objects.h" #include "src/objects/promise.h" #include "src/objects/shared-function-info.h" #include "src/objects/smi.h" #include "src/objects/swiss-name-dictionary.h" #include "src/objects/tagged-index.h" #include "src/roots/roots.h" #include "src/sandbox/external-pointer.h" #include "torque-generated/exported-macros-assembler.h" namespace v8 { namespace internal { class CallInterfaceDescriptor; class CodeStubArguments; class CodeStubAssembler; class StatsCounter; class StubCache; enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; #define HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \ V(ArrayIteratorProtector, array_iterator_protector, ArrayIteratorProtector) \ V(ArraySpeciesProtector, array_species_protector, ArraySpeciesProtector) \ V(AsyncFunctionAwaitRejectSharedFun, async_function_await_reject_shared_fun, \ AsyncFunctionAwaitRejectSharedFun) \ V(AsyncFunctionAwaitResolveSharedFun, \ async_function_await_resolve_shared_fun, \ AsyncFunctionAwaitResolveSharedFun) \ V(AsyncGeneratorAwaitRejectSharedFun, \ async_generator_await_reject_shared_fun, \ AsyncGeneratorAwaitRejectSharedFun) \ V(AsyncGeneratorAwaitResolveSharedFun, \ async_generator_await_resolve_shared_fun, \ AsyncGeneratorAwaitResolveSharedFun) \ V(AsyncGeneratorReturnClosedRejectSharedFun, \ async_generator_return_closed_reject_shared_fun, \ AsyncGeneratorReturnClosedRejectSharedFun) \ V(AsyncGeneratorReturnClosedResolveSharedFun, \ async_generator_return_closed_resolve_shared_fun, \ AsyncGeneratorReturnClosedResolveSharedFun) \ V(AsyncGeneratorReturnResolveSharedFun, \ async_generator_return_resolve_shared_fun, \ AsyncGeneratorReturnResolveSharedFun) \ V(AsyncGeneratorYieldResolveSharedFun, \ async_generator_yield_resolve_shared_fun, \ AsyncGeneratorYieldResolveSharedFun) \ V(AsyncIteratorValueUnwrapSharedFun, async_iterator_value_unwrap_shared_fun, \ AsyncIteratorValueUnwrapSharedFun) \ V(IsConcatSpreadableProtector, is_concat_spreadable_protector, \ IsConcatSpreadableProtector) \ V(MapIteratorProtector, map_iterator_protector, MapIteratorProtector) \ V(NoElementsProtector, no_elements_protector, NoElementsProtector) \ V(MegaDOMProtector, mega_dom_protector, MegaDOMProtector) \ V(NumberStringCache, number_string_cache, NumberStringCache) \ V(PromiseAllResolveElementSharedFun, promise_all_resolve_element_shared_fun, \ PromiseAllResolveElementSharedFun) \ V(PromiseAllSettledRejectElementSharedFun, \ promise_all_settled_reject_element_shared_fun, \ PromiseAllSettledRejectElementSharedFun) \ V(PromiseAllSettledResolveElementSharedFun, \ promise_all_settled_resolve_element_shared_fun, \ PromiseAllSettledResolveElementSharedFun) \ V(PromiseAnyRejectElementSharedFun, promise_any_reject_element_shared_fun, \ PromiseAnyRejectElementSharedFun) \ 
V(PromiseCapabilityDefaultRejectSharedFun, \ promise_capability_default_reject_shared_fun, \ PromiseCapabilityDefaultRejectSharedFun) \ V(PromiseCapabilityDefaultResolveSharedFun, \ promise_capability_default_resolve_shared_fun, \ PromiseCapabilityDefaultResolveSharedFun) \ V(PromiseCatchFinallySharedFun, promise_catch_finally_shared_fun, \ PromiseCatchFinallySharedFun) \ V(PromiseGetCapabilitiesExecutorSharedFun, \ promise_get_capabilities_executor_shared_fun, \ PromiseGetCapabilitiesExecutorSharedFun) \ V(PromiseResolveProtector, promise_resolve_protector, \ PromiseResolveProtector) \ V(PromiseSpeciesProtector, promise_species_protector, \ PromiseSpeciesProtector) \ V(PromiseThenFinallySharedFun, promise_then_finally_shared_fun, \ PromiseThenFinallySharedFun) \ V(PromiseThenProtector, promise_then_protector, PromiseThenProtector) \ V(PromiseThrowerFinallySharedFun, promise_thrower_finally_shared_fun, \ PromiseThrowerFinallySharedFun) \ V(PromiseValueThunkFinallySharedFun, promise_value_thunk_finally_shared_fun, \ PromiseValueThunkFinallySharedFun) \ V(ProxyRevokeSharedFun, proxy_revoke_shared_fun, ProxyRevokeSharedFun) \ V(RegExpSpeciesProtector, regexp_species_protector, RegExpSpeciesProtector) \ V(SetIteratorProtector, set_iterator_protector, SetIteratorProtector) \ V(SingleCharacterStringCache, single_character_string_cache, \ SingleCharacterStringCache) \ V(StringIteratorProtector, string_iterator_protector, \ StringIteratorProtector) \ V(TypedArraySpeciesProtector, typed_array_species_protector, \ TypedArraySpeciesProtector) #define UNIQUE_INSTANCE_TYPE_IMMUTABLE_IMMOVABLE_MAP_ADAPTER( \ V, rootIndexName, rootAccessorName, class_name) \ V(rootIndexName, rootAccessorName, class_name##Map) #define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \ V(AllocationSiteWithoutWeakNextMap, allocation_site_without_weaknext_map, \ AllocationSiteWithoutWeakNextMap) \ V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap) \ V(arguments_to_string, arguments_to_string, ArgumentsToString) \ V(Array_string, Array_string, ArrayString) \ V(array_to_string, array_to_string, ArrayToString) \ V(BooleanMap, boolean_map, BooleanMap) \ V(boolean_to_string, boolean_to_string, BooleanToString) \ V(ConsOneByteStringMap, cons_one_byte_string_map, ConsOneByteStringMap) \ V(ConsStringMap, cons_string_map, ConsStringMap) \ V(constructor_string, constructor_string, ConstructorString) \ V(date_to_string, date_to_string, DateToString) \ V(default_string, default_string, DefaultString) \ V(EmptyByteArray, empty_byte_array, EmptyByteArray) \ V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \ V(EmptyScopeInfo, empty_scope_info, EmptyScopeInfo) \ V(EmptyPropertyDictionary, empty_property_dictionary, \ EmptyPropertyDictionary) \ V(EmptyOrderedPropertyDictionary, empty_ordered_property_dictionary, \ EmptyOrderedPropertyDictionary) \ V(EmptySwissPropertyDictionary, empty_swiss_property_dictionary, \ EmptySwissPropertyDictionary) \ V(EmptySlowElementDictionary, empty_slow_element_dictionary, \ EmptySlowElementDictionary) \ V(empty_string, empty_string, EmptyString) \ V(error_to_string, error_to_string, ErrorToString) \ V(errors_string, errors_string, ErrorsString) \ V(FalseValue, false_value, False) \ V(FixedArrayMap, fixed_array_map, FixedArrayMap) \ V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \ V(Function_string, function_string, FunctionString) \ V(function_to_string, function_to_string, FunctionToString) \ V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \ 
V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \ V(Infinity_string, Infinity_string, InfinityString) \ V(is_concat_spreadable_symbol, is_concat_spreadable_symbol, \ IsConcatSpreadableSymbol) \ V(iterator_symbol, iterator_symbol, IteratorSymbol) \ V(length_string, length_string, LengthString) \ V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \ V(match_symbol, match_symbol, MatchSymbol) \ V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol) \ V(mega_dom_symbol, mega_dom_symbol, MegaDOMSymbol) \ V(message_string, message_string, MessageString) \ V(minus_Infinity_string, minus_Infinity_string, MinusInfinityString) \ V(MinusZeroValue, minus_zero_value, MinusZero) \ V(name_string, name_string, NameString) \ V(NanValue, nan_value, Nan) \ V(NaN_string, NaN_string, NaNString) \ V(next_string, next_string, NextString) \ V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \ V(null_to_string, null_to_string, NullToString) \ V(NullValue, null_value, Null) \ V(number_string, number_string, NumberString) \ V(number_to_string, number_to_string, NumberToString) \ V(Object_string, Object_string, ObjectString) \ V(object_to_string, object_to_string, ObjectToString) \ V(OneByteStringMap, one_byte_string_map, OneByteStringMap) \ V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \ V(OnePointerFillerMap, one_pointer_filler_map, OnePointerFillerMap) \ V(PromiseCapabilityMap, promise_capability_map, PromiseCapabilityMap) \ V(promise_forwarding_handler_symbol, promise_forwarding_handler_symbol, \ PromiseForwardingHandlerSymbol) \ V(PromiseFulfillReactionJobTaskMap, promise_fulfill_reaction_job_task_map, \ PromiseFulfillReactionJobTaskMap) \ V(promise_handled_by_symbol, promise_handled_by_symbol, \ PromiseHandledBySymbol) \ V(PromiseReactionMap, promise_reaction_map, PromiseReactionMap) \ V(PromiseRejectReactionJobTaskMap, promise_reject_reaction_job_task_map, \ PromiseRejectReactionJobTaskMap) \ V(PromiseResolveThenableJobTaskMap, promise_resolve_thenable_job_task_map, \ PromiseResolveThenableJobTaskMap) \ V(prototype_string, prototype_string, PrototypeString) \ V(replace_symbol, replace_symbol, ReplaceSymbol) \ V(regexp_to_string, regexp_to_string, RegexpToString) \ V(resolve_string, resolve_string, ResolveString) \ V(return_string, return_string, ReturnString) \ V(search_symbol, search_symbol, SearchSymbol) \ V(species_symbol, species_symbol, SpeciesSymbol) \ V(StaleRegister, stale_register, StaleRegister) \ V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \ V(string_string, string_string, StringString) \ V(string_to_string, string_to_string, StringToString) \ V(StringMap, string_map, StringMap) \ V(TheHoleValue, the_hole_value, TheHole) \ V(then_string, then_string, ThenString) \ V(toString_string, toString_string, ToStringString) \ V(to_primitive_symbol, to_primitive_symbol, ToPrimitiveSymbol) \ V(to_string_tag_symbol, to_string_tag_symbol, ToStringTagSymbol) \ V(TrueValue, true_value, True) \ V(undefined_to_string, undefined_to_string, UndefinedToString) \ V(UndefinedValue, undefined_value, Undefined) \ V(uninitialized_symbol, uninitialized_symbol, UninitializedSymbol) \ V(valueOf_string, valueOf_string, ValueOfString) \ V(wasm_wrapped_object_symbol, wasm_wrapped_object_symbol, \ WasmWrappedObjectSymbol) \ V(zero_string, zero_string, ZeroString) \ UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR( \ UNIQUE_INSTANCE_TYPE_IMMUTABLE_IMMOVABLE_MAP_ADAPTER, V) #define HEAP_IMMOVABLE_OBJECT_LIST(V) \ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \ 
  HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V)

#ifdef DEBUG
#define CSA_CHECK(csa, x) \
  (csa)->Check([&]() -> TNode<BoolT> { return x; }, #x, __FILE__, __LINE__)
#else
#define CSA_CHECK(csa, x) (csa)->FastCheck(x)
#endif

#ifdef DEBUG
// CSA_DCHECK_ARGS generates an
// std::initializer_list<CodeStubAssembler::ExtraNode> from __VA_ARGS__. It
// currently supports between 0 and 2 arguments.

// clang-format off
#define CSA_DCHECK_0_ARGS(...) {}
#define CSA_DCHECK_1_ARG(a, ...) {{a, #a}}
#define CSA_DCHECK_2_ARGS(a, b, ...) {{a, #a}, {b, #b}}
// clang-format on
#define SWITCH_CSA_DCHECK_ARGS(dummy, a, b, FUNC, ...) FUNC(a, b)
#define CSA_DCHECK_ARGS(...)                                        \
  CALL(SWITCH_CSA_DCHECK_ARGS, (, ##__VA_ARGS__, CSA_DCHECK_2_ARGS, \
                                CSA_DCHECK_1_ARG, CSA_DCHECK_0_ARGS))
// Workaround for MSVC to skip comma in empty __VA_ARGS__.
#define CALL(x, y) x y

// CSA_DCHECK(csa, <condition>, <extra values to print...>)

#define CSA_DCHECK(csa, condition_node, ...)                         \
  (csa)->Dcheck(condition_node, #condition_node, __FILE__, __LINE__, \
                CSA_DCHECK_ARGS(__VA_ARGS__))

// CSA_DCHECK_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
//     <extra values to print...>)

#define CSA_DCHECK_BRANCH(csa, gen, ...) \
  (csa)->Dcheck(gen, #gen, __FILE__, __LINE__, CSA_DCHECK_ARGS(__VA_ARGS__))

#define CSA_DCHECK_JS_ARGC_OP(csa, Op, op, expected)                           \
  (csa)->Dcheck(                                                               \
      [&]() -> TNode<BoolT> {                                                  \
        const TNode<Word32T> argc = (csa)->UncheckedParameter<Word32T>(        \
            Descriptor::kJSActualArgumentsCount);                              \
        return (csa)->Op(argc,                                                 \
                         (csa)->Int32Constant(i::JSParameterCount(expected))); \
      },                                                                       \
      "argc " #op " " #expected, __FILE__, __LINE__,                           \
      {{SmiFromInt32((csa)->UncheckedParameter<Int32T>(                        \
            Descriptor::kJSActualArgumentsCount)),                             \
        "argc"}})

#define CSA_DCHECK_JS_ARGC_EQ(csa, expected) \
  CSA_DCHECK_JS_ARGC_OP(csa, Word32Equal, ==, expected)

#define CSA_DEBUG_INFO(name) \
  { #name, __FILE__, __LINE__ }
#define BIND(label) Bind(label, CSA_DEBUG_INFO(label))
#define TYPED_VARIABLE_DEF(type, name, ...) \
  TVariable<type> name(CSA_DEBUG_INFO(name), __VA_ARGS__)
#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) \
  name(CSA_DEBUG_INFO(name), __VA_ARGS__)
#else  // DEBUG
#define CSA_DCHECK(csa, ...) ((void)0)
#define CSA_DCHECK_BRANCH(csa, ...) ((void)0)
#define CSA_DCHECK_JS_ARGC_EQ(csa, expected) ((void)0)
#define BIND(label) Bind(label)
#define TYPED_VARIABLE_DEF(type, name, ...) TVariable<type> name(__VA_ARGS__)
#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) name(__VA_ARGS__)
#endif  // DEBUG

#define TVARIABLE(...) EXPAND(TYPED_VARIABLE_DEF(__VA_ARGS__, this))
#define TVARIABLE_CONSTRUCTOR(...) \
  EXPAND(TYPED_VARIABLE_CONSTRUCTOR(__VA_ARGS__, this))

#ifdef ENABLE_SLOW_DCHECKS
#define CSA_SLOW_DCHECK(csa, ...) \
  if (FLAG_enable_slow_asserts) { \
    CSA_DCHECK(csa, __VA_ARGS__); \
  }
#else
#define CSA_SLOW_DCHECK(csa, ...) ((void)0)
#endif

// Provides JavaScript-specific "macro-assembler" functionality on top of the
// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
// it's possible to add JavaScript-specific useful CodeAssembler "macros"
// without modifying files in the compiler directory (and requiring a review
// from a compiler directory OWNER).
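//
// A minimal sketch of how the debug macros above are typically used from a
// CodeStubAssembler-derived builtin assembler (illustrative only; the helper
// below is hypothetical and not part of this interface):
//
//   TNode<Smi> ClampToZero(TNode<Smi> x) {
//     TVARIABLE(Smi, var_result, x);
//     Label done(this);
//     GotoIfNot(SmiLessThan(x, SmiConstant(0)), &done);
//     var_result = SmiConstant(0);
//     Goto(&done);
//     BIND(&done);
//     CSA_DCHECK(this, TaggedIsPositiveSmi(var_result.value()));
//     return var_result.value();
//   }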
class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler, public TorqueGeneratedExportedMacrosAssembler { public: using ScopedExceptionHandler = compiler::ScopedExceptionHandler; template using LazyNode = std::function()>; explicit CodeStubAssembler(compiler::CodeAssemblerState* state); enum class AllocationFlag : uint8_t { kNone = 0, kDoubleAlignment = 1, kPretenured = 1 << 1, kAllowLargeObjectAllocation = 1 << 2, }; enum SlackTrackingMode { kWithSlackTracking, kNoSlackTracking }; using AllocationFlags = base::Flags; TNode ParameterToIntPtr(TNode value) { return SmiUntag(value); } TNode ParameterToIntPtr(TNode value) { return value; } TNode ParameterToIntPtr(TNode value) { return Signed(value); } enum InitializationMode { kUninitialized, kInitializeToZero, kInitializeToNull }; TNode ParameterToTagged(TNode value) { return value; } TNode ParameterToTagged(TNode value) { return SmiTag(value); } template TNode TaggedToParameter(TNode value); bool ToParameterConstant(TNode node, intptr_t* out) { if (TryToIntPtrConstant(node, out)) { return true; } return false; } bool ToParameterConstant(TNode node, intptr_t* out) { intptr_t constant; if (TryToIntPtrConstant(node, &constant)) { *out = constant; return true; } return false; } #if defined(BINT_IS_SMI) TNode BIntToSmi(TNode source) { return source; } TNode BIntToIntPtr(TNode source) { return SmiToIntPtr(source); } TNode SmiToBInt(TNode source) { return source; } TNode IntPtrToBInt(TNode source) { return SmiFromIntPtr(source); } #elif defined(BINT_IS_INTPTR) TNode BIntToSmi(TNode source) { return SmiFromIntPtr(source); } TNode BIntToIntPtr(TNode source) { return source; } TNode SmiToBInt(TNode source) { return SmiToIntPtr(source); } TNode IntPtrToBInt(TNode source) { return source; } #else #error Unknown architecture. #endif TNode TaggedIndexToIntPtr(TNode value); TNode IntPtrToTaggedIndex(TNode value); // TODO(v8:10047): Get rid of these convertions eventually. TNode TaggedIndexToSmi(TNode value); TNode SmiToTaggedIndex(TNode value); // Pointer compression specific. Ensures that the upper 32 bits of a Smi // contain the sign of a lower 32 bits so that the Smi can be directly used // as an index in element offset computation. 
TNode NormalizeSmiIndex(TNode smi_index); TNode TaggedToSmi(TNode value, Label* fail) { GotoIf(TaggedIsNotSmi(value), fail); return UncheckedCast(value); } TNode TaggedToPositiveSmi(TNode value, Label* fail) { GotoIfNot(TaggedIsPositiveSmi(value), fail); return UncheckedCast(value); } TNode TaggedToDirectString(TNode value, Label* fail); TNode TaggedToHeapObject(TNode value, Label* fail) { GotoIf(TaggedIsSmi(value), fail); return UncheckedCast(value); } TNode Uint16Constant(uint16_t t) { return UncheckedCast(Int32Constant(t)); } TNode HeapObjectToJSDataView(TNode heap_object, Label* fail) { GotoIfNot(IsJSDataView(heap_object), fail); return CAST(heap_object); } TNode HeapObjectToJSProxy(TNode heap_object, Label* fail) { GotoIfNot(IsJSProxy(heap_object), fail); return CAST(heap_object); } TNode HeapObjectToJSStringIterator( TNode heap_object, Label* fail) { GotoIfNot(IsJSStringIterator(heap_object), fail); return CAST(heap_object); } TNode HeapObjectToCallable(TNode heap_object, Label* fail) { GotoIfNot(IsCallable(heap_object), fail); return CAST(heap_object); } TNode HeapObjectToString(TNode heap_object, Label* fail) { GotoIfNot(IsString(heap_object), fail); return CAST(heap_object); } TNode HeapObjectToConstructor(TNode heap_object, Label* fail) { GotoIfNot(IsConstructor(heap_object), fail); return CAST(heap_object); } TNode HeapObjectToJSFunctionWithPrototypeSlot( TNode heap_object, Label* fail) { GotoIfNot(IsJSFunctionWithPrototypeSlot(heap_object), fail); return CAST(heap_object); } template TNode RunLazy(LazyNode lazy) { return lazy(); } #define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \ TNode OpName(TNode a, TNode b) { return SmiOpName(a, b); } \ TNode OpName(TNode a, TNode b) { \ return IntPtrOpName(a, b); \ } \ TNode OpName(TNode a, TNode b) { \ return Unsigned(IntPtrOpName(Signed(a), Signed(b))); \ } \ TNode OpName(TNode a, TNode b) { \ return ReinterpretCast(IntPtrOpName( \ ReinterpretCast(a), ReinterpretCast(b))); \ } // TODO(v8:9708): Define BInt operations once all uses are ported. PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd) PARAMETER_BINOP(IntPtrOrSmiSub, IntPtrSub, SmiSub) #undef PARAMETER_BINOP #define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \ TNode OpName(TNode a, TNode b) { return SmiOpName(a, b); } \ TNode OpName(TNode a, TNode b) { \ return IntPtrOpName(a, b); \ } \ TNode OpName(TNode a, TNode b) { \ return IntPtrOpName(Signed(a), Signed(b)); \ } \ TNode OpName(TNode a, TNode b) { \ return IntPtrOpName(a, b); \ } // TODO(v8:9708): Define BInt operations once all uses are ported. 
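// For example (illustrative sketch): the IntPtrOrSmi*/UintPtrOrSmi*
// comparison helpers instantiated below let code that is generic over its
// index representation (Smi, IntPtrT or UintPtrT) compare values without
// converting them first, e.g. with `index` and `length` of the same TIndex
// type:
//
//   TNode<BoolT> in_bounds = UintPtrOrSmiLessThan(index, length);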
PARAMETER_BINOP(IntPtrOrSmiEqual, WordEqual, SmiEqual) PARAMETER_BINOP(IntPtrOrSmiNotEqual, WordNotEqual, SmiNotEqual) PARAMETER_BINOP(IntPtrOrSmiLessThanOrEqual, IntPtrLessThanOrEqual, SmiLessThanOrEqual) PARAMETER_BINOP(IntPtrOrSmiGreaterThan, IntPtrGreaterThan, SmiGreaterThan) PARAMETER_BINOP(UintPtrOrSmiLessThan, UintPtrLessThan, SmiBelow) PARAMETER_BINOP(UintPtrOrSmiGreaterThanOrEqual, UintPtrGreaterThanOrEqual, SmiAboveOrEqual) #undef PARAMETER_BINOP uintptr_t ConstexprUintPtrShl(uintptr_t a, int32_t b) { return a << b; } uintptr_t ConstexprUintPtrShr(uintptr_t a, int32_t b) { return a >> b; } intptr_t ConstexprIntPtrAdd(intptr_t a, intptr_t b) { return a + b; } uintptr_t ConstexprUintPtrAdd(uintptr_t a, uintptr_t b) { return a + b; } intptr_t ConstexprWordNot(intptr_t a) { return ~a; } uintptr_t ConstexprWordNot(uintptr_t a) { return ~a; } TNode TaggedEqual(TNode a, TNode b) { if (COMPRESS_POINTERS_BOOL) { return Word32Equal(ReinterpretCast(a), ReinterpretCast(b)); } else { return WordEqual(ReinterpretCast(a), ReinterpretCast(b)); } } TNode TaggedNotEqual(TNode a, TNode b) { return Word32BinaryNot(TaggedEqual(a, b)); } TNode NoContextConstant(); #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ TNode().rootAccessorName())>::type>::type> \ name##Constant(); HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) #undef HEAP_CONSTANT_ACCESSOR #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ TNode().rootAccessorName())>::type>::type> \ name##Constant(); HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) #undef HEAP_CONSTANT_ACCESSOR #define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \ TNode Is##name(TNode value); \ TNode IsNot##name(TNode value); HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST) #undef HEAP_CONSTANT_TEST TNode BIntConstant(int value); template TNode IntPtrOrSmiConstant(int value); bool TryGetIntPtrOrSmiConstantValue(TNode maybe_constant, int* value); bool TryGetIntPtrOrSmiConstantValue(TNode maybe_constant, int* value); TNode PopulationCountFallback(TNode value); TNode PopulationCount64(TNode value); TNode PopulationCount32(TNode value); TNode CountTrailingZeros64(TNode value); TNode CountTrailingZeros32(TNode value); TNode CountLeadingZeros64(TNode value); TNode CountLeadingZeros32(TNode value); // Round the 32bits payload of the provided word up to the next power of two. TNode IntPtrRoundUpToPowerOfTwo32(TNode value); // Select the maximum of the two provided IntPtr values. TNode IntPtrMax(TNode left, TNode right); // Select the minimum of the two provided IntPtr values. TNode IntPtrMin(TNode left, TNode right); TNode UintPtrMin(TNode left, TNode right); // Float64 operations. TNode Float64Ceil(TNode x); TNode Float64Floor(TNode x); TNode Float64Round(TNode x); TNode Float64RoundToEven(TNode x); TNode Float64Trunc(TNode x); // Select the minimum of the two provided Number values. TNode NumberMax(TNode left, TNode right); // Select the minimum of the two provided Number values. TNode NumberMin(TNode left, TNode right); // Returns true iff the given value fits into smi range and is >= 0. TNode IsValidPositiveSmi(TNode value); // Tag an IntPtr as a Smi value. TNode SmiTag(TNode value); // Untag a Smi value as an IntPtr. TNode SmiUntag(TNode value); // Smi conversions. 
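// For example (illustrative sketch): round-tripping between Smi and raw
// integer representations:
//
//   TNode<Smi> smi = SmiTag(IntPtrConstant(42));
//   TNode<IntPtrT> raw = SmiUntag(smi);     // 42 as a word
//   TNode<Int32T> raw32 = SmiToInt32(smi);  // 42 as an int32
//   TNode<Smi> back = SmiFromInt32(raw32);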
TNode SmiToFloat64(TNode value); TNode SmiFromIntPtr(TNode value) { return SmiTag(value); } TNode SmiFromInt32(TNode value); TNode SmiFromUint32(TNode value); TNode SmiToIntPtr(TNode value) { return SmiUntag(value); } TNode SmiToInt32(TNode value); // Smi operations. #define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \ TNode SmiOpName(TNode a, TNode b) { \ if (SmiValuesAre32Bits()) { \ return BitcastWordToTaggedSigned( \ IntPtrOpName(BitcastTaggedToWordForTagAndSmiBits(a), \ BitcastTaggedToWordForTagAndSmiBits(b))); \ } else { \ DCHECK(SmiValuesAre31Bits()); \ return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Int32OpName( \ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))))); \ } \ } SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add) SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub) SMI_ARITHMETIC_BINOP(SmiAnd, WordAnd, Word32And) SMI_ARITHMETIC_BINOP(SmiOr, WordOr, Word32Or) SMI_ARITHMETIC_BINOP(SmiXor, WordXor, Word32Xor) #undef SMI_ARITHMETIC_BINOP TNode TryIntPtrAdd(TNode a, TNode b, Label* if_overflow); TNode TryIntPtrSub(TNode a, TNode b, Label* if_overflow); TNode TryInt32Mul(TNode a, TNode b, Label* if_overflow); TNode TrySmiAdd(TNode a, TNode b, Label* if_overflow); TNode TrySmiSub(TNode a, TNode b, Label* if_overflow); TNode TrySmiAbs(TNode a, Label* if_overflow); TNode SmiShl(TNode a, int shift) { TNode result = BitcastWordToTaggedSigned( WordShl(BitcastTaggedToWordForTagAndSmiBits(a), shift)); // Smi shift have different result to int32 shift when the inputs are not // strictly limited. The CSA_DCHECK is to ensure valid inputs. CSA_DCHECK( this, TaggedEqual(result, BitwiseOp(SmiToInt32(a), Int32Constant(shift), Operation::kShiftLeft))); return result; } TNode SmiShr(TNode a, int shift) { TNode result; if (kTaggedSize == kInt64Size) { result = BitcastWordToTaggedSigned( WordAnd(WordShr(BitcastTaggedToWordForTagAndSmiBits(a), shift), BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1)))); } else { // For pointer compressed Smis, we want to make sure that we truncate to // int32 before shifting, to avoid the values of the top 32-bits from // leaking into the sign bit of the smi. result = BitcastWordToTaggedSigned(WordAnd( ChangeInt32ToIntPtr(Word32Shr( TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), shift)), BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1)))); } // Smi shift have different result to int32 shift when the inputs are not // strictly limited. The CSA_DCHECK is to ensure valid inputs. CSA_DCHECK( this, TaggedEqual(result, BitwiseOp(SmiToInt32(a), Int32Constant(shift), Operation::kShiftRightLogical))); return result; } TNode SmiSar(TNode a, int shift) { // The number of shift bits is |shift % 64| for 64-bits value and |shift % // 32| for 32-bits value. The DCHECK is to ensure valid inputs. DCHECK_LT(shift, 32); if (kTaggedSize == kInt64Size) { return BitcastWordToTaggedSigned( WordAnd(WordSar(BitcastTaggedToWordForTagAndSmiBits(a), shift), BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1)))); } else { // For pointer compressed Smis, we want to make sure that we truncate to // int32 before shifting, to avoid the values of the top 32-bits from // changing the sign bit of the smi. 
return BitcastWordToTaggedSigned(WordAnd( ChangeInt32ToIntPtr(Word32Sar( TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), shift)), BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1)))); } } TNode WordOrSmiShr(TNode a, int shift) { return SmiShr(a, shift); } TNode WordOrSmiShr(TNode a, int shift) { return WordShr(a, shift); } #define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \ TNode SmiOpName(TNode a, TNode b) { \ if (kTaggedSize == kInt64Size) { \ return IntPtrOpName(BitcastTaggedToWordForTagAndSmiBits(a), \ BitcastTaggedToWordForTagAndSmiBits(b)); \ } else { \ DCHECK_EQ(kTaggedSize, kInt32Size); \ DCHECK(SmiValuesAre31Bits()); \ return Int32OpName( \ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))); \ } \ } SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal) SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual) SMI_COMPARISON_OP(SmiAbove, UintPtrGreaterThan, Uint32GreaterThan) SMI_COMPARISON_OP(SmiAboveOrEqual, UintPtrGreaterThanOrEqual, Uint32GreaterThanOrEqual) SMI_COMPARISON_OP(SmiBelow, UintPtrLessThan, Uint32LessThan) SMI_COMPARISON_OP(SmiLessThan, IntPtrLessThan, Int32LessThan) SMI_COMPARISON_OP(SmiLessThanOrEqual, IntPtrLessThanOrEqual, Int32LessThanOrEqual) SMI_COMPARISON_OP(SmiGreaterThan, IntPtrGreaterThan, Int32GreaterThan) SMI_COMPARISON_OP(SmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual, Int32GreaterThanOrEqual) #undef SMI_COMPARISON_OP TNode SmiMax(TNode a, TNode b); TNode SmiMin(TNode a, TNode b); // Computes a % b for Smi inputs a and b; result is not necessarily a Smi. TNode SmiMod(TNode a, TNode b); // Computes a * b for Smi inputs a and b; result is not necessarily a Smi. TNode SmiMul(TNode a, TNode b); // Tries to compute dividend / divisor for Smi inputs; branching to bailout // if the division needs to be performed as a floating point operation. TNode TrySmiDiv(TNode dividend, TNode divisor, Label* bailout); // Compares two Smis a and b as if they were converted to strings and then // compared lexicographically. Returns: // -1 iff x < y. // 0 iff x == y. // 1 iff x > y. TNode SmiLexicographicCompare(TNode x, TNode y); #ifdef BINT_IS_SMI #define BINT_COMPARISON_OP(BIntOpName, SmiOpName, IntPtrOpName) \ TNode BIntOpName(TNode a, TNode b) { \ return SmiOpName(a, b); \ } #else #define BINT_COMPARISON_OP(BIntOpName, SmiOpName, IntPtrOpName) \ TNode BIntOpName(TNode a, TNode b) { \ return IntPtrOpName(a, b); \ } #endif BINT_COMPARISON_OP(BIntEqual, SmiEqual, WordEqual) BINT_COMPARISON_OP(BIntNotEqual, SmiNotEqual, WordNotEqual) BINT_COMPARISON_OP(BIntAbove, SmiAbove, UintPtrGreaterThan) BINT_COMPARISON_OP(BIntAboveOrEqual, SmiAboveOrEqual, UintPtrGreaterThanOrEqual) BINT_COMPARISON_OP(BIntBelow, SmiBelow, UintPtrLessThan) BINT_COMPARISON_OP(BIntLessThan, SmiLessThan, IntPtrLessThan) BINT_COMPARISON_OP(BIntLessThanOrEqual, SmiLessThanOrEqual, IntPtrLessThanOrEqual) BINT_COMPARISON_OP(BIntGreaterThan, SmiGreaterThan, IntPtrGreaterThan) BINT_COMPARISON_OP(BIntGreaterThanOrEqual, SmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual) #undef BINT_COMPARISON_OP // Smi | HeapNumber operations. 
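// For example (illustrative sketch): these helpers accept any Number and
// dispatch on the representation internally, producing a Smi when the result
// fits and a HeapNumber otherwise:
//
//   TNode<Number> next = NumberInc(value);  // value: TNode<Number>
//   TNode<Number> sum = NumberAdd(next, SmiConstant(1));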
TNode NumberInc(TNode value); TNode NumberDec(TNode value); TNode NumberAdd(TNode a, TNode b); TNode NumberSub(TNode a, TNode b); void GotoIfNotNumber(TNode value, Label* is_not_number); void GotoIfNumber(TNode value, Label* is_number); TNode SmiToNumber(TNode v) { return v; } TNode BitwiseOp(TNode left32, TNode right32, Operation bitwise_op); TNode BitwiseSmiOp(TNode left32, TNode right32, Operation bitwise_op); // Allocate an object of the given size. TNode AllocateInNewSpace( TNode size, AllocationFlags flags = AllocationFlag::kNone); TNode AllocateInNewSpace( int size, AllocationFlags flags = AllocationFlag::kNone); TNode Allocate(TNode size, AllocationFlags flags = AllocationFlag::kNone); TNode Allocate(int size, AllocationFlags flags = AllocationFlag::kNone); TNode IsRegularHeapObjectSize(TNode size); using BranchGenerator = std::function; template using NodeGenerator = std::function()>; using ExtraNode = std::pair, const char*>; void Dcheck(const BranchGenerator& branch, const char* message, const char* file, int line, std::initializer_list extra_nodes = {}); void Dcheck(const NodeGenerator& condition_body, const char* message, const char* file, int line, std::initializer_list extra_nodes = {}); void Dcheck(TNode condition_node, const char* message, const char* file, int line, std::initializer_list extra_nodes = {}); void Check(const BranchGenerator& branch, const char* message, const char* file, int line, std::initializer_list extra_nodes = {}); void Check(const NodeGenerator& condition_body, const char* message, const char* file, int line, std::initializer_list extra_nodes = {}); void Check(TNode condition_node, const char* message, const char* file, int line, std::initializer_list extra_nodes = {}); void FailAssert(const char* message, const std::vector& files_and_lines, std::initializer_list extra_nodes = {}); void FastCheck(TNode condition); TNode IsCodeTMap(TNode map) { return V8_EXTERNAL_CODE_SPACE_BOOL ? IsCodeDataContainerMap(map) : IsCodeMap(map); } TNode IsCodeT(TNode object) { return IsCodeTMap(LoadMap(object)); } // TODO(v8:11880): remove once Code::bytecode_or_interpreter_data field // is cached in or moved to CodeT. TNode FromCodeT(TNode code) { #ifdef V8_EXTERNAL_CODE_SPACE #if V8_TARGET_BIG_ENDIAN #error "This code requires updating for big-endian architectures" #endif // Given the fields layout we can read the Code reference as a full word. STATIC_ASSERT(CodeDataContainer::kCodeCageBaseUpper32BitsOffset == CodeDataContainer::kCodeOffset + kTaggedSize); TNode o = BitcastWordToTagged(Load( code, IntPtrConstant(CodeDataContainer::kCodeOffset - kHeapObjectTag))); return CAST(o); #else return code; #endif } TNode CodeDataContainerFromCodeT(TNode code) { #ifdef V8_EXTERNAL_CODE_SPACE return code; #else return LoadObjectField(code, Code::kCodeDataContainerOffset); #endif } TNode ToCodeT(TNode code) { #ifdef V8_EXTERNAL_CODE_SPACE return LoadObjectField(code, Code::kCodeDataContainerOffset); #else return code; #endif } TNode ToCodeT(TNode code, TNode code_data_container) { #ifdef V8_EXTERNAL_CODE_SPACE return code_data_container; #else return code; #endif } TNode GetCodeEntry(TNode code); // The following Call wrappers call an object according to the semantics that // one finds in the EcmaScript spec, operating on an Callable (e.g. a // JSFunction or proxy) rather than a Code object. template TNode Call(TNode context, TNode callable, TNode receiver, TArgs... 
args) { return CallJS( CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined), context, callable, receiver, args...); } template TNode Call(TNode context, TNode callable, TNode receiver, TArgs... args) { if (IsUndefinedConstant(receiver) || IsNullConstant(receiver)) { return CallJS( CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined), context, callable, receiver, args...); } return CallJS(CodeFactory::Call(isolate()), context, callable, receiver, args...); } TNode CallApiCallback(TNode context, TNode callback, TNode argc, TNode data, TNode holder, TNode receiver); TNode CallApiCallback(TNode context, TNode callback, TNode argc, TNode data, TNode holder, TNode receiver, TNode value); TNode CallRuntimeNewArray(TNode context, TNode receiver, TNode length, TNode new_target, TNode allocation_site); void TailCallRuntimeNewArray(TNode context, TNode receiver, TNode length, TNode new_target, TNode allocation_site); template TNode ConstructWithTarget(TNode context, TNode target, TNode new_target, TArgs... args) { return CAST(ConstructJSWithTarget(CodeFactory::Construct(isolate()), context, target, new_target, implicit_cast>(args)...)); } template TNode Construct(TNode context, TNode new_target, TArgs... args) { return ConstructWithTarget(context, new_target, new_target, args...); } template TNode Select(TNode condition, const NodeGenerator& true_body, const NodeGenerator& false_body) { TVARIABLE(T, value); Label vtrue(this), vfalse(this), end(this); Branch(condition, &vtrue, &vfalse); BIND(&vtrue); { value = true_body(); Goto(&end); } BIND(&vfalse); { value = false_body(); Goto(&end); } BIND(&end); return value.value(); } template TNode SelectConstant(TNode condition, TNode true_value, TNode false_value) { return Select( condition, [=] { return true_value; }, [=] { return false_value; }); } TNode SelectInt32Constant(TNode condition, int true_value, int false_value); TNode SelectIntPtrConstant(TNode condition, int true_value, int false_value); TNode SelectBooleanConstant(TNode condition); TNode SelectSmiConstant(TNode condition, Smi true_value, Smi false_value); TNode SelectSmiConstant(TNode condition, int true_value, Smi false_value) { return SelectSmiConstant(condition, Smi::FromInt(true_value), false_value); } TNode SelectSmiConstant(TNode condition, Smi true_value, int false_value) { return SelectSmiConstant(condition, true_value, Smi::FromInt(false_value)); } TNode SelectSmiConstant(TNode condition, int true_value, int false_value) { return SelectSmiConstant(condition, Smi::FromInt(true_value), Smi::FromInt(false_value)); } TNode SingleCharacterStringConstant(char const* single_char) { DCHECK_EQ(strlen(single_char), 1); return HeapConstant( isolate()->factory()->LookupSingleCharacterStringFromCode( single_char[0])); } TNode TruncateWordToInt32(TNode value); TNode TruncateIntPtrToInt32(TNode value); // Check a value for smi-ness TNode TaggedIsSmi(TNode a); TNode TaggedIsNotSmi(TNode a); // Check that the value is a non-negative smi. TNode TaggedIsPositiveSmi(TNode a); // Check that a word has a word-aligned address. TNode WordIsAligned(TNode word, size_t alignment); TNode WordIsPowerOfTwo(TNode value); // Check if lower_limit <= value <= higher_limit. 
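// For example (illustrative sketch; the instance-type bounds are placeholders
// for any FIRST_*_TYPE/LAST_*_TYPE pair):
//
//   TNode<Uint16T> instance_type = LoadInstanceType(object);
//   TNode<BoolT> in_range =
//       IsInRange(instance_type, FIRST_FOO_TYPE, LAST_FOO_TYPE);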
template TNode IsInRange(TNode value, U lower_limit, U higher_limit) { DCHECK_LE(lower_limit, higher_limit); STATIC_ASSERT(sizeof(U) <= kInt32Size); return Uint32LessThanOrEqual(Int32Sub(value, Int32Constant(lower_limit)), Int32Constant(higher_limit - lower_limit)); } TNode IsInRange(TNode value, intptr_t lower_limit, intptr_t higher_limit) { DCHECK_LE(lower_limit, higher_limit); return UintPtrLessThanOrEqual(IntPtrSub(value, IntPtrConstant(lower_limit)), IntPtrConstant(higher_limit - lower_limit)); } #if DEBUG void Bind(Label* label, AssemblerDebugInfo debug_info); #endif // DEBUG void Bind(Label* label); template void Bind(compiler::CodeAssemblerParameterizedLabel* label, TNode*... phis) { CodeAssembler::Bind(label, phis...); } void BranchIfSmiEqual(TNode a, TNode b, Label* if_true, Label* if_false) { Branch(SmiEqual(a, b), if_true, if_false); } void BranchIfSmiLessThan(TNode a, TNode b, Label* if_true, Label* if_false) { Branch(SmiLessThan(a, b), if_true, if_false); } void BranchIfSmiLessThanOrEqual(TNode a, TNode b, Label* if_true, Label* if_false) { Branch(SmiLessThanOrEqual(a, b), if_true, if_false); } void BranchIfFloat64IsNaN(TNode value, Label* if_true, Label* if_false) { Branch(Float64Equal(value, value), if_false, if_true); } // Branches to {if_true} if ToBoolean applied to {value} yields true, // otherwise goes to {if_false}. void BranchIfToBooleanIsTrue(TNode value, Label* if_true, Label* if_false); // Branches to {if_false} if ToBoolean applied to {value} yields false, // otherwise goes to {if_true}. void BranchIfToBooleanIsFalse(TNode value, Label* if_false, Label* if_true) { BranchIfToBooleanIsTrue(value, if_true, if_false); } void BranchIfJSReceiver(TNode object, Label* if_true, Label* if_false); // Branches to {if_true} when --force-slow-path flag has been passed. // It's used for testing to ensure that slow path implementation behave // equivalent to corresponding fast paths (where applicable). // // Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise. void GotoIfForceSlowPath(Label* if_true); // // Caged pointer related functionality. // // Load a caged pointer value from an object. TNode LoadSandboxedPointerFromObject(TNode object, int offset) { return LoadSandboxedPointerFromObject(object, IntPtrConstant(offset)); } TNode LoadSandboxedPointerFromObject(TNode object, TNode offset); // Stored a caged pointer value to an object. void StoreSandboxedPointerToObject(TNode object, int offset, TNode pointer) { StoreSandboxedPointerToObject(object, IntPtrConstant(offset), pointer); } void StoreSandboxedPointerToObject(TNode object, TNode offset, TNode pointer); TNode EmptyBackingStoreBufferConstant(); // // ExternalPointerT-related functionality. // #ifdef V8_SANDBOXED_EXTERNAL_POINTERS TNode ChangeIndexToExternalPointer(TNode index); TNode ChangeExternalPointerToIndex(TNode pointer); #endif // V8_SANDBOXED_EXTERNAL_POINTERS // Initialize an external pointer field in an object. void InitializeExternalPointerField(TNode object, int offset) { InitializeExternalPointerField(object, IntPtrConstant(offset)); } void InitializeExternalPointerField(TNode object, TNode offset); // Initialize an external pointer field in an object with given value. 
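// For example (illustrative sketch; the offset and tag names are
// placeholders):
//
//   InitializeExternalPointerField(object, kResourceOffsetPlaceholder,
//                                  pointer, kPlaceholderTag);
//
// Roughly speaking, with external pointer sandboxing enabled the store is
// routed through the external pointer table; in other configurations the
// pointer is stored in the field directly.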
void InitializeExternalPointerField(TNode object, int offset, TNode pointer, ExternalPointerTag tag) { InitializeExternalPointerField(object, IntPtrConstant(offset), pointer, tag); } void InitializeExternalPointerField(TNode object, TNode offset, TNode pointer, ExternalPointerTag tag) { InitializeExternalPointerField(object, offset); StoreExternalPointerToObject(object, offset, pointer, tag); } // Load an external pointer value from an object. TNode LoadExternalPointerFromObject(TNode object, int offset, ExternalPointerTag tag) { return LoadExternalPointerFromObject(object, IntPtrConstant(offset), tag); } TNode LoadExternalPointerFromObject(TNode object, TNode offset, ExternalPointerTag tag); // Store external object pointer to object. void StoreExternalPointerToObject(TNode object, int offset, TNode pointer, ExternalPointerTag tag) { StoreExternalPointerToObject(object, IntPtrConstant(offset), pointer, tag); } void StoreExternalPointerToObject(TNode object, TNode offset, TNode pointer, ExternalPointerTag tag); TNode LoadForeignForeignAddressPtr(TNode object) { return LoadExternalPointerFromObject(object, Foreign::kForeignAddressOffset, kForeignForeignAddressTag); } TNode LoadExternalStringResourcePtr(TNode object) { return LoadExternalPointerFromObject( object, ExternalString::kResourceOffset, kExternalStringResourceTag); } TNode LoadExternalStringResourceDataPtr( TNode object) { // This is only valid for ExternalStrings where the resource data // pointer is cached (i.e. no uncached external strings). CSA_DCHECK(this, Word32NotEqual( Word32And(LoadInstanceType(object), Int32Constant(kUncachedExternalStringMask)), Int32Constant(kUncachedExternalStringTag))); return LoadExternalPointerFromObject(object, ExternalString::kResourceDataOffset, kExternalStringResourceDataTag); } TNode LoadJSTypedArrayExternalPointerPtr( TNode holder) { return LoadSandboxedPointerFromObject(holder, JSTypedArray::kExternalPointerOffset); } void StoreJSTypedArrayExternalPointerPtr(TNode holder, TNode value) { StoreSandboxedPointerToObject(holder, JSTypedArray::kExternalPointerOffset, value); } // Load value from current parent frame by given offset in bytes. TNode LoadFromParentFrame(int offset); // Load an object pointer from a buffer that isn't in the heap. TNode LoadBufferObject(TNode buffer, int offset) { return LoadFullTagged(buffer, IntPtrConstant(offset)); } template TNode LoadBufferData(TNode buffer, int offset) { return UncheckedCast( Load(MachineTypeOf::value, buffer, IntPtrConstant(offset))); } TNode LoadBufferPointer(TNode buffer, int offset) { return LoadBufferData(buffer, offset); } TNode LoadBufferSmi(TNode buffer, int offset) { return CAST(LoadBufferObject(buffer, offset)); } TNode LoadBufferIntptr(TNode buffer, int offset) { return LoadBufferData(buffer, offset); } TNode LoadUint8Ptr(TNode ptr, TNode offset); // Load a field from an object on the heap. template , TNode>::value && std::is_base_of::value, int>::type = 0> TNode LoadObjectField(TNode object, int offset) { const MachineType machine_type = offset == HeapObject::kMapOffset ? 
MachineType::MapInHeader() : MachineTypeOf::value; return CAST(LoadFromObject(machine_type, object, IntPtrConstant(offset - kHeapObjectTag))); } template , TNode>::value && !std::is_base_of::value, int>::type = 0> TNode LoadObjectField(TNode object, int offset) { return CAST(LoadFromObject(MachineTypeOf::value, object, IntPtrConstant(offset - kHeapObjectTag))); } template , TNode>::value, int>::type = 0> TNode LoadObjectField(TNode object, int offset) { return UncheckedCast( LoadFromObject(MachineTypeOf::value, object, IntPtrConstant(offset - kHeapObjectTag))); } TNode LoadObjectField(TNode object, int offset) { return UncheckedCast( LoadFromObject(MachineType::AnyTagged(), object, IntPtrConstant(offset - kHeapObjectTag))); } TNode LoadObjectField(TNode object, TNode offset) { return UncheckedCast( LoadFromObject(MachineType::AnyTagged(), object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)))); } template , TNode>::value, int>::type = 0> TNode LoadObjectField(TNode object, TNode offset) { return UncheckedCast( LoadFromObject(MachineTypeOf::value, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)))); } // Load a SMI field and untag it. TNode LoadAndUntagObjectField(TNode object, int offset); // Load a SMI field, untag it, and convert to Word32. TNode LoadAndUntagToWord32ObjectField(TNode object, int offset); TNode LoadMaybeWeakObjectField(TNode object, int offset) { return UncheckedCast(LoadObjectField(object, offset)); } TNode LoadConstructorOrBackPointer(TNode map) { return LoadObjectField(map, Map::kConstructorOrBackPointerOrNativeContextOffset); } TNode LoadSimd128(TNode ptr) { return Load(ptr); } // Reference is the CSA-equivalent of a Torque reference value, representing // an inner pointer into a HeapObject. // // The object can be a HeapObject or an all-zero bitpattern. The latter is // used for off-heap data, in which case the offset holds the actual address // and the data must be untagged (i.e. accessed via the Load-/StoreReference // overloads for TNode-convertible types below). // // TODO(gsps): Remove in favor of flattened {Load,Store}Reference interface. 
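// For example (illustrative sketch; the field offset is a placeholder):
//
//   Reference ref{object, IntPtrConstant(kFieldOffsetPlaceholder)};
//   TNode<Object> value = LoadReference<Object>(ref);
//
// The offset is the usual field offset as passed to LoadObjectField; the
// kHeapObjectTag adjustment happens inside LoadReference/StoreReference.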
struct Reference { TNode object; TNode offset; std::tuple, TNode> Flatten() const { return std::make_tuple(object, offset); } }; template , TNode>::value, int>::type = 0> TNode LoadReference(Reference reference) { if (IsMapOffsetConstant(reference.offset)) { TNode map = LoadMap(CAST(reference.object)); DCHECK((std::is_base_of::value)); return ReinterpretCast(map); } TNode offset = IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); CSA_DCHECK(this, TaggedIsNotSmi(reference.object)); return CAST( LoadFromObject(MachineTypeOf::value, reference.object, offset)); } template , TNode>::value || std::is_same::value, int>::type = 0> TNode LoadReference(Reference reference) { DCHECK(!IsMapOffsetConstant(reference.offset)); TNode offset = IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); return UncheckedCast( LoadFromObject(MachineTypeOf::value, reference.object, offset)); } template , TNode>::value || std::is_same::value, int>::type = 0> void StoreReference(Reference reference, TNode value) { if (IsMapOffsetConstant(reference.offset)) { DCHECK((std::is_base_of::value)); return StoreMap(CAST(reference.object), ReinterpretCast(value)); } MachineRepresentation rep = MachineRepresentationOf::value; StoreToObjectWriteBarrier write_barrier = StoreToObjectWriteBarrier::kFull; if (std::is_same::value) { write_barrier = StoreToObjectWriteBarrier::kNone; } else if (std::is_same::value) { write_barrier = StoreToObjectWriteBarrier::kMap; } TNode offset = IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); CSA_DCHECK(this, TaggedIsNotSmi(reference.object)); StoreToObject(rep, reference.object, offset, value, write_barrier); } template , TNode>::value, int>::type = 0> void StoreReference(Reference reference, TNode value) { DCHECK(!IsMapOffsetConstant(reference.offset)); TNode offset = IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); StoreToObject(MachineRepresentationOf::value, reference.object, offset, value, StoreToObjectWriteBarrier::kNone); } TNode GCUnsafeReferenceToRawPtr(TNode object, TNode offset) { return ReinterpretCast( IntPtrAdd(BitcastTaggedToWord(object), IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)))); } // Load the floating point value of a HeapNumber. TNode LoadHeapNumberValue(TNode object); // Load the Map of an HeapObject. TNode LoadMap(TNode object); // Load the instance type of an HeapObject. TNode LoadInstanceType(TNode object); // Compare the instance the type of the object against the provided one. TNode HasInstanceType(TNode object, InstanceType type); TNode DoesntHaveInstanceType(TNode object, InstanceType type); TNode TaggedDoesntHaveInstanceType(TNode any_tagged, InstanceType type); TNode IsStringWrapperElementsKind(TNode map); void GotoIfMapHasSlowProperties(TNode map, Label* if_slow); // Load the properties backing store of a JSReceiver. TNode LoadSlowProperties(TNode object); TNode LoadFastProperties(TNode object); // Load the elements backing store of a JSObject. TNode LoadElements(TNode object) { return LoadJSObjectElements(object); } // Load the length of a JSArray instance. TNode LoadJSArgumentsObjectLength(TNode context, TNode array); // Load the length of a fast JSArray instance. Returns a positive Smi. TNode LoadFastJSArrayLength(TNode array); // Load the length of a fixed array base instance. TNode LoadFixedArrayBaseLength(TNode array); // Load the length of a fixed array base instance. TNode LoadAndUntagFixedArrayBaseLength(TNode array); // Load the length of a WeakFixedArray. 
TNode LoadWeakFixedArrayLength(TNode array); TNode LoadAndUntagWeakFixedArrayLength(TNode array); // Load the number of descriptors in DescriptorArray. TNode LoadNumberOfDescriptors(TNode array); // Load the number of own descriptors of a map. TNode LoadNumberOfOwnDescriptors(TNode map); // Load the bit field of a Map. TNode LoadMapBitField(TNode map); // Load bit field 2 of a map. TNode LoadMapBitField2(TNode map); // Load bit field 3 of a map. TNode LoadMapBitField3(TNode map); // Load the instance type of a map. TNode LoadMapInstanceType(TNode map); // Load the ElementsKind of a map. TNode LoadMapElementsKind(TNode map); TNode LoadElementsKind(TNode object); // Load the instance descriptors of a map. TNode LoadMapDescriptors(TNode map); // Load the prototype of a map. TNode LoadMapPrototype(TNode map); // Load the instance size of a Map. TNode LoadMapInstanceSizeInWords(TNode map); // Load the inobject properties start of a Map (valid only for JSObjects). TNode LoadMapInobjectPropertiesStartInWords(TNode map); // Load the constructor function index of a Map (only for primitive maps). TNode LoadMapConstructorFunctionIndex(TNode map); // Load the constructor of a Map (equivalent to Map::GetConstructor()). TNode LoadMapConstructor(TNode map); // Load the EnumLength of a Map. TNode LoadMapEnumLength(TNode map); // Load the back-pointer of a Map. TNode LoadMapBackPointer(TNode map); // Checks that |map| has only simple properties, returns bitfield3. TNode EnsureOnlyHasSimpleProperties(TNode map, TNode instance_type, Label* bailout); // Load the identity hash of a JSRececiver. TNode LoadJSReceiverIdentityHash(TNode receiver, Label* if_no_hash = nullptr); // This is only used on a newly allocated PropertyArray which // doesn't have an existing hash. void InitializePropertyArrayLength(TNode property_array, TNode length); // Check if the map is set for slow properties. TNode IsDictionaryMap(TNode map); // Load the Name::hash() value of a name as an uint32 value. // If {if_hash_not_computed} label is specified then it also checks if // hash is actually computed. TNode LoadNameHash(TNode name, Label* if_hash_not_computed = nullptr); TNode LoadNameHashAssumeComputed(TNode name); // Load length field of a String object as Smi value. TNode LoadStringLengthAsSmi(TNode string); // Load length field of a String object as intptr_t value. TNode LoadStringLengthAsWord(TNode string); // Load length field of a String object as uint32_t value. TNode LoadStringLengthAsWord32(TNode string); // Load value field of a JSPrimitiveWrapper object. TNode LoadJSPrimitiveWrapperValue(TNode object); // Figures out whether the value of maybe_object is: // - a SMI (jump to "if_smi", "extracted" will be the SMI value) // - a cleared weak reference (jump to "if_cleared", "extracted" will be // untouched) // - a weak reference (jump to "if_weak", "extracted" will be the object // pointed to) // - a strong reference (jump to "if_strong", "extracted" will be the object // pointed to) void DispatchMaybeObject(TNode maybe_object, Label* if_smi, Label* if_cleared, Label* if_weak, Label* if_strong, TVariable* extracted); // See MaybeObject for semantics of these functions. TNode IsStrong(TNode value); TNode GetHeapObjectIfStrong(TNode value, Label* if_not_strong); TNode IsWeakOrCleared(TNode value); TNode IsCleared(TNode value); TNode IsNotCleared(TNode value) { return Word32BinaryNot(IsCleared(value)); } // Removes the weak bit + asserts it was set. 
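// For example (illustrative sketch): unwrapping a slot that is known to hold
// either a weak reference or a cleared value:
//
//   Label if_cleared(this);
//   TNode<HeapObject> target =
//       GetHeapObjectAssumeWeak(maybe_object, &if_cleared);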
TNode GetHeapObjectAssumeWeak(TNode value); TNode GetHeapObjectAssumeWeak(TNode value, Label* if_cleared); // Checks if |maybe_object| is a weak reference to given |heap_object|. // Works for both any tagged |maybe_object| values. TNode IsWeakReferenceTo(TNode maybe_object, TNode heap_object); // Returns true if the |object| is a HeapObject and |maybe_object| is a weak // reference to |object|. // The |maybe_object| must not be a Smi. TNode IsWeakReferenceToObject(TNode maybe_object, TNode object); TNode MakeWeak(TNode value); void FixedArrayBoundsCheck(TNode array, TNode index, int additional_offset); void FixedArrayBoundsCheck(TNode array, TNode index, int additional_offset); void FixedArrayBoundsCheck(TNode array, TNode index, int additional_offset) { FixedArrayBoundsCheck(array, Signed(index), additional_offset); } // Array is any array-like type that has a fixed header followed by // tagged elements. template TNode LoadArrayLength(TNode array); // Array is any array-like type that has a fixed header followed by // tagged elements. template TNode LoadArrayElement(TNode array, int array_header_size, TNode index, int additional_offset = 0); template TNode LoadFixedArrayElement( TNode object, TNode index, int additional_offset = 0, CheckBounds check_bounds = CheckBounds::kAlways); // This doesn't emit a bounds-check. As part of the security-performance // tradeoff, only use it if it is performance critical. TNode UnsafeLoadFixedArrayElement(TNode object, TNode index, int additional_offset = 0) { return LoadFixedArrayElement(object, index, additional_offset, CheckBounds::kDebugOnly); } TNode LoadFixedArrayElement(TNode object, int index, int additional_offset = 0) { return LoadFixedArrayElement(object, IntPtrConstant(index), additional_offset); } // This doesn't emit a bounds-check. As part of the security-performance // tradeoff, only use it if it is performance critical. TNode UnsafeLoadFixedArrayElement(TNode object, int index, int additional_offset = 0) { return LoadFixedArrayElement(object, IntPtrConstant(index), additional_offset, CheckBounds::kDebugOnly); } TNode LoadPropertyArrayElement(TNode object, TNode index); TNode LoadPropertyArrayLength(TNode object); // Load an element from an array and untag it and return it as Word32. // Array is any array-like type that has a fixed header followed by // tagged elements. template TNode LoadAndUntagToWord32ArrayElement(TNode array, int array_header_size, TNode index, int additional_offset = 0); // Load an array element from a FixedArray, untag it and return it as Word32. TNode LoadAndUntagToWord32FixedArrayElement( TNode object, TNode index, int additional_offset = 0); // Load an array element from a WeakFixedArray. TNode LoadWeakFixedArrayElement(TNode object, TNode index, int additional_offset = 0); // Load an array element from a FixedDoubleArray. TNode LoadFixedDoubleArrayElement( TNode object, TNode index, Label* if_hole = nullptr, MachineType machine_type = MachineType::Float64()); // Load an array element from a FixedArray, FixedDoubleArray or a // NumberDictionary (depending on the |elements_kind|) and return // it as a tagged value. Assumes that the |index| passed a length // check before. Bails out to |if_accessor| if the element that // was found is an accessor, or to |if_hole| if the element at // the given |index| is not found in |elements|. TNode LoadFixedArrayBaseElementAsTagged( TNode elements, TNode index, TNode elements_kind, Label* if_accessor, Label* if_hole); // Load a feedback slot from a FeedbackVector. 
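// For example (illustrative sketch): the common monomorphic-IC check that a
// feedback slot weakly references the receiver's map:
//
//   TNode<MaybeObject> feedback = LoadFeedbackVectorSlot(vector, slot);
//   TNode<BoolT> is_monomorphic = IsWeakReferenceTo(feedback, receiver_map);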
template TNode LoadFeedbackVectorSlot( TNode feedback_vector, TNode slot, int additional_offset = 0); TNode LoadFeedbackVectorLength(TNode); TNode LoadDoubleWithHoleCheck(TNode array, TNode index, Label* if_hole = nullptr); TNode IsDoubleHole(TNode base, TNode offset); // Load Float64 value by |base| + |offset| address. If the value is a double // hole then jump to |if_hole|. If |machine_type| is None then only the hole // check is generated. TNode LoadDoubleWithHoleCheck( TNode base, TNode offset, Label* if_hole, MachineType machine_type = MachineType::Float64()); TNode LoadFixedTypedArrayElementAsTagged(TNode data_pointer, TNode index, ElementsKind elements_kind); TNode LoadFixedTypedArrayElementAsTagged( TNode data_pointer, TNode index, TNode elements_kind); // Parts of the above, factored out for readability: TNode LoadFixedBigInt64ArrayElementAsTagged( TNode data_pointer, TNode offset); TNode LoadFixedBigUint64ArrayElementAsTagged( TNode data_pointer, TNode offset); // 64-bit platforms only: TNode BigIntFromInt64(TNode value); TNode BigIntFromUint64(TNode value); // 32-bit platforms only: TNode BigIntFromInt32Pair(TNode low, TNode high); TNode BigIntFromUint32Pair(TNode low, TNode high); // ScopeInfo: TNode LoadScopeInfo(TNode context); TNode LoadScopeInfoHasExtensionField(TNode scope_info); // Context manipulation: void StoreContextElementNoWriteBarrier(TNode context, int slot_index, TNode value); TNode LoadNativeContext(TNode context); // Calling this is only valid if there's a module context in the chain. TNode LoadModuleContext(TNode context); TNode GetImportMetaObject(TNode context); void GotoIfContextElementEqual(TNode value, TNode native_context, int slot_index, Label* if_equal) { GotoIf(TaggedEqual(value, LoadContextElement(native_context, slot_index)), if_equal); } // Loads the initial map of the the Object constructor. TNode LoadObjectFunctionInitialMap(TNode native_context); TNode LoadSlowObjectWithNullPrototypeMap( TNode native_context); TNode LoadJSArrayElementsMap(ElementsKind kind, TNode native_context); TNode LoadJSArrayElementsMap(TNode kind, TNode native_context); TNode IsJSFunctionWithPrototypeSlot(TNode object); TNode IsGeneratorFunction(TNode function); void BranchIfHasPrototypeProperty(TNode function, TNode function_map_bit_field, Label* if_true, Label* if_false); void GotoIfPrototypeRequiresRuntimeLookup(TNode function, TNode map, Label* runtime); // Load the "prototype" property of a JSFunction. TNode LoadJSFunctionPrototype(TNode function, Label* if_bailout); TNode LoadSharedFunctionInfoBytecodeArray( TNode shared); void StoreObjectByteNoWriteBarrier(TNode object, int offset, TNode value); // Store the floating point value of a HeapNumber. void StoreHeapNumberValue(TNode object, TNode value); // Store a field to an object on the heap. 
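// For example (illustrative sketch; the first offset is a placeholder):
//
//   StoreObjectField(holder, kTaggedFieldOffsetPlaceholder, new_value);
//   StoreObjectFieldNoWriteBarrier(heap_number, HeapNumber::kValueOffset,
//                                  Float64Constant(1.5));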
void StoreObjectField(TNode object, int offset, TNode value); void StoreObjectField(TNode object, TNode offset, TNode value); void StoreObjectField(TNode object, int offset, TNode value); void StoreObjectField(TNode object, TNode offset, TNode value); template void StoreObjectFieldNoWriteBarrier(TNode object, TNode offset, TNode value) { int const_offset; if (TryToInt32Constant(offset, &const_offset)) { return StoreObjectFieldNoWriteBarrier(object, const_offset, value); } StoreNoWriteBarrier(MachineRepresentationOf::value, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value); } template void StoreObjectFieldNoWriteBarrier(TNode object, int offset, TNode value) { if (CanBeTaggedPointer(MachineRepresentationOf::value)) { OptimizedStoreFieldAssertNoWriteBarrier(MachineRepresentationOf::value, object, offset, value); } else { OptimizedStoreFieldUnsafeNoWriteBarrier(MachineRepresentationOf::value, object, offset, value); } } void UnsafeStoreObjectFieldNoWriteBarrier(TNode object, int offset, TNode value); // Store the Map of an HeapObject. void StoreMap(TNode object, TNode map); void StoreMapNoWriteBarrier(TNode object, RootIndex map_root_index); void StoreMapNoWriteBarrier(TNode object, TNode map); void StoreObjectFieldRoot(TNode object, int offset, RootIndex root); // Store an array element to a FixedArray. void StoreFixedArrayElement( TNode object, int index, TNode value, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, CheckBounds check_bounds = CheckBounds::kAlways) { return StoreFixedArrayElement(object, IntPtrConstant(index), value, barrier_mode, 0, check_bounds); } void StoreFixedArrayElement(TNode object, int index, TNode value, CheckBounds check_bounds = CheckBounds::kAlways) { return StoreFixedArrayElement(object, IntPtrConstant(index), TNode{value}, UNSAFE_SKIP_WRITE_BARRIER, 0, check_bounds); } template void StoreFixedArrayElement( TNode array, TNode index, TNode value, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, int additional_offset = 0, CheckBounds check_bounds = CheckBounds::kAlways) { // TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants? static_assert(std::is_same::value || std::is_same::value || std::is_same::value, "Only Smi, UintPtrT or IntPtrT index is allowed"); if (NeedsBoundsCheck(check_bounds)) { FixedArrayBoundsCheck(array, index, additional_offset); } StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode, additional_offset); } template void StoreFixedArrayElement(TNode array, TNode index, TNode value, int additional_offset = 0) { static_assert(std::is_same::value || std::is_same::value, "Only Smi or IntPtrT indeces is allowed"); StoreFixedArrayElement(array, index, TNode{value}, UNSAFE_SKIP_WRITE_BARRIER, additional_offset); } // These don't emit a bounds-check. As part of the security-performance // tradeoff, only use it if it is performance critical. 
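// For example (illustrative sketch): inside a loop whose index has already
// been bounds-checked against the array length:
//
//   UnsafeStoreFixedArrayElement(elements, index, value);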
void UnsafeStoreFixedArrayElement( TNode object, int index, TNode value, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) { return StoreFixedArrayElement(object, IntPtrConstant(index), value, barrier_mode, 0, CheckBounds::kDebugOnly); } void UnsafeStoreFixedArrayElement(TNode object, int index, TNode value) { return StoreFixedArrayElement(object, IntPtrConstant(index), value, UNSAFE_SKIP_WRITE_BARRIER, 0, CheckBounds::kDebugOnly); } void UnsafeStoreFixedArrayElement( TNode array, TNode index, TNode value, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, int additional_offset = 0) { return StoreFixedArrayElement(array, index, value, barrier_mode, additional_offset, CheckBounds::kDebugOnly); } void UnsafeStoreFixedArrayElement(TNode array, TNode index, TNode value, int additional_offset) { return StoreFixedArrayElement(array, index, value, UNSAFE_SKIP_WRITE_BARRIER, additional_offset, CheckBounds::kDebugOnly); } void StorePropertyArrayElement(TNode array, TNode index, TNode value) { StoreFixedArrayOrPropertyArrayElement(array, index, value, UPDATE_WRITE_BARRIER); } template void StoreFixedDoubleArrayElement( TNode object, TNode index, TNode value, CheckBounds check_bounds = CheckBounds::kAlways); void StoreDoubleHole(TNode object, TNode offset); void StoreFixedDoubleArrayHole(TNode array, TNode index); void StoreFeedbackVectorSlot( TNode feedback_vector, TNode slot, TNode value, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, int additional_offset = 0); void StoreJSSharedStructInObjectField(TNode object, TNode offset, TNode value); void StoreJSSharedStructPropertyArrayElement(TNode array, TNode index, TNode value) { // JSSharedStructs are allocated in the shared old space, which is currently // collected by stopping the world, so the incremental write barrier is not // needed. They can only store Smis and other HeapObjects in the shared old // space, so the generational write barrier is also not needed. // TODO(v8:12547): Add a safer, shared variant of SKIP_WRITE_BARRIER. StoreFixedArrayOrPropertyArrayElement(array, index, value, UNSAFE_SKIP_WRITE_BARRIER); } // EnsureArrayPushable verifies that receiver with this map is: // 1. Is not a prototype. // 2. Is not a dictionary. // 3. Has a writeable length property. // It returns ElementsKind as a node for further division into cases. TNode EnsureArrayPushable(TNode context, TNode map, Label* bailout); void TryStoreArrayElement(ElementsKind kind, Label* bailout, TNode elements, TNode index, TNode value); // Consumes args into the array, and returns tagged new length. TNode BuildAppendJSArray(ElementsKind kind, TNode array, CodeStubArguments* args, TVariable* arg_index, Label* bailout); // Pushes value onto the end of array. void BuildAppendJSArray(ElementsKind kind, TNode array, TNode value, Label* bailout); void StoreFieldsNoWriteBarrier(TNode start_address, TNode end_address, TNode value); // Marks the FixedArray copy-on-write without moving it. void MakeFixedArrayCOW(TNode array); TNode AllocateCellWithValue( TNode value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER); TNode AllocateSmiCell(int value = 0) { return AllocateCellWithValue(SmiConstant(value), SKIP_WRITE_BARRIER); } TNode LoadCellValue(TNode cell); void StoreCellValue(TNode cell, TNode value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER); // Allocate a HeapNumber without initializing its value. TNode AllocateHeapNumber(); // Allocate a HeapNumber with a specific value. 
TNode AllocateHeapNumberWithValue(TNode value); TNode AllocateHeapNumberWithValue(double value) { return AllocateHeapNumberWithValue(Float64Constant(value)); } // Allocate a BigInt with {length} digits. Sets the sign bit to {false}. // Does not initialize the digits. TNode AllocateBigInt(TNode length); // Like above, but allowing custom bitfield initialization. TNode AllocateRawBigInt(TNode length); void StoreBigIntBitfield(TNode bigint, TNode bitfield); void StoreBigIntDigit(TNode bigint, intptr_t digit_index, TNode digit); void StoreBigIntDigit(TNode bigint, TNode digit_index, TNode digit); TNode LoadBigIntBitfield(TNode bigint); TNode LoadBigIntDigit(TNode bigint, intptr_t digit_index); TNode LoadBigIntDigit(TNode bigint, TNode digit_index); // Allocate a ByteArray with the given non-zero length. TNode AllocateNonEmptyByteArray(TNode length, AllocationFlags flags); // Allocate a ByteArray with the given length. TNode AllocateByteArray( TNode length, AllocationFlags flags = AllocationFlag::kNone); // Allocate a SeqOneByteString with the given length. TNode AllocateSeqOneByteString( uint32_t length, AllocationFlags flags = AllocationFlag::kNone); using TorqueGeneratedExportedMacrosAssembler::AllocateSeqOneByteString; // Allocate a SeqTwoByteString with the given length. TNode AllocateSeqTwoByteString( uint32_t length, AllocationFlags flags = AllocationFlag::kNone); using TorqueGeneratedExportedMacrosAssembler::AllocateSeqTwoByteString; // Allocate a SlicedOneByteString with the given length, parent and offset. // |length| and |offset| are expected to be tagged. TNode AllocateSlicedOneByteString(TNode length, TNode parent, TNode offset); // Allocate a SlicedTwoByteString with the given length, parent and offset. // |length| and |offset| are expected to be tagged. TNode AllocateSlicedTwoByteString(TNode length, TNode parent, TNode offset); TNode AllocateNameDictionary(int at_least_space_for); TNode AllocateNameDictionary( TNode at_least_space_for, AllocationFlags = AllocationFlag::kNone); TNode AllocateNameDictionaryWithCapacity( TNode capacity, AllocationFlags = AllocationFlag::kNone); TNode CopyNameDictionary(TNode dictionary, Label* large_object_fallback); TNode AllocateOrderedHashSet(); TNode AllocateOrderedHashMap(); // Allocates an OrderedNameDictionary of the given capacity. This guarantees // that |capacity| entries can be added without reallocating. TNode AllocateOrderedNameDictionary( TNode capacity); TNode AllocateOrderedNameDictionary(int capacity); TNode AllocateJSObjectFromMap( TNode map, base::Optional> properties = base::nullopt, base::Optional> elements = base::nullopt, AllocationFlags flags = AllocationFlag::kNone, SlackTrackingMode slack_tracking_mode = kNoSlackTracking); void InitializeJSObjectFromMap( TNode object, TNode map, TNode instance_size, base::Optional> properties = base::nullopt, base::Optional> elements = base::nullopt, SlackTrackingMode slack_tracking_mode = kNoSlackTracking); void InitializeJSObjectBodyWithSlackTracking(TNode object, TNode map, TNode instance_size); void InitializeJSObjectBodyNoSlackTracking( TNode object, TNode map, TNode instance_size, int start_offset = JSObject::kHeaderSize); TNode IsValidFastJSArrayCapacity(TNode capacity); // // Allocate and return a JSArray with initialized header fields and its // uninitialized elements. 
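// Illustrative sketch of the intended usage (placeholder locals, not part
// of this interface): the returned pair is the array plus its backing
// store, whose elements the caller must still initialize:
//
//   TNode<JSArray> array;
//   TNode<FixedArrayBase> elements;
//   std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
//       PACKED_ELEMENTS, array_map, length, base::nullopt, capacity);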
std::pair, TNode> AllocateUninitializedJSArrayWithElements( ElementsKind kind, TNode array_map, TNode length, base::Optional> allocation_site, TNode capacity, AllocationFlags allocation_flags = AllocationFlag::kNone, int array_header_size = JSArray::kHeaderSize); // Allocate a JSArray and fill elements with the hole. TNode AllocateJSArray( ElementsKind kind, TNode array_map, TNode capacity, TNode length, base::Optional> allocation_site, AllocationFlags allocation_flags = AllocationFlag::kNone); TNode AllocateJSArray( ElementsKind kind, TNode array_map, TNode capacity, TNode length, base::Optional> allocation_site, AllocationFlags allocation_flags = AllocationFlag::kNone) { return AllocateJSArray(kind, array_map, SmiUntag(capacity), length, allocation_site, allocation_flags); } TNode AllocateJSArray( ElementsKind kind, TNode array_map, TNode capacity, TNode length, AllocationFlags allocation_flags = AllocationFlag::kNone) { return AllocateJSArray(kind, array_map, SmiUntag(capacity), length, base::nullopt, allocation_flags); } TNode AllocateJSArray( ElementsKind kind, TNode array_map, TNode capacity, TNode length, AllocationFlags allocation_flags = AllocationFlag::kNone) { return AllocateJSArray(kind, array_map, capacity, length, base::nullopt, allocation_flags); } // Allocate a JSArray and initialize the header fields. TNode AllocateJSArray( TNode array_map, TNode elements, TNode length, base::Optional> allocation_site = base::nullopt, int array_header_size = JSArray::kHeaderSize); enum class HoleConversionMode { kDontConvert, kConvertToUndefined }; // Clone a fast JSArray |array| into a new fast JSArray. // |convert_holes| tells the function to convert holes into undefined or not. // If |convert_holes| is set to kConvertToUndefined, but the function did not // find any hole in |array|, the resulting array will have the same elements // kind as |array|. If the function did find a hole, it will convert holes in // |array| to undefined in the resulting array, which will now have // PACKED_ELEMENTS kind. // If |convert_holes| is set to kDontConvert, holes are also copied to the // resulting array, which will have the same elements kind as |array|. The // function generates significantly less code in this case.
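// Illustrative sketch (placeholder locals): cloning with hole-to-undefined
// conversion and without an allocation site:
//
//   TNode<JSArray> copy = CloneFastJSArray(
//       context, source_array, base::nullopt,
//       HoleConversionMode::kConvertToUndefined);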
TNode CloneFastJSArray( TNode context, TNode array, base::Optional> allocation_site = base::nullopt, HoleConversionMode convert_holes = HoleConversionMode::kDontConvert); TNode ExtractFastJSArray(TNode context, TNode array, TNode begin, TNode count); template TNode AllocateFixedArray( ElementsKind kind, TNode capacity, AllocationFlags flags = AllocationFlag::kNone, base::Optional> fixed_array_map = base::nullopt); TNode GetCreationContext(TNode receiver, Label* if_bailout); TNode GetFunctionRealm(TNode context, TNode receiver, Label* if_bailout); TNode GetConstructor(TNode map); TNode GetInstanceTypeMap(InstanceType instance_type); TNode AllocateUninitializedFixedArray(intptr_t capacity) { return UncheckedCast(AllocateFixedArray( PACKED_ELEMENTS, IntPtrConstant(capacity), AllocationFlag::kNone)); } TNode AllocateZeroedFixedArray(TNode capacity) { TNode result = UncheckedCast( AllocateFixedArray(PACKED_ELEMENTS, capacity, AllocationFlag::kAllowLargeObjectAllocation)); FillFixedArrayWithSmiZero(result, capacity); return result; } TNode AllocateZeroedFixedDoubleArray( TNode capacity) { TNode result = UncheckedCast( AllocateFixedArray(PACKED_DOUBLE_ELEMENTS, capacity, AllocationFlag::kAllowLargeObjectAllocation)); FillFixedDoubleArrayWithZero(result, capacity); return result; } TNode AllocateFixedArrayWithHoles(TNode capacity, AllocationFlags flags) { TNode result = UncheckedCast( AllocateFixedArray(PACKED_ELEMENTS, capacity, flags)); FillFixedArrayWithValue(PACKED_ELEMENTS, result, IntPtrConstant(0), capacity, RootIndex::kTheHoleValue); return result; } TNode AllocateFixedDoubleArrayWithHoles( TNode capacity, AllocationFlags flags) { TNode result = UncheckedCast( AllocateFixedArray(PACKED_DOUBLE_ELEMENTS, capacity, flags)); FillFixedArrayWithValue(PACKED_DOUBLE_ELEMENTS, result, IntPtrConstant(0), capacity, RootIndex::kTheHoleValue); return result; } TNode AllocatePropertyArray(TNode capacity); TNode AllocateWasmArray(TNode size_in_bytes, int initialization); // TODO(v8:9722): Return type should be JSIteratorResult TNode AllocateJSIteratorResult(TNode context, TNode value, TNode done); // TODO(v8:9722): Return type should be JSIteratorResult TNode AllocateJSIteratorResultForEntry(TNode context, TNode key, TNode value); TNode ArraySpeciesCreate(TNode context, TNode originalArray, TNode len); template void FillFixedArrayWithValue(ElementsKind kind, TNode array, TNode from_index, TNode to_index, RootIndex value_root_index); // Uses memset to effectively initialize the given FixedArray with zeroes. void FillFixedArrayWithSmiZero(TNode array, TNode length); void FillFixedDoubleArrayWithZero(TNode array, TNode length); void FillPropertyArrayWithUndefined(TNode array, TNode from_index, TNode to_index); enum class DestroySource { kNo, kYes }; // Increment the call count for a CALL_IC or construct call. // The call count is located at feedback_vector[slot_id + 1]. void IncrementCallCount(TNode feedback_vector, TNode slot_id); // Specify DestroySource::kYes if {from_array} is being supplanted by // {to_array}. This offers a slight performance benefit by simply copying the // array word by word. The source may be destroyed at the end of this macro. // // Otherwise, specify DestroySource::kNo for operations where an Object is // being cloned, to ensure that mutable HeapNumbers are unique between the // source and cloned object. 
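// Illustrative sketch (placeholder locals): when |old_properties| is being
// supplanted by a freshly allocated |new_properties| (so the write barrier
// can be skipped), the cheaper word-by-word copy is requested like this:
//
//   CopyPropertyArrayValues(old_properties, new_properties, length,
//                           SKIP_WRITE_BARRIER, DestroySource::kYes);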
void CopyPropertyArrayValues(TNode from_array, TNode to_array, TNode length, WriteBarrierMode barrier_mode, DestroySource destroy_source); // Copies all elements from |from_array| of |length| size to // |to_array| of the same size respecting the elements kind. template void CopyFixedArrayElements( ElementsKind kind, TNode from_array, TNode to_array, TNode length, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) { CopyFixedArrayElements(kind, from_array, kind, to_array, IntPtrOrSmiConstant(0), length, length, barrier_mode); } // Copies |element_count| elements from |from_array| starting from element // zero to |to_array| of |capacity| size respecting both arrays' elements // kinds. template void CopyFixedArrayElements( ElementsKind from_kind, TNode from_array, ElementsKind to_kind, TNode to_array, TNode element_count, TNode capacity, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) { CopyFixedArrayElements(from_kind, from_array, to_kind, to_array, IntPtrOrSmiConstant(0), element_count, capacity, barrier_mode); } // Copies |element_count| elements from |from_array| starting from element // |first_element| to |to_array| of |capacity| size respecting both arrays' // elements kinds. // |convert_holes| tells the function whether to convert holes to undefined. // |var_holes_converted| can be used to signify that the conversion happened // (i.e. that there were holes). If |convert_holes| is // HoleConversionMode::kConvertToUndefined, then it must not be the case that // IsDoubleElementsKind(to_kind). template void CopyFixedArrayElements( ElementsKind from_kind, TNode from_array, ElementsKind to_kind, TNode to_array, TNode first_element, TNode element_count, TNode capacity, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, HoleConversionMode convert_holes = HoleConversionMode::kDontConvert, TVariable* var_holes_converted = nullptr); void JumpIfPointersFromHereAreInteresting(TNode object, Label* interesting); // Efficiently copy elements within a single array. The regions // [src_index, src_index + length) and [dst_index, dst_index + length) // can be overlapping. void MoveElements(ElementsKind kind, TNode elements, TNode dst_index, TNode src_index, TNode length); // Efficiently copy elements from one array to another. The ElementsKind // needs to be the same. Copy from src_elements at // [src_index, src_index + length) to dst_elements at // [dst_index, dst_index + length). // The function decides whether it can use memcpy. If it cannot, // |write_barrier| can be used to skip the write barrier. SKIP_WRITE_BARRIER is // only safe when copying to new space, or when copying to old space and the // array does not contain object pointers.
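// Illustrative sketch (placeholder locals): copying |length| tagged
// elements between two FixedArrays with the default, always-safe write
// barrier:
//
//   CopyElements(PACKED_ELEMENTS, dst_elements, dst_index,
//                src_elements, src_index, length);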
void CopyElements(ElementsKind kind, TNode dst_elements, TNode dst_index, TNode src_elements, TNode src_index, TNode length, WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER); TNode HeapObjectToFixedArray(TNode base, Label* cast_fail); TNode HeapObjectToFixedDoubleArray(TNode base, Label* cast_fail) { GotoIf(TaggedNotEqual(LoadMap(base), FixedDoubleArrayMapConstant()), cast_fail); return UncheckedCast(base); } template bool ClassHasMapConstant() { return false; } template TNode GetClassMapConstant() { UNREACHABLE(); return TNode(); } enum class ExtractFixedArrayFlag { kFixedArrays = 1, kFixedDoubleArrays = 2, kDontCopyCOW = 4, kAllFixedArrays = kFixedArrays | kFixedDoubleArrays, kAllFixedArraysDontCopyCOW = kAllFixedArrays | kDontCopyCOW }; using ExtractFixedArrayFlags = base::Flags; // Copy a portion of an existing FixedArray or FixedDoubleArray into a new // array, including special appropriate handling for empty arrays and COW // arrays. The result array will be of the same type as the original array. // // * |source| is either a FixedArray or FixedDoubleArray from which to copy // elements. // * |first| is the starting element index to copy from, if nullptr is passed // then index zero is used by default. // * |count| is the number of elements to copy out of the source array // starting from and including the element indexed by |start|. If |count| is // nullptr, then all of the elements from |start| to the end of |source| are // copied. // * |capacity| determines the size of the allocated result array, with // |capacity| >= |count|. If |capacity| is nullptr, then |count| is used as // the destination array's capacity. // * |extract_flags| determines whether FixedArrays, FixedDoubleArrays or both // are detected and copied. Although it's always correct to pass // kAllFixedArrays, the generated code is more compact and efficient if the // caller can specify whether only FixedArrays or FixedDoubleArrays will be // passed as the |source| parameter. // * |parameter_mode| determines the parameter mode of |first|, |count| and // |capacity|. // * If |var_holes_converted| is given, any holes will be converted to // undefined and the variable will be set according to whether or not there // were any hole. // * If |source_elements_kind| is given, the function will try to use the // runtime elements kind of source to make copy faster. More specifically, it // can skip write barriers. template TNode ExtractFixedArray( TNode source, base::Optional> first, base::Optional> count = base::nullopt, base::Optional> capacity = base::nullopt, ExtractFixedArrayFlags extract_flags = ExtractFixedArrayFlag::kAllFixedArrays, TVariable* var_holes_converted = nullptr, base::Optional> source_elements_kind = base::nullopt); // Copy a portion of an existing FixedArray or FixedDoubleArray into a new // FixedArray, including special appropriate handling for COW arrays. // * |source| is either a FixedArray or FixedDoubleArray from which to copy // elements. |source| is assumed to be non-empty. // * |first| is the starting element index to copy from. // * |count| is the number of elements to copy out of the source array // starting from and including the element indexed by |start|. // * |capacity| determines the size of the allocated result array, with // |capacity| >= |count|. // * |source_map| is the map of the |source|. // * |from_kind| is the elements kind that is consistent with |source| being // a FixedArray or FixedDoubleArray. This function only cares about double vs. 
// non-double, so as to distinguish FixedDoubleArray vs. FixedArray. It does // not care about holeyness. For example, when |source| is a FixedArray, // PACKED/HOLEY_ELEMENTS can be used, but not PACKED_DOUBLE_ELEMENTS. // * |allocation_flags| and |extract_flags| influence how the target // FixedArray is allocated. // * |convert_holes| is used to signify that the target array should use // undefined in place of holes. // * If |convert_holes| is true and |var_holes_converted| not nullptr, then // |var_holes_converted| is used to signal whether any holes were found and // converted. The caller should use this information to decide which map is // compatible with the result array. For example, if the input was of // HOLEY_SMI_ELEMENTS kind, and a conversion took place, the result will be // compatible only with HOLEY_ELEMENTS and PACKED_ELEMENTS. template TNode ExtractToFixedArray( TNode source, TNode first, TNode count, TNode capacity, TNode source_map, ElementsKind from_kind, AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags, HoleConversionMode convert_holes, TVariable* var_holes_converted = nullptr, base::Optional> source_runtime_kind = base::nullopt); // Attempt to copy a FixedDoubleArray to another FixedDoubleArray. In the case // where the source array has a hole, produce a FixedArray instead where holes // are replaced with undefined. // * |source| is a FixedDoubleArray from which to copy elements. // * |first| is the starting element index to copy from. // * |count| is the number of elements to copy out of the source array // starting from and including the element indexed by |first|. // * |capacity| determines the size of the allocated result array, with // |capacity| >= |count|. // * |source_map| is the map of |source|. It will be used as the map of the // target array if the target can stay a FixedDoubleArray. Otherwise if the // target array needs to be a FixedArray, the FixedArrayMap will be used. // * |var_holes_converted| is used to signal whether a FixedArray // is produced or not. // * |allocation_flags| and |extract_flags| influence how the target array is // allocated. template TNode ExtractFixedDoubleArrayFillingHoles( TNode source, TNode first, TNode count, TNode capacity, TNode source_map, TVariable* var_holes_converted, AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags); // Copy the entire contents of a FixedArray or FixedDoubleArray to a new // array, including special appropriate handling for empty arrays and COW // arrays. // // * |source| is either a FixedArray or FixedDoubleArray from which to copy // elements. // * |extract_flags| determines whether FixedArrays, FixedDoubleArrays or both // are detected and copied. Although it's always correct to pass // kAllFixedArrays, the generated code is more compact and efficient if the // caller can specify whether only FixedArrays or FixedDoubleArrays will be // passed as the |source| parameter. TNode CloneFixedArray( TNode source, ExtractFixedArrayFlags flags = ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW); // Loads an element from |array| of |from_kind| elements by given |offset| // (NOTE: not index!), does a hole check if |if_hole| is provided and // converts the value so that it becomes ready for storing to an array of // |to_kind| elements.
template TNode LoadElementAndPrepareForStore(TNode array, TNode offset, ElementsKind from_kind, ElementsKind to_kind, Label* if_hole); template TNode CalculateNewElementsCapacity(TNode old_capacity); // Tries to grow the |elements| array of given |object| to store the |key| // or bails out if the growing gap is too big. Returns new elements. TNode TryGrowElementsCapacity(TNode object, TNode elements, ElementsKind kind, TNode key, Label* bailout); // Tries to grow the |capacity|-length |elements| array of given |object| // to store the |key| or bails out if the growing gap is too big. Returns // new elements. template TNode TryGrowElementsCapacity(TNode object, TNode elements, ElementsKind kind, TNode key, TNode capacity, Label* bailout); // Grows elements capacity of given object. Returns new elements. template TNode GrowElementsCapacity( TNode object, TNode elements, ElementsKind from_kind, ElementsKind to_kind, TNode capacity, TNode new_capacity, Label* bailout); // Given a need to grow by |growth|, allocate an appropriate new capacity // if necessary, and return a new elements FixedArray object. Label |bailout| // is followed for allocation failure. void PossiblyGrowElementsCapacity(ElementsKind kind, TNode array, TNode length, TVariable* var_elements, TNode growth, Label* bailout); // Allocation site manipulation void InitializeAllocationMemento(TNode base, TNode base_allocation_size, TNode allocation_site); TNode TryTaggedToInt32AsIntPtr(TNode value, Label* if_not_possible); TNode TryTaggedToFloat64(TNode value, Label* if_valueisnotnumber); TNode TruncateTaggedToFloat64(TNode context, TNode value); TNode TruncateTaggedToWord32(TNode context, TNode value); void TaggedToWord32OrBigInt(TNode context, TNode value, Label* if_number, TVariable* var_word32, Label* if_bigint, TVariable* var_maybe_bigint); void TaggedToWord32OrBigIntWithFeedback(TNode context, TNode value, Label* if_number, TVariable* var_word32, Label* if_bigint, TVariable* var_maybe_bigint, TVariable* var_feedback); void TaggedPointerToWord32OrBigIntWithFeedback( TNode context, TNode pointer, Label* if_number, TVariable* var_word32, Label* if_bigint, TVariable* var_maybe_bigint, TVariable* var_feedback); TNode TruncateNumberToWord32(TNode value); // Truncate the floating point value of a HeapNumber to an Int32. TNode TruncateHeapNumberValueToWord32(TNode object); // Conversions. void TryHeapNumberToSmi(TNode number, TVariable* output, Label* if_smi); void TryFloat32ToSmi(TNode number, TVariable* output, Label* if_smi); void TryFloat64ToSmi(TNode number, TVariable* output, Label* if_smi); TNode ChangeFloat32ToTagged(TNode value); TNode ChangeFloat64ToTagged(TNode value); TNode ChangeInt32ToTagged(TNode value); TNode ChangeInt32ToTaggedNoOverflow(TNode value); TNode ChangeUint32ToTagged(TNode value); TNode ChangeUintPtrToTagged(TNode value); TNode ChangeNumberToUint32(TNode value); TNode ChangeNumberToFloat64(TNode value); TNode ChangeTaggedNonSmiToInt32(TNode context, TNode input); TNode ChangeTaggedToFloat64(TNode context, TNode input); TNode ChangeBoolToInt32(TNode b); void TaggedToNumeric(TNode context, TNode value, TVariable* var_numeric); void TaggedToNumericWithFeedback(TNode context, TNode value, TVariable* var_numeric, TVariable* var_feedback); // Ensures that {var_shared_value} is shareable across Isolates, and throws if // not. 
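// Illustrative sketch (placeholder locals): the value is passed in a
// variable so the barrier can replace it with a shareable copy if needed:
//
//   TVARIABLE(Object, var_shared_value, value);
//   SharedValueBarrier(context, &var_shared_value);
//   TNode<Object> shared_value = var_shared_value.value();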
void SharedValueBarrier(TNode context, TVariable* var_shared_value); TNode TimesSystemPointerSize(TNode value); TNode TimesSystemPointerSize(TNode value) { return Signed(TimesSystemPointerSize(implicit_cast>(value))); } TNode TimesSystemPointerSize(TNode value) { return Unsigned(TimesSystemPointerSize(implicit_cast>(value))); } TNode TimesTaggedSize(TNode value); TNode TimesTaggedSize(TNode value) { return Signed(TimesTaggedSize(implicit_cast>(value))); } TNode TimesTaggedSize(TNode value) { return Unsigned(TimesTaggedSize(implicit_cast>(value))); } TNode TimesDoubleSize(TNode value); TNode TimesDoubleSize(TNode value) { return Unsigned(TimesDoubleSize(implicit_cast>(value))); } TNode TimesDoubleSize(TNode value) { return Signed(TimesDoubleSize(implicit_cast>(value))); } // Type conversions. // Throws a TypeError for {method_name} if {value} is not coercible to Object, // or returns the {value} converted to a String otherwise. TNode ToThisString(TNode context, TNode value, TNode method_name); TNode ToThisString(TNode context, TNode value, char const* method_name) { return ToThisString(context, value, StringConstant(method_name)); } // Throws a TypeError for {method_name} if {value} is neither of the given // {primitive_type} nor a JSPrimitiveWrapper wrapping a value of // {primitive_type}, or returns the {value} (or wrapped value) otherwise. TNode ToThisValue(TNode context, TNode value, PrimitiveType primitive_type, char const* method_name); // Throws a TypeError for {method_name} if {value} is not of the given // instance type. void ThrowIfNotInstanceType(TNode context, TNode value, InstanceType instance_type, char const* method_name); // Throws a TypeError for {method_name} if {value} is not a JSReceiver. void ThrowIfNotJSReceiver(TNode context, TNode value, MessageTemplate msg_template, const char* method_name); void ThrowIfNotCallable(TNode context, TNode value, const char* method_name); void ThrowRangeError(TNode context, MessageTemplate message, base::Optional> arg0 = base::nullopt, base::Optional> arg1 = base::nullopt, base::Optional> arg2 = base::nullopt); void ThrowTypeError(TNode context, MessageTemplate message, char const* arg0 = nullptr, char const* arg1 = nullptr); void ThrowTypeError(TNode context, MessageTemplate message, base::Optional> arg0, base::Optional> arg1 = base::nullopt, base::Optional> arg2 = base::nullopt); TNode GetPendingMessage(); void SetPendingMessage(TNode message); // Type checks. // Check whether the map is for an object with special properties, such as a // JSProxy or an object with interceptors. 
TNode InstanceTypeEqual(TNode instance_type, int type); TNode IsNoElementsProtectorCellInvalid(); TNode IsMegaDOMProtectorCellInvalid(); TNode IsArrayIteratorProtectorCellInvalid(); TNode IsBigIntInstanceType(TNode instance_type); TNode IsBigInt(TNode object); TNode IsBoolean(TNode object); TNode IsCallableMap(TNode map); TNode IsCallable(TNode object); TNode TaggedIsCallable(TNode object); TNode IsConsStringInstanceType(TNode instance_type); TNode IsConstructorMap(TNode map); TNode IsConstructor(TNode object); TNode IsDeprecatedMap(TNode map); TNode IsNameDictionary(TNode object); TNode IsOrderedNameDictionary(TNode object); TNode IsGlobalDictionary(TNode object); TNode IsExtensibleMap(TNode map); TNode IsExtensibleNonPrototypeMap(TNode map); TNode IsExternalStringInstanceType(TNode instance_type); TNode IsFixedArray(TNode object); TNode IsFixedArraySubclass(TNode object); TNode IsFixedArrayWithKind(TNode object, ElementsKind kind); TNode IsFixedArrayWithKindOrEmpty(TNode object, ElementsKind kind); TNode IsFunctionWithPrototypeSlotMap(TNode map); TNode IsHashTable(TNode object); TNode IsEphemeronHashTable(TNode object); TNode IsHeapNumberInstanceType(TNode instance_type); TNode IsOddball(TNode object); TNode IsOddballInstanceType(TNode instance_type); TNode IsIndirectStringInstanceType(TNode instance_type); TNode IsJSArrayBuffer(TNode object); TNode IsJSDataView(TNode object); TNode IsJSArrayInstanceType(TNode instance_type); TNode IsJSArrayMap(TNode map); TNode IsJSArray(TNode object); TNode IsJSArrayIterator(TNode object); TNode IsJSAsyncGeneratorObject(TNode object); TNode IsFunctionInstanceType(TNode instance_type); TNode IsJSFunctionInstanceType(TNode instance_type); TNode IsJSFunctionMap(TNode map); TNode IsJSFunction(TNode object); TNode IsJSBoundFunction(TNode object); TNode IsJSGeneratorObject(TNode object); TNode IsJSGlobalProxyInstanceType(TNode instance_type); TNode IsJSGlobalProxyMap(TNode map); TNode IsJSGlobalProxy(TNode object); TNode IsJSObjectInstanceType(TNode instance_type); TNode IsJSObjectMap(TNode map); TNode IsJSObject(TNode object); TNode IsJSApiObjectInstanceType(TNode instance_type); TNode IsJSApiObjectMap(TNode map); TNode IsJSApiObject(TNode object); TNode IsJSFinalizationRegistryMap(TNode map); TNode IsJSFinalizationRegistry(TNode object); TNode IsJSPromiseMap(TNode map); TNode IsJSPromise(TNode object); TNode IsJSProxy(TNode object); TNode IsJSStringIterator(TNode object); TNode IsJSRegExpStringIterator(TNode object); TNode IsJSReceiverInstanceType(TNode instance_type); TNode IsJSReceiverMap(TNode map); TNode IsJSReceiver(TNode object); TNode IsJSRegExp(TNode object); TNode IsJSTypedArrayInstanceType(TNode instance_type); TNode IsJSTypedArrayMap(TNode map); TNode IsJSTypedArray(TNode object); TNode IsJSGeneratorMap(TNode map); TNode IsJSPrimitiveWrapperInstanceType(TNode instance_type); TNode IsJSPrimitiveWrapperMap(TNode map); TNode IsJSPrimitiveWrapper(TNode object); TNode IsJSSharedStructInstanceType(TNode instance_type); TNode IsJSSharedStructMap(TNode map); TNode IsJSSharedStruct(TNode object); TNode IsJSSharedStruct(TNode object); TNode IsJSWrappedFunction(TNode object); TNode IsMap(TNode object); TNode IsName(TNode object); TNode IsNameInstanceType(TNode instance_type); TNode IsNullOrJSReceiver(TNode object); TNode IsNullOrUndefined(TNode object); TNode IsNumberDictionary(TNode object); TNode IsOneByteStringInstanceType(TNode instance_type); TNode IsSeqOneByteStringInstanceType(TNode instance_type); TNode IsPrimitiveInstanceType(TNode instance_type); 
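// Illustrative sketch (placeholder locals): the predicates above and below
// return BoolT nodes and are typically consumed by Branch/GotoIf, e.g.:
//
//   Label if_array(this), if_other(this);
//   TNode<HeapObject> heap_object = CAST(receiver);
//   Branch(IsJSArray(heap_object), &if_array, &if_other);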
TNode IsPrivateName(TNode symbol); TNode IsPropertyArray(TNode object); TNode IsPropertyCell(TNode object); TNode IsPromiseReactionJobTask(TNode object); TNode IsPrototypeInitialArrayPrototype(TNode context, TNode map); TNode IsPrototypeTypedArrayPrototype(TNode context, TNode map); TNode IsFastAliasedArgumentsMap(TNode context, TNode map); TNode IsSlowAliasedArgumentsMap(TNode context, TNode map); TNode IsSloppyArgumentsMap(TNode context, TNode map); TNode IsStrictArgumentsMap(TNode context, TNode map); TNode IsSequentialStringInstanceType(TNode instance_type); TNode IsUncachedExternalStringInstanceType( TNode instance_type); TNode IsSpecialReceiverInstanceType(TNode instance_type); TNode IsCustomElementsReceiverInstanceType( TNode instance_type); TNode IsSpecialReceiverMap(TNode map); TNode IsStringInstanceType(TNode instance_type); TNode IsString(TNode object); TNode IsSeqOneByteString(TNode object); TNode IsSwissNameDictionary(TNode object); TNode IsSymbolInstanceType(TNode instance_type); TNode IsInternalizedStringInstanceType(TNode instance_type); TNode IsSharedStringInstanceType(TNode instance_type); TNode IsTemporalInstantInstanceType(TNode instance_type); TNode IsUniqueName(TNode object); TNode IsUniqueNameNoIndex(TNode object); TNode IsUniqueNameNoCachedIndex(TNode object); TNode IsUndetectableMap(TNode map); TNode IsNotWeakFixedArraySubclass(TNode object); TNode IsZeroOrContext(TNode object); TNode IsPromiseResolveProtectorCellInvalid(); TNode IsPromiseThenProtectorCellInvalid(); TNode IsArraySpeciesProtectorCellInvalid(); TNode IsIsConcatSpreadableProtectorCellInvalid(); TNode IsTypedArraySpeciesProtectorCellInvalid(); TNode IsRegExpSpeciesProtectorCellInvalid(); TNode IsPromiseSpeciesProtectorCellInvalid(); TNode LoadBasicMemoryChunkFlags(TNode object); TNode LoadRuntimeFlag(ExternalReference address_of_flag) { TNode flag_value = UncheckedCast( Load(MachineType::Uint8(), ExternalConstant(address_of_flag))); return Word32NotEqual(Word32And(flag_value, Int32Constant(0xFF)), Int32Constant(0)); } TNode IsMockArrayBufferAllocatorFlag() { return LoadRuntimeFlag( ExternalReference::address_of_mock_arraybuffer_allocator_flag()); } TNode HasBuiltinSubclassingFlag() { return LoadRuntimeFlag( ExternalReference::address_of_builtin_subclassing_flag()); } TNode HasSharedStringTableFlag() { return LoadRuntimeFlag( ExternalReference::address_of_shared_string_table_flag()); } // True iff |object| is a Smi or a HeapNumber or a BigInt. TNode IsNumeric(TNode object); // True iff |number| is either a Smi, or a HeapNumber whose value is not // within Smi range. TNode IsNumberNormalized(TNode number); TNode IsNumberPositive(TNode number); TNode IsHeapNumberPositive(TNode number); // True iff {number} is non-negative and less or equal than 2**53-1. TNode IsNumberNonNegativeSafeInteger(TNode number); // True iff {number} represents an integer value. TNode IsInteger(TNode number); TNode IsInteger(TNode number); // True iff abs({number}) <= 2**53 -1 TNode IsSafeInteger(TNode number); TNode IsSafeInteger(TNode number); // True iff {number} represents a valid uint32t value. TNode IsHeapNumberUint32(TNode number); // True iff {number} is a positive number and a valid array index in the range // [0, 2^32-1). 
TNode IsNumberArrayIndex(TNode number); template TNode FixedArraySizeDoesntFitInNewSpace(TNode element_count, int base_size); TNode IsMetaMap(TNode o) { return IsMapMap(o); } // ElementsKind helpers: TNode ElementsKindEqual(TNode a, TNode b) { return Word32Equal(a, b); } bool ElementsKindEqual(ElementsKind a, ElementsKind b) { return a == b; } TNode IsFastElementsKind(TNode elements_kind); bool IsFastElementsKind(ElementsKind kind) { return v8::internal::IsFastElementsKind(kind); } TNode IsFastOrNonExtensibleOrSealedElementsKind( TNode elements_kind); TNode IsDictionaryElementsKind(TNode elements_kind) { return ElementsKindEqual(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)); } TNode IsDoubleElementsKind(TNode elements_kind); bool IsDoubleElementsKind(ElementsKind kind) { return v8::internal::IsDoubleElementsKind(kind); } TNode IsFastSmiOrTaggedElementsKind(TNode elements_kind); TNode IsFastSmiElementsKind(TNode elements_kind); TNode IsHoleyFastElementsKind(TNode elements_kind); TNode IsHoleyFastElementsKindForRead(TNode elements_kind); TNode IsElementsKindGreaterThan(TNode target_kind, ElementsKind reference_kind); TNode IsElementsKindGreaterThanOrEqual(TNode target_kind, ElementsKind reference_kind); TNode IsElementsKindLessThanOrEqual(TNode target_kind, ElementsKind reference_kind); // Check if lower_reference_kind <= target_kind <= higher_reference_kind. TNode IsElementsKindInRange(TNode target_kind, ElementsKind lower_reference_kind, ElementsKind higher_reference_kind) { return IsInRange(target_kind, lower_reference_kind, higher_reference_kind); } // String helpers. // Load a character from a String (might flatten a ConsString). TNode StringCharCodeAt(TNode string, TNode index); // Return the single character string with only {code}. TNode StringFromSingleCharCode(TNode code); // Type conversion helpers. enum class BigIntHandling { kConvertToNumber, kThrow }; // Convert a String to a Number. TNode StringToNumber(TNode input); // Convert a Number to a String. TNode NumberToString(TNode input); TNode NumberToString(TNode input, Label* bailout); // Convert a Non-Number object to a Number. TNode NonNumberToNumber( TNode context, TNode input, BigIntHandling bigint_handling = BigIntHandling::kThrow); // Convert a Non-Number object to a Numeric. TNode NonNumberToNumeric(TNode context, TNode input); // Convert any object to a Number. // Conforms to ES#sec-tonumber if {bigint_handling} == kThrow. // With {bigint_handling} == kConvertToNumber, matches behavior of // tc39.github.io/proposal-bigint/#sec-number-constructor-number-value. TNode ToNumber( TNode context, TNode input, BigIntHandling bigint_handling = BigIntHandling::kThrow); TNode ToNumber_Inline(TNode context, TNode input); TNode ToNumberOrNumeric( LazyNode context, TNode input, TVariable* var_type_feedback, Object::Conversion mode, BigIntHandling bigint_handling = BigIntHandling::kThrow); // Convert any plain primitive to a Number. No need to handle BigInts since // they are not plain primitives. TNode PlainPrimitiveToNumber(TNode input); // Try to convert an object to a BigInt. Throws on failure (e.g. for Numbers). // https://tc39.github.io/proposal-bigint/#sec-to-bigint TNode ToBigInt(TNode context, TNode input); // Converts |input| to one of 2^32 integer values in the range 0 through // 2^32-1, inclusive. // ES#sec-touint32 TNode ToUint32(TNode context, TNode input); // Convert any object to a String. 
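// Illustrative sketch (placeholder locals); ToString_Inline avoids the
// builtin call when |input| is already a String:
//
//   TNode<String> str = ToString_Inline(context, input);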
TNode ToString_Inline(TNode context, TNode input); TNode ToObject(TNode context, TNode input); // Same as ToObject but avoids the Builtin call if |input| is already a // JSReceiver. TNode ToObject_Inline(TNode context, TNode input); // ES6 7.1.15 ToLength, but with inlined fast path. TNode ToLength_Inline(TNode context, TNode input); TNode OrdinaryToPrimitive(TNode context, TNode input, OrdinaryToPrimitiveHint hint); // Returns a node that contains a decoded (unsigned!) value of a bit // field |BitField| in |word32|. Returns result as an uint32 node. template TNode DecodeWord32(TNode word32) { return DecodeWord32(word32, BitField::kShift, BitField::kMask); } // Returns a node that contains a decoded (unsigned!) value of a bit // field |BitField| in |word|. Returns result as a word-size node. template TNode DecodeWord(TNode word) { return DecodeWord(word, BitField::kShift, BitField::kMask); } // Returns a node that contains a decoded (unsigned!) value of a bit // field |BitField| in |word32|. Returns result as a word-size node. template TNode DecodeWordFromWord32(TNode word32) { return DecodeWord(ChangeUint32ToWord(word32)); } // Returns a node that contains a decoded (unsigned!) value of a bit // field |BitField| in |word|. Returns result as an uint32 node. template TNode DecodeWord32FromWord(TNode word) { return UncheckedCast( TruncateIntPtrToInt32(Signed(DecodeWord(word)))); } // Decodes an unsigned (!) value from |word32| to an uint32 node. TNode DecodeWord32(TNode word32, uint32_t shift, uint32_t mask); // Decodes an unsigned (!) value from |word| to a word-size node. TNode DecodeWord(TNode word, uint32_t shift, uintptr_t mask); // Returns a node that contains the updated values of a |BitField|. template TNode UpdateWord32(TNode word, TNode value, bool starts_as_zero = false) { return UpdateWord32(word, value, BitField::kShift, BitField::kMask, starts_as_zero); } // Returns a node that contains the updated values of a |BitField|. template TNode UpdateWord(TNode word, TNode value, bool starts_as_zero = false) { return UpdateWord(word, value, BitField::kShift, BitField::kMask, starts_as_zero); } // Returns a node that contains the updated values of a |BitField|. template TNode UpdateWordInWord32(TNode word, TNode value, bool starts_as_zero = false) { return UncheckedCast( TruncateIntPtrToInt32(Signed(UpdateWord( ChangeUint32ToWord(word), value, starts_as_zero)))); } // Returns a node that contains the updated values of a |BitField|. template TNode UpdateWord32InWord(TNode word, TNode value, bool starts_as_zero = false) { return UpdateWord(word, ChangeUint32ToWord(value), starts_as_zero); } // Returns a node that contains the updated {value} inside {word} starting // at {shift} and fitting in {mask}. TNode UpdateWord32(TNode word, TNode value, uint32_t shift, uint32_t mask, bool starts_as_zero = false); // Returns a node that contains the updated {value} inside {word} starting // at {shift} and fitting in {mask}. TNode UpdateWord(TNode word, TNode value, uint32_t shift, uintptr_t mask, bool starts_as_zero = false); // Returns true if any of the |T|'s bits in given |word32| are set. template TNode IsSetWord32(TNode word32) { return IsSetWord32(word32, T::kMask); } // Returns true if any of the mask's bits in given |word32| are set. TNode IsSetWord32(TNode word32, uint32_t mask) { return Word32NotEqual(Word32And(word32, Int32Constant(mask)), Int32Constant(0)); } // Returns true if none of the mask's bits in given |word32| are set. 
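// Illustrative sketch of how these bit helpers are typically combined
// (placeholder locals; assumes the torque-generated Map::Bits1 bit fields):
//
//   TNode<Int32T> bit_field = LoadMapBitField(map);
//   TNode<BoolT> is_callable =
//       IsSetWord32<Map::Bits1::IsCallableBit>(bit_field);
//   TNode<BoolT> not_undetectable =
//       IsNotSetWord32(bit_field, Map::Bits1::IsUndetectableBit::kMask);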
TNode IsNotSetWord32(TNode word32, uint32_t mask) { return Word32Equal(Word32And(word32, Int32Constant(mask)), Int32Constant(0)); } // Returns true if all of the mask's bits in a given |word32| are set. TNode IsAllSetWord32(TNode word32, uint32_t mask) { TNode const_mask = Int32Constant(mask); return Word32Equal(Word32And(word32, const_mask), const_mask); } // Returns true if the bit field |BitField| in |word32| is equal to a given // constant |value|. Avoids a shift compared to using DecodeWord32. template TNode IsEqualInWord32(TNode word32, typename BitField::FieldType value) { TNode masked_word32 = Word32And(word32, Int32Constant(BitField::kMask)); return Word32Equal(masked_word32, Int32Constant(BitField::encode(value))); } // Returns true if the bit field |BitField| in |word32| is not equal to a // given constant |value|. Avoids a shift compared to using DecodeWord32. template TNode IsNotEqualInWord32(TNode word32, typename BitField::FieldType value) { return Word32BinaryNot(IsEqualInWord32(word32, value)); } // Returns true if any of the |T|'s bits in given |word| are set. template TNode IsSetWord(TNode word) { return IsSetWord(word, T::kMask); } // Returns true if any of the mask's bits in given |word| are set. TNode IsSetWord(TNode word, uint32_t mask) { return WordNotEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0)); } // Returns true if any of the mask's bit are set in the given Smi. // Smi-encoding of the mask is performed implicitly! TNode IsSetSmi(TNode smi, int untagged_mask) { intptr_t mask_word = bit_cast(Smi::FromInt(untagged_mask)); return WordNotEqual(WordAnd(BitcastTaggedToWordForTagAndSmiBits(smi), IntPtrConstant(mask_word)), IntPtrConstant(0)); } // Returns true if all of the |T|'s bits in given |word32| are clear. template TNode IsClearWord32(TNode word32) { return IsClearWord32(word32, T::kMask); } // Returns true if all of the mask's bits in given |word32| are clear. TNode IsClearWord32(TNode word32, uint32_t mask) { return Word32Equal(Word32And(word32, Int32Constant(mask)), Int32Constant(0)); } // Returns true if all of the |T|'s bits in given |word| are clear. template TNode IsClearWord(TNode word) { return IsClearWord(word, T::kMask); } // Returns true if all of the mask's bits in given |word| are clear. TNode IsClearWord(TNode word, uint32_t mask) { return IntPtrEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0)); } void SetCounter(StatsCounter* counter, int value); void IncrementCounter(StatsCounter* counter, int delta); void DecrementCounter(StatsCounter* counter, int delta); template void Increment(TVariable* variable, int value = 1); template void Decrement(TVariable* variable, int value = 1) { Increment(variable, -value); } // Generates "if (false) goto label" code. Useful for marking a label as // "live" to avoid assertion failures during graph building. In the resulting // code this check will be eliminated. void Use(Label* label); // Various building blocks for stubs doing property lookups. // |if_notinternalized| is optional; |if_bailout| will be used by default. // Note: If |key| does not yet have a hash, |if_notinternalized| will be taken // even if |key| is an array index. |if_keyisunique| will never // be taken for array indices. void TryToName(TNode key, Label* if_keyisindex, TVariable* var_index, Label* if_keyisunique, TVariable* var_unique, Label* if_bailout, Label* if_notinternalized = nullptr); // Call non-allocating runtime String::WriteToFlat using fast C-calls. 
void StringWriteToFlatOneByte(TNode source, TNode sink, TNode start, TNode length); void StringWriteToFlatTwoByte(TNode source, TNode sink, TNode start, TNode length); // Calls External{One,Two}ByteString::GetChars with a fast C-call. TNode> ExternalOneByteStringGetChars( TNode string); TNode> ExternalTwoByteStringGetChars( TNode string); TNode> IntlAsciiCollationWeightsL1(); TNode> IntlAsciiCollationWeightsL3(); // Performs a hash computation and string table lookup for the given string, // and jumps to: // - |if_index| if the string is an array index like "123"; |var_index| // will contain the intptr representation of that index. // - |if_internalized| if the string exists in the string table; the // internalized version will be in |var_internalized|. // - |if_not_internalized| if the string is not in the string table (but // does not add it). // - |if_bailout| for unsupported cases (e.g. uncachable array index). void TryInternalizeString(TNode string, Label* if_index, TVariable* var_index, Label* if_internalized, TVariable* var_internalized, Label* if_not_internalized, Label* if_bailout); // Calculates array index for given dictionary entry and entry field. // See Dictionary::EntryToIndex(). template TNode EntryToIndex(TNode entry, int field_index); template TNode EntryToIndex(TNode entry) { return EntryToIndex(entry, Dictionary::kEntryKeyIndex); } // Loads the details for the entry with the given key_index. // Returns an untagged int32. template TNode LoadDetailsByKeyIndex(TNode container, TNode key_index); // Loads the value for the entry with the given key_index. // Returns a tagged value. template TNode LoadValueByKeyIndex(TNode container, TNode key_index); // Stores the details for the entry with the given key_index. // |details| must be a Smi. template void StoreDetailsByKeyIndex(TNode container, TNode key_index, TNode details); // Stores the value for the entry with the given key_index. template void StoreValueByKeyIndex( TNode container, TNode key_index, TNode value, WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER); // Calculate a valid size for the a hash table. TNode HashTableComputeCapacity(TNode at_least_space_for); TNode NameToIndexHashTableLookup(TNode table, TNode name, Label* not_found); template TNode GetNumberOfElements(TNode dictionary); TNode GetNumberDictionaryNumberOfElements( TNode dictionary) { return GetNumberOfElements(dictionary); } template void SetNumberOfElements(TNode dictionary, TNode num_elements_smi) { // Not supposed to be used for SwissNameDictionary. STATIC_ASSERT(!(std::is_same::value)); StoreFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex, num_elements_smi, SKIP_WRITE_BARRIER); } template TNode GetNumberOfDeletedElements(TNode dictionary) { // Not supposed to be used for SwissNameDictionary. STATIC_ASSERT(!(std::is_same::value)); return CAST(LoadFixedArrayElement( dictionary, Dictionary::kNumberOfDeletedElementsIndex)); } template void SetNumberOfDeletedElements(TNode dictionary, TNode num_deleted_smi) { // Not supposed to be used for SwissNameDictionary. STATIC_ASSERT(!(std::is_same::value)); StoreFixedArrayElement(dictionary, Dictionary::kNumberOfDeletedElementsIndex, num_deleted_smi, SKIP_WRITE_BARRIER); } template TNode GetCapacity(TNode dictionary) { // Not supposed to be used for SwissNameDictionary. 
STATIC_ASSERT(!(std::is_same::value)); return CAST( UnsafeLoadFixedArrayElement(dictionary, Dictionary::kCapacityIndex)); } template TNode GetNextEnumerationIndex(TNode dictionary) { return CAST(LoadFixedArrayElement(dictionary, Dictionary::kNextEnumerationIndexIndex)); } template void SetNextEnumerationIndex(TNode dictionary, TNode next_enum_index_smi) { StoreFixedArrayElement(dictionary, Dictionary::kNextEnumerationIndexIndex, next_enum_index_smi, SKIP_WRITE_BARRIER); } // Looks up an entry in a NameDictionaryBase successor. If the entry is found // control goes to {if_found} and {var_name_index} contains an index of the // key field of the entry found. If the key is not found control goes to // {if_not_found}. enum LookupMode { kFindExisting, kFindInsertionIndex }; template TNode LoadName(TNode key); template void NameDictionaryLookup(TNode dictionary, TNode unique_name, Label* if_found, TVariable* var_name_index, Label* if_not_found, LookupMode mode = kFindExisting); TNode ComputeSeededHash(TNode key); void NumberDictionaryLookup(TNode dictionary, TNode intptr_index, Label* if_found, TVariable* var_entry, Label* if_not_found); TNode BasicLoadNumberDictionaryElement( TNode dictionary, TNode intptr_index, Label* not_data, Label* if_hole); template void FindInsertionEntry(TNode dictionary, TNode key, TVariable* var_key_index); template void InsertEntry(TNode dictionary, TNode key, TNode value, TNode index, TNode enum_index); template void Add(TNode dictionary, TNode key, TNode value, Label* bailout); // Tries to check if {object} has own {unique_name} property. void TryHasOwnProperty(TNode object, TNode map, TNode instance_type, TNode unique_name, Label* if_found, Label* if_not_found, Label* if_bailout); // Operating mode for TryGetOwnProperty and CallGetterIfAccessor enum GetOwnPropertyMode { // kCallJSGetterDontUseCachedName is used when we want to get the result of // the getter call, and don't use cached_name_property when the getter is // the function template and it has cached_property_name, which would just // bailout for the IC system to create a named property handler kCallJSGetterDontUseCachedName, // kCallJSGetterUseCachedName is used when we want to get the result of // the getter call, and use cached_name_property when the getter is // the function template and it has cached_property_name, which would call // GetProperty rather than bailout for Generic/NoFeedback load kCallJSGetterUseCachedName, // kReturnAccessorPair is used when we're only getting the property // descriptor kReturnAccessorPair }; // Tries to get {object}'s own {unique_name} property value. If the property // is an accessor then it also calls a getter. If the property is a double // field it re-wraps value in an immutable heap number. {unique_name} must be // a unique name (Symbol or InternalizedString) that is not an array index. 
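// Illustrative sketch (placeholder locals and labels):
//
//   TVARIABLE(Object, var_value);
//   Label if_found(this), if_not_found(this), if_bailout(this);
//   TryGetOwnProperty(context, receiver, object, map, instance_type,
//                     unique_name, &if_found, &var_value, &if_not_found,
//                     &if_bailout);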
void TryGetOwnProperty(TNode context, TNode receiver, TNode object, TNode map, TNode instance_type, TNode unique_name, Label* if_found_value, TVariable* var_value, Label* if_not_found, Label* if_bailout); void TryGetOwnProperty(TNode context, TNode receiver, TNode object, TNode map, TNode instance_type, TNode unique_name, Label* if_found_value, TVariable* var_value, TVariable* var_details, TVariable* var_raw_value, Label* if_not_found, Label* if_bailout, GetOwnPropertyMode mode); TNode GetProperty(TNode context, TNode receiver, Handle name) { return GetProperty(context, receiver, HeapConstant(name)); } TNode GetProperty(TNode context, TNode receiver, TNode name) { return CallBuiltin(Builtin::kGetProperty, context, receiver, name); } TNode SetPropertyStrict(TNode context, TNode receiver, TNode key, TNode value) { return CallBuiltin(Builtin::kSetProperty, context, receiver, key, value); } TNode CreateDataProperty(TNode context, TNode receiver, TNode key, TNode value) { return CallBuiltin(Builtin::kCreateDataProperty, context, receiver, key, value); } TNode GetMethod(TNode context, TNode object, Handle name, Label* if_null_or_undefined); TNode GetIteratorMethod(TNode context, TNode heap_obj, Label* if_iteratorundefined); TNode CreateAsyncFromSyncIterator(TNode context, TNode sync_iterator); template TNode CallBuiltin(Builtin id, TNode context, TArgs... args) { return CallStub(Builtins::CallableFor(isolate(), id), context, args...); } template void TailCallBuiltin(Builtin id, TNode context, TArgs... args) { return TailCallStub(Builtins::CallableFor(isolate(), id), context, args...); } void LoadPropertyFromFastObject(TNode object, TNode map, TNode descriptors, TNode name_index, TVariable* var_details, TVariable* var_value); void LoadPropertyFromFastObject(TNode object, TNode map, TNode descriptors, TNode name_index, TNode, TVariable* var_value); template void LoadPropertyFromDictionary(TNode dictionary, TNode name_index, TVariable* var_details, TVariable* var_value); void LoadPropertyFromGlobalDictionary(TNode dictionary, TNode name_index, TVariable* var_details, TVariable* var_value, Label* if_deleted); // Generic property lookup generator. If the {object} is fast and // {unique_name} property is found then the control goes to {if_found_fast} // label and {var_meta_storage} and {var_name_index} will contain // DescriptorArray and an index of the descriptor's name respectively. // If the {object} is slow or global then the control goes to {if_found_dict} // or {if_found_global} and the {var_meta_storage} and {var_name_index} will // contain a dictionary and an index of the key field of the found entry. // If property is not found or given lookup is not supported then // the control goes to {if_not_found} or {if_bailout} respectively. // // Note: this code does not check if the global dictionary points to deleted // entry! This has to be done by the caller. void TryLookupProperty(TNode object, TNode map, TNode instance_type, TNode unique_name, Label* if_found_fast, Label* if_found_dict, Label* if_found_global, TVariable* var_meta_storage, TVariable* var_name_index, Label* if_not_found, Label* if_bailout); // This is a building block for TryLookupProperty() above. Supports only // non-special fast and dictionary objects. // TODO(v8:11167, v8:11177) |bailout| only needed for SetDataProperties // workaround. 
void TryLookupPropertyInSimpleObject(TNode object, TNode map, TNode unique_name, Label* if_found_fast, Label* if_found_dict, TVariable* var_meta_storage, TVariable* var_name_index, Label* if_not_found, Label* bailout); // This method jumps to if_found if the element is known to exist, to // if_absent if it is known not to exist, to if_not_found if the prototype // chain needs to be checked, and to if_bailout if the lookup is unsupported. void TryLookupElement(TNode object, TNode map, TNode instance_type, TNode intptr_index, Label* if_found, Label* if_absent, Label* if_not_found, Label* if_bailout); // For integer-indexed exotic cases, check if the given string cannot be a // special index. If we are not sure that the given string is not a special // index with a simple check, return False. Note that a "False" return value // does not mean that the name_string is a special index in the current // implementation. void BranchIfMaybeSpecialIndex(TNode name_string, Label* if_maybe_special_index, Label* if_not_special_index); // This is the type of a generator function for looking up a property in a // holder. The {key} is guaranteed to be a unique name. using LookupPropertyInHolder = std::function receiver, TNode holder, TNode map, TNode instance_type, TNode key, Label* next_holder, Label* if_bailout)>; // This is the type of a generator function for looking up an element in a // holder. The {key} is an Int32 index. using LookupElementInHolder = std::function receiver, TNode holder, TNode map, TNode instance_type, TNode key, Label* next_holder, Label* if_bailout)>; // Generic property prototype chain lookup generator. // For properties it generates a lookup using the given // {lookup_property_in_holder} and for elements it uses // {lookup_element_in_holder}. // Upon reaching the end of the prototype chain the control goes to {if_end}. // If it can't handle the {receiver}/{key} case then the control goes // to {if_bailout}. // If {if_proxy} is nullptr, proxies go to if_bailout. void TryPrototypeChainLookup( TNode receiver, TNode object, TNode key, const LookupPropertyInHolder& lookup_property_in_holder, const LookupElementInHolder& lookup_element_in_holder, Label* if_end, Label* if_bailout, Label* if_proxy, bool handle_private_names = false); // Instanceof helpers. // Returns true if {object} has {prototype} somewhere in its prototype // chain, otherwise false is returned. Might cause arbitrary side effects // due to [[GetPrototypeOf]] invocations. TNode HasInPrototypeChain(TNode context, TNode object, TNode prototype); // ES6 section 7.3.19 OrdinaryHasInstance (C, O) TNode OrdinaryHasInstance(TNode context, TNode callable, TNode object); // Load type feedback vector from the stub caller's frame. TNode LoadFeedbackVectorForStub(); TNode LoadFeedbackVectorFromBaseline(); TNode LoadContextFromBaseline(); // Load type feedback vector from the stub caller's frame, skipping an // intermediate trampoline frame. TNode LoadFeedbackVectorForStubWithTrampoline(); // Load the value from the closure's feedback cell. TNode LoadFeedbackCellValue(TNode closure); // Load the object from the feedback vector cell for the given closure. // The returned object could be undefined if the closure does not have // a feedback vector associated with it. TNode LoadFeedbackVector(TNode closure); // Load the ClosureFeedbackCellArray that contains the feedback cells // used when creating closures from this function. This array could be // directly hanging off the FeedbackCell when there is no feedback vector, // or be available from the feedback vector's header.
TNode LoadClosureFeedbackArray( TNode closure); // Update the type feedback vector. bool UpdateFeedbackModeEqual(UpdateFeedbackMode a, UpdateFeedbackMode b) { return a == b; } void UpdateFeedback(TNode feedback, TNode maybe_feedback_vector, TNode slot_id, UpdateFeedbackMode mode); void UpdateFeedback(TNode feedback, TNode feedback_vector, TNode slot_id); void MaybeUpdateFeedback(TNode feedback, TNode maybe_feedback_vector, TNode slot_id); // Report that there was a feedback update, performing any tasks that should // be done after a feedback update. void ReportFeedbackUpdate(TNode feedback_vector, TNode slot_id, const char* reason); // Combine the new feedback with the existing_feedback. Do nothing if // existing_feedback is nullptr. void CombineFeedback(TVariable* existing_feedback, int feedback); void CombineFeedback(TVariable* existing_feedback, TNode feedback); // Overwrite the existing feedback with new_feedback. Do nothing if // existing_feedback is nullptr. void OverwriteFeedback(TVariable* existing_feedback, int new_feedback); // Check if a property name might require protector invalidation when it is // used for a property store or deletion. void CheckForAssociatedProtector(TNode name, Label* if_protector); TNode LoadReceiverMap(TNode receiver); // Loads script context from the script context table. TNode LoadScriptContext(TNode context, TNode context_index); TNode Int32ToUint8Clamped(TNode int32_value); TNode Float64ToUint8Clamped(TNode float64_value); template TNode PrepareValueForWriteToTypedArray(TNode input, ElementsKind elements_kind, TNode context); // Store value to an elements array with given elements kind. // TODO(turbofan): For BIGINT64_ELEMENTS and BIGUINT64_ELEMENTS // we pass {value} as BigInt object instead of int64_t. We should // teach TurboFan to handle int64_t on 32-bit platforms eventually. template void StoreElement(TNode elements, ElementsKind kind, TNode index, TNode value); // Implements the BigInt part of // https://tc39.github.io/proposal-bigint/#sec-numbertorawbytes, // including truncation to 64 bits (i.e. modulo 2^64). // {var_high} is only used on 32-bit platforms. void BigIntToRawBytes(TNode bigint, TVariable* var_low, TVariable* var_high); void EmitElementStore(TNode object, TNode key, TNode value, ElementsKind elements_kind, KeyedAccessStoreMode store_mode, Label* bailout, TNode context, TVariable* maybe_converted_value = nullptr); TNode CheckForCapacityGrow( TNode object, TNode elements, ElementsKind kind, TNode length, TNode key, Label* bailout); TNode CopyElementsOnWrite(TNode object, TNode elements, ElementsKind kind, TNode length, Label* bailout); void TransitionElementsKind(TNode object, TNode map, ElementsKind from_kind, ElementsKind to_kind, Label* bailout); void TrapAllocationMemento(TNode object, Label* memento_found); TNode PageFromAddress(TNode address); // Store a weak in-place reference into the FeedbackVector. TNode StoreWeakReferenceInFeedbackVector( TNode feedback_vector, TNode slot, TNode value, int additional_offset = 0); // Create a new AllocationSite and install it into a feedback vector. 
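  // Illustrative sketch (hypothetical locals): a literal builtin typically
  // checks for an existing boilerplate before creating a new AllocationSite in
  // the feedback slot:
  //
  //   Label have_boilerplate(this), create_site(this);
  //   Branch(HasBoilerplate(maybe_literal_site), &have_boilerplate,
  //          &create_site);
  //   BIND(&create_site);
  //   TNode<AllocationSite> site =
  //       CreateAllocationSiteInFeedbackVector(feedback_vector, slot);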
TNode CreateAllocationSiteInFeedbackVector( TNode feedback_vector, TNode slot); TNode HasBoilerplate(TNode maybe_literal_site); TNode LoadTransitionInfo(TNode allocation_site); TNode LoadBoilerplate(TNode allocation_site); TNode LoadElementsKind(TNode allocation_site); enum class IndexAdvanceMode { kPre, kPost }; template using FastLoopBody = std::function index)>; template TNode BuildFastLoop( const VariableList& var_list, TNode start_index, TNode end_index, const FastLoopBody& body, int increment, IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre); template TNode BuildFastLoop( TNode start_index, TNode end_index, const FastLoopBody& body, int increment, IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) { return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body, increment, advance_mode); } enum class ForEachDirection { kForward, kReverse }; using FastArrayForEachBody = std::function array, TNode offset)>; template void BuildFastArrayForEach( TNode, HeapObject>> array, ElementsKind kind, TNode first_element_inclusive, TNode last_element_exclusive, const FastArrayForEachBody& body, ForEachDirection direction = ForEachDirection::kReverse); template TNode GetArrayAllocationSize(TNode element_count, ElementsKind kind, int header_size) { return ElementOffsetFromIndex(element_count, kind, header_size); } template TNode GetFixedArrayAllocationSize(TNode element_count, ElementsKind kind) { return GetArrayAllocationSize(element_count, kind, FixedArray::kHeaderSize); } TNode GetPropertyArrayAllocationSize(TNode element_count) { return GetArrayAllocationSize(element_count, PACKED_ELEMENTS, PropertyArray::kHeaderSize); } template void GotoIfFixedArraySizeDoesntFitInNewSpace(TNode element_count, Label* doesnt_fit, int base_size); void InitializeFieldsWithRoot(TNode object, TNode start_offset, TNode end_offset, RootIndex root); // Goto the given |target| if the context chain starting at |context| has any // extensions up to the given |depth|. Returns the Context with the // extensions if there was one, otherwise returns the Context at the given // |depth|. 
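  // Minimal sketch (hypothetical locals) of a fast-path caller of the helper
  // declared below; the slow path handles context chains with extensions:
  //
  //   Label slow(this);
  //   TNode<Context> slot_context =
  //       GotoIfHasContextExtensionUpToDepth(context, depth, &slow);
  //   TNode<Object> value = LoadContextElement(slot_context, slot_index);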
TNode GotoIfHasContextExtensionUpToDepth(TNode context, TNode depth, Label* target); TNode RelationalComparison( Operation op, TNode left, TNode right, TNode context, TVariable* var_type_feedback = nullptr) { return RelationalComparison( op, left, right, [=]() { return context; }, var_type_feedback); } TNode RelationalComparison( Operation op, TNode left, TNode right, const LazyNode& context, TVariable* var_type_feedback = nullptr); void BranchIfNumberRelationalComparison(Operation op, TNode left, TNode right, Label* if_true, Label* if_false); void BranchIfNumberEqual(TNode left, TNode right, Label* if_true, Label* if_false) { BranchIfNumberRelationalComparison(Operation::kEqual, left, right, if_true, if_false); } void BranchIfNumberNotEqual(TNode left, TNode right, Label* if_true, Label* if_false) { BranchIfNumberEqual(left, right, if_false, if_true); } void BranchIfNumberLessThan(TNode left, TNode right, Label* if_true, Label* if_false) { BranchIfNumberRelationalComparison(Operation::kLessThan, left, right, if_true, if_false); } void BranchIfNumberLessThanOrEqual(TNode left, TNode right, Label* if_true, Label* if_false) { BranchIfNumberRelationalComparison(Operation::kLessThanOrEqual, left, right, if_true, if_false); } void BranchIfNumberGreaterThan(TNode left, TNode right, Label* if_true, Label* if_false) { BranchIfNumberRelationalComparison(Operation::kGreaterThan, left, right, if_true, if_false); } void BranchIfNumberGreaterThanOrEqual(TNode left, TNode right, Label* if_true, Label* if_false) { BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, left, right, if_true, if_false); } void BranchIfAccessorPair(TNode value, Label* if_accessor_pair, Label* if_not_accessor_pair) { GotoIf(TaggedIsSmi(value), if_not_accessor_pair); Branch(IsAccessorPair(CAST(value)), if_accessor_pair, if_not_accessor_pair); } void GotoIfNumberGreaterThanOrEqual(TNode left, TNode right, Label* if_false); TNode Equal(TNode lhs, TNode rhs, TNode context, TVariable* var_type_feedback = nullptr) { return Equal( lhs, rhs, [=]() { return context; }, var_type_feedback); } TNode Equal(TNode lhs, TNode rhs, const LazyNode& context, TVariable* var_type_feedback = nullptr); TNode StrictEqual(TNode lhs, TNode rhs, TVariable* var_type_feedback = nullptr); // ECMA#sec-samevalue // Similar to StrictEqual except that NaNs are treated as equal and minus zero // differs from positive zero. enum class SameValueMode { kNumbersOnly, kFull }; void BranchIfSameValue(TNode lhs, TNode rhs, Label* if_true, Label* if_false, SameValueMode mode = SameValueMode::kFull); // A part of BranchIfSameValue() that handles two double values. // Treats NaN == NaN and +0 != -0. void BranchIfSameNumberValue(TNode lhs_value, TNode rhs_value, Label* if_true, Label* if_false); enum HasPropertyLookupMode { kHasProperty, kForInHasProperty }; TNode HasProperty(TNode context, TNode object, TNode key, HasPropertyLookupMode mode); // Due to naming conflict with the builtin function namespace. 
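  // Illustrative sketch (hypothetical locals): the inline variant below
  // forwards to HasProperty() with kHasProperty mode; callers can branch on
  // the returned true/false Oddball:
  //
  //   Label if_present(this), if_absent(this);
  //   TNode<Oddball> has_key = HasProperty_Inline(context, object, key);
  //   Branch(TaggedEqual(has_key, TrueConstant()), &if_present, &if_absent);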
  TNode HasProperty_Inline(TNode context, TNode object, TNode key) {
    return HasProperty(context, object, key,
                       HasPropertyLookupMode::kHasProperty);
  }

  void ForInPrepare(TNode enumerator, TNode slot,
                    TNode maybe_feedback_vector, TNode* cache_array_out,
                    TNode* cache_length_out,
                    UpdateFeedbackMode update_feedback_mode);

  TNode Typeof(TNode value);

  TNode GetSuperConstructor(TNode active_function);

  TNode SpeciesConstructor(TNode context, TNode object,
                           TNode default_constructor);

  TNode InstanceOf(TNode object, TNode callable, TNode context);

  // Debug helpers
  TNode IsDebugActive();
  TNode IsSideEffectFreeDebuggingActive();

  // JSArrayBuffer helpers
  TNode LoadJSArrayBufferBackingStorePtr(TNode array_buffer);
  void ThrowIfArrayBufferIsDetached(TNode context, TNode array_buffer,
                                    const char* method_name);

  // JSArrayBufferView helpers
  TNode LoadJSArrayBufferViewBuffer(TNode array_buffer_view);
  TNode LoadJSArrayBufferViewByteLength(TNode array_buffer_view);
  TNode LoadJSArrayBufferViewByteOffset(TNode array_buffer_view);
  void ThrowIfArrayBufferViewBufferIsDetached(TNode context,
                                              TNode array_buffer_view,
                                              const char* method_name);

  // JSTypedArray helpers
  TNode LoadJSTypedArrayLengthAndCheckDetached(TNode typed_array,
                                               Label* detached);
  // Helper for length tracking JSTypedArrays and JSTypedArrays backed by
  // ResizableArrayBuffer.
  TNode LoadVariableLengthJSTypedArrayLength(TNode array, TNode buffer,
                                             Label* detached_or_out_of_bounds);
  // Helper for length tracking JSTypedArrays and JSTypedArrays backed by
  // ResizableArrayBuffer.
  TNode LoadVariableLengthJSTypedArrayByteLength(TNode context, TNode array,
                                                 TNode buffer);
  TNode LoadVariableLengthJSArrayBufferViewByteLength(
      TNode array, TNode buffer, Label* detached_or_out_of_bounds);
  void IsJSArrayBufferViewDetachedOrOutOfBounds(TNode array_buffer_view,
                                                Label* detached_or_oob,
                                                Label* not_detached_nor_oob);
  TNode IsJSArrayBufferViewDetachedOrOutOfBoundsBoolean(
      TNode array_buffer_view);
  void CheckJSTypedArrayIndex(TNode index, TNode typed_array,
                              Label* detached_or_out_of_bounds);
  TNode RabGsabElementsKindToElementByteSize(TNode elementsKind);
  TNode LoadJSTypedArrayDataPtr(TNode typed_array);
  TNode GetTypedArrayBuffer(TNode context, TNode array);

  template
  TNode ElementOffsetFromIndex(TNode index, ElementsKind kind,
                               int base_size = 0);

  // Check that a field offset is within the bounds of an object.
  TNode IsOffsetInBounds(TNode offset, TNode length, int header_size,
                         ElementsKind kind = HOLEY_ELEMENTS);

  // Load a builtin's code from the builtin array in the isolate.
  TNode LoadBuiltin(TNode builtin_id);

  // Figure out the SFI's code object using its data field.
  // If |data_type_out| is provided, the instance type of the function data
  // will be stored in it. In case the code object is a builtin (data is a
  // Smi), data_type_out will be set to 0.
  // If |if_compile_lazy| is provided then the execution will go to the given
  // label in case of a CompileLazy code object.
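  // Illustrative sketch (hypothetical locals; the CodeT return type is assumed
  // from contemporaneous V8 versions): fetching the code object of a
  // SharedFunctionInfo while diverting lazily-compiled functions:
  //
  //   Label compile_lazy(this);
  //   TNode<CodeT> sfi_code = GetSharedFunctionInfoCode(
  //       shared_info, /*data_type_out=*/nullptr, &compile_lazy);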
TNode GetSharedFunctionInfoCode( TNode shared_info, TVariable* data_type_out = nullptr, Label* if_compile_lazy = nullptr); TNode AllocateFunctionWithMapAndContext( TNode map, TNode shared_info, TNode context); // Promise helpers TNode PromiseHookFlags(); TNode HasAsyncEventDelegate(); #ifdef V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS TNode IsContextPromiseHookEnabled(TNode flags); #endif TNode IsIsolatePromiseHookEnabled(TNode flags); TNode IsAnyPromiseHookEnabled(TNode flags); TNode IsAnyPromiseHookEnabled() { return IsAnyPromiseHookEnabled(PromiseHookFlags()); } TNode IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate( TNode flags); TNode IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate() { return IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate( PromiseHookFlags()); } TNode IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate( TNode flags); TNode IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() { return IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate( PromiseHookFlags()); } TNode NeedsAnyPromiseHooks(TNode flags); TNode NeedsAnyPromiseHooks() { return NeedsAnyPromiseHooks(PromiseHookFlags()); } // for..in helpers void CheckPrototypeEnumCache(TNode receiver, TNode receiver_map, Label* if_fast, Label* if_slow); TNode CheckEnumCache(TNode receiver, Label* if_empty, Label* if_runtime); TNode GetArgumentValue(TorqueStructArguments args, TNode index); enum class FrameArgumentsArgcType { kCountIncludesReceiver, kCountExcludesReceiver }; TorqueStructArguments GetFrameArguments( TNode frame, TNode argc, FrameArgumentsArgcType argc_type = FrameArgumentsArgcType::kCountExcludesReceiver); inline TNode JSParameterCount(int argc_without_receiver) { return Int32Constant(argc_without_receiver + kJSArgcReceiverSlots); } inline TNode JSParameterCount(TNode argc_without_receiver) { return Int32Add(argc_without_receiver, Int32Constant(kJSArgcReceiverSlots)); } // Support for printf-style debugging void Print(const char* s); void Print(const char* prefix, TNode tagged_value); void Print(TNode tagged_value) { return Print(nullptr, tagged_value); } template TNode MakeTypeError(MessageTemplate message, TNode context, TArgs... 
args) { STATIC_ASSERT(sizeof...(TArgs) <= 3); return CAST(CallRuntime(Runtime::kNewTypeError, context, SmiConstant(message), args...)); } void Abort(AbortReason reason) { CallRuntime(Runtime::kAbort, NoContextConstant(), SmiConstant(reason)); Unreachable(); } bool ConstexprBoolNot(bool value) { return !value; } int31_t ConstexprIntegerLiteralToInt31(const IntegerLiteral& i) { return int31_t(i.To()); } int32_t ConstexprIntegerLiteralToInt32(const IntegerLiteral& i) { return i.To(); } uint32_t ConstexprIntegerLiteralToUint32(const IntegerLiteral& i) { return i.To(); } int8_t ConstexprIntegerLiteralToInt8(const IntegerLiteral& i) { return i.To(); } uint8_t ConstexprIntegerLiteralToUint8(const IntegerLiteral& i) { return i.To(); } uint64_t ConstexprIntegerLiteralToUint64(const IntegerLiteral& i) { return i.To(); } intptr_t ConstexprIntegerLiteralToIntptr(const IntegerLiteral& i) { return i.To(); } uintptr_t ConstexprIntegerLiteralToUintptr(const IntegerLiteral& i) { return i.To(); } double ConstexprIntegerLiteralToFloat64(const IntegerLiteral& i) { int64_t i_value = i.To(); double d_value = static_cast(i_value); CHECK_EQ(i_value, static_cast(d_value)); return d_value; } bool ConstexprIntegerLiteralEqual(IntegerLiteral lhs, IntegerLiteral rhs) { return lhs == rhs; } IntegerLiteral ConstexprIntegerLiteralAdd(const IntegerLiteral& lhs, const IntegerLiteral& rhs); IntegerLiteral ConstexprIntegerLiteralLeftShift(const IntegerLiteral& lhs, const IntegerLiteral& rhs); IntegerLiteral ConstexprIntegerLiteralBitwiseOr(const IntegerLiteral& lhs, const IntegerLiteral& rhs); bool ConstexprInt31Equal(int31_t a, int31_t b) { return a == b; } bool ConstexprInt31NotEqual(int31_t a, int31_t b) { return a != b; } bool ConstexprInt31GreaterThanEqual(int31_t a, int31_t b) { return a >= b; } bool ConstexprUint32Equal(uint32_t a, uint32_t b) { return a == b; } bool ConstexprUint32NotEqual(uint32_t a, uint32_t b) { return a != b; } bool ConstexprInt32Equal(int32_t a, int32_t b) { return a == b; } bool ConstexprInt32NotEqual(int32_t a, int32_t b) { return a != b; } bool ConstexprInt32GreaterThanEqual(int32_t a, int32_t b) { return a >= b; } uint32_t ConstexprUint32Add(uint32_t a, uint32_t b) { return a + b; } int32_t ConstexprUint32Sub(uint32_t a, uint32_t b) { return a - b; } int32_t ConstexprInt32Sub(int32_t a, int32_t b) { return a - b; } int32_t ConstexprInt32Add(int32_t a, int32_t b) { return a + b; } int31_t ConstexprInt31Add(int31_t a, int31_t b) { int32_t val; CHECK(!base::bits::SignedAddOverflow32(a, b, &val)); return val; } int31_t ConstexprInt31Mul(int31_t a, int31_t b) { int32_t val; CHECK(!base::bits::SignedMulOverflow32(a, b, &val)); return val; } int32_t ConstexprWord32Or(int32_t a, int32_t b) { return a | b; } uint32_t ConstexprWord32Shl(uint32_t a, int32_t b) { return a << b; } bool ConstexprUintPtrLessThan(uintptr_t a, uintptr_t b) { return a < b; } // CSA does not support 64-bit types on 32-bit platforms so as a workaround // the kMaxSafeIntegerUint64 is defined as uintptr and allowed to be used only // inside if constexpr (Is64()) i.e. on 64-bit architectures. static uintptr_t MaxSafeIntegerUintPtr() { #if defined(V8_HOST_ARCH_64_BIT) // This ifdef is required to avoid build issues on 32-bit MSVC which // complains about static_cast(kMaxSafeIntegerUint64). return kMaxSafeIntegerUint64; #else UNREACHABLE(); #endif } void PerformStackCheck(TNode context); void SetPropertyLength(TNode context, TNode array, TNode length); // Implements DescriptorArray::Search(). 
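  // Illustrative sketch (hypothetical locals): searching a map's descriptor
  // array for a unique name:
  //
  //   Label found(this), not_found(this);
  //   TVARIABLE(IntPtrT, var_name_index);
  //   TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
  //   TNode<Uint32T> bitfield3 = LoadMapBitField3(map);
  //   DescriptorLookup(unique_name, descriptors, bitfield3, &found,
  //                    &var_name_index, &not_found);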
void DescriptorLookup(TNode unique_name, TNode descriptors, TNode bitfield3, Label* if_found, TVariable* var_name_index, Label* if_not_found); // Implements TransitionArray::SearchName() - searches for first transition // entry with given name (note that there could be multiple entries with // the same name). void TransitionLookup(TNode unique_name, TNode transitions, Label* if_found, TVariable* var_name_index, Label* if_not_found); // Implements generic search procedure like i::Search(). template void Lookup(TNode unique_name, TNode array, TNode number_of_valid_entries, Label* if_found, TVariable* var_name_index, Label* if_not_found); // Implements generic linear search procedure like i::LinearSearch(). template void LookupLinear(TNode unique_name, TNode array, TNode number_of_valid_entries, Label* if_found, TVariable* var_name_index, Label* if_not_found); // Implements generic binary search procedure like i::BinarySearch(). template void LookupBinary(TNode unique_name, TNode array, TNode number_of_valid_entries, Label* if_found, TVariable* var_name_index, Label* if_not_found); // Converts [Descriptor/Transition]Array entry number to a fixed array index. template TNode EntryIndexToIndex(TNode entry_index); // Implements [Descriptor/Transition]Array::ToKeyIndex. template TNode ToKeyIndex(TNode entry_index); // Implements [Descriptor/Transition]Array::GetKey. template TNode GetKey(TNode array, TNode entry_index); // Implements DescriptorArray::GetDetails. TNode DescriptorArrayGetDetails(TNode descriptors, TNode descriptor_number); using ForEachDescriptorBodyFunction = std::function descriptor_key_index)>; // Descriptor array accessors based on key_index, which is equal to // DescriptorArray::ToKeyIndex(descriptor). TNode LoadKeyByKeyIndex(TNode container, TNode key_index); TNode LoadDetailsByKeyIndex(TNode container, TNode key_index); TNode LoadValueByKeyIndex(TNode container, TNode key_index); TNode LoadFieldTypeByKeyIndex(TNode container, TNode key_index); TNode DescriptorEntryToIndex(TNode descriptor); // Descriptor array accessors based on descriptor. TNode LoadKeyByDescriptorEntry(TNode descriptors, TNode descriptor); TNode LoadKeyByDescriptorEntry(TNode descriptors, int descriptor); TNode LoadDetailsByDescriptorEntry( TNode descriptors, TNode descriptor); TNode LoadDetailsByDescriptorEntry( TNode descriptors, int descriptor); TNode LoadValueByDescriptorEntry(TNode descriptors, TNode descriptor); TNode LoadValueByDescriptorEntry(TNode descriptors, int descriptor); TNode LoadFieldTypeByDescriptorEntry( TNode descriptors, TNode descriptor); using ForEachKeyValueFunction = std::function key, TNode value)>; // For each JSObject property (in DescriptorArray order), check if the key is // enumerable, and if so, load the value from the receiver and evaluate the // closure. void ForEachEnumerableOwnProperty(TNode context, TNode map, TNode object, PropertiesEnumerationMode mode, const ForEachKeyValueFunction& body, Label* bailout); TNode CallGetterIfAccessor( TNode value, TNode holder, TNode details, TNode context, TNode receiver, TNode name, Label* if_bailout, GetOwnPropertyMode mode = kCallJSGetterDontUseCachedName); TNode TryToIntptr(TNode key, Label* if_not_intptr, TVariable* var_instance_type = nullptr); TNode ArrayCreate(TNode context, TNode length); // Allocate a clone of a mutable primitive, if {object} is a mutable // HeapNumber. 
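  // Illustrative sketch (hypothetical locals): callers that copy property
  // values into a new object clone mutable HeapNumbers first so the heap
  // number is not shared between objects:
  //
  //   TNode<Object> value_to_store = CloneIfMutablePrimitive(value);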
TNode CloneIfMutablePrimitive(TNode object); TNode RefillMathRandom(TNode native_context); void RemoveFinalizationRegistryCellFromUnregisterTokenMap( TNode finalization_registry, TNode weak_cell); TNode FeedbackIteratorEntrySize() { return IntPtrConstant(FeedbackIterator::kEntrySize); } TNode FeedbackIteratorHandlerOffset() { return IntPtrConstant(FeedbackIterator::kHandlerOffset); } TNode AllocateSwissNameDictionary( TNode at_least_space_for); TNode AllocateSwissNameDictionary( int at_least_space_for); TNode AllocateSwissNameDictionaryWithCapacity( TNode capacity); // MT stands for "minus tag". TNode SwissNameDictionaryOffsetIntoDataTableMT( TNode dict, TNode index, int field_index); // MT stands for "minus tag". TNode SwissNameDictionaryOffsetIntoPropertyDetailsTableMT( TNode dict, TNode capacity, TNode index); TNode LoadSwissNameDictionaryNumberOfElements( TNode table, TNode capacity); TNode LoadSwissNameDictionaryNumberOfDeletedElements( TNode table, TNode capacity); // Specialized operation to be used when adding entries: // If used capacity (= number of present + deleted elements) is less than // |max_usable|, increment the number of present entries and return the used // capacity value (prior to the incrementation). Otherwise, goto |bailout|. TNode SwissNameDictionaryIncreaseElementCountOrBailout( TNode meta_table, TNode capacity, TNode max_usable_capacity, Label* bailout); // Specialized operation to be used when deleting entries: Decreases the // number of present entries and increases the number of deleted ones. Returns // new (= decremented) number of present entries. TNode SwissNameDictionaryUpdateCountsForDeletion( TNode meta_table, TNode capacity); void StoreSwissNameDictionaryCapacity(TNode table, TNode capacity); void StoreSwissNameDictionaryEnumToEntryMapping( TNode table, TNode capacity, TNode enum_index, TNode entry); TNode LoadSwissNameDictionaryKey(TNode dict, TNode entry); void StoreSwissNameDictionaryKeyAndValue(TNode dict, TNode entry, TNode key, TNode value); // Equivalent to SwissNameDictionary::SetCtrl, therefore preserves the copy of // the first group at the end of the control table. void SwissNameDictionarySetCtrl(TNode table, TNode capacity, TNode entry, TNode ctrl); TNode LoadSwissNameDictionaryCtrlTableGroup(TNode address); TNode LoadSwissNameDictionaryPropertyDetails( TNode table, TNode capacity, TNode entry); void StoreSwissNameDictionaryPropertyDetails(TNode table, TNode capacity, TNode entry, TNode details); TNode CopySwissNameDictionary( TNode original); void SwissNameDictionaryFindEntry(TNode table, TNode key, Label* found, TVariable* var_found_entry, Label* not_found); void SwissNameDictionaryAdd(TNode table, TNode key, TNode value, TNode property_details, Label* needs_resize); private: friend class CodeStubArguments; void HandleBreakOnNode(); TNode AllocateRawDoubleAligned(TNode size_in_bytes, AllocationFlags flags, TNode top_address, TNode limit_address); TNode AllocateRawUnaligned(TNode size_in_bytes, AllocationFlags flags, TNode top_address, TNode limit_address); TNode AllocateRaw(TNode size_in_bytes, AllocationFlags flags, TNode top_address, TNode limit_address); // Allocate and return a JSArray of given total size in bytes with header // fields initialized. TNode AllocateUninitializedJSArray( TNode array_map, TNode length, base::Optional> allocation_site, TNode size_in_bytes); // Increases the provided capacity to the next valid value, if necessary. 
  template
  TNode AllocateOrderedHashTable(TNode capacity);

  // Uses the provided capacity (which must be valid) verbatim.
  template
  TNode AllocateOrderedHashTableWithCapacity(TNode capacity);

  TNode SmiShiftBitsConstant() {
    return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
  }
  TNode SmiShiftBitsConstant32() {
    return Int32Constant(kSmiShiftSize + kSmiTagSize);
  }

  TNode AllocateSlicedString(RootIndex map_root_index, TNode length,
                             TNode parent, TNode offset);

  // Implements [Descriptor/Transition]Array::number_of_entries.
  template
  TNode NumberOfEntries(TNode array);

  // Implements [Descriptor/Transition]Array::GetSortedKeyIndex.
  template
  TNode GetSortedKeyIndex(TNode descriptors, TNode entry_index);

  TNode CollectFeedbackForString(TNode instance_type);
  void GenerateEqual_Same(TNode value, Label* if_equal, Label* if_notequal,
                          TVariable* var_type_feedback = nullptr);

  static const int kElementLoopUnrollThreshold = 8;

  // {convert_bigint} is only meaningful when {mode} == kToNumber.
  TNode NonNumberToNumberOrNumeric(
      TNode context, TNode input, Object::Conversion mode,
      BigIntHandling bigint_handling = BigIntHandling::kThrow);

  void TaggedToNumeric(TNode context, TNode value, TVariable* var_numeric,
                       TVariable* var_feedback);

  enum IsKnownTaggedPointer { kNo, kYes };
  template
  void TaggedToWord32OrBigIntImpl(TNode context, TNode value, Label* if_number,
                                  TVariable* var_word32,
                                  IsKnownTaggedPointer is_known_tagged_pointer,
                                  Label* if_bigint = nullptr,
                                  TVariable* var_maybe_bigint = nullptr,
                                  TVariable* var_feedback = nullptr);

  // Low-level accessors for Descriptor arrays.
  template
  TNode LoadDescriptorArrayElement(TNode object, TNode index,
                                   int additional_offset);

  // Hide LoadRoot for subclasses of CodeStubAssembler. If you get an error
  // complaining about this method, don't make it public, add your root to
  // HEAP_(IM)MUTABLE_IMMOVABLE_OBJECT_LIST instead. If you *really* need
  // LoadRoot, use CodeAssembler::LoadRoot.
  TNode LoadRoot(RootIndex root_index) {
    return CodeAssembler::LoadRoot(root_index);
  }

  TNode LoadRootMapWord(RootIndex root_index) {
    return CodeAssembler::LoadRootMapWord(root_index);
  }

  template
  void StoreFixedArrayOrPropertyArrayElement(
      TNode> array, TNode index, TNode value,
      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
      int additional_offset = 0);

  template
  void StoreElementTypedArrayBigInt(TNode elements, ElementsKind kind,
                                    TNode index, TNode value);

  template
  void StoreElementTypedArrayWord32(TNode elements, ElementsKind kind,
                                    TNode index, TNode value);

  // Store value to an elements array with given elements kind.
  // TODO(turbofan): For BIGINT64_ELEMENTS and BIGUINT64_ELEMENTS
  // we pass {value} as BigInt object instead of int64_t. We should
  // teach TurboFan to handle int64_t on 32-bit platforms eventually.
  // TODO(solanes): This method can go away and simplify into only one version
  // of StoreElement once we have "if constexpr" available to use.
  template
  void StoreElementTypedArray(TNode elements, ElementsKind kind, TNode index,
                              TNode value);

  template
  void StoreElement(TNode elements, ElementsKind kind, TNode index,
                    TNode value);

  template
  void StoreElement(TNode elements, ElementsKind kind, TNode index,
                    TNode value);
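  // Illustrative note (not part of the original header): per the comment on
  // LoadRoot above, subclasses are expected to use the generated root
  // accessors, e.g.
  //
  //   TNode<Oddball> undefined = UndefinedConstant();
  //   TNode<Map> fixed_array_map = FixedArrayMapConstant();
  //
  // instead of calling LoadRoot(RootIndex::...) directly.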
  // Converts {input} to a number if {input} is a plain primitive (i.e. String
  // or Oddball) and stores the result in {var_result}. Otherwise, it bails out
  // to {if_bailout}.
  void TryPlainPrimitiveNonNumberToNumber(TNode input, TVariable* var_result,
                                          Label* if_bailout);

  void DcheckHasValidMap(TNode object);

  template
  void EmitElementStoreTypedArray(TNode typed_array, TNode key, TNode value,
                                  ElementsKind elements_kind,
                                  KeyedAccessStoreMode store_mode,
                                  Label* bailout, TNode context,
                                  TVariable* maybe_converted_value);

  template
  void EmitElementStoreTypedArrayUpdateValue(
      TNode value, ElementsKind elements_kind, TNode converted_value,
      TVariable* maybe_converted_value);
};

class V8_EXPORT_PRIVATE CodeStubArguments {
 public:
  // |argc| specifies the number of arguments passed to the builtin, excluding
  // the receiver; the arguments on the frame do include the receiver.
  CodeStubArguments(CodeStubAssembler* assembler, TNode argc)
      : CodeStubArguments(assembler, argc, TNode()) {}
  CodeStubArguments(CodeStubAssembler* assembler, TNode argc)
      : CodeStubArguments(assembler, assembler->ChangeInt32ToIntPtr(argc)) {}
  CodeStubArguments(CodeStubAssembler* assembler, TNode argc, TNode fp);

  // Used by Torque to construct arguments based on a Torque-defined
  // struct of values.
  CodeStubArguments(CodeStubAssembler* assembler,
                    TorqueStructArguments torque_arguments)
      : assembler_(assembler),
        argc_(torque_arguments.actual_count),
        base_(torque_arguments.base),
        fp_(torque_arguments.frame) {}

  TNode GetReceiver() const;
  // Replaces receiver argument on the expression stack. Should be used only
  // for manipulating arguments in trampoline builtins before tail calling
  // further with passing all the JS arguments as is.
  void SetReceiver(TNode object) const;

  // Computes address of the index'th argument.
  TNode AtIndexPtr(TNode index) const;

  // |index| is zero-based and does not include the receiver.
  TNode AtIndex(TNode index) const;
  TNode AtIndex(int index) const;

  // Return the number of arguments (excluding the receiver).
  TNode GetLengthWithoutReceiver() const;
  // Return the number of arguments (including the receiver).
  TNode GetLengthWithReceiver() const;

  TorqueStructArguments GetTorqueArguments() const {
    return TorqueStructArguments{fp_, base_, GetLengthWithoutReceiver(),
                                 argc_};
  }

  TNode GetOptionalArgumentValue(TNode index, TNode default_value);
  TNode GetOptionalArgumentValue(TNode index) {
    return GetOptionalArgumentValue(index, assembler_->UndefinedConstant());
  }
  TNode GetOptionalArgumentValue(int index) {
    return GetOptionalArgumentValue(assembler_->IntPtrConstant(index));
  }

  // Iteration doesn't include the receiver. |first| and |last| are zero-based.
  using ForEachBodyFunction = std::function arg)>;
  void ForEach(const ForEachBodyFunction& body, TNode first = {},
               TNode last = {}) const {
    CodeStubAssembler::VariableList list(0, assembler_->zone());
    ForEach(list, body, first, last);
  }
  void ForEach(const CodeStubAssembler::VariableList& vars,
               const ForEachBodyFunction& body, TNode first = {},
               TNode last = {}) const;

  void PopAndReturn(TNode value);

 private:
  CodeStubAssembler* assembler_;
  TNode argc_;
  TNode base_;
  TNode fp_;
};

class ToDirectStringAssembler : public CodeStubAssembler {
 private:
  enum StringPointerKind { PTR_TO_DATA, PTR_TO_STRING };

 public:
  enum Flag {
    kDontUnpackSlicedStrings = 1 << 0,
  };
  using Flags = base::Flags;

  ToDirectStringAssembler(compiler::CodeAssemblerState* state, TNode string,
                          Flags flags = Flags());

  // Converts flat cons, thin, and sliced strings and returns the direct
  // string. The result can be either a sequential or external string.
  // Jumps to if_bailout if the string is indirect and cannot be unpacked.
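  // Illustrative usage sketch from a caller's perspective (hypothetical
  // locals; assumes a bailout label named |runtime|):
  //
  //   ToDirectStringAssembler to_direct(state(), string);
  //   TNode<String> direct = to_direct.TryToDirect(&runtime);
  //   TNode<RawPtrT> data = to_direct.PointerToData(&runtime);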
TNode TryToDirect(Label* if_bailout); // Returns a pointer to the beginning of the string data. // Jumps to if_bailout if the external string cannot be unpacked. TNode PointerToData(Label* if_bailout) { return TryToSequential(PTR_TO_DATA, if_bailout); } // Returns a pointer that, offset-wise, looks like a String. // Jumps to if_bailout if the external string cannot be unpacked. TNode PointerToString(Label* if_bailout) { return TryToSequential(PTR_TO_STRING, if_bailout); } TNode string() { return var_string_.value(); } TNode instance_type() { return var_instance_type_.value(); } TNode offset() { return var_offset_.value(); } TNode is_external() { return var_is_external_.value(); } private: TNode TryToSequential(StringPointerKind ptr_kind, Label* if_bailout); TVariable var_string_; TVariable var_instance_type_; // TODO(v8:9880): Use UintPtrT here. TVariable var_offset_; TVariable var_is_external_; const Flags flags_; }; // Performs checks on a given prototype (e.g. map identity, property // verification), intended for use in fast path checks. class PrototypeCheckAssembler : public CodeStubAssembler { public: enum Flag { kCheckPrototypePropertyConstness = 1 << 0, kCheckPrototypePropertyIdentity = 1 << 1, kCheckFull = kCheckPrototypePropertyConstness | kCheckPrototypePropertyIdentity, }; using Flags = base::Flags; // A tuple describing a relevant property. It contains the descriptor index of // the property (within the descriptor array), the property's expected name // (stored as a root), and the property's expected value (stored on the native // context). struct DescriptorIndexNameValue { int descriptor_index; RootIndex name_root_index; int expected_value_context_index; }; PrototypeCheckAssembler(compiler::CodeAssemblerState* state, Flags flags, TNode native_context, TNode initial_prototype_map, base::Vector properties); void CheckAndBranch(TNode prototype, Label* if_unmodified, Label* if_modified); private: const Flags flags_; const TNode native_context_; const TNode initial_prototype_map_; const base::Vector properties_; }; DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags) #define CLASS_MAP_CONSTANT_ADAPTER(V, rootIndexName, rootAccessorName, \ class_name) \ template <> \ inline bool CodeStubAssembler::ClassHasMapConstant() { \ return true; \ } \ template <> \ inline TNode CodeStubAssembler::GetClassMapConstant() { \ return class_name##MapConstant(); \ } UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(CLASS_MAP_CONSTANT_ADAPTER, _) } // namespace internal } // namespace v8 #endif // V8_CODEGEN_CODE_STUB_ASSEMBLER_H_