// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)

// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done.
// The scratch registers need to be different from elements, name and result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  DCHECK(!AreAliased(elements, name, scratch1, scratch2));
  DCHECK(!AreAliased(result, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry, check that the value is a normal property.
  __ Bind(&done);

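  // Each NameDictionary entry is a (key, value, details) triple of
  // pointer-sized fields, so the value and details words live one and two
  // pointers past the entry's key, as encoded by the offsets below.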
  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ B(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ Ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store (never clobbered).
//
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read only.
  __ Bind(&done);

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  static const int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
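  // The mask covers the property type field plus the READ_ONLY attribute bit,
  // so the test below fails (and we miss) unless the entry is a plain data
  // property that is writable.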
  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, kTypeAndReadOnlyMask);
  __ B(ne, miss);

  // Store the value at the masked, scaled index and return.
  static const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
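  // scratch2 now holds the untagged address of the entry's value slot; it is
  // reused as the slot address for the write barrier below.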
  __ Str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ Mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = x0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
  Label slow;

  __ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                     JSObject::kPropertiesOffset));
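  // x0 doubles as the result register, so a successful dictionary load leaves
  // the property value in the ARM64 return register before the Ret below.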
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), x0, x3, x4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ Bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();
  ASM_LOCATION("LoadIC::GenerateMiss");

  DCHECK(!AreAliased(x4, x5, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, x4, x5);

  // Perform tail call to the entry.
  __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
          LoadWithVectorDescriptor::NameRegister(),
          LoadWithVectorDescriptor::SlotRegister(),
          LoadWithVectorDescriptor::VectorRegister());
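  // The runtime entry expects its arguments on the stack in exactly this
  // order: receiver, name, slot, vector.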
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}

void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.
  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kGetProperty);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, x10, x11);

  __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
          LoadWithVectorDescriptor::NameRegister(),
          LoadWithVectorDescriptor::SlotRegister(),
          LoadWithVectorDescriptor::VectorRegister());

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}

void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.
  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kKeyedGetProperty);
}

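// Pushes the value, slot, vector, receiver and name registers, in the order
// expected by the store IC miss and slow runtime entries below.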
static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreWithVectorDescriptor::ValueRegister(),
          StoreWithVectorDescriptor::SlotRegister(),
          StoreWithVectorDescriptor::VectorRegister(),
          StoreWithVectorDescriptor::ReceiverRegister(),
          StoreWithVectorDescriptor::NameRegister());
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateMiss");
  StoreIC_PushArgs(masm);
  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}

void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateSlow");
  StoreIC_PushArgs(masm);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     x10, x11));

  Label transition_smi_elements;
  Label transition_double_elements;
  Label fast_double_without_map_check;
  Label non_double_value;
  Label finish_store;

  __ Bind(fast_object);
  if (check_map == kCheckMap) {
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ B(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed;
  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
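  // x10 now addresses the element at index `key` in the FixedArray backing
  // store (the smi key is untagged and scaled by the pointer size).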
  __ Ldr(x11, MemOperand(x10));
  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
  __ bind(&holecheck_passed);

  // Smi stores don't require further checks.
  __ JumpIfSmi(value, &finish_store);

  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);

  __ Bind(&finish_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }

  Register address = x11;
  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Str(value, MemOperand(address));

  Label dont_record_write;
  __ JumpIfSmi(value, &dont_record_write);

  // Update write barrier for the elements array address.
  __ Mov(x10, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
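  // OMIT_SMI_CHECK is safe here because smi values already branched to
  // dont_record_write above and never reach the barrier.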

  __ Bind(&dont_record_write);
  __ Ret();


  __ Bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so go to
  // the runtime.
  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
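  // kHoleNanInt64 is the NaN bit pattern used to represent the hole in
  // FixedDoubleArrays; any other bit pattern is an ordinary double value.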
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);

  __ Bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();


  __ Bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&fast_double_without_map_check);

  __ Bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);

  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);

  __ Bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateMegamorphic");
  Label slow;
  Label array;
  Label fast_object;
  Label extra;
  Label fast_object_grow;
  Label fast_double_grow;
  Label fast_double;
  Label maybe_name_key;
  Label miss;

  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(x1));
  DCHECK(key.is(x2));
  DCHECK(value.is(x0));

  Register receiver_map = x3;
  Register elements = x4;
  Register elements_map = x5;

  __ JumpIfNotSmi(key, &maybe_name_key);
  __ JumpIfSmi(receiver, &slow);
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ TestAndBranchIfAnySet(x10, (1 << Map::kIsAccessCheckNeeded), &slow);

  // Check if the object is a JS array or not.
  Register instance_type = x10;
  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
  __ B(eq, &array);
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ Cmp(instance_type, JS_OBJECT_TYPE);
  __ B(lo, &slow);

  // Object case: Check key against length in the elements array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(hi, &fast_object);
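  // "hi" is an unsigned comparison, so the fast path is taken only when
  // 0 <= key < length; negative smi keys fall through to the slow case below.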


  __ Bind(&slow);
  // Slow case, handle jump to runtime.
  // Live values:
  //  x0: value
  //  x1: receiver
  //  x2: key
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ Ldr(x10, FieldMemOperand(key, HeapObject::kMapOffset));
  __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(x10, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = StoreWithVectorDescriptor::VectorRegister();
  Register slot = StoreWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ Mov(slot, Operand(Smi::FromInt(slot_index)));

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, x5,
                                                     x6, x7, x8);
  // Cache miss.
  __ B(&miss);

  __ Bind(&extra);
  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].

  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(ls, &slow);
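  // "ls" (unsigned <=) means the backing store has no spare slot at `key`, so
  // growing must be left to the runtime.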

  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ B(eq, &fast_object_grow);
  __ Cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ B(eq, &fast_double_grow);
  __ B(&slow);


  __ Bind(&array);
  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
  __ B(lo, &slow);
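  // Fall through when length > key: the store is in bounds and continues in
  // the fast-object code emitted by the helper below.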

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}

void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register value = StoreDescriptor::ValueRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register dictionary = x5;
  DCHECK(!AreAliased(value, receiver, name,
                     StoreWithVectorDescriptor::SlotRegister(),
                     StoreWithVectorDescriptor::VectorRegister(), x5, x6, x7));

  __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x6, x7);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->ic_store_normal_hit(), 1, x6, x7);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ Bind(&miss);
  __ IncrementCounter(counters->ic_store_normal_miss(), 1, x6, x7);
  GenerateMiss(masm);
}


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return al;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address info_address = Assembler::return_address_from_call_start(address);

  InstructionSequence* patch_info = InstructionSequence::At(info_address);
  return patch_info->IsInlineData();
}


// Activate a SMI fast-path by patching the instructions generated by
// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
// JumpPatchSite::EmitPatchInfo().
void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  // The patch information is encoded in the instruction stream using
  // instructions which have no side effects, so we can safely execute them.
  // The patch information is encoded directly after the call to the helper
  // function which is requesting this patch operation.
  Address info_address = Assembler::return_address_from_call_start(address);
  InlineSmiCheckInfo info(info_address);

  // Check and decode the patch information instruction.
  if (!info.HasSmiCheck()) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
           static_cast<void*>(address), static_cast<void*>(info_address),
           static_cast<void*>(info.SmiCheck()));
  }

  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
  // and JumpPatchSite::EmitJumpIfSmi().
  // Changing
  //   tb(n)z xzr, #0, <target>
  // to
  //   tb(!n)z test_reg, #0, <target>
  Instruction* to_patch = info.SmiCheck();
  PatchingAssembler patcher(isolate, to_patch, 1);
  DCHECK(to_patch->IsTestBranch());
  DCHECK(to_patch->ImmTestBranchBit5() == 0);
  DCHECK(to_patch->ImmTestBranchBit40() == 0);

  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagMask == 1);
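  // Because the smi tag is the single low bit and is zero for smis, testing
  // bit 0 with tbz/tbnz is exactly a smi check.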

  int branch_imm = to_patch->ImmTestBranch();
  Register smi_reg;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(to_patch->Rt() == xzr.code());
    smi_reg = info.SmiRegister();
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(to_patch->Rt() != xzr.code());
    smi_reg = xzr;
  }
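  // Bit 0 of xzr is always zero, so a tb(n)z on xzr has a fixed outcome;
  // swapping between xzr and the real value register is what enables or
  // disables the inlined smi check.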

  if (to_patch->Mask(TestBranchMask) == TBZ) {
    // This is JumpIfNotSmi(smi_reg, branch_imm).
    patcher.tbnz(smi_reg, 0, branch_imm);
  } else {
    DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
    // This is JumpIfSmi(smi_reg, branch_imm).
    patcher.tbz(smi_reg, 0, branch_imm);
  }
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64