// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)


// "type" holds an instance type on entry and is not clobbered.
// Generated code branches to "global_object" if type is any kind of global
// JS object.
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
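  // Ccmp performs the second comparison only if the first one failed (ne);
  // otherwise it forces the Z flag to be set, so the single B(eq) below
  // covers both global instance types.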
  __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
  __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
  __ B(eq, global_object);
}


// Helper function used from LoadIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done.
// The scratch registers need to be different from elements, name and result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  DCHECK(!AreAliased(elements, name, scratch1, scratch2));
  DCHECK(!AreAliased(result, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry, check that the value is a normal property.
  __ Bind(&done);

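  // NameDictionary entries are (key, value, details) triples, so relative to
  // the entry located by the probe, the value word sits one pointer size past
  // the key and the details word two.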
  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ B(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ Ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store (never clobbered).
//
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read only.
  __ Bind(&done);

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  static const int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
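  // Miss unless this is a plain data property (type field zero) whose
  // READ_ONLY attribute bit is clear.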
  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, kTypeAndReadOnlyMask);
  __ B(ne, miss);

  // Store the value at the masked, scaled index and return.
  static const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
  __ Str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ Mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects and returns the map of the
// receiver in 'map_scratch' if the receiver is not a smi.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map_scratch,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  DCHECK(!AreAliased(map_scratch, scratch));

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
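  // Tbnz tests a single bit and branches if it is set, so each of these map
  // bit field checks costs one instruction.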
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
  __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
  __ Tbnz(scratch, interceptor_bit, slow);

  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object, we enter the
  // runtime system to make sure that indexing into string objects works
  // as intended.
  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  __ Cmp(scratch, JS_OBJECT_TYPE);
  __ B(lt, slow);
}


// Loads an indexed element from a fast case array.
//
// receiver - holds the receiver on entry.
//            Unchanged unless 'result' is the same register.
//
// key      - holds the smi key on entry.
//            Unchanged unless 'result' is the same register.
//
// elements - holds the elements of the receiver and its prototypes. Clobbered.
//
// result   - holds the result on exit if the load succeeded.
//            Allowed to be the same as 'receiver' or 'key'.
//            Unchanged on bailout so 'receiver' and 'key' can be safely
//            used by further computation.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* slow,
                                  LanguageMode language_mode) {
  DCHECK(!AreAliased(receiver, key, elements, scratch1, scratch2));

  Label check_prototypes, check_next_prototype;
  Label done, in_bounds, absent;

  // Check for fast array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ AssertFastElements(elements);

  // Check that the key (index) is within bounds.
  __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(key, scratch1);
  __ B(lo, &in_bounds);

  // Out of bounds. Check the prototype chain to see if we can just return
  // 'undefined'.
  __ Cmp(key, Operand(Smi::FromInt(0)));
  __ B(lt, slow);  // Negative keys can't take the fast OOB path.
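  // Walk the prototype chain. Returning 'undefined' is only safe if every
  // prototype up to null is a plain JSObject with empty elements and no
  // interceptors or access checks; anything else bails out to the runtime.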
  __ Bind(&check_prototypes);
  __ Ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Bind(&check_next_prototype);
  __ Ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
  // scratch2: current prototype
  __ JumpIfRoot(scratch2, Heap::kNullValueRootIndex, &absent);
  __ Ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
  __ Ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  // elements: elements of current prototype
  // scratch2: map of current prototype
  __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
  __ B(lo, slow);
  __ Ldrb(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
  __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, slow);
  __ Tbnz(scratch1, Map::kHasIndexedInterceptor, slow);
  __ JumpIfNotRoot(elements, Heap::kEmptyFixedArrayRootIndex, slow);
  __ B(&check_next_prototype);

  __ Bind(&absent);
  if (is_strong(language_mode)) {
    // Strong mode accesses must throw in this case, so call the runtime.
    __ B(slow);
  } else {
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
    __ B(&done);
  }

  __ Bind(&in_bounds);
  // Fast case: Do the load.
  __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));

  // In case the loaded value is the_hole we have to check the prototype chain.
  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, &check_prototypes);

  // Move the value to the result register.
  // 'result' can alias with 'receiver' or 'key' but these two must be
  // preserved if we jump to 'slow'.
  __ Mov(result, scratch2);
  __ Bind(&done);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
// The map of the key is returned in 'map_scratch'.
// If the jump to 'index_string' is taken, the hash of the key is left
// in 'hash_scratch'.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map_scratch, Register hash_scratch,
                                 Label* index_string, Label* not_unique) {
  DCHECK(!AreAliased(key, map_scratch, hash_scratch));

  // Is the key a name?
  Label unique;
  __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
                      not_unique, hi);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
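  // Equality below means the instance type is exactly LAST_UNIQUE_NAME_TYPE,
  // i.e. the key is a symbol, which is unique by construction.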
  __ B(eq, &unique);

  // Is the string an array index with cached numeric value?
  __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask,
                             index_string);

  // Is the string internalized? We know it's a string, so a single bit test is
  // enough.
  __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);

  __ Bind(&unique);
  // Fall through if the key is a unique name.
}


void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
  Register dictionary = x0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
  Label slow;

  __ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                     JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), x0, x3, x4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ Bind(&slow);
  GenerateRuntimeGetProperty(masm, language_mode);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();
  ASM_LOCATION("LoadIC::GenerateMiss");

  DCHECK(!AreAliased(x4, x5, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->load_miss(), 1, x4, x5);

  // Perform tail call to the entry.
  __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
          LoadWithVectorDescriptor::NameRegister(),
          LoadWithVectorDescriptor::SlotRegister(),
          LoadWithVectorDescriptor::VectorRegister());
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
                                        LanguageMode language_mode) {
  // The return address is in lr.
  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
                                              : Runtime::kGetProperty);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);

  __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
          LoadWithVectorDescriptor::NameRegister(),
          LoadWithVectorDescriptor::SlotRegister(),
          LoadWithVectorDescriptor::VectorRegister());

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
                                             LanguageMode language_mode) {
  // The return address is in lr.
  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
                                              : Runtime::kKeyedGetProperty);
}


static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
                                        Register receiver, Register scratch1,
                                        Register scratch2, Register scratch3,
                                        Register scratch4, Register scratch5,
                                        Label* slow,
                                        LanguageMode language_mode) {
  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
                     scratch5));

  Isolate* isolate = masm->isolate();
  Label check_number_dictionary;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
                                 Map::kHasIndexedInterceptor, slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
                        result, slow, language_mode);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
                      scratch1, scratch2);
  __ Ret();

  __ Bind(&check_number_dictionary);
  __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));

  // Check whether we have a number dictionary.
  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);

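  // The receiver's elements are a number dictionary (sparse elements); probe
  // it for the key and return the value in 'result' on success.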
  __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2,
                              scratch4, scratch5);
  __ Ret();
}

static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
                                         Register receiver, Register scratch1,
                                         Register scratch2, Register scratch3,
                                         Register scratch4, Register scratch5,
                                         Label* slow) {
  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
                     scratch5));

  Isolate* isolate = masm->isolate();
  Label probe_dictionary, property_array_property;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
                                 Map::kHasNamedInterceptor, slow);

  // If the receiver is a fast-case object, check the stub cache. Otherwise
  // probe the dictionary.
  __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = LoadWithVectorDescriptor::VectorRegister();
  Register slot = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, scratch1, scratch2, scratch3, scratch4));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ Mov(slot, Operand(Smi::FromInt(slot_index)));

  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                               receiver, key, scratch1,
                                               scratch2, scratch3, scratch4);
  // Cache miss.
  KeyedLoadIC::GenerateMiss(masm);

  // Do a quick inline probe of the receiver's dictionary, if it exists.
  __ Bind(&probe_dictionary);
  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
  // Load the property.
  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1,
                      scratch1, scratch2);
  __ Ret();
}


void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
                                      LanguageMode language_mode) {
  // The return address is in lr.
  Label slow, check_name, index_smi, index_name;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(x2));
  DCHECK(receiver.is(x1));

  __ JumpIfNotSmi(key, &check_name);
  __ Bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.
  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow,
                              language_mode);

  // Slow case.
  __ Bind(&slow);
  __ IncrementCounter(masm->isolate()->counters()->keyed_load_generic_slow(), 1,
                      x4, x3);
  GenerateRuntimeGetProperty(masm, language_mode);

  __ Bind(&check_name);
  GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);

  GenerateKeyedLoadWithNameKey(masm, key, receiver, x4, x5, x6, x7, x3, &slow);

  __ Bind(&index_name);
  __ IndexFromHash(x3, key);
  // Now jump to the place where smi keys are handled.
  __ B(&index_smi);
}


static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister(),
          VectorStoreICDescriptor::SlotRegister(),
          VectorStoreICDescriptor::VectorRegister());
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateMiss");
  StoreIC_PushArgs(masm);
  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}


static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     x10, x11));

  Label transition_smi_elements;
  Label transition_double_elements;
  Label fast_double_without_map_check;
  Label non_double_value;
  Label finish_store;

  __ Bind(fast_object);
  if (check_map == kCheckMap) {
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ B(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because there
  // may be a callback on the element.
  Label holecheck_passed;
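  // Compute the element's address: UntagSmiAndScale folds the smi untag and
  // the pointer-size scaling into a single shifted operand, so no separate
  // untagging instruction is needed.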
  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
  __ bind(&holecheck_passed);

  // Smi stores don't require further checks.
  __ JumpIfSmi(value, &finish_store);

  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);

  __ Bind(&finish_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }

  Register address = x11;
  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Str(value, MemOperand(address));

  Label dont_record_write;
  __ JumpIfSmi(value, &dont_record_write);

  // Update write barrier for the elements array address.
  __ Mov(x10, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  __ Bind(&dont_record_write);
  __ Ret();


  __ Bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so, go to
  // the runtime.
  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
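  // kHoleNanInt64 is the raw 64-bit NaN bit pattern that V8 reserves to mark
  // holes in FAST_DOUBLE_ELEMENTS backing stores.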
  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);

  __ Bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();


  __ Bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&fast_double_without_map_check);

  __ Bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);

  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);

  __ Bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateMegamorphic");
  Label slow;
  Label array;
  Label fast_object;
  Label extra;
  Label fast_object_grow;
  Label fast_double_grow;
  Label fast_double;
  Label maybe_name_key;
  Label miss;

  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(x1));
  DCHECK(key.is(x2));
  DCHECK(value.is(x0));

  Register receiver_map = x3;
  Register elements = x4;
  Register elements_map = x5;

  __ JumpIfNotSmi(key, &maybe_name_key);
  __ JumpIfSmi(receiver, &slow);
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that the receiver does not require access checks and is not observed.
  // The generic stub does not perform map checks or handle observed objects.
  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ TestAndBranchIfAnySet(
      x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);

  // Check if the object is a JS array or not.
  Register instance_type = x10;
  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
  __ B(eq, &array);
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ Cmp(instance_type, JS_OBJECT_TYPE);
  __ B(lo, &slow);

  // Object case: Check key against length in the elements array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
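  // The unsigned 'higher' comparison also routes negative keys to the slow
  // path, since an untagged negative smi is a huge unsigned value.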
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(hi, &fast_object);


  __ Bind(&slow);
  // Slow case, handle jump to runtime.
  // Live values:
  //  x0: value
  //  x1: key
  //  x2: receiver
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ Ldr(x10, FieldMemOperand(key, HeapObject::kMapOffset));
  __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(x10, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = VectorStoreICDescriptor::VectorRegister();
  Register slot = VectorStoreICDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ Mov(slot, Operand(Smi::FromInt(slot_index)));

  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
                                               receiver, key, x5, x6, x7, x8);
  // Cache miss.
  __ B(&miss);

  __ Bind(&extra);
  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].

  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(ls, &slow);

  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ B(eq, &fast_object_grow);
  __ Cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ B(eq, &fast_double_grow);
  __ B(&slow);


  __ Bind(&array);
  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
  __ B(lo, &slow);

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  DCHECK(!AreAliased(receiver, name, StoreDescriptor::ValueRegister(), x3, x4,
                     x5, x6));

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
                                               receiver, name, x3, x4, x5, x6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register value = StoreDescriptor::ValueRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register dictionary = x5;
  DCHECK(!AreAliased(value, receiver, name,
                     VectorStoreICDescriptor::SlotRegister(),
                     VectorStoreICDescriptor::VectorRegister(), x5, x6, x7));

  __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x6, x7);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, x6, x7);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ Bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, x6, x7);
  GenerateMiss(masm);
}


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return al;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address info_address = Assembler::return_address_from_call_start(address);

  InstructionSequence* patch_info = InstructionSequence::At(info_address);
  return patch_info->IsInlineData();
}


// Activate a SMI fast-path by patching the instructions generated by
// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
// JumpPatchSite::EmitPatchInfo().
void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  // The patch information is encoded in the instruction stream using
  // instructions which have no side effects, so we can safely execute them.
  // The patch information is encoded directly after the call to the helper
  // function which is requesting this patch operation.
  Address info_address = Assembler::return_address_from_call_start(address);
  InlineSmiCheckInfo info(info_address);

  // Check and decode the patch information instruction.
  if (!info.HasSmiCheck()) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  Patching ic at %p, marker=%p, SMI check=%p\n", address,
           info_address, reinterpret_cast<void*>(info.SmiCheck()));
  }

  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
  // and JumpPatchSite::EmitJumpIfSmi().
  // Changing
  //   tb(n)z xzr, #0, <target>
  // to
  //   tb(!n)z test_reg, #0, <target>
  Instruction* to_patch = info.SmiCheck();
  PatchingAssembler patcher(isolate, to_patch, 1);
  DCHECK(to_patch->IsTestBranch());
  DCHECK(to_patch->ImmTestBranchBit5() == 0);
  DCHECK(to_patch->ImmTestBranchBit40() == 0);

  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagMask == 1);
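  // With a zero smi tag, bit 0 of a value distinguishes smis (bit clear) from
  // heap object pointers (bit set), so a single tbz/tbnz on bit 0 implements
  // the whole smi check.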

  int branch_imm = to_patch->ImmTestBranch();
  Register smi_reg;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(to_patch->Rt() == xzr.code());
    smi_reg = info.SmiRegister();
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(to_patch->Rt() != xzr.code());
    smi_reg = xzr;
  }

  if (to_patch->Mask(TestBranchMask) == TBZ) {
    // This is JumpIfNotSmi(smi_reg, branch_imm).
    patcher.tbnz(smi_reg, 0, branch_imm);
  } else {
    DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
    // This is JumpIfSmi(smi_reg, branch_imm).
    patcher.tbz(smi_reg, 0, branch_imm);
  }
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64