// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM

#include "src/arm/assembler-arm.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/ic-inl.h"
#include "src/runtime.h"
#include "src/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)
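// ACCESS_MASM lets the generators below write "__ instr(...)" as shorthand
// for "masm->instr(...)".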


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ b(eq, global_object);
  __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ b(eq, global_object);
  __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
  __ b(eq, global_object);
}


// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
                                                Register receiver,
                                                Register elements,
                                                Register t0,
                                                Register t1,
                                                Label* miss) {
  // Register usage:
  //   receiver: holds the receiver on entry and is unchanged.
  //   elements: holds the property dictionary on fall through.
  // Scratch registers:
  //   t0: used to hold the receiver map.
  //   t1: used to hold the receiver instance type, receiver bit mask and
  //       elements map.

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the receiver is a valid JS object.
  __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE);
  __ b(lt, miss);
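  // (CompareObjectType leaves the receiver map in t0 and the instance type
  //  in t1; both are relied on below.)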

  // If this assert fails, we have to check upper bound too.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);

  GenerateGlobalInstanceTypeCheck(masm, t1, miss);

  // Check that the receiver does not require access checks and does not have
  // a named interceptor.
  __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
  __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
                     (1 << Map::kHasNamedInterceptor)));
  __ b(ne, miss);

  __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(t1, ip);
  __ b(ne, miss);
}


// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as elements or name clobbering
//           one of these in the case of not jumping to the miss label.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  //   scratch1: Used as temporary and to hold the capacity of the property
  //             dictionary.
  //   scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry, check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
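  // Each dictionary entry is a (key, value, details) triple, so the value
  // lives one pointer past the entry start and the details smi two pointers
  // past it.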
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ b(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  //   scratch1: Used as temporary and to hold the capacity of the property
  //             dictionary.
  //   scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
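  // The details word is a smi, hence the shift by kSmiTagSize: any set type
  // bit (a non-NORMAL property) or the READ_ONLY attribute bit sends the
  // store to the miss label.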
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
  __ b(ne, miss);

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(
      elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map,
                                           Register scratch,
                                           int interceptor_bit,
                                           Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ tst(scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ b(ne, slow);
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ cmp(scratch, Operand(JS_OBJECT_TYPE));
  __ b(lt, slow);
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register scratch1,
                                  Register scratch2,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* out_of_range) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // elements - holds the elements of the receiver on exit.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // scratch1 - used to hold elements map and elements length.
  //            Holds the elements map if not_fast_array branch is taken.
  //
  // scratch2 - used to hold the loaded value.

  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    __ cmp(scratch1, ip);
    __ b(ne, not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }
  // Check that the key (index) is within bounds.
  __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(scratch1));
  __ b(hs, out_of_range);
  // Fast case: Do the load.
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch2, ip);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ b(eq, out_of_range);
  __ mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm,
                                 Register key,
                                 Register map,
                                 Register hash,
                                 Label* index_string,
                                 Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
  __ b(hi, not_unique);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
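  // Equality means the type is exactly LAST_UNIQUE_NAME_TYPE, the one
  // non-string name type (a symbol), which is unique by construction.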
  __ b(eq, &unique);

  // Is the string an array index, with cached numeric value?
  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ b(eq, index_string);

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ tst(hash, Operand(kIsNotInternalizedMask));
  __ b(ne, not_unique);

  __ bind(&unique);
}


void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, r0, r2, r3, r4, r5, r6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  // -----------------------------------
  Label miss, slow;

  GenerateNameDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);

  // r1: elements
  GenerateDictionaryLoad(masm, &slow, r1, r2, r0, r3, r4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);

  // Cache miss: Jump to runtime.
  __ bind(&miss);
  GenerateMiss(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);

  __ mov(r3, r0);
  __ Push(r3, r2);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  // -----------------------------------

  __ mov(r3, r0);
  __ Push(r3, r2);

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object,
                                                Register key,
                                                Register scratch1,
                                                Register scratch2,
                                                Register scratch3,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the map check
  // later, we do not need to check for interceptors or whether it
  // requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
  __ b(lt, slow_case);

  // Check that the key is a positive smi.
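  // (0x80000001 covers both the sign bit and the smi tag bit; the tst is
  //  zero only for a non-negative smi.)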
  __ tst(key, Operand(0x80000001));
  __ b(ne, slow_case);

  // Load the elements into scratch1 and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);

  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup with the parameter map in scratch1.
  __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
  __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
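  // (The first two elements of the parameter map are the context and the
  //  arguments backing store, so the number of mapped entries is length - 2.)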
  __ cmp(key, Operand(scratch2));
  __ b(cs, unmapped_case);

  // Load element index and check whether it is the hole.
  const int kOffset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

  __ mov(scratch3, Operand(kPointerSize >> 1));
  __ mul(scratch3, key, scratch3);
  __ add(scratch3, scratch3, Operand(kOffset));

  __ ldr(scratch2, MemOperand(scratch1, scratch3));
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch2, scratch3);
  __ b(eq, unmapped_case);

  // Load value from context and return it. We can reuse scratch1 because
  // we do not jump to the unmapped lookup (which requires the parameter
  // map in scratch1).
  __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ mov(scratch3, Operand(kPointerSize >> 1));
  __ mul(scratch3, scratch2, scratch3);
  __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
  return MemOperand(scratch1, scratch3);
}


static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map. The parameter_map register
  // must be loaded with the parameter map of the arguments object and is
  // overwritten.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
              DONT_DO_SMI_CHECK);
  __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ cmp(key, Operand(scratch));
  __ b(cs, slow_case);
  __ mov(scratch, Operand(kPointerSize >> 1));
  __ mul(scratch, key, scratch);
  __ add(scratch,
         scratch,
         Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  return MemOperand(backing_store, scratch);
}


void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------
  Label slow, notin;
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, &notin, &slow);
  __ ldr(r0, mapped_location);
  __ Ret();
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in r2.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow);
  __ ldr(r2, unmapped_location);
  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
  __ cmp(r2, r3);
  __ b(eq, &slow);
  __ mov(r0, r2);
  __ Ret();
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow, notin;
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
  __ str(r0, mapped_location);
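  // Reconstruct the absolute slot address (the MemOperand above is base r3
  // plus index r5) so the write barrier can record the store.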
  __ add(r6, r3, r5);
  __ mov(r9, r0);
  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in r3.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
  __ str(r0, unmapped_location);
  __ add(r6, r3, r4);
  __ mov(r9, r0);
  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);

  __ Push(r1, r0);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------

  __ Push(r1, r0);

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = r0;
  Register receiver = r1;

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(r2, r3, &check_number_dictionary);

  GenerateFastArrayLoad(
      masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r2, r3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));

  // Check whether the elements is a number dictionary.
  // r0: key
  // r3: elements map
  // r4: elements
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r3, ip);
  __ b(ne, &slow);
  __ SmiUntag(r2, r0);
  __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
  __ Ret();

  // Slow case, key and receiver still in r0 and r1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
                      1, r2, r3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, r2, r3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.
  __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r4, ip);
  __ b(eq, &probe_dictionary);

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
  __ ldr(r4, FieldMemOperand(r0, Name::kHashFieldOffset));
  __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(r3, r3, Operand(mask));

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);

  __ mov(r4, Operand(cache_keys));
  __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
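  // (Each cache entry is a two-pointer (map, name) pair, hence the extra
  //  shift by one when scaling the bucket index into a byte offset.)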

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // Load map and move r4 to next entry.
    __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
    __ cmp(r2, r5);
    __ b(ne, &try_next_entry);
    __ ldr(r5, MemOperand(r4, -kPointerSize));  // Load name
    __ cmp(r0, r5);
    __ b(eq, &hit_on_nth_entry[i]);
    __ bind(&try_next_entry);
  }

  // Last entry: Load map and move r4 to name.
  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
  __ cmp(r2, r5);
  __ b(ne, &slow);
  __ ldr(r5, MemOperand(r4));
  __ cmp(r0, r5);
  __ b(ne, &slow);

  // Get field offset.
  // r0     : key
  // r1     : receiver
  // r2     : receiver's map
  // r3     : lookup cache index
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ mov(r4, Operand(cache_field_offsets));
    if (i != 0) {
      __ add(r3, r3, Operand(i));
    }
    __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
    __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
    __ sub(r5, r5, r6, SetCC);
    __ b(ge, &property_array_property);
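    // The cached field offset counts in-object properties first: after
    // subtracting the in-object property count, a non-negative result
    // indexes into the property array, while a negative one is rebased
    // against the instance size below to address an in-object field.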
    if (i != 0) {
      __ jmp(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ bind(&load_in_object_property);
  __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
  __ add(r6, r6, r5);  // Index from start of object.
  __ sub(r1, r1, Operand(kHeapObjectTag));  // Remove the heap tag.
  __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, r2, r3);
  __ Ret();

  // Load property array property.
  __ bind(&property_array_property);
  __ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset));
  __ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, r2, r3);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // r1: receiver
  // r0: key
  // r3: elements
  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
  // Load the property to r0.
  GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
  __ IncrementCounter(
      isolate->counters()->keyed_load_generic_symbol(), 1, r2, r3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(r3, key);
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key (index)
  //  -- r1     : receiver
  // -----------------------------------
  Label miss;

  Register receiver = r1;
  Register index = r0;
  Register scratch = r3;
  Register result = r0;

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------
  Label slow;

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(r1, &slow);

  // Check that the key is an array index, that is Uint32.
  __ NonNegativeSmiTst(r0);
  __ b(ne, &slow);

  // Get the map of the receiver.
  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));

  // Check that it has an indexed interceptor and that access checks
  // are not enabled for this object.
  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
  __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
  __ b(ne, &slow);

  // Everything is fine, call runtime.
  __ Push(r1, r0);  // Receiver, key.

  // Perform tail call to the entry.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
                        masm->isolate()),
      2,
      1);

  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(r2, r1, r0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r2     : key
  //  -- r1     : receiver
  //  -- lr     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(r1, r2, r0);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(r2, r1, r0);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictMode strict_mode) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(r2, r1, r0);

  __ mov(r1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
  __ mov(r0, Operand(Smi::FromInt(strict_mode)));  // Strict mode.
  __ Push(r1, r0);

  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object,
    Label* fast_double,
    Label* slow,
    KeyedStoreCheckMap check_map,
    KeyedStoreIncrementLength increment_length,
    Register value,
    Register key,
    Register receiver,
    Register receiver_map,
    Register elements_map,
    Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch_value = r4;
  Register address = r5;
  if (check_map == kCheckMap) {
    __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ b(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(scratch_value,
         MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
  __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
  __ b(ne, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
  __ str(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch_value, value);  // Preserve the value which is returned.
  __ RecordWrite(elements,
                 address,
                 scratch_value,
                 kLRHasNotBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
    __ b(ne, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so,
  // go to the runtime.
  __ add(address, elements,
         Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32))
                 - kHeapObjectTag));
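  // (The double hole is a NaN with a fixed bit pattern; comparing the upper
  //  32 bits, addressed via the sizeof(kHoleNanLower32) skew above, is
  //  enough to identify it.)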
  __ ldr(scratch_value,
         MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
  __ cmp(scratch_value, Operand(kHoleNanUpper32));
  __ b(ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
  __ b(ne, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS,
                                         receiver_map,
                                         r4,
                                         slow);
  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3.
  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
                                                    FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         r4,
                                         slow);
  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3.
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
                                                                   slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         r4,
                                         slow);
  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3.
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Register usage.
  Register value = r0;
  Register key = r1;
  Register receiver = r2;
  Register receiver_map = r3;
  Register elements_map = r6;
  Register elements = r9;  // Elements array of the receiver.
  // r4 and r5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &slow);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not observed.
  // The generic stub does not perform map checks or handle observed objects.
  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
  __ b(ne, &slow);
  // Check if the object is a JS array or not.
  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ cmp(r4, Operand(JS_ARRAY_TYPE));
  __ b(eq, &array);
  // Check that the object is some kind of JSObject.
  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
  __ b(lt, &slow);

  // Object case: Check key against length in the elements array.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(lo, &fast_object);

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r0: value.
  // r1: key.
  // r2: receiver.
  GenerateRuntimeSetProperty(masm, strict_mode);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ b(ne, &slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &slow);
  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_array_map()));
  __ b(ne, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ b(ne, &slow);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is the length is always a smi.
  __ bind(&array);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &extra);

  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : receiver
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Get the receiver from the stack and probe the stub cache.
  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);

  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, r1, r2, r3, r4, r5, r6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : receiver
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  __ Push(r1, r2, r0);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : receiver
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------
  Label miss;

  GenerateNameDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);

  GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(),
                      1, r4, r5);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
  GenerateMiss(masm);
}


void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictMode strict_mode) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : receiver
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  __ Push(r1, r2, r0);

  __ mov(r1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
  __ mov(r0, Operand(Smi::FromInt(strict_mode)));
  __ Push(r1, r0);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  return Assembler::IsCmpImmediate(instr);
}


void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  if (!Assembler::IsCmpImmediate(instr)) {
    return;
  }

  // The delta to the start of the map check instruction and the
  // condition code used at the patched jump.
  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
  delta +=
      Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
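  // (The full code generator splits the delta between the cmp's 12-bit
  //  immediate and its register code; recombine the two parts here.)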
  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
  // nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
           address, cmp_instruction_address, delta);
  }

  Address patch_address =
      cmp_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   cmp rx, rx
  //   b eq/ne, <target>
  // to
  //   tst rx, #kSmiTagMask
  //   b ne/eq, <target>
  // and vice-versa to be disabled again.
  CodePatcher patcher(patch_address, 2);
  Register reg = Assembler::GetRn(instr_at_patch);
  if (check == ENABLE_INLINED_SMI_CHECK) {
    ASSERT(Assembler::IsCmpRegister(instr_at_patch));
    ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
              Assembler::GetRm(instr_at_patch).code());
    patcher.masm()->tst(reg, Operand(kSmiTagMask));
  } else {
    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
    ASSERT(Assembler::IsTstImmediate(instr_at_patch));
    patcher.masm()->cmp(reg, reg);
  }
  ASSERT(Assembler::IsBranch(branch_instr));
  if (Assembler::GetCondition(branch_instr) == eq) {
    patcher.EmitCondition(ne);
  } else {
    ASSERT(Assembler::GetCondition(branch_instr) == ne);
    patcher.EmitCondition(eq);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM